sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
Dao-AILab/flash-attention:tests/cute/test_flash_attn.py | # Copyright (c) 2025, Jay Shah, Ganesh Bikshandi, Ying Zhang, Vijay Thakkar, Pradeep Ramani, Tri Dao.
import math
import itertools
import os
import random
import pytest
import torch
from einops import rearrange, repeat
try:
from flash_attn.layers.rotary import apply_rotary_emb
except ImportError:
apply_rotary_emb = None
from flash_attn.cute.testing import (
attention_ref,
generate_qkv,
generate_random_padding_mask,
pad_input,
unpad_input,
maybe_fake_tensor_mode,
is_fake_mode,
)
from flash_attn.cute.interface import (
flash_attn_func,
flash_attn_varlen_func,
flash_attn_combine,
)
# torch FakeTensorMode would enable fast cutedsl kernel compilation without allocating the actual GPU memory or running the kernel
# When operating fake tensors, we cannot perform data-dependent operations (e.g., `tensor.max()`).
USE_FAKE_TENSOR = int(os.getenv("FLASH_ATTENTION_FAKE_TENSOR", 0)) == 1
# Environment opt-out for the SplitKV (num_splits > 1) test configurations.
DISABLE_SPLIT = os.getenv("FLASH_ATTENTION_DISABLE_SPLIT", "FALSE") == "TRUE"
# SplitKV and paged KV are not supported on SM90
# NOTE(review): evaluated at import time, so collecting this module requires a visible CUDA device.
IS_SM90 = torch.cuda.get_device_capability()[0] == 9
# When True, the tests below restrict forward-path sweeps (pack_gqa/num_splits) to a single config.
TEST_BWD_ONLY = False
# When True, print the coordinates and values of the largest dQ/dK/dV mismatch.
VERBOSE = True
# @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float8_e4m3fn])
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
@pytest.mark.parametrize("has_learnable_sink", [False, True])
@pytest.mark.parametrize("has_qv", [False])
@pytest.mark.parametrize("deterministic", [False])
@pytest.mark.parametrize("softcap", [0.0])
# local_enum: 0 = no local attention, 1 = both window sides, 2/3 = one-sided windows.
@pytest.mark.parametrize("local_enum", [0, 1, 2, 3])
@pytest.mark.parametrize("causal", [False, True])
@pytest.mark.parametrize("d", [64, 128])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (1, 1),
        (3, 3),
        (64, 32),
        (64, 128),
        (128, 128),
        (128, 192),
        (256, 256),
        (239, 1),
        (799, 3),
        (113, 203),
        (113, 128),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (384, 256),
        (640, 128),
        (512, 256),
        (1024, 1024),
        (1023, 1024),
        (1024, 1023),
        (2048, 2048),
        (4096, 4096),
        (4224, 4224),
    ],
)
@maybe_fake_tensor_mode(USE_FAKE_TENSOR)
def test_flash_attn_output(
    seqlen_q,
    seqlen_k,
    d,
    causal,
    local_enum,
    softcap,
    deterministic,
    has_qv,
    has_learnable_sink,
    mha_type,
    dtype,
):
    """Check flash_attn_func forward (and, where supported, backward) numerics.

    The kernel output must be within ``rtol`` x (the error of a plain-PyTorch
    implementation vs. an upcast reference) plus a small additive tolerance;
    the same bound is applied to dQ/dK/dV when the backward pass runs.
    Under FakeTensorMode only kernel compilation is exercised, so all
    data-dependent checks are skipped.
    """
    local = local_enum > 0
    if local and causal:
        pytest.skip()
    device = "cuda"
    # set seed
    seed = 0
    random.seed(seed)
    torch.random.manual_seed(seed)
    torch.cuda.empty_cache()
    torch.cuda.synchronize()
    # Smaller batch for long sequences to bound memory/runtime.
    batch_size = 9 if seqlen_k <= 2048 else 2
    nheads = 6
    nheads_kv = nheads if mha_type == "mha" else (3 if mha_type == "gqa" else 1)
    dtype_ref = torch.bfloat16 if dtype == torch.float8_e4m3fn else dtype
    # For d == 128 also exercise a smaller value head dim (dv != d).
    dv_vals = [128] if d == 192 else ([d] if d != 128 else [64, d])
    if dtype == torch.float8_e4m3fn:
        dv_vals = [d]
    attention_chunk_vals = [0]
    for dv, attention_chunk in itertools.product(dv_vals, attention_chunk_vals):
        q_ref = torch.randn(
            batch_size, seqlen_q, nheads, d, device=device, dtype=dtype_ref
        )
        if softcap > 0.0:
            # Ensure the values of qk are at least within softcap range.
            q_ref = q_ref * softcap / 4
        # Round-trip through the kernel dtype so reference and kernel see identical inputs.
        q_ref = q_ref.to(dtype).to(dtype_ref).requires_grad_()
        k_ref = (
            torch.randn(
                batch_size, seqlen_k, nheads_kv, d, device=device, dtype=dtype_ref
            )
            .to(dtype)
            .to(dtype_ref)
            .requires_grad_()
        )
        v_ref = (
            torch.randn(
                batch_size, seqlen_k, nheads_kv, dv, device=device, dtype=dtype_ref
            )
            .to(dtype)
            .to(dtype_ref)
            .requires_grad_()
        )
        if has_qv:
            qv_ref = (
                torch.randn(
                    batch_size, seqlen_q, nheads, dv, device=device, dtype=dtype_ref
                )
                .to(dtype)
                .to(dtype_ref)
            )
        else:
            qv_ref = None
        # Put window_size after QKV randn so that window_size changes from test to test
        window_size = (
            (None, None) if not local else tuple(random.randrange(0, seqlen_k) for _ in range(2))
        )
        if local_enum == 2:
            window_size = (None, -window_size[1])
        elif local_enum == 3:
            window_size = (-window_size[0], None)
        if local:
            print("window size = ", window_size)
        if has_learnable_sink:
            learnable_sink = torch.randn(nheads, dtype=torch.bfloat16, device=device)
        else:
            learnable_sink = None
        if dtype == torch.float8_e4m3fn:
            q_descale, k_descale, v_descale = [
                torch.rand(batch_size, nheads_kv, device=device, dtype=torch.float32)
                * 2
                for _ in range(3)
            ]
        else:
            q_descale, k_descale, v_descale = None, None, None
        q, k, v = [x.detach().to(dtype).requires_grad_() for x in (q_ref, k_ref, v_ref)]
        qv = qv_ref.detach().to(dtype).requires_grad_() if has_qv else None
        # Upcast reference (ground truth).
        out_ref, attn_ref = attention_ref(
            q_ref,
            k_ref,
            v_ref,
            None,
            None,
            causal=causal,
            qv=qv_ref,
            q_descale=q_descale,
            k_descale=k_descale,
            v_descale=v_descale,
            window_size=window_size,
            attention_chunk=attention_chunk,
            learnable_sink=learnable_sink,
            softcap=softcap,
        )
        # Non-upcast reference: yardstick for acceptable low-precision error.
        out_pt, attn_pt = attention_ref(
            q_ref,
            k_ref,
            v_ref,
            None,
            None,
            causal=causal,
            qv=qv_ref,
            q_descale=q_descale,
            k_descale=k_descale,
            v_descale=v_descale,
            window_size=window_size,
            attention_chunk=attention_chunk,
            learnable_sink=learnable_sink,
            softcap=softcap,
            upcast=False,
            reorder_ops=True,
            intermediate_dtype=dtype if dtype == torch.float8_e4m3fn else None,
        )
        # Numerical error if we just do any arithmetic on out_ref
        if not is_fake_mode():
            fwd_atol = 2 * (out_ref + 0.3 - 0.3 - out_ref).abs().max().item()
            rtol = 2 if softcap == 0.0 else 3
            print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
            print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
        pack_gqa_vals = [False, True, None] if not TEST_BWD_ONLY else [False]
        # SplitKV is not supported for hdim >= 192
        num_splits_vals = [1, 3] if d < 192 and not DISABLE_SPLIT and not TEST_BWD_ONLY else [1]
        for pack_gqa, num_splits in itertools.product(pack_gqa_vals, num_splits_vals):
            # SplitKV not supported on SM90 - skip this iteration
            if IS_SM90 and num_splits > 1:
                continue
            out, lse = flash_attn_func(
                q,
                k,
                v,
                causal=causal,
                window_size=window_size,
                softcap=softcap,
                learnable_sink=learnable_sink,
                pack_gqa=pack_gqa,
                num_splits=num_splits,
                deterministic=deterministic,
            )
            if is_fake_mode():
                # no more flash_attn cutedsl calls for the rest of the loop
                # skip data-dependent postprocessing
                continue
            print(f"Output max diff: {(out - out_ref).abs().max().item()}")
            print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
            # Check that FlashAttention's numerical error is at most twice the numerical error
            # of a Pytorch implementation.
            assert (out - out_ref).abs().max().item() <= rtol * (
                out_pt - out_ref
            ).abs().max().item() + fwd_atol
        # Backward only for configurations the backward kernels support.
        # `out` from the last inner-loop iteration is reused below.
        if (
            dtype != torch.float8_e4m3fn
            and not has_qv
            and not dv > 256
            and not attention_chunk != 0
            and softcap == 0.0
            and ((dv == d and d <= 128) or (d == 192 and dv == 128))
            and learnable_sink is None
            and not ((causal or local) and seqlen_k < seqlen_q)
        ):
            # TODO: SM90 backward pass has invalid MMA tile config for d=64 + non-causal
            # The m_block_size=80 (non-causal) with head_dim=64 creates an invalid tile.
            # Fix requires adjusting m_block_size or MMA config in flash_bwd_sm90.py
            if IS_SM90 and d == 64 and not causal:
                pytest.xfail("SM90 backward: d=64 + non-causal has invalid MMA tile config (m_block=80)")
            # TODO: SM90 backward pass does not support local attention yet
            if IS_SM90 and local:
                pytest.xfail("SM90 backward: local attention not supported yet")
            if d == 192 and local:
                pytest.xfail("hdim 192 backward: local attention not supported yet")
            g = torch.randn_like(out)
            # NOTE: rebinds the loop variable `dv` (head dim) to the gradient tensor;
            # harmless since the itertools.product iterator was materialized up front.
            dq, dk, dv = torch.autograd.grad(out, (q, k, v), g)
            if is_fake_mode():
                # no more flash_attn cutedsl calls for the rest of the loop
                # skip data-dependent postprocessing
                continue
            dq_ref, dk_ref, dv_ref = torch.autograd.grad(
                out_ref, (q_ref, k_ref, v_ref), g
            )
            dq_pt, dk_pt, dv_pt = torch.autograd.grad(out_pt, (q_ref, k_ref, v_ref), g)
            print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
            print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
            print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
            print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
            print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
            print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
            print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
            print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
            print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
            print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
            print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
            print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")
            if VERBOSE:
                # Print where the worst mismatch lives, to ease kernel debugging.
                diff_dq = (dq - dq_ref).abs()
                max_idx = diff_dq.argmax()
                coords = torch.unravel_index(max_idx, diff_dq.shape)
                print(f"dQ max diff: {diff_dq.max().item()}")
                print(f"  at coordinates {tuple(c.item() for c in coords)}: dQ={dq[coords].item()}, dQ_ref={dq_ref[coords].item()}")
                diff_dk = (dk - dk_ref).abs()
                max_idx = diff_dk.argmax()
                coords = torch.unravel_index(max_idx, diff_dk.shape)
                print(f"dK max diff: {diff_dk.max().item()}")
                print(f"  at coordinates {tuple(c.item() for c in coords)}: dK={dk[coords].item()}, dK_ref={dk_ref[coords].item()}")
                diff_dv = (dv - dv_ref).abs()
                max_idx = diff_dv.argmax()
                coords = torch.unravel_index(max_idx, diff_dv.shape)
                print(f"dV max diff: {diff_dv.max().item()}")
                print(f"  at coordinates {tuple(c.item() for c in coords)}: dV={dv[coords].item()}, dV_ref={dv_ref[coords].item()}")
            dq_atol = 2 * (dq_ref + 0.3 - 0.3 - dq_ref).abs().max().item() + (
                0 if softcap == 0 else 3e-4
            )
            assert (dq - dq_ref).abs().max().item() <= rtol * (
                dq_pt - dq_ref
            ).abs().max().item() + dq_atol
            dk_atol = 2 * (dk_ref + 0.3 - 0.3 - dk_ref).abs().max().item() + (
                0 if softcap == 0 else 3e-4
            )
            assert (dk - dk_ref).abs().max().item() <= rtol * (
                dk_pt - dk_ref
            ).abs().max().item() + dk_atol
            dv_atol = 2 * (dv_ref + 0.3 - 0.3 - dv_ref).abs().max().item() + (
                0 if softcap == 0 else 3e-4
            )
            assert (dv - dv_ref).abs().max().item() <= rtol * (
                dv_pt - dv_ref
            ).abs().max().item() + dv_atol
# @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float8_e4m3fn])
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
@pytest.mark.parametrize("has_learnable_sink", [False])
@pytest.mark.parametrize("has_qv", [False])
@pytest.mark.parametrize("deterministic", [False])
@pytest.mark.parametrize("softcap", [0.0])
# local_enum: 0 = no local attention, 1 = both window sides, 2/3 = one-sided windows.
@pytest.mark.parametrize("local_enum", [0, 1, 2, 3])
@pytest.mark.parametrize("causal", [False, True])
@pytest.mark.parametrize("add_unused_qkv", [False])
@pytest.mark.parametrize("d", [64, 128])
@pytest.mark.parametrize(
    "seqlen_q,seqlen_k",
    [
        (511, 1),
        (3, 513),
        (64, 128),
        (128, 128),
        (256, 256),
        (113, 203),
        (128, 217),
        (113, 211),
        (108, 256),
        (256, 512),
        (307, 256),
        (640, 128),
        (512, 256),
        (1024, 1024),
        (1023, 1024),
        (1024, 1023),
        (2048, 2048),
    ],
)
@pytest.mark.parametrize("varlen_mode", ["random", "third", "full"])
@pytest.mark.parametrize(
    "zero_lengths_q, zero_lengths_k",
    [
        (False, False),
        (True, False),
        (False, True),
        (True, True),
    ],
)
@pytest.mark.parametrize(
    "unpad_q, unpad_kv",
    [
        (True, True),
        (False, False),
        (True, False),
        (False, True),
    ],
)
@maybe_fake_tensor_mode(USE_FAKE_TENSOR)
def test_flash_attn_varlen_output(
    seqlen_q,
    seqlen_k,
    d,
    add_unused_qkv,
    causal,
    local_enum,
    softcap,
    deterministic,
    has_qv,
    has_learnable_sink,
    mha_type,
    dtype,
    varlen_mode,
    zero_lengths_q,
    zero_lengths_k,
    unpad_q,
    unpad_kv,
):
    """Check flash_attn_varlen_func numerics against the padded reference.

    Exercises the four combinations of cu_seqlens (unpadded/packed input) vs.
    seqused (padded input with per-sequence lengths) for Q and KV, including
    zero-length sequences and optional unused-token masks.  Tolerances are the
    same relative-to-PyTorch bounds as in test_flash_attn_output.  Under
    FakeTensorMode only kernel compilation is exercised.
    """
    local = local_enum > 0
    if local and causal:
        pytest.skip()
    if (
        causal or local
    ):  # Right now reference only supports causal attention with seqlen_k == seqlen_q
        seqlen_k = seqlen_q
    device = "cuda"
    # set seed; varied per-config so padding masks / window sizes differ across cases
    seed = seqlen_q + seqlen_k + d + int(causal) * 2 + int(local)
    random.seed(seed)
    torch.random.manual_seed(seed)
    batch_size = 49 if seqlen_q <= 1024 else 7
    nheads = 6
    nheads_kv = nheads if mha_type == "mha" else (3 if mha_type == "gqa" else 1)
    dtype_ref = torch.bfloat16 if dtype == torch.float8_e4m3fn else dtype
    # For d == 128 also exercise a smaller value head dim (dv != d).
    dv_vals = [128] if d == 192 else ([d] if d != 128 else [64, d])
    if dtype == torch.float8_e4m3fn:
        dv_vals = [d]
    attention_chunk_vals = [0]
    for dv, attention_chunk in itertools.product(dv_vals, attention_chunk_vals):
        q_ref = torch.randn(
            batch_size, seqlen_q, nheads, d, device=device, dtype=dtype_ref
        )
        if softcap > 0.0:
            # Ensure the values of qk are at least within softcap range.
            q_ref = (q_ref * softcap / 4).detach().requires_grad_()
        # Round-trip through the kernel dtype so reference and kernel see identical inputs.
        q_ref = q_ref.to(dtype).to(dtype_ref).requires_grad_()
        k_ref = (
            torch.randn(
                batch_size, seqlen_k, nheads_kv, d, device=device, dtype=dtype_ref
            )
            .to(dtype)
            .to(dtype_ref)
            .requires_grad_()
        )
        v_ref = (
            torch.randn(
                batch_size, seqlen_k, nheads_kv, dv, device=device, dtype=dtype_ref
            )
            .to(dtype)
            .to(dtype_ref)
            .requires_grad_()
        )
        if has_qv:
            qv_ref = (
                torch.randn(
                    batch_size, seqlen_q, nheads, dv, device=device, dtype=dtype_ref
                )
                .to(dtype)
                .to(dtype_ref)
            )
        else:
            qv_ref = None
        # Put window_size after QKV randn so that window_size changes from test to test
        window_size = (
            (None, None) if not local else tuple(random.randrange(0, seqlen_k) for _ in range(2))
        )
        # NOTE(review): unlike test_flash_attn_output, the one-sided window here is
        # NOT negated — confirm this asymmetry is intentional.
        if local_enum == 2:
            window_size = (None, window_size[1])
        elif local_enum == 3:
            window_size = (window_size[0], None)
        if local:
            print("window size = ", window_size)
        if has_learnable_sink:
            learnable_sink = torch.randn(nheads, dtype=torch.bfloat16, device=device)
        else:
            learnable_sink = None
        if dtype == torch.float8_e4m3fn:
            q_descale, k_descale, v_descale = [
                torch.rand(batch_size, nheads_kv, device=device, dtype=torch.float32)
                * 2
                for _ in range(3)
            ]
        else:
            q_descale, k_descale, v_descale = None, None, None
        q, k, v = [x.detach().requires_grad_() for x in (q_ref, k_ref, v_ref)]
        qv = qv_ref.detach() if has_qv else None
        query_padding_mask = generate_random_padding_mask(
            seqlen_q,
            batch_size,
            device,
            mode=varlen_mode,
            zero_lengths=zero_lengths_q,
        )
        key_padding_mask = generate_random_padding_mask(
            seqlen_k,
            batch_size,
            device,
            mode=varlen_mode,
            zero_lengths=zero_lengths_k,
        )

        def _gen_unused_masks(padding_mask, add_unused, max_seq_len, bs, device):
            # Split the padding mask into attended tokens vs. allocated-but-unused
            # tokens, so seqused can differ from the physical layout.
            if add_unused:
                another_mask = generate_random_padding_mask(max_seq_len, bs, device)
                attn_mask = torch.logical_and(padding_mask, another_mask)
                unused_mask = torch.logical_xor(
                    torch.logical_or(padding_mask, another_mask), attn_mask
                )
            else:
                attn_mask = padding_mask
                unused_mask = None
            return attn_mask, unused_mask

        query_padding_mask, query_unused_mask = _gen_unused_masks(
            query_padding_mask, add_unused_qkv, seqlen_q, batch_size, q.device
        )
        key_padding_mask, key_unused_mask = _gen_unused_masks(
            key_padding_mask, add_unused_qkv, seqlen_k, batch_size, k.device
        )
        if causal or local:
            key_padding_mask = query_padding_mask
        (
            q_unpad,
            k_unpad,
            v_unpad,
            qv_unpad,
            cu_seqlens_q,
            cu_seqlens_k,
            seqused_q,
            seqused_k,
            max_seqlen_q,
            max_seqlen_k,
            q,
            k,
            v,
            qv,
            output_pad_fn,
            dq_pad_fn,
            dk_pad_fn,
        ) = generate_qkv(
            q,
            k,
            v,
            query_padding_mask,
            key_padding_mask,
            qv=qv,
            kvpacked=False,
            query_unused_mask=query_unused_mask,
            key_unused_mask=key_unused_mask,
        )
        if unpad_q:
            print("cu_seqlens_q = ", cu_seqlens_q)
        else:
            print("seqused_q = ", seqused_q)
        if unpad_kv:
            print("cu_seqlens_k = ", cu_seqlens_k)
        else:
            print("seqused_k = ", seqused_k)
        q_unpad, k_unpad, v_unpad = [
            x.detach().to(dtype).requires_grad_() for x in (q_unpad, k_unpad, v_unpad)
        ]
        # Upcast reference (ground truth).
        out_ref, attn_ref = attention_ref(
            q_ref,
            k_ref,
            v_ref,
            query_padding_mask,
            key_padding_mask,
            causal=causal,
            qv=qv_ref,
            q_descale=q_descale,
            k_descale=k_descale,
            v_descale=v_descale,
            window_size=window_size,
            attention_chunk=attention_chunk,
            learnable_sink=learnable_sink,
            softcap=softcap,
        )
        # Non-upcast reference: yardstick for acceptable low-precision error.
        out_pt, attn_pt = attention_ref(
            q_ref,
            k_ref,
            v_ref,
            query_padding_mask,
            key_padding_mask,
            causal=causal,
            qv=qv_ref,
            q_descale=q_descale,
            k_descale=k_descale,
            v_descale=v_descale,
            window_size=window_size,
            attention_chunk=attention_chunk,
            learnable_sink=learnable_sink,
            softcap=softcap,
            upcast=False,
            reorder_ops=True,
            intermediate_dtype=dtype if dtype == torch.float8_e4m3fn else None,
        )
        if not is_fake_mode():
            print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
            print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
            if query_unused_mask is not None:
                q_zero_masking = rearrange(query_unused_mask, "b s -> b s 1 1")
            # Numerical error if we just do any arithmetic on out_ref
            fwd_atol = 2 * (out_ref + 0.3 - 0.3 - out_ref).abs().max().item()
            rtol = 2 if softcap == 0.0 else 3
        pack_gqa_vals = [False, True, None] if not TEST_BWD_ONLY else [False]
        # SplitKV is not supported for hdim >= 192
        num_splits_vals = [1, 3] if d < 192 and not DISABLE_SPLIT and not TEST_BWD_ONLY else [1]
        for pack_gqa, num_splits in itertools.product(pack_gqa_vals, num_splits_vals):
            # SplitKV not supported on SM90 - skip this iteration
            if IS_SM90 and num_splits > 1:
                continue
            out_unpad, lse = flash_attn_varlen_func(
                q_unpad if unpad_q else q,
                k_unpad if unpad_kv else k,
                v_unpad if unpad_kv else v,
                cu_seqlens_q=cu_seqlens_q if unpad_q else None,
                cu_seqlens_k=cu_seqlens_k if unpad_kv else None,
                max_seqlen_q=seqlen_q,
                max_seqlen_k=seqlen_k,
                seqused_q=seqused_q if not unpad_q else None,
                seqused_k=seqused_k if not unpad_kv else None,
                causal=causal,
                window_size=window_size,
                learnable_sink=learnable_sink,
                softcap=softcap,
                num_splits=num_splits,
                pack_gqa=pack_gqa,
                deterministic=deterministic,
            )
            out = output_pad_fn(out_unpad) if unpad_q else out_unpad
            if is_fake_mode():
                # no more flash_attn cutedsl calls for the rest of the loop
                # skip data-dependent postprocessing
                continue
            if query_unused_mask is not None:
                out.masked_fill_(q_zero_masking, 0.0)
            print(f"Output max diff: {(out - out_ref).abs().max().item()}")
            print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
            # Check that FlashAttention's numerical error is at most rtol x the
            # numerical error of a Pytorch implementation.
            assert (out - out_ref).abs().max().item() <= rtol * (
                out_pt - out_ref
            ).abs().max().item() + fwd_atol
        # Backward only for configurations the varlen backward supports.
        # `out_unpad` from the last inner-loop iteration is reused below.
        if (
            dtype != torch.float8_e4m3fn
            and not has_qv
            and not dv > 256
            and not attention_chunk != 0
            and ((dv == d and d <= 128) or (d == 192 and dv == 128))
            and not has_learnable_sink
            and not IS_SM90
        ):
            if d == 192 and local:
                pytest.xfail("hdim 192 backward: local attention not supported yet")
            g_unpad = torch.randn_like(out_unpad)
            # NOTE: rebinds the loop variable `dv` (head dim) to the gradient below;
            # harmless since the itertools.product iterator was materialized up front.
            dq_unpad, dk_unpad, dv_unpad = torch.autograd.grad(
                out_unpad,
                (
                    q_unpad if unpad_q else q,
                    k_unpad if unpad_kv else k,
                    v_unpad if unpad_kv else v,
                ),
                g_unpad
            )
            if is_fake_mode():
                # no more flash_attn cutedsl calls for the rest of the loop
                # skip data-dependent postprocessing
                continue
            dq = dq_pad_fn(dq_unpad) if unpad_q else dq_unpad
            # dk_pad_fn pads along the kv sequence, so it is reused for dV
            # (generate_qkv only returns q/k pad fns) — matches upstream usage.
            dk = dk_pad_fn(dk_unpad) if unpad_kv else dk_unpad
            dv = dk_pad_fn(dv_unpad) if unpad_kv else dv_unpad
            if key_unused_mask is not None:
                k_zero_masking = rearrange(key_unused_mask, "b s -> b s 1 1")
                dk.masked_fill_(k_zero_masking, 0.0)
                dv.masked_fill_(k_zero_masking, 0.0)
            if query_unused_mask is not None:
                dq.masked_fill_(q_zero_masking, 0.0)
            # For padded (seqused) inputs the kernel may leave garbage in padding
            # positions; zero them before comparing against the reference.
            if not unpad_kv:
                dk.masked_fill_(rearrange(~key_padding_mask, "b s -> b s 1 1"), 0.0)
                dv.masked_fill_(rearrange(~key_padding_mask, "b s -> b s 1 1"), 0.0)
            if not unpad_q:
                dq.masked_fill_(rearrange(~query_padding_mask, "b s -> b s 1 1"), 0.0)
            g = output_pad_fn(g_unpad) if unpad_q else g_unpad
            dq_ref, dk_ref, dv_ref = torch.autograd.grad(
                out_ref, (q_ref, k_ref, v_ref), g
            )
            dq_pt, dk_pt, dv_pt = torch.autograd.grad(out_pt, (q_ref, k_ref, v_ref), g)
            print(f"dQ max diff: {(dq - dq_ref).abs().max().item()}")
            print(f"dK max diff: {(dk - dk_ref).abs().max().item()}")
            print(f"dV max diff: {(dv - dv_ref).abs().max().item()}")
            print(f"dQ mean diff: {(dq - dq_ref).abs().mean().item()}")
            print(f"dK mean diff: {(dk - dk_ref).abs().mean().item()}")
            print(f"dV mean diff: {(dv - dv_ref).abs().mean().item()}")
            print(f"dQ Pytorch max diff: {(dq_pt - dq_ref).abs().max().item()}")
            print(f"dK Pytorch max diff: {(dk_pt - dk_ref).abs().max().item()}")
            print(f"dV Pytorch max diff: {(dv_pt - dv_ref).abs().max().item()}")
            print(f"dQ Pytorch mean diff: {(dq_pt - dq_ref).abs().mean().item()}")
            print(f"dK Pytorch mean diff: {(dk_pt - dk_ref).abs().mean().item()}")
            print(f"dV Pytorch mean diff: {(dv_pt - dv_ref).abs().mean().item()}")
            if VERBOSE:
                # Print where the worst mismatch lives, to ease kernel debugging.
                diff_dq = (dq - dq_ref).abs()
                max_idx = diff_dq.argmax()
                coords = torch.unravel_index(max_idx, diff_dq.shape)
                print(f"dQ max diff: {diff_dq.max().item()}")
                print(f"  at coordinates {tuple(c.item() for c in coords)}: dQ={dq[coords].item()}, dQ_ref={dq_ref[coords].item()}")
                diff_dk = (dk - dk_ref).abs()
                max_idx = diff_dk.argmax()
                coords = torch.unravel_index(max_idx, diff_dk.shape)
                print(f"dK max diff: {diff_dk.max().item()}")
                print(f"  at coordinates {tuple(c.item() for c in coords)}: dK={dk[coords].item()}, dK_ref={dk_ref[coords].item()}")
                diff_dv = (dv - dv_ref).abs()
                max_idx = diff_dv.argmax()
                coords = torch.unravel_index(max_idx, diff_dv.shape)
                print(f"dV max diff: {diff_dv.max().item()}")
                print(f"  at coordinates {tuple(c.item() for c in coords)}: dV={dv[coords].item()}, dV_ref={dv_ref[coords].item()}")
            dq_atol = 2 * (dq_ref + 0.3 - 0.3 - dq_ref).abs().max().item() + (
                0 if softcap == 0 else 3e-4
            )
            assert (dq - dq_ref).abs().max().item() <= rtol * (
                dq_pt - dq_ref
            ).abs().max().item() + dq_atol
            dk_atol = 2 * (dk_ref + 0.3 - 0.3 - dk_ref).abs().max().item() + (
                0 if softcap == 0 else 3e-4
            )
            assert (dk - dk_ref).abs().max().item() <= rtol * (
                dk_pt - dk_ref
            ).abs().max().item() + dk_atol
            dv_atol = 2 * (dv_ref + 0.3 - 0.3 - dv_ref).abs().max().item() + (
                0 if softcap == 0 else 3e-4
            )
            assert (dv - dv_ref).abs().max().item() <= rtol * (
                dv_pt - dv_ref
            ).abs().max().item() + dv_atol
# @pytest.mark.parametrize("dtype", [torch.float16, torch.bfloat16, torch.float8_e4m3fn])
@pytest.mark.parametrize("dtype", [torch.bfloat16])
# @pytest.mark.parametrize("dtype", [torch.float8_e4m3fn])
@pytest.mark.parametrize("mha_type", ["mha", "mqa", "gqa"])
# @pytest.mark.parametrize("mha_type", ["mha"])
@pytest.mark.parametrize("has_learnable_sink", [False, True])
# @pytest.mark.parametrize("has_learnable_sink", [False])
# @pytest.mark.parametrize("new_kv", [False, True])
@pytest.mark.parametrize("new_kv", [False])
@pytest.mark.parametrize("local", [False, True])
# @pytest.mark.parametrize("local", [False])
@pytest.mark.parametrize("causal", [False, True])
# @pytest.mark.parametrize("causal", [True])
# @pytest.mark.parametrize("seqlen_new_eq_seqlen_q", [True, False])
@pytest.mark.parametrize("seqlen_new_eq_seqlen_q", [False])
# @pytest.mark.parametrize("has_rotary_seqlens", [False, True])
@pytest.mark.parametrize("has_rotary_seqlens", [False])
# @pytest.mark.parametrize("rotary_interleaved", [False, True])
@pytest.mark.parametrize("rotary_interleaved", [True])
# @pytest.mark.parametrize("rotary_fraction", [0.0, 0.5, 1.0])
@pytest.mark.parametrize("rotary_fraction", [0.0])
@pytest.mark.parametrize("page_size", [None] + ([1, 4, 128]))
# @pytest.mark.parametrize("page_size", [None, 128])
# @pytest.mark.parametrize("page_size", [128])
# @pytest.mark.parametrize("has_leftpad", [False, True])
@pytest.mark.parametrize("has_leftpad", [False])
# @pytest.mark.parametrize("has_batch_idx", [False, True])
@pytest.mark.parametrize("has_batch_idx", [False])
@pytest.mark.parametrize("varlen_q", [False, True])
# @pytest.mark.parametrize("varlen_q", [False])
# @pytest.mark.parametrize("d", [32, 59, 64, 80, 128, 256])
# @pytest.mark.parametrize("d", [32, 64, 96, 128, 160, 192, 224, 256])
# @pytest.mark.parametrize('d', [32, 40, 64, 80, 96, 128, 160, 192])
# @pytest.mark.parametrize('d', [56, 80])
# @pytest.mark.parametrize("d", [128])
@pytest.mark.parametrize("d", [64, 128])
# @pytest.mark.parametrize("d", [192])
@pytest.mark.parametrize(
"seqlen_q,seqlen_k",
[
(1, 128),
(1, 339),
(3, 1024),
(64, 800),
(64, 256),
(3, 799),
(64, 2048),
(16, 20000),
# # (1, 128 * 1024),
# # (16, 128 * 1024),
# (128, 128),
# (256, 512), # To test appending KV with more than 1 block
# (2048, 3577), # Enough tile to test persistent scheduler
],
)
# @pytest.mark.parametrize('seqlen_q,seqlen_k', [(256, 128)])
@maybe_fake_tensor_mode(USE_FAKE_TENSOR)
def test_flash_attn_kvcache(
seqlen_q,
seqlen_k,
d,
varlen_q,
has_batch_idx,
has_leftpad,
page_size,
rotary_fraction,
rotary_interleaved,
has_rotary_seqlens,
seqlen_new_eq_seqlen_q,
causal,
local,
new_kv,
has_learnable_sink,
mha_type,
dtype,
):
if page_size is not None and seqlen_k % page_size != 0:
pytest.skip()
if page_size is not None and IS_SM90:
pytest.xfail("paged KV not supported on SM90")
if seqlen_q > seqlen_k and new_kv:
pytest.skip()
if not new_kv and rotary_fraction > 0.0:
pytest.skip()
if rotary_fraction == 0.0 and has_rotary_seqlens:
pytest.skip()
device = "cuda"
# set seed
seed = 0
random.seed(seed)
torch.random.manual_seed(seed)
batch_size = 5
# batch_size = 1
batch_size_cache = batch_size if not has_batch_idx else batch_size * 2
nheads = 6
# nheads = 1
# rotary_dim must be a multiple of 16, and must be <= d
rotary_dim = math.floor(int(rotary_fraction * d) / 16) * 16
nheads_k = nheads if mha_type == "mha" else (1 if mha_type == "mqa" else 3)
assert nheads % nheads_k == 0
dtype_ref = torch.bfloat16 if dtype == torch.float8_e4m3fn else dtype
# dv_vals = [128, d] if d > 128 and d <= 192 else ([256, 512, d] if d <= 64 else [d])
dv_vals = [d]
if dtype == torch.float8_e4m3fn:
dv_vals = [d]
# attention_chunk_vals = [torch.randint(1, seqlen_k * 2, (1,)).item(), 0] if (causal or local) else [0]
attention_chunk_vals = [0]
for dv, attention_chunk in itertools.product(dv_vals, attention_chunk_vals):
# has_qv = d == 64 and dv >= 256
has_qv = False
q = (
torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype_ref)
.to(dtype)
.to(dtype_ref)
)
if has_qv:
qv = (
torch.randn(
batch_size, seqlen_q, nheads, dv, device=device, dtype=dtype_ref
)
.to(dtype)
.to(dtype_ref)
)
else:
qv = None
if varlen_q:
query_padding_mask = generate_random_padding_mask(
seqlen_q, batch_size, device, mode="random"
)
q_unpad, indices_q, cu_seqlens_q, max_seqlen_q, *rest = unpad_input(
q, query_padding_mask
)
output_pad_fn = lambda output_unpad: pad_input(
output_unpad, indices_q, batch_size, seqlen_q
)
qv_unpad = (
rearrange(qv, "b s ... -> (b s) ...")[indices_q] if has_qv else None
)
else:
query_padding_mask = None
q_unpad = q
qv_unpad = qv
cu_seqlens_q, max_seqlen_q = None, None
# Put window_size after QKV randn so that window_size changes from test to test
window_size = (
(None, None) if not local else tuple(random.randrange(0, seqlen_k) for _ in range(2))
)
if has_learnable_sink:
learnable_sink = torch.randn(nheads, dtype=torch.bfloat16, device=device)
else:
learnable_sink = None
seqlen_new = (
seqlen_q
if seqlen_new_eq_seqlen_q
else random.randrange(1, seqlen_q + 1)
)
cu_seqlens_k_new = None
key_new_padding_mask = None
if new_kv:
k = (
torch.randn(
batch_size, seqlen_new, nheads_k, d, device=device, dtype=dtype_ref
)
.to(dtype)
.to(dtype_ref)
)
v = (
torch.randn(
batch_size, seqlen_new, nheads_k, dv, device=device, dtype=dtype_ref
)
.to(dtype)
.to(dtype_ref)
)
if varlen_q: # k & v are also varlen
key_new_padding_mask = generate_random_padding_mask(
seqlen_new, batch_size, device, mode="random"
)
k_unpad, indices_k, cu_seqlens_k_new, *rest = unpad_input(
k, key_new_padding_mask
)
v_unpad, *rest = unpad_input(v, key_new_padding_mask)
else:
k_unpad, v_unpad = k, v
else:
k, v, k_unpad, v_unpad = None, None, None, None
if page_size is None:
k_cache = (
torch.randn(
batch_size_cache,
seqlen_k,
nheads_k,
d,
device=device,
dtype=dtype_ref,
)
.to(dtype)
.to(dtype_ref)
)
v_cache = (
torch.randn(
batch_size_cache,
seqlen_k,
nheads_k,
dv,
device=device,
dtype=dtype_ref,
)
.to(dtype)
.to(dtype_ref)
)
page_table = None
else:
(
k_cache,
v_cache,
page_table,
k_cache_paged,
v_cache_paged,
num_blocks,
) = _generate_block_kvcache(
seqlen_k,
page_size,
batch_size_cache,
nheads_k,
d,
dv,
device,
dtype,
dtype_ref,
)
if not is_fake_mode():
cache_seqlens = torch.randint(
0 if new_kv else 1,
# If we don't use seqlen_q in the case of causal and rotary, cos/sin won't be long enough
(
(
seqlen_k
- (seqlen_q if (causal or local) and rotary_dim > 1 else seqlen_new)
+ 1
)
if new_kv
else (seqlen_k + 1)
),
(batch_size,),
dtype=torch.int32,
device=device,
)
else:
cache_seqlens = torch.ones(
batch_size,
dtype=torch.int32,
device=device,
)
if has_leftpad:
if not is_fake_mode():
cache_leftpad = torch.cat(
[
torch.randint(
0,
cache_seqlens[i].item(),
(1,),
dtype=torch.int32,
device=device,
)
if cache_seqlens[i].item() > 0
else torch.zeros(1, dtype=torch.int32, device=device)
for i in range(batch_size)
]
)
else:
cache_leftpad = torch.zeros(batch_size, dtype=torch.int32, device=device)
else:
cache_leftpad = None
if has_batch_idx:
if not is_fake_mode():
cache_batch_idx = torch.randperm(
batch_size_cache, dtype=torch.int32, device=device
)[:batch_size]
else:
cache_batch_idx = torch.arange(
batch_size, dtype=torch.int32, device=device
)
else:
cache_batch_idx = None
arange = rearrange(torch.arange(seqlen_k, device=device), "s -> 1 s")
cache_seqlens_expanded = rearrange(cache_seqlens, "b -> b 1")
if not new_kv:
key_padding_mask = arange < cache_seqlens_expanded
else:
k_new_seqlens = (
key_new_padding_mask.sum(-1, keepdims=True) if varlen_q else seqlen_new
)
key_padding_mask = arange < cache_seqlens_expanded + k_new_seqlens
if has_leftpad:
key_padding_mask = torch.logical_and(
key_padding_mask,
arange >= cache_leftpad.unsqueeze(-1).expand(-1, seqlen_k),
)
# cache_seqlens = torch.tensor([64], dtype=torch.int32, device=device)
rotary_seqlens = cache_seqlens if not has_rotary_seqlens else cache_seqlens // 2
if rotary_dim > 0:
angle = (
torch.rand(
seqlen_k if page_size is None else num_blocks * page_size,
rotary_dim // 2,
device=device,
)
* 2
* math.pi
)
cos = torch.cos(angle).to(dtype=dtype_ref).to(dtype).to(dtype_ref)
sin = torch.sin(angle).to(dtype=dtype_ref).to(dtype).to(dtype_ref)
if causal or local:
q_ro = apply_rotary_emb(
q,
cos,
sin,
seqlen_offsets=rotary_seqlens,
interleaved=rotary_interleaved,
)
else:
q_ro = rearrange(
apply_rotary_emb(
rearrange(q, "b s h d -> b 1 (s h) d"),
cos,
sin,
seqlen_offsets=rotary_seqlens,
interleaved=rotary_interleaved,
),
"b 1 (s h) d -> b s h d",
s=seqlen_q,
)
# q_ro = q
k_ro = apply_rotary_emb(
k,
cos,
sin,
seqlen_offsets=rotary_seqlens,
interleaved=rotary_interleaved,
)
else:
cos, sin = None, None
q_ro, k_ro = q, k
# k_cache[:, 64:] = -1
k_cache_ref = (
k_cache if not has_batch_idx else k_cache[cache_batch_idx]
).clone()
v_cache_ref = (
v_cache if not has_batch_idx else v_cache[cache_batch_idx]
).clone()
if new_kv:
update_mask = torch.logical_and(
cache_seqlens_expanded <= arange,
arange < cache_seqlens_expanded + k_new_seqlens,
)
k_to_update = rearrange(k_ro, "b s ... -> (b s) ...")
v_to_update = rearrange(v, "b s ... -> (b s) ...")
if varlen_q:
k_to_update = k_to_update[indices_k]
v_to_update = v_to_update[indices_k]
k_cache_ref[update_mask] = k_to_update
v_cache_ref[update_mask] = v_to_update
k_cache_rep = repeat(
k_cache_ref, "b s h d -> b s (h g) d", g=nheads // nheads_k
)
v_cache_rep = repeat(
v_cache_ref, "b s h d -> b s (h g) d", g=nheads // nheads_k
)
out_ref, _ = attention_ref(
q_ro,
k_cache_rep,
v_cache_rep,
query_padding_mask,
key_padding_mask,
causal=causal,
qv=qv,
window_size=window_size,
learnable_sink=learnable_sink,
attention_chunk=attention_chunk,
key_leftpad=cache_leftpad,
)
out_pt, _ = attention_ref(
q_ro,
k_cache_rep,
v_cache_rep,
query_padding_mask,
key_padding_mask,
causal=causal,
qv=qv,
window_size=window_size,
learnable_sink=learnable_sink,
attention_chunk=attention_chunk,
upcast=False,
reorder_ops=True,
key_leftpad=cache_leftpad,
intermediate_dtype=dtype if dtype == torch.float8_e4m3fn else None,
)
q = q.to(dtype)
q_unpad = q_unpad.to(dtype) if varlen_q else None
k_cache = k_cache.to(dtype)
v_cache = v_cache.to(dtype)
k_cache_paged = k_cache_paged.to(dtype) if page_size is not None else None
v_cache_paged = v_cache_paged.to(dtype) if page_size is not None else None
k = k.to(dtype) if k is not None else None
v = v.to(dtype) if v is not None else None
k_unpad = k_unpad.to(dtype) if k_unpad is not None else None
v_unpad = v_unpad.to(dtype) if v_unpad is not None else None
qv = qv.to(dtype) if qv is not None else None
qv_unpad = qv_unpad.to(dtype) if (varlen_q and qv is not None) else None
cos = cos.to(dtype) if cos is not None else None
sin = sin.to(dtype) if sin is not None else None
k_cache_saved = k_cache.clone() if page_size is None else k_cache_paged.clone()
v_cache_saved = v_cache.clone() if page_size is None else v_cache_paged.clone()
# num_splits_vals = [1, 0]
# SplitKV is not supported for hdim >= 192
num_splits_vals = [1, 3] if d < 192 and not DISABLE_SPLIT else [1]
# precompute_metadata_vals = [False, True]
precompute_metadata_vals = [False]
for num_splits, precompute_metadata in itertools.product(
num_splits_vals, precompute_metadata_vals
):
# SplitKV not supported on SM90 - skip this iteration
if IS_SM90 and num_splits > 1:
continue
# if precompute_metadata:
# scheduler_metadata = get_scheduler_metadata(
# batch_size, max_seqlen_q if varlen_q else seqlen_q, seqlen_k, nheads, nheads_k, d,
# cache_seqlens, q.dtype, headdim_v=dv, cu_seqlens_q=cu_seqlens_q,
# cu_seqlens_k_new=cu_seqlens_k_new, cache_leftpad=cache_leftpad,
# max_seqlen_k_new=seqlen_new, page_size=page_size,
# causal=causal, window_size=window_size, attention_chunk=attention_chunk,
# num_splits=num_splits
# )
# else:
# scheduler_metadata = None
scheduler_metadata = None
# Repeat to test metadata reuse
for _ in range(1 if not precompute_metadata else 2):
if page_size is None:
k_cache.copy_(k_cache_saved)
v_cache.copy_(v_cache_saved)
else:
k_cache_paged.copy_(k_cache_saved)
v_cache_paged.copy_(v_cache_saved)
# out, lse, *rest = flash_attn_with_kvcache(
out, lse, *rest = flash_attn_varlen_func(
q if not varlen_q else q_unpad,
k_cache if page_size is None else k_cache_paged,
v_cache if page_size is None else v_cache_paged,
# k if not new_kv or not varlen_q else k_unpad,
# v if not new_kv or not varlen_q else v_unpad,
# qv=qv if not varlen_q else qv_unpad,
# rotary_cos=cos,
# rotary_sin=sin,
seqused_k=cache_seqlens,
# cache_batch_idx=cache_batch_idx,
# cache_leftpad=cache_leftpad,
page_table=page_table,
cu_seqlens_q=cu_seqlens_q,
# cu_seqlens_k_new=cu_seqlens_k_new,
# rotary_seqlens=rotary_seqlens,
causal=causal,
window_size=window_size,
learnable_sink=learnable_sink,
# attention_chunk=attention_chunk,
# rotary_interleaved=rotary_interleaved,
# scheduler_metadata=scheduler_metadata,
num_splits=num_splits,
# return_softmax_lse=True
)
if varlen_q:
out = output_pad_fn(out)
if is_fake_mode():
# no more flash_attn cutedsl calls for the rest of the loop
# skip data-dependent postprocessing
continue
# out = flash_attn_with_kvcache(
# q, k_cache, v_cache, cache_seqlens=cache_seqlens, causal=causal, window_size=window_size
# )
# out = flash_attn_with_kvcache(q, k_cache, v_cache, causal=causal, window_size=window_size)
# qk = torch.einsum("bqhd,bkhd->bhqk", q, k_cache_ref)
# m = qk.amax(-1, keepdim=True)
# s_tmp = torch.exp((qk - m) / math.sqrt(d))
# o1 = torch.einsum('bhst,bthd->bshd', s_tmp, v_cache_ref)
# lse_ref = torch.logsumexp(qk / math.sqrt(d), -1)
# probs = torch.softmax(qk, dim=-1)
print(f"Output max diff: {(out - out_ref).abs().max().item()}")
print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
# breakpoint()
# Check that FlashAttention's numerical error is at most twice the numerical error
# of a Pytorch implementation.
if new_kv:
if page_size is None:
k_cache_select = (
k_cache.to(dtype_ref)
if not has_batch_idx
else k_cache.to(dtype_ref)[cache_batch_idx]
)
v_cache_select = (
v_cache.to(dtype_ref)
if not has_batch_idx
else v_cache.to(dtype_ref)[cache_batch_idx]
)
else:
k_cache_select = rearrange(
k_cache_paged.to(dtype_ref)[
(
page_table
if not has_batch_idx
else page_table[cache_batch_idx]
).flatten()
],
"(b nblocks) block_size ... -> b (nblocks block_size) ...",
b=batch_size,
)[:, :seqlen_k].to(dtype_ref)
v_cache_select = rearrange(
v_cache_paged.to(dtype_ref)[
(
page_table
if not has_batch_idx
else page_table[cache_batch_idx]
).flatten()
],
"(b nblocks) block_size ... -> b (nblocks block_size) ...",
b=batch_size,
)[:, :seqlen_k].to(dtype_ref)
k_cache_ref = k_cache_ref.to(dtype).to(dtype_ref)
v_cache_ref = v_cache_ref.to(dtype).to(dtype_ref)
if dtype is not torch.float8_e4m3fn:
assert torch.equal(v_cache_select, v_cache_ref)
else:
assert torch.allclose(
v_cache_select, v_cache_ref, rtol=1e-3, atol=1e-3
)
# breakpoint()
# if rotary_dim == 0 and dtype is not torch.float8_e4m3fn:
if rotary_dim == 0:
assert torch.equal(k_cache_select, k_cache_ref)
else:
# if not torch.allclose(k_cache_select, k_cache_ref, rtol=1e-3, atol=1e-3):
# breakpoint()
if dtype is not torch.float8_e4m3fn:
assert torch.allclose(
k_cache_select, k_cache_ref, rtol=1e-3, atol=1e-3
)
else:
assert torch.allclose(
k_cache_select, k_cache_ref, rtol=1e-1, atol=1e-1
)
mult = 4 if dtype == torch.float8_e4m3fn else 2
assert (out - out_ref).abs().max().item() <= mult * (
out_pt - out_ref
).abs().max().item() + 1e-5
mult_mean = 3 if dtype == torch.float8_e4m3fn else 1.5
assert (out - out_ref).abs().mean().item() <= mult_mean * (
out_pt - out_ref
).abs().mean().item()
@pytest.mark.parametrize("dtype", [torch.bfloat16])
@pytest.mark.parametrize("causal", [False, True])
@pytest.mark.parametrize("d", [64, 128])
@pytest.mark.parametrize("seqlen_q,seqlen_k", [(128, 128), (256, 256)])
@maybe_fake_tensor_mode(USE_FAKE_TENSOR)
def test_flash_attn_bwd_preallocated_outputs(seqlen_q, seqlen_k, d, causal, dtype):
    """Check that _flash_attn_bwd writes into caller-provided dq/dk/dv buffers.

    Runs the backward pass twice -- once letting it allocate its own gradient
    tensors, once with preallocated outputs -- and asserts the preallocated
    tensors are returned by identity and hold the same values as the reference.
    """
    if IS_SM90 and d == 64 and not causal:
        pytest.xfail("SM90 backward: d=64 + non-causal has invalid MMA tile config (m_block=80)")
    from flash_attn.cute.interface import _flash_attn_fwd, _flash_attn_bwd
    device = "cuda"
    torch.random.manual_seed(42)
    batch_size = 2
    nheads = 4
    q = torch.randn(batch_size, seqlen_q, nheads, d, device=device, dtype=dtype, requires_grad=True)
    k = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    v = torch.randn(batch_size, seqlen_k, nheads, d, device=device, dtype=dtype, requires_grad=True)
    out, lse = _flash_attn_fwd(q, k, v, causal=causal, return_lse=True)
    dout = torch.randn_like(out)
    # Reference run: backward allocates its own gradient tensors.
    dq_ref, dk_ref, dv_ref = _flash_attn_bwd(q, k, v, out, dout, lse, causal=causal)
    # Second run: supply preallocated output buffers via dq/dk/dv kwargs.
    dq = torch.empty_like(q)
    dk = torch.empty_like(k)
    dv = torch.empty_like(v)
    dq_out, dk_out, dv_out = _flash_attn_bwd(
        q, k, v, out, dout, lse, causal=causal, dq=dq, dk=dk, dv=dv
    )
    if is_fake_mode():
        # Fake-tensor mode only exercises compilation; nothing to compare.
        return
    # Identity checks: the provided buffers themselves must be returned, not copies.
    assert dq_out is dq
    assert dk_out is dk
    assert dv_out is dv
    assert torch.allclose(dq, dq_ref, atol=1e-5, rtol=1e-5)
    assert torch.allclose(dk, dk_ref, atol=1e-5, rtol=1e-5)
    assert torch.allclose(dv, dv_ref, atol=1e-5, rtol=1e-5)
def _generate_block_kvcache(
    seqlen_k, page_size, batch_size, nheads_k, d, dv, device, dtype, dtype_ref
):
    """Build a paged KV cache plus its contiguous (gathered) reference view.

    Allocates 3x more pages than strictly required and shuffles them through a
    random page table so that page indices are non-trivial. Returns the
    contiguous k/v caches, the page table, the paged caches, and the total
    page count.
    """
    num_blocks = math.ceil(seqlen_k / page_size) * batch_size * 3

    def _alloc(head_dim):
        # Round-trip through the target dtype so the reference copy carries
        # exactly the precision the kernel will read.
        pages = torch.randn(
            num_blocks, page_size, nheads_k, head_dim, device=device, dtype=dtype_ref
        )
        return pages.to(dtype).to(dtype_ref)

    k_cache_paged = _alloc(d)
    v_cache_paged = _alloc(dv)
    page_table = rearrange(
        torch.randperm(num_blocks, dtype=torch.int32, device=device),
        "(b nblocks) -> b nblocks",
        b=batch_size,
    )

    def _gather(paged):
        # Contiguous per-batch view of the paged cache, truncated to seqlen_k.
        contiguous = rearrange(
            paged[page_table.flatten()],
            "(b nblocks) block_size ... -> b (nblocks block_size) ...",
            b=batch_size,
        )
        return contiguous[:, :seqlen_k]

    return (
        _gather(k_cache_paged),
        _gather(v_cache_paged),
        page_table,
        k_cache_paged,
        v_cache_paged,
        num_blocks,
    )
def attention_combine_ref(out_partial, lse_partial):
    """Reference combine of split-attention partial results.

    out_partial: (num_splits, batch_size, seqlen, nheads, d)
    lse_partial: (num_splits, batch_size, seqlen, nheads)

    Returns the softmax-weighted combination of the partial outputs along the
    split dimension, together with the combined log-sum-exp.
    """
    combined_lse = torch.logsumexp(lse_partial, dim=0)
    # Per-split weights; splits whose LSE is -inf contribute nothing, so the
    # resulting inf/nan weights are mapped to zero.
    weights = torch.nan_to_num(
        (lse_partial - combined_lse).exp(), nan=0.0, posinf=0.0
    )
    combined_out = (weights[..., None] * out_partial).sum(dim=0)
    return combined_out, combined_lse
@pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16])
# @pytest.mark.parametrize("dtype", [torch.float32])
# @pytest.mark.parametrize("d", [32, 40, 59, 64, 80, 96, 111, 128, 160, 192, 224, 256])
@pytest.mark.parametrize("d", [64, 96, 128, 192, 256, 512])
# @pytest.mark.parametrize("d", [128])
@pytest.mark.parametrize("seqlen", [1, 2, 3, 32, 64, 256, 113, 108, 640, 1024])
# @pytest.mark.parametrize("seqlen", [12, 32, 64, 256, 112, 108, 640, 1024, 2048, 8192])
# @pytest.mark.parametrize("seqlen", [15])
@pytest.mark.parametrize("num_splits", [1, 2, 3, 5, 17, 32, 55, 97, 133])
# @pytest.mark.parametrize("num_splits", [1, 2, 3, 5, 11])
# @pytest.mark.parametrize("num_splits", [11])
@maybe_fake_tensor_mode(USE_FAKE_TENSOR)
def test_flash_attn_combine(num_splits, seqlen, d, dtype):
    """Check flash_attn_combine against attention_combine_ref.

    Feeds non-contiguous partial outputs/LSEs (built by transposing larger
    tensors), marks some splits empty via -inf LSE rows, and verifies both the
    combined output and LSE, with and without return_lse.
    """
    device = "cuda"
    # set seed
    torch.random.manual_seed(1)
    batch_size = 5
    nheads = 16
    # batch_size = 1
    # nheads = 1
    # Create tensors in the expected format: (num_splits, batch_size, seqlen, nheads, d) and (num_splits, batch_size, seqlen, nheads)
    out_partial = torch.randn(
        num_splits * 2,
        batch_size,
        nheads,
        seqlen,
        d,
        device=device,
        dtype=torch.float32,
    ).transpose(2, 3)[:num_splits] # To test non-contiguous tensor
    lse_partial = torch.randn(
        num_splits, batch_size, nheads * 2, seqlen, device=device, dtype=torch.float32
    ).transpose(-1, -2)[:, :, :, :nheads] # To test non-contiguous tensor
    # To test short-circuiting based on num_splits
    lse_partial[num_splits // 2 :, : batch_size // 3] = -float("inf")
    # Test with LSE returned (default behavior)
    out, lse = flash_attn_combine(
        out_partial, lse_partial, out_dtype=dtype, return_lse=True
    )
    if is_fake_mode():
        # Fake-tensor mode only exercises compilation; skip the numeric checks.
        return
    out_ref, lse_ref = attention_combine_ref(out_partial, lse_partial)
    # out_pt models the error of simply casting the fp32 reference to dtype.
    out_pt = out_ref.to(dtype)
    print(f"LSE max diff: {(lse - lse_ref).abs().max().item()}")
    print(f"LSE mean diff: {(lse - lse_ref).abs().mean().item()}")
    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"Pytorch max diff: {(out_pt - out_ref).abs().max().item()}")
    print(f"Pytorch mean diff: {(out_pt - out_ref).abs().mean().item()}")
    # breakpoint()
    assert torch.allclose(lse, lse_ref, atol=1e-5, rtol=1e-5)
    # Error budget: at most 2x the pure-cast error, or a near-exact match to the cast.
    multiple = 2
    assert (
        (out - out_ref).abs().max().item()
        <= multiple * (out_pt - out_ref).abs().max().item()
    ) or torch.allclose(out, out_pt, atol=1e-5, rtol=1e-5)
    # Test with LSE not returned
    out_no_lse, lse_no_lse = flash_attn_combine(
        out_partial, lse_partial, out_dtype=dtype, return_lse=False
    )
    assert lse_no_lse is None, "LSE should be None when return_lse=False"
    assert torch.allclose(out_no_lse, out, atol=1e-5, rtol=1e-5), (
        "Output should be the same regardless of return_lse"
    )
| {
"repo_id": "Dao-AILab/flash-attention",
"file_path": "tests/cute/test_flash_attn.py",
"license": "BSD 3-Clause \"New\" or \"Revised\" License",
"lines": 1527,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Delgan/loguru:tests/exceptions/source/backtrace/missing_lineno_frame_objects.py | import sys
from collections import namedtuple
from loguru import logger
logger.remove()  # drop the default handler so only the one configured below is active
logger.add(
    sys.stderr,
    format="{line}: {message}",  # NOTE(review): output embeds line numbers -- keep this file's line count stable
    colorize=False,
    backtrace=True,
    diagnose=False,
)
# Regression since CPython 3.10: the `lineno` can be `None`: https://github.com/python/cpython/issues/89726
fake_code = namedtuple("fake_code", ("co_filename", "co_name"))
fake_frame = namedtuple("fake_frame", ("f_back", "f_code", "f_globals", "f_lineno", "f_locals"))
fake_traceback = namedtuple("fake_traceback", ("tb_frame", "tb_lasti", "tb_lineno", "tb_next"))
def make_fake(tb):  # recursively clone `tb` as namedtuples with every line number forced to None
    if not tb:
        return None
    code = fake_code(tb.tb_frame.f_code.co_filename, tb.tb_frame.f_code.co_name)
    frame = fake_frame(None, code, {}, None, {})  # f_lineno=None: the CPython >=3.10 case under test
    tb = fake_traceback(frame, tb.tb_lasti, None, make_fake(tb.tb_next))  # tb_lineno=None too
    return tb
def a():  # deepest frame: raises ZeroDivisionError
    1 / 0
def b():  # intermediate frame so the traceback has some depth
    a()
try:
    b()
except Exception as e:
    type_, value, tb = sys.exc_info()
    tb = make_fake(tb)  # swap the real traceback for one whose linenos are all None
    logger.opt(exception=(type_, value, tb)).error("An error occurred")
| {
"repo_id": "Delgan/loguru",
"file_path": "tests/exceptions/source/backtrace/missing_lineno_frame_objects.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Fosowl/agenticSeek:tests/test_logger.py | import unittest
import os
import sys
import shutil
import logging
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from sources.logger import Logger
class TestLogger(unittest.TestCase):
    """Test suite for the Logger class."""

    def setUp(self):
        self.logger = Logger("test_logger.log")

    def tearDown(self):
        # Always close and detach the handlers, even if the .logs directory
        # was removed by a test -- otherwise file handles leak across tests.
        # (Previously this cleanup was skipped whenever '.logs' did not exist.)
        for handler in self.logger.logger.handlers[:]:
            handler.close()
            self.logger.logger.removeHandler(handler)
        log_path = os.path.join('.logs', 'test_logger.log')
        if os.path.exists(log_path):
            os.remove(log_path)

    def test_initialization(self):
        """Test logger initializes correctly."""
        self.assertTrue(self.logger.enabled)
        self.assertIsNotNone(self.logger.logger)
        self.assertTrue(os.path.exists('.logs'))

    def test_log_creates_file(self):
        """Test that logging creates a log file."""
        self.logger.info("test message")
        self.assertTrue(os.path.exists(self.logger.log_path))

    def test_log_writes_message(self):
        """Test that log messages are written to file."""
        self.logger.info("hello world")
        with open(self.logger.log_path, 'r') as f:
            content = f.read()
        self.assertIn("hello world", content)

    def test_log_deduplication(self):
        """Test that consecutive identical messages are not duplicated."""
        self.logger.info("duplicate message")
        self.logger.info("duplicate message")
        with open(self.logger.log_path, 'r') as f:
            content = f.read()
        self.assertEqual(content.count("duplicate message"), 1)

    def test_log_different_messages(self):
        """Test that different messages are all written."""
        self.logger.info("message one")
        self.logger.info("message two")
        with open(self.logger.log_path, 'r') as f:
            content = f.read()
        self.assertIn("message one", content)
        self.assertIn("message two", content)

    def test_error_level(self):
        """Test error level logging."""
        self.logger.error("error occurred")
        with open(self.logger.log_path, 'r') as f:
            content = f.read()
        self.assertIn("ERROR", content)
        self.assertIn("error occurred", content)

    def test_warning_level(self):
        """Test warning level logging."""
        self.logger.warning("warning issued")
        with open(self.logger.log_path, 'r') as f:
            content = f.read()
        self.assertIn("WARNING", content)
        self.assertIn("warning issued", content)

    def test_create_folder(self):
        """Test folder creation."""
        test_path = ".test_log_folder"
        result = self.logger.create_folder(test_path)
        self.assertTrue(result)
        self.assertTrue(os.path.exists(test_path))
        os.rmdir(test_path)

    def test_create_folder_already_exists(self):
        """Test folder creation when folder already exists."""
        result = self.logger.create_folder('.logs')
        self.assertTrue(result)
if __name__ == '__main__':  # allow running this test module directly
    unittest.main()
| {
"repo_id": "Fosowl/agenticSeek",
"file_path": "tests/test_logger.py",
"license": "GNU General Public License v3.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Fosowl/agenticSeek:tests/test_utility.py | import unittest
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from sources.utility import get_color_map
class TestUtility(unittest.TestCase):
    """Test suite for utility module functions."""

    def test_get_color_map_returns_dict(self):
        """Test that get_color_map returns a dictionary."""
        self.assertIsInstance(get_color_map(), dict)

    def test_get_color_map_has_required_keys(self):
        """Test that color map contains all required color keys."""
        color_map = get_color_map()
        for key in ("success", "failure", "status", "code", "warning", "output", "info"):
            self.assertIn(key, color_map, f"Missing key: {key}")

    def test_get_color_map_values_are_strings(self):
        """Test that all color values are strings."""
        for key, value in get_color_map().items():
            self.assertIsInstance(value, str, f"Value for '{key}' should be a string")

    def test_success_is_green(self):
        """Test that success maps to green."""
        self.assertEqual(get_color_map()["success"], "green")

    def test_failure_is_red(self):
        """Test that failure maps to red."""
        self.assertEqual(get_color_map()["failure"], "red")
if __name__ == '__main__':  # allow running this test module directly
    unittest.main()
| {
"repo_id": "Fosowl/agenticSeek",
"file_path": "tests/test_utility.py",
"license": "GNU General Public License v3.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Fosowl/agenticSeek:tests/test_chromedriver_update.py | import unittest
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from unittest.mock import patch, MagicMock
# Mock heavy dependencies so sources.browser can be imported without installing them
for mod_name in [
    'torch', 'transformers', 'kokoro', 'adaptive_classifier', 'text2emotion',
    'ollama', 'openai', 'together', 'IPython', 'IPython.display',
    'playsound3', 'soundfile', 'pyaudio', 'librosa',
    'pypdf', 'langid', 'pypinyin', 'fake_useragent',
    'num2words', 'sentencepiece', 'sacremoses',
    'scipy', 'numpy', 'selenium_stealth', 'undetected_chromedriver',
    'markdownify', 'chromedriver_autoinstaller',
]:
    if mod_name not in sys.modules:  # keep a real module if one is genuinely installed
        sys.modules[mod_name] = MagicMock()
os.environ.setdefault('WORK_DIR', '/tmp')  # presumably read at import time by the project -- TODO confirm
from sources.browser import get_chromedriver_version, is_chromedriver_compatible
class TestChromedriverVersionCheck(unittest.TestCase):
    """Test suite for ChromeDriver version checking and auto-update logic."""

    @staticmethod
    def _wire_versions(mock_driver_ver, mock_chrome_ver, driver, chrome):
        # Point the patched version probes at the given version strings.
        mock_chrome_ver.return_value = chrome
        mock_driver_ver.return_value = driver

    @patch('sources.browser.subprocess.run')
    def test_get_chromedriver_version_success(self, mock_run):
        """Test extracting major version from chromedriver --version output."""
        mock_run.return_value = MagicMock(stdout="ChromeDriver 125.0.6422.78 (abc123)\n")
        self.assertEqual(get_chromedriver_version("/usr/bin/chromedriver"), "125")

    @patch('sources.browser.subprocess.run')
    def test_get_chromedriver_version_failure(self, mock_run):
        """Test graceful failure when chromedriver --version fails."""
        mock_run.side_effect = FileNotFoundError("not found")
        self.assertEqual(get_chromedriver_version("/nonexistent"), "")

    @patch('sources.browser.subprocess.run')
    def test_get_chromedriver_version_timeout(self, mock_run):
        """Test graceful failure on timeout."""
        import subprocess

        mock_run.side_effect = subprocess.TimeoutExpired(cmd="chromedriver", timeout=10)
        self.assertEqual(get_chromedriver_version("/usr/bin/chromedriver"), "")

    @patch('sources.browser.chromedriver_autoinstaller.get_chrome_version')
    @patch('sources.browser.get_chromedriver_version')
    def test_compatible_versions(self, mock_driver_ver, mock_chrome_ver):
        """Test that matching major versions are compatible."""
        self._wire_versions(mock_driver_ver, mock_chrome_ver, "125", "125.0.6422.78")
        self.assertTrue(is_chromedriver_compatible("/usr/bin/chromedriver"))

    @patch('sources.browser.chromedriver_autoinstaller.get_chrome_version')
    @patch('sources.browser.get_chromedriver_version')
    def test_incompatible_versions(self, mock_driver_ver, mock_chrome_ver):
        """Test that mismatched major versions are incompatible."""
        self._wire_versions(mock_driver_ver, mock_chrome_ver, "125", "126.0.6478.55")
        self.assertFalse(is_chromedriver_compatible("/usr/bin/chromedriver"))

    @patch('sources.browser.chromedriver_autoinstaller.get_chrome_version')
    def test_no_chrome_version_assumes_compatible(self, mock_chrome_ver):
        """Test that missing Chrome version defaults to compatible."""
        mock_chrome_ver.return_value = None
        self.assertTrue(is_chromedriver_compatible("/usr/bin/chromedriver"))

    @patch('sources.browser.chromedriver_autoinstaller.get_chrome_version')
    @patch('sources.browser.get_chromedriver_version')
    def test_no_driver_version_assumes_compatible(self, mock_driver_ver, mock_chrome_ver):
        """Test that missing driver version defaults to compatible."""
        self._wire_versions(mock_driver_ver, mock_chrome_ver, "", "125.0.6422.78")
        self.assertTrue(is_chromedriver_compatible("/usr/bin/chromedriver"))
if __name__ == '__main__':  # allow running this test module directly
    unittest.main()
| {
"repo_id": "Fosowl/agenticSeek",
"file_path": "tests/test_chromedriver_update.py",
"license": "GNU General Public License v3.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Fosowl/agenticSeek:tests/test_planner_agent_parsing.py | import unittest
import json
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from unittest.mock import MagicMock, patch
# Mock heavy dependencies to allow import without installing them all
for mod_name in [
    'torch', 'transformers', 'kokoro', 'adaptive_classifier', 'text2emotion',
    'ollama', 'openai', 'together', 'IPython', 'IPython.display',
    'playsound3', 'soundfile', 'pyaudio', 'librosa',
    'pypdf', 'langid', 'pypinyin', 'fake_useragent',
    'chromedriver_autoinstaller', 'num2words', 'sentencepiece', 'sacremoses',
    'scipy', 'numpy', 'selenium_stealth', 'undetected_chromedriver',
    'markdownify',
]:
    if mod_name not in sys.modules:  # keep a real module if one is genuinely installed
        sys.modules[mod_name] = MagicMock()
os.environ.setdefault('WORK_DIR', '/tmp')  # presumably read at import time by the project -- TODO confirm
from sources.agents.planner_agent import PlannerAgent
class TestPlannerAgentParsing(unittest.TestCase):
    """Test suite for PlannerAgent.parse_agent_tasks JSON parsing robustness."""
    def setUp(self):
        # __new__ bypasses PlannerAgent.__init__; wire up only the attributes
        # parse_agent_tasks is expected to touch -- TODO confirm against the agent.
        self.agent = PlannerAgent.__new__(PlannerAgent)
        self.agent.tools = {"json": MagicMock()}
        self.agent.tools["json"].tag = "json"
        self.agent.logger = MagicMock()
        self.agent.agents = {
            "coder": MagicMock(),
            "file": MagicMock(),
            "web": MagicMock(),
            "casual": MagicMock()
        }
    def test_parse_valid_json(self):
        """Test that valid JSON plan is parsed correctly."""
        valid_json = '{"plan": [{"agent": "web", "id": "1", "task": "Search info", "need": []}]}'
        self.agent.tools["json"].load_exec_block.return_value = ([valid_json], None)
        with patch.object(self.agent, 'get_task_names', return_value=["Task 1: Search info"]):
            result = self.agent.parse_agent_tasks("dummy text")
        # result items look like (task_name, task_dict) pairs, judging by the indexing below.
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0][1]['agent'], 'web')
    def test_parse_malformed_json_returns_empty(self):
        """Test that malformed JSON returns empty list instead of crashing."""
        malformed_json = '{"plan": [{"agent": "web", "id": "1" "task": "missing comma"}]}'
        self.agent.tools["json"].load_exec_block.return_value = ([malformed_json], None)
        with patch.object(self.agent, 'get_task_names', return_value=[]):
            result = self.agent.parse_agent_tasks("dummy text")
        self.assertEqual(result, [])
        # A parse failure must be surfaced through the logger, not swallowed.
        self.agent.logger.warning.assert_called_once()
    def test_parse_truncated_json_returns_empty(self):
        """Test that truncated JSON returns empty list instead of crashing."""
        truncated_json = '{"plan": [{"agent": "web", "id": "1", "task": "Search'
        self.agent.tools["json"].load_exec_block.return_value = ([truncated_json], None)
        with patch.object(self.agent, 'get_task_names', return_value=[]):
            result = self.agent.parse_agent_tasks("dummy text")
        self.assertEqual(result, [])
        self.agent.logger.warning.assert_called_once()
    def test_parse_no_blocks_returns_empty(self):
        """Test that missing blocks returns empty list."""
        self.agent.tools["json"].load_exec_block.return_value = (None, None)
        with patch.object(self.agent, 'get_task_names', return_value=[]):
            result = self.agent.parse_agent_tasks("no json here")
        self.assertEqual(result, [])
    def test_parse_invalid_agent_returns_empty(self):
        """Test that an unknown agent name returns empty list."""
        valid_json = '{"plan": [{"agent": "unknown_agent", "id": "1", "task": "Do something", "need": []}]}'
        self.agent.tools["json"].load_exec_block.return_value = ([valid_json], None)
        with patch.object(self.agent, 'get_task_names', return_value=["Task 1"]):
            result = self.agent.parse_agent_tasks("dummy text")
        self.assertEqual(result, [])
    def test_parse_multiple_tasks(self):
        """Test parsing a plan with multiple tasks."""
        multi_task_json = '{"plan": [{"agent": "web", "id": "1", "task": "Search", "need": []}, {"agent": "coder", "id": "2", "task": "Code it", "need": ["1"]}]}'
        self.agent.tools["json"].load_exec_block.return_value = ([multi_task_json], None)
        with patch.object(self.agent, 'get_task_names', return_value=["Task 1: Search", "Task 2: Code it"]):
            result = self.agent.parse_agent_tasks("dummy text")
        self.assertEqual(len(result), 2)
        self.assertEqual(result[0][1]['agent'], 'web')
        self.assertEqual(result[1][1]['agent'], 'coder')
# Allow running this test module directly via `python <file>`.
if __name__ == '__main__':
    unittest.main()
| {
"repo_id": "Fosowl/agenticSeek",
"file_path": "tests/test_planner_agent_parsing.py",
"license": "GNU General Public License v3.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Fosowl/agenticSeek:tests/test_tools_parsing.py | import unittest
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from sources.tools.tools import Tools
class TestToolsParsing(unittest.TestCase):
    """
    Test suite for the Tools class parsing functionality, specifically the load_exec_block method.
    This method is responsible for extracting code blocks from LLM-generated text.
    """
    def setUp(self):
        """Set up test fixtures before each test method."""
        # Tools is abstract; stub its abstract methods with fixed values so a
        # concrete instance can be created for parsing tests.
        class TestTool(Tools):
            def execute(self, blocks, safety=False):
                return "test execution"
            def execution_failure_check(self, output):
                return False
            def interpreter_feedback(self, output):
                return "test feedback"
        self.tool = TestTool()
        self.tool.tag = "python"  # Set tag for testing

    def test_load_exec_block_single_block(self):
        """Test parsing a single code block from LLM text."""
        llm_text = """Here's some Python code:
```python
print("Hello, World!")
x = 42
```
That's the code."""
        blocks, save_path = self.tool.load_exec_block(llm_text)
        self.assertIsNotNone(blocks)
        self.assertEqual(len(blocks), 1)
        # The extracted block keeps the newline right after the opening fence.
        self.assertEqual(blocks[0], '\nprint("Hello, World!")\nx = 42\n')
        self.assertIsNone(save_path)

    def test_load_exec_block_multiple_blocks(self):
        """Test parsing multiple code blocks from LLM text."""
        llm_text = """First block:
```python
import os
print("First block")
```
Second block:
```python
import sys
print("Second block")
```
Done."""
        blocks, save_path = self.tool.load_exec_block(llm_text)
        self.assertIsNotNone(blocks)
        # Blocks are returned in the order they appear in the text.
        self.assertEqual(len(blocks), 2)
        self.assertEqual(blocks[0], '\nimport os\nprint("First block")\n')
        self.assertEqual(blocks[1], '\nimport sys\nprint("Second block")\n')
        self.assertIsNone(save_path)

    def test_load_exec_block_with_save_path(self):
        """Test parsing code block with save path specification."""
        llm_text = """```python
save_path: test_file.py
import os
print("Hello with save path")
```"""
        blocks, save_path = self.tool.load_exec_block(llm_text)
        self.assertIsNotNone(blocks)
        self.assertEqual(len(blocks), 1)
        # NOTE: the save_path directive is kept inside the block text and the
        # returned save_path stays None — extraction happens elsewhere.
        self.assertEqual(blocks[0], '\nsave_path: test_file.py\nimport os\nprint("Hello with save path")\n')
        self.assertIsNone(save_path)

    def test_load_exec_block_with_indentation(self):
        """Test parsing code blocks with leading whitespace/indentation."""
        llm_text = """    Here's indented code:
    ```python
def hello():
    print("Hello")
    return True
    ```
    End of code."""
        blocks, save_path = self.tool.load_exec_block(llm_text)
        self.assertIsNotNone(blocks)
        self.assertEqual(len(blocks), 1)
        # Fence indentation is stripped; the code itself is preserved as-is.
        expected_code = '\ndef hello():\n    print("Hello")\n    return True\n'
        self.assertEqual(blocks[0], expected_code)

    def test_load_exec_block_no_blocks(self):
        """Test parsing text with no code blocks."""
        llm_text = """This is just regular text with no code blocks.
There are no python blocks here.
Just plain text."""
        blocks, save_path = self.tool.load_exec_block(llm_text)
        # "No fences at all" is signalled with (None, None).
        self.assertIsNone(blocks)
        self.assertIsNone(save_path)

    def test_load_exec_block_wrong_tag(self):
        """Test parsing text with code blocks but wrong language tag."""
        llm_text = """```javascript
console.log("This is JavaScript, not Python");
```"""
        blocks, save_path = self.tool.load_exec_block(llm_text)
        # Only blocks matching self.tool.tag ("python") are extracted.
        self.assertIsNone(blocks)
        self.assertIsNone(save_path)

    def test_load_exec_block_incomplete_block(self):
        """Test parsing text with incomplete code block (missing closing tag)."""
        llm_text = """```python
print("This block has no closing tag")
x = 42"""
        blocks, save_path = self.tool.load_exec_block(llm_text)
        # An unterminated fence yields an empty list, not None — distinct from
        # the "no fences" case above.
        self.assertEqual(blocks, [])
        self.assertIsNone(save_path)

    def test_load_exec_block_empty_block(self):
        """Test parsing empty code block."""
        llm_text = """```python
```"""
        blocks, save_path = self.tool.load_exec_block(llm_text)
        self.assertIsNotNone(blocks)
        self.assertEqual(len(blocks), 1)
        # An empty block still carries the newline after the opening fence.
        self.assertEqual(blocks[0], '\n')

    def test_load_exec_block_mixed_content(self):
        """Test parsing text with mixed content including code blocks."""
        llm_text = """Let me help you with that task.
First, I'll import the necessary modules:
```python
import os
import sys
```
Then I'll define a function:
```python
def process_data(data):
    return data.upper()
```
Finally, let's use it:
```python
result = process_data("hello world")
print(result)
```
That should work!"""
        blocks, save_path = self.tool.load_exec_block(llm_text)
        self.assertIsNotNone(blocks)
        self.assertEqual(len(blocks), 3)
        self.assertEqual(blocks[0], '\nimport os\nimport sys\n')
        self.assertEqual(blocks[1], '\ndef process_data(data):\n    return data.upper()\n')
        self.assertEqual(blocks[2], '\nresult = process_data("hello world")\nprint(result)\n')

    def test_load_exec_block_with_special_characters(self):
        """Test parsing code blocks containing special characters."""
        # The \" escapes below are resolved by the *outer* triple-quoted
        # string, so the parser sees plain double quotes.
        llm_text = """```python
text = "Hello \"world\" with 'quotes'"
regex = r"^\\d+$"
path = "C:\\Users\\test\\file.txt"
```"""
        blocks, save_path = self.tool.load_exec_block(llm_text)
        self.assertIsNotNone(blocks)
        self.assertEqual(len(blocks), 1)
        expected = '\ntext = "Hello "world" with \'quotes\'"\nregex = r"^\\d+$"\npath = "C:\\Users\\test\\file.txt"\n'
        self.assertEqual(blocks[0], expected)

    def test_load_exec_block_tag_undefined(self):
        """Test that assertion error is raised when tag is undefined."""
        # A tool whose tag was never configured must fail loudly, not parse.
        self.tool.tag = "undefined"
        llm_text = """```python
print("test")
```"""
        with self.assertRaises(AssertionError):
            self.tool.load_exec_block(llm_text)

    def test_found_executable_blocks_flag(self):
        """Test that the executable blocks found flag is set correctly."""
        self.assertFalse(self.tool.found_executable_blocks())
        llm_text = """```python
print("test")
```"""
        blocks, save_path = self.tool.load_exec_block(llm_text)
        # The flag is consumed on read: True once, then reset to False.
        self.assertTrue(self.tool.found_executable_blocks())
        self.assertFalse(self.tool.found_executable_blocks())

    def test_get_parameter_value(self):
        """Test the get_parameter_value helper method."""
        block = """param1 = value1
param2 = value2
some other text
param3 = value3"""
        self.assertEqual(self.tool.get_parameter_value(block, "param1"), "value1")
        self.assertEqual(self.tool.get_parameter_value(block, "param2"), "value2")
        # Non "key = value" lines are ignored; later params still resolve.
        self.assertEqual(self.tool.get_parameter_value(block, "param3"), "value3")
        self.assertIsNone(self.tool.get_parameter_value(block, "nonexistent"))
if __name__ == '__main__':
unittest.main() | {
"repo_id": "Fosowl/agenticSeek",
"file_path": "tests/test_tools_parsing.py",
"license": "GNU General Public License v3.0",
"lines": 177,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Fosowl/agenticSeek:sources/agents/mcp_agent.py | import os
import asyncio
from sources.utility import pretty_print, animate_thinking
from sources.agents.agent import Agent
from sources.tools.mcpFinder import MCP_finder
from sources.memory import Memory
# NOTE MCP agent is an active work in progress, not functional yet.
class McpAgent(Agent):
    """Special agent for discovering and using MCPs (Model Context Protocol tools).

    Disabled unless the MCP_FINDER_API_KEY environment variable is set.
    """

    def __init__(self, name, prompt_path, provider, verbose=False):
        """
        The mcp agent is a special agent for using MCPs.
        MCP agent will be disabled if the user does not explicitly set the MCP_FINDER_API_KEY in environment variable.
        """
        super().__init__(name, prompt_path, provider, verbose, None)
        # BUGFIX: enabled must be initialized *before* get_api_keys(), which
        # sets it to False when the key is missing. Previously the trailing
        # `self.enabled = True` clobbered that disable.
        self.enabled = True
        keys = self.get_api_keys()
        self.tools = {
            "mcp_finder": MCP_finder(keys["mcp_finder"]),
            # add mcp tools here
        }
        self.role = "mcp"
        self.type = "mcp_agent"
        self.memory = Memory(self.load_prompt(prompt_path),
                             recover_last_session=False,  # session recovery is handled by the interaction class
                             memory_compression=False,
                             model_provider=provider.get_model_name())

    def get_api_keys(self) -> dict:
        """
        Return the API keys for the tools.

        Side effect: sets self.enabled = False when MCP_FINDER_API_KEY is
        missing or empty, and warns the user.
        """
        api_key_mcp_finder = os.getenv("MCP_FINDER_API_KEY")
        # Falsy covers both None (unset) and the empty string.
        if not api_key_mcp_finder:
            pretty_print("MCP Finder disabled.", color="warning")
            self.enabled = False
        return {
            "mcp_finder": api_key_mcp_finder
        }

    def expand_prompt(self, prompt):
        """
        Expand the prompt with the descriptions of the available tools/MCPs.
        """
        tools_str = self.get_tools_description()
        prompt += f"""
        You can use the following tools and MCPs:
        {tools_str}
        """
        return prompt

    # NOTE(review): annotated -> str but the success path returns a
    # (answer, reasoning) tuple while the disabled path returns a bare string;
    # callers must handle both until this WIP API settles.
    async def process(self, prompt, speech_module) -> str:
        """Run the think/execute loop until no executable blocks remain.

        Returns (answer, reasoning) on success, or a plain string when the
        agent is disabled.
        """
        if not self.enabled:
            return "MCP Agent is disabled."
        prompt = self.expand_prompt(prompt)
        self.memory.push('user', prompt)
        working = True
        while working:
            animate_thinking("Thinking...", color="status")
            answer, reasoning = await self.llm_request()
            exec_success, _ = self.execute_modules(answer)
            answer = self.remove_blocks(answer)
            self.last_answer = answer
            self.status_message = "Ready"
            # Stop once the LLM reply contains no more executable blocks.
            if len(self.blocks_result) == 0:
                working = False
        return answer, reasoning
if __name__ == "__main__":
pass | {
"repo_id": "Fosowl/agenticSeek",
"file_path": "sources/agents/mcp_agent.py",
"license": "GNU General Public License v3.0",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:metagpt/utils/role_zero_utils.py | from __future__ import annotations
import json
import re
import traceback
from typing import Tuple
from metagpt.const import IMAGES
from metagpt.logs import logger
from metagpt.prompts.di.role_zero import (
ASK_HUMAN_COMMAND,
ASK_HUMAN_GUIDANCE_FORMAT,
END_COMMAND,
JSON_REPAIR_PROMPT,
REGENERATE_PROMPT,
SUMMARY_PROBLEM_WHEN_DUPLICATE,
)
from metagpt.schema import Message, UserMessage
from metagpt.utils.common import CodeParser, extract_and_encode_images
from metagpt.utils.repair_llm_raw_output import (
RepairType,
repair_escape_error,
repair_llm_raw_output,
)
async def parse_browser_actions(memory: list[Message], browser) -> list[Message]:
    """Insert the current browser page view right after the most recent Browser-command message."""
    if browser.is_empty_page:
        return memory
    executed = re.compile(r"Command Browser\.(\w+) executed")
    # Scan from the newest message backwards; only the latest match gets the page view.
    for pos in range(len(memory) - 1, -1, -1):
        if executed.search(memory[pos].content):
            memory.insert(pos + 1, UserMessage(cause_by="browser", content=await browser.view()))
            break
    return memory
async def parse_editor_result(memory: list[Message], keep_latest_count=5) -> list[Message]:
    """Retain the latest editor results and trim the output of older ones.

    Only the *keep_latest_count* most recent Editor-command messages keep
    their full output; older ones are reduced to one-line execution records.
    """
    executed = re.compile(r"Command Editor\.(\w+?) executed")
    trimmed_memory = []
    editor_msgs_seen = 0
    for msg in reversed(memory):
        command_names = executed.findall(msg.content)
        if command_names:
            editor_msgs_seen += 1
            if editor_msgs_seen > keep_latest_count:
                # Drop the verbose editor output, keep only the execution records.
                summary = msg.content[: msg.content.find("Command Editor")]
                summary += "\n".join(f"Command Editor.{name} executed." for name in command_names)
                msg = UserMessage(content=summary)
        trimmed_memory.append(msg)
    # Restore chronological order (we iterated newest-first).
    trimmed_memory.reverse()
    return trimmed_memory
async def parse_images(memory: list[Message], llm) -> list[Message]:
    """Attach encoded images found in user messages as metadata, when the LLM accepts image input."""
    if not llm.support_image_input():
        return memory
    for message in memory:
        # Skip non-user messages and messages already carrying image metadata.
        if message.role != "user" or IMAGES in message.metadata:
            continue
        encoded = extract_and_encode_images(message.content)
        if encoded:
            message.add_metadata(IMAGES, encoded)
    return memory
async def check_duplicates(
    req: list[dict], command_rsp: str, rsp_hist: list[str], llm, respond_language: str, check_window: int = 10
) -> str:
    """Detect a repeated LLM response within the last *check_window* responses and recover.

    Returns the END command (for repeated Plan.finish_current_task), an
    ask-human command (for 3+ repeats), a regenerated response (first repeat),
    or the original *command_rsp* unchanged when no duplicate is found.
    """
    past_rsp = rsp_hist[-check_window:]
    if command_rsp in past_rsp and '"command_name": "end"' not in command_rsp:
        # Normal response with thought contents are highly unlikely to reproduce
        # If an identical response is detected, it is a bad response, mostly due to LLM repeating generated content
        # In this case, ask human for help and regenerate
        # TODO: switch to llm_cached_aask
        # Hard rule to ask human for help
        if past_rsp.count(command_rsp) >= 3:
            if '"command_name": "Plan.finish_current_task",' in command_rsp:
                # Detect the duplicate of the 'Plan.finish_current_task' command, and use the 'end' command to finish the task.
                logger.warning(f"Duplicate response detected: {command_rsp}")
                return END_COMMAND
            # Ask the LLM to summarize what it is stuck on, in the user's language.
            problem = await llm.aask(
                req + [UserMessage(content=SUMMARY_PROBLEM_WHEN_DUPLICATE.format(language=respond_language))]
            )
            # NOTE(review): this mutates the imported ASK_HUMAN_COMMAND constant
            # in place, so the question persists across calls — confirm intended.
            ASK_HUMAN_COMMAND[0]["args"]["question"] = ASK_HUMAN_GUIDANCE_FORMAT.format(problem=problem).strip()
            ask_human_command = "```json\n" + json.dumps(ASK_HUMAN_COMMAND, indent=4, ensure_ascii=False) + "\n```"
            return ask_human_command
        # Try correction by self
        logger.warning(f"Duplicate response detected: {command_rsp}")
        regenerate_req = req + [UserMessage(content=REGENERATE_PROMPT)]
        regenerate_req = llm.format_msg(regenerate_req)
        command_rsp = await llm.aask(regenerate_req)
    return command_rsp
async def parse_commands(command_rsp: str, llm, exclusive_tool_commands: list[str]) -> Tuple[list[dict] | str, bool, str]:
    """Retrieves commands from the Large Language Model (LLM).

    This function attempts to retrieve a list of commands from the LLM by
    processing the response (`command_rsp`). It handles potential errors
    during parsing and LLM response formats.

    Returns:
        A 3-tuple of:
        - the parsed command list on success, or an error-message string on failure
        - a boolean flag indicating success (True) or failure (False)
        - the (possibly rewritten) raw command response
    """
    try:
        commands = CodeParser.parse_code(block=None, lang="json", text=command_rsp)
        # Repair a truncated opening bracket: "...]" without a leading "[".
        if commands.endswith("]") and not commands.startswith("["):
            commands = "[" + commands
        commands = json.loads(repair_llm_raw_output(output=commands, req_keys=[None], repair_type=RepairType.JSON))
    except json.JSONDecodeError as e:
        # First fallback: ask the LLM itself to repair its own JSON.
        logger.warning(f"Failed to parse JSON for: {command_rsp}. Trying to repair...")
        commands = await llm.aask(msg=JSON_REPAIR_PROMPT.format(json_data=command_rsp, json_decode_error=str(e)))
        try:
            commands = json.loads(CodeParser.parse_code(block=None, lang="json", text=commands))
        except json.JSONDecodeError:
            # repair escape error of code and math
            commands = CodeParser.parse_code(block=None, lang="json", text=command_rsp)
            new_command = repair_escape_error(commands)
            commands = json.loads(
                repair_llm_raw_output(output=new_command, req_keys=[None], repair_type=RepairType.JSON)
            )
    except Exception as e:
        # Any other failure is reported to the caller instead of raised.
        tb = traceback.format_exc()
        print(tb)
        error_msg = str(e)
        return error_msg, False, command_rsp
    # Tolerate LLM output that wraps the list in an object (or emits a single dict).
    if isinstance(commands, dict):
        commands = commands["commands"] if "commands" in commands else [commands]
    # Set the exclusive command flag to False.
    command_flag = [command["command_name"] not in exclusive_tool_commands for command in commands]
    if command_flag.count(False) > 1:
        # Keep only the first exclusive command
        index_of_first_exclusive = command_flag.index(False)
        commands = commands[: index_of_first_exclusive + 1]
        command_rsp = "```json\n" + json.dumps(commands, indent=4, ensure_ascii=False) + "\n```"
        logger.info("exclusive command more than one in current command list. change the command list.\n" + command_rsp)
    return commands, True, command_rsp
def get_plan_status(planner) -> Tuple[str, str]:
    """Render the planner's plan as a human-readable status string.

    Returns (formatted_plan, current_task), where current_task is the dumped
    current task dict, or "" when there is no current task.

    Example output:
    [GOAL] create a 2048 game
    [TASK_ID 1] (finished) Create a Product Requirement Document (PRD) for the 2048 game. This task depends on tasks[]. [Assign to Alice]
    [TASK_ID 2] ( ) Design the system architecture for the 2048 game. This task depends on tasks[1]. [Assign to Bob]
    """
    plan_dump = planner.plan.model_dump(include=["goal", "tasks"])
    if planner.plan.current_task:
        current_task = planner.plan.current_task.model_dump(exclude=["code", "result", "is_success"])
    else:
        current_task = ""
    lines = [f"[GOAL] {plan_dump['goal']}\n"]
    tasks = plan_dump["tasks"]
    if tasks:
        lines.append("[Plan]\n")
        for task in tasks:
            state = "finished" if task["is_finished"] else " "
            lines.append(
                f"[TASK_ID {task['task_id']}] ({state}){task['instruction']} "
                f"This task depends on tasks{task['dependent_task_ids']}. [Assign to {task['assignee']}]\n"
            )
    else:
        lines.append("No Plan \n")
    return "".join(lines), current_task
def format_terminal_output(cmd: dict, raw_output: str) -> str:
    """Format a terminal command and its raw output for the LLM.

    Outputs of 10 characters or fewer get an extra attention marker, since
    very short outputs (e.g. empty or error codes) are easy to overlook.
    """
    command_line = cmd["args"]["cmd"]
    attention = " (pay attention to this.)" if len(raw_output) <= 10 else ""
    return f"\n[command]: {command_line} \n[command output] : {raw_output}{attention}"
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "metagpt/utils/role_zero_utils.py",
"license": "MIT License",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FoundationAgents/MetaGPT:metagpt/provider/openrouter_reasoning.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Desc :
import json
from metagpt.configs.llm_config import LLMConfig, LLMType
from metagpt.const import USE_CONFIG_TIMEOUT
from metagpt.logs import log_llm_stream
from metagpt.provider.base_llm import BaseLLM
from metagpt.provider.general_api_requestor import GeneralAPIRequestor, OpenAIResponse
from metagpt.provider.llm_provider_registry import register_provider
@register_provider([LLMType.OPENROUTER_REASONING])
class OpenrouterReasoningLLM(BaseLLM):
    """OpenRouter chat-completions provider that also captures the model's
    `reasoning` field (reasoning tokens) alongside the normal content."""

    def __init__(self, config: LLMConfig):
        self.client = GeneralAPIRequestor(base_url=config.base_url)
        self.config = config
        self.model = self.config.model
        self.http_method = "post"
        # NOTE(review): hard-coded here while the requestor above uses
        # config.base_url — confirm whether these are meant to differ.
        self.base_url = "https://openrouter.ai/api/v1"
        self.url_suffix = "/chat/completions"
        self.headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.config.api_key}"}

    def decode(self, response: OpenAIResponse) -> dict:
        """Decode a raw API response body into a dict."""
        return json.loads(response.data.decode("utf-8"))

    def _const_kwargs(
        self, messages: list[dict], stream: bool = False, timeout=USE_CONFIG_TIMEOUT, **extra_kwargs
    ) -> dict:
        """Build the request payload. include_reasoning asks OpenRouter to
        return the model's reasoning tokens alongside the content."""
        kwargs = {
            "messages": messages,
            "include_reasoning": True,
            "max_tokens": self.config.max_token,
            "temperature": self.config.temperature,
            "model": self.model,
            "stream": stream,
        }
        return kwargs

    def get_choice_text(self, rsp: dict) -> str:
        """Return the assistant content; stash the reasoning text on self if present."""
        if "reasoning" in rsp["choices"][0]["message"]:
            self.reasoning_content = rsp["choices"][0]["message"]["reasoning"]
        return rsp["choices"][0]["message"]["content"]

    async def _achat_completion(self, messages: list[dict], timeout: int = USE_CONFIG_TIMEOUT) -> dict:
        """Non-streaming chat completion; updates cost tracking from usage."""
        payload = self._const_kwargs(messages)
        resp, _, _ = await self.client.arequest(
            url=self.url_suffix, method=self.http_method, params=payload, headers=self.headers  # empty
        )
        resp = resp.decode_asjson()
        self._update_costs(resp["usage"], model=self.model)
        return resp

    async def acompletion(self, messages: list[dict], timeout=USE_CONFIG_TIMEOUT) -> dict:
        return await self._achat_completion(messages, timeout=self.get_timeout(timeout))

    async def _achat_completion_stream(self, messages: list[dict], timeout: int = USE_CONFIG_TIMEOUT) -> str:
        """Streaming chat completion; accumulates content and reasoning chunks.

        Returns the concatenated content; reasoning (if any) is stored on
        self.reasoning_content.
        """
        # NOTE(review): this mutates self.headers for all subsequent requests,
        # including non-streaming ones — confirm intended.
        self.headers["Content-Type"] = "text/event-stream"  # update header to adapt the client
        payload = self._const_kwargs(messages, stream=True)
        resp, _, _ = await self.client.arequest(
            url=self.url_suffix, method=self.http_method, params=payload, headers=self.headers, stream=True  # empty
        )
        collected_content = []
        collected_reasoning_content = []
        usage = {}
        async for chunk in resp:
            chunk = chunk.decode_asjson()
            if not chunk:
                continue
            delta = chunk["choices"][0]["delta"]
            # Reasoning tokens and content tokens arrive in separate deltas;
            # only content is echoed to the stream log.
            if "reasoning" in delta and delta["reasoning"]:
                collected_reasoning_content.append(delta["reasoning"])
            elif delta["content"]:
                collected_content.append(delta["content"])
                log_llm_stream(delta["content"])
            # Usage is (re)read on each chunk; the final chunk's value wins.
            usage = chunk.get("usage")
        log_llm_stream("\n")
        self._update_costs(usage, model=self.model)
        full_content = "".join(collected_content)
        if collected_reasoning_content:
            self.reasoning_content = "".join(collected_reasoning_content)
        return full_content
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "metagpt/provider/openrouter_reasoning.py",
"license": "MIT License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:examples/di/atomization_capacity_plan.py | import fire
from metagpt.logs import logger
from metagpt.roles.di.team_leader import TeamLeader
async def main():
    """Demonstrate TeamLeader plan manipulation end to end.

    Walks through append_task, finish_current_task, replace_task and
    reset_task, logging plan state after each mutation. Purely illustrative;
    no LLM calls are made.
    """
    tl = TeamLeader()
    logger.info("\n=== Adding Initial Tasks ===")
    tl.planner.plan.append_task(
        task_id="T1", dependent_task_ids=[], instruction="Create Product Requirements Document (PRD)", assignee="Alice"
    )
    tl.planner.plan.append_task(
        task_id="T2", dependent_task_ids=["T1"], instruction="Design System Architecture", assignee="Bob"
    )
    # 1. Add Development Tasks
    logger.info("\n=== Adding Development Tasks ===")
    tl.planner.plan.append_task(
        task_id="T3", dependent_task_ids=["T2"], instruction="Implement Core Function Modules", assignee="Alex"
    )
    tl.planner.plan.append_task(
        task_id="T4", dependent_task_ids=["T2"], instruction="Implement User Interface", assignee="Alex"
    )
    # 2. Complete Some Tasks
    logger.info("\n=== Execute and Complete Tasks ===")
    logger.info(f"Current Task: {tl.planner.plan.current_task.instruction}")
    tl.planner.plan.finish_current_task()  # Complete T1
    logger.info(f"Current Task: {tl.planner.plan.current_task.instruction}")
    tl.planner.plan.finish_current_task()  # Complete T2
    # 3. Replace Tasks
    logger.info("\n=== Replace Task ===")
    tl.planner.plan.replace_task(
        task_id="T3",
        new_dependent_task_ids=["T2"],
        new_instruction="Implement Core Function Modules (Add New Features)",
        new_assignee="Senior_Developer",
    )
    # 4. Add Testing Tasks
    logger.info("\n=== Add Testing Tasks ===")
    tl.planner.plan.append_task(
        task_id="T5", dependent_task_ids=["T3", "T4"], instruction="Execute Integration Tests", assignee="Edward"
    )
    # 5. Reset Task Demonstration
    logger.info("\n=== Reset Task ===")
    logger.info("Reset Task T3 (This will also reset T5 which depends on it)")
    tl.planner.plan.reset_task("T3")
    # Display Final Status
    logger.info("\n=== Final Status ===")
    logger.info(f"Completed Tasks: {len([t for t in tl.planner.plan.tasks if t.is_finished])}")
    logger.info(f"Current Task: {tl.planner.plan.current_task.instruction}")
    logger.info("All Tasks:")
    for task in tl.planner.plan.tasks:
        logger.info(f"- {task.task_id}: {task.instruction} (Completed: {task.is_finished})")
# Run the demo via Fire so CLI args map onto main()'s parameters.
if __name__ == "__main__":
    fire.Fire(main)
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "examples/di/atomization_capacity_plan.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:examples/di/automated_planning_of_tasks.py | import fire
from metagpt.logs import logger
from metagpt.roles.di.team_leader import TeamLeader
async def main():
    """Auto-generate a plan for a goal with TeamLeader and log the resulting tasks."""
    # Create an instance of TeamLeader
    tl = TeamLeader()
    # Update the plan with the goal to create a 2048 game
    # This will auto generate tasks needed to accomplish the goal
    await tl.planner.update_plan(goal="create a 2048 game.")
    # Iterate through all tasks in the plan
    # Log each task's ID, instruction and completion status
    for task in tl.planner.plan.tasks:
        logger.info(f"- {task.task_id}: {task.instruction} (Completed: {task.is_finished})")
# Run the demo via Fire so CLI args map onto main()'s parameters.
if __name__ == "__main__":
    fire.Fire(main)
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "examples/di/automated_planning_of_tasks.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:examples/di/data_analyst_write_code.py | import fire
from metagpt.logs import logger
from metagpt.roles.di.data_analyst import DataAnalyst
async def main():
    """Drive DataAnalyst to generate and execute code for building a 2D array."""
    # Create an instance of DataAnalyst role
    analyst = DataAnalyst()
    # Set the main goal for the planner - constructing a 2D array
    analyst.planner.plan.goal = "construct a two-dimensional array"
    # Add a specific task to the planner with detailed parameters:
    # - task_id: Unique identifier for the task
    # - dependent_task_ids: List of tasks that need to be completed before this one (empty in this case)
    # - instruction: Description of what needs to be done
    # - assignee: Who will execute the task (David)
    # - task_type: Category of the task (DATA_ANALYSIS)
    analyst.planner.plan.append_task(
        task_id="1",
        dependent_task_ids=[],
        instruction="construct a two-dimensional array",
        assignee="David",
        task_type="DATA_ANALYSIS",
    )
    # Execute the code generation and execution for creating a 2D array
    # The write_and_exec_code method will:
    # 1. Generate the necessary code for creating a 2D array
    # 2. Execute the generated code
    # 3. Return the result
    result = await analyst.write_and_exec_code("construct a two-dimensional array")
    # Log the result of the code execution
    logger.info(result)
# Run the demo via Fire so CLI args map onto main()'s parameters.
if __name__ == "__main__":
    fire.Fire(main)
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "examples/di/data_analyst_write_code.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:examples/di/interacting_with_human.py | import fire
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.logs import logger
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.schema import Message
async def main():
    """Demonstrate the human-in-the-loop cycle: ask, plan, run, reply."""
    # Initialize the MetaGPT environment
    env = MGXEnv()
    # Add a TeamLeader role to the environment
    env.add_roles([TeamLeader()])
    # Get input from human user about what they want to do
    human_rsp = await env.ask_human("What do you want to do?")
    # Log the human response for tracking
    logger.info(human_rsp)
    # Create and publish a message with the human response in the environment
    env.publish_message(Message(content=human_rsp, role="user"))
    # Get the TeamLeader role instance named 'Mike'
    tl = env.get_role("Mike")
    # Execute the TeamLeader's tasks
    await tl.run()
    # Log information about each task in the TeamLeader's plan
    for task in tl.planner.plan.tasks:
        logger.info(f"- {task.task_id}: {task.instruction} (Completed: {task.is_finished})")
    # Send an empty response back to the human and log it
    resp = await env.reply_to_human("")
    logger.info(resp)
# Run the demo via Fire so CLI args map onto main()'s parameters.
if __name__ == "__main__":
    fire.Fire(main)
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "examples/di/interacting_with_human.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:examples/write_design.py | import asyncio
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.logs import logger
from metagpt.roles.architect import Architect
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.schema import Message
async def main():
    """Have the TeamLeader route a TRD-writing request to the Architect and log the result."""
    msg = "Write a TRD for a snake game"
    env = MGXEnv()
    env.add_roles([TeamLeader(), Architect()])
    # Publish the user request; the TeamLeader ("Mike") plans and dispatches it.
    env.publish_message(Message(content=msg, role="user"))
    tl = env.get_role("Mike")
    await tl.run()
    # "Bob" is the Architect; run him directly on the same requirement.
    role = env.get_role("Bob")
    result = await role.run(msg)
    logger.info(result)
# Standard asyncio entry point for direct execution.
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "examples/write_design.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:examples/write_game_code.py | import asyncio
import time
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.roles.di.engineer2 import Engineer2
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.schema import Message
async def main(requirement="", user_defined_recipient="", enable_human_input=False, allow_idle_time=30):
    """Run a TeamLeader + Engineer2 environment on *requirement*.

    Args:
        requirement: the user request to publish into the environment.
        user_defined_recipient: optional role name to address directly.
        enable_human_input: when False, the idle window collapses to 1s so the
            script exits promptly once the environment goes quiet.
        allow_idle_time: seconds of continuous idleness before exiting.
    """
    env = MGXEnv()
    env.add_roles([TeamLeader(), Engineer2()])
    msg = Message(content=requirement)
    env.attach_images(msg)  # attach image content if applicable
    if user_defined_recipient:
        msg.send_to = {user_defined_recipient}
        env.publish_message(msg, user_defined_recipient=user_defined_recipient)
    else:
        env.publish_message(msg)
    allow_idle_time = allow_idle_time if enable_human_input else 1
    start_time = time.time()
    while time.time() - start_time < allow_idle_time:
        if not env.is_idle:
            await env.run()
            start_time = time.time()  # reset the idle timer after productive work
        else:
            # BUGFIX: yield to the event loop instead of busy-spinning at 100%
            # CPU while waiting for new messages during the idle window.
            await asyncio.sleep(0.1)
# Direct-execution entry point: run a single non-interactive game-coding job.
if __name__ == "__main__":
    requirement = "Write code for a 2048 game"
    user_defined_recipient = ""
    asyncio.run(
        main(
            requirement=requirement,
            user_defined_recipient=user_defined_recipient,
            enable_human_input=False,
            allow_idle_time=60,
        )
    )
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "examples/write_game_code.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:tests/metagpt/roles/di/test_role_zero.py | import pytest
from metagpt.actions import UserRequirement
from metagpt.logs import logger
from metagpt.roles.di.role_zero import RoleZero
from metagpt.schema import Message
@pytest.mark.asyncio
async def test_model_validators():
    """Test all model validators"""
    # Constructing RoleZero triggers its pydantic model validators; the
    # assertions below check each validator's observable effect.
    role = RoleZero()
    # Test set_plan_and_tool
    assert role.react_mode == "react"
    assert role.planner is not None
    # Test set_tool_execution
    assert "Plan.append_task" in role.tool_execution_map
    assert "RoleZero.ask_human" in role.tool_execution_map
    # Test set_longterm_memory
    assert role.rc.memory is not None
@pytest.mark.asyncio
async def test_think_react_cycle():
    """Test the think-react cycle"""
    # Setup test conditions
    role = RoleZero(tools=["Plan"])
    role.rc.todo = True
    role.planner.plan.goal = "Test goal"
    role.respond_language = "English"
    # Test _think: with a pending todo and a goal, thinking should decide to act.
    result = await role._think()
    assert result is True
    # Feed a user-requirement message and run the full react cycle.
    role.rc.news = [Message(content="Test", cause_by=UserRequirement())]
    result = await role._react()
    logger.info(result)
    # _react must produce a Message, whatever its content.
    assert isinstance(result, Message)
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "tests/metagpt/roles/di/test_role_zero.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
FoundationAgents/MetaGPT:tests/metagpt/roles/di/test_swe_agent.py | import pytest
from metagpt.environment.mgx.mgx_env import MGXEnv
from metagpt.roles.di.swe_agent import SWEAgent
from metagpt.roles.di.team_leader import TeamLeader
from metagpt.schema import Message
from metagpt.tools.libs.terminal import Bash
@pytest.fixture
def env():
    """Build an MGXEnv populated with a TeamLeader and an SWEAgent for each test."""
    test_env = MGXEnv()
    tl = TeamLeader()
    test_env.add_roles([tl, SWEAgent()])
    return test_env
@pytest.mark.asyncio
async def test_swe_agent(env):
    """Run SWEAgent ("Swen") on a bug-fix request and check its configuration and output."""
    requirement = "Fix bug in the calculator app"
    swe = env.get_role("Swen")
    # Address the message to the agent directly so the TeamLeader doesn't route it.
    message = Message(content=requirement, send_to={swe.name})
    env.publish_message(message)
    await swe.run()
    history = env.history.get()
    agent_messages = [msg for msg in history if msg.sent_from == swe.name]
    # Static configuration checks on the agent itself.
    assert swe.name == "Swen"
    assert swe.profile == "Issue Solver"
    assert isinstance(swe.terminal, Bash)
    assert "Bash" in swe.tools
    assert "git_create_pull" in swe.tool_execution_map

    def is_valid_instruction_message(msg: Message) -> bool:
        # Loose content check: any git/bash/check/fix mention counts.
        content = msg.content.lower()
        return any(word in content for word in ["git", "bash", "check", "fix"])

    assert any(is_valid_instruction_message(msg) for msg in agent_messages), "Should have valid instruction messages"
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "tests/metagpt/roles/di/test_swe_agent.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
FoundationAgents/MetaGPT:tests/metagpt/tools/libs/test_cr.py | import json
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
import pytest_asyncio
from metagpt.tools.libs.cr import CodeReview
class MockFile:
    """Minimal async-context-manager stand-in for an aiofiles file handle."""

    def __init__(self, content):
        self.content = content

    async def __aenter__(self):
        # Entering the context yields the mock file itself.
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Nothing to release; exceptions propagate (returns None).
        return None

    async def read(self):
        return self.content
@pytest.mark.asyncio
class TestCodeReview:
    """Unit tests for the CodeReview tool; file I/O and the LLM action are mocked."""

    @pytest_asyncio.fixture(autouse=True)
    async def setup(self):
        """Fixture to initialize the CodeReview instance."""
        self.cr = CodeReview()

    # Decorators apply bottom-up, so the mock args arrive in the order:
    # mock_run (CodeReview.run), mock_report, mock_aiofiles_open.
    @patch("aiofiles.open", new_callable=MagicMock)
    @patch("metagpt.utils.report.EditorReporter.async_report", new_callable=AsyncMock)
    @patch("metagpt.ext.cr.actions.code_review.CodeReview.run", new_callable=AsyncMock)
    async def test_review(self, mock_run, mock_report, mock_aiofiles_open):
        """Test the review method with a local patch file."""
        # mock patch_content
        patch_content = """diff --git a/test.py b/test.py
index 1234567..89abcde 100644
--- a/test.py
+++ b/test.py
@@ -1,3 +1,3 @@
def foo():
- print("Hello")
+ print("World")
- print("Another line")
+ print("Another modified line")"""
        # mock point file content
        point_file_content = json.dumps([{"id": 1, "description": "Test point"}])
        mock_patch_file = MockFile(patch_content)
        mock_point_file = MockFile(point_file_content)
        # aiofiles.open is called twice: first for the patch, then the points file.
        mock_aiofiles_open.side_effect = [mock_patch_file, mock_point_file]
        mock_run.return_value = [{"comment": "Fix this line"}]
        # run
        result = await self.cr.review(patch_path="test.patch", output_file="output.json")
        # assert
        assert "The number of defects: 1" in result
        mock_run.assert_called_once()
        mock_report.assert_called()

    @patch("aiofiles.open", new_callable=MagicMock)
    @patch("metagpt.ext.cr.actions.modify_code.ModifyCode.run", new_callable=AsyncMock)
    async def test_fix(self, mock_run, mock_aiofiles_open):
        """Test the fix method."""
        patch_content = """diff --git a/test.py b/test.py
index 1234567..89abcde 100644
--- a/test.py
+++ b/test.py
@@ -1,3 +1,3 @@
def foo():
- print("Hello")
+ print("World")
- print("Another line")
+ print("Another modified line")"""
        cr_file_content = json.dumps([{"comment": "Fix this line"}])
        # mock file obj
        mock_path_file = MockFile(patch_content)
        mock_cr_file = MockFile(cr_file_content)
        mock_aiofiles_open.side_effect = [mock_path_file, mock_cr_file]
        # run fix
        result = await self.cr.fix(patch_path="test.patch", cr_file="cr.json", output_dir="output")
        # assert
        assert "The fixed patch files store in output" in result
        mock_run.assert_called_once()
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "tests/metagpt/tools/libs/test_cr.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
FoundationAgents/MetaGPT:tests/metagpt/tools/libs/test_env.py | import os
from unittest.mock import AsyncMock
import pytest
from metagpt.tools.libs.env import (
EnvKeyNotFoundError,
default_get_env_description,
get_env,
get_env_default,
set_get_env_entry,
)
@pytest.mark.asyncio
class TestEnv:
    """Tests for the get_env helpers, using temporary os.environ entries."""

    @pytest.fixture(autouse=True)
    def setup_and_teardown(self):
        """Setup and teardown for environment variables."""
        # Note the two spellings: "TEST_APP-KEY" (app-prefixed form used by
        # get_env with app_name) and "TEST_APP_KEY" (plain lookup) are distinct.
        self.mock_os_env = {
            "TEST_APP-KEY": "value1",
            "TEST_APP_KEY": "value2",
        }
        os.environ.update(self.mock_os_env)
        yield
        # Clear added environment variables
        for key in self.mock_os_env.keys():
            del os.environ[key]

    async def test_get_env(self):
        """Test retrieving an environment variable."""
        # With app_name, the lookup resolves to "TEST_APP-KEY".
        result = await get_env("KEY", app_name="TEST_APP")
        assert result == "value1"
        with pytest.raises(EnvKeyNotFoundError):
            await get_env("NON_EXISTENT_KEY")
        # Using no app_name
        result = await get_env("TEST_APP_KEY")
        assert result == "value2"

    async def test_get_env_default(self):
        """Test retrieving environment variable with default value."""
        result = await get_env_default("NON_EXISTENT_KEY", app_name="TEST_APP", default_value="default")
        assert result == "default"

    async def test_get_env_description(self):
        """Test retrieving descriptions for environment variables."""
        descriptions = await default_get_env_description()
        assert 'await get_env(key="KEY", app_name="TEST_APP")' in descriptions
        assert (
            descriptions['await get_env(key="KEY", app_name="TEST_APP")']
            == "Return the value of environment variable `TEST_APP-KEY`."
        )

    async def test_set_get_env_entry(self):
        """Test overriding get_env functionality."""
        mock_get_env_value = "mocked_value"
        mock_func = AsyncMock(return_value=mock_get_env_value)
        # After swapping in the mock entry, every get_env call routes to it.
        set_get_env_entry(mock_func, default_get_env_description)
        result = await get_env("set_get_env")
        assert result == mock_get_env_value
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "tests/metagpt/tools/libs/test_env.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
FoundationAgents/MetaGPT:tests/metagpt/tools/libs/test_image_getter.py | from pathlib import Path
from unittest.mock import AsyncMock, patch
import pytest
import pytest_asyncio
from metagpt.tools.libs.image_getter import ImageGetter
@pytest.mark.asyncio
class TestImageGetter:
    """Tests for ImageGetter with the browser page and image decoding mocked."""

    @pytest_asyncio.fixture(autouse=True)
    async def image_getter_client(self):
        """Fixture to initialize the ImageGetter."""
        self.image_getter = ImageGetter(headless=True)
        await self.image_getter.start()
        yield self.image_getter
        # Tear down the headless browser if one was launched.
        if self.image_getter.browser_instance:
            await self.image_getter.browser_instance.close()

    @patch("metagpt.tools.libs.image_getter.decode_image")
    async def test_get_image_success(self, mock_decode_image):
        """Test successfully retrieving and saving an image."""
        search_term = "nature"
        image_save_path = Path.cwd() / "test_image_getter.jpg"
        # Mock the decode_image to avoid actual image decoding
        mock_image = AsyncMock()
        mock_decode_image.return_value = mock_image
        # Mock the Playwright page evaluation result to return a dummy base64 image string
        self.image_getter.page.goto = AsyncMock()
        self.image_getter.page.wait_for_selector = AsyncMock()
        self.image_getter.page.evaluate = AsyncMock(return_value="data:image/png;base64,FAKEBASE64STRING")
        result = await self.image_getter.get_image(search_term, str(image_save_path))
        assert f"{search_term} found." in result
        mock_decode_image.assert_called_once()
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "tests/metagpt/tools/libs/test_image_getter.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
FoundationAgents/MetaGPT:tests/metagpt/tools/libs/test_linter.py | import tempfile
from pathlib import Path
import pytest
from metagpt.tools.libs.linter import Linter, LintResult
def test_linter_initialization():
    """Constructor should store encoding/root and register a python handler."""
    lint = Linter(encoding="utf-8", root="/test/root")
    assert lint.encoding == "utf-8"
    assert lint.root == "/test/root"
    assert "python" in lint.languages
    assert callable(lint.languages["python"])
def test_get_abs_fname():
    linter = Linter(root="/test/root")
    abs_path = linter.get_abs_fname("test_file.py")
    # NOTE(review): asserting that the "absolute" path equals the *relative*
    # one looks suspicious — confirm get_abs_fname/get_rel_fname are really
    # meant to agree for a bare filename under this root.
    assert abs_path == linter.get_rel_fname("test_file.py")
def test_py_lint():
    """py_lint should return None for syntactically valid Python source."""
    checker = Linter()
    source = "print('Hello, World!')"
    this_file = str(Path(__file__).resolve())
    # No errors expected for valid Python code.
    assert checker.py_lint(this_file, this_file, source) is None
def test_lint_with_python_file():
    """lint() should flag an IndentationError in a malformed .py file."""
    checker = Linter()
    with tempfile.NamedTemporaryFile(suffix=".py", delete=True) as bad_py:
        bad_py.write(b"def hello():\nprint('Hello')\n")  # body not indented -> IndentationError
        bad_py.flush()
        report = checker.lint(bad_py.name)
        assert isinstance(report, LintResult)
        assert "IndentationError" in report.text
        assert len(report.lines) > 0
def test_lint_with_unsupported_language():
    """Files with an unrecognized extension should be skipped (returns None)."""
    checker = Linter()
    with tempfile.NamedTemporaryFile(suffix=".unsupported", delete=True) as blob:
        blob.write(b"This is unsupported code.")
        blob.flush()
        assert checker.lint(blob.name) is None
def test_run_cmd():
    """run_cmd should either succeed quietly (None) or yield a LintResult."""
    checker = Linter()
    with tempfile.NamedTemporaryFile(suffix=".py", delete=True) as ok_py:
        ok_py.write(b"print('Hello, World!')\n")
        ok_py.flush()
        outcome = checker.run_cmd("flake8", ok_py.name, "print('Hello, World!')")
        # flake8 may be absent in the test environment; both outcomes are acceptable —
        # we only require that no exception is raised.
        assert outcome is None or isinstance(outcome, LintResult)
# Allow running this module directly: `python test_linter.py`.
if __name__ == "__main__":
    pytest.main([__file__, "-s"])
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "tests/metagpt/tools/libs/test_linter.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
FoundationAgents/MetaGPT:examples/spo/optimize.py | import argparse
from metagpt.ext.spo.components.optimizer import PromptOptimizer
from metagpt.ext.spo.utils.llm_client import SPO_LLM
def parse_args():
    """Build and parse the CLI arguments for the SPO prompt optimizer.

    Returns an argparse.Namespace with LLM settings (model/temperature per
    role) and PromptOptimizer settings (workspace, rounds, template, name).
    """
    cli = argparse.ArgumentParser(description="SPO PromptOptimizer CLI")
    # (flag, type, default, help) — order matches the original option listing.
    option_specs = [
        # LLM parameters
        ("--opt-model", str, "claude-3-5-sonnet-20240620", "Model for optimization"),
        ("--opt-temp", float, 0.7, "Temperature for optimization"),
        ("--eval-model", str, "gpt-4o-mini", "Model for evaluation"),
        ("--eval-temp", float, 0.3, "Temperature for evaluation"),
        ("--exec-model", str, "gpt-4o-mini", "Model for execution"),
        ("--exec-temp", float, 0, "Temperature for execution"),
        # PromptOptimizer parameters
        ("--workspace", str, "workspace", "Path for optimized output"),
        ("--initial-round", int, 1, "Initial round number"),
        ("--max-rounds", int, 10, "Maximum number of rounds"),
        ("--template", str, "Poem.yaml", "Template file name"),
        ("--name", str, "Poem", "Project name"),
    ]
    for flag, value_type, default, help_text in option_specs:
        cli.add_argument(flag, type=value_type, default=default, help=help_text)
    return cli.parse_args()
def main():
    """CLI entry point: configure the SPO LLM roles, then run the optimizer."""
    opts = parse_args()
    # One LLM configuration per role: optimize / evaluate / execute.
    SPO_LLM.initialize(
        optimize_kwargs={"model": opts.opt_model, "temperature": opts.opt_temp},
        evaluate_kwargs={"model": opts.eval_model, "temperature": opts.eval_temp},
        execute_kwargs={"model": opts.exec_model, "temperature": opts.exec_temp},
    )
    PromptOptimizer(
        optimized_path=opts.workspace,
        initial_round=opts.initial_round,
        max_rounds=opts.max_rounds,
        template=opts.template,
        name=opts.name,
    ).optimize()
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "examples/spo/optimize.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:metagpt/ext/spo/app.py | import asyncio
from pathlib import Path
from typing import Dict
import streamlit as st
import yaml
from loguru import logger as _logger
from metagpt.const import METAGPT_ROOT
from metagpt.ext.spo.components.optimizer import PromptOptimizer
from metagpt.ext.spo.utils.llm_client import SPO_LLM, RequestType
def load_yaml_template(template_path: Path) -> Dict:
    """Load a SPO template YAML file, falling back to an empty skeleton.

    Returns the parsed mapping, or the default template when the file is
    missing or empty. Fix: ``yaml.safe_load`` returns ``None`` for an empty
    file, which previously broke callers doing ``data.get(...)``.
    """
    default_template = {"prompt": "", "requirements": "", "count": None, "qa": [{"question": "", "answer": ""}]}
    if template_path.exists():
        with open(template_path, "r", encoding="utf-8") as f:
            data = yaml.safe_load(f)
        # Guard against empty/blank YAML files parsing to None.
        return data if data is not None else default_template
    return default_template
def save_yaml_template(template_path: Path, data: Dict) -> None:
    """Normalize *data* into the canonical template shape and write it as YAML."""
    qa_pairs = [
        {"question": str(pair.get("question", "")).strip(), "answer": str(pair.get("answer", "")).strip()}
        for pair in data.get("qa", [])
    ]
    normalized = {
        "prompt": str(data.get("prompt", "")),
        "requirements": str(data.get("requirements", "")),
        "count": data.get("count"),
        "qa": qa_pairs,
    }
    # Make sure the settings directory exists before writing.
    template_path.parent.mkdir(parents=True, exist_ok=True)
    with open(template_path, "w", encoding="utf-8") as f:
        yaml.dump(normalized, f, allow_unicode=True, sort_keys=False, default_flow_style=False, indent=2)
def display_optimization_results(result_data):
    """Render each optimization round (prompt, status, tokens, answers) and a summary."""
    for result in result_data:
        round_num = result["round"]
        success = result["succeed"]
        prompt = result["prompt"]
        # One collapsible panel per round; the icon reflects success/failure.
        with st.expander(f"Round {round_num} {':white_check_mark:' if success else ':x:'}"):
            st.markdown("**Prompt:**")
            st.code(prompt, language="text")
            st.markdown("<br>", unsafe_allow_html=True)
            col1, col2 = st.columns(2)
            with col1:
                st.markdown(f"**Status:** {'Success ✅ ' if success else 'Failed ❌ '}")
            with col2:
                st.markdown(f"**Tokens:** {result['tokens']}")
            st.markdown("**Answers:**")
            for idx, answer in enumerate(result["answers"]):
                st.markdown(f"**Question {idx + 1}:**")
                st.text(answer["question"])
                st.markdown("**Answer:**")
                st.text(answer["answer"])
                st.markdown("---")
    # Summary
    success_count = sum(1 for r in result_data if r["succeed"])
    total_rounds = len(result_data)
    st.markdown("### Summary")
    col1, col2 = st.columns(2)
    with col1:
        st.metric("Total Rounds", total_rounds)
    with col2:
        st.metric("Successful Rounds", success_count)
def main():
    """Streamlit UI for SPO: edit a QA template, run the optimizer, review results.

    Layout: a sidebar for model/optimizer configuration; the main area holds
    template editing, optimization logs, results, and a prompt test harness.
    """
    if "optimization_results" not in st.session_state:
        st.session_state.optimization_results = []
    # Page header with paper/GitHub badges.
    st.markdown(
        """
        <div style="background-color: #f0f2f6; padding: 20px; border-radius: 10px; margin-bottom: 25px">
            <div style="display: flex; justify-content: space-between; align-items: center; margin-bottom: 10px">
                <h1 style="margin: 0;">SPO | Self-Supervised Prompt Optimization 🤖</h1>
            </div>
            <div style="display: flex; gap: 20px; align-items: center">
                <a href="https://arxiv.org/pdf/2502.06855" target="_blank" style="text-decoration: none;">
                    <img src="https://img.shields.io/badge/Paper-PDF-red.svg" alt="Paper">
                </a>
                <a href="https://github.com/geekan/MetaGPT/blob/main/examples/spo/README.md" target="_blank" style="text-decoration: none;">
                    <img src="https://img.shields.io/badge/GitHub-Repository-blue.svg" alt="GitHub">
                </a>
                <span style="color: #666;">A framework for self-supervised prompt optimization</span>
            </div>
        </div>
        """,
        unsafe_allow_html=True,
    )
    # Sidebar for configurations
    with st.sidebar:
        st.header("Configuration")
        # Template Selection/Creation
        settings_path = Path("metagpt/ext/spo/settings")
        existing_templates = [f.stem for f in settings_path.glob("*.yaml")]
        template_mode = st.radio("Template Mode", ["Use Existing", "Create New"])
        if template_mode == "Use Existing":
            template_name = st.selectbox("Select Template", existing_templates)
        else:
            template_name = st.text_input("New Template Name")
            if template_name:
                # FIX: the previous code checked `not endswith(".yaml")` and then
                # assigned the name unchanged (a no-op), so a user who typed
                # "Name.yaml" produced a "Name.yaml.yaml" path below. Strip the
                # suffix instead so the f"{template_name}.yaml" join is correct.
                template_name = template_name.removesuffix(".yaml")
        # LLM Settings
        st.subheader("LLM Settings")
        opt_model = st.selectbox(
            "Optimization Model", ["claude-3-5-sonnet-20240620", "gpt-4o", "gpt-4o-mini", "deepseek-chat"], index=0
        )
        opt_temp = st.slider("Optimization Temperature", 0.0, 1.0, 0.7)
        eval_model = st.selectbox(
            "Evaluation Model", ["gpt-4o-mini", "claude-3-5-sonnet-20240620", "gpt-4o", "deepseek-chat"], index=0
        )
        eval_temp = st.slider("Evaluation Temperature", 0.0, 1.0, 0.3)
        exec_model = st.selectbox(
            "Execution Model", ["gpt-4o-mini", "claude-3-5-sonnet-20240620", "gpt-4o", "deepseek-chat"], index=0
        )
        exec_temp = st.slider("Execution Temperature", 0.0, 1.0, 0.0)
        # Optimizer Settings
        st.subheader("Optimizer Settings")
        initial_round = st.number_input("Initial Round", 1, 100, 1)
        max_rounds = st.number_input("Maximum Rounds", 1, 100, 10)
    # Main content area
    st.header("Template Configuration")
    if template_name:
        template_path = settings_path / f"{template_name}.yaml"
        template_data = load_yaml_template(template_path)
        # Reset the editable QA list whenever the selected template changes.
        if "current_template" not in st.session_state or st.session_state.current_template != template_name:
            st.session_state.current_template = template_name
            st.session_state.qas = template_data.get("qa", [])
        # Edit template sections
        prompt = st.text_area("Prompt", template_data.get("prompt", ""), height=100)
        requirements = st.text_area("Requirements", template_data.get("requirements", ""), height=100)
        # qa section
        st.subheader("Q&A Examples")
        # Add new qa button
        if st.button("Add New Q&A"):
            st.session_state.qas.append({"question": "", "answer": ""})
        # Edit qas
        new_qas = []
        for i in range(len(st.session_state.qas)):
            st.markdown(f"**QA #{i + 1}**")
            col1, col2, col3 = st.columns([45, 45, 10])
            with col1:
                question = st.text_area(
                    f"Question {i + 1}", st.session_state.qas[i].get("question", ""), key=f"q_{i}", height=100
                )
            with col2:
                answer = st.text_area(
                    f"Answer {i + 1}", st.session_state.qas[i].get("answer", ""), key=f"a_{i}", height=100
                )
            with col3:
                if st.button("🗑️", key=f"delete_{i}"):
                    st.session_state.qas.pop(i)
                    st.rerun()
            new_qas.append({"question": question, "answer": answer})
        # Save template button
        if st.button("Save Template"):
            new_template_data = {"prompt": prompt, "requirements": requirements, "count": None, "qa": new_qas}
            save_yaml_template(template_path, new_template_data)
            st.session_state.qas = new_qas
            st.success(f"Template saved to {template_path}")
        st.subheader("Current Template Preview")
        preview_data = {"qa": new_qas, "requirements": requirements, "prompt": prompt}
        st.code(yaml.dump(preview_data, allow_unicode=True), language="yaml")
        st.subheader("Optimization Logs")
        log_container = st.empty()

        # Mirror optimizer log records into the Streamlit page.
        class StreamlitSink:
            def write(self, message):
                current_logs = st.session_state.get("logs", [])
                current_logs.append(message.strip())
                st.session_state.logs = current_logs
                log_container.code("\n".join(current_logs), language="plaintext")

        streamlit_sink = StreamlitSink()
        _logger.remove()

        def prompt_optimizer_filter(record):
            # Only surface log records emitted by optimizer modules.
            return "optimizer" in record["name"].lower()

        _logger.add(
            streamlit_sink.write,
            format="{time:YYYY-MM-DD HH:mm:ss.SSS} | {level: <8} | {name}:{function}:{line} - {message}",
            filter=prompt_optimizer_filter,
        )
        _logger.add(METAGPT_ROOT / "logs/{time:YYYYMMDD}.txt", level="DEBUG")
        # Start optimization button
        if st.button("Start Optimization"):
            try:
                # Initialize LLM
                SPO_LLM.initialize(
                    optimize_kwargs={"model": opt_model, "temperature": opt_temp},
                    evaluate_kwargs={"model": eval_model, "temperature": eval_temp},
                    execute_kwargs={"model": exec_model, "temperature": exec_temp},
                )
                # Create optimizer instance
                optimizer = PromptOptimizer(
                    optimized_path="workspace",
                    initial_round=initial_round,
                    max_rounds=max_rounds,
                    template=f"{template_name}.yaml",
                    name=template_name,
                )
                # Run optimization with progress bar
                with st.spinner("Optimizing prompts..."):
                    optimizer.optimize()
                st.success("Optimization completed!")
                st.header("Optimization Results")
                prompt_path = optimizer.root_path / "prompts"
                result_data = optimizer.data_utils.load_results(prompt_path)
                st.session_state.optimization_results = result_data
            except Exception as e:
                st.error(f"An error occurred: {str(e)}")
                _logger.error(f"Error during optimization: {str(e)}")
        if st.session_state.optimization_results:
            st.header("Optimization Results")
            display_optimization_results(st.session_state.optimization_results)
        st.markdown("---")
        st.subheader("Test Optimized Prompt")
        col1, col2 = st.columns(2)
        with col1:
            test_prompt = st.text_area("Optimized Prompt", value="", height=200, key="test_prompt")
        with col2:
            test_question = st.text_area("Your Question", value="", height=200, key="test_question")
        if st.button("Test Prompt"):
            if test_prompt and test_question:
                try:
                    with st.spinner("Generating response..."):
                        SPO_LLM.initialize(
                            optimize_kwargs={"model": opt_model, "temperature": opt_temp},
                            evaluate_kwargs={"model": eval_model, "temperature": eval_temp},
                            execute_kwargs={"model": exec_model, "temperature": exec_temp},
                        )
                        llm = SPO_LLM.get_instance()
                        messages = [{"role": "user", "content": f"{test_prompt}\n\n{test_question}"}]

                        # Run the async LLM call on a dedicated event loop.
                        async def get_response():
                            return await llm.responser(request_type=RequestType.EXECUTE, messages=messages)

                        loop = asyncio.new_event_loop()
                        asyncio.set_event_loop(loop)
                        try:
                            response = loop.run_until_complete(get_response())
                        finally:
                            loop.close()
                        st.subheader("Response:")
                        st.markdown(response)
                except Exception as e:
                    st.error(f"Error generating response: {str(e)}")
            else:
                st.warning("Please enter both prompt and question.")
# Launch the Streamlit app when run directly (`streamlit run app.py`).
if __name__ == "__main__":
    main()
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "metagpt/ext/spo/app.py",
"license": "MIT License",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FoundationAgents/MetaGPT:metagpt/ext/spo/components/evaluator.py | # -*- coding: utf-8 -*-
# @Date : 8/23/2024 10:00 AM
# @Author : all
# @Desc : Evaluation for different datasets
import asyncio
import random
from typing import Any, Dict
from metagpt.ext.spo.prompts.evaluate_prompt import EVALUATE_PROMPT
from metagpt.ext.spo.utils import load
from metagpt.ext.spo.utils.llm_client import SPO_LLM, RequestType, extract_content
from metagpt.logs import logger
class QuickExecute:
    """
    Execute Prompt

    Runs a single prompt against every question in the loaded QA set.
    """

    def __init__(self, prompt: str):
        self.prompt = prompt
        self.llm = SPO_LLM.get_instance()

    async def prompt_execute(self) -> tuple[Any]:
        """Run the prompt over all QA questions concurrently.

        Returns a sequence of ``{"question", "answer"}`` dicts. A failure on
        one question stores the exception text as that answer instead of
        aborting the whole batch.
        """
        _, _, qa, _ = load.load_meta_data()

        async def fetch_answer(q: str) -> Dict[str, Any]:
            messages = [{"role": "user", "content": f"{self.prompt}\n\n{q}"}]
            try:
                answer = await self.llm.responser(request_type=RequestType.EXECUTE, messages=messages)
                return {"question": q, "answer": answer}
            except Exception as e:
                # Best-effort: record the error text as the answer.
                return {"question": q, "answer": str(e)}

        # Fix: removed the dead `answers = []` pre-assignment — gather()
        # produces the result list directly.
        tasks = [fetch_answer(item["question"]) for item in qa]
        answers = await asyncio.gather(*tasks)
        return answers
class QuickEvaluate:
    """
    Complete the evaluation for different answers here.
    """

    def __init__(self):
        self.llm = SPO_LLM.get_instance()

    async def prompt_evaluate(self, samples: dict, new_samples: dict) -> bool:
        """Ask the evaluator LLM whether new_samples beats samples.

        Returns True when the new sample is judged better; False on a loss
        or on any LLM/parsing error.
        """
        _, requirement, qa, _ = load.load_meta_data()
        # Randomly swap A/B positions to cancel out positional bias in the judge.
        if random.random() < 0.5:
            samples, new_samples = new_samples, samples
            is_swapped = True
        else:
            is_swapped = False
        messages = [
            {
                "role": "user",
                "content": EVALUATE_PROMPT.format(
                    requirement=requirement, sample=samples, new_sample=new_samples, answers=str(qa)
                ),
            }
        ]
        try:
            response = await self.llm.responser(request_type=RequestType.EVALUATE, messages=messages)
            choose = extract_content(response, "choose")
            # After a swap, slot "A" holds the new sample; otherwise it sits in "B".
            return choose == "A" if is_swapped else choose == "B"
        except Exception as e:
            logger.error(e)
            return False
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "metagpt/ext/spo/components/evaluator.py",
"license": "MIT License",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:metagpt/ext/spo/components/optimizer.py | # -*- coding: utf-8 -*-
# @Date : 8/12/2024 22:00 PM
# @Author : issac
# @Desc : optimizer for prompt
import asyncio
from pathlib import Path
from typing import List
from metagpt.ext.spo.prompts.optimize_prompt import PROMPT_OPTIMIZE_PROMPT
from metagpt.ext.spo.utils import load
from metagpt.ext.spo.utils.data_utils import DataUtils
from metagpt.ext.spo.utils.evaluation_utils import EvaluationUtils
from metagpt.ext.spo.utils.llm_client import SPO_LLM, RequestType, extract_content
from metagpt.ext.spo.utils.prompt_utils import PromptUtils
from metagpt.logs import logger
class PromptOptimizer:
    """Drives iterative prompt optimization: generate → execute → evaluate per round."""

    def __init__(
        self,
        optimized_path: str = None,
        initial_round: int = 1,
        max_rounds: int = 10,
        name: str = "",
        template: str = "",
    ) -> None:
        self.name = name
        # All artifacts for this project live under <optimized_path>/<name>.
        self.root_path = Path(optimized_path) / self.name
        self.top_scores = []
        self.round = initial_round
        self.max_rounds = max_rounds
        self.template = template
        self.prompt_utils = PromptUtils(self.root_path)
        self.data_utils = DataUtils(self.root_path)
        self.evaluation_utils = EvaluationUtils(self.root_path)
        self.llm = SPO_LLM.get_instance()

    def optimize(self):
        """Run max_rounds optimization rounds, then report the best prompt."""
        for opt_round in range(self.max_rounds):
            # A fresh event loop per round lets this sync method drive async steps.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            loop.run_until_complete(self._optimize_prompt())
            self.round += 1
        self.show_final_result()

    def show_final_result(self):
        """Log the best-performing round and its prompt."""
        # NOTE(review): get_best_round() returns None when no round succeeded,
        # which would make the subscripts below raise — confirm that round 1
        # is always recorded as succeeded.
        best_round = self.data_utils.get_best_round()
        logger.info("\n" + "=" * 50)
        logger.info("\n🏆 OPTIMIZATION COMPLETED - FINAL RESULTS 🏆\n")
        logger.info(f"\n📌 Best Performing Round: {best_round['round']}")
        logger.info(f"\n🎯 Final Optimized Prompt:\n{best_round['prompt']}")
        logger.info("\n" + "=" * 50 + "\n")

    async def _optimize_prompt(self):
        """One round: round 1 runs the base prompt; later rounds generate and test a new one."""
        prompt_path = self.root_path / "prompts"
        load.set_file_name(self.template)
        data = self.data_utils.load_results(prompt_path)
        if self.round == 1:
            await self._handle_first_round(prompt_path, data)
            return
        directory = self.prompt_utils.create_round_directory(prompt_path, self.round)
        new_prompt = await self._generate_optimized_prompt()
        self.prompt = new_prompt
        logger.info(f"\nRound {self.round} Prompt: {self.prompt}\n")
        self.prompt_utils.write_prompt(directory, prompt=self.prompt)
        success, answers = await self._evaluate_new_prompt(prompt_path, data, directory)
        self._log_optimization_result(success)
        return self.prompt

    async def _handle_first_round(self, prompt_path: Path, data: List[dict]) -> None:
        """Execute and record the template's base prompt as the round-1 baseline."""
        logger.info("\n⚡ RUNNING Round 1 PROMPT ⚡\n")
        directory = self.prompt_utils.create_round_directory(prompt_path, self.round)
        prompt, _, _, _ = load.load_meta_data()
        self.prompt = prompt
        self.prompt_utils.write_prompt(directory, prompt=self.prompt)
        new_samples = await self.evaluation_utils.execute_prompt(self, directory)
        # initial=True: nothing to compare against yet, just record the answers.
        _, answers = await self.evaluation_utils.evaluate_prompt(
            self, None, new_samples, path=prompt_path, data=data, initial=True
        )
        self.prompt_utils.write_answers(directory, answers=answers)

    async def _generate_optimized_prompt(self):
        """Ask the optimizer LLM for an improved prompt based on the best round so far."""
        _, requirements, qa, count = load.load_meta_data()
        samples = self.data_utils.get_best_round()
        logger.info(f"\n🚀Round {self.round} OPTIMIZATION STARTING 🚀\n")
        logger.info(f"\nSelecting prompt for round {samples['round']} and advancing to the iteration phase\n")
        golden_answer = self.data_utils.list_to_markdown(qa)
        best_answer = self.data_utils.list_to_markdown(samples["answers"])
        optimize_prompt = PROMPT_OPTIMIZE_PROMPT.format(
            prompt=samples["prompt"],
            answers=best_answer,
            requirements=requirements,
            golden_answers=golden_answer,
            count=count,
        )
        response = await self.llm.responser(
            request_type=RequestType.OPTIMIZE, messages=[{"role": "user", "content": optimize_prompt}]
        )
        modification = extract_content(response, "modification")
        logger.info(f"Modification of {self.round} round: {modification}")
        prompt = extract_content(response, "prompt")
        # Fall back to an empty prompt if the reply carried no <prompt> tag.
        return prompt if prompt else ""

    async def _evaluate_new_prompt(self, prompt_path, data, directory):
        """Run the candidate prompt and judge it against the best round so far."""
        logger.info("\n⚡ RUNNING OPTIMIZED PROMPT ⚡\n")
        new_samples = await self.evaluation_utils.execute_prompt(self, directory)
        logger.info("\n📊 EVALUATING OPTIMIZED PROMPT 📊\n")
        samples = self.data_utils.get_best_round()
        success, answers = await self.evaluation_utils.evaluate_prompt(
            self, samples, new_samples, path=prompt_path, data=data, initial=False
        )
        self.prompt_utils.write_answers(directory, answers=answers)
        return success, answers

    def _log_optimization_result(self, success):
        """Log whether this round's candidate beat the previous best."""
        logger.info("\n🎯 OPTIMIZATION RESULT 🎯\n")
        logger.info(f"\nRound {self.round} Optimization: {'✅ SUCCESS' if success else '❌ FAILED'}\n")
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "metagpt/ext/spo/components/optimizer.py",
"license": "MIT License",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:metagpt/ext/spo/prompts/evaluate_prompt.py | EVALUATE_PROMPT = """
Based on the original requirements, evaluate the two responses, A and B, and determine which one better meets the requirements. If a reference answer is provided, strictly follow the format/content of the reference answer.
# Requirement
{requirement}
# A
{sample}
# B
{new_sample}
# Golden answer
{answers}
Provide your analysis and the choice you believe is better, using XML tags to encapsulate your response.
<analyse>Some analysis</analyse>
<choose>A/B (the better answer in your opinion)</choose>
"""
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "metagpt/ext/spo/prompts/evaluate_prompt.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
FoundationAgents/MetaGPT:metagpt/ext/spo/prompts/optimize_prompt.py | PROMPT_OPTIMIZE_PROMPT = """
You are building a prompt to address user requirement. Based on the given prompt,
please reconstruct and optimize it. You can add, modify, or delete prompts. Please include a single modification in
XML tags in your reply. During the optimization, you can incorporate any thinking models.
This is a prompt that performed excellently in a previous iteration. You must make further optimizations and improvements based on this prompt. The modified prompt must differ from the provided example.
requirements:
```
{requirements}
```
reference prompt:
```
{prompt}
```
The execution result of this reference prompt is(some cases):
```
{answers}
```
The best answer we expect(some cases):
```
{golden_answers}
```
Provide your analysis, optimization points, and the complete optimized prompt using the following XML format:
<analyse>Analyze what drawbacks exist in the results produced by the reference prompt and how to improve them.</analyse>
<modification>Summarize the key points for improvement in one sentence</modification>
<prompt>Provide the complete optimized prompt {count}</prompt>
"""
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "metagpt/ext/spo/prompts/optimize_prompt.py",
"license": "MIT License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
FoundationAgents/MetaGPT:metagpt/ext/spo/utils/data_utils.py | import datetime
import json
from pathlib import Path
from typing import Dict, List, Union
import pandas as pd
from metagpt.logs import logger
class DataUtils:
    """Read/write helpers for optimization round results stored under a root path."""

    def __init__(self, root_path: Path):
        self.root_path = root_path
        self.top_scores = []

    def load_results(self, path: Path) -> list:
        """Return the parsed results.json under *path*, or [] when absent/corrupt."""
        result_path = self.get_results_file_path(path)
        if not result_path.exists():
            return []
        try:
            return json.loads(result_path.read_text())
        except json.JSONDecodeError:
            return []

    def get_best_round(self):
        """Return the most recent successful round record, or None if none succeeded."""
        self._load_scores()
        return next((entry for entry in self.top_scores if entry["succeed"]), None)

    def get_results_file_path(self, prompt_path: Path) -> Path:
        """Location of the aggregated results file for a prompt directory."""
        return prompt_path / "results.json"

    def create_result_data(self, round: int, answers: list[dict], prompt: str, succeed: bool, tokens: int) -> dict:
        """Assemble one round's result record, stamped with the current time."""
        return {
            "round": round,
            "answers": answers,
            "prompt": prompt,
            "succeed": succeed,
            "tokens": tokens,
            "time": datetime.datetime.now(),
        }

    def save_results(self, json_file_path: Path, data: Union[List, Dict]):
        """Serialize *data* to *json_file_path*; non-JSON values fall back to str."""
        json_file_path.write_text(json.dumps(data, default=str, indent=4))

    def _load_scores(self):
        """Populate self.top_scores from prompts/results.json, newest round first."""
        results_file = self.root_path / "prompts" / "results.json"
        self.top_scores = []
        try:
            if not results_file.exists():
                logger.warning(f"Results file not found at {results_file}")
                return self.top_scores
            frame = pd.DataFrame(json.loads(results_file.read_text(encoding="utf-8")))
            for _, record in frame.iterrows():
                self.top_scores.append(
                    {
                        "round": record["round"],
                        "succeed": record["succeed"],
                        "prompt": record["prompt"],
                        "answers": record["answers"],
                    }
                )
            self.top_scores.sort(key=lambda entry: entry["round"], reverse=True)
        except FileNotFoundError:
            logger.error(f"Could not find results file: {results_file}")
        except json.JSONDecodeError:
            logger.error(f"Invalid JSON format in file: {results_file}")
        except Exception as e:
            logger.error(f"Unexpected error loading scores: {str(e)}")
        return self.top_scores

    def list_to_markdown(self, questions_list: list):
        """
        Convert a list of question-answer dictionaries to a formatted Markdown string.
        Args:
            questions_list (list): List of dictionaries containing 'question' and 'answer' keys
        Returns:
            str: Formatted Markdown string
        """
        parts = ["```\n"]
        total = len(questions_list)
        for idx, pair in enumerate(questions_list, 1):
            parts.append(f"Question {idx}\n\n{pair['question']}\n\n")
            parts.append(f"Answer {idx}\n\n{pair['answer']}\n\n")
            # Separator between QA pairs, omitted after the last one.
            if idx < total:
                parts.append("---\n\n")
        parts.append("\n```")
        return "".join(parts)
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "metagpt/ext/spo/utils/data_utils.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FoundationAgents/MetaGPT:metagpt/ext/spo/utils/evaluation_utils.py | import asyncio
from pathlib import Path
from typing import Any, List, Optional, Tuple
import tiktoken
from metagpt.ext.spo.components.evaluator import QuickEvaluate, QuickExecute
from metagpt.logs import logger
# Number of independent LLM evaluation votes taken per comparison (majority wins).
EVALUATION_REPETITION = 4
def count_tokens(sample: dict) -> int:
    """Return the cl100k_base token count of a sample's answers (0 for empty/None)."""
    if not sample:
        return 0
    encoder = tiktoken.get_encoding("cl100k_base")
    return len(encoder.encode(str(sample["answers"])))
class EvaluationUtils:
    """Run prompt executions and majority-vote evaluations for an optimizer round."""

    def __init__(self, root_path: Path) -> None:
        self.root_path = root_path

    async def execute_prompt(self, optimizer: Any, prompt_path: Path) -> dict:
        """Load the round's prompt, execute it, and package the answers."""
        optimizer.prompt = optimizer.prompt_utils.load_prompt(optimizer.round, prompt_path)
        executor = QuickExecute(prompt=optimizer.prompt)
        answers = await executor.prompt_execute()
        return {"round": optimizer.round, "answers": answers, "prompt": optimizer.prompt}

    async def evaluate_prompt(
        self,
        optimizer: Any,
        samples: Optional[dict],
        new_samples: dict,
        path: Path,
        data: List[dict],
        initial: bool = False,
    ) -> Tuple[bool, dict]:
        """Judge *new_samples* against *samples* by repeated LLM votes; persist the outcome.

        Returns (succeed, answers) where succeed is the strict-majority verdict.
        """
        evaluator = QuickEvaluate()
        new_token = count_tokens(new_samples)

        if initial is True:
            # The very first round has no baseline to compare against.
            succeed = True
        else:
            evaluation_results = list(
                await asyncio.gather(
                    *[
                        evaluator.prompt_evaluate(samples=samples, new_samples=new_samples)
                        for _ in range(EVALUATION_REPETITION)
                    ]
                )
            )
            logger.info(f"Evaluation Results {evaluation_results}")
            # Strict majority of True votes wins.
            succeed = evaluation_results.count(True) > evaluation_results.count(False)

        new_data = optimizer.data_utils.create_result_data(
            new_samples["round"], new_samples["answers"], new_samples["prompt"], succeed, new_token
        )
        data.append(new_data)
        optimizer.data_utils.save_results(
            optimizer.data_utils.get_results_file_path(path), data
        )
        return succeed, new_samples["answers"]
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "metagpt/ext/spo/utils/evaluation_utils.py",
"license": "MIT License",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:metagpt/ext/spo/utils/llm_client.py | import asyncio
import re
from enum import Enum
from typing import Any, List, Optional
from metagpt.configs.models_config import ModelsConfig
from metagpt.llm import LLM
from metagpt.logs import logger
class RequestType(Enum):
    """Role of an LLM request, used to route it to the matching configured model."""

    OPTIMIZE = "optimize"  # prompt-optimization requests
    EVALUATE = "evaluate"  # pairwise evaluation votes
    EXECUTE = "execute"  # running the prompt against tasks


class SPO_LLM:
    """Singleton-style holder for the three LLM roles used by SPO."""

    _instance: Optional["SPO_LLM"] = None

    def __init__(
        self,
        optimize_kwargs: Optional[dict] = None,
        evaluate_kwargs: Optional[dict] = None,
        execute_kwargs: Optional[dict] = None,
    ) -> None:
        self.evaluate_llm = LLM(llm_config=self._load_llm_config(evaluate_kwargs))
        self.optimize_llm = LLM(llm_config=self._load_llm_config(optimize_kwargs))
        self.execute_llm = LLM(llm_config=self._load_llm_config(execute_kwargs))

    def _load_llm_config(self, kwargs: dict) -> Any:
        """Resolve kwargs['model'] to a models-config entry and overlay remaining kwargs.

        Raises:
            ValueError: if 'model' is missing or unknown, or the config cannot be
                copied/updated.
        """
        model = kwargs.get("model")
        if not model:
            raise ValueError("'model' parameter is required")
        try:
            model_config = ModelsConfig.default().get(model)
        except AttributeError:
            raise ValueError(f"Model '{model}' not found in configuration")
        except Exception as e:
            raise ValueError(f"Error loading configuration for model '{model}': {str(e)}")
        # BUG FIX: this check previously lived inside the broad try/except below,
        # so its ValueError was caught and re-wrapped as a generic loading error.
        if model_config is None:
            raise ValueError(f"Model '{model}' not found in configuration")
        try:
            config = model_config.model_copy()
            # Overlay caller-supplied settings (e.g. temperature) onto the base config.
            for key, value in kwargs.items():
                if hasattr(config, key):
                    setattr(config, key, value)
            return config
        except Exception as e:
            raise ValueError(f"Error loading configuration for model '{model}': {str(e)}")

    async def responser(self, request_type: RequestType, messages: List[dict]) -> str:
        """Send *messages* to the LLM configured for *request_type*; return its text."""
        llm_mapping = {
            RequestType.OPTIMIZE: self.optimize_llm,
            RequestType.EVALUATE: self.evaluate_llm,
            RequestType.EXECUTE: self.execute_llm,
        }
        llm = llm_mapping.get(request_type)
        if not llm:
            raise ValueError(f"Invalid request type. Valid types: {', '.join([t.value for t in RequestType])}")
        response = await llm.acompletion(messages)
        return response.choices[0].message.content

    @classmethod
    def initialize(cls, optimize_kwargs: dict, evaluate_kwargs: dict, execute_kwargs: dict) -> None:
        """Initialize the global instance"""
        cls._instance = cls(optimize_kwargs, evaluate_kwargs, execute_kwargs)

    @classmethod
    def get_instance(cls) -> "SPO_LLM":
        """Get the global instance"""
        if cls._instance is None:
            raise RuntimeError("SPO_LLM not initialized. Call initialize() first.")
        return cls._instance
def extract_content(xml_string: str, tag: str) -> Optional[str]:
    """Return the stripped text inside the first <tag>...</tag> pair, or None.

    The match is non-greedy and spans newlines (DOTALL). The tag name is
    escaped so tags containing regex metacharacters cannot corrupt the pattern.
    """
    escaped = re.escape(tag)
    match = re.search(rf"<{escaped}>(.*?)</{escaped}>", xml_string, re.DOTALL)
    return match.group(1).strip() if match else None
async def main():
    """Smoke-test the three SPO LLM roles with a trivial chat message."""
    SPO_LLM.initialize(
        optimize_kwargs={"model": "gpt-4o", "temperature": 0.7},
        evaluate_kwargs={"model": "gpt-4o-mini", "temperature": 0.3},
        execute_kwargs={"model": "gpt-4o-mini", "temperature": 0.3},
    )
    llm = SPO_LLM.get_instance()

    # test messages
    hello_msg = [{"role": "user", "content": "hello"}]
    # BUG FIX: `logger` is a logger object, not a callable — calling it raised
    # TypeError; use logger.info() instead.
    response = await llm.responser(request_type=RequestType.EXECUTE, messages=hello_msg)
    logger.info(f"AI: {response}")
    response = await llm.responser(request_type=RequestType.OPTIMIZE, messages=hello_msg)
    logger.info(f"AI: {response}")
    response = await llm.responser(request_type=RequestType.EVALUATE, messages=hello_msg)
    logger.info(f"AI: {response}")
if __name__ == "__main__":
asyncio.run(main())
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "metagpt/ext/spo/utils/llm_client.py",
"license": "MIT License",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FoundationAgents/MetaGPT:metagpt/ext/spo/utils/load.py | import random
from pathlib import Path
import yaml
# Name of the YAML settings file to load (selected via set_file_name()).
FILE_NAME = ""
# Default number of QA pairs sampled for few-shot context.
SAMPLE_K = 3
def set_file_name(name: str):
    """Select which settings YAML file load_meta_data() will read."""
    global FILE_NAME
    FILE_NAME = name
def load_meta_data(k: int = SAMPLE_K):
    """Read the active settings YAML and return (prompt, requirements, sampled_qa, count_hint).

    Raises:
        FileNotFoundError: if the selected settings file does not exist.
        ValueError: if the YAML cannot be parsed.
        Exception: for any other read failure.
    """
    config_path = Path(__file__).parent.parent / "settings" / FILE_NAME
    if not config_path.exists():
        raise FileNotFoundError(f"Configuration file '{FILE_NAME}' not found in settings directory")

    try:
        with config_path.open("r", encoding="utf-8") as file:
            data = yaml.safe_load(file)
    except yaml.YAMLError as e:
        raise ValueError(f"Error parsing YAML file '{FILE_NAME}': {str(e)}")
    except Exception as e:
        raise Exception(f"Error reading file '{FILE_NAME}': {str(e)}")

    qa = [{"question": item["question"], "answer": item["answer"]} for item in data["qa"]]

    prompt = data["prompt"]
    requirements = data["requirements"]

    # An integer count becomes a word-limit hint appended to the prompt.
    raw_count = data["count"]
    count = f", within {raw_count} words" if isinstance(raw_count, int) else ""

    random_qa = random.sample(qa, min(k, len(qa)))
    return prompt, requirements, random_qa, count
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "metagpt/ext/spo/utils/load.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/MetaGPT:metagpt/ext/spo/utils/prompt_utils.py | from pathlib import Path
from metagpt.logs import logger
class PromptUtils:
    """Create round directories and read/write prompt and answer files."""

    def __init__(self, root_path: Path):
        # Workspace root for the optimization run (kept for callers; unused here).
        self.root_path = root_path

    def create_round_directory(self, prompt_path: Path, round_number: int) -> Path:
        """Create (idempotently) and return <prompt_path>/round_<n>."""
        directory = prompt_path / f"round_{round_number}"
        directory.mkdir(parents=True, exist_ok=True)
        return directory

    def load_prompt(self, round_number: int, prompts_path: Path):
        """Return the text of <prompts_path>/prompt.txt; re-raise if missing."""
        prompt_file = prompts_path / "prompt.txt"
        try:
            return prompt_file.read_text(encoding="utf-8")
        except FileNotFoundError as e:
            logger.info(f"Error loading prompt for round {round_number}: {e}")
            raise

    def write_answers(self, directory: Path, answers: list, name: str = "answers.txt"):
        """Write question/answer pairs to <directory>/<name>.

        BUG FIX: the annotation previously said `answers: dict`, but the body
        iterates a list of {'question', 'answer'} mappings (a plain dict would
        yield string keys and fail on item['question']).
        """
        answers_file = directory / name
        with answers_file.open("w", encoding="utf-8") as file:
            for item in answers:
                file.write(f"Question:\n{item['question']}\n")
                file.write(f"Answer:\n{item['answer']}\n")
                file.write("\n")

    def write_prompt(self, directory: Path, prompt: str):
        """Write *prompt* to <directory>/prompt.txt as UTF-8."""
        prompt_file = directory / "prompt.txt"
        prompt_file.write_text(prompt, encoding="utf-8")
| {
"repo_id": "FoundationAgents/MetaGPT",
"file_path": "metagpt/ext/spo/utils/prompt_utils.py",
"license": "MIT License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/OpenManus:app/agent/sandbox_agent.py | from typing import Dict, List, Optional
from pydantic import Field, model_validator
from app.agent.browser import BrowserContextHelper
from app.agent.toolcall import ToolCallAgent
from app.config import config
from app.daytona.sandbox import create_sandbox, delete_sandbox
from app.daytona.tool_base import SandboxToolsBase
from app.logger import logger
from app.prompt.manus import NEXT_STEP_PROMPT, SYSTEM_PROMPT
from app.tool import Terminate, ToolCollection
from app.tool.ask_human import AskHuman
from app.tool.mcp import MCPClients, MCPClientTool
from app.tool.sandbox.sb_browser_tool import SandboxBrowserTool
from app.tool.sandbox.sb_files_tool import SandboxFilesTool
from app.tool.sandbox.sb_shell_tool import SandboxShellTool
from app.tool.sandbox.sb_vision_tool import SandboxVisionTool
class SandboxManus(ToolCallAgent):
    """A versatile general-purpose agent with support for both local and MCP tools.

    Tools execute inside a remote Daytona sandbox (browser/files/shell/vision),
    created in initialize_sandbox_tools(); MCP servers from the global config
    are attached in initialize_mcp_servers(). Use the async factory ``create()``
    rather than the constructor so both are initialized before first use.
    """

    name: str = "SandboxManus"
    description: str = "A versatile agent that can solve various tasks using multiple sandbox-tools including MCP-based tools"
    system_prompt: str = SYSTEM_PROMPT.format(directory=config.workspace_root)
    next_step_prompt: str = NEXT_STEP_PROMPT
    # Cap on observation text fed back to the model per step.
    max_observe: int = 10000
    max_steps: int = 20

    # MCP clients for remote tool access
    mcp_clients: MCPClients = Field(default_factory=MCPClients)

    # Add general-purpose tools to the tool collection
    available_tools: ToolCollection = Field(
        default_factory=lambda: ToolCollection(
            # PythonExecute(),
            # BrowserUseTool(),
            # StrReplaceEditor(),
            AskHuman(),
            Terminate(),
        )
    )
    special_tool_names: list[str] = Field(default_factory=lambda: [Terminate().name])
    browser_context_helper: Optional[BrowserContextHelper] = None

    # Track connected MCP servers
    connected_servers: Dict[str, str] = Field(
        default_factory=dict
    )  # server_id -> url/command
    _initialized: bool = False
    # Maps sandbox_id -> {"vnc": url, "website": url} preview links.
    sandbox_link: Optional[dict[str, dict[str, str]]] = Field(default_factory=dict)

    @model_validator(mode="after")
    def initialize_helper(self) -> "SandboxManus":
        """Initialize basic components synchronously."""
        self.browser_context_helper = BrowserContextHelper(self)
        return self

    @classmethod
    async def create(cls, **kwargs) -> "SandboxManus":
        """Factory method to create and properly initialize a Manus instance."""
        instance = cls(**kwargs)
        await instance.initialize_mcp_servers()
        await instance.initialize_sandbox_tools()
        instance._initialized = True
        return instance

    async def initialize_sandbox_tools(
        self,
        # NOTE(review): default is captured from config at class-definition time,
        # not per call — confirm that is intended.
        password: str = config.daytona.VNC_password,
    ) -> None:
        """Create a Daytona sandbox, record its preview URLs, and register sandbox tools.

        Raises:
            ValueError: if no VNC password is available.
            Exception: re-raised after logging on any sandbox/preview failure.
        """
        try:
            # Create a new sandbox
            if password:
                sandbox = create_sandbox(password=password)
                # NOTE(review): `sandbox` is not a declared field on this model;
                # this relies on the pydantic config permitting extra attributes.
                self.sandbox = sandbox
            else:
                raise ValueError("password must be provided")
            vnc_link = sandbox.get_preview_link(6080)
            website_link = sandbox.get_preview_link(8080)
            # Preview objects may expose .url or be plain strings depending on SDK.
            vnc_url = vnc_link.url if hasattr(vnc_link, "url") else str(vnc_link)
            website_url = (
                website_link.url if hasattr(website_link, "url") else str(website_link)
            )
            # Get the actual sandbox_id from the created sandbox
            actual_sandbox_id = sandbox.id if hasattr(sandbox, "id") else "new_sandbox"
            if not self.sandbox_link:
                self.sandbox_link = {}
            self.sandbox_link[actual_sandbox_id] = {
                "vnc": vnc_url,
                "website": website_url,
            }
            logger.info(f"VNC URL: {vnc_url}")
            logger.info(f"Website URL: {website_url}")
            # Suppress duplicate URL printing by the sandbox tool base class.
            SandboxToolsBase._urls_printed = True
            sb_tools = [
                SandboxBrowserTool(sandbox),
                SandboxFilesTool(sandbox),
                SandboxShellTool(sandbox),
                SandboxVisionTool(sandbox),
            ]
            self.available_tools.add_tools(*sb_tools)
        except Exception as e:
            logger.error(f"Error initializing sandbox tools: {e}")
            raise

    async def initialize_mcp_servers(self) -> None:
        """Initialize connections to configured MCP servers."""
        for server_id, server_config in config.mcp_config.servers.items():
            try:
                if server_config.type == "sse":
                    if server_config.url:
                        await self.connect_mcp_server(server_config.url, server_id)
                        logger.info(
                            f"Connected to MCP server {server_id} at {server_config.url}"
                        )
                elif server_config.type == "stdio":
                    if server_config.command:
                        await self.connect_mcp_server(
                            server_config.command,
                            server_id,
                            use_stdio=True,
                            stdio_args=server_config.args,
                        )
                        logger.info(
                            f"Connected to MCP server {server_id} using command {server_config.command}"
                        )
            except Exception as e:
                # Best-effort: a failing server is logged and skipped, not fatal.
                logger.error(f"Failed to connect to MCP server {server_id}: {e}")

    async def connect_mcp_server(
        self,
        server_url: str,
        server_id: str = "",
        use_stdio: bool = False,
        stdio_args: List[str] = None,
    ) -> None:
        """Connect to an MCP server and add its tools.

        Args:
            server_url: SSE URL, or the launch command when use_stdio is True.
            server_id: server identifier; server_url is used as the key if empty.
            use_stdio: connect over stdio instead of SSE.
            stdio_args: extra command-line args for stdio connections.
        """
        if use_stdio:
            await self.mcp_clients.connect_stdio(
                server_url, stdio_args or [], server_id
            )
            self.connected_servers[server_id or server_url] = server_url
        else:
            await self.mcp_clients.connect_sse(server_url, server_id)
            self.connected_servers[server_id or server_url] = server_url
        # Update available tools with only the new tools from this server
        new_tools = [
            tool for tool in self.mcp_clients.tools if tool.server_id == server_id
        ]
        self.available_tools.add_tools(*new_tools)

    async def disconnect_mcp_server(self, server_id: str = "") -> None:
        """Disconnect from an MCP server and remove its tools.

        An empty server_id disconnects from all servers.
        """
        await self.mcp_clients.disconnect(server_id)
        if server_id:
            self.connected_servers.pop(server_id, None)
        else:
            self.connected_servers.clear()
        # Rebuild available tools without the disconnected server's tools
        base_tools = [
            tool
            for tool in self.available_tools.tools
            if not isinstance(tool, MCPClientTool)
        ]
        self.available_tools = ToolCollection(*base_tools)
        self.available_tools.add_tools(*self.mcp_clients.tools)

    async def delete_sandbox(self, sandbox_id: str) -> None:
        """Delete a sandbox by ID and drop its cached preview links."""
        try:
            await delete_sandbox(sandbox_id)
            logger.info(f"Sandbox {sandbox_id} deleted successfully")
            if sandbox_id in self.sandbox_link:
                del self.sandbox_link[sandbox_id]
        except Exception as e:
            logger.error(f"Error deleting sandbox {sandbox_id}: {e}")
            raise e

    async def cleanup(self):
        """Clean up Manus agent resources."""
        if self.browser_context_helper:
            await self.browser_context_helper.cleanup_browser()
        # Disconnect from all MCP servers only if we were initialized
        if self._initialized:
            await self.disconnect_mcp_server()
            # NOTE(review): self.sandbox is only set if initialize_sandbox_tools
            # succeeded — confirm cleanup cannot be reached without it.
            await self.delete_sandbox(self.sandbox.id if self.sandbox else "unknown")
            self._initialized = False

    async def think(self) -> bool:
        """Process current state and decide next actions with appropriate context."""
        if not self._initialized:
            await self.initialize_mcp_servers()
            self._initialized = True
        original_prompt = self.next_step_prompt
        recent_messages = self.memory.messages[-3:] if self.memory.messages else []
        # If the browser tool appeared in recent tool calls, enrich the
        # next-step prompt with live browser context.
        browser_in_use = any(
            tc.function.name == SandboxBrowserTool().name
            for msg in recent_messages
            if msg.tool_calls
            for tc in msg.tool_calls
        )
        if browser_in_use:
            self.next_step_prompt = (
                await self.browser_context_helper.format_next_step_prompt()
            )
        result = await super().think()
        # Restore original prompt
        self.next_step_prompt = original_prompt
        return result
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "app/agent/sandbox_agent.py",
"license": "MIT License",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FoundationAgents/OpenManus:app/daytona/sandbox.py | import time
from daytona import (
CreateSandboxFromImageParams,
Daytona,
DaytonaConfig,
Resources,
Sandbox,
SandboxState,
SessionExecuteRequest,
)
from app.config import config
from app.utils.logger import logger
# load_dotenv()
# Daytona settings come from the application config (not directly from env vars,
# despite the wording of the warnings below).
daytona_settings = config.daytona
logger.info("Initializing Daytona sandbox configuration")
daytona_config = DaytonaConfig(
    api_key=daytona_settings.daytona_api_key,
    server_url=daytona_settings.daytona_server_url,
    target=daytona_settings.daytona_target,
)
# Log what was (or wasn't) configured so misconfiguration is visible at import time.
if daytona_config.api_key:
    logger.info("Daytona API key configured successfully")
else:
    logger.warning("No Daytona API key found in environment variables")
if daytona_config.server_url:
    logger.info(f"Daytona server URL set to: {daytona_config.server_url}")
else:
    logger.warning("No Daytona server URL found in environment variables")
if daytona_config.target:
    logger.info(f"Daytona target set to: {daytona_config.target}")
else:
    logger.warning("No Daytona target found in environment variables")
# Module-level singleton client used by all sandbox helpers below.
daytona = Daytona(daytona_config)
logger.info("Daytona client initialized")
async def get_or_start_sandbox(sandbox_id: str):
    """Fetch a sandbox by ID and make sure it is running before returning it.

    Archived or stopped sandboxes are started and supervisord is relaunched.
    """
    logger.info(f"Getting or starting sandbox with ID: {sandbox_id}")

    try:
        sandbox = daytona.get(sandbox_id)

        # A dormant sandbox must be started before it can serve tool calls.
        if sandbox.state in (SandboxState.ARCHIVED, SandboxState.STOPPED):
            logger.info(f"Sandbox is in {sandbox.state} state. Starting...")
            try:
                daytona.start(sandbox)
                # Re-fetch so we observe the refreshed state after starting.
                sandbox = daytona.get(sandbox_id)
                # Supervisord must be relaunched on every restart.
                start_supervisord_session(sandbox)
            except Exception as e:
                logger.error(f"Error starting sandbox: {e}")
                raise e

        logger.info(f"Sandbox {sandbox_id} is ready")
        return sandbox
    except Exception as e:
        logger.error(f"Error retrieving or starting sandbox: {str(e)}")
        raise e
def start_supervisord_session(sandbox: Sandbox):
    """Start supervisord inside the sandbox via a long-lived process session.

    Blocks for 25 seconds to give supervisord's services time to come up.

    Raises:
        Exception: re-raised from the sandbox API if the session or command fails.
    """
    session_id = "supervisord-session"
    try:
        logger.info(f"Creating session {session_id} for supervisord")
        sandbox.process.create_session(session_id)
        # Execute supervisord command
        sandbox.process.execute_session_command(
            session_id,
            SessionExecuteRequest(
                command="exec /usr/bin/supervisord -n -c /etc/supervisor/conf.d/supervisord.conf",
                # Run asynchronously inside the sandbox so this call returns promptly.
                var_async=True,
            ),
        )
        time.sleep(25)  # Wait a bit to ensure supervisord starts properly
        logger.info(f"Supervisord started in session {session_id}")
    except Exception as e:
        logger.error(f"Error starting supervisord session: {str(e)}")
        raise e
def create_sandbox(password: str, project_id: str = None):
    """Create a new sandbox with all required services configured and running.

    Args:
        password: VNC password injected into the sandbox environment.
        project_id: optional ID attached as a sandbox label for later lookup.

    Returns:
        The created Sandbox with supervisord already started.
    """
    logger.info("Creating new Daytona sandbox environment")
    logger.info("Configuring sandbox with browser-use image and environment variables")

    labels = None
    if project_id:
        logger.info(f"Using sandbox_id as label: {project_id}")
        labels = {"id": project_id}

    params = CreateSandboxFromImageParams(
        image=daytona_settings.sandbox_image_name,
        public=True,
        labels=labels,
        env_vars={
            "CHROME_PERSISTENT_SESSION": "true",
            "RESOLUTION": "1024x768x24",
            "RESOLUTION_WIDTH": "1024",
            "RESOLUTION_HEIGHT": "768",
            "VNC_PASSWORD": password,
            "ANONYMIZED_TELEMETRY": "false",
            "CHROME_PATH": "",
            "CHROME_USER_DATA": "",
            "CHROME_DEBUGGING_PORT": "9222",
            "CHROME_DEBUGGING_HOST": "localhost",
            "CHROME_CDP": "",
        },
        resources=Resources(
            cpu=2,
            memory=4,
            disk=5,
        ),
        # Auto-stop after 15 idle minutes; auto-archive after 24 hours.
        auto_stop_interval=15,
        auto_archive_interval=24 * 60,
    )

    # Create the sandbox
    sandbox = daytona.create(params)
    logger.info(f"Sandbox created with ID: {sandbox.id}")

    # Start supervisord in a session for new sandbox
    start_supervisord_session(sandbox)

    # Fixed: removed stray f-prefix from a format string with no placeholders (F541).
    logger.info("Sandbox environment successfully initialized")
    return sandbox
async def delete_sandbox(sandbox_id: str):
    """Remove the sandbox identified by *sandbox_id*.

    Returns:
        True on success; re-raises (after logging) on failure.
    """
    logger.info(f"Deleting sandbox with ID: {sandbox_id}")

    try:
        # Resolve the sandbox first, then ask the client to remove it.
        target = daytona.get(sandbox_id)
        daytona.delete(target)
        logger.info(f"Successfully deleted sandbox {sandbox_id}")
        return True
    except Exception as e:
        logger.error(f"Error deleting sandbox {sandbox_id}: {str(e)}")
        raise e
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "app/daytona/sandbox.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FoundationAgents/OpenManus:app/daytona/tool_base.py | from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, ClassVar, Dict, Optional
from daytona import Daytona, DaytonaConfig, Sandbox, SandboxState
from pydantic import Field
from app.config import config
from app.daytona.sandbox import create_sandbox, start_supervisord_session
from app.tool.base import BaseTool
from app.utils.files_utils import clean_path
from app.utils.logger import logger
# load_dotenv()
# Daytona client configured from the application config; mirrors the setup in
# app/daytona/sandbox.py so tools can restart sandboxes directly.
daytona_settings = config.daytona
daytona_config = DaytonaConfig(
    api_key=daytona_settings.daytona_api_key,
    server_url=daytona_settings.daytona_server_url,
    target=daytona_settings.daytona_target,
)
daytona = Daytona(daytona_config)
@dataclass
class ThreadMessage:
    """A single message destined for a conversation thread.

    Carries the message kind, its payload, whether it originated from the LLM,
    optional metadata, and a creation timestamp (seconds since the epoch).
    """

    type: str
    content: Dict[str, Any]
    is_llm_message: bool = False
    metadata: Optional[Dict[str, Any]] = None
    timestamp: Optional[float] = field(
        default_factory=lambda: datetime.now().timestamp()
    )

    def to_dict(self) -> Dict[str, Any]:
        """Convert the message to a dictionary for API calls"""
        payload: Dict[str, Any] = {
            "type": self.type,
            "content": self.content,
            "is_llm_message": self.is_llm_message,
            # Absent metadata serializes as an empty mapping, never null.
            "metadata": self.metadata or {},
            "timestamp": self.timestamp,
        }
        return payload
class SandboxToolsBase(BaseTool):
    """Base class for all sandbox tools that provides project-based sandbox access.

    Lazily creates (or restarts) a Daytona sandbox on first use via
    ``_ensure_sandbox()`` and exposes it through the ``sandbox`` property.
    """

    # Class variable to track if sandbox URLs have been printed
    _urls_printed: ClassVar[bool] = False
    # Required fields
    project_id: Optional[str] = None
    # thread_manager: Optional[ThreadManager] = None
    # Private fields (not part of the model schema)
    _sandbox: Optional[Sandbox] = None
    _sandbox_id: Optional[str] = None
    _sandbox_pass: Optional[str] = None
    # Root directory inside the sandbox against which tool paths are resolved.
    workspace_path: str = Field(default="/workspace", exclude=True)
    # NOTE(review): mutable class-level default — confirm pydantic's private-attr
    # handling gives each instance its own copy rather than sharing this dict.
    _sessions: dict[str, str] = {}

    class Config:
        arbitrary_types_allowed = True  # Allow non-pydantic types like ThreadManager
        underscore_attrs_are_private = True

    async def _ensure_sandbox(self) -> Sandbox:
        """Ensure we have a valid sandbox instance, retrieving it from the project if needed."""
        if self._sandbox is None:
            # Get or start the sandbox
            try:
                self._sandbox = create_sandbox(password=config.daytona.VNC_password)
                # Log URLs if not already printed
                if not SandboxToolsBase._urls_printed:
                    vnc_link = self._sandbox.get_preview_link(6080)
                    website_link = self._sandbox.get_preview_link(8080)
                    # Preview objects may expose .url or be plain strings.
                    vnc_url = (
                        vnc_link.url if hasattr(vnc_link, "url") else str(vnc_link)
                    )
                    website_url = (
                        website_link.url
                        if hasattr(website_link, "url")
                        else str(website_link)
                    )
                    print("\033[95m***")
                    print(f"VNC URL: {vnc_url}")
                    print(f"Website URL: {website_url}")
                    print("***\033[0m")
                    SandboxToolsBase._urls_printed = True
            except Exception as e:
                logger.error(f"Error retrieving or starting sandbox: {str(e)}")
                raise e
        else:
            # Sandbox exists but may have gone dormant; restart it if needed.
            if (
                self._sandbox.state == SandboxState.ARCHIVED
                or self._sandbox.state == SandboxState.STOPPED
            ):
                logger.info(f"Sandbox is in {self._sandbox.state} state. Starting...")
                try:
                    daytona.start(self._sandbox)
                    # Wait a moment for the sandbox to initialize
                    # sleep(5)
                    # Refresh sandbox state after starting
                    # Start supervisord in a session when restarting
                    start_supervisord_session(self._sandbox)
                except Exception as e:
                    logger.error(f"Error starting sandbox: {e}")
                    raise e
        return self._sandbox

    @property
    def sandbox(self) -> Sandbox:
        """Get the sandbox instance, ensuring it exists."""
        if self._sandbox is None:
            raise RuntimeError("Sandbox not initialized. Call _ensure_sandbox() first.")
        return self._sandbox

    @property
    def sandbox_id(self) -> str:
        """Get the sandbox ID, ensuring it exists.

        NOTE(review): nothing in this class assigns _sandbox_id, so this always
        raises unless a subclass sets it — confirm intended.
        """
        if self._sandbox_id is None:
            raise RuntimeError(
                "Sandbox ID not initialized. Call _ensure_sandbox() first."
            )
        return self._sandbox_id

    def clean_path(self, path: str) -> str:
        """Clean and normalize a path to be relative to /workspace."""
        cleaned_path = clean_path(path, self.workspace_path)
        logger.debug(f"Cleaned path: {path} -> {cleaned_path}")
        return cleaned_path
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "app/daytona/tool_base.py",
"license": "MIT License",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FoundationAgents/OpenManus:app/tool/computer_use_tool.py | import asyncio
import base64
import logging
import os
import time
from typing import Dict, Literal, Optional
import aiohttp
from pydantic import Field
from app.daytona.tool_base import Sandbox, SandboxToolsBase
from app.tool.base import ToolResult
# Allowed values for ComputerUseTool's `key`/`keys` parameters: letters, digits,
# control/navigation keys, function keys, and a small set of common combos.
KEYBOARD_KEYS = [
    "a",
    "b",
    "c",
    "d",
    "e",
    "f",
    "g",
    "h",
    "i",
    "j",
    "k",
    "l",
    "m",
    "n",
    "o",
    "p",
    "q",
    "r",
    "s",
    "t",
    "u",
    "v",
    "w",
    "x",
    "y",
    "z",
    "0",
    "1",
    "2",
    "3",
    "4",
    "5",
    "6",
    "7",
    "8",
    "9",
    "enter",
    "esc",
    "backspace",
    "tab",
    "space",
    "delete",
    "ctrl",
    "alt",
    "shift",
    "win",
    "up",
    "down",
    "left",
    "right",
    "f1",
    "f2",
    "f3",
    "f4",
    "f5",
    "f6",
    "f7",
    "f8",
    "f9",
    "f10",
    "f11",
    "f12",
    "ctrl+c",
    "ctrl+v",
    "ctrl+x",
    "ctrl+z",
    "ctrl+a",
    "ctrl+s",
    "alt+tab",
    "alt+f4",
    "ctrl+alt+delete",
]
# Mouse buttons accepted by click/drag actions.
MOUSE_BUTTONS = ["left", "right", "middle"]
# Tool description surfaced to the LLM; keep in sync with ComputerUseTool.parameters.
_COMPUTER_USE_DESCRIPTION = """\
A comprehensive computer automation tool that allows interaction with the desktop environment.
* This tool provides commands for controlling mouse, keyboard, and taking screenshots
* It maintains state including current mouse position
* Use this when you need to automate desktop applications, fill forms, or perform GUI interactions
Key capabilities include:
* Mouse Control: Move, click, drag, scroll
* Keyboard Input: Type text, press keys or key combinations
* Screenshots: Capture and save screen images
* Waiting: Pause execution for specified duration
"""
class ComputerUseTool(SandboxToolsBase):
"""Computer automation tool for controlling the desktop environment."""
name: str = "computer_use"
description: str = _COMPUTER_USE_DESCRIPTION
parameters: dict = {
"type": "object",
"properties": {
"action": {
"type": "string",
"enum": [
"move_to",
"click",
"scroll",
"typing",
"press",
"wait",
"mouse_down",
"mouse_up",
"drag_to",
"hotkey",
"screenshot",
],
"description": "The computer action to perform",
},
"x": {"type": "number", "description": "X coordinate for mouse actions"},
"y": {"type": "number", "description": "Y coordinate for mouse actions"},
"button": {
"type": "string",
"enum": MOUSE_BUTTONS,
"description": "Mouse button for click/drag actions",
"default": "left",
},
"num_clicks": {
"type": "integer",
"description": "Number of clicks",
"enum": [1, 2, 3],
"default": 1,
},
"amount": {
"type": "integer",
"description": "Scroll amount (positive for up, negative for down)",
"minimum": -10,
"maximum": 10,
},
"text": {"type": "string", "description": "Text to type"},
"key": {
"type": "string",
"enum": KEYBOARD_KEYS,
"description": "Key to press",
},
"keys": {
"type": "string",
"enum": KEYBOARD_KEYS,
"description": "Key combination to press",
},
"duration": {
"type": "number",
"description": "Duration in seconds to wait",
"default": 0.5,
},
},
"required": ["action"],
"dependencies": {
"move_to": ["x", "y"],
"click": [],
"scroll": ["amount"],
"typing": ["text"],
"press": ["key"],
"wait": [],
"mouse_down": [],
"mouse_up": [],
"drag_to": ["x", "y"],
"hotkey": ["keys"],
"screenshot": [],
},
}
session: Optional[aiohttp.ClientSession] = Field(default=None, exclude=True)
mouse_x: int = Field(default=0, exclude=True)
mouse_y: int = Field(default=0, exclude=True)
api_base_url: Optional[str] = Field(default=None, exclude=True)
def __init__(self, sandbox: Optional[Sandbox] = None, **data):
"""Initialize with optional sandbox."""
super().__init__(**data)
if sandbox is not None:
self._sandbox = sandbox # 直接操作基类的私有属性
self.api_base_url = sandbox.get_preview_link(8000).url
logging.info(
f"Initialized ComputerUseTool with API URL: {self.api_base_url}"
)
@classmethod
def create_with_sandbox(cls, sandbox: Sandbox) -> "ComputerUseTool":
"""Factory method to create a tool with sandbox."""
return cls(sandbox=sandbox) # 通过构造函数初始化
async def _get_session(self) -> aiohttp.ClientSession:
"""Get or create aiohttp session for API requests."""
if self.session is None or self.session.closed:
self.session = aiohttp.ClientSession()
return self.session
async def _api_request(
self, method: str, endpoint: str, data: Optional[Dict] = None
) -> Dict:
"""Send request to automation service API."""
try:
session = await self._get_session()
url = f"{self.api_base_url}/api{endpoint}"
logging.debug(f"API request: {method} {url} {data}")
if method.upper() == "GET":
async with session.get(url) as response:
result = await response.json()
else: # POST
async with session.post(url, json=data) as response:
result = await response.json()
logging.debug(f"API response: {result}")
return result
except Exception as e:
logging.error(f"API request failed: {str(e)}")
return {"success": False, "error": str(e)}
async def execute(
self,
action: Literal[
"move_to",
"click",
"scroll",
"typing",
"press",
"wait",
"mouse_down",
"mouse_up",
"drag_to",
"hotkey",
"screenshot",
],
x: Optional[float] = None,
y: Optional[float] = None,
button: str = "left",
num_clicks: int = 1,
amount: Optional[int] = None,
text: Optional[str] = None,
key: Optional[str] = None,
keys: Optional[str] = None,
duration: float = 0.5,
**kwargs,
) -> ToolResult:
"""
Execute a specified computer automation action.
Args:
action: The action to perform
x: X coordinate for mouse actions
y: Y coordinate for mouse actions
button: Mouse button for click/drag actions
num_clicks: Number of clicks to perform
amount: Scroll amount (positive for up, negative for down)
text: Text to type
key: Key to press
keys: Key combination to press
duration: Duration in seconds to wait
**kwargs: Additional arguments
Returns:
ToolResult with the action's output or error
"""
try:
if action == "move_to":
if x is None or y is None:
return ToolResult(error="x and y coordinates are required")
x_int = int(round(float(x)))
y_int = int(round(float(y)))
result = await self._api_request(
"POST", "/automation/mouse/move", {"x": x_int, "y": y_int}
)
if result.get("success", False):
self.mouse_x = x_int
self.mouse_y = y_int
return ToolResult(output=f"Moved to ({x_int}, {y_int})")
else:
return ToolResult(
error=f"Failed to move: {result.get('error', 'Unknown error')}"
)
elif action == "click":
x_val = x if x is not None else self.mouse_x
y_val = y if y is not None else self.mouse_y
x_int = int(round(float(x_val)))
y_int = int(round(float(y_val)))
num_clicks = int(num_clicks)
result = await self._api_request(
"POST",
"/automation/mouse/click",
{
"x": x_int,
"y": y_int,
"clicks": num_clicks,
"button": button.lower(),
},
)
if result.get("success", False):
self.mouse_x = x_int
self.mouse_y = y_int
return ToolResult(
output=f"{num_clicks} {button} click(s) performed at ({x_int}, {y_int})"
)
else:
return ToolResult(
error=f"Failed to click: {result.get('error', 'Unknown error')}"
)
elif action == "scroll":
if amount is None:
return ToolResult(error="Scroll amount is required")
amount = int(float(amount))
amount = max(-10, min(10, amount))
result = await self._api_request(
"POST",
"/automation/mouse/scroll",
{"clicks": amount, "x": self.mouse_x, "y": self.mouse_y},
)
if result.get("success", False):
direction = "up" if amount > 0 else "down"
steps = abs(amount)
return ToolResult(
output=f"Scrolled {direction} {steps} step(s) at position ({self.mouse_x}, {self.mouse_y})"
)
else:
return ToolResult(
error=f"Failed to scroll: {result.get('error', 'Unknown error')}"
)
elif action == "typing":
if text is None:
return ToolResult(error="Text is required for typing")
text = str(text)
result = await self._api_request(
"POST",
"/automation/keyboard/write",
{"message": text, "interval": 0.01},
)
if result.get("success", False):
return ToolResult(output=f"Typed: {text}")
else:
return ToolResult(
error=f"Failed to type: {result.get('error', 'Unknown error')}"
)
elif action == "press":
if key is None:
return ToolResult(error="Key is required for press action")
key = str(key).lower()
result = await self._api_request(
"POST", "/automation/keyboard/press", {"keys": key, "presses": 1}
)
if result.get("success", False):
return ToolResult(output=f"Pressed key: {key}")
else:
return ToolResult(
error=f"Failed to press key: {result.get('error', 'Unknown error')}"
)
elif action == "wait":
duration = float(duration)
duration = max(0, min(10, duration))
await asyncio.sleep(duration)
return ToolResult(output=f"Waited {duration} seconds")
elif action == "mouse_down":
x_val = x if x is not None else self.mouse_x
y_val = y if y is not None else self.mouse_y
x_int = int(round(float(x_val)))
y_int = int(round(float(y_val)))
result = await self._api_request(
"POST",
"/automation/mouse/down",
{"x": x_int, "y": y_int, "button": button.lower()},
)
if result.get("success", False):
self.mouse_x = x_int
self.mouse_y = y_int
return ToolResult(
output=f"{button} button pressed at ({x_int}, {y_int})"
)
else:
return ToolResult(
error=f"Failed to press button: {result.get('error', 'Unknown error')}"
)
elif action == "mouse_up":
x_val = x if x is not None else self.mouse_x
y_val = y if y is not None else self.mouse_y
x_int = int(round(float(x_val)))
y_int = int(round(float(y_val)))
result = await self._api_request(
"POST",
"/automation/mouse/up",
{"x": x_int, "y": y_int, "button": button.lower()},
)
if result.get("success", False):
self.mouse_x = x_int
self.mouse_y = y_int
return ToolResult(
output=f"{button} button released at ({x_int}, {y_int})"
)
else:
return ToolResult(
error=f"Failed to release button: {result.get('error', 'Unknown error')}"
)
elif action == "drag_to":
if x is None or y is None:
return ToolResult(error="x and y coordinates are required")
target_x = int(round(float(x)))
target_y = int(round(float(y)))
start_x = self.mouse_x
start_y = self.mouse_y
result = await self._api_request(
"POST",
"/automation/mouse/drag",
{"x": target_x, "y": target_y, "duration": 0.3, "button": "left"},
)
if result.get("success", False):
self.mouse_x = target_x
self.mouse_y = target_y
return ToolResult(
output=f"Dragged from ({start_x}, {start_y}) to ({target_x}, {target_y})"
)
else:
return ToolResult(
error=f"Failed to drag: {result.get('error', 'Unknown error')}"
)
elif action == "hotkey":
if keys is None:
return ToolResult(error="Keys are required for hotkey action")
keys = str(keys).lower().strip()
key_sequence = keys.split("+")
result = await self._api_request(
"POST",
"/automation/keyboard/hotkey",
{"keys": key_sequence, "interval": 0.01},
)
if result.get("success", False):
return ToolResult(output=f"Pressed key combination: {keys}")
else:
return ToolResult(
error=f"Failed to press keys: {result.get('error', 'Unknown error')}"
)
elif action == "screenshot":
result = await self._api_request("POST", "/automation/screenshot")
if "image" in result:
base64_str = result["image"]
timestamp = time.strftime("%Y%m%d_%H%M%S")
# Save screenshot to file
screenshots_dir = "screenshots"
if not os.path.exists(screenshots_dir):
os.makedirs(screenshots_dir)
timestamped_filename = os.path.join(
screenshots_dir, f"screenshot_{timestamp}.png"
)
latest_filename = "latest_screenshot.png"
# Decode base64 string and save to file
img_data = base64.b64decode(base64_str)
with open(timestamped_filename, "wb") as f:
f.write(img_data)
# Save a copy as the latest screenshot
with open(latest_filename, "wb") as f:
f.write(img_data)
return ToolResult(
output=f"Screenshot saved as {timestamped_filename}",
base64_image=base64_str,
)
else:
return ToolResult(error="Failed to capture screenshot")
else:
return ToolResult(error=f"Unknown action: {action}")
except Exception as e:
return ToolResult(error=f"Computer action failed: {str(e)}")
async def cleanup(self):
"""Clean up resources."""
if self.session and not self.session.closed:
await self.session.close()
self.session = None
def __del__(self):
"""Ensure cleanup on destruction."""
if hasattr(self, "session") and self.session is not None:
try:
asyncio.run(self.cleanup())
except RuntimeError:
loop = asyncio.new_event_loop()
loop.run_until_complete(self.cleanup())
loop.close()
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "app/tool/computer_use_tool.py",
"license": "MIT License",
"lines": 473,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FoundationAgents/OpenManus:app/tool/sandbox/sb_browser_tool.py | import base64
import io
import json
import traceback
from typing import Optional # Add this import for Optional
from PIL import Image
from pydantic import Field
from app.daytona.tool_base import ( # Ensure Sandbox is imported correctly
Sandbox,
SandboxToolsBase,
ThreadMessage,
)
from app.tool.base import ToolResult
from app.utils.logger import logger
# Context = TypeVar("Context")
_BROWSER_DESCRIPTION = """\
A sandbox-based browser automation tool that allows interaction with web pages through various actions.
* This tool provides commands for controlling a browser session in a sandboxed environment
* It maintains state across calls, keeping the browser session alive until explicitly closed
* Use this when you need to browse websites, fill forms, click buttons, or extract content in a secure sandbox
* Each action requires specific parameters as defined in the tool's dependencies
Key capabilities include:
* Navigation: Go to specific URLs, go back in history
* Interaction: Click elements by index, input text, send keyboard commands
* Scrolling: Scroll up/down by pixel amount or scroll to specific text
* Tab management: Switch between tabs or close tabs
* Content extraction: Get dropdown options or select dropdown options
"""
# noinspection PyArgumentList
class SandboxBrowserTool(SandboxToolsBase):
    """Tool for executing tasks in a Daytona sandbox with browser-use capabilities."""

    name: str = "sandbox_browser"
    description: str = _BROWSER_DESCRIPTION
    # JSON schema of the tool call; "dependencies" maps each action to the
    # parameters that action requires.
    parameters: dict = {
        "type": "object",
        "properties": {
            "action": {
                "type": "string",
                "enum": [
                    "navigate_to",
                    "go_back",
                    "wait",
                    "click_element",
                    "input_text",
                    "send_keys",
                    "switch_tab",
                    "close_tab",
                    "scroll_down",
                    "scroll_up",
                    "scroll_to_text",
                    "get_dropdown_options",
                    "select_dropdown_option",
                    "click_coordinates",
                    "drag_drop",
                ],
                "description": "The browser action to perform",
            },
            "url": {
                "type": "string",
                "description": "URL for 'navigate_to' action",
            },
            "index": {
                "type": "integer",
                "description": "Element index for interaction actions",
            },
            "text": {
                "type": "string",
                "description": "Text for input or scroll actions",
            },
            "amount": {
                "type": "integer",
                "description": "Pixel amount to scroll",
            },
            "page_id": {
                "type": "integer",
                "description": "Tab ID for tab management actions",
            },
            "keys": {
                "type": "string",
                "description": "Keys to send for keyboard actions",
            },
            "seconds": {
                "type": "integer",
                "description": "Seconds to wait",
            },
            "x": {
                "type": "integer",
                "description": "X coordinate for click or drag actions",
            },
            "y": {
                "type": "integer",
                "description": "Y coordinate for click or drag actions",
            },
            "element_source": {
                "type": "string",
                "description": "Source element for drag and drop",
            },
            "element_target": {
                "type": "string",
                "description": "Target element for drag and drop",
            },
        },
        "required": ["action"],
        "dependencies": {
            "navigate_to": ["url"],
            "click_element": ["index"],
            "input_text": ["index", "text"],
            "send_keys": ["keys"],
            "switch_tab": ["page_id"],
            "close_tab": ["page_id"],
            "scroll_down": ["amount"],
            "scroll_up": ["amount"],
            "scroll_to_text": ["text"],
            "get_dropdown_options": ["index"],
            "select_dropdown_option": ["index", "text"],
            "click_coordinates": ["x", "y"],
            "drag_drop": ["element_source", "element_target"],
            "wait": ["seconds"],
        },
    }
    # Last browser-state message recorded by _execute_browser_action; consumed
    # by get_current_state().
    browser_message: Optional[ThreadMessage] = Field(default=None, exclude=True)

    def __init__(
        self, sandbox: Optional[Sandbox] = None, thread_id: Optional[str] = None, **data
    ):
        """Initialize with optional sandbox and thread_id."""
        super().__init__(**data)
        if sandbox is not None:
            self._sandbox = sandbox  # Directly set the base class private attribute
    def _validate_base64_image(
        self, base64_string: str, max_size_mb: int = 10
    ) -> tuple[bool, str]:
        """
        Validate base64 image data.

        Checks, in order: non-empty input, optional data-URL prefix, base64
        alphabet/padding, decodability, byte-size limit, image integrity and
        format, and sane pixel dimensions.

        Args:
            base64_string: The base64 encoded image data
            max_size_mb: Maximum allowed image size in megabytes

        Returns:
            Tuple of (is_valid, error_message)
        """
        try:
            if not base64_string or len(base64_string) < 10:
                return False, "Base64 string is empty or too short"
            # Strip a "data:image/...;base64," prefix if present.
            if base64_string.startswith("data:"):
                try:
                    base64_string = base64_string.split(",", 1)[1]
                except (IndexError, ValueError):
                    return False, "Invalid data URL format"
            import re
            # Cheap syntactic checks before attempting a full decode.
            if not re.match(r"^[A-Za-z0-9+/]*={0,2}$", base64_string):
                return False, "Invalid base64 characters detected"
            if len(base64_string) % 4 != 0:
                return False, "Invalid base64 string length"
            try:
                image_data = base64.b64decode(base64_string, validate=True)
            except Exception as e:
                return False, f"Base64 decoding failed: {str(e)}"
            max_size_bytes = max_size_mb * 1024 * 1024
            if len(image_data) > max_size_bytes:
                return False, f"Image size exceeds limit ({max_size_bytes} bytes)"
            try:
                image_stream = io.BytesIO(image_data)
                # verify() checks integrity but leaves the image unusable, so
                # the stream is reopened below for the dimension check.
                with Image.open(image_stream) as img:
                    img.verify()
                    supported_formats = {"JPEG", "PNG", "GIF", "BMP", "WEBP", "TIFF"}
                    if img.format not in supported_formats:
                        return False, f"Unsupported image format: {img.format}"
                image_stream.seek(0)
                with Image.open(image_stream) as img_check:
                    width, height = img_check.size
                    max_dimension = 8192
                    if width > max_dimension or height > max_dimension:
                        return (
                            False,
                            f"Image dimensions exceed limit ({max_dimension}x{max_dimension})",
                        )
                    if width < 1 or height < 1:
                        return False, f"Invalid image dimensions: {width}x{height}"
            except Exception as e:
                return False, f"Invalid image data: {str(e)}"
            return True, "Valid image"
        except Exception as e:
            logger.error(f"Unexpected error during base64 image validation: {e}")
            return False, f"Validation error: {str(e)}"
async def _execute_browser_action(
self, endpoint: str, params: dict = None, method: str = "POST"
) -> ToolResult:
"""Execute a browser automation action through the sandbox API."""
try:
await self._ensure_sandbox()
url = f"http://localhost:8003/api/automation/{endpoint}"
if method == "GET" and params:
query_params = "&".join([f"{k}={v}" for k, v in params.items()])
url = f"{url}?{query_params}"
curl_cmd = (
f"curl -s -X {method} '{url}' -H 'Content-Type: application/json'"
)
else:
curl_cmd = (
f"curl -s -X {method} '{url}' -H 'Content-Type: application/json'"
)
if params:
json_data = json.dumps(params)
curl_cmd += f" -d '{json_data}'"
logger.debug(f"Executing curl command: {curl_cmd}")
response = self.sandbox.process.exec(curl_cmd, timeout=30)
if response.exit_code == 0:
try:
result = json.loads(response.result)
result.setdefault("content", "")
result.setdefault("role", "assistant")
if "screenshot_base64" in result:
screenshot_data = result["screenshot_base64"]
is_valid, validation_message = self._validate_base64_image(
screenshot_data
)
if not is_valid:
logger.warning(
f"Screenshot validation failed: {validation_message}"
)
result["image_validation_error"] = validation_message
del result["screenshot_base64"]
# added_message = await self.thread_manager.add_message(
# thread_id=self.thread_id,
# type="browser_state",
# content=result,
# is_llm_message=False
# )
message = ThreadMessage(
type="browser_state", content=result, is_llm_message=False
)
self.browser_message = message
success_response = {
"success": result.get("success", False),
"message": result.get("message", "Browser action completed"),
}
# if added_message and 'message_id' in added_message:
# success_response['message_id'] = added_message['message_id']
for field in [
"url",
"title",
"element_count",
"pixels_below",
"ocr_text",
"image_url",
]:
if field in result:
success_response[field] = result[field]
return (
self.success_response(success_response)
if success_response["success"]
else self.fail_response(success_response)
)
except json.JSONDecodeError as e:
logger.error(f"Failed to parse response JSON: {e}")
return self.fail_response(f"Failed to parse response JSON: {e}")
else:
logger.error(f"Browser automation request failed: {response}")
return self.fail_response(
f"Browser automation request failed: {response}"
)
except Exception as e:
logger.error(f"Error executing browser action: {e}")
logger.debug(traceback.format_exc())
return self.fail_response(f"Error executing browser action: {e}")
    async def execute(
        self,
        action: str,
        url: Optional[str] = None,
        index: Optional[int] = None,
        text: Optional[str] = None,
        amount: Optional[int] = None,
        page_id: Optional[int] = None,
        keys: Optional[str] = None,
        seconds: Optional[int] = None,
        x: Optional[int] = None,
        y: Optional[int] = None,
        element_source: Optional[str] = None,
        element_target: Optional[str] = None,
        **kwargs,
    ) -> ToolResult:
        """
        Execute a browser action in the sandbox environment.

        Validates the parameters each action needs (mirroring the
        "dependencies" section of ``parameters``) and forwards the call to
        ``_execute_browser_action``.

        Args:
            action: The browser action to perform
            url: URL for navigation
            index: Element index for interaction
            text: Text for input or scroll actions
            amount: Pixel amount to scroll
            page_id: Tab ID for tab management
            keys: Keys to send for keyboard actions
            seconds: Seconds to wait
            x: X coordinate for click/drag
            y: Y coordinate for click/drag
            element_source: Source element for drag and drop
            element_target: Target element for drag and drop
        Returns:
            ToolResult with the action's output or error
        """
        # async with self.lock:
        try:
            # Navigation actions
            if action == "navigate_to":
                if not url:
                    return self.fail_response("URL is required for navigation")
                return await self._execute_browser_action("navigate_to", {"url": url})
            elif action == "go_back":
                return await self._execute_browser_action("go_back", {})
            # Interaction actions
            elif action == "click_element":
                if index is None:
                    return self.fail_response("Index is required for click_element")
                return await self._execute_browser_action(
                    "click_element", {"index": index}
                )
            elif action == "input_text":
                if index is None or not text:
                    return self.fail_response(
                        "Index and text are required for input_text"
                    )
                return await self._execute_browser_action(
                    "input_text", {"index": index, "text": text}
                )
            elif action == "send_keys":
                if not keys:
                    return self.fail_response("Keys are required for send_keys")
                return await self._execute_browser_action("send_keys", {"keys": keys})
            # Tab management
            elif action == "switch_tab":
                if page_id is None:
                    return self.fail_response("Page ID is required for switch_tab")
                return await self._execute_browser_action(
                    "switch_tab", {"page_id": page_id}
                )
            elif action == "close_tab":
                if page_id is None:
                    return self.fail_response("Page ID is required for close_tab")
                return await self._execute_browser_action(
                    "close_tab", {"page_id": page_id}
                )
            # Scrolling actions — amount is optional; the service picks a default
            elif action == "scroll_down":
                params = {"amount": amount} if amount is not None else {}
                return await self._execute_browser_action("scroll_down", params)
            elif action == "scroll_up":
                params = {"amount": amount} if amount is not None else {}
                return await self._execute_browser_action("scroll_up", params)
            elif action == "scroll_to_text":
                if not text:
                    return self.fail_response("Text is required for scroll_to_text")
                return await self._execute_browser_action(
                    "scroll_to_text", {"text": text}
                )
            # Dropdown actions
            elif action == "get_dropdown_options":
                if index is None:
                    return self.fail_response(
                        "Index is required for get_dropdown_options"
                    )
                return await self._execute_browser_action(
                    "get_dropdown_options", {"index": index}
                )
            elif action == "select_dropdown_option":
                if index is None or not text:
                    return self.fail_response(
                        "Index and text are required for select_dropdown_option"
                    )
                return await self._execute_browser_action(
                    "select_dropdown_option", {"index": index, "text": text}
                )
            # Coordinate-based actions
            elif action == "click_coordinates":
                if x is None or y is None:
                    return self.fail_response(
                        "X and Y coordinates are required for click_coordinates"
                    )
                return await self._execute_browser_action(
                    "click_coordinates", {"x": x, "y": y}
                )
            elif action == "drag_drop":
                if not element_source or not element_target:
                    return self.fail_response(
                        "Source and target elements are required for drag_drop"
                    )
                return await self._execute_browser_action(
                    "drag_drop",
                    {
                        "element_source": element_source,
                        "element_target": element_target,
                    },
                )
            # Utility actions
            elif action == "wait":
                seconds_to_wait = seconds if seconds is not None else 3
                return await self._execute_browser_action(
                    "wait", {"seconds": seconds_to_wait}
                )
            else:
                return self.fail_response(f"Unknown action: {action}")
        except Exception as e:
            logger.error(f"Error executing browser action: {e}")
            return self.fail_response(f"Error executing browser action: {e}")
async def get_current_state(
self, message: Optional[ThreadMessage] = None
) -> ToolResult:
"""
Get the current browser state as a ToolResult.
If context is not provided, uses self.context.
"""
try:
# Use provided context or fall back to self.context
message = message or self.browser_message
if not message:
return ToolResult(error="Browser context not initialized")
state = message.content
screenshot = state.get("screenshot_base64")
# Build the state info with all required fields
state_info = {
"url": state.get("url", ""),
"title": state.get("title", ""),
"tabs": [tab.model_dump() for tab in state.get("tabs", [])],
"pixels_above": getattr(state, "pixels_above", 0),
"pixels_below": getattr(state, "pixels_below", 0),
"help": "[0], [1], [2], etc., represent clickable indices corresponding to the elements listed. Clicking on these indices will navigate to or interact with the respective content behind them.",
}
return ToolResult(
output=json.dumps(state_info, indent=4, ensure_ascii=False),
base64_image=screenshot,
)
except Exception as e:
return ToolResult(error=f"Failed to get browser state: {str(e)}")
    @classmethod
    def create_with_sandbox(cls, sandbox: Sandbox) -> "SandboxBrowserTool":
        """Factory method to create a tool with sandbox."""
        return cls(sandbox=sandbox)
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "app/tool/sandbox/sb_browser_tool.py",
"license": "MIT License",
"lines": 434,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FoundationAgents/OpenManus:app/tool/sandbox/sb_files_tool.py | import asyncio
from typing import Optional, TypeVar
from pydantic import Field
from app.daytona.tool_base import Sandbox, SandboxToolsBase
from app.tool.base import ToolResult
from app.utils.files_utils import clean_path, should_exclude_file
from app.utils.logger import logger
Context = TypeVar("Context")
_FILES_DESCRIPTION = """\
A sandbox-based file system tool that allows file operations in a secure sandboxed environment.
* This tool provides commands for creating, reading, updating, and deleting files in the workspace
* All operations are performed relative to the /workspace directory for security
* Use this when you need to manage files, edit code, or manipulate file contents in a sandbox
* Each action requires specific parameters as defined in the tool's dependencies
Key capabilities include:
* File creation: Create new files with specified content and permissions
* File modification: Replace specific strings or completely rewrite files
* File deletion: Remove files from the workspace
* File reading: Read file contents with optional line range specification
"""
class SandboxFilesTool(SandboxToolsBase):
    """Sandboxed file-system tool: create, edit, rewrite, and delete files
    relative to the sandbox workspace directory."""

    name: str = "sandbox_files"
    description: str = _FILES_DESCRIPTION
    # JSON schema of the tool call; "dependencies" maps each action to the
    # parameters that action requires.
    parameters: dict = {
        "type": "object",
        "properties": {
            "action": {
                "type": "string",
                "enum": [
                    "create_file",
                    "str_replace",
                    "full_file_rewrite",
                    "delete_file",
                ],
                "description": "The file operation to perform",
            },
            "file_path": {
                "type": "string",
                "description": "Path to the file, relative to /workspace (e.g., 'src/main.py')",
            },
            "file_contents": {
                "type": "string",
                "description": "Content to write to the file",
            },
            "old_str": {
                "type": "string",
                "description": "Text to be replaced (must appear exactly once)",
            },
            "new_str": {
                "type": "string",
                "description": "Replacement text",
            },
            "permissions": {
                "type": "string",
                "description": "File permissions in octal format (e.g., '644')",
                "default": "644",
            },
        },
        "required": ["action"],
        "dependencies": {
            "create_file": ["file_path", "file_contents"],
            "str_replace": ["file_path", "old_str", "new_str"],
            "full_file_rewrite": ["file_path", "file_contents"],
            "delete_file": ["file_path"],
        },
    }
    # Number of context lines shown around a str_replace edit.
    SNIPPET_LINES: int = Field(default=4, exclude=True)
    # workspace_path: str = Field(default="/workspace", exclude=True)
    # sandbox: Optional[Sandbox] = Field(default=None, exclude=True)

    def __init__(
        self, sandbox: Optional[Sandbox] = None, thread_id: Optional[str] = None, **data
    ):
        """Initialize with optional sandbox and thread_id."""
        super().__init__(**data)
        if sandbox is not None:
            self._sandbox = sandbox
    def clean_path(self, path: str) -> str:
        """Clean and normalize a path to be relative to /workspace"""
        # Delegates to the shared utility so every sandbox tool normalizes
        # paths the same way.
        return clean_path(path, self.workspace_path)
    def _should_exclude_file(self, rel_path: str) -> bool:
        """Check if a file should be excluded based on path, name, or extension"""
        # Exclusion rules live in app.utils.files_utils.should_exclude_file.
        return should_exclude_file(rel_path)
def _file_exists(self, path: str) -> bool:
"""Check if a file exists in the sandbox"""
try:
self.sandbox.fs.get_file_info(path)
return True
except Exception:
return False
async def get_workspace_state(self) -> dict:
"""Get the current workspace state by reading all files"""
files_state = {}
try:
# Ensure sandbox is initialized
await self._ensure_sandbox()
files = self.sandbox.fs.list_files(self.workspace_path)
for file_info in files:
rel_path = file_info.name
# Skip excluded files and directories
if self._should_exclude_file(rel_path) or file_info.is_dir:
continue
try:
full_path = f"{self.workspace_path}/{rel_path}"
content = self.sandbox.fs.download_file(full_path).decode()
files_state[rel_path] = {
"content": content,
"is_dir": file_info.is_dir,
"size": file_info.size,
"modified": file_info.mod_time,
}
except Exception as e:
print(f"Error reading file {rel_path}: {e}")
except UnicodeDecodeError:
print(f"Skipping binary file: {rel_path}")
return files_state
except Exception as e:
print(f"Error getting workspace state: {str(e)}")
return {}
async def execute(
self,
action: str,
file_path: Optional[str] = None,
file_contents: Optional[str] = None,
old_str: Optional[str] = None,
new_str: Optional[str] = None,
permissions: Optional[str] = "644",
**kwargs,
) -> ToolResult:
"""
Execute a file operation in the sandbox environment.
Args:
action: The file operation to perform
file_path: Path to the file relative to /workspace
file_contents: Content to write to the file
old_str: Text to be replaced (for str_replace)
new_str: Replacement text (for str_replace)
permissions: File permissions in octal format
Returns:
ToolResult with the operation's output or error
"""
async with asyncio.Lock():
try:
# File creation
if action == "create_file":
if not file_path or not file_contents:
return self.fail_response(
"file_path and file_contents are required for create_file"
)
return await self._create_file(
file_path, file_contents, permissions
)
# String replacement
elif action == "str_replace":
if not file_path or not old_str or not new_str:
return self.fail_response(
"file_path, old_str, and new_str are required for str_replace"
)
return await self._str_replace(file_path, old_str, new_str)
# Full file rewrite
elif action == "full_file_rewrite":
if not file_path or not file_contents:
return self.fail_response(
"file_path and file_contents are required for full_file_rewrite"
)
return await self._full_file_rewrite(
file_path, file_contents, permissions
)
# File deletion
elif action == "delete_file":
if not file_path:
return self.fail_response(
"file_path is required for delete_file"
)
return await self._delete_file(file_path)
else:
return self.fail_response(f"Unknown action: {action}")
except Exception as e:
logger.error(f"Error executing file action: {e}")
return self.fail_response(f"Error executing file action: {e}")
    async def _create_file(
        self, file_path: str, file_contents: str, permissions: str = "644"
    ) -> ToolResult:
        """Create a new file with the provided contents.

        Fails if the file already exists; parent directories are created as
        needed and the given octal permissions are applied.
        """
        try:
            # Ensure sandbox is initialized
            await self._ensure_sandbox()
            file_path = self.clean_path(file_path)
            full_path = f"{self.workspace_path}/{file_path}"
            if self._file_exists(full_path):
                return self.fail_response(
                    f"File '{file_path}' already exists. Use full_file_rewrite to modify existing files."
                )
            # Create parent directories if needed
            parent_dir = "/".join(full_path.split("/")[:-1])
            if parent_dir:
                self.sandbox.fs.create_folder(parent_dir, "755")
            # Write the file content
            self.sandbox.fs.upload_file(file_contents.encode(), full_path)
            self.sandbox.fs.set_file_permissions(full_path, permissions)
            message = f"File '{file_path}' created successfully."
            # Check if index.html was created and add 8080 server info (only in root workspace)
            if file_path.lower() == "index.html":
                try:
                    website_link = self.sandbox.get_preview_link(8080)
                    # Some SDK versions return an object with .url, others a
                    # string repr — handle both.
                    website_url = (
                        website_link.url
                        if hasattr(website_link, "url")
                        else str(website_link).split("url='")[1].split("'")[0]
                    )
                    message += f"\n\n[Auto-detected index.html - HTTP server available at: {website_url}]"
                    message += "\n[Note: Use the provided HTTP server URL above instead of starting a new server]"
                except Exception as e:
                    logger.warning(
                        f"Failed to get website URL for index.html: {str(e)}"
                    )
            return self.success_response(message)
        except Exception as e:
            return self.fail_response(f"Error creating file: {str(e)}")
async def _str_replace(
self, file_path: str, old_str: str, new_str: str
) -> ToolResult:
"""Replace specific text in a file"""
try:
# Ensure sandbox is initialized
await self._ensure_sandbox()
file_path = self.clean_path(file_path)
full_path = f"{self.workspace_path}/{file_path}"
if not self._file_exists(full_path):
return self.fail_response(f"File '{file_path}' does not exist")
content = self.sandbox.fs.download_file(full_path).decode()
old_str = old_str.expandtabs()
new_str = new_str.expandtabs()
occurrences = content.count(old_str)
if occurrences == 0:
return self.fail_response(f"String '{old_str}' not found in file")
if occurrences > 1:
lines = [
i + 1
for i, line in enumerate(content.split("\n"))
if old_str in line
]
return self.fail_response(
f"Multiple occurrences found in lines {lines}. Please ensure string is unique"
)
# Perform replacement
new_content = content.replace(old_str, new_str)
self.sandbox.fs.upload_file(new_content.encode(), full_path)
# Show snippet around the edit
replacement_line = content.split(old_str)[0].count("\n")
start_line = max(0, replacement_line - self.SNIPPET_LINES)
end_line = replacement_line + self.SNIPPET_LINES + new_str.count("\n")
snippet = "\n".join(new_content.split("\n")[start_line : end_line + 1])
message = f"Replacement successful."
return self.success_response(message)
except Exception as e:
return self.fail_response(f"Error replacing string: {str(e)}")
    async def _full_file_rewrite(
        self, file_path: str, file_contents: str, permissions: str = "644"
    ) -> ToolResult:
        """Completely rewrite an existing file with new content.

        Fails if the file does not already exist (use create_file for new
        files); octal permissions are re-applied after the write.
        """
        try:
            # Ensure sandbox is initialized
            await self._ensure_sandbox()
            file_path = self.clean_path(file_path)
            full_path = f"{self.workspace_path}/{file_path}"
            if not self._file_exists(full_path):
                return self.fail_response(
                    f"File '{file_path}' does not exist. Use create_file to create a new file."
                )
            self.sandbox.fs.upload_file(file_contents.encode(), full_path)
            self.sandbox.fs.set_file_permissions(full_path, permissions)
            message = f"File '{file_path}' completely rewritten successfully."
            # Check if index.html was rewritten and add 8080 server info (only in root workspace)
            if file_path.lower() == "index.html":
                try:
                    website_link = self.sandbox.get_preview_link(8080)
                    # Some SDK versions return an object with .url, others a
                    # string repr — handle both.
                    website_url = (
                        website_link.url
                        if hasattr(website_link, "url")
                        else str(website_link).split("url='")[1].split("'")[0]
                    )
                    message += f"\n\n[Auto-detected index.html - HTTP server available at: {website_url}]"
                    message += "\n[Note: Use the provided HTTP server URL above instead of starting a new server]"
                except Exception as e:
                    logger.warning(
                        f"Failed to get website URL for index.html: {str(e)}"
                    )
            return self.success_response(message)
        except Exception as e:
            return self.fail_response(f"Error rewriting file: {str(e)}")
async def _delete_file(self, file_path: str) -> ToolResult:
"""Delete a file at the given path"""
try:
# Ensure sandbox is initialized
await self._ensure_sandbox()
file_path = self.clean_path(file_path)
full_path = f"{self.workspace_path}/{file_path}"
if not self._file_exists(full_path):
return self.fail_response(f"File '{file_path}' does not exist")
self.sandbox.fs.delete_file(full_path)
return self.success_response(f"File '{file_path}' deleted successfully.")
except Exception as e:
return self.fail_response(f"Error deleting file: {str(e)}")
    async def cleanup(self):
        """Clean up sandbox resources."""
        # Intentionally empty: this tool holds no resources of its own; the
        # sandbox lifecycle is managed elsewhere.
    @classmethod
    def create_with_context(cls, context: Context) -> "SandboxFilesTool[Context]":
        """Factory method to create a SandboxFilesTool with a specific context."""
        # Intentionally unimplemented: this tool is constructed with a
        # sandbox, not a generic context.
        raise NotImplementedError(
            "create_with_context not implemented for SandboxFilesTool"
        )
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "app/tool/sandbox/sb_files_tool.py",
"license": "MIT License",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FoundationAgents/OpenManus:app/tool/sandbox/sb_shell_tool.py | import asyncio
import time
from typing import Any, Dict, Optional, TypeVar
from uuid import uuid4
from app.daytona.tool_base import Sandbox, SandboxToolsBase
from app.tool.base import ToolResult
from app.utils.logger import logger
Context = TypeVar("Context")
_SHELL_DESCRIPTION = """\
Execute a shell command in the workspace directory.
IMPORTANT: Commands are non-blocking by default and run in a tmux session.
This is ideal for long-running operations like starting servers or build processes.
Uses sessions to maintain state between commands.
This tool is essential for running CLI tools, installing packages, and managing system operations.
"""
class SandboxShellTool(SandboxToolsBase):
"""Tool for executing tasks in a Daytona sandbox with browser-use capabilities.
Uses sessions for maintaining state between commands and provides comprehensive process management.
"""
name: str = "sandbox_shell"
description: str = _SHELL_DESCRIPTION
parameters: dict = {
"type": "object",
"properties": {
"action": {
"type": "string",
"enum": [
"execute_command",
"check_command_output",
"terminate_command",
"list_commands",
],
"description": "The shell action to perform",
},
"command": {
"type": "string",
"description": "The shell command to execute. Use this for running CLI tools, installing packages, "
"or system operations. Commands can be chained using &&, ||, and | operators.",
},
"folder": {
"type": "string",
"description": "Optional relative path to a subdirectory of /workspace where the command should be "
"executed. Example: 'data/pdfs'",
},
"session_name": {
"type": "string",
"description": "Optional name of the tmux session to use. Use named sessions for related commands "
"that need to maintain state. Defaults to a random session name.",
},
"blocking": {
"type": "boolean",
"description": "Whether to wait for the command to complete. Defaults to false for non-blocking "
"execution.",
"default": False,
},
"timeout": {
"type": "integer",
"description": "Optional timeout in seconds for blocking commands. Defaults to 60. Ignored for "
"non-blocking commands.",
"default": 60,
},
"kill_session": {
"type": "boolean",
"description": "Whether to terminate the tmux session after checking. Set to true when you're done "
"with the command.",
"default": False,
},
},
"required": ["action"],
"dependencies": {
"execute_command": ["command"],
"check_command_output": ["session_name"],
"terminate_command": ["session_name"],
"list_commands": [],
},
}
def __init__(
self, sandbox: Optional[Sandbox] = None, thread_id: Optional[str] = None, **data
):
"""Initialize with optional sandbox and thread_id."""
super().__init__(**data)
if sandbox is not None:
self._sandbox = sandbox
async def _ensure_session(self, session_name: str = "default") -> str:
"""Ensure a session exists and return its ID."""
if session_name not in self._sessions:
session_id = str(uuid4())
try:
await self._ensure_sandbox() # Ensure sandbox is initialized
self.sandbox.process.create_session(session_id)
self._sessions[session_name] = session_id
except Exception as e:
raise RuntimeError(f"Failed to create session: {str(e)}")
return self._sessions[session_name]
async def _cleanup_session(self, session_name: str):
"""Clean up a session if it exists."""
if session_name in self._sessions:
try:
await self._ensure_sandbox() # Ensure sandbox is initialized
self.sandbox.process.delete_session(self._sessions[session_name])
del self._sessions[session_name]
except Exception as e:
print(f"Warning: Failed to cleanup session {session_name}: {str(e)}")
async def _execute_raw_command(self, command: str) -> Dict[str, Any]:
"""Execute a raw command directly in the sandbox."""
# Ensure session exists for raw commands
session_id = await self._ensure_session("raw_commands")
# Execute command in session
from app.daytona.sandbox import SessionExecuteRequest
req = SessionExecuteRequest(
command=command, run_async=False, cwd=self.workspace_path
)
response = self.sandbox.process.execute_session_command(
session_id=session_id,
req=req,
timeout=30, # Short timeout for utility commands
)
logs = self.sandbox.process.get_session_command_logs(
session_id=session_id, command_id=response.cmd_id
)
return {"output": logs, "exit_code": response.exit_code}
async def _execute_command(
self,
command: str,
folder: Optional[str] = None,
session_name: Optional[str] = None,
blocking: bool = False,
timeout: int = 60,
) -> ToolResult:
try:
# Ensure sandbox is initialized
await self._ensure_sandbox()
# Set up working directory
cwd = self.workspace_path
if folder:
folder = folder.strip("/")
cwd = f"{self.workspace_path}/{folder}"
# Generate a session name if not provided
if not session_name:
session_name = f"session_{str(uuid4())[:8]}"
# Check if tmux session already exists
check_session = await self._execute_raw_command(
f"tmux has-session -t {session_name} 2>/dev/null || echo 'not_exists'"
)
session_exists = "not_exists" not in check_session.get("output", "")
if not session_exists:
# Create a new tmux session
await self._execute_raw_command(
f"tmux new-session -d -s {session_name}"
)
# Ensure we're in the correct directory and send command to tmux
full_command = f"cd {cwd} && {command}"
wrapped_command = full_command.replace('"', '\\"') # Escape double quotes
# Send command to tmux session
await self._execute_raw_command(
f'tmux send-keys -t {session_name} "{wrapped_command}" Enter'
)
if blocking:
# For blocking execution, wait and capture output
start_time = time.time()
while (time.time() - start_time) < timeout:
# Wait a bit before checking
time.sleep(2)
# Check if session still exists (command might have exited)
check_result = await self._execute_raw_command(
f"tmux has-session -t {session_name} 2>/dev/null || echo 'ended'"
)
if "ended" in check_result.get("output", ""):
break
# Get current output and check for common completion indicators
output_result = await self._execute_raw_command(
f"tmux capture-pane -t {session_name} -p -S - -E -"
)
current_output = output_result.get("output", "")
# Check for prompt indicators that suggest command completion
last_lines = current_output.split("\n")[-3:]
completion_indicators = [
"$",
"#",
">",
"Done",
"Completed",
"Finished",
"✓",
]
if any(
indicator in line
for indicator in completion_indicators
for line in last_lines
):
break
# Capture final output
output_result = await self._execute_raw_command(
f"tmux capture-pane -t {session_name} -p -S - -E -"
)
final_output = output_result.get("output", "")
# Kill the session after capture
await self._execute_raw_command(f"tmux kill-session -t {session_name}")
return self.success_response(
{
"output": final_output,
"session_name": session_name,
"cwd": cwd,
"completed": True,
}
)
else:
# For non-blocking, just return immediately
return self.success_response(
{
"session_name": session_name,
"cwd": cwd,
"message": f"Command sent to tmux session '{session_name}'. Use check_command_output to view results.",
"completed": False,
}
)
except Exception as e:
# Attempt to clean up session in case of error
if session_name:
try:
await self._execute_raw_command(
f"tmux kill-session -t {session_name}"
)
except:
pass
return self.fail_response(f"Error executing command: {str(e)}")
async def _check_command_output(
self, session_name: str, kill_session: bool = False
) -> ToolResult:
try:
# Ensure sandbox is initialized
await self._ensure_sandbox()
# Check if session exists
check_result = await self._execute_raw_command(
f"tmux has-session -t {session_name} 2>/dev/null || echo 'not_exists'"
)
if "not_exists" in check_result.get("output", ""):
return self.fail_response(
f"Tmux session '{session_name}' does not exist."
)
# Get output from tmux pane
output_result = await self._execute_raw_command(
f"tmux capture-pane -t {session_name} -p -S - -E -"
)
output = output_result.get("output", "")
# Kill session if requested
if kill_session:
await self._execute_raw_command(f"tmux kill-session -t {session_name}")
termination_status = "Session terminated."
else:
termination_status = "Session still running."
return self.success_response(
{
"output": output,
"session_name": session_name,
"status": termination_status,
}
)
except Exception as e:
return self.fail_response(f"Error checking command output: {str(e)}")
async def _terminate_command(self, session_name: str) -> ToolResult:
try:
# Ensure sandbox is initialized
await self._ensure_sandbox()
# Check if session exists
check_result = await self._execute_raw_command(
f"tmux has-session -t {session_name} 2>/dev/null || echo 'not_exists'"
)
if "not_exists" in check_result.get("output", ""):
return self.fail_response(
f"Tmux session '{session_name}' does not exist."
)
# Kill the session
await self._execute_raw_command(f"tmux kill-session -t {session_name}")
return self.success_response(
{"message": f"Tmux session '{session_name}' terminated successfully."}
)
except Exception as e:
return self.fail_response(f"Error terminating command: {str(e)}")
async def _list_commands(self) -> ToolResult:
try:
# Ensure sandbox is initialized
await self._ensure_sandbox()
# List all tmux sessions
result = await self._execute_raw_command(
"tmux list-sessions 2>/dev/null || echo 'No sessions'"
)
output = result.get("output", "")
if "No sessions" in output or not output.strip():
return self.success_response(
{"message": "No active tmux sessions found.", "sessions": []}
)
# Parse session list
sessions = []
for line in output.split("\n"):
if line.strip():
parts = line.split(":")
if parts:
session_name = parts[0].strip()
sessions.append(session_name)
return self.success_response(
{
"message": f"Found {len(sessions)} active sessions.",
"sessions": sessions,
}
)
except Exception as e:
return self.fail_response(f"Error listing commands: {str(e)}")
async def execute(
self,
action: str,
command: str,
folder: Optional[str] = None,
session_name: Optional[str] = None,
blocking: bool = False,
timeout: int = 60,
kill_session: bool = False,
) -> ToolResult:
"""
Execute a browser action in the sandbox environment.
Args:
timeout:
blocking:
session_name:
folder:
command:
kill_session:
action: The browser action to perform
Returns:
ToolResult with the action's output or error
"""
async with asyncio.Lock():
try:
# Navigation actions
if action == "execute_command":
if not command:
return self.fail_response("command is required for navigation")
return await self._execute_command(
command, folder, session_name, blocking, timeout
)
elif action == "check_command_output":
if session_name is None:
return self.fail_response(
"session_name is required for navigation"
)
return await self._check_command_output(session_name, kill_session)
elif action == "terminate_command":
if session_name is None:
return self.fail_response(
"session_name is required for click_element"
)
return await self._terminate_command(session_name)
elif action == "list_commands":
return await self._list_commands()
else:
return self.fail_response(f"Unknown action: {action}")
except Exception as e:
logger.error(f"Error executing shell action: {e}")
return self.fail_response(f"Error executing shell action: {e}")
async def cleanup(self):
"""Clean up all sessions."""
for session_name in list(self._sessions.keys()):
await self._cleanup_session(session_name)
# Also clean up any tmux sessions
try:
await self._ensure_sandbox()
await self._execute_raw_command("tmux kill-server 2>/dev/null || true")
except Exception as e:
logger.error(f"Error shell box cleanup action: {e}")
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "app/tool/sandbox/sb_shell_tool.py",
"license": "MIT License",
"lines": 369,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FoundationAgents/OpenManus:app/utils/logger.py | import logging
import os
import structlog
ENV_MODE = os.getenv("ENV_MODE", "LOCAL")
renderer = [structlog.processors.JSONRenderer()]
if ENV_MODE.lower() == "local".lower():
renderer = [structlog.dev.ConsoleRenderer()]
structlog.configure(
processors=[
structlog.stdlib.add_log_level,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.dict_tracebacks,
structlog.processors.CallsiteParameterAdder(
{
structlog.processors.CallsiteParameter.FILENAME,
structlog.processors.CallsiteParameter.FUNC_NAME,
structlog.processors.CallsiteParameter.LINENO,
}
),
structlog.processors.TimeStamper(fmt="iso"),
structlog.contextvars.merge_contextvars,
*renderer,
],
cache_logger_on_first_use=True,
)
logger: structlog.stdlib.BoundLogger = structlog.get_logger(level=logging.DEBUG)
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "app/utils/logger.py",
"license": "MIT License",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/OpenManus:sandbox_main.py | import argparse
import asyncio
from app.agent.sandbox_agent import SandboxManus
from app.logger import logger
async def main():
# Parse command line arguments
parser = argparse.ArgumentParser(description="Run Manus agent with a prompt")
parser.add_argument(
"--prompt", type=str, required=False, help="Input prompt for the agent"
)
args = parser.parse_args()
# Create and initialize Manus agent
agent = await SandboxManus.create()
try:
# Use command line prompt if provided, otherwise ask for input
prompt = args.prompt if args.prompt else input("Enter your prompt: ")
if not prompt.strip():
logger.warning("Empty prompt provided.")
return
logger.warning("Processing your request...")
await agent.run(prompt)
logger.info("Request processing completed.")
except KeyboardInterrupt:
logger.warning("Operation interrupted.")
finally:
# Ensure agent resources are cleaned up before exiting
await agent.cleanup()
if __name__ == "__main__":
asyncio.run(main())
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "sandbox_main.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/OpenManus:app/tool/crawl4ai.py | """
Crawl4AI Web Crawler Tool for OpenManus
This tool integrates Crawl4AI, a high-performance web crawler designed for LLMs and AI agents,
providing fast, precise, and AI-ready data extraction with clean Markdown generation.
"""
import asyncio
from typing import List, Union
from urllib.parse import urlparse
from app.logger import logger
from app.tool.base import BaseTool, ToolResult
class Crawl4aiTool(BaseTool):
"""
Web crawler tool powered by Crawl4AI.
Provides clean markdown extraction optimized for AI processing.
"""
name: str = "crawl4ai"
description: str = """Web crawler that extracts clean, AI-ready content from web pages.
Features:
- Extracts clean markdown content optimized for LLMs
- Handles JavaScript-heavy sites and dynamic content
- Supports multiple URLs in a single request
- Fast and reliable with built-in error handling
Perfect for content analysis, research, and feeding web content to AI models."""
parameters: dict = {
"type": "object",
"properties": {
"urls": {
"type": "array",
"items": {"type": "string"},
"description": "(required) List of URLs to crawl. Can be a single URL or multiple URLs.",
"minItems": 1,
},
"timeout": {
"type": "integer",
"description": "(optional) Timeout in seconds for each URL. Default is 30.",
"default": 30,
"minimum": 5,
"maximum": 120,
},
"bypass_cache": {
"type": "boolean",
"description": "(optional) Whether to bypass cache and fetch fresh content. Default is false.",
"default": False,
},
"word_count_threshold": {
"type": "integer",
"description": "(optional) Minimum word count for content blocks. Default is 10.",
"default": 10,
"minimum": 1,
},
},
"required": ["urls"],
}
async def execute(
self,
urls: Union[str, List[str]],
timeout: int = 30,
bypass_cache: bool = False,
word_count_threshold: int = 10,
) -> ToolResult:
"""
Execute web crawling for the specified URLs.
Args:
urls: Single URL string or list of URLs to crawl
timeout: Timeout in seconds for each URL
bypass_cache: Whether to bypass cache
word_count_threshold: Minimum word count for content blocks
Returns:
ToolResult with crawl results
"""
# Normalize URLs to list
if isinstance(urls, str):
url_list = [urls]
else:
url_list = urls
# Validate URLs
valid_urls = []
for url in url_list:
if self._is_valid_url(url):
valid_urls.append(url)
else:
logger.warning(f"Invalid URL skipped: {url}")
if not valid_urls:
return ToolResult(error="No valid URLs provided")
try:
# Import crawl4ai components
from crawl4ai import (
AsyncWebCrawler,
BrowserConfig,
CacheMode,
CrawlerRunConfig,
)
# Configure browser settings
browser_config = BrowserConfig(
headless=True,
verbose=False,
browser_type="chromium",
ignore_https_errors=True,
java_script_enabled=True,
)
# Configure crawler settings
run_config = CrawlerRunConfig(
cache_mode=CacheMode.BYPASS if bypass_cache else CacheMode.ENABLED,
word_count_threshold=word_count_threshold,
process_iframes=True,
remove_overlay_elements=True,
excluded_tags=["script", "style"],
page_timeout=timeout * 1000, # Convert to milliseconds
verbose=False,
wait_until="domcontentloaded",
)
results = []
successful_count = 0
failed_count = 0
# Process each URL
async with AsyncWebCrawler(config=browser_config) as crawler:
for url in valid_urls:
try:
logger.info(f"🕷️ Crawling URL: {url}")
start_time = asyncio.get_event_loop().time()
result = await crawler.arun(url=url, config=run_config)
end_time = asyncio.get_event_loop().time()
execution_time = end_time - start_time
if result.success:
# Count words in markdown
word_count = 0
if hasattr(result, "markdown") and result.markdown:
word_count = len(result.markdown.split())
# Count links
links_count = 0
if hasattr(result, "links") and result.links:
internal_links = result.links.get("internal", [])
external_links = result.links.get("external", [])
links_count = len(internal_links) + len(external_links)
# Count images
images_count = 0
if hasattr(result, "media") and result.media:
images = result.media.get("images", [])
images_count = len(images)
results.append(
{
"url": url,
"success": True,
"status_code": getattr(result, "status_code", 200),
"title": result.metadata.get("title")
if result.metadata
else None,
"markdown": result.markdown
if hasattr(result, "markdown")
else None,
"word_count": word_count,
"links_count": links_count,
"images_count": images_count,
"execution_time": execution_time,
}
)
successful_count += 1
logger.info(
f"✅ Successfully crawled {url} in {execution_time:.2f}s"
)
else:
results.append(
{
"url": url,
"success": False,
"error_message": getattr(
result, "error_message", "Unknown error"
),
"execution_time": execution_time,
}
)
failed_count += 1
logger.warning(f"❌ Failed to crawl {url}")
except Exception as e:
error_msg = f"Error crawling {url}: {str(e)}"
logger.error(error_msg)
results.append(
{"url": url, "success": False, "error_message": error_msg}
)
failed_count += 1
# Format output
output_lines = [f"🕷️ Crawl4AI Results Summary:"]
output_lines.append(f"📊 Total URLs: {len(valid_urls)}")
output_lines.append(f"✅ Successful: {successful_count}")
output_lines.append(f"❌ Failed: {failed_count}")
output_lines.append("")
for i, result in enumerate(results, 1):
output_lines.append(f"{i}. {result['url']}")
if result["success"]:
output_lines.append(
f" ✅ Status: Success (HTTP {result.get('status_code', 'N/A')})"
)
if result.get("title"):
output_lines.append(f" 📄 Title: {result['title']}")
if result.get("markdown"):
# Show first 300 characters of markdown content
content_preview = result["markdown"]
if len(result["markdown"]) > 300:
content_preview += "..."
output_lines.append(f" 📝 Content: {content_preview}")
output_lines.append(
f" 📊 Stats: {result.get('word_count', 0)} words, {result.get('links_count', 0)} links, {result.get('images_count', 0)} images"
)
if result.get("execution_time"):
output_lines.append(
f" ⏱️ Time: {result['execution_time']:.2f}s"
)
else:
output_lines.append(f" ❌ Status: Failed")
if result.get("error_message"):
output_lines.append(f" 🚫 Error: {result['error_message']}")
output_lines.append("")
return ToolResult(output="\n".join(output_lines))
except ImportError:
error_msg = "Crawl4AI is not installed. Please install it with: pip install crawl4ai"
logger.error(error_msg)
return ToolResult(error=error_msg)
except Exception as e:
error_msg = f"Crawl4AI execution failed: {str(e)}"
logger.error(error_msg)
return ToolResult(error=error_msg)
def _is_valid_url(self, url: str) -> bool:
"""Validate if a URL is properly formatted."""
try:
result = urlparse(url)
return all([result.scheme, result.netloc]) and result.scheme in [
"http",
"https",
]
except Exception:
return False
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "app/tool/crawl4ai.py",
"license": "MIT License",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FoundationAgents/OpenManus:protocol/a2a/app/agent.py | from typing import Any, AsyncIterable, ClassVar, Dict, List, Literal
from pydantic import BaseModel
from app.agent.manus import Manus
class ResponseFormat(BaseModel):
"""Respond to the user in this format."""
status: Literal["input_required", "completed", "error"] = "input_required"
message: str
class A2AManus(Manus):
async def invoke(self, query, sessionId) -> str:
config = {"configurable": {"thread_id": sessionId}}
response = await self.run(query)
return self.get_agent_response(config, response)
async def stream(self, query: str) -> AsyncIterable[Dict[str, Any]]:
"""Streaming is not supported by Manus."""
raise NotImplementedError("Streaming is not supported by Manus yet.")
def get_agent_response(self, config, agent_response):
return {
"is_task_complete": True,
"require_user_input": False,
"content": agent_response,
}
SUPPORTED_CONTENT_TYPES: ClassVar[List[str]] = ["text", "text/plain"]
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "protocol/a2a/app/agent.py",
"license": "MIT License",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/OpenManus:protocol/a2a/app/agent_executor.py | import logging
from typing import Awaitable, Callable
from a2a.server.agent_execution import AgentExecutor, RequestContext
from a2a.server.events import EventQueue
from a2a.types import (
InvalidParamsError,
Part,
Task,
TextPart,
UnsupportedOperationError,
)
from a2a.utils import completed_task, new_artifact
from a2a.utils.errors import ServerError
from .agent import A2AManus
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class ManusExecutor(AgentExecutor):
"""Currency Conversion AgentExecutor Example."""
def __init__(self, agent_factory: Callable[[], Awaitable[A2AManus]]):
self.agent_factory = agent_factory
async def execute(
self,
context: RequestContext,
event_queue: EventQueue,
) -> None:
error = self._validate_request(context)
if error:
raise ServerError(error=InvalidParamsError())
query = context.get_user_input()
try:
self.agent = await self.agent_factory()
result = await self.agent.invoke(query, context.context_id)
print(f"Final Result ===> {result}")
except Exception as e:
print("Error invoking agent: %s", e)
raise ServerError(error=ValueError(f"Error invoking agent: {e}")) from e
parts = [
Part(
root=TextPart(
text=(
result["content"]
if result["content"]
else "failed to generate response"
)
),
)
]
event_queue.enqueue_event(
completed_task(
context.task_id,
context.context_id,
[new_artifact(parts, f"task_{context.task_id}")],
[context.message],
)
)
def _validate_request(self, context: RequestContext) -> bool:
return False
async def cancel(
self, request: RequestContext, event_queue: EventQueue
) -> Task | None:
raise ServerError(error=UnsupportedOperationError())
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "protocol/a2a/app/agent_executor.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FoundationAgents/OpenManus:protocol/a2a/app/main.py | import argparse
import asyncio
import logging
from typing import Optional
import httpx
from a2a.server.apps import A2AStarletteApplication
from a2a.server.request_handlers import DefaultRequestHandler
from a2a.server.tasks import InMemoryPushNotifier, InMemoryTaskStore
from a2a.types import AgentCapabilities, AgentCard, AgentSkill
from dotenv import load_dotenv
from app.tool.browser_use_tool import _BROWSER_DESCRIPTION
from app.tool.str_replace_editor import _STR_REPLACE_EDITOR_DESCRIPTION
from app.tool.terminate import _TERMINATE_DESCRIPTION
from .agent import A2AManus
from .agent_executor import ManusExecutor
load_dotenv()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
async def main(host: str = "localhost", port: int = 10000):
"""Starts the Manus Agent server."""
try:
capabilities = AgentCapabilities(streaming=False, pushNotifications=True)
skills = [
AgentSkill(
id="Python Execute",
name="Python Execute Tool",
description="Executes Python code string. Note: Only print outputs are visible, function return values are not captured. Use print statements to see results.",
tags=["Execute Python Code"],
examples=[
"Execute Python code:'''python \n Print('Hello World') \n '''"
],
),
AgentSkill(
id="Browser use",
name="Browser use Tool",
description=_BROWSER_DESCRIPTION,
tags=["Use Browser"],
examples=["go_to 'https://www.google.com'"],
),
AgentSkill(
id="Replace String",
name="Str_replace Tool",
description=_STR_REPLACE_EDITOR_DESCRIPTION,
tags=["Operate Files"],
examples=["Replace 'old' with 'new' in 'file.txt'"],
),
AgentSkill(
id="Ask human",
name="Ask human Tool",
description="Use this tool to ask human for help.",
tags=["Ask human for help"],
examples=["Ask human: 'What time is it?'"],
),
AgentSkill(
id="terminate",
name="terminate Tool",
description=_TERMINATE_DESCRIPTION,
tags=["terminate task"],
examples=["terminate"],
),
# Add more skills as needed
]
agent_card = AgentCard(
name="Manus Agent",
description="A versatile agent that can solve various tasks using multiple tools including MCP-based tools",
url=f"http://{host}:{port}/",
version="1.0.0",
defaultInputModes=A2AManus.SUPPORTED_CONTENT_TYPES,
defaultOutputModes=A2AManus.SUPPORTED_CONTENT_TYPES,
capabilities=capabilities,
skills=skills,
)
httpx_client = httpx.AsyncClient()
request_handler = DefaultRequestHandler(
agent_executor=ManusExecutor(
agent_factory=lambda: A2AManus.create(max_steps=3)
),
task_store=InMemoryTaskStore(),
push_notifier=InMemoryPushNotifier(httpx_client),
)
server = A2AStarletteApplication(
agent_card=agent_card, http_handler=request_handler
)
logger.info(f"Starting server on {host}:{port}")
return server.build()
except Exception as e:
logger.error(f"An error occurred during server startup: {e}")
exit(1)
def run_server(host: Optional[str] = "localhost", port: Optional[int] = 10000):
try:
import uvicorn
app = asyncio.run(main(host, port))
config = uvicorn.Config(
app=app, host=host, port=port, loop="asyncio", proxy_headers=True
)
uvicorn.Server(config=config).run()
logger.info(f"Server started on {host}:{port}")
except Exception as e:
logger.error(f"An error occurred while starting the server: {e}")
if __name__ == "__main__":
# Parse command line arguments for host and port, with default values
parser = argparse.ArgumentParser(description="Start Manus Agent service")
parser.add_argument(
"--host",
type=str,
default="localhost",
help="Server host address, default is localhost",
)
parser.add_argument(
"--port", type=int, default=10000, help="Server port, default is 10000"
)
args = parser.parse_args()
# Start the server with the specified or default host and port
run_server(args.host, args.port)
| {
"repo_id": "FoundationAgents/OpenManus",
"file_path": "protocol/a2a/app/main.py",
"license": "MIT License",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FunAudioLLM/CosyVoice:cosyvoice/utils/onnx.py | import onnxruntime
import torch, random
import os
import torchaudio.compliance.kaldi as kaldi
class SpeechTokenExtractor():
def __init__(self, model_path):
self.local_rank = int(os.environ.get("LOCAL_RANK", 0))
option = onnxruntime.SessionOptions()
option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
option.intra_op_num_threads = 1
self.speech_tokenizer_session = onnxruntime.InferenceSession(model_path,
sess_options=option,
providers=[("CUDAExecutionProvider", {'device_id': self.local_rank})])
def inference(self, feat, feat_lengths, device):
speech_token = self.speech_tokenizer_session.run(None,
{self.speech_tokenizer_session.get_inputs()[0].name:
feat.transpose(1, 2).detach().cpu().numpy(),
self.speech_tokenizer_session.get_inputs()[1].name:
feat_lengths.detach().cpu().numpy()})[0]
return torch.tensor(speech_token).to(torch.int32).to(device), (feat_lengths / 4).to(torch.int32).to(device)
class EmbeddingExtractor():
def __init__(self, model_path):
option = onnxruntime.SessionOptions()
option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
option.intra_op_num_threads = 1
self.max_len = 10 * 16000
self.campplus_session = onnxruntime.InferenceSession(model_path,
sess_options=option,
providers=["CPUExecutionProvider"])
def inference(self, speech):
if speech.shape[1] > self.max_len:
start_index = random.randint(0, speech.shape[1] - self.max_len)
speech = speech[:, start_index: start_index + self.max_len]
feat = kaldi.fbank(speech,
num_mel_bins=80,
dither=0,
sample_frequency=16000)
feat = feat - feat.mean(dim=0, keepdim=True)
embedding = self.campplus_session.run(None,
{self.campplus_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist()
return torch.tensor(embedding).to(speech.device)
# singleton mode, only initialized once
onnx_path = os.environ.get('onnx_path')
if onnx_path is not None:
embedding_extractor, online_feature = EmbeddingExtractor(model_path=os.path.join(onnx_path, 'campplus.onnx')), True
else:
embedding_extractor, online_feature = None, False | {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "cosyvoice/utils/onnx.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FunAudioLLM/CosyVoice:cosyvoice/flow/DiT/dit.py |
"""
ein notation:
b - batch
n - sequence
nt - text sequence
nw - raw wave length
d - dimension
"""
from __future__ import annotations
import torch
from torch import nn
import torch.nn.functional as F
from einops import repeat
from x_transformers.x_transformers import RotaryEmbedding
from cosyvoice.utils.mask import add_optional_chunk_mask
from cosyvoice.flow.DiT.modules import (
TimestepEmbedding,
ConvNeXtV2Block,
CausalConvPositionEmbedding,
DiTBlock,
AdaLayerNormZero_Final,
precompute_freqs_cis,
get_pos_embed_indices,
)
# Text embedding
class TextEmbedding(nn.Module):
    """Embed text token ids, optionally refined with fixed sinusoidal positions
    plus a stack of ConvNeXt-V2 blocks.

    Index 0 of the embedding table is reserved as a filler/pad token, so the
    table has ``text_num_embeds + 1`` rows and incoming ids are shifted by +1.
    """

    def __init__(self, text_num_embeds, text_dim, conv_layers=0, conv_mult=2):
        super().__init__()
        self.text_embed = nn.Embedding(text_num_embeds + 1, text_dim)  # use 0 as filler token
        if conv_layers > 0:
            # Extra positional modeling: precomputed sinusoidal table + ConvNeXtV2 stack.
            self.extra_modeling = True
            self.precompute_max_pos = 4096  # ~44s of 24khz audio
            self.register_buffer("freqs_cis", precompute_freqs_cis(text_dim, self.precompute_max_pos), persistent=False)
            self.text_blocks = nn.Sequential(
                *[ConvNeXtV2Block(text_dim, text_dim * conv_mult) for _ in range(conv_layers)]
            )
        else:
            self.extra_modeling = False

    def forward(self, text: int["b nt"], seq_len, drop_text=False):  # noqa: F722
        """Return text embeddings padded/curtailed to ``seq_len`` frames.

        drop_text=True zeroes the token ids (classifier-free guidance on text).
        """
        batch, text_len = text.shape[0], text.shape[1]
        text = text + 1  # use 0 as filler token. preprocess of batch pad -1, see list_str_to_idx()
        text = text[:, :seq_len]  # curtail if character tokens are more than the mel spec tokens
        # NOTE(review): if text_len > seq_len the pad amount below is negative and
        # F.pad trims further — assumed callers keep text_len <= seq_len; confirm.
        text = F.pad(text, (0, seq_len - text_len), value=0)
        if drop_text:  # cfg for text
            text = torch.zeros_like(text)
        text = self.text_embed(text)  # b n -> b n d
        # possible extra modeling
        if self.extra_modeling:
            # sinus pos emb: every sequence starts at position 0
            batch_start = torch.zeros((batch,), dtype=torch.long)
            pos_idx = get_pos_embed_indices(batch_start, seq_len, max_pos=self.precompute_max_pos)
            text_pos_embed = self.freqs_cis[pos_idx]
            text = text + text_pos_embed
            # convnextv2 blocks
            text = self.text_blocks(text)
        return text
# noised input audio and context mixing embedding
class InputEmbedding(nn.Module):
    """Fuse noised mel, masked-cond mel, text features, and (optionally) a
    speaker vector broadcast over time, projecting them to the model width
    and adding a causal convolutional position embedding."""

    def __init__(self, mel_dim, text_dim, out_dim, spk_dim=None):
        super().__init__()
        self.spk_dim = spk_dim if spk_dim is not None else 0
        self.proj = nn.Linear(mel_dim * 2 + text_dim + self.spk_dim, out_dim)
        self.conv_pos_embed = CausalConvPositionEmbedding(dim=out_dim)

    def forward(
        self,
        x: float["b n d"],
        cond: float["b n d"],
        text_embed: float["b n d"],
        spks: float["b d"],
    ):
        parts = [x, cond, text_embed]
        if self.spk_dim > 0:
            # Broadcast the per-utterance speaker vector across every frame.
            parts.append(repeat(spks, "b c -> b t c", t=x.shape[1]))
        fused = self.proj(torch.cat(parts, dim=-1))
        return self.conv_pos_embed(fused) + fused
# Transformer backbone using DiT blocks
class DiT(nn.Module):
    """Diffusion Transformer backbone for the flow-matching mel decoder.

    Stacks ``depth`` DiTBlocks over fused (noised mel, masked cond mel, text
    mu, speaker) inputs, with rotary positions and AdaLN-Zero time
    conditioning. Streaming mode swaps in a chunked attention mask.
    """

    def __init__(
        self,
        *,
        dim,
        depth=8,
        heads=8,
        dim_head=64,
        dropout=0.1,
        ff_mult=4,
        mel_dim=80,
        mu_dim=None,
        long_skip_connection=False,
        spk_dim=None,
        out_channels=None,
        static_chunk_size=50,
        num_decoding_left_chunks=2
    ):
        super().__init__()
        self.time_embed = TimestepEmbedding(dim)
        # mu (text-conditioned feature) width defaults to the mel width.
        if mu_dim is None:
            mu_dim = mel_dim
        self.input_embed = InputEmbedding(mel_dim, mu_dim, dim, spk_dim)
        self.rotary_embed = RotaryEmbedding(dim_head)
        self.dim = dim
        self.depth = depth
        self.transformer_blocks = nn.ModuleList(
            [DiTBlock(dim=dim, heads=heads, dim_head=dim_head, ff_mult=ff_mult, dropout=dropout) for _ in range(depth)]
        )
        # Optional U-Net style skip from the embedded input to the last layer.
        self.long_skip_connection = nn.Linear(dim * 2, dim, bias=False) if long_skip_connection else None
        self.norm_out = AdaLayerNormZero_Final(dim)  # final modulation
        self.proj_out = nn.Linear(dim, mel_dim)
        self.out_channels = out_channels
        # Chunked-attention parameters used when streaming=True.
        self.static_chunk_size = static_chunk_size
        self.num_decoding_left_chunks = num_decoding_left_chunks

    def forward(self, x, mask, mu, t, spks=None, cond=None, streaming=False):
        """Predict the flow for noised mel ``x``.

        Args:
            x: noised input audio features, channel-first (b, mel_dim, n).
            mask: (b, n) frame validity mask (cast to bool here).
            mu: text-conditioned features, (b, mu_dim, n).
            t: scalar or (b,) diffusion time.
            spks: (b, spk_dim) speaker embedding.
                NOTE(review): assumed non-None — it is unconditionally unsqueezed.
            cond: masked conditioning mel, (b, mel_dim, n); assumed non-None.
            streaming: when True use the static chunked attention mask.

        Returns:
            (b, mel_dim, n) tensor.
        """
        # Incoming tensors are channel-first; transformer blocks expect (b, n, d).
        x = x.transpose(1, 2)
        mu = mu.transpose(1, 2)
        cond = cond.transpose(1, 2)
        spks = spks.unsqueeze(dim=1)
        batch, seq_len = x.shape[0], x.shape[1]
        if t.ndim == 0:
            t = t.repeat(batch)
        # t: conditioning time, c: context (text + masked cond audio), x: noised input audio
        t = self.time_embed(t)
        x = self.input_embed(x, cond, mu, spks.squeeze(1))
        rope = self.rotary_embed.forward_from_seq_len(seq_len)
        if self.long_skip_connection is not None:
            residual = x
        if streaming is True:
            # Chunked mask limits attention to fixed-size chunks plus left context.
            attn_mask = add_optional_chunk_mask(x, mask.bool(), False, False, 0, self.static_chunk_size, -1).unsqueeze(dim=1)
        else:
            # Full mask over valid frames, expanded to (b, 1, n, n).
            attn_mask = add_optional_chunk_mask(x, mask.bool(), False, False, 0, 0, -1).repeat(1, x.size(1), 1).unsqueeze(dim=1)
        for block in self.transformer_blocks:
            x = block(x, t, mask=attn_mask.bool(), rope=rope)
        if self.long_skip_connection is not None:
            x = self.long_skip_connection(torch.cat((x, residual), dim=-1))
        x = self.norm_out(x, t)
        output = self.proj_out(x).transpose(1, 2)
        return output
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "cosyvoice/flow/DiT/dit.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FunAudioLLM/CosyVoice:cosyvoice/flow/DiT/modules.py |
"""
ein notation:
b - batch
n - sequence
nt - text sequence
nw - raw wave length
d - dimension
"""
from __future__ import annotations
from typing import Optional
import math
import torch
from torch import nn
import torch.nn.functional as F
import torchaudio
from x_transformers.x_transformers import apply_rotary_pos_emb
# raw wav to mel spec
class MelSpec(nn.Module):
    """Waveform -> log-mel spectrogram front end (torchaudio MelSpectrogram).

    Defaults target 24 kHz audio with 100 mel channels. The ``dummy`` buffer
    only tracks which device the module lives on so the transform can follow
    its inputs lazily.
    """

    def __init__(
        self,
        filter_length=1024,
        hop_length=256,
        win_length=1024,
        n_mel_channels=100,
        target_sample_rate=24_000,
        normalize=False,
        power=1,
        norm=None,
        center=True,
    ):
        super().__init__()
        self.n_mel_channels = n_mel_channels
        self.mel_stft = torchaudio.transforms.MelSpectrogram(
            sample_rate=target_sample_rate,
            n_fft=filter_length,
            win_length=win_length,
            hop_length=hop_length,
            n_mels=n_mel_channels,
            power=power,
            center=center,
            normalized=normalize,
            norm=norm,
        )
        # Device tracker only; excluded from checkpoints.
        self.register_buffer("dummy", torch.tensor(0), persistent=False)

    def forward(self, inp):
        """Return log-mel features for a waveform of shape (b, nw) or (b, 1, nw)."""
        if len(inp.shape) == 3:
            inp = inp.squeeze(1)  # 'b 1 nw -> b nw'
        assert len(inp.shape) == 2
        # Lazily migrate the transform to the input's device on first mismatch.
        if self.dummy.device != inp.device:
            self.to(inp.device)
        mel = self.mel_stft(inp)
        # Floor at 1e-5 before the log to avoid -inf on silent frames.
        mel = mel.clamp(min=1e-5).log()
        return mel
# sinusoidal position embedding
class SinusPositionEmbedding(nn.Module):
    """Standard sinusoidal embedding for scalar positions/timesteps.

    Produces [sin | cos] halves over a geometric frequency ladder, with the
    input multiplied by ``scale`` (default 1000) before taking angles.
    """

    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, x, scale=1000):
        half = self.dim // 2
        # Geometric frequency ladder, identical to the transformer PE recipe.
        step = math.log(10000) / (half - 1)
        freqs = torch.exp(-step * torch.arange(half, device=x.device).float())
        angles = scale * x.unsqueeze(1) * freqs.unsqueeze(0)
        return torch.cat((angles.sin(), angles.cos()), dim=-1)
# convolutional position embedding
class ConvPositionEmbedding(nn.Module):
    """Positional mixer: two grouped Conv1d + Mish layers with same-length
    padding. Padded positions are zeroed both before and after the convs."""

    def __init__(self, dim, kernel_size=31, groups=16):
        super().__init__()
        assert kernel_size % 2 != 0
        pad = kernel_size // 2
        self.conv1d = nn.Sequential(
            nn.Conv1d(dim, dim, kernel_size, groups=groups, padding=pad),
            nn.Mish(),
            nn.Conv1d(dim, dim, kernel_size, groups=groups, padding=pad),
            nn.Mish(),
        )

    def forward(self, x: float["b n d"], mask: bool["b n"] | None = None):  # noqa: F722
        keep = mask[..., None] if mask is not None else None
        if keep is not None:
            x = x.masked_fill(~keep, 0.0)
        # Convs operate channel-first: (b, n, d) -> (b, d, n) -> (b, n, d).
        out = self.conv1d(x.permute(0, 2, 1)).permute(0, 2, 1)
        if keep is not None:
            out = out.masked_fill(~keep, 0.0)
        return out
class CausalConvPositionEmbedding(nn.Module):
    """Causal variant of ConvPositionEmbedding: each conv input is left-padded
    by kernel_size - 1, so position t never sees frames after t."""

    def __init__(self, dim, kernel_size=31, groups=16):
        super().__init__()
        assert kernel_size % 2 != 0
        self.kernel_size = kernel_size
        self.conv1 = nn.Sequential(nn.Conv1d(dim, dim, kernel_size, groups=groups, padding=0), nn.Mish())
        self.conv2 = nn.Sequential(nn.Conv1d(dim, dim, kernel_size, groups=groups, padding=0), nn.Mish())

    def forward(self, x: float["b n d"], mask: bool["b n"] | None = None):  # noqa: F722
        keep = mask[..., None] if mask is not None else None
        if keep is not None:
            x = x.masked_fill(~keep, 0.0)
        h = x.permute(0, 2, 1)  # channel-first for Conv1d
        causal = self.kernel_size - 1
        h = self.conv1(F.pad(h, (causal, 0, 0, 0)))
        h = self.conv2(F.pad(h, (causal, 0, 0, 0)))
        out = h.permute(0, 2, 1)
        if keep is not None:
            out = out.masked_fill(~keep, 0.0)
        return out
# rotary positional embedding related
def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0, theta_rescale_factor=1.0):
    """Precompute a rotary table of shape (end, dim): [cos | sin] halves.

    theta_rescale_factor applies the NTK-aware rescaling trick to extend usable
    sequence length without fine-tuning:
    https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
    https://github.com/lucidrains/rotary-embedding-torch/blob/main/rotary_embedding_torch/rotary_embedding_torch.py
    """
    theta *= theta_rescale_factor ** (dim / (dim - 2))
    inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
    positions = torch.arange(end, device=inv_freq.device)  # type: ignore
    angles = torch.outer(positions, inv_freq).float()  # (end, dim // 2)
    # Real and imaginary parts, concatenated along the feature axis.
    return torch.cat([angles.cos(), angles.sin()], dim=-1)
def get_pos_embed_indices(start, length, max_pos, scale=1.0):
    """Build per-batch position indices start + [0..length) * scale, clamped
    to max_pos - 1 so they never index past the precomputed table."""
    # Promote a scalar scale to one value per batch element.
    scale = scale * torch.ones_like(start, dtype=torch.float32)
    offsets = torch.arange(length, device=start.device, dtype=torch.float32)
    pos = start.unsqueeze(1) + (offsets.unsqueeze(0) * scale.unsqueeze(1)).long()
    # Avoid extra-long error: saturate at the table's last row.
    return pos.clamp(max=max_pos - 1)
# Global Response Normalization layer (Instance Normalization ?)
class GRN(nn.Module):
    """Global Response Normalization (ConvNeXt-V2) with a residual connection.

    gamma and beta initialize to zero, so at init the layer is the identity.
    """

    def __init__(self, dim):
        super().__init__()
        self.gamma = nn.Parameter(torch.zeros(1, 1, dim))
        self.beta = nn.Parameter(torch.zeros(1, 1, dim))

    def forward(self, x):
        # L2 energy per channel over the sequence axis, then divide by its
        # mean across channels to get a relative response.
        global_norm = torch.norm(x, p=2, dim=1, keepdim=True)
        response = global_norm / (global_norm.mean(dim=-1, keepdim=True) + 1e-6)
        return self.gamma * (x * response) + self.beta + x
# ConvNeXt-V2 Block https://github.com/facebookresearch/ConvNeXt-V2/blob/main/models/convnextv2.py
# ref: https://github.com/bfs18/e2_tts/blob/main/rfwave/modules.py#L108
class ConvNeXtV2Block(nn.Module):
    """ConvNeXt-V2 block for (b, n, d) sequences: depthwise conv -> LayerNorm
    -> pointwise expand -> GELU -> GRN -> pointwise contract, with a residual.

    https://github.com/facebookresearch/ConvNeXt-V2/blob/main/models/convnextv2.py
    ref: https://github.com/bfs18/e2_tts/blob/main/rfwave/modules.py#L108
    """

    def __init__(
        self,
        dim: int,
        intermediate_dim: int,
        dilation: int = 1,
    ):
        super().__init__()
        pad = (dilation * (7 - 1)) // 2  # keeps the sequence length unchanged
        self.dwconv = nn.Conv1d(
            dim, dim, kernel_size=7, padding=pad, groups=dim, dilation=dilation
        )  # depthwise conv
        self.norm = nn.LayerNorm(dim, eps=1e-6)
        self.pwconv1 = nn.Linear(dim, intermediate_dim)  # pointwise conv as linear
        self.act = nn.GELU()
        self.grn = GRN(intermediate_dim)
        self.pwconv2 = nn.Linear(intermediate_dim, dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        shortcut = x
        # Depthwise conv runs channel-first, the rest channel-last.
        h = self.dwconv(x.transpose(1, 2)).transpose(1, 2)
        h = self.pwconv1(self.norm(h))
        h = self.grn(self.act(h))
        return shortcut + self.pwconv2(h)
# AdaLayerNormZero
# return with modulated x for attn input, and params for later mlp modulation
class AdaLayerNormZero(nn.Module):
    """AdaLN-Zero: from the conditioning embedding, produce a modulated
    (normed) x for attention plus gate/shift/scale terms for the later MLP."""

    def __init__(self, dim):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(dim, dim * 6)
        self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, emb=None):
        # Six modulation vectors from one projection of the time embedding.
        mod = self.linear(self.silu(emb))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = mod.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
# AdaLayerNormZero for final layer
# return only with modulated x for attn input, cuz no more mlp modulation
class AdaLayerNormZero_Final(nn.Module):
    """Final-layer AdaLN: only scale/shift modulation of the normed input
    (no gates), since no MLP follows."""

    def __init__(self, dim):
        super().__init__()
        self.silu = nn.SiLU()
        self.linear = nn.Linear(dim, dim * 2)
        self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, emb):
        scale, shift = self.linear(self.silu(emb)).chunk(2, dim=1)
        return self.norm(x) * (1 + scale)[:, None, :] + shift[:, None, :]
# FeedForward
class FeedForward(nn.Module):
    """Transformer MLP: Linear -> GELU -> Dropout -> Linear, width = dim * mult.

    The inner Sequential nesting (project_in inside ff) is kept so state_dict
    keys match existing checkpoints.
    """

    def __init__(self, dim, dim_out=None, mult=4, dropout=0.0, approximate: str = "none"):
        super().__init__()
        inner_dim = int(dim * mult)
        target_dim = dim if dim_out is None else dim_out
        project_in = nn.Sequential(nn.Linear(dim, inner_dim), nn.GELU(approximate=approximate))
        self.ff = nn.Sequential(project_in, nn.Dropout(dropout), nn.Linear(inner_dim, target_dim))

    def forward(self, x):
        return self.ff(x)
# Attention with possible joint part
# modified from diffusers/src/diffusers/models/attention_processor.py
class Attention(nn.Module):
    """Attention with an optional joint ("context") part, modified from
    diffusers/src/diffusers/models/attention_processor.py.

    The attention math itself lives in ``processor`` (AttnProcessor for plain
    self-attention, JointAttnProcessor when ``context_dim`` is set); this
    module owns the projection layers and dispatches to the processor.

    Args:
        processor: callable implementing the attention computation.
        dim: width of the noised-input stream.
        heads: number of attention heads.
        dim_head: per-head width; inner width is heads * dim_head.
        dropout: dropout probability in the output projection.
        context_dim: if not None, build joint-attention K/V projections for a
            context stream of this width.
        context_pre_only: when not None, also build a context Q projection;
            when additionally False, build a context output projection.
    """

    def __init__(
        self,
        processor: JointAttnProcessor | AttnProcessor,
        dim: int,
        heads: int = 8,
        dim_head: int = 64,
        dropout: float = 0.0,
        context_dim: Optional[int] = None,  # if not None -> joint attention
        context_pre_only=None,
    ):
        super().__init__()
        if not hasattr(F, "scaled_dot_product_attention"):
            # Bug fix: message previously read "Attention equires PyTorch 2.0".
            raise ImportError("Attention requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.")
        self.processor = processor
        self.dim = dim
        self.heads = heads
        self.inner_dim = dim_head * heads
        self.dropout = dropout
        self.context_dim = context_dim
        self.context_pre_only = context_pre_only
        # Projections for the noised-input stream.
        self.to_q = nn.Linear(dim, self.inner_dim)
        self.to_k = nn.Linear(dim, self.inner_dim)
        self.to_v = nn.Linear(dim, self.inner_dim)
        # Projections for the context stream (joint attention only).
        if self.context_dim is not None:
            self.to_k_c = nn.Linear(context_dim, self.inner_dim)
            self.to_v_c = nn.Linear(context_dim, self.inner_dim)
            if self.context_pre_only is not None:
                self.to_q_c = nn.Linear(context_dim, self.inner_dim)
        self.to_out = nn.ModuleList([])
        self.to_out.append(nn.Linear(self.inner_dim, dim))
        self.to_out.append(nn.Dropout(dropout))
        if self.context_pre_only is not None and not self.context_pre_only:
            self.to_out_c = nn.Linear(self.inner_dim, dim)

    def forward(
        self,
        x: float["b n d"],  # noised input x  # noqa: F722
        c: float["b n d"] = None,  # context c  # noqa: F722
        mask: bool["b n"] | None = None,  # noqa: F722
        rope=None,  # rotary position embedding for x
        c_rope=None,  # rotary position embedding for c
    ) -> torch.Tensor:
        """Dispatch to the processor; the joint form is used when c is given."""
        if c is not None:
            return self.processor(self, x, c=c, mask=mask, rope=rope, c_rope=c_rope)
        else:
            return self.processor(self, x, mask=mask, rope=rope)
# Attention processor
class AttnProcessor:
    """Self-attention processor (SDPA-based) for Attention without a context
    stream. Modified from diffusers attention_processor.py."""

    def __init__(self):
        pass

    def __call__(
        self,
        attn: Attention,
        x: float["b n d"],  # noised input x  # noqa: F722
        mask: bool["b n"] | None = None,  # noqa: F722
        rope=None,  # rotary position embedding
    ) -> torch.FloatTensor:
        batch_size = x.shape[0]
        # `sample` projections.
        query = attn.to_q(x)
        key = attn.to_k(x)
        value = attn.to_v(x)
        # apply rotary position embedding
        if rope is not None:
            freqs, xpos_scale = rope
            # xpos scaling: queries scaled by s, keys by 1/s (both 1.0 when unset).
            q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale**-1.0) if xpos_scale is not None else (1.0, 1.0)
            query = apply_rotary_pos_emb(query, freqs, q_xpos_scale)
            key = apply_rotary_pos_emb(key, freqs, k_xpos_scale)
        # attention
        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads
        # (b, n, h*d) -> (b, h, n, d) for scaled_dot_product_attention.
        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        # mask. e.g. inference got a batch with different target durations, mask out the padding
        if mask is not None:
            attn_mask = mask
            if attn_mask.dim() == 2:
                attn_mask = attn_mask.unsqueeze(1).unsqueeze(1)  # 'b n -> b 1 1 n'
            attn_mask = attn_mask.expand(batch_size, attn.heads, query.shape[-2], key.shape[-2])
        else:
            attn_mask = None
        x = F.scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, dropout_p=0.0, is_causal=False)
        x = x.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        x = x.to(query.dtype)
        # linear proj
        x = attn.to_out[0](x)
        # dropout
        x = attn.to_out[1](x)
        # Zero out padded positions in the output as well.
        if mask is not None:
            if mask.dim() == 2:
                mask = mask.unsqueeze(-1)
            else:
                # NOTE(review): for a >2D mask this takes row 0, last column as the
                # per-position keep flag — assumed to match the chunk-mask layout
                # produced upstream; confirm against add_optional_chunk_mask.
                mask = mask[:, 0, -1].unsqueeze(-1)
            x = x.masked_fill(~mask, 0.0)
        return x
# Joint Attention processor for MM-DiT
# modified from diffusers/src/diffusers/models/attention_processor.py
class JointAttnProcessor:
    """Joint attention processor for MM-DiT: concatenates the noised-input and
    context streams along the sequence axis, runs one SDPA pass, then splits
    the result back into per-stream outputs.
    Modified from diffusers attention_processor.py."""

    def __init__(self):
        pass

    def __call__(
        self,
        attn: Attention,
        x: float["b n d"],  # noised input x  # noqa: F722
        c: float["b nt d"] = None,  # context c, here text  # noqa: F722
        mask: bool["b n"] | None = None,  # noqa: F722
        rope=None,  # rotary position embedding for x
        c_rope=None,  # rotary position embedding for c
    ) -> torch.FloatTensor:
        residual = x
        batch_size = c.shape[0]
        # `sample` projections.
        query = attn.to_q(x)
        key = attn.to_k(x)
        value = attn.to_v(x)
        # `context` projections.
        c_query = attn.to_q_c(c)
        c_key = attn.to_k_c(c)
        c_value = attn.to_v_c(c)
        # apply rope for context and noised input independently
        if rope is not None:
            freqs, xpos_scale = rope
            q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale**-1.0) if xpos_scale is not None else (1.0, 1.0)
            query = apply_rotary_pos_emb(query, freqs, q_xpos_scale)
            key = apply_rotary_pos_emb(key, freqs, k_xpos_scale)
        if c_rope is not None:
            freqs, xpos_scale = c_rope
            q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale**-1.0) if xpos_scale is not None else (1.0, 1.0)
            c_query = apply_rotary_pos_emb(c_query, freqs, q_xpos_scale)
            c_key = apply_rotary_pos_emb(c_key, freqs, k_xpos_scale)
        # attention over the concatenated [x; c] sequence
        query = torch.cat([query, c_query], dim=1)
        key = torch.cat([key, c_key], dim=1)
        value = torch.cat([value, c_value], dim=1)
        inner_dim = key.shape[-1]
        head_dim = inner_dim // attn.heads
        query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
        # mask. e.g. inference got a batch with different target durations, mask out the padding
        if mask is not None:
            attn_mask = F.pad(mask, (0, c.shape[1]), value=True)  # no mask for c (text)
            attn_mask = attn_mask.unsqueeze(1).unsqueeze(1)  # 'b n -> b 1 1 n'
            attn_mask = attn_mask.expand(batch_size, attn.heads, query.shape[-2], key.shape[-2])
        else:
            attn_mask = None
        x = F.scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, dropout_p=0.0, is_causal=False)
        x = x.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
        x = x.to(query.dtype)
        # Split the attention outputs: first residual-length rows are x, rest is c.
        x, c = (
            x[:, : residual.shape[1]],
            x[:, residual.shape[1]:],
        )
        # linear proj
        x = attn.to_out[0](x)
        # dropout
        x = attn.to_out[1](x)
        if not attn.context_pre_only:
            c = attn.to_out_c(c)
        if mask is not None:
            mask = mask.unsqueeze(-1)
            x = x.masked_fill(~mask, 0.0)
            # c = c.masked_fill(~mask, 0.)  # no mask for c (text)
        return x, c
# DiT Block
class DiTBlock(nn.Module):
    """Single DiT layer: AdaLN-Zero modulated self-attention followed by a
    gated, modulated feed-forward, both as residual updates."""

    def __init__(self, dim, heads, dim_head, ff_mult=4, dropout=0.1):
        super().__init__()
        self.attn_norm = AdaLayerNormZero(dim)
        self.attn = Attention(
            processor=AttnProcessor(),
            dim=dim,
            heads=heads,
            dim_head=dim_head,
            dropout=dropout,
        )
        self.ff_norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
        self.ff = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh")

    def forward(self, x, t, mask=None, rope=None):  # x: noised input, t: time embedding
        # pre-norm & modulation for attention input (also yields MLP mod terms)
        norm, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.attn_norm(x, emb=t)
        # attention
        attn_output = self.attn(x=norm, mask=mask, rope=rope)
        # gated residual update from attention
        x = x + gate_msa.unsqueeze(1) * attn_output
        # modulated feed-forward, then its gated residual update
        ff_norm = self.ff_norm(x) * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        ff_output = self.ff(ff_norm)
        x = x + gate_mlp.unsqueeze(1) * ff_output
        return x
# MMDiT Block https://arxiv.org/abs/2403.03206
class MMDiTBlock(nn.Module):
    r"""
    MM-DiT block (https://arxiv.org/abs/2403.03206),
    modified from diffusers/src/diffusers/models/attention.py
    notes.
    _c: context related. text, cond, etc. (left part in sd3 fig2.b)
    _x: noised input related. (right part)
    context_pre_only: last layer only do prenorm + modulation cuz no more ffn
    """

    def __init__(self, dim, heads, dim_head, ff_mult=4, dropout=0.1, context_pre_only=False):
        super().__init__()
        self.context_pre_only = context_pre_only
        # Final layer uses the lighter AdaLN (no gates) for the context stream.
        self.attn_norm_c = AdaLayerNormZero_Final(dim) if context_pre_only else AdaLayerNormZero(dim)
        self.attn_norm_x = AdaLayerNormZero(dim)
        self.attn = Attention(
            processor=JointAttnProcessor(),
            dim=dim,
            heads=heads,
            dim_head=dim_head,
            dropout=dropout,
            context_dim=dim,
            context_pre_only=context_pre_only,
        )
        # Context feed-forward only exists before the last layer.
        if not context_pre_only:
            self.ff_norm_c = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
            self.ff_c = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh")
        else:
            self.ff_norm_c = None
            self.ff_c = None
        self.ff_norm_x = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
        self.ff_x = FeedForward(dim=dim, mult=ff_mult, dropout=dropout, approximate="tanh")

    def forward(self, x, c, t, mask=None, rope=None, c_rope=None):  # x: noised input, c: context, t: time embedding
        """Run joint attention over (x, c); returns (c, x), c is None on the last layer."""
        # pre-norm & modulation for attention input
        if self.context_pre_only:
            norm_c = self.attn_norm_c(c, t)
        else:
            norm_c, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.attn_norm_c(c, emb=t)
        norm_x, x_gate_msa, x_shift_mlp, x_scale_mlp, x_gate_mlp = self.attn_norm_x(x, emb=t)
        # attention
        x_attn_output, c_attn_output = self.attn(x=norm_x, c=norm_c, mask=mask, rope=rope, c_rope=c_rope)
        # process attention output for context c
        if self.context_pre_only:
            c = None
        else:  # if not last layer
            c = c + c_gate_msa.unsqueeze(1) * c_attn_output
            norm_c = self.ff_norm_c(c) * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
            c_ff_output = self.ff_c(norm_c)
            c = c + c_gate_mlp.unsqueeze(1) * c_ff_output
        # process attention output for input x
        x = x + x_gate_msa.unsqueeze(1) * x_attn_output
        norm_x = self.ff_norm_x(x) * (1 + x_scale_mlp[:, None]) + x_shift_mlp[:, None]
        x_ff_output = self.ff_x(norm_x)
        x = x + x_gate_mlp.unsqueeze(1) * x_ff_output
        return c, x
# time step conditioning embedding
class TimestepEmbedding(nn.Module):
    """Map scalar diffusion timesteps to conditioning vectors of width dim:
    sinusoidal features followed by a two-layer SiLU MLP."""

    def __init__(self, dim, freq_embed_dim=256):
        super().__init__()
        self.time_embed = SinusPositionEmbedding(freq_embed_dim)
        self.time_mlp = nn.Sequential(nn.Linear(freq_embed_dim, dim), nn.SiLU(), nn.Linear(dim, dim))

    def forward(self, timestep: float["b"]):  # noqa: F821
        # Match the MLP input dtype to the incoming timestep dtype.
        hidden = self.time_embed(timestep).to(timestep.dtype)
        return self.time_mlp(hidden)  # b d
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "cosyvoice/flow/DiT/modules.py",
"license": "Apache License 2.0",
"lines": 471,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/model_repo/cosyvoice2_dit/1/model.py | # Copyright 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import math
import os
import re
import time
from typing import Dict, List, Tuple, Optional, Union
import asyncio
import httpx
import numpy as np
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack
import triton_python_backend_utils as pb_utils
from transformers import AutoTokenizer
import torchaudio
from matcha.utils.audio import mel_spectrogram
ORIGINAL_VOCAB_SIZE = 151663
torch.set_num_threads(1)
def parse_speech_token_string(response_text: str) -> List[int]:
    """
    Parses a string of speech tokens (e.g., "<|s_123|><|s_456|>") into a list of integer IDs.
    """
    pieces = response_text.strip().split('><')
    if len(pieces) > 1:
        # Splitting on '><' strips the brackets between tokens; restore them.
        pieces = [p if p.startswith('<') else '<' + p for p in pieces]
        pieces = [p if p.endswith('>') else p + '>' for p in pieces]
    token_re = re.compile(r'<\|s_(\d+)\|>')
    ids: List[int] = []
    for piece in pieces:
        m = token_re.match(piece)
        if m is not None:
            ids.append(int(m.group(1)))
    return ids
class TritonPythonModel:
"""Triton Python model for Spark TTS.
This model orchestrates the end-to-end TTS pipeline by coordinating
between audio tokenizer, LLM, and vocoder components.
"""
def initialize(self, args):
    """Initialize the model.

    Parses backend parameters, loads the LLM tokenizer, and sets up the
    async HTTP client plus streaming chunk-scheduler state.

    Args:
        args: Dictionary containing model configuration
    """
    self.logger = pb_utils.Logger
    # Parse model parameters
    self.model_config = json.loads(args['model_config'])
    parameters = self.model_config['parameters']
    model_params = {k: v["string_value"] for k, v in parameters.items()}
    # How the streamed token chunk size evolves across chunks.
    self.dynamic_chunk_strategy = model_params.get("dynamic_chunk_strategy", "exponential")  # "exponential" or "time_based"
    self.logger.log_info(f"Using dynamic chunk strategy: {self.dynamic_chunk_strategy}")
    # Initialize tokenizer
    llm_tokenizer_dir = model_params["llm_tokenizer_dir"]
    self.tokenizer = AutoTokenizer.from_pretrained(llm_tokenizer_dir)
    self.prompt_template = "<|sos|>{input_text}<|task_id|>"
    self.eos_token_id = self.tokenizer.convert_tokens_to_ids("<|eos1|>")
    self.device = torch.device("cuda")
    self.decoupled = pb_utils.using_decoupled_model_transaction_policy(self.model_config)
    # Flow/vocoder chunking constants (token_frame_rate speech tokens per second).
    self.token_frame_rate = 25
    self.flow_pre_lookahead_len = 3
    self.token_hop_len = 15
    # Async client for the OpenAI-compatible trtllm-serve endpoint.
    self.http_client = httpx.AsyncClient()
    self.api_base = "http://localhost:8000/v1/chat/completions"
    # Cache of prompt speech tokens keyed by reference text.
    self.speaker_cache = {}
def _convert_speech_tokens_to_str(self, speech_tokens: Union[torch.Tensor, List]) -> str:
    """Render speech token IDs as a "<|s_N|>..." string, where N is the raw
    id offset back by ORIGINAL_VOCAB_SIZE (the text vocab size)."""
    if isinstance(speech_tokens, torch.Tensor):
        # Move to host memory and flatten before iterating.
        speech_tokens = speech_tokens.cpu().numpy().flatten().tolist()
    return "".join(f"<|s_{token_id - ORIGINAL_VOCAB_SIZE}|>" for token_id in speech_tokens)
async def forward_llm_async(self, target_text: str, reference_text: str, prompt_speech_tokens: Union[torch.Tensor, List]):
    """
    Asynchronously sends a request to the TRTLLM-serve endpoint and processes the streaming response.

    Yields integer speech-token IDs (token number + ORIGINAL_VOCAB_SIZE) as
    complete "<|s_N|>" tokens arrive in the SSE stream.
    """
    full_text = f"{reference_text}{target_text}"
    prompt_speech_tokens_str = self._convert_speech_tokens_to_str(prompt_speech_tokens)
    # Seed the assistant turn with the prompt speech tokens so generation
    # continues in the reference speaker's voice.
    chat = [
        {"role": "user", "content": full_text},
        {"role": "assistant", "content": prompt_speech_tokens_str}
    ]
    payload = {
        "model": "trt_engines_bfloat16",
        "messages": chat,
        "max_tokens": 750,
        "temperature": 0.8,
        "top_p": 0.95,
        "top_k": 50,
        "repetition_penalty": 1.1,
        "stop": ["<|eos1|>", "<|eos|>"],
        "stream": True,
    }
    buffer = ""
    async with self.http_client.stream("POST", self.api_base, json=payload, timeout=None) as response:
        response.raise_for_status()
        # Server-sent events: each payload line is "data: {json}" or "data: [DONE]".
        async for line in response.aiter_lines():
            if line.startswith("data: "):
                line_data = line[len("data: "):].strip()
                if line_data == "[DONE]":
                    break
                try:
                    json_data = json.loads(line_data)
                    content = json_data.get("choices", [{}])[0].get("delta", {}).get("content")
                    if content:
                        buffer += content
                        # Emit every complete "<|s_N|>" currently buffered;
                        # partial tokens stay in the buffer for the next delta.
                        while True:
                            match = re.search(r"<\|s_(\d+)\|>", buffer)
                            if not match:
                                break
                            token_num = int(match.group(1))
                            final_id = token_num + ORIGINAL_VOCAB_SIZE
                            yield final_id
                            buffer = buffer[match.end():]
                except json.JSONDecodeError:
                    self.logger.log_info(f"Skipping non-JSON line: {line_data}")
                    continue
    # Process any remaining complete tokens in the buffer after the stream ends
    while True:
        match = re.search(r"<\|s_(\d+)\|>", buffer)
        if not match:
            break
        token_num = int(match.group(1))
        final_id = token_num + ORIGINAL_VOCAB_SIZE
        yield final_id
        buffer = buffer[match.end():]
def forward_audio_tokenizer(self, wav, wav_len):
    """Forward pass through the audio tokenizer component.

    Args:
        wav: Input waveform tensor (pb_utils.Tensor)
        wav_len: Waveform length tensor (pb_utils.Tensor)

    Returns:
        Prompt speech tokens as a CPU torch tensor.
    """
    inference_request = pb_utils.InferenceRequest(
        model_name='audio_tokenizer',
        requested_output_names=['prompt_speech_tokens'],
        inputs=[wav, wav_len]
    )
    # Synchronous BLS call into the audio_tokenizer model.
    inference_response = inference_request.exec()
    if inference_response.has_error():
        raise pb_utils.TritonModelException(inference_response.error().message())
    # Extract and convert output tensors (zero-copy via DLPack, then to CPU).
    prompt_speech_tokens = pb_utils.get_output_tensor_by_name(inference_response, 'prompt_speech_tokens')
    prompt_speech_tokens = torch.utils.dlpack.from_dlpack(prompt_speech_tokens.to_dlpack()).cpu()
    return prompt_speech_tokens
def forward_speaker_embedding(self, wav):
    """Forward pass through the speaker embedding component.

    Args:
        wav: Reference waveform as a torch tensor

    Returns:
        Prompt speaker embedding tensor (device left as returned by the component)
    """
    inference_request = pb_utils.InferenceRequest(
        model_name='speaker_embedding',
        requested_output_names=['prompt_spk_embedding'],
        # Wrap the torch tensor as a Triton tensor via DLPack (zero-copy).
        inputs=[pb_utils.Tensor.from_dlpack("reference_wav", to_dlpack(wav))]
    )
    inference_response = inference_request.exec()
    if inference_response.has_error():
        raise pb_utils.TritonModelException(inference_response.error().message())
    # Extract and convert output tensors
    prompt_spk_embedding = pb_utils.get_output_tensor_by_name(inference_response, 'prompt_spk_embedding')
    prompt_spk_embedding = torch.utils.dlpack.from_dlpack(prompt_spk_embedding.to_dlpack())
    return prompt_spk_embedding
async def forward_token2wav(
        self,
        index: int,
        target_speech_tokens: torch.Tensor,
        request_id: str,
        reference_wav: object,
        reference_wav_len: object,
        finalize: bool = None) -> torch.Tensor:
    """Forward pass through the vocoder component.

    Args:
        index: Index of the streamed chunk (drives the request priority)
        target_speech_tokens: Target speech tokens tensor
        request_id: Request ID (lets token2wav keep per-stream state)
        reference_wav: Reference waveform tensor
        reference_wav_len: Waveform length tensor
        finalize: Whether this is the final chunk of the stream

    Returns:
        Generated waveform tensor (CPU)
    """
    target_speech_tokens_tensor = pb_utils.Tensor.from_dlpack("target_speech_tokens", to_dlpack(target_speech_tokens))
    finalize_tensor = pb_utils.Tensor("finalize", np.array([[finalize]], dtype=np.bool_))
    inputs_tensor = [target_speech_tokens_tensor, reference_wav, reference_wav_len, finalize_tensor]
    # Create and execute inference request
    inference_request = pb_utils.InferenceRequest(
        model_name='token2wav_dit',
        requested_output_names=[
            "waveform",
        ],
        inputs=inputs_tensor,
        request_id=request_id,
        # Later chunks get a higher priority value so in-flight streams keep up.
        parameters={"priority": index + 1},
    )
    inference_response = await inference_request.async_exec()
    if inference_response.has_error():
        raise pb_utils.TritonModelException(inference_response.error().message())
    # Extract and convert output waveform
    waveform = pb_utils.get_output_tensor_by_name(inference_response, 'waveform')
    waveform = torch.utils.dlpack.from_dlpack(waveform.to_dlpack()).cpu()
    return waveform
def _extract_speech_feat(self, speech):
    """Compute 80-bin mel features for a 24 kHz waveform and return them as
    (1, frames, 80) on self.device."""
    feat = mel_spectrogram(
        speech,
        n_fft=1920,
        num_mels=80,
        sampling_rate=24000,
        hop_size=480,
        win_size=1920,
        fmin=0,
        fmax=8000,
    )
    # (1, n_mels, frames) -> (frames, n_mels), then restore the batch dim.
    feat = feat.squeeze(dim=0).transpose(0, 1).to(self.device)
    return feat.unsqueeze(dim=0)
    async def _process_request(self, request):
        """Serve one streaming TTS request end to end.

        Tokenizes the reference audio once (cached per reference text),
        streams semantic tokens from the LLM, vocodes them chunk by chunk
        through token2wav, and sends each audio chunk on the decoupled
        response sender.

        Args:
            request: Triton request with reference_text, reference_wav,
                reference_wav_len and target_text input tensors.

        Raises:
            NotImplementedError: when the model is not running in decoupled
                (streaming) mode.
        """
        request_id = request.request_id()
        reference_text = pb_utils.get_input_tensor_by_name(request, "reference_text").as_numpy()
        reference_text = reference_text[0][0].decode('utf-8')
        wav = pb_utils.get_input_tensor_by_name(request, "reference_wav")
        wav_len = pb_utils.get_input_tensor_by_name(request, "reference_wav_len")
        # Cache prompt speech tokens per reference text so repeated requests
        # skip the audio-tokenizer round trip.
        if reference_text not in self.speaker_cache:
            self.speaker_cache[reference_text] = self.forward_audio_tokenizer(wav, wav_len).unsqueeze(0)
        prompt_speech_tokens = self.speaker_cache[reference_text]
        target_text = pb_utils.get_input_tensor_by_name(request, "target_text").as_numpy()
        target_text = target_text[0][0].decode('utf-8')
        if self.decoupled:
            response_sender = request.get_response_sender()
            # All tokens produced so far; token_offset marks how many have
            # already been consumed by the vocoder.
            semantic_token_ids_arr = []
            token_offset, chunk_index = 0, 0
            start_time = time.time()
            this_token_hop_len = self.token_hop_len
            async for generated_ids in self.forward_llm_async(
                target_text=target_text,
                reference_text=reference_text,
                prompt_speech_tokens=prompt_speech_tokens,
            ):
                # NOTE(review): a falsy yield ends the stream; if the LLM can
                # legitimately emit token id 0 this would truncate — confirm
                # the token id space.
                if not generated_ids:
                    break
                semantic_token_ids_arr.append(generated_ids)
                # Vocode as many full chunks as are available, always keeping
                # flow_pre_lookahead_len tokens of lookahead for the flow.
                while True:
                    pending_num = len(semantic_token_ids_arr) - token_offset
                    if pending_num >= this_token_hop_len + self.flow_pre_lookahead_len:
                        this_tts_speech_token = semantic_token_ids_arr[token_offset:token_offset + this_token_hop_len + self.flow_pre_lookahead_len]
                        this_tts_speech_token = torch.tensor(this_tts_speech_token).unsqueeze(dim=0).to(torch.int32).to(self.device)
                        sub_tts_speech = await self.forward_token2wav(
                            chunk_index,
                            this_tts_speech_token, request_id, wav, wav_len, False
                        )
                        audio_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(sub_tts_speech))
                        inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
                        response_sender.send(inference_response)
                        # Only the hop advances; lookahead tokens are re-used
                        # by the next chunk.
                        token_offset += this_token_hop_len
                        # Choose the size of the next chunk.
                        if self.dynamic_chunk_strategy == "exponential":
                            # Chunk size doubles each time: small first chunk
                            # for latency, larger later chunks for throughput.
                            this_token_hop_len = self.token_frame_rate * (2 ** chunk_index)
                        elif self.dynamic_chunk_strategy == "equal":
                            this_token_hop_len = self.token_hop_len
                        elif self.dynamic_chunk_strategy == "time_based":
                            # Scale the next chunk by how much audio headroom
                            # (generated duration minus elapsed time) remains.
                            # see https://github.com/qi-hua/async_cosyvoice/blob/main/model.py#L306
                            cost_time = time.time() - start_time
                            duration = token_offset / self.token_frame_rate
                            if chunk_index > 0 and cost_time > 0:
                                avg_chunk_processing_time = cost_time / (chunk_index + 1)
                                if avg_chunk_processing_time > 0:
                                    # Headroom expressed in average chunk
                                    # processing times.
                                    multiples = (duration - cost_time) / avg_chunk_processing_time
                                    next_pending_num = len(semantic_token_ids_arr) - token_offset
                                    if multiples > 4:
                                        this_token_hop_len = (next_pending_num // self.token_hop_len + 1) * self.token_hop_len
                                    elif multiples > 2:
                                        this_token_hop_len = (next_pending_num // self.token_hop_len) * self.token_hop_len
                                    else:
                                        this_token_hop_len = self.token_hop_len
                            # Never go below the configured base hop length.
                            this_token_hop_len = max(self.token_hop_len, this_token_hop_len)
                        chunk_index += 1
                    else:
                        break
            # Flush all remaining tokens (including lookahead) as the final
            # chunk, then signal end of stream.
            this_tts_speech_token = torch.tensor(semantic_token_ids_arr[token_offset:]).unsqueeze(dim=0).to(torch.int32).to(self.device)
            sub_tts_speech = await self.forward_token2wav(chunk_index, this_tts_speech_token, request_id, wav, wav_len, True)
            audio_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(sub_tts_speech))
            inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
            response_sender.send(inference_response)
            response_sender.send(flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL)
        else:
            raise NotImplementedError("Offline TTS mode is not supported")
async def execute(self, requests):
"""Execute inference on the batched requests.
Args:
requests: List of inference requests
Returns:
List of inference responses containing generated audio
"""
tasks = [
asyncio.create_task(self._process_request(request))
for request in requests
]
await asyncio.gather(*tasks)
return None
def finalize(self):
self.logger.log_info("Finalizing CosyVoice DIT model")
if hasattr(self, "http_client"):
asyncio.run(self.http_client.aclose())
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/model_repo/cosyvoice2_dit/1/model.py",
"license": "Apache License 2.0",
"lines": 332,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/model_repo/speaker_embedding/1/model.py | # Copyright 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import torch
from torch.utils.dlpack import to_dlpack
import triton_python_backend_utils as pb_utils
import os
import numpy as np
import torchaudio.compliance.kaldi as kaldi
from cosyvoice.utils.file_utils import convert_onnx_to_trt
from cosyvoice.utils.common import TrtContextWrapper
import onnxruntime
class TritonPythonModel:
    """Triton Python model for speaker-embedding extraction.

    Takes a reference waveform and returns a 192-dim campplus speaker
    embedding, computed either with a TensorRT engine (GPU) or an ONNX
    Runtime session (CPU fallback).
    """
    def initialize(self, args):
        """Initialize the model.

        Args:
            args: Dictionary containing model configuration; the parsed
                'model_dir' parameter locates the campplus model files.
        """
        # Parse model parameters
        parameters = json.loads(args['model_config'])['parameters']
        model_params = {k: v["string_value"] for k, v in parameters.items()}
        self.device = torch.device("cuda")
        model_dir = model_params["model_dir"]
        # NOTE(review): GPU tag and the TRT toggle are hard-coded; presumably
        # they should come from model_params — confirm.
        gpu = "l20"
        enable_trt = True
        if enable_trt:
            # fp16=False: the campplus plan file is built/loaded in fp32.
            self.load_spk_trt(f'{model_dir}/campplus.{gpu}.fp32.trt',
                              f'{model_dir}/campplus.onnx',
                              1,
                              False)
        else:
            # CPU fallback via ONNX Runtime.
            campplus_model = f'{model_dir}/campplus.onnx'
            option = onnxruntime.SessionOptions()
            option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
            option.intra_op_num_threads = 1
            self.spk_model = onnxruntime.InferenceSession(campplus_model, sess_options=option, providers=["CPUExecutionProvider"])
    def load_spk_trt(self, spk_model, spk_onnx_model, trt_concurrent=1, fp16=True):
        """Load the campplus TensorRT engine, converting from ONNX first when
        the plan file is missing or empty; wraps it in a context pool."""
        if not os.path.exists(spk_model) or os.path.getsize(spk_model) == 0:
            trt_kwargs = self.get_spk_trt_kwargs()
            convert_onnx_to_trt(spk_model, trt_kwargs, spk_onnx_model, fp16)
        import tensorrt as trt
        with open(spk_model, 'rb') as f:
            spk_engine = trt.Runtime(trt.Logger(trt.Logger.INFO)).deserialize_cuda_engine(f.read())
        assert spk_engine is not None, 'failed to load trt {}'.format(spk_model)
        self.spk_model = TrtContextWrapper(spk_engine, trt_concurrent=trt_concurrent, device=self.device)
    def get_spk_trt_kwargs(self):
        # One dynamic-shape profile for the single "input" tensor:
        # (1, T, 80) fbank frames with T in [4, 3000].
        min_shape = [(1, 4, 80)]
        opt_shape = [(1, 500, 80)]
        max_shape = [(1, 3000, 80)]
        input_names = ["input"]
        return {'min_shape': min_shape, 'opt_shape': opt_shape, 'max_shape': max_shape, 'input_names': input_names}
    def _extract_spk_embedding(self, speech):
        """Compute the campplus speaker embedding for *speech*.

        Args:
            speech: waveform tensor; fed to kaldi.fbank with
                sample_frequency=16000, so presumably 16 kHz — confirm.

        Returns:
            (1, 192) float16 embedding tensor.
        """
        feat = kaldi.fbank(speech,
                           num_mel_bins=80,
                           dither=0,
                           sample_frequency=16000)
        # Mean-normalize the features over time.
        spk_feat = feat - feat.mean(dim=0, keepdim=True)
        if isinstance(self.spk_model, onnxruntime.InferenceSession):
            embedding = self.spk_model.run(
                None, {self.spk_model.get_inputs()[0].name: spk_feat.unsqueeze(dim=0).cpu().numpy()}
            )[0].flatten().tolist()
            embedding = torch.tensor([embedding]).to(self.device)
        else:
            [spk_model, stream], trt_engine = self.spk_model.acquire_estimator()
            # NOTE need to synchronize when switching stream
            with torch.cuda.device(self.device):
                torch.cuda.current_stream().synchronize()
                spk_feat = spk_feat.unsqueeze(dim=0).to(self.device)
                batch_size = spk_feat.size(0)
                with stream:
                    spk_model.set_input_shape('input', (batch_size, spk_feat.size(1), 80))
                    embedding = torch.empty((batch_size, 192), device=spk_feat.device)
                    # Bind buffers by engine tensor order: assumes binding 0
                    # is the input and binding 1 the embedding output.
                    data_ptrs = [spk_feat.contiguous().data_ptr(),
                                 embedding.contiguous().data_ptr()]
                    for i, j in enumerate(data_ptrs):
                        spk_model.set_tensor_address(trt_engine.get_tensor_name(i), j)
                    # run trt engine
                    assert spk_model.execute_async_v3(torch.cuda.current_stream().cuda_stream) is True
                    torch.cuda.current_stream().synchronize()
            self.spk_model.release_estimator(spk_model, stream)
        return embedding.half()
    def execute(self, requests):
        """Execute inference on the batched requests.

        Args:
            requests: List of inference requests
        Returns:
            List of inference responses, each carrying a
            'prompt_spk_embedding' output tensor.
        """
        responses = []
        # Process each request in batch
        for request in requests:
            # Extract input tensors
            wav_array = pb_utils.get_input_tensor_by_name(
                request, "reference_wav").as_numpy()
            wav_array = torch.from_numpy(wav_array).to(self.device)
            embedding = self._extract_spk_embedding(wav_array)
            prompt_spk_embedding_tensor = pb_utils.Tensor.from_dlpack(
                "prompt_spk_embedding", to_dlpack(embedding))
            inference_response = pb_utils.InferenceResponse(
                output_tensors=[prompt_spk_embedding_tensor])
            responses.append(inference_response)
        return responses
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/model_repo/speaker_embedding/1/model.py",
"license": "Apache License 2.0",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/model_repo/token2wav_dit/1/model.py | # Copyright 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import logging
from typing import List, Dict
import torch
from torch.utils.dlpack import to_dlpack
from torch.nn import functional as F
import triton_python_backend_utils as pb_utils
from hyperpyyaml import load_hyperpyyaml
from cosyvoice.utils.common import fade_in_out
from cosyvoice.utils.file_utils import convert_onnx_to_trt, export_cosyvoice2_vllm
from cosyvoice.utils.common import TrtContextWrapper
from collections import defaultdict
import numpy as np
from .token2wav_dit import CosyVoice2_Token2Wav
import hashlib
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
ORIGINAL_VOCAB_SIZE = 151663
torch.set_num_threads(1)
def get_spk_id_from_prompt_audio(tensor: torch.Tensor) -> str:
    """
    Generates a unique ID for a torch.Tensor.
    Tensors with the same elements will have the same ID.

    Args:
        tensor: prompt-audio tensor; may live on any device and may
            require grad.

    Returns:
        Hex SHA-256 digest of the tensor's raw element bytes.
    """
    # detach().cpu() makes this safe for CUDA tensors and tensors that
    # require grad, where a bare .numpy() would raise; for plain CPU
    # tensors the bytes (and hence the ID) are unchanged.
    tensor_bytes = tensor.detach().cpu().numpy().tobytes()
    # Create a SHA-256 hash of the byte string
    hasher = hashlib.sha256()
    hasher.update(tensor_bytes)
    return hasher.hexdigest()
class TritonPythonModel:
    """Triton Python model for the token2wav vocoder stage.

    Converts streamed speech tokens plus a reference waveform into audio
    chunks using the CosyVoice2 token2wav (flow + HiFT) pipeline.
    """
    def initialize(self, args):
        """Initialize the model.

        Args:
            args: Dictionary containing model configuration; 'model_dir'
                points at the token2wav model assets.
        """
        # Parse model parameters
        parameters = json.loads(args['model_config'])['parameters']
        model_params = {key: value["string_value"] for key, value in parameters.items()}
        model_dir = model_params["model_dir"]
        # Initialize device and vocoder
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        logger.info(f"Initializing vocoder from {model_dir} on {self.device}")
        # FIXME: device id settings
        self.token2wav_model = CosyVoice2_Token2Wav(
            model_dir, enable_trt=True, streaming=True
        )
        logger.info("Token2Wav initialized successfully")
    def execute(self, requests):
        """Execute inference on the batched requests.

        Args:
            requests: List of inference requests
        Returns:
            List of inference responses containing generated waveforms
        """
        responses = []
        # Process each request in batch
        for request in requests:
            target_speech_tokens_tensor = pb_utils.get_input_tensor_by_name(request, "target_speech_tokens").as_numpy()
            target_speech_tokens = torch.from_numpy(target_speech_tokens_tensor)
            # The LLM emits speech tokens offset by the text vocab size;
            # shift them back into the speech-token id space.
            target_speech_tokens = target_speech_tokens - ORIGINAL_VOCAB_SIZE
            # NOTE(review): squeeze().tolist() yields a bare scalar (not a
            # list) when exactly one token is present — confirm downstream
            # handles that case.
            target_speech_tokens = target_speech_tokens.squeeze().tolist()
            finalize = pb_utils.get_input_tensor_by_name(request, "finalize").as_numpy().item()
            request_id = request.request_id()
            wav_array = pb_utils.get_input_tensor_by_name(
                request, "reference_wav").as_numpy()
            wav_len = pb_utils.get_input_tensor_by_name(
                request, "reference_wav_len").as_numpy().item()
            wav_array = torch.from_numpy(wav_array)
            # Trim padding and drop the batch dim -> 1-D prompt waveform.
            wav = wav_array[:, :wav_len].squeeze(0)
            # Speaker id is a content hash of the prompt audio, so identical
            # prompts reuse the cached speaker state inside token2wav.
            spk_id = get_spk_id_from_prompt_audio(wav)
            audio_hat = self.token2wav_model.forward_streaming(
                target_speech_tokens, finalize, request_id=request_id,
                speaker_id=f"{spk_id}", prompt_audio=wav, prompt_audio_sample_rate=16000
            )
            outputs = []
            wav_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(audio_hat))
            outputs.append(wav_tensor)
            inference_response = pb_utils.InferenceResponse(output_tensors=outputs)
            responses.append(inference_response)
        return responses
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/model_repo/token2wav_dit/1/model.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/model_repo/token2wav_dit/1/token2wav_dit.py | # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Example Usage
CUDA_VISIBLE_DEVICES=0 \
python3 token2wav.py --enable-trt || exit 1
"""
import torch
# from flashcosyvoice.modules.flow import CausalMaskedDiffWithXvec
from flashcosyvoice.modules.hifigan import HiFTGenerator
from flashcosyvoice.utils.audio import mel_spectrogram
import torchaudio.compliance.kaldi as kaldi
import onnxruntime
import s3tokenizer
from torch.utils.data import DataLoader
from datasets import load_dataset
import torchaudio
import os
import logging
import argparse
import queue
import time
import numpy as np
from hyperpyyaml import load_hyperpyyaml
def fade_in_out(fade_in_mel: torch.Tensor, fade_out_mel: torch.Tensor, window: torch.Tensor):
    """Cross-fade two mel chunks over the first half of *window*.

    The leading half-window of *fade_in_mel* is blended with the trailing
    half-window of *fade_out_mel*; the inputs are not modified.

    Returns:
        A new tensor shaped like *fade_in_mel* with the blended overlap.
    """
    overlap = window.shape[0] // 2
    blended = fade_in_mel.clone()
    rising = blended[..., :overlap] * window[:overlap]
    falling = fade_out_mel[..., -overlap:] * window[overlap:]
    blended[..., :overlap] = rising + falling
    return blended
def convert_onnx_to_trt(trt_model, trt_kwargs, onnx_model, dtype):
    """Build a TensorRT engine from *onnx_model* and serialize it to *trt_model*.

    Args:
        trt_model: output path for the serialized engine file.
        trt_kwargs: dict with 'input_names', 'min_shape', 'opt_shape' and
            'max_shape' lists describing one optimization profile.
        onnx_model: path to the source ONNX file.
        dtype: torch.float16 / torch.bfloat16 / torch.float32; sets the
            network I/O tensor types (plus the FP16 builder flag for fp16).

    Raises:
        ValueError: if the ONNX file cannot be parsed or *dtype* is
            unsupported.
    """
    import tensorrt as trt
    logging.info("Converting onnx to trt...")
    network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    logger = trt.Logger(trt.Logger.INFO)
    builder = trt.Builder(logger)
    network = builder.create_network(network_flags)
    parser = trt.OnnxParser(network, logger)
    config = builder.create_builder_config()
    # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 32) # 4GB
    if dtype == torch.float16:
        config.set_flag(trt.BuilderFlag.FP16)
    # NOTE(review): no BF16 builder flag is set for bfloat16 — only the I/O
    # tensor dtypes below change; confirm this is intended.
    profile = builder.create_optimization_profile()
    # load onnx model
    with open(onnx_model, "rb") as f:
        if not parser.parse(f.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))
            raise ValueError('failed to parse {}'.format(onnx_model))
    # set input shapes
    for i in range(len(trt_kwargs['input_names'])):
        profile.set_shape(trt_kwargs['input_names'][i], trt_kwargs['min_shape'][i], trt_kwargs['opt_shape'][i], trt_kwargs['max_shape'][i])
    if dtype == torch.float16:
        tensor_dtype = trt.DataType.HALF
    elif dtype == torch.bfloat16:
        tensor_dtype = trt.DataType.BF16
    elif dtype == torch.float32:
        tensor_dtype = trt.DataType.FLOAT
    else:
        raise ValueError('invalid dtype {}'.format(dtype))
    # set input and output data type
    for i in range(network.num_inputs):
        input_tensor = network.get_input(i)
        input_tensor.dtype = tensor_dtype
    for i in range(network.num_outputs):
        output_tensor = network.get_output(i)
        output_tensor.dtype = tensor_dtype
    config.add_optimization_profile(profile)
    engine_bytes = builder.build_serialized_network(network, config)
    # save trt engine
    with open(trt_model, "wb") as f:
        f.write(engine_bytes)
    logging.info("Succesfully convert onnx to trt...")
class TrtContextWrapper:
    """Blocking pool of TensorRT execution contexts.

    Each pool entry pairs an execution context with its own CUDA stream;
    callers borrow a pair via acquire_estimator() and must return it with
    release_estimator() when finished.
    """
    def __init__(self, trt_engine, trt_concurrent=1, device='cuda:0'):
        self.trt_context_pool = queue.Queue(maxsize=trt_concurrent)
        self.trt_engine = trt_engine
        self.device = device
        for _ in range(trt_concurrent):
            context = trt_engine.create_execution_context()
            ctx_stream = torch.cuda.stream(torch.cuda.Stream(torch.device(device)))
            assert context is not None, 'failed to create trt context, maybe not enough CUDA memory, try reduce current trt concurrent {}'.format(trt_concurrent)
            self.trt_context_pool.put([context, ctx_stream])
        assert self.trt_context_pool.empty() is False, 'no avaialbe estimator context'
    def acquire_estimator(self):
        """Borrow a [context, stream] pair (blocks if the pool is empty) and
        the shared engine."""
        return self.trt_context_pool.get(), self.trt_engine
    def release_estimator(self, context, stream):
        """Give a previously acquired [context, stream] pair back to the pool."""
        self.trt_context_pool.put([context, stream])
class CosyVoice2_Token2Wav(torch.nn.Module):
    def __init__(self, model_dir: str, enable_trt: bool = False, device_id: int = 0, streaming: bool = False, dtype: torch.dtype = torch.float16):
        """Load every token2wav component from *model_dir*.

        Args:
            model_dir: directory containing flow.yaml/flow.pt, hift.pt,
                campplus.onnx and speech_tokenizer_v2_25hz.onnx.
            enable_trt: replace the flow estimator and campplus models with
                TensorRT engines.
            device_id: CUDA device index to run on.
            streaming: use the chunked (streaming) flow-estimator engine
                instead of the offline one.
            dtype: compute dtype for the flow model.
        """
        super().__init__()
        self.device_id = device_id
        self.device = f"cuda:{device_id}"
        # Flow-matching acoustic model (speech tokens -> mels).
        with open(f"{model_dir}/flow.yaml", "r") as f:
            configs = load_hyperpyyaml(f)
        self.flow = configs['flow']
        self.dtype = dtype
        self.flow.to(self.dtype)
        self.flow.load_state_dict(torch.load(f"{model_dir}/flow.pt", map_location="cpu", weights_only=True), strict=True)
        self.flow.to(self.device).eval()
        # HiFT vocoder (mels -> waveform); checkpoint keys carry a
        # 'generator.' prefix that must be stripped before loading.
        self.hift = HiFTGenerator()
        hift_state_dict = {k.replace('generator.', ''): v for k, v in torch.load(f"{model_dir}/hift.pt", map_location="cpu", weights_only=True).items()}
        self.hift.load_state_dict(hift_state_dict, strict=True)
        self.hift.to(self.device).eval()
        # campplus speaker-embedding model: ONNX Runtime on CPU by default,
        # replaced by a TRT engine below when enable_trt is set.
        option = onnxruntime.SessionOptions()
        option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
        option.intra_op_num_threads = 1
        self.spk_model = onnxruntime.InferenceSession(
            f"{model_dir}/campplus.onnx", sess_options=option,
            providers=["CPUExecutionProvider"])
        # s3tokenizer quantizes prompt audio into speech tokens.
        self.audio_tokenizer = s3tokenizer.load_model(f"{model_dir}/speech_tokenizer_v2_25hz.onnx").to(self.device).eval()
        # NOTE(review): the GPU tag baked into the engine file names is
        # hard-coded — confirm it matches the deployment hardware.
        gpu = "l20"
        if enable_trt:
            if streaming:
                self.load_trt(
                    f'{model_dir}/flow.decoder.estimator.{self.dtype}.dynamic_batch.chunk.{gpu}.plan',
                    f'{model_dir}/flow.decoder.estimator.chunk.fp32.dynamic_batch.simplify.onnx',
                    1,
                    self.dtype, streaming
                )
            else:
                self.load_trt(
                    f'{model_dir}/flow.decoder.estimator.{self.dtype}.dynamic_batch.{gpu}.plan',
                    f'{model_dir}/flow.decoder.estimator.fp32.dynamic_batch.onnx',
                    1,
                    self.dtype
                )
            self.load_spk_trt(
                f'{model_dir}/campplus.{gpu}.fp32.trt',
                f'{model_dir}/campplus.onnx',
                1,
                False
            )
        # Per-request flow cache and per-speaker prompt cache for streaming.
        self.streaming_flow_cache = {}
        self.speaker_cache = {}
        self.mel_cache_len = 8  # hard-coded, 160ms
        self.source_cache_len = int(self.mel_cache_len * 480)  # 50hz mel -> 24kHz wave
        # Hamming window used to cross-fade consecutive streamed chunks.
        self.speech_window = torch.from_numpy(np.hamming(2 * self.source_cache_len)).cuda()
        # hifigan cache for streaming tts
        self.hift_cache_dict = {}
    def forward_spk_embedding(self, spk_feat):
        """Run campplus on normalized fbank features and return the speaker
        embedding as a flat list of floats.

        Dispatches to ONNX Runtime when no TRT engine is loaded, otherwise
        to a pooled TensorRT execution context on its own stream.

        Args:
            spk_feat: (T, 80) mean-normalized fbank feature tensor.
        """
        if isinstance(self.spk_model, onnxruntime.InferenceSession):
            return self.spk_model.run(
                None, {self.spk_model.get_inputs()[0].name: spk_feat.unsqueeze(dim=0).cpu().numpy()}
            )[0].flatten().tolist()
        else:
            [spk_model, stream], trt_engine = self.spk_model.acquire_estimator()
            # NOTE need to synchronize when switching stream
            with torch.cuda.device(self.device_id):
                torch.cuda.current_stream().synchronize()
                spk_feat = spk_feat.unsqueeze(dim=0).to(self.device)
                batch_size = spk_feat.size(0)
                with stream:
                    spk_model.set_input_shape('input', (batch_size, spk_feat.size(1), 80))
                    output_tensor = torch.empty((batch_size, 192), device=spk_feat.device)
                    # Bind buffers by engine tensor order: assumes binding 0
                    # is the input and binding 1 the embedding output.
                    data_ptrs = [spk_feat.contiguous().data_ptr(),
                                 output_tensor.contiguous().data_ptr()]
                    for i, j in enumerate(data_ptrs):
                        spk_model.set_tensor_address(trt_engine.get_tensor_name(i), j)
                    # run trt engine
                    assert spk_model.execute_async_v3(torch.cuda.current_stream().cuda_stream) is True
                    torch.cuda.current_stream().synchronize()
            self.spk_model.release_estimator(spk_model, stream)
            return output_tensor.cpu().numpy().flatten().tolist()
def load_spk_trt(self, spk_model, spk_onnx_model, trt_concurrent=1, fp16=True):
if not os.path.exists(spk_model) or os.path.getsize(spk_model) == 0:
trt_kwargs = self.get_spk_trt_kwargs()
convert_onnx_to_trt(spk_model, trt_kwargs, spk_onnx_model, torch.float32)
import tensorrt as trt
with open(spk_model, 'rb') as f:
spk_engine = trt.Runtime(trt.Logger(trt.Logger.INFO)).deserialize_cuda_engine(f.read())
assert spk_engine is not None, 'failed to load trt {}'.format(spk_model)
self.spk_model = TrtContextWrapper(spk_engine, trt_concurrent=trt_concurrent, device=self.device)
def get_spk_trt_kwargs(self):
min_shape = [(1, 4, 80)]
opt_shape = [(1, 500, 80)]
max_shape = [(1, 3000, 80)]
input_names = ["input"]
return {'min_shape': min_shape, 'opt_shape': opt_shape, 'max_shape': max_shape, 'input_names': input_names}
def load_trt(self, flow_decoder_estimator_model, flow_decoder_onnx_model, trt_concurrent=1, dtype=torch.float16, streaming=False):
assert torch.cuda.is_available(), 'tensorrt only supports gpu!'
if not os.path.exists(flow_decoder_estimator_model) or os.path.getsize(flow_decoder_estimator_model) == 0:
opt_batch_size = 2
max_batch_size = 16
if streaming:
opt_batch_size, max_batch_size = 1, 1 # only support batch size 1 for streaming tts
trt_kwargs = self.get_trt_kwargs_dynamic_batch(opt_batch_size=opt_batch_size, max_batch_size=max_batch_size, streaming=streaming)
convert_onnx_to_trt(flow_decoder_estimator_model, trt_kwargs, flow_decoder_onnx_model, dtype)
del self.flow.decoder.estimator
import tensorrt as trt
with open(flow_decoder_estimator_model, 'rb') as f:
estimator_engine = trt.Runtime(trt.Logger(trt.Logger.INFO)).deserialize_cuda_engine(f.read())
assert estimator_engine is not None, 'failed to load trt {}'.format(flow_decoder_estimator_model)
self.flow.decoder.estimator = TrtContextWrapper(estimator_engine, trt_concurrent=trt_concurrent, device=self.device)
def get_trt_kwargs_dynamic_batch(self, opt_batch_size=2, max_batch_size=64, streaming=False):
if streaming:
min_shape = [(2, 80, 4), (2, 80, 4), (2, 80, 4), (2,), (2, 80), (16, 2, 1024, 2), (16, 2, 8, 0, 128)]
opt_shape = [
(opt_batch_size * 2, 80, 500), (opt_batch_size * 2, 80, 500), (opt_batch_size * 2, 80, 500),
(opt_batch_size * 2,), (opt_batch_size * 2, 80), (16, opt_batch_size * 2, 1024, 2),
(16, opt_batch_size * 2, 8, 100, 128)
]
max_shape = [
(max_batch_size * 2, 80, 3000), (max_batch_size * 2, 80, 3000), (max_batch_size * 2, 80, 3000),
(max_batch_size * 2,), (max_batch_size * 2, 80), (16, max_batch_size * 2, 1024, 2),
(16, max_batch_size * 2, 8, 1000, 128)
]
input_names = ["x", "mu", "cond", "t", "spks", "cnn_cache", "att_cache"]
else:
min_shape = [(2, 80, 4), (2, 1, 4), (2, 80, 4), (2, 80, 4), (2,), (2, 80)]
opt_shape = [
(opt_batch_size * 2, 80, 500), (opt_batch_size * 2, 1, 500), (opt_batch_size * 2, 80, 500),
(opt_batch_size * 2, 80, 500), (opt_batch_size * 2,), (opt_batch_size * 2, 80)
]
max_shape = [
(max_batch_size * 2, 80, 3000), (max_batch_size * 2, 1, 3000), (max_batch_size * 2, 80, 3000),
(max_batch_size * 2, 80, 3000), (max_batch_size * 2,), (max_batch_size * 2, 80)
]
input_names = ["x", "mask", "mu", "cond", "t", "spks"]
return {'min_shape': min_shape, 'opt_shape': opt_shape, 'max_shape': max_shape, 'input_names': input_names}
def prompt_audio_tokenization(self, prompt_audios_list: list[torch.Tensor]) -> list[list[int]]:
prompt_speech_tokens_list, prompt_speech_mels_list = [], []
for audio in prompt_audios_list:
assert len(audio.shape) == 1
log_mel = s3tokenizer.log_mel_spectrogram(audio) # [num_mels, T]
prompt_speech_mels_list.append(log_mel)
prompt_mels_for_llm, prompt_mels_lens_for_llm = s3tokenizer.padding(prompt_speech_mels_list)
prompt_speech_tokens, prompt_speech_tokens_lens = self.audio_tokenizer.quantize(
prompt_mels_for_llm.to(self.device), prompt_mels_lens_for_llm.to(self.device)
)
for i in range(len(prompt_speech_tokens)):
speech_tokens_i = prompt_speech_tokens[i, :prompt_speech_tokens_lens[i].item()].tolist()
prompt_speech_tokens_list.append(speech_tokens_i)
return prompt_speech_tokens_list
def get_spk_emb(self, prompt_audios_list: list[torch.Tensor]) -> torch.Tensor:
spk_emb_for_flow = []
for audio in prompt_audios_list:
assert len(audio.shape) == 1
spk_feat = kaldi.fbank(audio.unsqueeze(0), num_mel_bins=80, dither=0, sample_frequency=16000)
spk_feat = spk_feat - spk_feat.mean(dim=0, keepdim=True)
spk_emb = self.forward_spk_embedding(spk_feat)
spk_emb_for_flow.append(spk_emb)
spk_emb_for_flow = torch.tensor(spk_emb_for_flow)
if self.dtype != torch.float32:
spk_emb_for_flow = spk_emb_for_flow.to(self.dtype)
return spk_emb_for_flow
def get_prompt_mels(self, prompt_audios_list: list[torch.Tensor], prompt_audios_sample_rate: list[int]):
prompt_mels_for_flow = []
prompt_mels_lens_for_flow = []
for audio, sample_rate in zip(prompt_audios_list, prompt_audios_sample_rate):
assert len(audio.shape) == 1
audio = audio.unsqueeze(0)
if sample_rate != 24000:
audio = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=24000)(audio)
mel = mel_spectrogram(audio).transpose(1, 2).squeeze(0) # [T, num_mels]
mel_len = mel.shape[0]
prompt_mels_for_flow.append(mel)
prompt_mels_lens_for_flow.append(mel_len)
prompt_mels_for_flow = torch.nn.utils.rnn.pad_sequence(
prompt_mels_for_flow, batch_first=True, padding_value=0
) # [B, T', num_mels=80]
prompt_mels_lens_for_flow = torch.tensor(prompt_mels_lens_for_flow)
return prompt_mels_for_flow, prompt_mels_lens_for_flow
def forward_flow(self, prompt_speech_tokens_list: list[list[int]],
generated_speech_tokens_list: list[list[int]],
prompt_mels_for_flow: torch.Tensor,
prompt_mels_lens_for_flow: torch.Tensor,
spk_emb_for_flow: torch.Tensor):
batch_size = prompt_mels_for_flow.shape[0]
flow_inputs = []
flow_inputs_lens = []
for prompt_speech_tokens, generated_speech_tokens in zip(prompt_speech_tokens_list, generated_speech_tokens_list):
flow_inputs.append(torch.tensor(prompt_speech_tokens + generated_speech_tokens))
flow_inputs_lens.append(len(prompt_speech_tokens) + len(generated_speech_tokens))
flow_inputs = torch.nn.utils.rnn.pad_sequence(flow_inputs, batch_first=True, padding_value=0)
flow_inputs_lens = torch.tensor(flow_inputs_lens)
with torch.amp.autocast(self.device, dtype=torch.float16):
generated_mels, generated_mels_lens = self.flow.inference(
flow_inputs.to(self.device), flow_inputs_lens.to(self.device),
prompt_mels_for_flow.to(self.device), prompt_mels_lens_for_flow.to(self.device), spk_emb_for_flow.to(self.device), 10
)
return generated_mels, generated_mels_lens
def forward_hift(self, generated_mels: torch.Tensor, generated_mels_lens: torch.Tensor, prompt_mels_lens_for_flow: torch.Tensor):
batch_size = generated_mels.shape[0]
generated_wavs = []
for i in range(batch_size):
mel = generated_mels[i, :, prompt_mels_lens_for_flow[i].item():generated_mels_lens[i].item()].unsqueeze(0)
wav, _ = self.hift(speech_feat=mel)
generated_wavs.append(wav)
return generated_wavs
@torch.inference_mode()
def forward(
self, generated_speech_tokens_list: list[list[int]], prompt_audios_list: list[torch.Tensor], prompt_audios_sample_rate: list[int]
):
assert all(sample_rate == 16000 for sample_rate in prompt_audios_sample_rate)
prompt_speech_tokens_list, prompt_mels_for_flow, prompt_mels_lens_for_flow, spk_emb_for_flow = self.prepare_prompt_audio(prompt_audios_list, prompt_audios_sample_rate)
generated_mels, generated_mels_lens = self.forward_flow(
prompt_speech_tokens_list, generated_speech_tokens_list,
prompt_mels_for_flow, prompt_mels_lens_for_flow, spk_emb_for_flow
)
generated_wavs = self.forward_hift(generated_mels, generated_mels_lens, prompt_mels_lens_for_flow)
return generated_wavs
def prepare_prompt_audio(
self, prompt_audios_list: list[torch.Tensor], prompt_audios_sample_rate: list[int]
):
assert all(sample_rate == 16000 for sample_rate in prompt_audios_sample_rate)
prompt_speech_tokens_list = self.prompt_audio_tokenization(prompt_audios_list)
prompt_mels_for_flow, prompt_mels_lens_for_flow = self.get_prompt_mels(prompt_audios_list, prompt_audios_sample_rate)
spk_emb_for_flow = self.get_spk_emb(prompt_audios_list)
return prompt_speech_tokens_list, prompt_mels_for_flow, prompt_mels_lens_for_flow, spk_emb_for_flow
    def get_prompt_audio_cache_for_streaming_tts(
        self, prompt_speech_tokens_list, prompt_mels_for_flow, prompt_mels_lens_for_flow, spk_emb_for_flow
    ):
        """Prime the flow model's streaming caches with the prompt audio.

        Runs flow.setup_cache once over the prompt tokens/mels so later
        inference_chunk calls can attend to the prompt, and returns a cloned
        copy of the cache dict (safe to mutate per request).

        Note: prompt_mels_lens_for_flow is accepted but unused here.
        Note: mutates prompt_speech_tokens_list in place (entries become
        tensors).
        """
        assert len(prompt_speech_tokens_list) == 1, "only support batch size 1 for streaming tts"
        for i, prompt_speech_tokens in enumerate(prompt_speech_tokens_list):
            # Re-append the first 3 prompt tokens — presumably to satisfy the
            # flow model's lookahead requirement; TODO confirm.
            prompt_speech_tokens_list[i] = torch.tensor(prompt_speech_tokens + prompt_speech_tokens_list[i][:3])
        prompt_speech_tokens_tensor = torch.nn.utils.rnn.pad_sequence(prompt_speech_tokens_list, batch_first=True, padding_value=0)
        cache = self.flow.setup_cache(
            prompt_speech_tokens_tensor.to(self.device),
            prompt_mels_for_flow.to(self.device),
            spk_emb_for_flow.to(self.device),
            n_timesteps=10
        )
        new_cache = {k: v.clone() for k, v in cache.items()}
        # Hack: this is a hack to avoid in-place changes to the cache['estimator_att_cache'] and cache['estimator_cnn_cache']
        return new_cache
@torch.inference_mode()
def forward_streaming(
    self, generated_speech_tokens: list[int], last_chunk: bool, request_id: str, speaker_id: str, prompt_audio: torch.Tensor = None, prompt_audio_sample_rate: int = 16000
):
    """Vocode one chunk of speech tokens into a waveform segment (streaming).

    Per-speaker prompt features are cached in ``self.speaker_cache``;
    per-request flow state in ``self.streaming_flow_cache`` and vocoder
    state in ``self.hift_cache_dict``. Request state is dropped when
    ``last_chunk`` is True.

    Args:
        generated_speech_tokens: speech-token ids for this chunk.
        last_chunk: True on the final chunk of the request.
        request_id: key for per-request streaming state.
        speaker_id: key for cached prompt-audio features.
        prompt_audio: 16 kHz prompt waveform; required the first time a
            ``speaker_id`` is seen.
        prompt_audio_sample_rate: must be 16000.

    Returns:
        Waveform tensor for this chunk. Unless it is the last chunk, the
        final ``source_cache_len`` samples are held back for cross-fading
        with the next chunk.
    """
    if speaker_id not in self.speaker_cache:
        assert prompt_audio is not None, "prompt_audio is required for new speaker"
        assert prompt_audio_sample_rate == 16000
        prompt_speech_tokens_list, prompt_mels_for_flow, prompt_mels_lens_for_flow, spk_emb_for_flow = self.prepare_prompt_audio([prompt_audio], [prompt_audio_sample_rate])
        # Keep mel frames and tokens aligned at 2 mel frames per token.
        token_len = min(int(prompt_mels_for_flow.shape[1] / 2), len(prompt_speech_tokens_list[0]))
        prompt_mels_for_flow = prompt_mels_for_flow[:, :2 * token_len].contiguous()
        prompt_speech_tokens_list[0] = prompt_speech_tokens_list[0][:token_len]
        prompt_audio_dict = {'spk_emb_for_flow': spk_emb_for_flow, 'prompt_mels_for_flow': prompt_mels_for_flow}
        cache_dict = self.get_prompt_audio_cache_for_streaming_tts(prompt_speech_tokens_list, prompt_mels_for_flow, prompt_mels_lens_for_flow, spk_emb_for_flow)
        self.speaker_cache[speaker_id] = {'prompt_audio_dict': prompt_audio_dict, 'cache_dict': cache_dict}
    if request_id not in self.streaming_flow_cache:
        # New request: clone the speaker-level cache so concurrent requests
        # for the same speaker do not share mutable state.
        self.streaming_flow_cache[request_id] = {k: v.clone() for k, v in self.speaker_cache[speaker_id]['cache_dict'].items()}
        self.hift_cache_dict[request_id] = dict(
            mel=torch.zeros(1, 80, 0, device='cuda'),
            source=torch.zeros(1, 1, 0, device='cuda'),
            speech=torch.zeros(1, 0, device='cuda'),
        )
    current_request_cache = self.streaming_flow_cache[request_id]
    current_prompt_audio_dict = self.speaker_cache[speaker_id]['prompt_audio_dict']
    generated_speech_tokens = torch.tensor([generated_speech_tokens], dtype=torch.int32, device='cuda')
    chunk_mel, new_streaming_flow_cache = self.flow.inference_chunk(
        token=generated_speech_tokens,
        spk=current_prompt_audio_dict['spk_emb_for_flow'].to(self.device),
        cache=current_request_cache,
        last_chunk=last_chunk,
        n_timesteps=10,
    )
    self.streaming_flow_cache[request_id] = new_streaming_flow_cache
    # Bound attention-cache growth: keep the prompt frames plus only the
    # most recent 100 frames of generated context.
    if self.streaming_flow_cache[request_id]['estimator_att_cache'].shape[4] > (current_prompt_audio_dict['prompt_mels_for_flow'].shape[1] + 100):
        self.streaming_flow_cache[request_id]['estimator_att_cache'] = torch.cat([
            self.streaming_flow_cache[request_id]['estimator_att_cache'][:, :, :, :, :current_prompt_audio_dict['prompt_mels_for_flow'].shape[1]],
            self.streaming_flow_cache[request_id]['estimator_att_cache'][:, :, :, :, -100:],
        ], dim=4)
    hift_cache_mel = self.hift_cache_dict[request_id]['mel'].clone()
    hift_cache_source = self.hift_cache_dict[request_id]['source'].clone()
    hift_cache_speech = self.hift_cache_dict[request_id]['speech'].clone()
    # Prepend cached mel frames so the vocoder sees overlapping context.
    mel = torch.concat([hift_cache_mel, chunk_mel], dim=2).clone()
    speech, source = self.hift(mel, hift_cache_source)
    # overlap speech smooth
    if hift_cache_speech.shape[-1] > 0:
        speech = fade_in_out(speech, hift_cache_speech, self.speech_window)
    # update vocoder cache
    self.hift_cache_dict[request_id] = dict(
        mel=mel[..., -self.mel_cache_len:].clone().detach(),
        source=source[:, :, -self.source_cache_len:].clone().detach(),
        speech=speech[:, -self.source_cache_len:].clone().detach(),
    )
    if not last_chunk:
        # Hold back the tail that will be cross-faded with the next chunk.
        speech = speech[:, :-self.source_cache_len]
    if last_chunk:
        assert request_id in self.streaming_flow_cache
        self.streaming_flow_cache.pop(request_id)
        self.hift_cache_dict.pop(request_id)
    return speech
def collate_fn(batch):
    """Collate HF dataset rows into parallel lists for batch synthesis.

    Returns:
        (ids, target speech-token lists, prompt waveforms as float tensors,
        prompt sample rates), all in the batch's original order.
    """
    ids = [item['id'] for item in batch]
    generated_speech_tokens_list = [item['target_audio_cosy2_tokens'] for item in batch]
    prompt_audios_list = [
        torch.from_numpy(item['prompt_audio']['array']).float() for item in batch
    ]
    prompt_audios_sample_rate = [item['prompt_audio']['sampling_rate'] for item in batch]
    return ids, generated_speech_tokens_list, prompt_audios_list, prompt_audios_sample_rate
def get_args(argv=None):
    """Parse command-line arguments for the batch token2wav benchmark.

    Args:
        argv: Optional list of argument strings. Defaults to ``sys.argv[1:]``
            (backward compatible with the original zero-argument call); an
            explicit list makes the parser usable from tests and other code.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--enable-trt", action="store_true")
    parser.add_argument("--model-dir", type=str, default="./Step-Audio-2-mini/token2wav")
    parser.add_argument("--batch-size", type=int, default=1)
    parser.add_argument("--output-dir", type=str, default="generated_wavs")
    parser.add_argument("--huggingface-dataset-split", type=str, default="wenetspeech4tts")
    parser.add_argument("--warmup", type=int, default=3, help="Number of warmup epochs, performance statistics will only be collected from the last epoch")
    return parser.parse_args(argv)
if __name__ == "__main__":
    # Batch (non-streaming) token2wav benchmark: synthesize every dataset row
    # args.warmup times and print per-epoch wall-clock time.
    args = get_args()
    model = CosyVoice2_Token2Wav(model_dir=args.model_dir, enable_trt=args.enable_trt)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    dataset_name = "yuekai/seed_tts_cosy2"
    dataset = load_dataset(dataset_name, split=args.huggingface_dataset_split, trust_remote_code=True)
    data_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, collate_fn=collate_fn, num_workers=0)
    # Each pass over the dataset is timed; later passes run with warm caches.
    for _ in range(args.warmup):
        start_time = time.time()
        for batch in data_loader:
            ids, generated_speech_tokens_list, prompt_audios_list, prompt_audios_sample_rate = batch
            generated_wavs = model(generated_speech_tokens_list, prompt_audios_list, prompt_audios_sample_rate)
            # Output waveforms are written at 24 kHz, one file per utterance id.
            for id, wav in zip(ids, generated_wavs):
                torchaudio.save(f"{args.output_dir}/{id}.wav", wav.cpu(), 24000)
        end_time = time.time()
        epoch_time = end_time - start_time
        print(f"Measurement epoch time taken: {epoch_time:.4f} seconds")
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/model_repo/token2wav_dit/1/token2wav_dit.py",
"license": "Apache License 2.0",
"lines": 434,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/offline_inference.py | # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Example Usage
CUDA_VISIBLE_DEVICES=0 \
python3 offline_inference.py \
--output-dir $output_dir \
--llm-model-name-or-path $huggingface_model_local_dir \
--token2wav-path $model_scope_model_local_dir \
--backend $backend \
--batch-size $batch_size --token2wav-batch-size $token2wav_batch_size \
--engine-dir $trt_engines_dir \
--split-name ${dataset} || exit 1
"""
import argparse
import json
import os
import sys
import torch
import torch.distributed as dist
import torch.nn.functional as F
import torchaudio
from cosyvoice.utils.file_utils import load_wav
from datasets import load_dataset
from transformers import AutoTokenizer
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
import soundfile as sf
import s3tokenizer
from functools import partial
import time
import requests
import asyncio
import httpx
sys.path.append("/workspace/CosyVoice/third_party/Matcha-TTS")
try:
torch.multiprocessing.set_start_method("spawn")
except RuntimeError:
pass
async def send_request_async(client, url, payload):
    """POST one chat-completion payload and return the generated message text.

    Raises on non-2xx HTTP status.
    """
    resp = await client.post(url, json=payload, timeout=None)
    resp.raise_for_status()
    body = resp.json()
    return body['choices'][0]['message']['content']
async def send_batch_requests_async(api_base, model_name, chats, temperature, top_p, top_k):
    """Fan out one async chat-completion request per chat and gather results.

    Results are returned in the same order as ``chats``.
    """
    def build_payload(chat):
        # One OpenAI-style chat-completion request body per conversation.
        return {
            "model": model_name,
            "messages": chat,
            "max_tokens": 2048,
            "temperature": temperature,
            "top_p": top_p,
            "top_k": top_k,
            "repetition_penalty": 1.1,
            "stop": ["<|eos1|>", "<|eos|>"],
            "stream": False,
        }

    async with httpx.AsyncClient() as client:
        tasks = [send_request_async(client, api_base, build_payload(chat)) for chat in chats]
        return await asyncio.gather(*tasks)
def extract_speech_ids(speech_tokens_str):
    """Extract integer speech IDs from token strings shaped like ``<|s_23456|>``.

    Tokens that do not match the pattern are reported on stdout and skipped.
    """
    ids = []
    for token_str in speech_tokens_str:
        if token_str.startswith('<|s_') and token_str.endswith('|>'):
            # strip the '<|s_' prefix and '|>' suffix, keep the number
            ids.append(int(token_str[4:-2]))
        else:
            print(f"Unexpected token: {token_str}")
    return ids
def convert_cosy2_tokens_to_speech_id_str(cosy2_tokens):
    """Render CosyVoice2 token IDs as a ``<|s_N|>`` concatenated string.

    Uses ``str.join`` instead of the original repeated ``+=`` concatenation,
    which is quadratic in the number of tokens.

    Args:
        cosy2_tokens: iterable of integer token IDs.

    Returns:
        Single string like ``"<|s_1|><|s_23456|>"`` (empty string for no tokens).
    """
    return "".join(f"<|s_{token}|>" for token in cosy2_tokens)
def get_args(argv=None):
    """Parse command-line arguments for the offline LLM + token2wav pipeline.

    Args:
        argv: Optional list of argument strings. Defaults to ``sys.argv[1:]``
            (backward compatible with the original zero-argument call); an
            explicit list makes the parser usable from tests and other code.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="Speech generation using LLM + CosyVoice2")
    parser.add_argument(
        "--split-name",
        type=str,
        default="wenetspeech4tts",
        help="huggingface dataset split name, see yuekai/CV3-Eval, yuekai/seed_tts_cosy2",
    )
    parser.add_argument(
        "--output-dir", required=True, type=str, help="dir to save result"
    )
    parser.add_argument(
        "--batch-size",
        default=1,
        type=int,
        help="batch size (per-device) for inference",
    )
    parser.add_argument(
        "--token2wav-batch-size",
        default=1,
        type=int,
        help="batch size (per-device) for inference",
    )
    parser.add_argument(
        "--num-workers", type=int, default=0, help="workers for dataloader"
    )
    parser.add_argument(
        "--prefetch", type=int, default=None, help="prefetch for dataloader"
    )
    parser.add_argument(
        "--llm-model-name-or-path",
        required=True,
        type=str,
        help="LLM model path (includes both model and tokenizer)",
    )
    parser.add_argument(
        "--token2wav-path",
        required=True,
        type=str,
        help="CosyVoice2 token2wav model path",
    )
    parser.add_argument(
        "--prompt-text",
        type=str,
        default=None,
        help="The prompt text for CosyVoice2",
    )
    parser.add_argument(
        "--prompt-speech-path",
        type=str,
        default=None,
        help="The path to the prompt speech for CosyVoice2",
    )
    parser.add_argument(
        "--top-p",
        type=float,
        default=0.95,
        help="top p for sampling",
    )
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.8,
        help="temperature for sampling",
    )
    parser.add_argument(
        "--top-k",
        type=int,
        default=50,
        help="top k for sampling",
    )
    parser.add_argument(
        "--backend",
        type=str,
        default="hf",
        choices=["hf", "trtllm", "vllm", "trtllm-serve"],
        help="Backend to use for LLM inference: 'hf' for HuggingFace, 'trtllm' for TensorRT-LLM, 'vllm' for VLLM",
    )
    parser.add_argument(
        "--engine-dir",
        type=str,
        default=None,
        help="TensorRT-LLM engine directory (required when backend is 'trtllm')",
    )
    parser.add_argument(
        "--kv-cache-free-gpu-memory-fraction",
        type=float,
        default=0.6,
        help="Fraction of GPU memory to free for KV cache (TensorRT-LLM only)",
    )
    parser.add_argument(
        "--openai-api-base",
        type=str,
        default="http://localhost:8000/v1/chat/completions",
        help="OpenAI API base URL (for trtllm-serve backend)",
    )
    parser.add_argument(
        "--openai-model-name",
        type=str,
        default="trt_engines_bfloat16",
        help="Model name to use with OpenAI API (for trtllm-serve backend)",
    )
    args = parser.parse_args(argv)
    return args
def data_collator(batch, tokenizer, s3_tokenizer):
    """Collate one batch for LLM speech-token generation.

    Per sample this builds: the chat-templated LLM input ids, the prompt
    audio resampled to 16 kHz, and the prompt's CosyVoice2 speech tokens
    (taken from the dataset row when present, otherwise computed with
    ``s3_tokenizer``). Per-stage wall-clock timings are accumulated and
    returned alongside the data.

    Bug fix vs. the original: ``full_text`` was appended to
    ``full_text_list`` BEFORE punctuation stripping, so the cleaned text was
    silently discarded (strings are immutable); the append now happens after
    cleaning. A stray debug ``print(ref_audio_org.shape)`` and the unused
    ``collator_start_time`` local were also removed.

    Args:
        batch: list of dataset rows with ``prompt_text``, ``target_text``,
            ``prompt_audio`` and optionally ``prompt_audio_cosy2_tokens``.
        tokenizer: HF tokenizer whose chat template is applied.
        s3_tokenizer: s3tokenizer model used to quantize prompt audio when
            the row carries no precomputed tokens; may be ``None`` otherwise.

    Returns:
        dict with input ids, sample ids, prompt text/audio lists, the
        templated prompt strings, timing stats, and the raw chat messages.
    """
    total_audio_processing_time = 0
    total_speech_tokenization_time = 0
    total_text_tokenization_time = 0
    target_sample_rate = 16000  # CosyVoice2 uses 16kHz for prompt audio
    device = s3_tokenizer.device if s3_tokenizer is not None else torch.device("cpu")
    input_ids_list, prompt_audio_list, prompt_text_list = [], [], []
    prompt_text_after_apply_template_list = []
    mels, prompt_audio_cosy2tokens_list, full_text_list = [], [], []
    chat_list = []
    for _, item in enumerate(batch):
        audio_processing_start_time = time.time()
        prompt_text, target_text = (
            item["prompt_text"],
            item["target_text"],
        )
        prompt_text_list.append(prompt_text)
        full_text = prompt_text + target_text
        # remove the unnecessary punctuation for cosyvoice3 zero_shot_zh dataset
        puncts = ['"', '(', ')', '“', '”', '‘', '(', ')', '\'']
        for p in puncts:
            if p in full_text:
                full_text = full_text.replace(p, '')
                print(f"removed {p} from {full_text}")
        # Append AFTER cleaning so the stripped text is what reaches the LLM.
        full_text_list.append(full_text)
        # get prompt audio for CosyVoice2 (convert to 16kHz)
        ref_audio_org, ref_sr = (
            item["prompt_audio"]["array"],
            item["prompt_audio"]["sampling_rate"],
        )
        ref_audio_org = torch.from_numpy(ref_audio_org).float().unsqueeze(0)
        if ref_sr != target_sample_rate:
            resampler = torchaudio.transforms.Resample(ref_sr, target_sample_rate)
            ref_audio = resampler(ref_audio_org)
        else:
            ref_audio = ref_audio_org
        prompt_audio_list.append(ref_audio)
        audio_processing_end_time = time.time()
        total_audio_processing_time += audio_processing_end_time - audio_processing_start_time
        speech_tokenization_start_time = time.time()
        if "prompt_audio_cosy2_tokens" in item:
            prompt_audio_cosy2tokens = item["prompt_audio_cosy2_tokens"]
            prompt_audio_cosy2tokens_list.append(prompt_audio_cosy2tokens)
        else:
            # No precomputed tokens: queue the mel for batched quantization below.
            mels.append(s3tokenizer.log_mel_spectrogram(ref_audio.squeeze(0)))
    if len(mels) > 0:
        mels, mels_lens = s3tokenizer.padding(mels)
        codes, codes_lens = s3_tokenizer.quantize(mels.to(device), mels_lens.to(device))
        for i in range(len(codes)):
            prompt_audio_cosy2tokens_list.append(codes[i, :codes_lens[i].item()])
    speech_tokenization_end_time = time.time()
    total_speech_tokenization_time += speech_tokenization_end_time - speech_tokenization_start_time
    for i, prompt_audio_cosy2tokens in enumerate(prompt_audio_cosy2tokens_list):
        text_tokenization_start_time = time.time()
        prompt_audio_cosy2_id_str = convert_cosy2_tokens_to_speech_id_str(prompt_audio_cosy2tokens)
        # Create chat template for LLM generation
        chat = [
            {"role": "user", "content": full_text_list[i]},
            {"role": "assistant", "content": prompt_audio_cosy2_id_str}
        ]
        chat_list.append(chat)
        assert 'system' not in tokenizer.chat_template, "system is not allowed in the chat template"
        input_ids = tokenizer.apply_chat_template(
            chat,
            tokenize=True,
            return_tensors='pt',
            continue_final_message=True
        )
        input_ids_list.append(input_ids.squeeze(0))
        prompt_text_after_apply_template = f"<|sos|>{full_text_list[i]}<|task_id|>{prompt_audio_cosy2_id_str}"
        prompt_text_after_apply_template_list.append(prompt_text_after_apply_template)
        text_tokenization_end_time = time.time()
        total_text_tokenization_time += text_tokenization_end_time - text_tokenization_start_time
    ids = [item["id"] for item in batch]
    return {
        "input_ids": input_ids_list,
        "ids": ids,
        "prompt_text": prompt_text_list,
        "prompt_audio_list": prompt_audio_list,
        "prompt_text_after_apply_template": prompt_text_after_apply_template_list,
        "audio_processing_time": total_audio_processing_time,
        "speech_tokenization_time": total_speech_tokenization_time,
        "text_tokenization_time": total_text_tokenization_time,
        "chat_list": chat_list
    }
def init_distributed():
    """Initialize the NCCL process group from torchrun environment variables.

    Returns:
        Tuple of (world_size, local_rank, rank).
    """
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    local_rank = int(os.environ.get("LOCAL_RANK", 0))
    rank = int(os.environ.get("RANK", 0))
    print(
        f"Inference on multiple gpus, this gpu {local_rank}"
        f", rank {rank}, world_size {world_size}"
    )
    # Pin this process to its GPU before creating the process group.
    torch.cuda.set_device(local_rank)
    dist.init_process_group("nccl")
    return world_size, local_rank, rank
def main(args):
    """Offline TTS pipeline: generate speech tokens with an LLM backend, then
    vocode them to waveforms with a CosyVoice2 token2wav model.

    Runs the whole dataset 3 times (later passes act as warmed-up
    measurements) and writes per-stage timing statistics to
    ``<output-dir>/log.txt`` after each pass.
    """
    os.makedirs(args.output_dir, exist_ok=True)
    assert torch.cuda.is_available()
    # Single-GPU run: init_distributed() is not used here.
    local_rank, world_size, rank = 0, 1, 0
    device = torch.device(f"cuda:{local_rank}")
    tokenizer = AutoTokenizer.from_pretrained(args.llm_model_name_or_path)
    # --- LLM backend construction (backend modules are imported in __main__) ---
    if args.backend == "hf":
        model = AutoModelForCausalLM.from_pretrained(args.llm_model_name_or_path)
        model.eval()
        model.to(device)
        runner = None
    elif args.backend == "trtllm":
        if args.engine_dir is None:
            raise ValueError("--engine-dir is required when backend is 'trtllm'")
        runtime_rank = tensorrt_llm.mpi_rank()
        model = None
        runner_kwargs = dict(
            engine_dir=args.engine_dir,
            rank=runtime_rank,
            max_output_len=2048,
            enable_context_fmha_fp32_acc=False,
            max_batch_size=args.batch_size,
            max_input_len=512,
            kv_cache_free_gpu_memory_fraction=args.kv_cache_free_gpu_memory_fraction,
            cuda_graph_mode=False,
            gather_generation_logits=False,
        )
        runner = ModelRunnerCpp.from_dir(**runner_kwargs)
    elif args.backend == "vllm":
        model = LLM(model=args.llm_model_name_or_path, gpu_memory_utilization=0.4)
        runner = None
    elif args.backend == "trtllm-serve":
        # Remote server: neither a local model nor a runner is needed.
        model = None
        runner = None
    else:
        raise ValueError(f"Unsupported backend: {args.backend}")
    # The token2wav implementation is selected from the checkpoint dir name.
    if 'Step-Audio-2-mini' in args.token2wav_path:
        from token2wav_dit import CosyVoice2_Token2Wav
    else:
        assert 'CosyVoice2-0.5B' in args.token2wav_path
        from token2wav import CosyVoice2_Token2Wav
    token2wav_model = CosyVoice2_Token2Wav(
        model_dir=args.token2wav_path, enable_trt=True, device_id=local_rank
    )
    if args.prompt_speech_path:
        prompt_speech_16k = load_wav(args.prompt_speech_path, 16000)
    else:
        prompt_speech_16k = None
    # 'zero'-named splits carry no precomputed prompt tokens, so an s3
    # speech tokenizer is loaded for the collator to compute them.
    s3_tokenizer = s3tokenizer.load_model(f"{args.token2wav_path}/speech_tokenizer_v2.onnx").to(device) if 'zero' in args.split_name else None
    dataset_name = "yuekai/CV3-Eval" if 'zero' in args.split_name else "yuekai/seed_tts_cosy2"
    dataset = load_dataset(
        dataset_name,
        split=args.split_name,
        trust_remote_code=True,
    )
    sampler = None
    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        sampler=sampler,
        shuffle=False,
        num_workers=args.num_workers,
        prefetch_factor=args.prefetch,
        collate_fn=partial(data_collator, tokenizer=tokenizer, s3_tokenizer=s3_tokenizer),
    )
    # Three full passes; per-pass counters are reset and log.txt is rewritten.
    for _ in range(3):
        print(f"Running {_} times")
        total_llm_time = 0
        total_token2wav_time = 0
        total_data_load_time = 0
        total_llm_post_processing_time = 0
        total_audio_save_time = 0
        total_audio_processing_time_in_collator = 0
        total_speech_tokenization_time_in_collator = 0
        total_text_tokenization_time_in_collator = 0
        total_audio_samples = 0
        start_time = time.time()
        total_steps = len(dataset)
        if rank == 0:
            progress_bar = tqdm(total=total_steps, desc="Processing", unit="wavs")
        last_batch_end_time = time.time()
        for batch in dataloader:
            data_loaded_time = time.time()
            total_data_load_time += data_loaded_time - last_batch_end_time
            # Timings measured inside the collator are forwarded with the batch.
            total_audio_processing_time_in_collator += batch["audio_processing_time"]
            total_speech_tokenization_time_in_collator += batch["speech_tokenization_time"]
            total_text_tokenization_time_in_collator += batch["text_tokenization_time"]
            with torch.no_grad():
                llm_start_time = time.time()
                if args.backend == "hf":
                    input_ids_list = batch["input_ids"]
                    if len(input_ids_list) == 1:
                        input_ids = input_ids_list[0].unsqueeze(0)
                        attention_mask = torch.ones_like(input_ids)
                    else:
                        # Right-pad to the longest sequence in the batch.
                        max_len = max([len(input_ids) for input_ids in input_ids_list])
                        input_ids_list_new = [
                            torch.cat([input_ids, torch.full((max_len - len(input_ids),), tokenizer.pad_token_id)])
                            for input_ids in input_ids_list
                        ]
                        input_ids = torch.stack(input_ids_list_new)
                        attention_mask = torch.zeros_like(input_ids)
                        for i in range(len(input_ids_list)):
                            attention_mask[i, :len(input_ids_list[i])] = 1
                    input_ids = input_ids.to(device)
                    outputs = model.generate(
                        input_ids=input_ids.to(device),
                        attention_mask=attention_mask.to(device),
                        max_new_tokens=2048,
                        do_sample=True,
                        top_p=args.top_p,
                        temperature=args.temperature,
                        repetition_penalty=1.1,
                        top_k=args.top_k,
                    )
                    torch.cuda.synchronize()
                elif args.backend == "trtllm":
                    batch_input_ids = list(batch["input_ids"])
                    input_lengths = [x.size(0) for x in batch_input_ids]
                    end_id = tokenizer.convert_tokens_to_ids("<|eos1|>") if "<|eos1|>" in tokenizer.get_vocab() else tokenizer.eos_token_id
                    print(f"end_id: {end_id}, tokenizer.eos_token_id: {tokenizer.eos_token_id} ========================")
                    outputs = runner.generate(
                        batch_input_ids=batch_input_ids,
                        max_new_tokens=2048,
                        end_id=end_id,
                        pad_id=end_id,
                        temperature=args.temperature,
                        top_k=args.top_k,
                        top_p=args.top_p,
                        repetition_penalty=1.1,
                        num_return_sequences=1,
                        streaming=False,
                        output_sequence_lengths=True,
                        output_generation_logits=False,
                        return_dict=True,
                        return_all_generated_tokens=False
                    )
                    torch.cuda.synchronize()
                    output_ids, sequence_lengths = outputs["output_ids"], outputs["sequence_lengths"]
                    # NOTE(review): `_` here clobbers the epoch loop variable;
                    # harmless since the for-loop rebinds it each iteration.
                    num_output_sents, num_beams, _ = output_ids.size()
                    assert num_beams == 1
                    beam = 0
                    batch_size = len(batch["input_ids"])
                    num_return_sequences = num_output_sents // batch_size
                    assert num_return_sequences == 1
                    # Flatten to python lists of prompt+generated ids (beam 0).
                    outputs = []
                    for i in range(batch_size * num_return_sequences):
                        batch_idx = i // num_return_sequences
                        seq_idx = i % num_return_sequences
                        output_begin = input_lengths[batch_idx]
                        output_end = sequence_lengths[i][beam]
                        outputs_i = output_ids[i][beam][:output_end].tolist()
                        outputs.append(outputs_i)
                elif args.backend == "vllm":
                    input_ids_list = [ids.tolist() for ids in batch["input_ids"]]
                    sampling_params = SamplingParams(
                        temperature=args.temperature,
                        top_p=args.top_p,
                        top_k=args.top_k,
                        repetition_penalty=1.1,
                        max_tokens=2048,
                    )
                    outputs = model.generate(prompt_token_ids=input_ids_list, sampling_params=sampling_params)
                    print(outputs)
                    # Prepend the prompt ids so downstream slicing by input
                    # length works the same as for the other backends.
                    for j, output in enumerate(outputs):
                        outputs[j] = input_ids_list[j] + output.outputs[0].token_ids
                elif args.backend == "trtllm-serve":
                    if args.batch_size > 1:
                        # Concurrent requests against the OpenAI-compatible server.
                        outputs = asyncio.run(send_batch_requests_async(
                            args.openai_api_base,
                            args.openai_model_name,
                            batch["chat_list"],
                            args.temperature,
                            args.top_p,
                            args.top_k,
                        ))
                    else:
                        outputs = []
                        for chat in batch["chat_list"]:
                            payload = {
                                "model": args.openai_model_name,
                                "messages": chat,
                                "max_tokens": 2048,
                                "temperature": args.temperature,
                                "top_p": args.top_p,
                                "top_k": args.top_k,
                                "repetition_penalty": 1.1,
                                "stop": ["<|eos1|>", "<|eos|>"],
                                "stream": False,
                            }
                            response = requests.post(args.openai_api_base, json=payload)
                            response.raise_for_status()
                            response_json = response.json()
                            generated_content = response_json['choices'][0]['message']['content']
                            outputs.append(generated_content)
                llm_end_time = time.time()
                total_llm_time += (llm_end_time - llm_start_time)
                # --- post-process LLM output into speech-id lists ---
                items_for_token_2wav = []
                for i in range(len(batch["ids"])):
                    llm_post_processing_start_time = time.time()
                    if args.backend == "trtllm-serve":
                        # Server returns raw text: split fused tokens and
                        # restore the '<'/'>' stripped by the split.
                        speech_tokens_str = outputs[i].strip().split('><')
                        if len(speech_tokens_str) > 1:
                            speech_tokens_str = [
                                t if t.startswith('<') else '<' + t for t in speech_tokens_str
                            ]
                            speech_tokens_str = [
                                t if t.endswith('>') else t + '>' for t in speech_tokens_str
                            ]
                        speech_ids = extract_speech_ids(speech_tokens_str)
                    else:
                        # Token-id backends: drop the prompt, decode the rest.
                        input_length = len(batch["input_ids"][i])
                        generated_ids = outputs[i][input_length:]
                        speech_tokens_str = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
                        speech_ids = extract_speech_ids(speech_tokens_str)
                    print(i, speech_ids)
                    if len(speech_ids) == 0:
                        print(f"Warning: No speech tokens generated for sample {batch['ids'][i]}, skipping")
                        continue
                    # A fixed --prompt-text/--prompt-speech-path overrides the
                    # per-sample prompt from the dataset.
                    if args.prompt_text is not None:
                        current_prompt_text = args.prompt_text
                        current_prompt_audio = prompt_speech_16k
                    else:
                        current_prompt_text = batch["prompt_text"][i]
                        current_prompt_audio = batch["prompt_audio_list"][i]
                    llm_post_processing_end_time = time.time()
                    total_llm_post_processing_time += llm_post_processing_end_time - llm_post_processing_start_time
                    if current_prompt_audio is not None:
                        items_for_token_2wav.append({
                            "speech_ids": speech_ids,
                            "prompt_audio": current_prompt_audio.squeeze(0),
                            "id": batch["ids"][i]
                        })
                    else:
                        print(f"Warning: No prompt audio available for sample {batch['ids'][i]}, skipping")
                # --- vocode in token2wav-sized sub-batches and save wavs ---
                for i in range(0, len(items_for_token_2wav), args.token2wav_batch_size):
                    t2w_batch = items_for_token_2wav[i:i + args.token2wav_batch_size]
                    if not t2w_batch:
                        continue
                    t2w_generated_speech_tokens_list = [item["speech_ids"] for item in t2w_batch]
                    t2w_prompt_audios_list = [item["prompt_audio"] for item in t2w_batch]
                    t2w_prompt_audios_sample_rate = [16000] * len(t2w_batch)
                    t2w_ids = [item["id"] for item in t2w_batch]
                    token2wav_start_time = time.time()
                    generated_wavs = token2wav_model(
                        t2w_generated_speech_tokens_list,
                        t2w_prompt_audios_list,
                        t2w_prompt_audios_sample_rate,
                    )
                    token2wav_end_time = time.time()
                    total_token2wav_time += (token2wav_end_time - token2wav_start_time)
                    audio_save_start_time = time.time()
                    for j, audio_hat in enumerate(generated_wavs):
                        generated_wave = audio_hat.squeeze().cpu().numpy()
                        total_audio_samples += len(generated_wave)
                        target_sample_rate = 24000
                        utt = t2w_ids[j]
                        sf.write(f"{args.output_dir}/{utt}.wav", generated_wave, target_sample_rate)
                        print(f"Generated audio for sample {utt} with {len(t2w_generated_speech_tokens_list[j])} tokens")
                    audio_save_end_time = time.time()
                    total_audio_save_time += audio_save_end_time - audio_save_start_time
            if rank == 0:
                progress_bar.update(world_size * len(batch["ids"]))
            last_batch_end_time = time.time()
        if rank == 0:
            progress_bar.close()
        end_time = time.time()
        target_sample_rate = 24000
        total_audio_duration_seconds = total_audio_samples / target_sample_rate
        # Write this pass's timing breakdown (overwrites the previous pass's log).
        log_file_path = os.path.join(args.output_dir, "log.txt")
        with open(log_file_path, 'w') as f:
            args_dict = vars(args)
            log_data = {
                "args": args_dict,
                "data_load_time_seconds": total_data_load_time,
                "audio_processing_time_in_collator_seconds": total_audio_processing_time_in_collator,
                "speech_tokenization_time_in_collator_seconds": total_speech_tokenization_time_in_collator,
                "text_tokenization_time_in_collator_seconds": total_text_tokenization_time_in_collator,
                "llm_time_seconds": total_llm_time,
                "llm_post_processing_time_seconds": total_llm_post_processing_time,
                "token2wav_time_seconds": total_token2wav_time,
                "audio_save_time_seconds": total_audio_save_time,
                "total_audio_duration_seconds": total_audio_duration_seconds,
                "pipeline_time_seconds": end_time - start_time,
            }
            print(log_data)
            f.write(json.dumps(log_data, indent=4))
        print(f"Metrics logged to {log_file_path}")
if __name__ == "__main__":
    args = get_args()
    # Import only the selected backend so the other heavy frameworks are
    # never loaded; the names become module-level globals used in main().
    if args.backend == "vllm":
        from vllm import LLM, SamplingParams
    elif args.backend == "trtllm":
        import tensorrt_llm
        from tensorrt_llm.runtime import ModelRunnerCpp
    elif args.backend == "hf":
        from transformers import AutoModelForCausalLM
    elif args.backend == "trtllm-serve":
        # Remote server backend: nothing to import locally.
        pass
    else:
        raise ValueError(f"Unsupported backend: {args.backend}")
    main(args)
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/offline_inference.py",
"license": "Apache License 2.0",
"lines": 594,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/streaming_inference.py | import torch
import os
import argparse
from datasets import load_dataset
from torch.utils.data import DataLoader
import numpy as np
import torchaudio
import time
from token2wav_dit import CosyVoice2_Token2Wav
import soundfile as sf
def collate_fn(batch):
    """Collate dataset rows into parallel lists (streaming variant).

    In addition to the batch-synthesis fields, also returns each row's
    prompt speech tokens and prompt text.
    """
    ids = [item['id'] for item in batch]
    generated_speech_tokens_list = [item['target_audio_cosy2_tokens'] for item in batch]
    prompt_audios_list = [
        torch.from_numpy(item['prompt_audio']['array']).float() for item in batch
    ]
    prompt_audios_sample_rate = [item['prompt_audio']['sampling_rate'] for item in batch]
    prompt_speech_tokens_list = [item['prompt_audio_cosy2_tokens'] for item in batch]
    prompt_text_list = [item['prompt_text'] for item in batch]
    return ids, generated_speech_tokens_list, prompt_audios_list, prompt_audios_sample_rate, prompt_speech_tokens_list, prompt_text_list
def get_args(argv=None):
    """Parse command-line arguments for the streaming token2wav benchmark.

    Args:
        argv: Optional list of argument strings. Defaults to ``sys.argv[1:]``
            (backward compatible with the original zero-argument call); an
            explicit list makes the parser usable from tests and other code.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--enable-trt", action="store_true")
    parser.add_argument("--model-dir", type=str, default="./Step-Audio-2-mini/token2wav")
    parser.add_argument("--batch-size", type=int, default=1)
    parser.add_argument("--output-dir", type=str, default="generated_wavs")
    parser.add_argument("--huggingface-dataset-split", type=str, default="wenetspeech4tts")
    parser.add_argument("--dataset-name", type=str, default="yuekai/seed_tts_cosy2")
    parser.add_argument("--strategy", type=str, default="equal", choices=["equal", "exponential"])
    return parser.parse_args(argv)
if __name__ == "__main__":
    # Streaming token2wav benchmark: feed speech tokens chunk-by-chunk and
    # time three passes (warmup, cold speaker cache, warm speaker cache).
    args = get_args()
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    dataset_name = args.dataset_name
    dataset = load_dataset(dataset_name, split=args.huggingface_dataset_split, trust_remote_code=True)
    data_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, collate_fn=collate_fn, num_workers=0)
    token2wav_model = CosyVoice2_Token2Wav(model_dir=args.model_dir, enable_trt=args.enable_trt, streaming=True)
    # Chunking parameters: tokens consumed per flow-matching call.
    CHUNK_SIZE = 25
    token_frame_rate = 25
    OVERLAP_SIZE = 0
    warmup_times = 3
    for _ in range(warmup_times):
        start_time = time.time()
        total_forward_count = 0
        for batch in data_loader:
            tts_speech_list = []
            ids, generated_speech_tokens_list, prompt_audios_list, prompt_audios_sample_rate, prompt_speech_tokens_list, prompt_text_list = batch
            # Only the first element is used: streaming assumes batch size 1.
            id, generated_speech_tokens, prompt_audio, prompt_audio_sample_rate = ids[0], generated_speech_tokens_list[0], prompt_audios_list[0], prompt_audios_sample_rate[0]
            assert prompt_audio_sample_rate == 16000
            prompt_text = prompt_text_list[0]
            prompt_speech_tokens = prompt_speech_tokens_list[0]
            semantic_token_ids_arr, token_offset = [], 0
            flow_prompt_speech_token_len = len(prompt_speech_tokens)
            buffer = generated_speech_tokens
            output_wavs = []
            chunk_index = 0
            while True:
                if args.strategy == "equal":
                    this_chunk_size = CHUNK_SIZE
                elif args.strategy == "exponential":
                    # Chunk sizes double each step: 25, 50, 100, ... tokens.
                    this_chunk_size = token_frame_rate * (2 ** chunk_index)
                if len(buffer) >= this_chunk_size + token2wav_model.flow.pre_lookahead_len:
                    # Enough tokens for a full chunk plus the flow lookahead.
                    wavs = token2wav_model.forward_streaming(
                        buffer[:this_chunk_size + token2wav_model.flow.pre_lookahead_len],
                        False, request_id=id, speaker_id=f"{id}", prompt_audio=prompt_audio,
                        prompt_audio_sample_rate=prompt_audio_sample_rate
                    )
                    buffer = buffer[this_chunk_size - OVERLAP_SIZE:]
                    output_wavs.append(wavs)
                    total_forward_count += 1
                    chunk_index += 1
                else:
                    # Remainder is smaller than a full chunk: flush as last chunk.
                    wavs = token2wav_model.forward_streaming(
                        buffer, True, request_id=id, speaker_id=f"{id}",
                        prompt_audio=prompt_audio, prompt_audio_sample_rate=prompt_audio_sample_rate
                    )
                    output_wavs.append(wavs)
                    total_forward_count += 1
                    # chunk_index += 1
                    break
            for i, wav in enumerate(output_wavs):
                output_wavs[i] = wav.cpu().numpy().squeeze()
            audios = output_wavs
            # Chunks were already cross-faded inside forward_streaming; plain
            # concatenation reconstructs the full utterance.
            reconstructed_audio = np.concatenate(audios)
            sf.write(os.path.join(args.output_dir, f"{id}.wav"), reconstructed_audio, 24000, "PCM_16")
        end_time = time.time()
        if _ == 0:
            # After warmup, clear the speaker cache so pass 1 measures cold-start cost.
            token2wav_model.speaker_cache = {}
            print(f"Warmup time: {end_time - start_time} seconds")
            print("clear speaker cache")
        elif _ == 1:
            print(f"Cost time without speaker cache: {end_time - start_time} seconds")
        else:
            print(f"Cost time with speaker cache: {end_time - start_time} seconds")
        print(f"Total flow matching forward calls: {total_forward_count}")
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/streaming_inference.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/token2wav.py | # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Example Usage
CUDA_VISIBLE_DEVICES=0 \
python3 token2wav.py --enable-trt || exit 1
"""
import torch
from flashcosyvoice.modules.flow import CausalMaskedDiffWithXvec
from flashcosyvoice.modules.hifigan import HiFTGenerator
from flashcosyvoice.utils.audio import mel_spectrogram
import torchaudio.compliance.kaldi as kaldi
import onnxruntime
import s3tokenizer
from torch.utils.data import DataLoader
from datasets import load_dataset
import torchaudio
import os
import logging
import argparse
import queue
import time
def convert_onnx_to_trt(trt_model, trt_kwargs, onnx_model, fp16):
    """Build a TensorRT engine from an ONNX model and serialize it to disk.

    Fixes vs. the original: the success log message was misspelled
    ("Succesfully"), and a failed build (``build_serialized_network``
    returning None) would have been written to disk as a corrupt engine
    file — it now raises instead.

    Args:
        trt_model: output path for the serialized engine file.
        trt_kwargs: dict with ``input_names``, ``min_shape``, ``opt_shape``
            and ``max_shape`` lists describing the optimization profile.
        onnx_model: path of the ONNX file to parse.
        fp16: when True, enable FP16 and force half-precision network I/O.

    Raises:
        ValueError: if the ONNX file fails to parse.
        RuntimeError: if TensorRT fails to build the engine.
    """
    import tensorrt as trt
    logging.info("Converting onnx to trt...")
    network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
    logger = trt.Logger(trt.Logger.INFO)
    builder = trt.Builder(logger)
    network = builder.create_network(network_flags)
    parser = trt.OnnxParser(network, logger)
    config = builder.create_builder_config()
    # config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, 1 << 32) # 4GB
    if fp16:
        config.set_flag(trt.BuilderFlag.FP16)
    profile = builder.create_optimization_profile()
    # load onnx model
    with open(onnx_model, "rb") as f:
        if not parser.parse(f.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))
            raise ValueError('failed to parse {}'.format(onnx_model))
    # set input shapes
    for i in range(len(trt_kwargs['input_names'])):
        profile.set_shape(trt_kwargs['input_names'][i], trt_kwargs['min_shape'][i], trt_kwargs['opt_shape'][i], trt_kwargs['max_shape'][i])
    tensor_dtype = trt.DataType.HALF if fp16 else trt.DataType.FLOAT
    # set input and output data type
    for i in range(network.num_inputs):
        input_tensor = network.get_input(i)
        input_tensor.dtype = tensor_dtype
    for i in range(network.num_outputs):
        output_tensor = network.get_output(i)
        output_tensor.dtype = tensor_dtype
    config.add_optimization_profile(profile)
    engine_bytes = builder.build_serialized_network(network, config)
    # build_serialized_network returns None on failure; fail loudly rather
    # than writing a corrupt engine file.
    if engine_bytes is None:
        raise RuntimeError('failed to build trt engine for {}'.format(onnx_model))
    # save trt engine
    with open(trt_model, "wb") as f:
        f.write(engine_bytes)
    logging.info("Successfully converted onnx to trt...")
class TrtContextWrapper:
    """Bounded pool of TensorRT execution contexts for concurrent inference.

    Each pool entry is a ``[execution_context, cuda_stream]`` pair. Callers
    check one out with :meth:`acquire_estimator` (blocking when the pool is
    exhausted) and must hand it back via :meth:`release_estimator`.
    """

    def __init__(self, trt_engine, trt_concurrent=1, device='cuda:0'):
        """Create ``trt_concurrent`` contexts/streams from ``trt_engine``."""
        self.trt_context_pool = queue.Queue(maxsize=trt_concurrent)
        self.trt_engine = trt_engine
        self.device = device
        for _ in range(trt_concurrent):
            trt_context = trt_engine.create_execution_context()
            trt_stream = torch.cuda.stream(torch.cuda.Stream(torch.device(device)))
            assert trt_context is not None, 'failed to create trt context, maybe not enough CUDA memory, try reduce current trt concurrent {}'.format(trt_concurrent)
            self.trt_context_pool.put([trt_context, trt_stream])
        # fixed typo: "avaialbe" -> "available"
        assert self.trt_context_pool.empty() is False, 'no available estimator context'

    def acquire_estimator(self):
        """Block until a [context, stream] pair is free; return (pair, engine)."""
        return self.trt_context_pool.get(), self.trt_engine

    def release_estimator(self, context, stream):
        """Return a context/stream pair to the pool so others can reuse it."""
        self.trt_context_pool.put([context, stream])
class CosyVoice2_Token2Wav(torch.nn.Module):
    """CosyVoice2 token-to-waveform pipeline.

    Combines four models loaded from ``model_dir``:

    * ``flow`` (CausalMaskedDiffWithXvec): speech tokens -> mel spectrogram
    * ``hift`` (HiFTGenerator): mel spectrogram -> waveform
    * ``spk_model`` (CAM++ via ONNX Runtime, optionally TensorRT): speaker embedding
    * ``audio_tokenizer`` (s3tokenizer): prompt audio -> speech tokens

    With ``enable_trt`` set, TensorRT engines are built from the bundled ONNX
    files on first use and cached next to them.
    """

    def __init__(self, model_dir: str = "./CosyVoice2-0.5B", enable_trt: bool = False, device_id: int = 0):
        super().__init__()
        self.device_id = device_id
        self.device = f"cuda:{device_id}"
        # Flow-matching acoustic model, run in fp16.
        self.flow = CausalMaskedDiffWithXvec()
        self.flow.half()
        self.flow.load_state_dict(torch.load(f"{model_dir}/flow.pt", map_location="cpu", weights_only=True), strict=True)
        self.flow.to(self.device).eval()
        # HiFT vocoder; checkpoint keys carry a 'generator.' prefix to strip.
        self.hift = HiFTGenerator()
        hift_state_dict = {k.replace('generator.', ''): v for k, v in torch.load(f"{model_dir}/hift.pt", map_location="cpu", weights_only=True).items()}
        self.hift.load_state_dict(hift_state_dict, strict=True)
        self.hift.to(self.device).eval()
        # Speaker-embedding model on CPU via ONNX Runtime (may be swapped for
        # a TRT engine by load_spk_trt below).
        option = onnxruntime.SessionOptions()
        option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
        option.intra_op_num_threads = 1
        self.spk_model = onnxruntime.InferenceSession(f"{model_dir}/campplus.onnx", sess_options=option, providers=["CPUExecutionProvider"])
        self.audio_tokenizer = s3tokenizer.load_model(f"{model_dir}/speech_tokenizer_v2.onnx").to(self.device).eval()
        # Hard-coded GPU tag; only used to name the cached TRT engine files.
        gpu = "l20"
        if enable_trt:
            self.load_trt(f'{model_dir}/flow.decoder.estimator.fp16.dynamic_batch.{gpu}.plan',
                          f'{model_dir}/flow.decoder.estimator.fp32.dynamic_batch.onnx',
                          1,
                          True)
            self.load_spk_trt(f'{model_dir}/campplus.{gpu}.fp32.trt',
                              f'{model_dir}/campplus.onnx',
                              1,
                              False)

    def forward_spk_embedding(self, spk_feat):
        """Compute a speaker embedding (flat list of floats) from fbank features.

        Dispatches to ONNX Runtime or, when load_spk_trt replaced the session,
        to the TensorRT engine pool.
        """
        if isinstance(self.spk_model, onnxruntime.InferenceSession):
            return self.spk_model.run(
                None, {self.spk_model.get_inputs()[0].name: spk_feat.unsqueeze(dim=0).cpu().numpy()}
            )[0].flatten().tolist()
        else:
            [spk_model, stream], trt_engine = self.spk_model.acquire_estimator()
            # NOTE need to synchronize when switching stream
            with torch.cuda.device(self.device_id):
                torch.cuda.current_stream().synchronize()
                spk_feat = spk_feat.unsqueeze(dim=0).to(self.device)
                batch_size = spk_feat.size(0)
                with stream:
                    spk_model.set_input_shape('input', (batch_size, spk_feat.size(1), 80))
                    # Output is a 192-dim embedding per item.
                    output_tensor = torch.empty((batch_size, 192), device=spk_feat.device)
                    data_ptrs = [spk_feat.contiguous().data_ptr(),
                                 output_tensor.contiguous().data_ptr()]
                    for i, j in enumerate(data_ptrs):
                        spk_model.set_tensor_address(trt_engine.get_tensor_name(i), j)
                    # run trt engine
                    assert spk_model.execute_async_v3(torch.cuda.current_stream().cuda_stream) is True
                torch.cuda.current_stream().synchronize()
            self.spk_model.release_estimator(spk_model, stream)
            return output_tensor.cpu().numpy().flatten().tolist()

    def load_spk_trt(self, spk_model, spk_onnx_model, trt_concurrent=1, fp16=True):
        """Load (building if missing/empty) the speaker-model TRT engine and
        replace ``self.spk_model`` with a TrtContextWrapper pool."""
        if not os.path.exists(spk_model) or os.path.getsize(spk_model) == 0:
            trt_kwargs = self.get_spk_trt_kwargs()
            convert_onnx_to_trt(spk_model, trt_kwargs, spk_onnx_model, fp16)
        import tensorrt as trt
        with open(spk_model, 'rb') as f:
            spk_engine = trt.Runtime(trt.Logger(trt.Logger.INFO)).deserialize_cuda_engine(f.read())
        assert spk_engine is not None, 'failed to load trt {}'.format(spk_model)
        self.spk_model = TrtContextWrapper(spk_engine, trt_concurrent=trt_concurrent, device=self.device)

    def get_spk_trt_kwargs(self):
        """Dynamic-shape profile for the speaker model: (batch=1, frames, 80 fbank bins)."""
        min_shape = [(1, 4, 80)]
        opt_shape = [(1, 500, 80)]
        max_shape = [(1, 3000, 80)]
        input_names = ["input"]
        return {'min_shape': min_shape, 'opt_shape': opt_shape, 'max_shape': max_shape, 'input_names': input_names}

    def load_trt(self, flow_decoder_estimator_model, flow_decoder_onnx_model, trt_concurrent=1, fp16=True):
        """Load (building if missing/empty) the flow-decoder estimator TRT engine
        and install it as ``self.flow.decoder.estimator``."""
        assert torch.cuda.is_available(), 'tensorrt only supports gpu!'
        if not os.path.exists(flow_decoder_estimator_model) or os.path.getsize(flow_decoder_estimator_model) == 0:
            trt_kwargs = self.get_trt_kwargs_dynamic_batch(opt_bs=2, max_batch_size=16)
            convert_onnx_to_trt(flow_decoder_estimator_model, trt_kwargs, flow_decoder_onnx_model, fp16)
        del self.flow.decoder.estimator
        import tensorrt as trt
        with open(flow_decoder_estimator_model, 'rb') as f:
            estimator_engine = trt.Runtime(trt.Logger(trt.Logger.INFO)).deserialize_cuda_engine(f.read())
        assert estimator_engine is not None, 'failed to load trt {}'.format(flow_decoder_estimator_model)
        self.flow.decoder.estimator = TrtContextWrapper(estimator_engine, trt_concurrent=trt_concurrent, device=self.device)

    def get_trt_kwargs_dynamic_batch(self, opt_bs=2, max_batch_size=64):
        """Dynamic-shape profile for the estimator.

        The batch dimension is doubled everywhere — presumably because the
        estimator runs with classifier-free guidance (cond + uncond stacked);
        TODO confirm against the flow decoder implementation.
        """
        min_shape = [(2, 80, 4), (2, 1, 4), (2, 80, 4), (2, 80, 4), (2,), (2, 80)]
        opt_shape = [(opt_bs * 2, 80, 500), (opt_bs * 2, 1, 500), (opt_bs * 2, 80, 500), (opt_bs * 2, 80, 500), (opt_bs * 2,), (opt_bs * 2, 80)]
        max_shape = [(max_batch_size * 2, 80, 3000), (max_batch_size * 2, 1, 3000), (max_batch_size * 2, 80, 3000), (max_batch_size * 2, 80, 3000), (max_batch_size * 2,),
                     (max_batch_size * 2, 80)]
        input_names = ["x", "mask", "mu", "cond", "t", "spks"]
        return {'min_shape': min_shape, 'opt_shape': opt_shape, 'max_shape': max_shape, 'input_names': input_names}

    def prompt_audio_tokenization(self, prompt_audios_list: list[torch.Tensor]) -> list[list[int]]:
        """Tokenize each 1-D prompt waveform into a list of speech token IDs."""
        prompt_speech_tokens_list, prompt_speech_mels_list = [], []
        for audio in prompt_audios_list:
            assert len(audio.shape) == 1
            log_mel = s3tokenizer.log_mel_spectrogram(audio)  # [num_mels, T]
            prompt_speech_mels_list.append(log_mel)
        prompt_mels_for_llm, prompt_mels_lens_for_llm = s3tokenizer.padding(prompt_speech_mels_list)
        prompt_speech_tokens, prompt_speech_tokens_lens = self.audio_tokenizer.quantize(
            prompt_mels_for_llm.to(self.device), prompt_mels_lens_for_llm.to(self.device)
        )
        # Trim padding from each row before returning plain Python lists.
        for i in range(len(prompt_speech_tokens)):
            speech_tokens_i = prompt_speech_tokens[i, :prompt_speech_tokens_lens[i].item()].tolist()
            prompt_speech_tokens_list.append(speech_tokens_i)
        return prompt_speech_tokens_list

    def get_spk_emb(self, prompt_audios_list: list[torch.Tensor]) -> torch.Tensor:
        """Compute mean-normalized fbank features per prompt and stack the
        resulting speaker embeddings into a [B, 192] tensor."""
        spk_emb_for_flow = []
        for audio in prompt_audios_list:
            assert len(audio.shape) == 1
            spk_feat = kaldi.fbank(audio.unsqueeze(0), num_mel_bins=80, dither=0, sample_frequency=16000)
            # Cepstral mean normalization over time.
            spk_feat = spk_feat - spk_feat.mean(dim=0, keepdim=True)
            spk_emb = self.forward_spk_embedding(spk_feat)
            spk_emb_for_flow.append(spk_emb)
        spk_emb_for_flow = torch.tensor(spk_emb_for_flow)
        return spk_emb_for_flow

    def get_prompt_mels(self, prompt_audios_list: list[torch.Tensor], prompt_audios_sample_rate: list[int]):
        """Resample prompts to 24 kHz, compute mels, and pad to a batch.

        Returns:
            (padded mels [B, T', 80], lengths [B]).
        """
        prompt_mels_for_flow = []
        prompt_mels_lens_for_flow = []
        for audio, sample_rate in zip(prompt_audios_list, prompt_audios_sample_rate):
            assert len(audio.shape) == 1
            audio = audio.unsqueeze(0)
            if sample_rate != 24000:
                audio = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=24000)(audio)
            mel = mel_spectrogram(audio).transpose(1, 2).squeeze(0)  # [T, num_mels]
            mel_len = mel.shape[0]
            prompt_mels_for_flow.append(mel)
            prompt_mels_lens_for_flow.append(mel_len)
        prompt_mels_for_flow = torch.nn.utils.rnn.pad_sequence(prompt_mels_for_flow, batch_first=True, padding_value=0)  # [B, T', num_mels=80]
        prompt_mels_lens_for_flow = torch.tensor(prompt_mels_lens_for_flow)
        return prompt_mels_for_flow, prompt_mels_lens_for_flow

    def forward_flow(self, prompt_speech_tokens_list: list[list[int]], generated_speech_tokens_list: list[list[int]], prompt_mels_for_flow: torch.Tensor,
                     prompt_mels_lens_for_flow: torch.Tensor, spk_emb_for_flow: torch.Tensor):
        """Run the flow model on concatenated prompt+generated tokens.

        Returns:
            (generated mels [B, 80, T], mel lengths [B]) — includes the prompt
            region, which forward_hift slices off.
        """
        flow_inputs = []
        flow_inputs_lens = []
        for prompt_speech_tokens, generated_speech_tokens in zip(prompt_speech_tokens_list, generated_speech_tokens_list):
            flow_inputs.append(torch.tensor(prompt_speech_tokens + generated_speech_tokens))
            flow_inputs_lens.append(len(prompt_speech_tokens) + len(generated_speech_tokens))
        flow_inputs = torch.nn.utils.rnn.pad_sequence(flow_inputs, batch_first=True, padding_value=0)
        flow_inputs_lens = torch.tensor(flow_inputs_lens)
        # NOTE(review): torch.amp.autocast's first argument is a device *type*;
        # self.device is "cuda:N" — confirm this is accepted by the installed
        # torch version (newer releases expect plain "cuda").
        with torch.amp.autocast(self.device, dtype=torch.float16):
            generated_mels, generated_mels_lens = self.flow(
                flow_inputs.to(self.device), flow_inputs_lens.to(self.device),
                prompt_mels_for_flow.to(self.device), prompt_mels_lens_for_flow.to(self.device), spk_emb_for_flow.to(self.device),
                streaming=False, finalize=True
            )
        return generated_mels, generated_mels_lens

    def forward_hift(self, generated_mels: torch.Tensor, generated_mels_lens: torch.Tensor, prompt_mels_lens_for_flow: torch.Tensor):
        """Vocode each item's mel (prompt region excluded) into a waveform."""
        batch_size = generated_mels.shape[0]
        generated_wavs = []
        for i in range(batch_size):
            # Slice off the prompt frames and the padded tail.
            mel = generated_mels[i, :, prompt_mels_lens_for_flow[i].item():generated_mels_lens[i].item()].unsqueeze(0)
            wav, _ = self.hift(speech_feat=mel)
            generated_wavs.append(wav)
        return generated_wavs

    @torch.inference_mode()
    def forward(
        self, generated_speech_tokens_list: list[list[int]], prompt_audios_list: list[torch.Tensor], prompt_audios_sample_rate: list[int]
    ):
        """Full pipeline: prompt audio + generated tokens -> list of waveforms.

        Args:
            generated_speech_tokens_list: Per-item CosyVoice2 token IDs.
            prompt_audios_list: Per-item 1-D 16 kHz prompt waveforms.
            prompt_audios_sample_rate: Must be 16000 for every item.
        """
        # assert all item in prompt_audios_sample_rate is 16000
        assert all(sample_rate == 16000 for sample_rate in prompt_audios_sample_rate)
        prompt_speech_tokens_list = self.prompt_audio_tokenization(prompt_audios_list)
        prompt_mels_for_flow, prompt_mels_lens_for_flow = self.get_prompt_mels(prompt_audios_list, prompt_audios_sample_rate)
        spk_emb_for_flow = self.get_spk_emb(prompt_audios_list)
        generated_mels, generated_mels_lens = self.forward_flow(
            prompt_speech_tokens_list, generated_speech_tokens_list, prompt_mels_for_flow, prompt_mels_lens_for_flow, spk_emb_for_flow)
        generated_wavs = self.forward_hift(generated_mels, generated_mels_lens, prompt_mels_lens_for_flow)
        return generated_wavs
def collate_fn(batch):
    """Collate dataset rows into the parallel lists CosyVoice2_Token2Wav.forward expects.

    Args:
        batch: Iterable of dataset items, each a dict with keys
            'target_audio_cosy2_tokens', 'prompt_audio' (dict with 'array'
            numpy waveform and 'sampling_rate') and 'id'.

    Returns:
        Tuple (ids, generated_speech_tokens_list, prompt_audios_list,
        prompt_audios_sample_rate) — four lists aligned by index.
    """
    ids, generated_speech_tokens_list, prompt_audios_list, prompt_audios_sample_rate = [], [], [], []
    # The original `for _, item in enumerate(batch)` never used the index.
    for item in batch:
        generated_speech_tokens_list.append(item['target_audio_cosy2_tokens'])
        audio = torch.from_numpy(item['prompt_audio']['array']).float()
        prompt_audios_list.append(audio)
        prompt_audios_sample_rate.append(item['prompt_audio']['sampling_rate'])
        ids.append(item['id'])
    return ids, generated_speech_tokens_list, prompt_audios_list, prompt_audios_sample_rate
def get_args():
    """Parse the CLI options for the token2wav benchmark script."""
    parser = argparse.ArgumentParser()
    # (flag, add_argument kwargs) — order preserved so --help output matches.
    flag_specs = [
        ("--enable-trt", dict(action="store_true")),
        ("--model-dir", dict(type=str, default="./CosyVoice2-0.5B")),
        ("--batch-size", dict(type=int, default=4)),
        ("--output-dir", dict(type=str, default="generated_wavs")),
        ("--huggingface-dataset-split", dict(type=str, default="wenetspeech4tts")),
        ("--warmup", dict(type=int, default=3,
                          help="Number of warmup epochs, performance statistics will only be collected from the last epoch")),
    ]
    for flag, kwargs in flag_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
if __name__ == "__main__":
    # Benchmark entry point: synthesize every sample in the dataset
    # args.warmup times, timing each pass over the data.
    args = get_args()
    model = CosyVoice2_Token2Wav(model_dir=args.model_dir, enable_trt=args.enable_trt)
    # mkdir output_dir if not exists
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    dataset_name = "yuekai/seed_tts_cosy2"
    dataset = load_dataset(dataset_name, split=args.huggingface_dataset_split, trust_remote_code=True)
    data_loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, collate_fn=collate_fn, num_workers=0)
    for _ in range(args.warmup):
        start_time = time.time()
        for batch in data_loader:
            ids, generated_speech_tokens_list, prompt_audios_list, prompt_audios_sample_rate = batch
            generated_wavs = model(generated_speech_tokens_list, prompt_audios_list, prompt_audios_sample_rate)
            # Wavs are written (and overwritten) every epoch, including warmups.
            for id, wav in zip(ids, generated_wavs):
                torchaudio.save(f"{args.output_dir}/{id}.wav", wav.cpu(), 24000)
        end_time = time.time()
        epoch_time = end_time - start_time
        # NOTE(review): every epoch prints a "Measurement" time even though the
        # --warmup help text says only the last epoch is measured — confirm.
        print(f"Measurement epoch time taken: {epoch_time:.4f} seconds")
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/token2wav.py",
"license": "Apache License 2.0",
"lines": 283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
FunAudioLLM/CosyVoice:examples/grpo/cosyvoice2/huggingface_to_pretrained.py |
# SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
python3 hf2pretrained.py --hf-cosyvoice2-llm-path /workspace/rl-exp/checkpoint-400 --output-path /workspace/CosyVoice2-0.5B/llm-new.pt
"""
from argparse import ArgumentParser
import torch
from safetensors import safe_open
from transformers import AutoTokenizer
def get_args():
    """Parse CLI options for converting an HF-format CosyVoice2 LLM to llm.pt."""
    parser = ArgumentParser()
    # (flag, add_argument kwargs) pairs, in the original declaration order.
    spec = [
        ("--hf-cosyvoice2-llm-path",
         dict(type=str,
              default=None,
              help="The RL trained CosyVoice2 model path in HuggingFace format")),
        ("--output-path",
         dict(type=str,
              default="./llm.pt",
              help="The path to save the llm.pt")),
    ]
    for flag, kwargs in spec:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
if __name__ == "__main__":
    # Convert an RL-trained HuggingFace CosyVoice2 LLM checkpoint back into
    # the pretrained-format state dict ("llm.pt") expected by CosyVoice2.
    args = get_args()
    tokenizer = AutoTokenizer.from_pretrained(args.hf_cosyvoice2_llm_path)
    # First speech token id in the merged vocabulary; speech rows are sliced
    # out of lm_head / embed_tokens starting here.
    speech_start_idx = tokenizer.convert_tokens_to_ids("<|s_0|>")
    # 6561 speech codes + 3 special tokens (presumably eos/sos/task — confirm).
    cosyvoice2_token_size = 6561 + 3
    llm_embedding_vocab_size = 2
    hf_tensors = {}
    with safe_open(f"{args.hf_cosyvoice2_llm_path}/model.safetensors", framework="pt", device="cpu") as f:
        for k in f.keys():
            if k.startswith("lm_head.bias"):
                # RL trained model disable bias for lm_head
                continue
            # All HF weights are re-rooted under the "llm.model." prefix.
            new_k = "llm.model." + k
            hf_tensors[new_k] = f.get_tensor(k)
            if k.startswith("lm_head"):
                # Speech-token slice of the output head; bias restored as zeros.
                hf_tensors["llm_decoder.weight"] = f.get_tensor(k)[speech_start_idx:speech_start_idx + cosyvoice2_token_size]
                hf_tensors["llm_decoder.bias"] = torch.zeros_like(hf_tensors["llm_decoder.weight"][:, 0])
            if k.startswith("model.embed_tokens"):
                # Speech-token and task-embedding slices of the input embedding.
                hf_tensors["speech_embedding.weight"] = f.get_tensor(k)[speech_start_idx:speech_start_idx + cosyvoice2_token_size]
                hf_tensors["llm_embedding.weight"] = f.get_tensor(k)[speech_start_idx + cosyvoice2_token_size:speech_start_idx + cosyvoice2_token_size + llm_embedding_vocab_size]
    # use tie_word_embeddings=True
    # Text vocab is truncated to 151936 rows and lm_head shares the embedding.
    hf_tensors["llm.model.model.embed_tokens.weight"] = hf_tensors["llm.model.model.embed_tokens.weight"][:151936]
    hf_tensors["llm.model.lm_head.weight"] = hf_tensors["llm.model.model.embed_tokens.weight"]
    torch.save(hf_tensors, args.output_path)
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "examples/grpo/cosyvoice2/huggingface_to_pretrained.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
FunAudioLLM/CosyVoice:examples/grpo/cosyvoice2/infer_dataset.py | # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Example Usage
dataset=zero_shot_zh
output_dir=./outputs_rl_aishell3_step${step}_${dataset}_jit_trt_fp16_reward_tts
token2wav_path=/workspace/CosyVoice2-0.5B
CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
torchrun --nproc_per_node=8 \
infer_dataset.py \
--output-dir $output_dir \
--llm-model-name-or-path $llm_path/merged_hf_model \
--token2wav-path $token2wav_path \
--split-name ${dataset} || exit 1
"""
import argparse
import json
import os
import sys
from pathlib import Path
import torch
import torch.distributed as dist
import torch.nn.functional as F
import torchaudio
from cosyvoice.cli.cosyvoice import CosyVoice2
from cosyvoice.utils.file_utils import load_wav
from datasets import load_dataset
from transformers import AutoTokenizer, AutoModelForCausalLM
from torch.utils.data import DataLoader, Dataset, DistributedSampler
from tqdm import tqdm
import soundfile as sf
import s3tokenizer
from functools import partial
sys.path.append("/workspace/CosyVoice/third_party/Matcha-TTS")
try:
torch.multiprocessing.set_start_method("spawn")
except RuntimeError:
pass
TEMPLATE = "{% for message in messages %}{%- if message['role'] == 'user' %}{{- '<|im_start|>' + message['role'] + '\n' + 'Convert the text to speech: ' + message['content'] + '<|im_end|>\n'}}{%- elif message['role'] == 'assistant' %}{{- '<|im_start|>' + message['role'] + '\n' + '<|SPEECH_GENERATION_START|>' + message['content']}}{%- endif %}{%- endfor %}" # noqa: E501
def audio_decode_cosyvoice2(
    audio_tokens, prompt_text, prompt_speech_16k, codec_decoder
):
    """
    Generate audio from tokens with optional tone and prompt embedding.

    Args:
        audio_tokens: LongTensor [1, T] of CosyVoice2 speech token IDs.
        prompt_text: Transcript of the prompt audio, used for conditioning.
        prompt_speech_16k: Prompt waveform at 16 kHz (speaker cloning source).
        codec_decoder: CosyVoice2 instance providing frontend, flow and hift.

    Returns:
        Waveform tensor from the HiFT vocoder — presumably at 24 kHz, matching
        the 24000 passed to frontend_zero_shot; confirm against callers.
    """
    # Build zero-shot conditioning (prompt tokens, mel features, speaker
    # embedding) from the prompt audio; "empty" is a placeholder TTS text.
    model_inputs_dict = codec_decoder.frontend.frontend_zero_shot(
        "empty", prompt_text, prompt_speech_16k, 24000
    )
    # Flow matching: speech tokens -> mel spectrogram, conditioned on prompt.
    tts_mel, _ = codec_decoder.model.flow.inference(
        token=audio_tokens.to(codec_decoder.model.device),
        token_len=torch.tensor([audio_tokens.shape[1]], dtype=torch.int32).to(
            codec_decoder.model.device
        ),
        prompt_token=model_inputs_dict["flow_prompt_speech_token"].to(
            codec_decoder.model.device
        ),
        prompt_token_len=torch.tensor(
            [model_inputs_dict["flow_prompt_speech_token_len"]], dtype=torch.int32
        ).to(codec_decoder.model.device),
        prompt_feat=model_inputs_dict["prompt_speech_feat"].to(
            codec_decoder.model.device
        ),
        prompt_feat_len=model_inputs_dict["prompt_speech_feat_len"].to(
            codec_decoder.model.device
        ),
        embedding=model_inputs_dict["flow_embedding"].to(codec_decoder.model.device),
        finalize=True,
    )
    # HiFT vocoder: mel -> waveform (empty cache_source = no streaming state).
    audio_hat, _ = codec_decoder.model.hift.inference(
        speech_feat=tts_mel, cache_source=torch.zeros(1, 1, 0)
    )
    return audio_hat
def extract_speech_ids(speech_tokens_str):
    """Extract integer speech IDs from token strings like ``<|s_23456|>``.

    Tokens that do not match the expected pattern are reported and skipped.
    (The original prefix/suffix slicing crashed with ValueError on malformed
    tokens such as ``<|s_|>`` or ``<|s_x|>``; a full regex match is robust.)

    Args:
        speech_tokens_str: Iterable of decoded token strings.

    Returns:
        List of integer speech IDs, in input order.
    """
    import re
    speech_ids = []
    for token_str in speech_tokens_str:
        match = re.fullmatch(r"<\|s_(\d+)\|>", token_str)
        if match:
            speech_ids.append(int(match.group(1)))
        else:
            print(f"Unexpected token: {token_str}")
    return speech_ids
def convert_cosy2_tokens_to_speech_id_str(cosy2_tokens):
    """Render CosyVoice2 token IDs as a string like ``<|s_1|><|s_2|>...``.

    Args:
        cosy2_tokens: Iterable of integer token IDs.

    Returns:
        The concatenated ``<|s_N|>`` markers ("" for an empty input).
    """
    # str.join avoids the quadratic += concatenation of the original loop.
    return "".join(f"<|s_{token}|>" for token in cosy2_tokens)
def get_args():
    """Parse CLI arguments for distributed LLM + CosyVoice2 inference."""
    parser = argparse.ArgumentParser(description="Speech generation using LLM + CosyVoice2")
    # (flag, add_argument kwargs) pairs — declaration order preserved so the
    # generated --help output is unchanged.
    arg_specs = [
        ("--split-name",
         dict(type=str, default="wenetspeech4tts",
              help="huggingface dataset split name, see yuekai/CV3-Eval, yuekai/seed_tts_cosy2")),
        ("--output-dir",
         dict(required=True, type=str, help="dir to save result")),
        ("--batch-size",
         dict(default=1, type=int, help="batch size (per-device) for inference")),
        ("--num-workers",
         dict(type=int, default=1, help="workers for dataloader")),
        ("--prefetch",
         dict(type=int, default=5, help="prefetch for dataloader")),
        ("--llm-model-name-or-path",
         dict(required=True, type=str,
              help="LLM model path (includes both model and tokenizer)")),
        ("--token2wav-path",
         dict(required=True, type=str, help="CosyVoice2 token2wav model path")),
        ("--prompt-text",
         dict(type=str, default=None, help="The prompt text for CosyVoice2")),
        ("--prompt-speech-path",
         dict(type=str, default=None,
              help="The path to the prompt speech for CosyVoice2")),
        ("--top-p",
         dict(type=float, default=0.95, help="top p for sampling")),
        ("--temperature",
         dict(type=float, default=0.8, help="temperature for sampling")),
        ("--top-k",
         dict(type=int, default=50, help="top k for sampling")),
    ]
    for flag, kwargs in arg_specs:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def data_collator(batch, tokenizer, s3_tokenizer):
    """Simplified data collator for batch_size=1 processing.

    Builds LLM ``input_ids`` from a chat template (user text + prompt speech
    token string) and collects the 16 kHz prompt audio for CosyVoice2.

    NOTE(review): ``full_text`` is assigned in the first per-item loop but the
    chat is built in a *separate* loop below, so for batches larger than one
    every sample reuses the last item's text — only batch_size=1 is safe here.

    Args:
        batch: Dataset items with keys 'prompt_text', 'target_text',
            'prompt_audio' and 'id' (optionally 'prompt_audio_cosy2_tokens').
        tokenizer: HF tokenizer providing apply_chat_template.
        s3_tokenizer: s3tokenizer model for quantizing prompt audio, or None
            when the dataset already carries prompt tokens.

    Returns:
        Dict with 'input_ids' [B, L], 'ids', 'prompt_text', 'prompt_audio_list'.
    """
    target_sample_rate = 16000  # CosyVoice2 uses 16kHz for prompt audio
    device = s3_tokenizer.device if s3_tokenizer is not None else torch.device("cpu")
    input_ids_list, prompt_audio_list, prompt_text_list = [], [], []
    mels, prompt_audio_cosy2tokens_list = [], []
    for item in batch:
        prompt_text, target_text = (
            item["prompt_text"],
            item["target_text"],
        )
        prompt_text_list.append(prompt_text)
        # Combine prompt and target text
        full_text = prompt_text + target_text
        # get prompt audio for CosyVoice2 (convert to 16kHz)
        ref_audio_org, ref_sr = (
            item["prompt_audio"]["array"],
            item["prompt_audio"]["sampling_rate"],
        )
        ref_audio_org = torch.from_numpy(ref_audio_org).float().unsqueeze(0)
        # ref_audio_org = ref_audio_org.mean(dim=0, keepdim=True)
        # NOTE(review): debug print of the prompt-audio shape on every item.
        print(ref_audio_org.shape)
        if ref_sr != target_sample_rate:
            resampler = torchaudio.transforms.Resample(ref_sr, target_sample_rate)
            ref_audio = resampler(ref_audio_org)
        else:
            ref_audio = ref_audio_org
        prompt_audio_list.append(ref_audio)
        if "prompt_audio_cosy2_tokens" in item:
            # Dataset already ships prompt speech tokens — use them directly.
            prompt_audio_cosy2tokens = item["prompt_audio_cosy2_tokens"]
            prompt_audio_cosy2tokens_list.append(prompt_audio_cosy2tokens)
        else:
            # convert to float first
            mels.append(s3tokenizer.log_mel_spectrogram(ref_audio.squeeze(0)))
    if len(mels) > 0:
        # Quantize prompt audio into speech tokens with the s3 tokenizer.
        mels, mels_lens = s3tokenizer.padding(mels)
        codes, codes_lens = s3_tokenizer.quantize(mels.to(device), mels_lens.to(device))
        for i in range(len(codes)):
            prompt_audio_cosy2tokens_list.append(codes[i, :codes_lens[i].item()])
    for prompt_audio_cosy2tokens in prompt_audio_cosy2tokens_list:
        prompt_audio_cosy2_id_str = convert_cosy2_tokens_to_speech_id_str(prompt_audio_cosy2tokens)
        # Create chat template for LLM generation
        chat = [
            {"role": "user", "content": full_text},
            {"role": "assistant", "content": prompt_audio_cosy2_id_str}
        ]
        # Swap in the CosyVoice2 template when the tokenizer ships a generic
        # chat template that carries a 'system' turn.
        if 'system' in tokenizer.chat_template:
            tokenizer.chat_template = TEMPLATE
        input_ids = tokenizer.apply_chat_template(
            chat,
            tokenize=True,
            return_tensors='pt',
            continue_final_message=True
        )
        input_ids_list.append(input_ids.squeeze(0))
    # For batch_size=1, no need to pad
    if len(input_ids_list) == 1:
        input_ids = input_ids_list[0].unsqueeze(0)
    else:
        # Handle batch > 1 if needed
        max_len = max([len(input_ids) for input_ids in input_ids_list])
        # Left-pad with pad_token_id so generation continues from the right.
        input_ids_list = [
            torch.cat([torch.full((max_len - len(input_ids),), tokenizer.pad_token_id), input_ids])
            for input_ids in input_ids_list
        ]
        input_ids = torch.stack(input_ids_list)
    ids = [item["id"] for item in batch]
    return {
        "input_ids": input_ids,
        "ids": ids,
        "prompt_text": prompt_text_list,
        "prompt_audio_list": prompt_audio_list,
    }
def init_distributed():
    """Initialize the NCCL process group from torchrun environment variables.

    Returns:
        Tuple ``(world_size, local_rank, rank)`` as integers.
    """
    env = os.environ
    world_size = int(env.get("WORLD_SIZE", 1))
    local_rank = int(env.get("LOCAL_RANK", 0))
    rank = int(env.get("RANK", 0))
    banner = (
        "Inference on multiple gpus, this gpu {}".format(local_rank)
        + ", rank {}, world_size {}".format(rank, world_size)
    )
    print(banner)
    torch.cuda.set_device(local_rank)
    dist.init_process_group("nccl")
    return world_size, local_rank, rank
def main():
    """Distributed inference loop: LLM generates speech tokens, CosyVoice2 vocodes them.

    Each rank processes a DistributedSampler shard of the dataset and writes
    one 24 kHz wav per sample into ``args.output_dir``.
    """
    args = get_args()
    os.makedirs(args.output_dir, exist_ok=True)
    assert torch.cuda.is_available()
    world_size, local_rank, rank = init_distributed()
    device = torch.device(f"cuda:{local_rank}")
    # Load LLM model and tokenizer directly
    tokenizer = AutoTokenizer.from_pretrained(args.llm_model_name_or_path)
    model = AutoModelForCausalLM.from_pretrained(args.llm_model_name_or_path)
    model.eval()
    model.to(device)
    # Token-to-waveform decoder (flow + vocoder) with JIT/TRT acceleration.
    cosyvoice_codec = CosyVoice2(
        args.token2wav_path, load_jit=True, load_trt=True, fp16=True
    )
    if args.prompt_speech_path:
        prompt_speech_16k = load_wav(args.prompt_speech_path, 16000)
    else:
        prompt_speech_16k = None
    # CV3-Eval ("zero"-shot splits) carries raw audio only, so a speech
    # tokenizer is needed; seed_tts_cosy2 already ships prompt tokens.
    s3_tokenizer = s3tokenizer.load_model("speech_tokenizer_v2_25hz").to(device) if 'zero' in args.split_name else None
    dataset_name = "yuekai/CV3-Eval" if 'zero' in args.split_name else "yuekai/seed_tts_cosy2"
    dataset = load_dataset(
        dataset_name,
        split=args.split_name,
        trust_remote_code=True,
    )
    sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank)
    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        sampler=sampler,
        shuffle=False,
        num_workers=args.num_workers,
        prefetch_factor=args.prefetch,
        collate_fn=partial(data_collator, tokenizer=tokenizer, s3_tokenizer=s3_tokenizer),
    )
    total_steps = len(dataset)
    # Progress is only rendered on rank 0, scaled by world_size per batch.
    if rank == 0:
        progress_bar = tqdm(total=total_steps, desc="Processing", unit="wavs")
    for batch in dataloader:
        with torch.no_grad():
            input_ids = batch["input_ids"].to(device)
            # Generate speech tokens using LLM
            outputs = model.generate(
                input_ids,
                max_new_tokens=2048,  # Max length for generation
                do_sample=True,
                top_p=args.top_p,
                temperature=args.temperature,
                top_k=args.top_k,
            )
            # Process each sample in the batch
            for i in range(len(batch["ids"])):
                # Extract generated tokens (excluding input)
                input_length = input_ids[i].shape[0]
                generated_ids = outputs[i][input_length:-1]  # Remove last token if needed
                speech_tokens_str = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
                # Extract speech IDs from token strings like <|s_23456|>
                speech_ids = extract_speech_ids(speech_tokens_str)
                if len(speech_ids) == 0:
                    print(f"Warning: No speech tokens generated for sample {batch['ids'][i]}, skipping")
                    continue
                # Convert to tensor for CosyVoice2
                audio_tokens = torch.tensor(speech_ids, dtype=torch.long, device=device).unsqueeze(0)
                # A fixed --prompt-text/--prompt-speech-path overrides the
                # per-sample prompt from the dataset.
                if args.prompt_text is not None:
                    current_prompt_text = args.prompt_text
                    current_prompt_audio = prompt_speech_16k
                else:
                    current_prompt_text = batch["prompt_text"][i]
                    current_prompt_audio = batch["prompt_audio_list"][i]
                if current_prompt_audio is not None:
                    # Generate audio using CosyVoice2
                    audio_hat = audio_decode_cosyvoice2(
                        audio_tokens,
                        current_prompt_text,
                        current_prompt_audio,
                        cosyvoice_codec,
                    )
                    # Convert to numpy and save
                    generated_wave = audio_hat.squeeze(0).cpu().numpy()
                    target_sample_rate = 24000
                    utt = batch["ids"][i]
                    sf.write(f"{args.output_dir}/{utt}.wav", generated_wave, target_sample_rate)
                    print(f"Generated audio for sample {utt} with {len(speech_ids)} tokens")
                else:
                    print(f"Warning: No prompt audio available for sample {batch['ids'][i]}, skipping")
            if rank == 0:
                progress_bar.update(world_size * len(batch["ids"]))
    if rank == 0:
        progress_bar.close()
    # All ranks wait for each other before tearing down the process group.
    dist.barrier()
    dist.destroy_process_group()
if __name__ == "__main__":
    # torchrun entry point; see the module docstring for example usage.
    main()
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "examples/grpo/cosyvoice2/infer_dataset.py",
"license": "Apache License 2.0",
"lines": 344,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
FunAudioLLM/CosyVoice:examples/grpo/cosyvoice2/prepare_data.py | # Copyright 2024 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Preprocess the Text to Speech dataset to parquet format
"""
import argparse
import os
import re
import datasets
from verl.utils.hdfs_io import copy, makedirs
if __name__ == "__main__":
    # Convert local JSON/JSONL TTS data into the parquet layout expected by
    # the verl RL training pipeline, optionally mirroring to HDFS.
    parser = argparse.ArgumentParser()
    parser.add_argument("--train_file", required=True, help="Path to training JSON/JSONL file")
    parser.add_argument("--test_file", required=True, help="Path to test JSON/JSONL file")
    parser.add_argument("--local_dir", default=None, required=True)
    parser.add_argument("--hdfs_dir", default=None)
    args = parser.parse_args()
    # Load datasets from local JSON files
    train_dataset = datasets.load_dataset("json", data_files=args.train_file)['train']
    test_dataset = datasets.load_dataset("json", data_files=args.test_file)['train']
    # add a row to each data item that represents a unique id
    def make_map_fn(split):
        # Returns the per-example mapper for datasets.map; 'split' is recorded
        # in each row's extra_info.
        def process_fn(example, idx):
            text = example.pop("text")
            # use cosyvoice2 official huggingface compatible checkpoint template
            question = text
            answer = ""
            data = {
                "data_source": f"{args.train_file}_{args.test_file}",  # Use file names as data source
                "prompt": [
                    {
                        "role": "user",
                        "content": question,
                    },
                    {
                        "role": "assistant",
                        "content": answer,
                    },
                ],
                "ability": "text-to-speech",
                # Rule-based reward scored against the original text.
                "reward_model": {"style": "rule", "ground_truth": text},
                "extra_info": {
                    "split": split,
                    "index": idx,
                    "text": text,
                },
            }
            return data
        return process_fn
    train_dataset = train_dataset.map(function=make_map_fn("train"), with_indices=True)
    test_dataset = test_dataset.map(function=make_map_fn("test"), with_indices=True)
    local_dir = args.local_dir
    hdfs_dir = args.hdfs_dir
    print(train_dataset)
    print(test_dataset)
    train_dataset.to_parquet(os.path.join(local_dir, "train.parquet"))
    test_dataset.to_parquet(os.path.join(local_dir, "test.parquet"))
    # Optionally mirror the parquet files to HDFS.
    if hdfs_dir is not None:
        makedirs(hdfs_dir)
        copy(src=local_dir, dst=hdfs_dir)
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "examples/grpo/cosyvoice2/prepare_data.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
FunAudioLLM/CosyVoice:examples/grpo/cosyvoice2/reward_tts.py | # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Reward calculation for CosyVoice2-0.5B.
"""
from __future__ import annotations
import re
import json
import time
import argparse
from typing import List
import numpy as np
import requests
REWARD_SERVER_URL = "http://localhost:8000/v2/models/token2wav_asr/infer"
def _parse_ids(token_str: str) -> List[int]:
return [int(t) for t in re.findall(r"<\|s_(\d+)\|>", token_str)]
def _remote_reward(tokens: List[int], ground_truth: str, timeout: float = 200.0) -> float:
    """Send token IDs and ground-truth text to the Triton server and get a reward.

    Args:
        tokens: speech-token IDs decoded from the model rollout.
        ground_truth: reference transcript the server scores the ASR output against.
        timeout: HTTP request timeout in seconds.

    Returns:
        The scalar reward from the server's first output tensor, or 0.0 when
        the response does not have the expected structure.

    Raises:
        requests.HTTPError: if the server answers with a non-2xx status.
    """
    # Fix: removed the unused `gt_arr` local — the ground truth is sent as a
    # plain string in the BYTES input below, so the object array was dead code.
    tokens_arr = np.array(tokens, dtype=np.int32).reshape(1, -1)
    lens_arr = np.array([[tokens_arr.shape[1]]], dtype=np.int32)
    # KServe/Triton v2 HTTP inference request body.
    payload = {
        "inputs": [
            {
                "name": "TOKENS",
                "shape": list(tokens_arr.shape),
                "datatype": "INT32",
                "data": tokens_arr.tolist(),
            },
            {
                "name": "TOKEN_LENS",
                "shape": list(lens_arr.shape),
                "datatype": "INT32",
                "data": lens_arr.tolist(),
            },
            {
                "name": "GT_TEXT",
                "shape": [1, 1],
                "datatype": "BYTES",
                "data": [ground_truth],
            },
        ]
    }
    rsp = requests.post(
        REWARD_SERVER_URL,
        headers={"Content-Type": "application/json"},
        json=payload,
        timeout=timeout,
        verify=False,
        params={"request_id": "0"},
    )
    rsp.raise_for_status()
    result = rsp.json()
    try:
        # Reward is returned as the first output
        return float(result["outputs"][0]["data"][0])
    except (KeyError, IndexError, TypeError):
        return 0.0
def compute_score(
    data_source: str,
    solution_str: str,
    ground_truth: str,
    extra_info: dict | None = None,
    *,
    debug_dump: bool = False,
) -> float:
    """Score a rollout in [0, 1] via the remote Triton ASR service.

    The server synthesizes audio from the speech tokens found in
    *solution_str*, transcribes it, and rewards low pinyin-level WER
    against *ground_truth*. Any failure to reach the server yields 0.0.
    """
    token_ids = _parse_ids(solution_str)
    try:
        reward = _remote_reward(token_ids, ground_truth)
    except Exception:
        # Best-effort: a reward of 0.0 stands in for any server/network error.
        reward = 0.0
    if debug_dump:
        print(
            f"\033[92m[{data_source}] Remote reward: {reward:.4f}\033[0m"
        )
    return reward
# CLI quick test
# CLI quick test: replay samples from a JSONL file through compute_score().
if __name__ == "__main__":
    # NOTE(review): `sys` is imported here but never used below — confirm.
    import sys

    def get_args():
        """Parse command line arguments."""
        parser = argparse.ArgumentParser(
            description="Test TTS CER scoring with data from JSONL file",
            formatter_class=argparse.ArgumentDefaultsHelpFormatter
        )
        parser.add_argument(
            "--input", "-i",
            type=str,
            default="data/emilia_zh-cosy-tiny-test.jsonl",
            help="Path to input JSONL file"
        )
        parser.add_argument(
            "--max-samples", "-n",
            type=int,
            default=None,
            help="Maximum number of samples to process (default: all)"
        )
        parser.add_argument(
            "--no-interactive",
            action="store_true",
            help="Run in non-interactive mode (process all samples without prompts)"
        )
        parser.add_argument(
            "--debug",
            action="store_true",
            help="Enable debug mode"
        )
        return parser.parse_args()

    def load_jsonl(file_path: str):
        """Load data from jsonl file (one JSON object per line)."""
        data = []
        with open(file_path, 'r', encoding='utf-8') as f:
            for line in f:
                data.append(json.loads(line.strip()))
        return data

    def code_to_solution_str(code_list: List[int]) -> str:
        """Convert a code list to the <|s_N|> solution string format."""
        return ''.join([f"<|s_{code}|>" for code in code_list])

    # Parse command line arguments
    args = get_args()
    try:
        # Load data from jsonl file
        print(f"Loading data from: {args.input}")
        data_list = load_jsonl(args.input)
        print(f"Loaded {len(data_list)} samples")
        # Limit samples if specified
        if args.max_samples is not None:
            data_list = data_list[:args.max_samples]
            print(f"Processing first {len(data_list)} samples (limited by --max-samples)")
        # Process each sample; each record needs 'code' and 'text' fields.
        begin_time = time.time()
        for i, sample in enumerate(data_list):
            print(f"\n--- Sample {i+1}/{len(data_list)} ---")
            print(f"Index: {sample.get('index', 'unknown')}")
            print(f"Text: {sample['text']}")
            # Extract required fields
            code_list = sample['code']
            ground_truth = sample['text']
            data_source = sample.get('index', f'sample_{i}')  # Use index as data_source
            # Convert code list to solution string
            solution_str = code_to_solution_str(code_list)
            print(f"Solution tokens: {len(code_list)} tokens")
            if args.debug:
                print(f"Solution string: {solution_str}")
            else:
                print(f"Solution string preview: {solution_str[:100]}..." if len(solution_str) > 100 else f"Solution string: {solution_str}")
            # Call compute_score function (hits the remote reward server)
            try:
                score = compute_score(
                    data_source=data_source,
                    solution_str=solution_str,
                    ground_truth=ground_truth,
                    extra_info=None,
                    debug_dump=args.debug
                )
                print(f"Final Score: {score:.4f}")
            except Exception as e:
                print(f"Error computing score: {e}")
            # Ask user if they want to continue (for interactive mode)
            if not args.no_interactive and i < len(data_list) - 1:
                try:
                    response = input("\nPress Enter to continue or 'q' to quit: ").strip().lower()
                    if response == 'q':
                        break
                except KeyboardInterrupt:
                    print("\nStopped by user")
                    break
        # NOTE(review): `i` is unbound here when data_list is empty — confirm
        # the input file is always non-empty, or guard this print.
        print(f"\nProcessed {min(i+1, len(data_list))} samples")
        end_time = time.time()
        print(f"Time taken: {end_time - begin_time} seconds")
    except FileNotFoundError:
        print(f"Error: File not found - {args.input}")
        print("Please check the file path or use --input to specify correct path")
        print("Run with --help for usage information")
    except Exception as e:
        print(f"Error: {e}")
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "examples/grpo/cosyvoice2/reward_tts.py",
"license": "Apache License 2.0",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
FunAudioLLM/CosyVoice:examples/grpo/cosyvoice2/scripts/offline-decode-files.py | # Copyright (c) 2023 by manyeyes
# Copyright (c) 2023 Xiaomi Corporation
"""
This file demonstrates how to use sherpa-onnx Python API to transcribe
file(s) with a non-streaming model.
(1) For paraformer
./python-api-examples/offline-decode-files.py \
--tokens=/path/to/tokens.txt \
--paraformer=/path/to/paraformer.onnx \
--num-threads=2 \
--decoding-method=greedy_search \
--debug=false \
--sample-rate=16000 \
--feature-dim=80 \
/path/to/0.wav \
/path/to/1.wav
(2) For transducer models from icefall
./python-api-examples/offline-decode-files.py \
--tokens=/path/to/tokens.txt \
--encoder=/path/to/encoder.onnx \
--decoder=/path/to/decoder.onnx \
--joiner=/path/to/joiner.onnx \
--num-threads=2 \
--decoding-method=greedy_search \
--debug=false \
--sample-rate=16000 \
--feature-dim=80 \
/path/to/0.wav \
/path/to/1.wav
(3) For CTC models from NeMo
python3 ./python-api-examples/offline-decode-files.py \
--tokens=./sherpa-onnx-nemo-ctc-en-citrinet-512/tokens.txt \
--nemo-ctc=./sherpa-onnx-nemo-ctc-en-citrinet-512/model.onnx \
--num-threads=2 \
--decoding-method=greedy_search \
--debug=false \
./sherpa-onnx-nemo-ctc-en-citrinet-512/test_wavs/0.wav \
./sherpa-onnx-nemo-ctc-en-citrinet-512/test_wavs/1.wav \
./sherpa-onnx-nemo-ctc-en-citrinet-512/test_wavs/8k.wav
(4) For Whisper models
python3 ./python-api-examples/offline-decode-files.py \
--whisper-encoder=./sherpa-onnx-whisper-base.en/base.en-encoder.int8.onnx \
--whisper-decoder=./sherpa-onnx-whisper-base.en/base.en-decoder.int8.onnx \
--tokens=./sherpa-onnx-whisper-base.en/base.en-tokens.txt \
--whisper-task=transcribe \
--num-threads=1 \
./sherpa-onnx-whisper-base.en/test_wavs/0.wav \
./sherpa-onnx-whisper-base.en/test_wavs/1.wav \
./sherpa-onnx-whisper-base.en/test_wavs/8k.wav
(5) For CTC models from WeNet
python3 ./python-api-examples/offline-decode-files.py \
--wenet-ctc=./sherpa-onnx-zh-wenet-wenetspeech/model.onnx \
--tokens=./sherpa-onnx-zh-wenet-wenetspeech/tokens.txt \
./sherpa-onnx-zh-wenet-wenetspeech/test_wavs/0.wav \
./sherpa-onnx-zh-wenet-wenetspeech/test_wavs/1.wav \
./sherpa-onnx-zh-wenet-wenetspeech/test_wavs/8k.wav
(6) For tdnn models of the yesno recipe from icefall
python3 ./python-api-examples/offline-decode-files.py \
--sample-rate=8000 \
--feature-dim=23 \
--tdnn-model=./sherpa-onnx-tdnn-yesno/model-epoch-14-avg-2.onnx \
--tokens=./sherpa-onnx-tdnn-yesno/tokens.txt \
./sherpa-onnx-tdnn-yesno/test_wavs/0_0_0_1_0_0_0_1.wav \
./sherpa-onnx-tdnn-yesno/test_wavs/0_0_1_0_0_0_1_0.wav \
./sherpa-onnx-tdnn-yesno/test_wavs/0_0_1_0_0_1_1_1.wav
Please refer to
https://k2-fsa.github.io/sherpa/onnx/index.html
to install sherpa-onnx and to download non-streaming pre-trained models
used in this file.
"""
import argparse
import time
import wave
from pathlib import Path
from typing import List, Tuple, Dict, Iterable, TextIO, Union
import numpy as np
import sherpa_onnx
import soundfile as sf
from datasets import load_dataset
import logging
from collections import defaultdict
import kaldialign
from zhon.hanzi import punctuation
import string
# Union of Chinese (zhon.hanzi) and ASCII punctuation; consumed by
# remove_punctuation() below.
punctuation_all = punctuation + string.punctuation
# Convenience alias: path arguments accept either str or pathlib.Path.
Pathlike = Union[str, Path]
def remove_punctuation(text: str) -> str:
    """Strip all Chinese and ASCII punctuation from *text*, keeping apostrophes."""
    for ch in punctuation_all:
        if ch != '\'':
            text = text.replace(ch, '')
    return text
def store_transcripts(
    filename: Pathlike, texts: Iterable[Tuple[str, str, str]], char_level: bool = False
) -> None:
    """Save reference and predicted transcripts to a file.

    Args:
        filename: output file path.
        texts: iterable of ``(cut_id, reference, hypothesis)`` triples.
        char_level: when True, split each ref/hyp into a list of characters
            before writing (useful for CER-style inspection).

    Returns:
        None.
    """
    with open(filename, "w", encoding="utf8") as out:
        for cut_id, ref, hyp in texts:
            if char_level:
                ref, hyp = list("".join(ref)), list("".join(hyp))
            out.write(f"{cut_id}:\tref={ref}\n")
            out.write(f"{cut_id}:\thyp={hyp}\n")
def write_error_stats(
    f: TextIO,
    test_set_name: str,
    results: List[Tuple[str, str]],
    enable_log: bool = True,
    compute_CER: bool = False,
    sclite_mode: bool = False,
) -> float:
    """Write statistics based on predicted results and reference transcripts.

    It will write the following to the given file:

        - WER
        - number of insertions, deletions, substitutions, corrects and total
          reference words. For example::

              Errors: 23 insertions, 57 deletions, 212 substitutions, over 2606
              reference words (2337 correct)

        - The difference between the reference transcript and predicted result.
          An instance is given below::

              THE ASSOCIATION OF (EDISON->ADDISON) ILLUMINATING COMPANIES

          The above example shows that the reference word is `EDISON`,
          but it is predicted to `ADDISON` (a substitution error).

          Another example is::

              FOR THE FIRST DAY (SIR->*) I THINK

          The reference word `SIR` is missing in the predicted
          results (a deletion error).

    Args:
        f: opened text file the report is written into.
        test_set_name: label used in the console log line.
        results:
            An iterable of tuples. The first element is the cut_id, the second
            is the reference transcript and the third element is the predicted
            result.
        enable_log:
            If True, also print detailed WER to the console.
            Otherwise, it is written only to the given file.
        compute_CER: if True, ref/hyp are split into characters first so the
            returned rate is a CER rather than a WER. Note: mutates `results`
            in place in that case.
        sclite_mode: forwarded to kaldialign.align for sclite-compatible
            alignment.

    Returns:
        The total error rate in percent (e.g. ``12.34``), rounded to 2 digits.
    """
    subs: Dict[Tuple[str, str], int] = defaultdict(int)
    ins: Dict[str, int] = defaultdict(int)
    dels: Dict[str, int] = defaultdict(int)
    # `words` stores counts per word, as follows:
    #   corr, ref_sub, hyp_sub, ins, dels
    words: Dict[str, List[int]] = defaultdict(lambda: [0, 0, 0, 0, 0])
    num_corr = 0
    ERR = "*"  # placeholder emitted by kaldialign for an ins/del gap
    if compute_CER:
        # Re-tokenize each pair at character level (in place).
        for i, res in enumerate(results):
            cut_id, ref, hyp = res
            ref = list("".join(ref))
            hyp = list("".join(hyp))
            results[i] = (cut_id, ref, hyp)
    # First pass: tally substitution/insertion/deletion/correct counts.
    for _cut_id, ref, hyp in results:
        ali = kaldialign.align(ref, hyp, ERR, sclite_mode=sclite_mode)
        for ref_word, hyp_word in ali:
            if ref_word == ERR:
                ins[hyp_word] += 1
                words[hyp_word][3] += 1
            elif hyp_word == ERR:
                dels[ref_word] += 1
                words[ref_word][4] += 1
            elif hyp_word != ref_word:
                subs[(ref_word, hyp_word)] += 1
                words[ref_word][1] += 1
                words[hyp_word][2] += 1
            else:
                words[ref_word][0] += 1
                num_corr += 1
    # NOTE(review): raises ZeroDivisionError if `results` is empty — callers
    # guard against that (see main()).
    ref_len = sum([len(r) for _, r, _ in results])
    sub_errs = sum(subs.values())
    ins_errs = sum(ins.values())
    del_errs = sum(dels.values())
    tot_errs = sub_errs + ins_errs + del_errs
    tot_err_rate = "%.2f" % (100.0 * tot_errs / ref_len)
    if enable_log:
        logging.info(
            f"[{test_set_name}] %WER {tot_errs / ref_len:.2%} "
            f"[{tot_errs} / {ref_len}, {ins_errs} ins, "
            f"{del_errs} del, {sub_errs} sub ]"
        )
    print(f"%WER = {tot_err_rate}", file=f)
    print(
        f"Errors: {ins_errs} insertions, {del_errs} deletions, "
        f"{sub_errs} substitutions, over {ref_len} reference "
        f"words ({num_corr} correct)",
        file=f,
    )
    print(
        "Search below for sections starting with PER-UTT DETAILS:, "
        "SUBSTITUTIONS:, DELETIONS:, INSERTIONS:, PER-WORD STATS:",
        file=f,
    )
    print("", file=f)
    print("PER-UTT DETAILS: corr or (ref->hyp) ", file=f)
    # Second pass: per-utterance alignment dump, merging runs of adjacent
    # errors into single "(ref words->hyp words)" groups for readability.
    for cut_id, ref, hyp in results:
        ali = kaldialign.align(ref, hyp, ERR)
        combine_successive_errors = True
        if combine_successive_errors:
            # Wrap each aligned pair in lists so adjacent errors can be merged.
            ali = [[[x], [y]] for x, y in ali]
            for i in range(len(ali) - 1):
                if ali[i][0] != ali[i][1] and ali[i + 1][0] != ali[i + 1][1]:
                    ali[i + 1][0] = ali[i][0] + ali[i + 1][0]
                    ali[i + 1][1] = ali[i][1] + ali[i + 1][1]
                    ali[i] = [[], []]
            # Drop ERR placeholders and the emptied (merged-away) slots.
            ali = [
                [
                    list(filter(lambda a: a != ERR, x)),
                    list(filter(lambda a: a != ERR, y)),
                ]
                for x, y in ali
            ]
            ali = list(filter(lambda x: x != [[], []], ali))
            ali = [
                [
                    ERR if x == [] else " ".join(x),
                    ERR if y == [] else " ".join(y),
                ]
                for x, y in ali
            ]
        print(
            f"{cut_id}:\t"
            + " ".join(
                (
                    ref_word if ref_word == hyp_word else f"({ref_word}->{hyp_word})"
                    for ref_word, hyp_word in ali
                )
            ),
            file=f,
        )
    print("", file=f)
    print("SUBSTITUTIONS: count ref -> hyp", file=f)
    for count, (ref, hyp) in sorted([(v, k) for k, v in subs.items()], reverse=True):
        print(f"{count} {ref} -> {hyp}", file=f)
    print("", file=f)
    print("DELETIONS: count ref", file=f)
    for count, ref in sorted([(v, k) for k, v in dels.items()], reverse=True):
        print(f"{count} {ref}", file=f)
    print("", file=f)
    print("INSERTIONS: count hyp", file=f)
    for count, hyp in sorted([(v, k) for k, v in ins.items()], reverse=True):
        print(f"{count} {hyp}", file=f)
    print("", file=f)
    print("PER-WORD STATS: word corr tot_errs count_in_ref count_in_hyp", file=f)
    # Sort words by their total error count, descending.
    for _, word, counts in sorted(
        [(sum(v[1:]), k, v) for k, v in words.items()], reverse=True
    ):
        (corr, ref_sub, hyp_sub, ins, dels) = counts
        tot_errs = ref_sub + hyp_sub + ins + dels
        ref_count = corr + ref_sub + dels
        hyp_count = corr + hyp_sub + ins
        print(f"{word} {corr} {tot_errs} {ref_count} {hyp_count}", file=f)
    return float(tot_err_rate)
def get_args():
    """Build and parse the command-line arguments for this decoding script.

    Note: although many model flags exist (transducer, whisper, NeMo, WeNet,
    tdnn), main() below asserts that only --paraformer is actually used.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument(
        "--tokens",
        type=str,
        help="Path to tokens.txt",
    )
    # --- Hotword biasing options ---
    parser.add_argument(
        "--hotwords-file",
        type=str,
        default="",
        help="""
        The file containing hotwords, one words/phrases per line, like
        HELLO WORLD
        你好世界
        """,
    )
    parser.add_argument(
        "--hotwords-score",
        type=float,
        default=1.5,
        help="""
        The hotword score of each token for biasing word/phrase. Used only if
        --hotwords-file is given.
        """,
    )
    parser.add_argument(
        "--modeling-unit",
        type=str,
        default="",
        help="""
        The modeling unit of the model, valid values are cjkchar, bpe, cjkchar+bpe.
        Used only when hotwords-file is given.
        """,
    )
    parser.add_argument(
        "--bpe-vocab",
        type=str,
        default="",
        help="""
        The path to the bpe vocabulary, the bpe vocabulary is generated by
        sentencepiece, you can also export the bpe vocabulary through a bpe model
        by `scripts/export_bpe_vocab.py`. Used only when hotwords-file is given
        and modeling-unit is bpe or cjkchar+bpe.
        """,
    )
    # --- Model file paths (one family per flag group) ---
    parser.add_argument(
        "--encoder",
        default="",
        type=str,
        help="Path to the encoder model",
    )
    parser.add_argument(
        "--decoder",
        default="",
        type=str,
        help="Path to the decoder model",
    )
    parser.add_argument(
        "--joiner",
        default="",
        type=str,
        help="Path to the joiner model",
    )
    parser.add_argument(
        "--paraformer",
        default="",
        type=str,
        help="Path to the model.onnx from Paraformer",
    )
    parser.add_argument(
        "--nemo-ctc",
        default="",
        type=str,
        help="Path to the model.onnx from NeMo CTC",
    )
    parser.add_argument(
        "--wenet-ctc",
        default="",
        type=str,
        help="Path to the model.onnx from WeNet CTC",
    )
    parser.add_argument(
        "--tdnn-model",
        default="",
        type=str,
        help="Path to the model.onnx for the tdnn model of the yesno recipe",
    )
    parser.add_argument(
        "--num-threads",
        type=int,
        default=1,
        help="Number of threads for neural network computation",
    )
    # --- Whisper-specific options ---
    parser.add_argument(
        "--whisper-encoder",
        default="",
        type=str,
        help="Path to whisper encoder model",
    )
    parser.add_argument(
        "--whisper-decoder",
        default="",
        type=str,
        help="Path to whisper decoder model",
    )
    parser.add_argument(
        "--whisper-language",
        default="",
        type=str,
        help="""It specifies the spoken language in the input audio file.
        Example values: en, fr, de, zh, jp.
        Available languages for multilingual models can be found at
        https://github.com/openai/whisper/blob/main/whisper/tokenizer.py#L10
        If not specified, we infer the language from the input audio file.
        """,
    )
    parser.add_argument(
        "--whisper-task",
        default="transcribe",
        choices=["transcribe", "translate"],
        type=str,
        help="""For multilingual models, if you specify translate, the output
        will be in English.
        """,
    )
    parser.add_argument(
        "--whisper-tail-paddings",
        default=-1,
        type=int,
        help="""Number of tail padding frames.
        We have removed the 30-second constraint from whisper, so you need to
        choose the amount of tail padding frames by yourself.
        Use -1 to use a default value for tail padding.
        """,
    )
    # --- Decoding options ---
    parser.add_argument(
        "--blank-penalty",
        type=float,
        default=0.0,
        help="""
        The penalty applied on blank symbol during decoding.
        Note: It is a positive value that would be applied to logits like
        this `logits[:, 0] -= blank_penalty` (suppose logits.shape is
        [batch_size, vocab] and blank id is 0).
        """,
    )
    parser.add_argument(
        "--decoding-method",
        type=str,
        default="greedy_search",
        help="Valid values are greedy_search and modified_beam_search",
    )
    # NOTE(review): type=bool on argparse does not parse "false" as False —
    # any non-empty string is truthy; confirm intended usage.
    parser.add_argument(
        "--debug",
        type=bool,
        default=False,
        help="True to show debug messages",
    )
    parser.add_argument(
        "--sample-rate",
        type=int,
        default=16000,
        help="""Sample rate of the feature extractor. Must match the one
        expected by the model. Note: The input sound files can have a
        different sample rate from this argument.""",
    )
    parser.add_argument(
        "--feature-dim",
        type=int,
        default=80,
        help="Feature dimension. Must match the one expected by the model",
    )
    parser.add_argument(
        "sound_files",
        type=str,
        nargs="+",
        help="The input sound file(s) to decode. Each file must be of WAVE"
        "format with a single channel, and each sample has 16-bit, "
        "i.e., int16_t. "
        "The sample rate of the file can be arbitrary and does not need to "
        "be 16 kHz",
    )
    # --- Output / evaluation options ---
    parser.add_argument(
        "--name",
        type=str,
        default="",
        help="The directory containing the input sound files to decode",
    )
    parser.add_argument(
        "--log-dir",
        type=str,
        default="",
        help="The directory containing the input sound files to decode",
    )
    parser.add_argument(
        "--label",
        type=str,
        default=None,
        help="wav_base_name label",
    )
    # Dataset related arguments for loading labels when label file is not provided
    parser.add_argument(
        "--dataset-name",
        type=str,
        default="yuekai/seed_tts_cosy2",
        help="Huggingface dataset name for loading labels",
    )
    parser.add_argument(
        "--split-name",
        type=str,
        default="wenetspeech4tts",
        help="Dataset split name for loading labels",
    )
    return parser.parse_args()
def assert_file_exists(filename: str):
    """Abort with a helpful message if *filename* is not an existing file.

    Fix: the f-string previously contained no placeholder, so the offending
    path was never shown in the assertion message.
    """
    assert Path(filename).is_file(), (
        f"{filename} does not exist!\n"
        "Please refer to "
        "https://k2-fsa.github.io/sherpa/onnx/pretrained_models/index.html to download it"
    )
def read_wave(wave_filename: str) -> Tuple[np.ndarray, int]:
    """Load a single-channel wave file.

    Args:
        wave_filename:
            Path to a wave file; it must be single channel. Any sample rate
            is accepted — the rate is returned alongside the samples.

    Returns:
        A tuple ``(samples, sample_rate)`` where ``samples`` is a 1-D
        float32 array with values normalized to [-1, 1].
    """
    data, rate = sf.read(wave_filename, dtype="float32")
    assert (
        data.ndim == 1
    ), f"Expected single channel, but got {data.ndim} channels."
    return data.astype(np.float32), rate
def normalize_text_alimeeting(text: str) -> str:
    """
    Text normalization similar to M2MeT challenge baseline.
    See: https://github.com/yufan-aslp/AliMeeting/blob/main/asr/local/text_normalize.pl
    """
    # `re` is not imported at module level in this file, hence the local import.
    import re
    text = text.replace('\u00A0', '')  # test_hard
    text = text.replace(" ", "")
    # Strip AliMeeting annotation placeholder tags.
    text = text.replace("<sil>", "")
    text = text.replace("<%>", "")
    text = text.replace("<->", "")
    text = text.replace("<$>", "")
    text = text.replace("<#>", "")
    text = text.replace("<_>", "")
    text = text.replace("<space>", "")
    text = text.replace("`", "")
    text = text.replace("&", "")
    text = text.replace(",", "")
    # Uppercase when any ASCII letter is present so English compares
    # case-insensitively.
    if re.search("[a-zA-Z]", text):
        text = text.upper()
    # NOTE(review): the replace() calls below look like full-width→half-width
    # character mappings in the upstream source; as written here several are
    # no-ops (e.g. replace("A", "A")) — confirm against the original encoding.
    text = text.replace("A", "A")
    text = text.replace("a", "A")
    text = text.replace("b", "B")
    text = text.replace("c", "C")
    text = text.replace("k", "K")
    text = text.replace("t", "T")
    text = text.replace(",", "")
    # Remove common CJK punctuation explicitly.
    text = text.replace("丶", "")
    text = text.replace("。", "")
    text = text.replace("、", "")
    text = text.replace("?", "")
    # Finally drop all remaining Chinese/ASCII punctuation.
    text = remove_punctuation(text)
    return text
def main():
    """Decode the given wave files with a Paraformer model and report WER/CER."""
    args = get_args()
    assert_file_exists(args.tokens)
    assert args.num_threads > 0, args.num_threads
    # This script path only supports Paraformer; all other model flags must
    # be empty.
    assert len(args.nemo_ctc) == 0, args.nemo_ctc
    assert len(args.wenet_ctc) == 0, args.wenet_ctc
    assert len(args.whisper_encoder) == 0, args.whisper_encoder
    assert len(args.whisper_decoder) == 0, args.whisper_decoder
    assert len(args.tdnn_model) == 0, args.tdnn_model
    assert_file_exists(args.paraformer)
    recognizer = sherpa_onnx.OfflineRecognizer.from_paraformer(
        paraformer=args.paraformer,
        tokens=args.tokens,
        num_threads=args.num_threads,
        sample_rate=args.sample_rate,
        feature_dim=args.feature_dim,
        decoding_method=args.decoding_method,
        debug=args.debug,
    )
    print("Started!")
    start_time = time.time()
    streams, results = [], []
    total_duration = 0
    # Feed files into streams and decode in batches.
    # NOTE(review): the flush condition `i % 10 == 0` also fires at i == 0,
    # so the first batch contains a single file — confirm intended.
    for i, wave_filename in enumerate(args.sound_files):
        assert_file_exists(wave_filename)
        samples, sample_rate = read_wave(wave_filename)
        duration = len(samples) / sample_rate
        total_duration += duration
        s = recognizer.create_stream()
        s.accept_waveform(sample_rate, samples)
        streams.append(s)
        if i % 10 == 0:
            recognizer.decode_streams(streams)
            results += [s.result.text for s in streams]
            streams = []
            print(f"Processed {i} files")
    # process the last batch
    if streams:
        recognizer.decode_streams(streams)
        results += [s.result.text for s in streams]
    end_time = time.time()
    print("Done!")
    # Map wave basename (stem) -> decoded text for later label matching.
    results_dict = {}
    for wave_filename, result in zip(args.sound_files, results):
        print(f"{wave_filename}\n{result}")
        print("-" * 10)
        wave_basename = Path(wave_filename).stem
        results_dict[wave_basename] = result
    elapsed_seconds = end_time - start_time
    rtf = elapsed_seconds / total_duration
    print(f"num_threads: {args.num_threads}")
    print(f"decoding_method: {args.decoding_method}")
    print(f"Wave duration: {total_duration:.3f} s")
    print(f"Elapsed time: {elapsed_seconds:.3f} s")
    print(
        f"Real time factor (RTF): {elapsed_seconds:.3f}/{total_duration:.3f} = {rtf:.3f}"
    )
    # Load labels either from file or from dataset
    labels_dict = {}
    if args.label:
        # Load labels from file (original functionality)
        print(f"Loading labels from file: {args.label}")
        with open(args.label, "r") as f:
            for line in f:
                # fields = line.strip().split(" ")
                # fields = [item for item in fields if item]
                # assert len(fields) == 4
                # prompt_text, prompt_audio, text, audio_path = fields
                fields = line.strip().split("|")
                fields = [item for item in fields if item]
                assert len(fields) == 4
                audio_path, prompt_text, prompt_audio, text = fields
                labels_dict[Path(audio_path).stem] = normalize_text_alimeeting(text)
    else:
        # Load labels from dataset (new functionality)
        print(f"Loading labels from dataset: {args.dataset_name}, split: {args.split_name}")
        # NOTE(review): the dataset name is derived from the split name and
        # silently overrides --dataset-name — confirm intended.
        if 'zero' in args.split_name:
            dataset_name = "yuekai/CV3-Eval"
        else:
            dataset_name = "yuekai/seed_tts_cosy2"
        dataset = load_dataset(
            dataset_name,
            split=args.split_name,
            trust_remote_code=True,
        )
        for item in dataset:
            audio_id = item["id"]
            labels_dict[audio_id] = normalize_text_alimeeting(item["target_text"])
        print(f"Loaded {len(labels_dict)} labels from dataset")
    # Perform evaluation if labels are available
    if labels_dict:
        final_results = []
        for key, value in results_dict.items():
            if key in labels_dict:
                final_results.append((key, labels_dict[key], value))
            else:
                print(f"Warning: No label found for {key}, skipping...")
        if final_results:
            store_transcripts(
                filename=f"{args.log_dir}/recogs-{args.name}.txt", texts=final_results
            )
            with open(f"{args.log_dir}/errs-{args.name}.txt", "w") as f:
                write_error_stats(f, "test-set", final_results, enable_log=True)
            with open(f"{args.log_dir}/errs-{args.name}.txt", "r") as f:
                print(f.readline())  # WER
                print(f.readline())  # Detailed errors
        else:
            print("No matching labels found for evaluation")
    else:
        print("No labels available for evaluation")
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "examples/grpo/cosyvoice2/scripts/offline-decode-files.py",
"license": "Apache License 2.0",
"lines": 647,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
FunAudioLLM/CosyVoice:examples/grpo/cosyvoice2/token2wav_asr_server.py | # SPDX-FileCopyrightText: Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytriton server for token2wav conversion and ASR"""
from datasets import load_dataset
from cosyvoice.cli.cosyvoice import CosyVoice2
from omnisense.models import OmniSenseVoiceSmall
from pytriton.proxy.types import Request
from pytriton.triton import Triton, TritonConfig
from pytriton.model_config import DynamicBatcher, ModelConfig, Tensor
from pytriton.decorators import batch
import argparse
import io
import logging
from typing import Any, List
import numpy as np
import torch
from scipy.signal import resample
import sys
import random
import re
from jiwer import wer
from pypinyin import lazy_pinyin, Style
from tn.chinese.normalizer import Normalizer as ZhNormalizer
# Chinese text normalizer (cached globally). Used to normalize both the
# ground-truth and the ASR hypothesis before the pinyin-level WER reward
# is computed.
zh_tn_model = ZhNormalizer(
    cache_dir="./cache",
    remove_erhua=False,
    remove_interjections=False,
    remove_puncts=True,
    overwrite_cache=True,
)
# NOTE(review): this path is appended after the cosyvoice import at the top
# of the file — presumably cosyvoice is importable without it; confirm.
sys.path.append("/workspace/CosyVoice/third_party/Matcha-TTS")
logger = logging.getLogger("token2wav_asr_server")
class _ASR_Server:
    """Wraps a single OmniSenseVoiceSmall model instance for Triton."""

    def __init__(self, device_id: int):
        # One SenseVoice-Small ASR model pinned to the given CUDA device.
        self._model = OmniSenseVoiceSmall("iic/SenseVoiceSmall", quantize=False, device_id=device_id)

    @batch
    def __call__(self, WAV: np.ndarray, WAV_LENS: np.ndarray, LANGUAGE: np.ndarray, TEXT_NORM: np.ndarray):
        """
        WAV: np.ndarray, WAV_LENS: np.ndarray
        LANGUAGE: np.ndarray, TEXTNORM: np.ndarray for backward compatibility, not used
        See: https://github.com/modelscope/FunASR/tree/main/runtime/triton_gpu
        """
        logger.debug("WAV: %s, WAV_LENS: %s, shapes: %s %s", type(WAV), type(WAV_LENS), WAV.shape, WAV_LENS.shape)
        # Trim each padded row to its true length before batching.
        wavs = [WAV[i, :WAV_LENS[i, 0]] for i in range(len(WAV))]
        results = self._model.transcribe_single_batch(
            wavs,
            language="zh",
            textnorm="woitn",
        )
        texts = [result.text for result in results]
        # Triton BYTES output: shape (batch, 1), utf-8 encoded.
        transcripts = np.char.encode(np.array(texts).reshape(-1, 1), "utf-8")
        return {"TRANSCRIPTS": transcripts}
def audio_decode_cosyvoice2(
    audio_tokens, prompt_text, prompt_speech_16k, codec_decoder
):
    """
    Generate audio from tokens with optional tone and prompt embedding.

    Args:
        audio_tokens: (1, T) tensor of speech-token IDs to vocode.
        prompt_text: transcript of the prompt audio (zero-shot voice prompt).
        prompt_speech_16k: prompt waveform tensor at 16 kHz.
        codec_decoder: a CosyVoice2 instance providing frontend/flow/hift.

    Returns:
        The synthesized waveform tensor from the HiFT vocoder
        (24 kHz per the frontend call below — TODO confirm).
    """
    # Build flow/hift inputs from the prompt; "empty" is a dummy tts text
    # because only the prompt-derived features are needed here.
    model_inputs_dict = codec_decoder.frontend.frontend_zero_shot(
        "empty", prompt_text, prompt_speech_16k, 24000
    )
    # Flow matching: speech tokens -> mel, conditioned on the prompt.
    tts_mel, _ = codec_decoder.model.flow.inference(
        token=audio_tokens.to(codec_decoder.model.device),
        token_len=torch.tensor([audio_tokens.shape[1]], dtype=torch.int32).to(
            codec_decoder.model.device
        ),
        prompt_token=model_inputs_dict["flow_prompt_speech_token"].to(
            codec_decoder.model.device
        ),
        prompt_token_len=torch.tensor(
            [model_inputs_dict["flow_prompt_speech_token_len"]], dtype=torch.int32
        ).to(codec_decoder.model.device),
        prompt_feat=model_inputs_dict["prompt_speech_feat"].to(
            codec_decoder.model.device
        ),
        prompt_feat_len=model_inputs_dict["prompt_speech_feat_len"].to(
            codec_decoder.model.device
        ),
        embedding=model_inputs_dict["flow_embedding"].to(codec_decoder.model.device),
        finalize=True,
    )
    # HiFT vocoder: mel -> waveform (empty cache source = no streaming state).
    audio_hat, _ = codec_decoder.model.hift.inference(
        speech_feat=tts_mel, cache_source=torch.zeros(1, 1, 0)
    )
    return audio_hat
def get_random_prompt_from_dataset(dataset):
    """Draw a random sample from *dataset* for zero-shot voice prompting.

    Returns a ``(prompt_text, prompt_speech_16k)`` pair, where the waveform
    is a (1, N) float tensor resampled to 16 kHz and the text has all
    spaces removed.
    """
    sample = dataset[random.randint(0, len(dataset) - 1)]
    # Extract the waveform and its native sampling rate.
    audio = sample["audio"]
    waveform = audio["array"]
    source_rate = audio["sampling_rate"]
    # Resample to 16 kHz when necessary.
    if source_rate != 16000:
        target_len = int(len(waveform) * (16000 / source_rate))
        waveform = resample(waveform, target_len)
    prompt_speech_16k = torch.from_numpy(waveform).float().unsqueeze(0)
    # Spaces in the transcript are dropped (Chinese text has no word spacing).
    prompt_text = sample["text"].replace(" ", "")
    return prompt_text, prompt_speech_16k
class _Token2Wav_ASR:
    """Token2wav + ASR reward worker for Triton: decodes speech tokens to
    audio with CosyVoice2, transcribes with SenseVoice, and scores the
    transcript against the ground-truth text."""

    def __init__(self, device_id: int):
        self.asr_model = OmniSenseVoiceSmall("iic/SenseVoiceSmall", quantize=False, device_id=device_id)
        # Prompt pool used for zero-shot voice cloning during token2wav.
        self.dataset = load_dataset("yuekai/aishell", "test", trust_remote_code=True)["test"]
        # Make sure the CosyVoice2 decoder lives on the same GPU as the ASR model
        # CosyVoice2 internally uses generic "cuda" device, so we first switch the
        # current CUDA context to the desired card before the object is created.
        # Afterwards, all parameters loaded with the generic "cuda" device will
        # reside on this GPU. We keep the selected id in `self.device_id` and
        # will set the context again for every forward call to avoid race
        # conditions when several instances are used in the same process.
        self.device_id = device_id
        # Construct the TTS codec decoder under the correct CUDA device context
        with torch.cuda.device(self.device_id):
            self.codec_decoder = CosyVoice2(
                "/workspace/CosyVoice2-0.5B", load_jit=True, load_trt=True, fp16=True
            )

    @batch
    def __call__(self, TOKENS: np.ndarray, TOKEN_LENS: np.ndarray, GT_TEXT: np.ndarray):
        """
        TOKENS: padded (batch, T) int array of speech-token IDs.
        TOKEN_LENS: (batch, 1) true lengths for each TOKENS row.
        GT_TEXT: BYTES array of ground-truth transcripts.
        Returns REWARDS (batch, 1) float32 and TRANSCRIPTS (batch, 1) bytes.
        """
        # Ensure the default CUDA device is set correctly for this invocation
        torch.cuda.set_device(self.device_id)
        if self.device_id == 0:
            print(f"device_id: {self.device_id}, TOKENS: {TOKENS.shape}, TOKEN_LENS: {TOKEN_LENS.shape}")
        # Trim padding from each row.
        tokens_list = [TOKENS[i, :TOKEN_LENS[i, 0]] for i in range(len(TOKENS))]
        # Decode ground-truth text strings (BYTES → str)
        if GT_TEXT.ndim == 2:
            gt_texts = [GT_TEXT[i, 0].decode("utf-8") for i in range(len(GT_TEXT))]
        else:
            gt_texts = [GT_TEXT[i].decode("utf-8") for i in range(len(GT_TEXT))]
        wavs = []
        for tokens in tokens_list:
            # Fresh random prompt per item for zero-shot synthesis.
            prompt_text, prompt_speech_16k = get_random_prompt_from_dataset(self.dataset)
            audio_tokens = torch.tensor(tokens, dtype=torch.long, device=self.asr_model.device).unsqueeze(0)
            audio_hat = audio_decode_cosyvoice2(
                audio_tokens,
                prompt_text,
                prompt_speech_16k,
                self.codec_decoder,
            )
            # resample to 16000 using soundfile
            audio_hat = audio_hat.squeeze(0).float().cpu()
            audio_hat = audio_hat.numpy()
            # 24 kHz synthesis output -> 16 kHz expected by SenseVoice.
            num_samples = int(len(audio_hat) * (16000 / 24000))
            audio_hat = resample(audio_hat, num_samples)
            wavs.append(audio_hat)
        results = self.asr_model.transcribe_single_batch(
            wavs,
            language="zh",
            textnorm="woitn",
        )
        texts = [result.text for result in results]
        # ---------------- Reward computation ----------------
        rewards = []
        for gt_text, hyp_text in zip(gt_texts, texts):
            # Normalize both sides, then compare at pinyin-token level.
            gt_norm = zh_tn_model.normalize(gt_text).lower()
            hyp_norm = zh_tn_model.normalize(hyp_text).lower()
            gt_pinyin = lazy_pinyin(
                gt_norm,
                style=Style.TONE3,
                tone_sandhi=True,
                neutral_tone_with_five=True,
            )
            hyp_pinyin = lazy_pinyin(
                hyp_norm,
                style=Style.TONE3,
                tone_sandhi=True,
                neutral_tone_with_five=True,
            )
            # Map pinyin WER to a [0, 1] reward: 1 - tanh(3 * WER), clamped.
            c = float(wer(" ".join(gt_pinyin), " ".join(hyp_pinyin)))
            reward_val = 1.0 - np.tanh(3.0 * c)
            reward_val = max(0.0, min(1.0, reward_val))
            rewards.append(reward_val)
            print(f"gt_text: {gt_text}, hyp_text: {hyp_text}, reward_val: {reward_val}")
        transcripts = np.char.encode(np.array(texts).reshape(-1, 1), "utf-8")
        rewards_arr = np.array(rewards, dtype=np.float32).reshape(-1, 1)
        return {"REWARDS": rewards_arr, "TRANSCRIPTS": transcripts}
def _infer_function_factory(device_ids: List[int], model_name: str):
    """Build one inference callable per requested device ID.

    ``sensevoice`` selects an ``_ASR_Server`` instance; any other model
    name selects a ``_Token2Wav_ASR`` instance.
    """
    server_cls = _ASR_Server if model_name == "sensevoice" else _Token2Wav_ASR
    return [server_cls(device_id=device_id) for device_id in device_ids]
def main():
    """Start a Triton server hosting either the SenseVoice ASR model or the
    token2wav + ASR reward model, with one model instance per listed device id.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--max-batch-size",
        type=int,
        default=32,
        help="Batch size of request.",
        required=False,
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--number-of-instances-per-device",
        type=int,
        default=1,
        help="Number of model instances to load.",
        required=False,
    )
    parser.add_argument(
        "--number-of-devices",
        type=int,
        default=8,
        help="Number of devices to use.",
    )
    parser.add_argument(
        "--model-name",
        type=str,
        default="token2wav_asr",
        choices=["token2wav_asr", "sensevoice"],
        help="Model name.",
    )
    args = parser.parse_args()
    log_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=log_level, format="%(asctime)s - %(levelname)s - %(name)s: %(message)s")
    triton_config = TritonConfig(
        http_port=8000,
        grpc_port=8001,
        metrics_port=8002,
    )
    # Repeating the device id list yields N instances per device.
    device_ids = list(range(args.number_of_devices))
    device_ids = device_ids * args.number_of_instances_per_device
    # Per-model tensor specs; everything else passed to bind() is shared, so
    # the two previous near-identical triton.bind branches are collapsed.
    if args.model_name == "sensevoice":
        inputs = [
            Tensor(name="WAV", dtype=np.float32, shape=(-1,)),
            Tensor(name="WAV_LENS", dtype=np.int32, shape=(-1,)),
            Tensor(name="LANGUAGE", dtype=np.int32, shape=(-1,)),
            Tensor(name="TEXT_NORM", dtype=np.int32, shape=(-1,)),
        ]
        outputs = [
            Tensor(name="TRANSCRIPTS", dtype=bytes, shape=(-1,)),
        ]
    else:
        inputs = [
            Tensor(name="TOKENS", dtype=np.int32, shape=(-1,)),
            Tensor(name="TOKEN_LENS", dtype=np.int32, shape=(-1,)),
            Tensor(name="GT_TEXT", dtype=bytes, shape=(-1,)),
        ]
        outputs = [
            Tensor(name="REWARDS", dtype=np.float32, shape=(-1,)),
            Tensor(name="TRANSCRIPTS", dtype=bytes, shape=(-1,)),
        ]
    with Triton(config=triton_config) as triton:
        # Fixed: the log previously always said "SenseVoice" even when the
        # token2wav_asr model was selected.
        logger.info("Loading %s model on device ids: %s", args.model_name, device_ids)
        triton.bind(
            model_name=args.model_name,
            infer_func=_infer_function_factory(device_ids, args.model_name),
            inputs=inputs,
            outputs=outputs,
            config=ModelConfig(
                max_batch_size=args.max_batch_size,
                batcher=DynamicBatcher(max_queue_delay_microseconds=10000),  # 10ms
            ),
            strict=True,
        )
        logger.info("Serving inference")
        triton.serve()
# Script entry point: parse CLI args and run the Triton serving loop.
if __name__ == "__main__":
    main()
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "examples/grpo/cosyvoice2/token2wav_asr_server.py",
"license": "Apache License 2.0",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/client_grpc.py | # Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
# 2023 Nvidia (authors: Yuekai Zhang)
# 2023 Recurrent.ai (authors: Songtao Shi)
# See LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script loads a dataset from HuggingFace and sends it to the server
for decoding, in parallel.
Usage:
num_task=2
# For offline F5-TTS
python3 client_grpc.py \
--server-addr localhost \
--model-name f5_tts \
--num-tasks $num_task \
--huggingface-dataset yuekai/seed_tts \
--split-name test_zh \
--log-dir ./log_concurrent_tasks_${num_task}
# For offline Spark-TTS-0.5B
python3 client_grpc.py \
--server-addr localhost \
--model-name spark_tts \
--num-tasks $num_task \
--huggingface-dataset yuekai/seed_tts \
--split-name wenetspeech4tts \
--log-dir ./log_concurrent_tasks_${num_task}
"""
import argparse
import asyncio
import json
import queue
import uuid
import functools
import os
import time
import types
from pathlib import Path
import numpy as np
import soundfile as sf
import tritonclient
import tritonclient.grpc.aio as grpcclient_aio
import tritonclient.grpc as grpcclient_sync
from tritonclient.utils import np_to_triton_dtype, InferenceServerException
class UserData:
    """Per-request state: a queue of streamed responses plus the timestamps
    needed to derive first/second-chunk latency."""

    def __init__(self):
        self._completed_requests = queue.Queue()
        self._first_chunk_time = None
        self._second_chunk_time = None
        self._start_time = None

    def record_start_time(self):
        """Mark the moment the request was issued."""
        self._start_time = time.time()

    def get_first_chunk_latency(self):
        """Seconds from request start to the first chunk, or None if unknown."""
        have_both = self._first_chunk_time and self._start_time
        return (self._first_chunk_time - self._start_time) if have_both else None

    def get_second_chunk_latency(self):
        """Seconds between the first and second chunks, or None if unknown."""
        have_both = self._first_chunk_time and self._second_chunk_time
        return (self._second_chunk_time - self._first_chunk_time) if have_both else None
def callback(user_data, result, error):
    """Record chunk-arrival timestamps on `user_data` and enqueue the
    result (or, on failure, the error object)."""
    if error:
        user_data._completed_requests.put(error)
        return
    now = time.time()
    # Only the first two successful chunks are timestamped.
    if user_data._first_chunk_time is None:
        user_data._first_chunk_time = now
    elif user_data._second_chunk_time is None:
        user_data._second_chunk_time = now
    user_data._completed_requests.put(result)
def stream_callback(user_data_map, result, error):
    """Demultiplex a shared-stream response to the UserData registered
    under its request id, then delegate to `callback`."""
    if error:
        print(f"An error occurred in the stream callback: {error}")
        return
    request_id = result.get_response().id
    if not request_id:
        return
    user_data = user_data_map.get(request_id)
    if user_data is None:
        print(f"Warning: Could not find user_data for request_id {request_id}")
    else:
        callback(user_data, result, error)
def write_triton_stats(stats, summary_file):
    """Summarize Triton inference statistics into a human-readable text file.

    Parameters
    ----------
    stats : dict
        Parsed statistics as returned by
        ``get_inference_statistics(..., as_json=True)``; all "ns"/"count"
        fields arrive as strings.
    summary_file : str
        Path of the text file to (over)write.
    """
    with open(summary_file, "w") as summary_f:
        model_stats = stats["model_stats"]
        for model_state in model_stats:
            # Models that never ran carry no "last_inference" timestamp.
            if "last_inference" not in model_state:
                continue
            summary_f.write(f"model name is {model_state['name']} \n")
            model_inference_stats = model_state["inference_stats"]
            total_queue_time_s = int(model_inference_stats["queue"]["ns"]) / 1e9
            total_infer_time_s = int(model_inference_stats["compute_infer"]["ns"]) / 1e9
            total_input_time_s = int(model_inference_stats["compute_input"]["ns"]) / 1e9
            total_output_time_s = int(model_inference_stats["compute_output"]["ns"]) / 1e9
            summary_f.write(
                f"queue time {total_queue_time_s:<5.2f} s, "
                f"compute infer time {total_infer_time_s:<5.2f} s, "
                f"compute input time {total_input_time_s:<5.2f} s, "
                f"compute output time {total_output_time_s:<5.2f} s \n"
            )
            model_batch_stats = model_state["batch_stats"]
            for batch in model_batch_stats:
                batch_size = int(batch["batch_size"])
                compute_input = batch["compute_input"]
                compute_output = batch["compute_output"]
                compute_infer = batch["compute_infer"]
                batch_count = int(compute_infer["count"])
                if batch_count == 0:
                    continue
                assert compute_infer["count"] == compute_output["count"] == compute_input["count"]
                compute_infer_time_ms = int(compute_infer["ns"]) / 1e6
                compute_input_time_ms = int(compute_input["ns"]) / 1e6
                compute_output_time_ms = int(compute_output["ns"]) / 1e6
                # Fixed typo in the emitted summary: "execuate" -> "execute".
                summary_f.write(
                    f"execute inference with batch_size {batch_size:<2} total {batch_count:<5} times, "
                    f"total_infer_time {compute_infer_time_ms:<9.2f} ms, "
                    f"avg_infer_time {compute_infer_time_ms:<9.2f}/{batch_count:<5}="
                    f"{compute_infer_time_ms / batch_count:.2f} ms, "
                    f"avg_infer_time_per_sample {compute_infer_time_ms:<9.2f}/{batch_count:<5}/{batch_size}="
                    f"{compute_infer_time_ms / batch_count / batch_size:.2f} ms \n"
                )
                summary_f.write(
                    f"input {compute_input_time_ms:<9.2f} ms, avg {compute_input_time_ms / batch_count:.2f} ms, "
                )
                summary_f.write(
                    f"output {compute_output_time_ms:<9.2f} ms, avg {compute_output_time_ms / batch_count:.2f} ms \n"
                )
def subtract_stats(stats_after, stats_before):
    """Subtracts two Triton inference statistics objects field by field.

    Returns a new statistics dict (neither input is mutated); Triton's
    string encoding of integer counters is preserved.
    """
    # Deep-copy via a JSON round-trip so the caller's dict stays untouched.
    stats_diff = json.loads(json.dumps(stats_after))
    before_by_name = {}
    for entry in stats_before["model_stats"]:
        before_by_name[entry["name"]] = {
            "version": entry["version"],
            "last_inference": entry.get("last_inference", 0),
            "inference_count": entry.get("inference_count", 0),
            "execution_count": entry.get("execution_count", 0),
            "inference_stats": entry.get("inference_stats", {}),
            "batch_stats": entry.get("batch_stats", []),
        }
    inference_keys = [
        "success", "fail", "queue", "compute_input",
        "compute_infer", "compute_output", "cache_hit", "cache_miss",
    ]
    batch_keys = ["compute_input", "compute_infer", "compute_output"]
    for after in stats_diff["model_stats"]:
        before = before_by_name.get(after["name"])
        if before is None:
            continue
        after["inference_count"] = str(
            int(after.get("inference_count", 0)) - int(before.get("inference_count", 0))
        )
        after["execution_count"] = str(
            int(after.get("execution_count", 0)) - int(before.get("execution_count", 0))
        )
        if "inference_stats" in after and "inference_stats" in before:
            stats_a = after["inference_stats"]
            stats_b = before["inference_stats"]
            for key in inference_keys:
                if key not in stats_a or key not in stats_b:
                    continue
                if "ns" in stats_a[key]:
                    stats_a[key]["ns"] = str(int(stats_a[key]["ns"]) - int(stats_b[key]["ns"]))
                if "count" in stats_a[key]:
                    stats_a[key]["count"] = str(int(stats_a[key]["count"]) - int(stats_b[key]["count"]))
        if "batch_stats" in after and "batch_stats" in before:
            before_by_bs = {b["batch_size"]: b for b in before["batch_stats"]}
            for batch_a in after["batch_stats"]:
                batch_b = before_by_bs.get(batch_a["batch_size"])
                if batch_b is None:
                    continue
                for key in batch_keys:
                    if key in batch_a and key in batch_b:
                        batch_a[key]["count"] = str(int(batch_a[key]["count"]) - int(batch_b[key]["count"]))
                        batch_a[key]["ns"] = str(int(batch_a[key]["ns"]) - int(batch_b[key]["ns"]))
    return stats_diff
def get_args():
    """Define and parse the benchmark client's command-line arguments."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # --- Server connection ---
    parser.add_argument("--server-addr", type=str, default="localhost", help="Address of the server")
    parser.add_argument("--server-port", type=int, default=8001, help="Grpc port of the triton server, default is 8001")
    # --- Input selection: single audio, HuggingFace dataset, or manifest ---
    parser.add_argument("--reference-audio", type=str, default=None, help="Path to a single audio file. It can't be specified at the same time with --manifest-dir")
    parser.add_argument("--reference-text", type=str, default="", help="")
    parser.add_argument("--target-text", type=str, default="", help="")
    parser.add_argument("--huggingface-dataset", type=str, default="yuekai/seed_tts", help="dataset name in huggingface dataset hub")
    parser.add_argument("--split-name", type=str, default="wenetspeech4tts", choices=["wenetspeech4tts", "test_zh", "test_en", "test_hard"], help="dataset split name, default is 'test'")
    parser.add_argument("--manifest-path", type=str, default=None, help="Path to the manifest dir which includes wav.scp trans.txt files.")
    # --- Model / benchmark configuration ---
    parser.add_argument("--model-name", type=str, default="f5_tts", choices=["f5_tts", "spark_tts", "cosyvoice2", "cosyvoice2_dit"], help="triton model_repo module name to request")
    parser.add_argument("--num-tasks", type=int, default=1, help="Number of concurrent tasks for sending")
    parser.add_argument("--log-interval", type=int, default=5, help="Controls how frequently we print the log.")
    parser.add_argument(
        "--compute-wer",
        action="store_true",
        default=False,
        help="""True to compute WER.
        """,
    )
    parser.add_argument("--log-dir", type=str, required=False, default="./tmp", help="log directory")
    parser.add_argument("--mode", type=str, default="offline", choices=["offline", "streaming"], help="Select offline or streaming benchmark mode.")
    parser.add_argument("--chunk-overlap-duration", type=float, default=0.1, help="Chunk overlap duration for streaming reconstruction (in seconds).")
    parser.add_argument("--use-spk2info-cache", type=str, default="False", help="Use spk2info cache for reference audio.")
    return parser.parse_args()
def load_audio(wav_path, target_sample_rate=16000):
    """Load a waveform and resample it to `target_sample_rate` (16 kHz only).

    `wav_path` may be a filesystem path, or a HuggingFace-datasets audio dict
    of the form {"array": ..., "sampling_rate": ...}.
    """
    assert target_sample_rate == 16000, "hard coding in server"
    if isinstance(wav_path, dict):
        waveform, sample_rate = wav_path["array"], wav_path["sampling_rate"]
    else:
        waveform, sample_rate = sf.read(wav_path)
    if sample_rate != target_sample_rate:
        from scipy.signal import resample
        target_len = int(len(waveform) * (target_sample_rate / sample_rate))
        waveform = resample(waveform, target_len)
    return waveform, target_sample_rate
def prepare_request_input_output(
    protocol_client,
    waveform,
    reference_text,
    target_text,
    sample_rate=16000,
    padding_duration: int = None,
    use_spk2info_cache: bool = False
):
    """Prepares inputs for Triton inference (offline or streaming)."""
    assert len(waveform.shape) == 1, "waveform should be 1D"
    lengths = np.array([[len(waveform)]], dtype=np.int32)
    if padding_duration:
        duration = len(waveform) / sample_rate
        # Estimate synthesis length by scaling the reference duration with the
        # ratio of target to reference text lengths.
        if reference_text:
            estimated_target_duration = duration / len(reference_text) * len(target_text)
        else:
            estimated_target_duration = duration
        required_total_samples = padding_duration * sample_rate * (
            (int(estimated_target_duration + duration) // padding_duration) + 1
        )
        # Zero-pad up to the next multiple of padding_duration seconds.
        samples = np.zeros((1, required_total_samples), dtype=np.float32)
        samples[0, : len(waveform)] = waveform
    else:
        samples = waveform.reshape(1, -1).astype(np.float32)

    def _bytes_input(tensor_name, value):
        # A [1, 1] BYTES tensor holding a single string.
        tensor = protocol_client.InferInput(tensor_name, [1, 1], "BYTES")
        tensor.set_data_from_numpy(np.array([value], dtype=object).reshape((1, 1)))
        return tensor

    wav_input = protocol_client.InferInput(
        "reference_wav", samples.shape, np_to_triton_dtype(samples.dtype)
    )
    wav_input.set_data_from_numpy(samples)
    wav_len_input = protocol_client.InferInput(
        "reference_wav_len", lengths.shape, np_to_triton_dtype(lengths.dtype)
    )
    wav_len_input.set_data_from_numpy(lengths)
    inputs = [
        wav_input,
        wav_len_input,
        _bytes_input("reference_text", reference_text),
        _bytes_input("target_text", target_text),
    ]
    outputs = [protocol_client.InferRequestedOutput("waveform")]
    if use_spk2info_cache:
        # Speaker info is cached server-side: only target_text is needed.
        inputs = inputs[-1:]
    return inputs, outputs
def run_sync_streaming_inference(
    sync_triton_client: tritonclient.grpc.InferenceServerClient,
    model_name: str,
    inputs: list,
    outputs: list,
    request_id: str,
    user_data: UserData,
    chunk_overlap_duration: float,
    save_sample_rate: int,
    audio_save_path: str,
):
    """Run one blocking streaming inference, reassemble the audio chunks and
    write the result to `audio_save_path`.

    Returns a tuple (total_latency_s, first_chunk_latency_s,
    second_chunk_latency_s, synthesized_duration_s), or
    (None, None, None, None) on server error / timeout.
    """
    start_time_total = time.time()
    user_data.record_start_time()
    sync_triton_client.async_stream_infer(
        model_name,
        inputs,
        request_id=request_id,
        outputs=outputs,
        enable_empty_final_response=True,
    )
    # Drain the response queue until the final (empty) response arrives.
    audios = []
    while True:
        try:
            result = user_data._completed_requests.get(timeout=200)
            if isinstance(result, InferenceServerException):
                print(f"Received InferenceServerException: {result}")
                return None, None, None, None
            response = result.get_response()
            final = response.parameters["triton_final_response"].bool_param
            if final is True:
                break
            audio_chunk = result.as_numpy("waveform").reshape(-1)
            if audio_chunk.size > 0:
                audios.append(audio_chunk)
            else:
                print("Warning: received empty audio chunk.")
        except queue.Empty:
            print(f"Timeout waiting for response for request id {request_id}")
            return None, None, None, None
    end_time_total = time.time()
    total_request_latency = end_time_total - start_time_total
    first_chunk_latency = user_data.get_first_chunk_latency()
    second_chunk_latency = user_data.get_second_chunk_latency()
    if audios:
        if model_name == "spark_tts":
            # spark_tts chunks overlap by chunk_overlap_duration seconds;
            # cross-fade adjacent chunks to avoid audible seams.
            # (An unreachable `if not audios` branch was removed here: this
            # path only runs when at least one chunk was received.)
            cross_fade_samples = int(chunk_overlap_duration * save_sample_rate)
            fade_out = np.linspace(1, 0, cross_fade_samples)
            fade_in = np.linspace(0, 1, cross_fade_samples)
            if len(audios) == 1:
                reconstructed_audio = audios[0]
            else:
                reconstructed_audio = audios[0][:-cross_fade_samples]
                for i in range(1, len(audios)):
                    cross_faded_overlap = (audios[i][:cross_fade_samples] * fade_in +
                                           audios[i - 1][-cross_fade_samples:] * fade_out)
                    middle_part = audios[i][cross_fade_samples:-cross_fade_samples]
                    reconstructed_audio = np.concatenate([reconstructed_audio, cross_faded_overlap, middle_part])
                reconstructed_audio = np.concatenate([reconstructed_audio, audios[-1][-cross_fade_samples:]])
            if reconstructed_audio is not None and reconstructed_audio.size > 0:
                actual_duration = len(reconstructed_audio) / save_sample_rate
                sf.write(audio_save_path, reconstructed_audio, save_sample_rate, "PCM_16")
            else:
                print("Warning: No audio chunks received or reconstructed.")
                actual_duration = 0
        else:
            # Non-overlapping chunks: plain concatenation.
            reconstructed_audio = np.concatenate(audios)
            actual_duration = len(reconstructed_audio) / save_sample_rate
            sf.write(audio_save_path, reconstructed_audio, save_sample_rate, "PCM_16")
    else:
        print("Warning: No audio chunks received.")
        actual_duration = 0
    return total_request_latency, first_chunk_latency, second_chunk_latency, actual_duration
async def send_streaming(
    manifest_item_list: list,
    name: str,
    server_url: str,
    protocol_client: types.ModuleType,
    log_interval: int,
    model_name: str,
    audio_save_dir: str = "./",
    save_sample_rate: int = 16000,
    chunk_overlap_duration: float = 0.1,
    padding_duration: int = None,
    use_spk2info_cache: bool = False,
):
    """Process one worker's share of items over a single gRPC stream.

    Opens a synchronous streaming client, sends each manifest item through
    `run_sync_streaming_inference` (offloaded to a thread so the event loop
    stays responsive), and collects per-item latency tuples.

    Returns (total_synthesized_duration_seconds, latency_data) where each
    latency_data entry is (total_latency, first_chunk_latency,
    second_chunk_latency, audio_duration).
    """
    total_duration = 0.0
    latency_data = []
    # NOTE(review): task_id is computed but unused in the streaming path.
    task_id = int(name[5:])
    sync_triton_client = None
    # Maps request id -> UserData so stream_callback can route responses.
    user_data_map = {}
    try:
        print(f"{name}: Initializing sync client for streaming...")
        sync_triton_client = grpcclient_sync.InferenceServerClient(url=server_url, verbose=False)
        sync_triton_client.start_stream(callback=functools.partial(stream_callback, user_data_map))
        print(f"{name}: Starting streaming processing for {len(manifest_item_list)} items.")
        for i, item in enumerate(manifest_item_list):
            if i % log_interval == 0:
                print(f"{name}: Processing item {i}/{len(manifest_item_list)}")
            try:
                waveform, sample_rate = load_audio(item["audio_filepath"], target_sample_rate=16000)
                reference_text, target_text = item["reference_text"], item["target_text"]
                inputs, outputs = prepare_request_input_output(
                    protocol_client,
                    waveform,
                    reference_text,
                    target_text,
                    sample_rate,
                    padding_duration=padding_duration,
                    use_spk2info_cache=use_spk2info_cache
                )
                request_id = str(uuid.uuid4())
                user_data = UserData()
                user_data_map[request_id] = user_data
                audio_save_path = os.path.join(audio_save_dir, f"{item['target_audio_path']}.wav")
                # Run the blocking streaming call in a worker thread.
                total_request_latency, first_chunk_latency, second_chunk_latency, actual_duration = await asyncio.to_thread(
                    run_sync_streaming_inference,
                    sync_triton_client,
                    model_name,
                    inputs,
                    outputs,
                    request_id,
                    user_data,
                    chunk_overlap_duration,
                    save_sample_rate,
                    audio_save_path
                )
                if total_request_latency is not None:
                    print(
                        f"{name}: Item {i} - First Chunk Latency: {first_chunk_latency:.4f}s, "
                        f"Second Chunk Latency: {second_chunk_latency if second_chunk_latency is not None else 'N/A'}, "
                        f"Total Latency: {total_request_latency:.4f}s, Duration: {actual_duration:.4f}s"
                    )
                    latency_data.append((total_request_latency, first_chunk_latency, second_chunk_latency, actual_duration))
                    total_duration += actual_duration
                else:
                    print(f"{name}: Item {i} failed.")
                # NOTE(review): if the inference call raises, this cleanup is
                # skipped and the user_data_map entry leaks — confirm intended.
                del user_data_map[request_id]
            except FileNotFoundError:
                print(f"Error: Audio file not found for item {i}: {item['audio_filepath']}")
            except Exception as e:
                print(f"Error processing item {i} ({item['target_audio_path']}): {e}")
                import traceback
                traceback.print_exc()
    finally:
        # Always tear the stream down, even after errors.
        if sync_triton_client:
            try:
                print(f"{name}: Closing stream and sync client...")
                sync_triton_client.stop_stream()
                sync_triton_client.close()
            except Exception as e:
                print(f"{name}: Error closing sync client: {e}")
    print(f"{name}: Finished streaming processing. Total duration synthesized: {total_duration:.4f}s")
    return total_duration, latency_data
async def send(
    manifest_item_list: list,
    name: str,
    triton_client: tritonclient.grpc.aio.InferenceServerClient,
    protocol_client: types.ModuleType,
    log_interval: int,
    model_name: str,
    padding_duration: int = None,
    audio_save_dir: str = "./",
    save_sample_rate: int = 16000,
    use_spk2info_cache: bool = False,
):
    """Process one worker's share of items with offline (non-streaming) infer.

    Each item is sent as a single request; the returned waveform is written
    to `audio_save_dir` as a 16-bit PCM wav.

    Returns (total_synthesized_duration_seconds, latency_data) where each
    latency_data entry is (request_latency_s, audio_duration_s).
    """
    total_duration = 0.0
    latency_data = []
    # Worker index parsed from the task name "task-<id>"; used to build
    # unique request ids below.
    task_id = int(name[5:])
    for i, item in enumerate(manifest_item_list):
        if i % log_interval == 0:
            print(f"{name}: {i}/{len(manifest_item_list)}")
        waveform, sample_rate = load_audio(item["audio_filepath"], target_sample_rate=16000)
        reference_text, target_text = item["reference_text"], item["target_text"]
        inputs, outputs = prepare_request_input_output(
            protocol_client,
            waveform,
            reference_text,
            target_text,
            sample_rate,
            padding_duration=padding_duration,
            use_spk2info_cache=use_spk2info_cache
        )
        # Unique per-task, per-item id (tasks are spaced 10 apart).
        sequence_id = 100000000 + i + task_id * 10
        start = time.time()
        response = await triton_client.infer(model_name, inputs, request_id=str(sequence_id), outputs=outputs)
        audio = response.as_numpy("waveform").reshape(-1)
        actual_duration = len(audio) / save_sample_rate
        end = time.time() - start
        audio_save_path = os.path.join(audio_save_dir, f"{item['target_audio_path']}.wav")
        sf.write(audio_save_path, audio, save_sample_rate, "PCM_16")
        latency_data.append((end, actual_duration))
        total_duration += actual_duration
    return total_duration, latency_data
def load_manifests(manifest_path):
    """Parse a pipe-separated manifest file into a list of item dicts.

    Each line is `utt|prompt_text|prompt_wav|gt_text`; relative wav paths
    are resolved against the manifest's directory.
    """
    base_dir = os.path.dirname(manifest_path)
    manifest_list = []
    with open(manifest_path, "r") as f:
        for line in f:
            fields = line.strip().split("|")
            assert len(fields) == 4
            utt, prompt_text, prompt_wav, gt_text = fields
            if not os.path.isabs(prompt_wav):
                prompt_wav = os.path.join(base_dir, prompt_wav)
            manifest_list.append(
                {
                    "audio_filepath": prompt_wav,
                    "reference_text": prompt_text,
                    "target_text": gt_text,
                    "target_audio_path": Path(utt).stem,
                }
            )
    return manifest_list
def split_data(data, k):
    """Split `data` into k contiguous chunks whose sizes differ by at most 1.

    If fewer than k items are available, k is reduced to len(data) so no
    empty chunks are produced.
    """
    n = len(data)
    if n < k:
        print(f"Warning: the length of the input list ({n}) is less than k ({k}). Setting k to {n}.")
        k = n
    base, extra = divmod(n, k)
    chunks = []
    start = 0
    for i in range(k):
        # The first `extra` chunks absorb one additional element each.
        end = start + base + (1 if i < extra else 0)
        chunks.append(data[start:end])
        start = end
    return chunks
async def main():
    """Drive the benchmark: build the work list, fan it out across concurrent
    tasks (offline or streaming), then report RTF / latency statistics and
    Triton server-side stats deltas to files under --log-dir."""
    args = get_args()
    url = f"{args.server_addr}:{args.server_port}"
    triton_client = None
    protocol_client = None
    # Offline mode shares one async client; streaming mode creates a sync
    # client per task inside send_streaming.
    if args.mode == "offline":
        print("Initializing gRPC client for offline mode...")
        triton_client = grpcclient_aio.InferenceServerClient(url=url, verbose=False)
        protocol_client = grpcclient_aio
    elif args.mode == "streaming":
        print("Initializing gRPC client for streaming mode...")
        protocol_client = grpcclient_sync
    else:
        raise ValueError(f"Invalid mode: {args.mode}")
    # Build the manifest: single audio > HuggingFace dataset > manifest file.
    if args.reference_audio:
        args.num_tasks = 1
        args.log_interval = 1
        manifest_item_list = [
            {
                "reference_text": args.reference_text,
                "target_text": args.target_text,
                "audio_filepath": args.reference_audio,
                "target_audio_path": "test",
            }
        ]
    elif args.huggingface_dataset:
        import datasets
        dataset = datasets.load_dataset(
            args.huggingface_dataset,
            split=args.split_name,
            trust_remote_code=True,
        )
        manifest_item_list = []
        for i in range(len(dataset)):
            manifest_item_list.append(
                {
                    "audio_filepath": dataset[i]["prompt_audio"],
                    "reference_text": dataset[i]["prompt_text"],
                    "target_audio_path": dataset[i]["id"],
                    "target_text": dataset[i]["target_text"],
                }
            )
    else:
        manifest_item_list = load_manifests(args.manifest_path)
    # Snapshot server-side stats before the run (best-effort).
    stats_client = None
    stats_before = None
    try:
        print("Initializing temporary async client for fetching stats...")
        stats_client = grpcclient_aio.InferenceServerClient(url=url, verbose=False)
        print("Fetching inference statistics before running tasks...")
        stats_before = await stats_client.get_inference_statistics(model_name="", as_json=True)
    except Exception as e:
        print(f"Could not retrieve statistics before running tasks: {e}")
    num_tasks = min(args.num_tasks, len(manifest_item_list))
    manifest_item_list = split_data(manifest_item_list, num_tasks)
    os.makedirs(args.log_dir, exist_ok=True)
    # CLI passes this flag as a string; normalize to bool.
    args.use_spk2info_cache = args.use_spk2info_cache == "True" or args.use_spk2info_cache == "true"
    tasks = []
    start_time = time.time()
    for i in range(num_tasks):
        if args.mode == "offline":
            task = asyncio.create_task(
                send(
                    manifest_item_list[i],
                    name=f"task-{i}",
                    triton_client=triton_client,
                    protocol_client=protocol_client,
                    log_interval=args.log_interval,
                    model_name=args.model_name,
                    audio_save_dir=args.log_dir,
                    padding_duration=1,
                    save_sample_rate=16000 if args.model_name == "spark_tts" else 24000,
                    use_spk2info_cache=args.use_spk2info_cache,
                )
            )
        elif args.mode == "streaming":
            task = asyncio.create_task(
                send_streaming(
                    manifest_item_list[i],
                    name=f"task-{i}",
                    server_url=url,
                    protocol_client=protocol_client,
                    log_interval=args.log_interval,
                    model_name=args.model_name,
                    audio_save_dir=args.log_dir,
                    padding_duration=10,
                    save_sample_rate=16000 if args.model_name == "spark_tts" else 24000,
                    chunk_overlap_duration=args.chunk_overlap_duration,
                    use_spk2info_cache=args.use_spk2info_cache,
                )
            )
        tasks.append(task)
    ans_list = await asyncio.gather(*tasks)
    end_time = time.time()
    elapsed = end_time - start_time
    # Aggregate per-task results.
    total_duration = 0.0
    latency_data = []
    for ans in ans_list:
        if ans:
            total_duration += ans[0]
            latency_data.extend(ans[1])
        else:
            print("Warning: A task returned None, possibly due to an error.")
    if total_duration == 0:
        print("Total synthesized duration is zero. Cannot calculate RTF or latency percentiles.")
        rtf = float('inf')
    else:
        # Real-time factor: wall-clock time / synthesized audio time.
        rtf = elapsed / total_duration
    s = f"Mode: {args.mode}\n"
    s += f"RTF: {rtf:.4f}\n"
    s += f"total_duration: {total_duration:.3f} seconds\n"
    s += f"({total_duration / 3600:.2f} hours)\n"
    s += f"processing time: {elapsed:.3f} seconds ({elapsed / 3600:.2f} hours)\n"
    if latency_data:
        if args.mode == "offline":
            # Offline entries are (request_latency, audio_duration) pairs.
            latency_list = [chunk_end for (chunk_end, chunk_duration) in latency_data]
            if latency_list:
                latency_ms = sum(latency_list) / float(len(latency_list)) * 1000.0
                latency_variance = np.var(latency_list, dtype=np.float64) * 1000.0
                s += f"latency_variance: {latency_variance:.2f}\n"
                s += f"latency_50_percentile_ms: {np.percentile(latency_list, 50) * 1000.0:.2f}\n"
                s += f"latency_90_percentile_ms: {np.percentile(latency_list, 90) * 1000.0:.2f}\n"
                s += f"latency_95_percentile_ms: {np.percentile(latency_list, 95) * 1000.0:.2f}\n"
                s += f"latency_99_percentile_ms: {np.percentile(latency_list, 99) * 1000.0:.2f}\n"
                s += f"average_latency_ms: {latency_ms:.2f}\n"
            else:
                s += "No latency data collected for offline mode.\n"
        elif args.mode == "streaming":
            # Streaming entries are (total, first_chunk, second_chunk, duration).
            total_latency_list = [total for (total, first, second, duration) in latency_data if total is not None]
            first_chunk_latency_list = [first for (total, first, second, duration) in latency_data if first is not None]
            second_chunk_latency_list = [second for (total, first, second, duration) in latency_data if second is not None]
            s += "\n--- Total Request Latency ---\n"
            if total_latency_list:
                avg_total_latency_ms = sum(total_latency_list) / len(total_latency_list) * 1000.0
                variance_total_latency = np.var(total_latency_list, dtype=np.float64) * 1000.0
                s += f"total_request_latency_variance: {variance_total_latency:.2f}\n"
                s += f"total_request_latency_50_percentile_ms: {np.percentile(total_latency_list, 50) * 1000.0:.2f}\n"
                s += f"total_request_latency_90_percentile_ms: {np.percentile(total_latency_list, 90) * 1000.0:.2f}\n"
                s += f"total_request_latency_95_percentile_ms: {np.percentile(total_latency_list, 95) * 1000.0:.2f}\n"
                s += f"total_request_latency_99_percentile_ms: {np.percentile(total_latency_list, 99) * 1000.0:.2f}\n"
                s += f"average_total_request_latency_ms: {avg_total_latency_ms:.2f}\n"
            else:
                s += "No total request latency data collected.\n"
            s += "\n--- First Chunk Latency ---\n"
            if first_chunk_latency_list:
                avg_first_chunk_latency_ms = sum(first_chunk_latency_list) / len(first_chunk_latency_list) * 1000.0
                variance_first_chunk_latency = np.var(first_chunk_latency_list, dtype=np.float64) * 1000.0
                s += f"first_chunk_latency_variance: {variance_first_chunk_latency:.2f}\n"
                s += f"first_chunk_latency_50_percentile_ms: {np.percentile(first_chunk_latency_list, 50) * 1000.0:.2f}\n"
                s += f"first_chunk_latency_90_percentile_ms: {np.percentile(first_chunk_latency_list, 90) * 1000.0:.2f}\n"
                s += f"first_chunk_latency_95_percentile_ms: {np.percentile(first_chunk_latency_list, 95) * 1000.0:.2f}\n"
                s += f"first_chunk_latency_99_percentile_ms: {np.percentile(first_chunk_latency_list, 99) * 1000.0:.2f}\n"
                s += f"average_first_chunk_latency_ms: {avg_first_chunk_latency_ms:.2f}\n"
            else:
                s += "No first chunk latency data collected (check for errors or if all requests failed before first chunk).\n"
            s += "\n--- Second Chunk Latency ---\n"
            if second_chunk_latency_list:
                avg_second_chunk_latency_ms = sum(second_chunk_latency_list) / len(second_chunk_latency_list) * 1000.0
                variance_second_chunk_latency = np.var(second_chunk_latency_list, dtype=np.float64) * 1000.0
                s += f"second_chunk_latency_variance: {variance_second_chunk_latency:.2f}\n"
                s += f"second_chunk_latency_50_percentile_ms: {np.percentile(second_chunk_latency_list, 50) * 1000.0:.2f}\n"
                s += f"second_chunk_latency_90_percentile_ms: {np.percentile(second_chunk_latency_list, 90) * 1000.0:.2f}\n"
                s += f"second_chunk_latency_95_percentile_ms: {np.percentile(second_chunk_latency_list, 95) * 1000.0:.2f}\n"
                s += f"second_chunk_latency_99_percentile_ms: {np.percentile(second_chunk_latency_list, 99) * 1000.0:.2f}\n"
                s += f"average_second_chunk_latency_ms: {avg_second_chunk_latency_ms:.2f}\n"
            else:
                s += "No second chunk latency data collected (check for errors or if all requests failed before second chunk).\n"
    else:
        s += "No latency data collected.\n"
    print(s)
    # Derive a report name from whichever input source was used.
    if args.manifest_path:
        name = Path(args.manifest_path).stem
    elif args.split_name:
        name = args.split_name
    elif args.reference_audio:
        name = Path(args.reference_audio).stem
    else:
        name = "results"
    with open(f"{args.log_dir}/rtf-{name}.txt", "w") as f:
        f.write(s)
    # Report the server-side stats delta for this run (best-effort).
    try:
        if stats_client and stats_before:
            print("Fetching inference statistics after running tasks...")
            stats_after = await stats_client.get_inference_statistics(model_name="", as_json=True)
            print("Calculating statistics difference...")
            stats = subtract_stats(stats_after, stats_before)
            print("Fetching model config...")
            metadata = await stats_client.get_model_config(model_name=args.model_name, as_json=True)
            write_triton_stats(stats, f"{args.log_dir}/stats_summary-{name}.txt")
            with open(f"{args.log_dir}/model_config-{name}.json", "w") as f:
                json.dump(metadata, f, indent=4)
        else:
            print("Stats client not available or initial stats were not fetched. Skipping stats reporting.")
    except Exception as e:
        print(f"Could not retrieve statistics or config: {e}")
    finally:
        if stats_client:
            try:
                print("Closing temporary async stats client...")
                await stats_client.close()
            except Exception as e:
                print(f"Error closing async stats client: {e}")
if __name__ == "__main__":
    # Wrap main() so unexpected exceptions are printed with a full traceback
    # instead of surfacing only through asyncio.run's teardown.
    async def run_main():
        try:
            await main()
        except Exception as e:
            print(f"An error occurred in main: {e}")
            import traceback
            traceback.print_exc()
    asyncio.run(run_main())
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/client_grpc.py",
"license": "Apache License 2.0",
"lines": 796,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/model_repo/audio_tokenizer/1/model.py | # Copyright 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import torch
from torch.utils.dlpack import to_dlpack
import triton_python_backend_utils as pb_utils
import os
import numpy as np
import s3tokenizer
torch.set_num_threads(1)
ORIGINAL_VOCAB_SIZE = 151663
class TritonPythonModel:
    """Triton Python model for audio tokenization.

    Takes reference audio input and extracts semantic speech tokens
    with s3tokenizer, shifted into the LLM's extended vocabulary range.
    """

    def initialize(self, args):
        """Initialize the model.

        Args:
            args: Dictionary containing model configuration; 'model_config'
                is a JSON string holding the Triton model config.
        """
        config = json.loads(args['model_config'])
        params = {name: entry["string_value"] for name, entry in config['parameters'].items()}
        self.device = torch.device("cuda")
        onnx_path = os.path.join(params["model_dir"], "speech_tokenizer_v2.onnx")
        self.audio_tokenizer = s3tokenizer.load_model(onnx_path).to(self.device)

    def execute(self, requests):
        """Execute inference on the batched requests.

        Args:
            requests: List of inference requests.

        Returns:
            List of inference responses, one per request, each carrying a
            'prompt_speech_tokens' output tensor.
        """
        # Build one padded mel batch out of all requests so the tokenizer
        # quantizes them in a single call.
        mel_batch = []
        for req in requests:
            wav_np = pb_utils.get_input_tensor_by_name(req, "reference_wav").as_numpy()
            n_samples = pb_utils.get_input_tensor_by_name(req, "reference_wav_len").as_numpy().item()
            wav_gpu = torch.from_numpy(wav_np).to(self.device)
            # Trim padding and drop the leading batch dim before mel extraction.
            audio = wav_gpu[:, :n_samples].squeeze(0)
            mel_batch.append(s3tokenizer.log_mel_spectrogram(audio))
        padded_mels, mel_lens = s3tokenizer.padding(mel_batch)
        codes, code_lens = self.audio_tokenizer.quantize(padded_mels.to(self.device), mel_lens.to(self.device))
        # Shift token ids past the LLM's original text vocabulary.
        codes = codes.clone() + ORIGINAL_VOCAB_SIZE
        responses = []
        for idx in range(len(requests)):
            tokens = codes[idx, :code_lens[idx].item()]
            out_tensor = pb_utils.Tensor.from_dlpack("prompt_speech_tokens", to_dlpack(tokens))
            responses.append(pb_utils.InferenceResponse(output_tensors=[out_tensor]))
        return responses
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/model_repo/audio_tokenizer/1/model.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/model_repo/cosyvoice2/1/model.py | # Copyright 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import threading
import time
from uuid import uuid4
import numpy as np
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack
import triton_python_backend_utils as pb_utils
from transformers import AutoTokenizer
import torchaudio
from matcha.utils.audio import mel_spectrogram
ORIGINAL_VOCAB_SIZE = 151663
torch.set_num_threads(1)
class TritonPythonModel:
    """Triton Python model orchestrating the CosyVoice2 TTS pipeline.

    Coordinates three other Triton models: audio_tokenizer (reference audio ->
    prompt speech tokens), tensorrt_llm (text + prompt -> semantic speech
    tokens) and token2wav (speech tokens -> waveform). In decoupled mode the
    waveform is synthesized and streamed chunk by chunk while the LLM is
    still generating.
    """

    def initialize(self, args):
        """Initialize the model.

        Args:
            args: Dictionary containing model configuration; the
                'model_config' entry is a JSON string with the Triton config.
        """
        self.logger = pb_utils.Logger
        # Parse model parameters
        self.model_config = json.loads(args['model_config'])
        parameters = self.model_config['parameters']
        model_params = {k: v["string_value"] for k, v in parameters.items()}
        self.logger.log_info(f"model_params:{model_params}")
        # Streaming chunk scheduling: "exponential" or "time_based".
        self.dynamic_chunk_strategy = model_params.get("dynamic_chunk_strategy", "exponential")
        self.logger.log_info(f"Using dynamic chunk strategy: {self.dynamic_chunk_strategy}")
        # Initialize tokenizer used to build the LLM prompt.
        llm_tokenizer_dir = model_params["llm_tokenizer_dir"]
        self.tokenizer = AutoTokenizer.from_pretrained(llm_tokenizer_dir)
        self.prompt_template = "<|sos|>{input_text}<|task_id|>"
        self.eos_token_id = self.tokenizer.convert_tokens_to_ids("<|eos1|>")
        self.device = torch.device("cuda")
        self.decoupled = pb_utils.using_decoupled_model_transaction_policy(self.model_config)
        # Streaming constants. token_frame_rate is used below as tokens per
        # second of audio; flow_pre_lookahead_len extra tokens are fed to the
        # flow decoder beyond each chunk boundary.
        self.token_frame_rate = 25
        self.flow_pre_lookahead_len = 3
        self.token_hop_len = 15
        # Pre-cached speaker info, used when a request carries no reference audio.
        spk_info_path = os.path.join(model_params["model_dir"], "spk2info.pt")
        if not os.path.exists(spk_info_path):
            raise ValueError(f"spk2info.pt not found in {model_params['model_dir']}")
        spk_info = torch.load(spk_info_path, map_location="cpu", weights_only=False)
        self.default_spk_info = spk_info["001"]

    def forward_llm(self, input_ids):
        """Generate speech tokens with the tensorrt_llm model.

        Sends an inference request to the (optionally decoupled) TensorRT-LLM
        model. In decoupled mode one token-id array is yielded per partial
        response; otherwise a single array with the full generation is
        yielded. Raises pb_utils.TritonModelException on any response error.

        Args:
            input_ids: torch tensor of shape [1, sequence_length] holding the
                prompt token IDs.

        Yields:
            numpy arrays of generated token IDs, trimmed to sequence_length.
        """
        # convert input_ids to numpy, with shape [1, sequence_length]
        input_ids = input_ids.cpu().numpy()
        max_tokens = 750
        input_dict = {
            "request_output_len": np.array([[max_tokens]], dtype=np.int32),
            "end_id": np.array([[self.eos_token_id]], dtype=np.int32),
            "pad_id": np.array([[self.eos_token_id]], dtype=np.int32),
            "streaming": np.array([[self.decoupled]], dtype=np.bool_),
            "runtime_top_p": np.array([[0.95]], dtype=np.float32),
            "runtime_top_k": np.array([[50]], dtype=np.int32),
            "temperature": np.array([[0.8]], dtype=np.float32),
            "repetition_penalty": np.array([[1.1]], dtype=np.float32),
            "random_seed": np.array([[42]], dtype=np.uint64),
            "input_ids": input_ids,
            "input_lengths": np.array([[input_ids.shape[1]]], dtype=np.int32),
        }
        # Convert inputs to Triton tensors
        input_tensor_list = [pb_utils.Tensor(k, v) for k, v in input_dict.items()]
        # Create and execute inference request
        llm_request = pb_utils.InferenceRequest(
            model_name="tensorrt_llm",
            requested_output_names=["output_ids", "sequence_length"],
            inputs=input_tensor_list,
        )
        llm_responses = llm_request.exec(decoupled=self.decoupled)
        if self.decoupled:
            for llm_response in llm_responses:
                if llm_response.has_error():
                    raise pb_utils.TritonModelException(llm_response.error().message())
                # Extract and process output
                output_ids = pb_utils.get_output_tensor_by_name(
                    llm_response, "output_ids").as_numpy()
                seq_lens = pb_utils.get_output_tensor_by_name(
                    llm_response, "sequence_length").as_numpy()
                # Get actual output IDs up to the sequence length
                yield output_ids[0][0][:seq_lens[0][0]]
        else:
            llm_response = llm_responses
            if llm_response.has_error():
                raise pb_utils.TritonModelException(llm_response.error().message())
            # Extract and process output
            output_ids = pb_utils.get_output_tensor_by_name(
                llm_response, "output_ids").as_numpy()
            seq_lens = pb_utils.get_output_tensor_by_name(
                llm_response, "sequence_length").as_numpy()
            # Get actual output IDs up to the sequence length
            yield output_ids[0][0][:seq_lens[0][0]]

    def forward_audio_tokenizer(self, wav, wav_len):
        """Run the audio_tokenizer model on the reference audio.

        Args:
            wav: Triton input tensor holding the reference waveform.
            wav_len: Triton input tensor holding its length in samples.

        Returns:
            prompt_speech_tokens tensor, moved to CPU.
        """
        inference_request = pb_utils.InferenceRequest(
            model_name='audio_tokenizer',
            requested_output_names=['prompt_speech_tokens'],
            inputs=[wav, wav_len]
        )
        inference_response = inference_request.exec()
        if inference_response.has_error():
            raise pb_utils.TritonModelException(inference_response.error().message())
        # Extract and convert output tensors
        prompt_speech_tokens = pb_utils.get_output_tensor_by_name(inference_response, 'prompt_speech_tokens')
        prompt_speech_tokens = torch.utils.dlpack.from_dlpack(prompt_speech_tokens.to_dlpack()).cpu()
        return prompt_speech_tokens

    def forward_speaker_embedding(self, wav):
        """Run the speaker_embedding model on the reference audio.

        Args:
            wav: Reference waveform tensor.

        Returns:
            prompt_spk_embedding tensor (kept on its original device).
        """
        inference_request = pb_utils.InferenceRequest(
            model_name='speaker_embedding',
            requested_output_names=['prompt_spk_embedding'],
            inputs=[pb_utils.Tensor.from_dlpack("reference_wav", to_dlpack(wav))]
        )
        inference_response = inference_request.exec()
        if inference_response.has_error():
            raise pb_utils.TritonModelException(inference_response.error().message())
        # Extract and convert output tensors
        prompt_spk_embedding = pb_utils.get_output_tensor_by_name(inference_response, 'prompt_spk_embedding')
        prompt_spk_embedding = torch.utils.dlpack.from_dlpack(prompt_spk_embedding.to_dlpack())
        return prompt_spk_embedding

    def forward_token2wav(
            self,
            target_speech_tokens: torch.Tensor,
            request_id: str,
            prompt_speech_tokens: torch.Tensor = None,
            prompt_speech_feat: torch.Tensor = None,
            prompt_spk_embedding: torch.Tensor = None,
            token_offset: int = None,
            finalize: bool = None) -> torch.Tensor:
        """Run the token2wav model to turn speech tokens into a waveform.

        token_offset and finalize are only sent together (streaming mode);
        the three prompt tensors are only sent when reference audio was
        supplied — otherwise token2wav uses its own default speaker.

        Args:
            target_speech_tokens: Target speech tokens tensor.
            request_id: Forwarded so token2wav can key its streaming cache.
            prompt_speech_tokens: Optional prompt speech tokens tensor.
            prompt_speech_feat: Optional prompt speech feat tensor.
            prompt_spk_embedding: Optional prompt speaker embedding tensor.
            token_offset: Number of tokens already synthesized (streaming).
            finalize: Whether this is the last chunk (streaming).

        Returns:
            Generated waveform tensor, moved to CPU.
        """
        target_speech_tokens_tensor = pb_utils.Tensor.from_dlpack("target_speech_tokens", to_dlpack(target_speech_tokens))
        inputs_tensor = [target_speech_tokens_tensor]
        if token_offset is not None:
            assert finalize is not None
            token_offset_tensor = pb_utils.Tensor("token_offset", np.array([[token_offset]], dtype=np.int32))
            finalize_tensor = pb_utils.Tensor("finalize", np.array([[finalize]], dtype=np.bool_))
            inputs_tensor.append(token_offset_tensor)
            inputs_tensor.append(finalize_tensor)
        if prompt_spk_embedding is not None:
            assert prompt_speech_feat is not None
            prompt_speech_tokens_tensor = pb_utils.Tensor.from_dlpack("prompt_speech_tokens", to_dlpack(prompt_speech_tokens))
            prompt_speech_feat_tensor = pb_utils.Tensor.from_dlpack("prompt_speech_feat", to_dlpack(prompt_speech_feat))
            prompt_spk_embedding_tensor = pb_utils.Tensor.from_dlpack("prompt_spk_embedding", to_dlpack(prompt_spk_embedding))
            inputs_tensor.extend([prompt_speech_tokens_tensor, prompt_speech_feat_tensor, prompt_spk_embedding_tensor])
        # Create and execute inference request
        inference_request = pb_utils.InferenceRequest(
            model_name='token2wav',
            requested_output_names=['waveform'],
            inputs=inputs_tensor,
            request_id=request_id,
        )
        inference_response = inference_request.exec()
        if inference_response.has_error():
            raise pb_utils.TritonModelException(inference_response.error().message())
        # Extract and convert output waveform
        waveform = pb_utils.get_output_tensor_by_name(inference_response, 'waveform')
        waveform = torch.utils.dlpack.from_dlpack(waveform.to_dlpack()).cpu()
        return waveform

    def parse_input(self, text, prompt_text, prompt_speech_tokens):
        """Build LLM input IDs: templated prompt+target text followed by the prompt speech tokens."""
        total_text = f"{prompt_text}{text}"
        prompt = self.prompt_template.format(input_text=total_text)
        input_ids = self.tokenizer.encode(prompt)
        input_ids = torch.tensor([input_ids], dtype=torch.int32)
        input_ids = torch.cat([input_ids, prompt_speech_tokens], dim=1)
        return input_ids

    def _extract_speech_feat(self, speech):
        """Compute the 80-bin mel spectrogram used as the flow-decoder prompt.

        Args:
            speech: Waveform tensor at 24 kHz (resampled by the caller).

        Returns:
            Mel feature tensor on self.device with a leading batch dim
            (assumes mel_spectrogram returns [1, num_mels, frames], giving
            [1, frames, num_mels] here — TODO confirm against matcha).
        """
        speech_feat = mel_spectrogram(
            speech,
            n_fft=1920,
            num_mels=80,
            sampling_rate=24000,
            hop_size=480,
            win_size=1920,
            fmin=0,
            fmax=8000,
        ).squeeze(dim=0).transpose(0, 1).to(self.device)
        speech_feat = speech_feat.unsqueeze(dim=0)
        return speech_feat

    def _llm_gen_thread(self, generated_ids_iter, semantic_token_ids_arr, llm_is_done_flag):
        """Drain the LLM generator into a shared list (runs on a worker thread).

        An empty yield is treated as end-of-stream. Sets llm_is_done_flag[0]
        once generation finishes so the streaming loop can flush.
        """
        for generated_ids in generated_ids_iter:
            generated_ids = generated_ids.tolist()
            if len(generated_ids) == 0:
                break
            semantic_token_ids_arr.extend(generated_ids)
        llm_is_done_flag[0] = True

    def execute(self, requests):
        """Execute inference on the batched requests.

        Args:
            requests: List of inference requests.

        Returns:
            List of inference responses containing generated audio in
            offline mode; in decoupled mode responses are pushed through
            each request's response sender instead.
        """
        responses = []
        for request in requests:
            request_id = request.request_id()
            # Extract input tensors
            wav = pb_utils.get_input_tensor_by_name(request, "reference_wav")
            # Process reference audio through audio tokenizer
            if wav is not None:
                wav_len = pb_utils.get_input_tensor_by_name(request, "reference_wav_len")
                prompt_speech_tokens = self.forward_audio_tokenizer(wav, wav_len)
                prompt_speech_tokens = prompt_speech_tokens.unsqueeze(0)
                wav_tensor = wav.as_numpy()
                wav_tensor = torch.from_numpy(wav_tensor)[:, :wav_len.as_numpy()[0][0]]
                # Resample the 16 kHz reference to 24 kHz for mel extraction.
                prompt_speech_resample = torchaudio.transforms.Resample(orig_freq=16000, new_freq=24000)(wav_tensor)
                speech_feat = self._extract_speech_feat(prompt_speech_resample)
                # Keep features and tokens length-consistent (2 mel frames per token).
                token_len = min(int(speech_feat.shape[1] / 2), prompt_speech_tokens.shape[-1])
                prompt_speech_feat = speech_feat[:, :2 * token_len].contiguous().half()
                prompt_speech_tokens = prompt_speech_tokens[:, :token_len].contiguous()
                reference_text = pb_utils.get_input_tensor_by_name(request, "reference_text").as_numpy()
                reference_text = reference_text[0][0].decode('utf-8')
                prompt_spk_embedding = self.forward_speaker_embedding(wav_tensor)
            else:
                # using pre-cached reference text
                reference_text = self.default_spk_info["prompt_text"]
                prompt_speech_tokens = self.default_spk_info["speech_token"] + ORIGINAL_VOCAB_SIZE
                prompt_speech_feat = None
                prompt_spk_embedding = None
            target_text = pb_utils.get_input_tensor_by_name(request, "target_text").as_numpy()
            target_text = target_text[0][0].decode('utf-8')
            # Prepare prompt for LLM
            input_ids = self.parse_input(
                text=target_text,
                prompt_text=reference_text,
                prompt_speech_tokens=prompt_speech_tokens,
            )
            # Generate semantic tokens with LLM
            generated_ids_iter = self.forward_llm(input_ids)
            token2wav_request_id = request_id or str(uuid4())
            if self.decoupled:
                response_sender = request.get_response_sender()
                # Collect tokens on a background thread so waveform chunks can
                # be synthesized and streamed while the LLM keeps generating.
                semantic_token_ids_arr = []
                llm_is_done_flag = [False]
                llm_thread = threading.Thread(
                    target=self._llm_gen_thread,
                    args=(generated_ids_iter, semantic_token_ids_arr, llm_is_done_flag)
                )
                llm_thread.start()
                token_offset, chunk_index = 0, 0
                start_time = time.time()
                this_token_hop_len = self.token_hop_len
                while True:
                    pending_num = len(semantic_token_ids_arr) - token_offset
                    if llm_is_done_flag[0]:
                        break
                    if pending_num >= this_token_hop_len + self.flow_pre_lookahead_len:
                        # Enough tokens for the next chunk plus flow lookahead.
                        this_tts_speech_token = semantic_token_ids_arr[:token_offset + this_token_hop_len + self.flow_pre_lookahead_len]
                        this_tts_speech_token = torch.tensor(this_tts_speech_token).unsqueeze(dim=0).to(torch.int32).to(self.device)
                        sub_tts_speech = self.forward_token2wav(
                            this_tts_speech_token, token2wav_request_id, prompt_speech_tokens,
                            prompt_speech_feat, prompt_spk_embedding, token_offset, False
                        )
                        audio_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(sub_tts_speech))
                        inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
                        response_sender.send(inference_response)
                        token_offset += this_token_hop_len
                        self.logger.log_info(f"chunk_index: {chunk_index}, current_token_hop_len: {this_token_hop_len}")
                        if self.dynamic_chunk_strategy == "exponential":
                            # Chunk size doubles with each chunk.
                            this_token_hop_len = self.token_frame_rate * (2 ** chunk_index)
                        elif self.dynamic_chunk_strategy == "time_based":
                            # see https://github.com/qi-hua/async_cosyvoice/blob/main/model.py#L306
                            # Grow the chunk based on how far the synthesized
                            # audio duration is ahead of wall-clock time.
                            cost_time = time.time() - start_time
                            duration = token_offset / self.token_frame_rate
                            if chunk_index > 0 and cost_time > 0:
                                avg_chunk_processing_time = cost_time / (chunk_index + 1)
                                if avg_chunk_processing_time > 0:
                                    multiples = (duration - cost_time) / avg_chunk_processing_time
                                    self.logger.log_info(f"multiples: {multiples}")
                                    next_pending_num = len(semantic_token_ids_arr) - token_offset
                                    if multiples > 4:
                                        this_token_hop_len = (next_pending_num // self.token_hop_len + 1) * self.token_hop_len
                                    elif multiples > 2:
                                        this_token_hop_len = (next_pending_num // self.token_hop_len) * self.token_hop_len
                                    else:
                                        this_token_hop_len = self.token_hop_len
                                    this_token_hop_len = max(self.token_hop_len, this_token_hop_len)
                        chunk_index += 1
                    else:
                        # Not enough tokens buffered yet; yield briefly.
                        time.sleep(0.02)
                # Final chunk: flush all remaining tokens with finalize=True.
                this_tts_speech_token = torch.tensor(semantic_token_ids_arr).unsqueeze(dim=0).to(torch.int32).to(self.device)
                sub_tts_speech = self.forward_token2wav(this_tts_speech_token, token2wav_request_id, prompt_speech_tokens, prompt_speech_feat, prompt_spk_embedding, token_offset, True)
                audio_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(sub_tts_speech))
                inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
                response_sender.send(inference_response)
                llm_thread.join()
                response_sender.send(flags=pb_utils.TRITONSERVER_RESPONSE_COMPLETE_FINAL)
                self.logger.log_info("send tritonserver_response_complete_final to end")
            else:
                generated_ids = next(generated_ids_iter)
                # Validate BEFORE converting: torch.tensor(None) would raise
                # first, and after unsqueeze(0) len() is always 1, so the
                # original post-conversion check could never fire.
                if generated_ids is None or len(generated_ids) == 0:
                    raise pb_utils.TritonModelException("Generated IDs is None or empty")
                generated_ids = torch.tensor(generated_ids).unsqueeze(0).to(self.device)
                audio = self.forward_token2wav(generated_ids, token2wav_request_id, prompt_speech_tokens, prompt_speech_feat, prompt_spk_embedding)
                # Prepare response
                audio_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(audio))
                inference_response = pb_utils.InferenceResponse(output_tensors=[audio_tensor])
                responses.append(inference_response)
        if not self.decoupled:
            return responses
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/model_repo/cosyvoice2/1/model.py",
"license": "Apache License 2.0",
"lines": 374,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/model_repo/token2wav/1/model.py | # Copyright 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import logging
import torch
from torch.utils.dlpack import to_dlpack
from torch.nn import functional as F
import triton_python_backend_utils as pb_utils
from hyperpyyaml import load_hyperpyyaml
from cosyvoice.utils.common import fade_in_out
from cosyvoice.utils.file_utils import convert_onnx_to_trt, export_cosyvoice2_vllm
from cosyvoice.utils.common import TrtContextWrapper
from collections import defaultdict
import numpy as np
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
ORIGINAL_VOCAB_SIZE = 151663
torch.set_num_threads(1)
class CosyVoice2:
    """Loader that assembles a CosyVoice2Model from a checkpoint directory.

    Reads cosyvoice2.yaml, instantiates the flow/hift modules and optionally
    swaps in a TorchScript flow encoder and/or a TensorRT flow-decoder
    estimator.
    """

    def __init__(self, model_dir, load_jit=False, load_trt=False, fp16=False, trt_concurrent=1, device='cuda'):
        self.model_dir = model_dir
        self.fp16 = fp16
        yaml_path = f'{model_dir}/cosyvoice2.yaml'
        if not os.path.exists(yaml_path):
            raise ValueError(f'{yaml_path} not found!')
        with open(yaml_path, 'r') as f:
            configs = load_hyperpyyaml(
                f, overrides={'qwen_pretrain_path': os.path.join(model_dir, 'CosyVoice-BlankEN')})
        self.model = CosyVoice2Model(configs['flow'], configs['hift'], fp16, device)
        self.model.load(f'{model_dir}/flow.pt', f'{model_dir}/hift.pt')
        precision = 'fp16' if self.fp16 is True else 'fp32'
        if load_jit:
            self.model.load_jit(f'{model_dir}/flow.encoder.{precision}.zip')
        if load_trt:
            # The ONNX estimator is always exported in fp32; the TRT plan may
            # be built in fp16.
            self.model.load_trt(
                f'{model_dir}/flow.decoder.estimator.{precision}.mygpu.plan',
                f'{model_dir}/flow.decoder.estimator.fp32.onnx',
                trt_concurrent,
                self.fp16)
class CosyVoice2Model:
    """Flow + HiFT inference wrapper used by the token2wav Triton model.

    Holds the flow decoder (speech tokens -> mel) and the HiFT vocoder
    (mel -> waveform), plus a per-request cache that lets consecutive
    streaming chunks be cross-faded into a seamless waveform.
    """

    def __init__(self,
                 flow: torch.nn.Module,
                 hift: torch.nn.Module,
                 fp16: bool = False,
                 device: str = 'cuda'):
        """Store the flow/hift modules and set up streaming state.

        Args:
            flow: token-to-mel flow decoder module.
            hift: mel-to-waveform vocoder module.
            fp16: if True, the flow module is converted to half precision.
            device: device string the models will run on.
        """
        self.device = device
        self.flow = flow
        self.hift = hift
        self.fp16 = fp16
        if self.fp16 is True:
            self.flow.half()
        # streaming tts config
        self.token_hop_len = 25
        # Trailing mel frames kept as overlap between streaming chunks.
        self.mel_cache_len = 8
        # Waveform samples corresponding to the cached mel frames
        # (480 samples per frame — presumably matches the 480-sample mel hop
        # used upstream; confirm against feature extraction).
        self.source_cache_len = int(self.mel_cache_len * 480)
        # Hamming window used by fade_in_out to cross-fade chunk boundaries.
        self.speech_window = np.hamming(2 * self.source_cache_len)
        # Per-request (uuid-keyed) cache of mel/source/speech overlap;
        # missing keys read as None until the first chunk is synthesized.
        self.hift_cache_dict = defaultdict(lambda: None)

    def load_jit(self, flow_encoder_model):
        """Replace the flow encoder with a TorchScript module loaded from disk."""
        flow_encoder = torch.jit.load(flow_encoder_model, map_location=self.device)
        self.flow.encoder = flow_encoder

    def load(self, flow_model, hift_model):
        """Load flow and hift checkpoints and put both modules in eval mode on self.device."""
        self.flow.load_state_dict(torch.load(flow_model, map_location=self.device), strict=True)
        self.flow.to(self.device).eval()
        # in case hift_model is a hifigan model: strip the 'generator.' prefix
        # from its state-dict keys before loading.
        hift_state_dict = {k.replace('generator.', ''): v for k, v in torch.load(hift_model, map_location=self.device).items()}
        self.hift.load_state_dict(hift_state_dict, strict=True)
        self.hift.to(self.device).eval()

    def load_trt(self, flow_decoder_estimator_model, flow_decoder_onnx_model, trt_concurrent, fp16):
        """Replace the flow decoder estimator with a TensorRT engine.

        Builds the engine from the ONNX model first if the plan file is
        missing or empty, then deserializes it and wraps it in
        TrtContextWrapper.

        Args:
            flow_decoder_estimator_model: path to the TRT plan file.
            flow_decoder_onnx_model: path to the source ONNX model.
            trt_concurrent: number of concurrent TRT execution contexts.
            fp16: whether to build the engine in fp16.
        """
        assert torch.cuda.is_available(), 'tensorrt only supports gpu!'
        if not os.path.exists(flow_decoder_estimator_model) or os.path.getsize(flow_decoder_estimator_model) == 0:
            convert_onnx_to_trt(flow_decoder_estimator_model, self.get_trt_kwargs(), flow_decoder_onnx_model, fp16)
        del self.flow.decoder.estimator
        # Imported lazily so TensorRT is only required when load_trt is used.
        import tensorrt as trt
        with open(flow_decoder_estimator_model, 'rb') as f:
            estimator_engine = trt.Runtime(trt.Logger(trt.Logger.INFO)).deserialize_cuda_engine(f.read())
        assert estimator_engine is not None, 'failed to load trt {}'.format(flow_decoder_estimator_model)
        self.flow.decoder.estimator = TrtContextWrapper(estimator_engine, trt_concurrent=trt_concurrent, device=self.device)

    def get_trt_kwargs(self):
        """Return min/opt/max shape profiles and input names for the TRT build.

        Shapes are (batch, channels, time) for the four estimator inputs
        x/mask/mu/cond; the 80-channel dims presumably correspond to mel
        bins — confirm against the estimator's ONNX graph.
        """
        min_shape = [(2, 80, 4), (2, 1, 4), (2, 80, 4), (2, 80, 4)]
        opt_shape = [(2, 80, 500), (2, 1, 500), (2, 80, 500), (2, 80, 500)]
        max_shape = [(2, 80, 3000), (2, 1, 3000), (2, 80, 3000), (2, 80, 3000)]
        input_names = ["x", "mask", "mu", "cond"]
        return {'min_shape': min_shape, 'opt_shape': opt_shape, 'max_shape': max_shape, 'input_names': input_names}

    def token2wav(self, token, prompt_token, prompt_feat, embedding, token_offset, uuid, stream=False, finalize=False, speed=1.0):
        """Synthesize a waveform chunk from speech tokens.

        Runs the flow decoder over the FULL token sequence, drops the mel
        frames already synthesized (token_offset tokens' worth), then
        vocodes the remainder with HiFT, cross-fading against the cached
        tail of the previous chunk.

        Args:
            token: target speech tokens; length is read from token.shape[1].
            prompt_token / prompt_feat / embedding: speaker prompt inputs
                forwarded to the flow decoder.
            token_offset: tokens already converted in earlier chunks; the
                corresponding mel frames are trimmed from the front.
            uuid: key into hift_cache_dict for this request's streaming state.
            stream: streaming flag forwarded to flow.inference.
            finalize: False keeps an overlap cache for the next chunk; True
                emits everything (and supports `speed`).
            speed: playback speed; only valid when no streaming cache exists.

        Returns:
            tts_speech waveform tensor.
        """
        # NOTE(review): torch.cuda.amp.autocast is the legacy spelling
        # (torch.amp.autocast('cuda', ...) is current); left unchanged here.
        with torch.cuda.amp.autocast(self.fp16):
            tts_mel, _ = self.flow.inference(token=token.to(self.device),
                                             token_len=torch.tensor([token.shape[1]], dtype=torch.int32).to(self.device),
                                             prompt_token=prompt_token.to(self.device),
                                             prompt_token_len=torch.tensor([prompt_token.shape[1]], dtype=torch.int32).to(self.device),
                                             prompt_feat=prompt_feat.to(self.device),
                                             prompt_feat_len=torch.tensor([prompt_feat.shape[1]], dtype=torch.int32).to(self.device),
                                             embedding=embedding.to(self.device),
                                             streaming=stream,
                                             finalize=finalize)
            # Skip mel frames that earlier chunks already synthesized.
            tts_mel = tts_mel[:, :, token_offset * self.flow.token_mel_ratio:]
            # append hift cache: prepend the cached mel overlap so HiFT sees
            # continuous context across chunk boundaries.
            if self.hift_cache_dict[uuid] is not None:
                hift_cache_mel, hift_cache_source = self.hift_cache_dict[uuid]['mel'], self.hift_cache_dict[uuid]['source']
                tts_mel = torch.concat([hift_cache_mel, tts_mel], dim=2)
            else:
                hift_cache_source = torch.zeros(1, 1, 0)
            # keep overlap mel and hift cache
            if finalize is False:
                tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
                if self.hift_cache_dict[uuid] is not None:
                    # Cross-fade the new chunk against the cached speech tail.
                    tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
                # Cache the tail (mel / excitation source / speech) for the
                # next chunk, and withhold it from this chunk's output.
                self.hift_cache_dict[uuid] = {'mel': tts_mel[:, :, -self.mel_cache_len:],
                                              'source': tts_source[:, :, -self.source_cache_len:],
                                              'speech': tts_speech[:, -self.source_cache_len:]}
                tts_speech = tts_speech[:, :-self.source_cache_len]
            else:
                # Final chunk: emit everything; optional speed change via mel
                # interpolation (offline mode only).
                if speed != 1.0:
                    assert self.hift_cache_dict[uuid] is None, 'speed change only support non-stream inference mode'
                    tts_mel = F.interpolate(tts_mel, size=int(tts_mel.shape[2] / speed), mode='linear')
                tts_speech, tts_source = self.hift.inference(speech_feat=tts_mel, cache_source=hift_cache_source)
                if self.hift_cache_dict[uuid] is not None:
                    tts_speech = fade_in_out(tts_speech, self.hift_cache_dict[uuid]['speech'], self.speech_window)
            return tts_speech
class TritonPythonModel:
    """Triton Python model for the CosyVoice2 token-to-waveform stage.

    Converts semantic speech tokens (plus optional speaker prompt inputs)
    into audio waveforms via the flow decoder and HiFT vocoder, supporting
    both stateful streaming (token_offset/finalize present) and one-shot
    offline synthesis.
    """

    def initialize(self, args):
        """Initialize the model.

        Args:
            args: Dictionary containing model configuration; 'model_config'
                is a JSON string holding the Triton model config.
        """
        # Parse model parameters
        parameters = json.loads(args['model_config'])['parameters']
        model_params = {key: value["string_value"] for key, value in parameters.items()}
        model_dir = model_params["model_dir"]
        # Initialize device and vocoder
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        logger.info(f"Initializing vocoder from {model_dir} on {self.device}")
        self.token2wav_model = CosyVoice2(
            model_dir, load_jit=False, load_trt=True, fp16=True, device=self.device
        )
        # Pre-cached speaker ("001") used when a request has no prompt inputs.
        spk_info_path = os.path.join(model_dir, "spk2info.pt")
        if not os.path.exists(spk_info_path):
            raise ValueError(f"spk2info.pt not found in {model_dir}")
        spk_info = torch.load(spk_info_path, map_location="cpu", weights_only=False)
        self.default_spk_info = spk_info["001"]
        logger.info("Token2Wav initialized successfully")

    def execute(self, requests):
        """Execute inference on the batched requests.

        Args:
            requests: List of inference requests.

        Returns:
            List of inference responses containing generated waveforms.
        """
        responses = []
        # Process each request in batch
        for request in requests:
            target_speech_tokens_tensor = pb_utils.get_input_tensor_by_name(request, "target_speech_tokens").as_numpy()
            target_speech_tokens = torch.from_numpy(target_speech_tokens_tensor).to(self.device)
            # Optional speaker prompt; fall back to the pre-cached default
            # speaker when the request carries none.
            prompt_speech_tokens_tensor = pb_utils.get_input_tensor_by_name(request, "prompt_speech_tokens")
            if prompt_speech_tokens_tensor is not None:
                prompt_speech_tokens_tensor = prompt_speech_tokens_tensor.as_numpy()
                prompt_speech_feat_tensor = pb_utils.get_input_tensor_by_name(request, "prompt_speech_feat").as_numpy()
                prompt_spk_embedding_tensor = pb_utils.get_input_tensor_by_name(request, "prompt_spk_embedding").as_numpy()
                prompt_speech_tokens = torch.from_numpy(prompt_speech_tokens_tensor).to(self.device)
                prompt_speech_feat = torch.from_numpy(prompt_speech_feat_tensor).to(self.device)
                prompt_spk_embedding = torch.from_numpy(prompt_spk_embedding_tensor).to(self.device)
                # Prompt tokens arrive shifted into the LLM vocabulary; undo it.
                prompt_speech_tokens = prompt_speech_tokens - ORIGINAL_VOCAB_SIZE
            else:
                prompt_speech_tokens = self.default_spk_info["speech_token"].to(self.device)
                prompt_speech_feat = self.default_spk_info["speech_feat"].to(torch.float16).to(self.device)
                prompt_spk_embedding = self.default_spk_info["embedding"].to(torch.float16).to(self.device)
            # shift the speech tokens according to the original vocab size
            target_speech_tokens = target_speech_tokens - ORIGINAL_VOCAB_SIZE
            # We set token_offset as an optional input to support streaming/offline tts. It has to be None when offline tts.
            token_offset = pb_utils.get_input_tensor_by_name(request, "token_offset")
            if token_offset is not None:
                # Streaming path: stateful chunked synthesis keyed by request id.
                token_offset = token_offset.as_numpy().item()
                finalize = pb_utils.get_input_tensor_by_name(request, "finalize").as_numpy().item()
                stream = not finalize
                request_id = request.request_id()
                audio_hat = self.token2wav_model.model.token2wav(token=target_speech_tokens,
                                                                 prompt_token=prompt_speech_tokens,
                                                                 prompt_feat=prompt_speech_feat,
                                                                 embedding=prompt_spk_embedding,
                                                                 token_offset=token_offset,
                                                                 uuid=request_id,
                                                                 stream=stream,
                                                                 finalize=finalize)
                if finalize:
                    # Last chunk of this request: drop its streaming cache.
                    self.token2wav_model.model.hift_cache_dict.pop(request_id)
            else:
                # Offline path: run flow + hift once, with no chunk caching.
                tts_mel, _ = self.token2wav_model.model.flow.inference(
                    token=target_speech_tokens,
                    token_len=torch.tensor([target_speech_tokens.shape[1]], dtype=torch.int32).to(
                        self.device
                    ),
                    prompt_token=prompt_speech_tokens,
                    prompt_token_len=torch.tensor(
                        [prompt_speech_tokens.shape[1]], dtype=torch.int32
                    ).to(self.device),
                    prompt_feat=prompt_speech_feat,
                    prompt_feat_len=torch.tensor([prompt_speech_feat.shape[1]], dtype=torch.int32).to(self.device),
                    embedding=prompt_spk_embedding,
                    streaming=False,
                    finalize=True,
                )
                audio_hat, _ = self.token2wav_model.model.hift.inference(
                    speech_feat=tts_mel, cache_source=torch.zeros(1, 1, 0)
                )
            # (Removed a dead `audio_hat.squeeze(0).cpu().numpy()` assignment:
            # its result was never used and it forced a GPU->CPU sync.)
            wav_tensor = pb_utils.Tensor.from_dlpack("waveform", to_dlpack(audio_hat))
            inference_response = pb_utils.InferenceResponse(output_tensors=[wav_tensor])
            responses.append(inference_response)
        return responses
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/model_repo/token2wav/1/model.py",
"license": "Apache License 2.0",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/scripts/convert_checkpoint.py | import argparse
import os
import time
import traceback
from concurrent.futures import ThreadPoolExecutor, as_completed
from transformers import AutoConfig
import tensorrt_llm
from tensorrt_llm._utils import release_gc
from tensorrt_llm.logger import logger
from tensorrt_llm.mapping import Mapping
from tensorrt_llm.models import QWenForCausalLM
from tensorrt_llm.models.modeling_utils import QuantConfig
from tensorrt_llm.quantization import QuantAlgo
def parse_arguments():
    """Parse command-line options for converting a HF Qwen checkpoint to TensorRT-LLM.

    Returns:
        argparse.Namespace with model/parallelism/quantization settings.
        --model_dir is the only required flag; --moe_tp_size/--moe_ep_size
        default to -1 and are resolved later in main().
    """
    parser = argparse.ArgumentParser()
    # --- model location and parallelism layout ---
    parser.add_argument('--model_dir', type=str, default=None, required=True)
    parser.add_argument('--tp_size',
                        type=int,
                        default=1,
                        help='N-way tensor parallelism size')
    parser.add_argument('--pp_size',
                        type=int,
                        default=1,
                        help='N-way pipeline parallelism size')
    parser.add_argument('--cp_size',
                        type=int,
                        default=1,
                        help='N-way context parallelism size')
    parser.add_argument(
        '--dtype',
        type=str,
        default='auto',
        choices=['auto', 'float16', 'bfloat16', 'float32'],
        help="The data type for the model weights and activations if not quantized. "
        "If 'auto', the data type is automatically inferred from the source model; "
        "however, if the source dtype is float32, it is converted to float16.")
    # --- weight-only quantization flags ---
    parser.add_argument(
        '--use_weight_only',
        default=False,
        action="store_true",
        help='Quantize weights for the various GEMMs to INT4/INT8.'
        'See --weight_only_precision to set the precision')
    parser.add_argument(
        '--disable_weight_only_quant_plugin',
        default=False,
        action="store_true",
        help='By default, using plugin implementation for weight quantization. Enabling disable_weight_only_quant_plugin flag will use ootb implementation instead of plugin.'
        'You must also use --use_weight_only for that argument to have an impact.'
    )
    parser.add_argument(
        '--weight_only_precision',
        const='int8',
        type=str,
        nargs='?',
        default='int8',
        choices=['int8', 'int4', 'int4_gptq'],
        help='Define the precision for the weights when using weight-only quantization.'
        'You must also use --use_weight_only for that argument to have an impact.'
    )
    # --- SmoothQuant / calibration flags ---
    parser.add_argument(
        '--calib_dataset',
        type=str,
        default='ccdv/cnn_dailymail',
        help="The huggingface dataset name or the local directory of the dataset for calibration."
    )
    parser.add_argument(
        "--smoothquant",
        "-sq",
        type=float,
        default=None,
        help="Set the α parameter (see https://arxiv.org/pdf/2211.10438.pdf)"
        " to Smoothquant the model, and output int8 weights."
        " A good first try is 0.5. Must be in [0, 1]")
    parser.add_argument(
        '--per_channel',
        action="store_true",
        default=False,
        help='By default, we use a single static scaling factor for the GEMM\'s result. '
        'per_channel instead uses a different static scaling factor for each channel. '
        'The latter is usually more accurate, but a little slower.')
    parser.add_argument(
        '--per_token',
        action="store_true",
        default=False,
        help='By default, we use a single static scaling factor to scale activations in the int8 range. '
        'per_token chooses at run time, and for each token, a custom scaling factor. '
        'The latter is usually more accurate, but a little slower.')
    parser.add_argument(
        '--int8_kv_cache',
        default=False,
        action="store_true",
        help='By default, we use dtype for KV cache. int8_kv_cache chooses int8 quantization for KV'
    )
    # --- GPTQ/AWQ group quantization flags ---
    parser.add_argument(
        '--per_group',
        default=False,
        action="store_true",
        help='By default, we use a single static scaling factor to scale weights in the int4 range. '
        'per_group chooses at run time, and for each group, a custom scaling factor. '
        'The flag is built for GPTQ/AWQ quantization.')
    parser.add_argument('--group_size',
                        type=int,
                        default=128,
                        help='Group size used in GPTQ quantization.')
    parser.add_argument("--load_model_on_cpu", action="store_true")
    # --- embedding parallelism ---
    parser.add_argument(
        '--use_parallel_embedding',
        action="store_true",
        default=False,
        help='By default embedding parallelism is disabled. By setting this flag, embedding parallelism is enabled'
    )
    parser.add_argument(
        '--embedding_sharding_dim',
        type=int,
        default=0,
        choices=[0, 1],
        help='By default the embedding lookup table is sharded along vocab dimension (embedding_sharding_dim=0). '
        'To shard it along hidden dimension, set embedding_sharding_dim=1'
        'Note: embedding sharing is only enabled when embedding_sharding_dim = 0'
    )
    # --- output and worker configuration ---
    parser.add_argument('--output_dir',
                        type=str,
                        default='tllm_checkpoint',
                        help='The path to save the TensorRT-LLM checkpoint')
    parser.add_argument(
        '--workers',
        type=int,
        default=1,
        help='The number of workers for converting checkpoint in parallel')
    # --- MoE parallelism (resolved from -1 defaults in main()) ---
    parser.add_argument(
        '--moe_tp_size',
        type=int,
        default=-1,
        help='N-way tensor parallelism size for MOE, default is tp_size, which will do tp-only for MoE'
    )
    parser.add_argument(
        '--moe_ep_size',
        type=int,
        default=-1,
        help='N-way expert parallelism size for MOE, default is 1, which will do tp-only for MoE'
    )
    args = parser.parse_args()
    return args
def args_to_quant_config(args: argparse.Namespace) -> QuantConfig:
    '''Translate the quantization-related CLI flags into a QuantConfig.
    '''
    cfg = QuantConfig()
    if args.use_weight_only:
        # Plain weight-only quantization (int4_gptq is handled below).
        weight_only_algos = {'int8': QuantAlgo.W8A16, 'int4': QuantAlgo.W4A16}
        algo = weight_only_algos.get(args.weight_only_precision)
        if algo is not None:
            cfg.quant_algo = algo
    elif args.smoothquant:
        cfg.smoothquant_val = args.smoothquant
        # Pick the SmoothQuant plugin variant matching the requested
        # per-channel / per-token granularity.
        if args.per_channel and args.per_token:
            cfg.quant_algo = QuantAlgo.W8A8_SQ_PER_CHANNEL_PER_TOKEN_PLUGIN
        elif args.per_channel:
            cfg.quant_algo = QuantAlgo.W8A8_SQ_PER_CHANNEL_PER_TENSOR_PLUGIN
        elif args.per_token:
            cfg.quant_algo = QuantAlgo.W8A8_SQ_PER_TENSOR_PER_TOKEN_PLUGIN
        else:
            cfg.quant_algo = QuantAlgo.W8A8_SQ_PER_TENSOR_PLUGIN
    if args.int8_kv_cache:
        cfg.kv_cache_quant_algo = QuantAlgo.INT8
    if args.weight_only_precision == 'int4_gptq':
        # GPTQ settings override the algorithm chosen above.
        cfg.group_size = args.group_size
        cfg.has_zero_point = True
        cfg.pre_quant_scale = False
        cfg.quant_algo = QuantAlgo.W4A16_GPTQ
    return cfg
def update_quant_config_from_hf(quant_config, hf_config,
                                override_fields) -> tuple[QuantConfig, dict]:
    """Override quantization settings with those found in the HF config.

    Reads hf_config's 'quantization_config' section (AWQ or GPTQ) and
    mutates quant_config / override_fields accordingly.

    Returns:
        The (possibly updated) quant_config and override_fields.

    Raises:
        ValueError: for GPTQ checkpoints quantized with desc_act=True.
    """
    config_dict = hf_config.to_dict()
    hf_quant = config_dict.get('quantization_config')
    if hf_quant:
        method = hf_quant.get('quant_method')
        if method == 'awq':
            logger.info(
                "Load quantization configs from huggingface model_config.")
            quant_config.quant_algo = QuantAlgo.W4A16_GPTQ
            quant_config.group_size = hf_quant.get('group_size', 128)
            quant_config.has_zero_point = hf_quant.get('zero_point', False)
            override_fields.update({"use_autoawq": True})
        elif method == 'gptq':
            logger.info(
                "Load quantization configs from huggingface model_config.")
            if hf_quant.get('desc_act', False):
                raise ValueError("GPTQ with desc_act=True is not implemented!")
            quant_config.quant_algo = QuantAlgo.W4A16_GPTQ
            quant_config.group_size = hf_quant.get('group_size', 128)
            quant_config.has_zero_point = hf_quant.get('sym', False)
    return quant_config, override_fields
def args_to_build_options(args):
    """Collect the build-time option overrides taken from the CLI namespace."""
    options = {
        'use_parallel_embedding': args.use_parallel_embedding,
        'embedding_sharding_dim': args.embedding_sharding_dim,
        'disable_weight_only_quant_plugin': args.disable_weight_only_quant_plugin,
    }
    return options
def convert_and_save_hf(args):
    """Convert a HuggingFace checkpoint into TensorRT-LLM rank shards.

    Chooses between two paths:
      * calibration-based quantize() when SmoothQuant or int8 KV cache is
        requested (runs once for all ranks);
      * otherwise, a per-rank weight conversion, optionally parallelized
        across worker threads via execute().
    """
    model_dir = args.model_dir
    world_size = args.tp_size * args.pp_size
    # Need to convert the cli args to the key-value pairs and override them in the generate config dict.
    # Ideally these fields will be moved out of the config and pass them into build API, keep them here for compatibility purpose for now,
    # before the refactor is done.
    override_fields = {}
    override_fields.update(args_to_build_options(args))
    quant_config = args_to_quant_config(args)
    try:
        hf_config = AutoConfig.from_pretrained(model_dir,
                                               trust_remote_code=True)
        quant_config, override_fields = update_quant_config_from_hf(
            quant_config, hf_config, override_fields)
    except BaseException:
        # Best-effort: fall back to CLI-derived quantization settings when the
        # HF config cannot be loaded.
        logger.warning("AutoConfig cannot load the huggingface config.")
    if args.smoothquant is not None or args.int8_kv_cache:
        # Calibration-based quantization path (single invocation).
        mapping = Mapping(world_size=world_size,
                          tp_size=args.tp_size,
                          pp_size=args.pp_size,
                          moe_tp_size=args.moe_tp_size,
                          moe_ep_size=args.moe_ep_size,
                          cp_size=args.cp_size)
        QWenForCausalLM.quantize(args.model_dir,
                                 args.output_dir,
                                 dtype=args.dtype,
                                 mapping=mapping,
                                 quant_config=quant_config,
                                 calib_dataset=args.calib_dataset,
                                 **override_fields)
    else:
        def convert_and_save_rank(args, rank):
            # Per-rank conversion: load this rank's shard of the weights and
            # save it (only rank 0 writes the shared config).
            mapping = Mapping(world_size=world_size,
                              rank=rank,
                              tp_size=args.tp_size,
                              pp_size=args.pp_size,
                              moe_tp_size=args.moe_tp_size,
                              moe_ep_size=args.moe_ep_size)
            qwen = QWenForCausalLM.from_hugging_face(model_dir,
                                                     args.dtype,
                                                     mapping=mapping,
                                                     quant_config=quant_config,
                                                     **override_fields)
            # NOTE(review): cp fields are patched onto the mapping after
            # construction; attn_* sizes of -1 presumably mean "derive
            # automatically" — confirm against tensorrt_llm.Mapping.
            qwen.config.mapping.cp_size = args.cp_size
            qwen.config.mapping.attn_tp_size = -1
            qwen.config.mapping.attn_cp_size = -1
            qwen.config.mapping.world_size *= args.cp_size
            qwen.save_checkpoint(args.output_dir, save_config=(rank == 0))
            del qwen
        execute(args.workers, [convert_and_save_rank] * world_size, args)
        release_gc()
def execute(workers, func, args):
    """Run one conversion callable per rank, optionally in parallel.

    Args:
        workers: number of worker threads; 1 means run serially.
        func: list of callables, one per rank; each is invoked as f(args, rank).
        args: parsed CLI namespace forwarded to every callable.

    Raises:
        RuntimeError: if any rank fails in the parallel path; the first
            failure is chained as the cause. In serial mode exceptions from
            the callables propagate directly (unchanged behavior).
    """
    if workers == 1:
        for rank, f in enumerate(func):
            f(args, rank)
    else:
        with ThreadPoolExecutor(max_workers=workers) as p:
            futures = [p.submit(f, args, rank) for rank, f in enumerate(func)]
            exceptions = []
            for future in as_completed(futures):
                try:
                    future.result()
                except Exception as e:
                    traceback.print_exc()
                    exceptions.append(e)
        if exceptions:
            # Raise an explicit exception instead of `assert` so the check
            # survives `python -O`, and chain the first failure for context.
            raise RuntimeError(
                "Checkpoint conversion failed, please check error log."
            ) from exceptions[0]
def main():
    """CLI entry point: parse args, resolve MoE parallelism, run conversion."""
    print(tensorrt_llm.__version__)
    args = parse_arguments()
    # Resolve MoE parallelism defaults: pure tensor-parallel unless the user
    # explicitly configured moe_tp_size / moe_ep_size.
    if args.moe_tp_size == -1 and args.moe_ep_size == -1:
        args.moe_tp_size = args.tp_size
        args.moe_ep_size = 1
    elif args.moe_tp_size == -1:
        args.moe_tp_size = args.tp_size // args.moe_ep_size
    elif args.moe_ep_size == -1:
        args.moe_ep_size = args.tp_size // args.moe_tp_size
    assert (args.moe_tp_size * args.moe_ep_size == args.tp_size
            ), "moe_tp_size * moe_ep_size must equal to tp_size"
    start = time.time()
    os.makedirs(args.output_dir, exist_ok=True)
    assert args.model_dir is not None
    convert_and_save_hf(args)
    elapsed = time.strftime('%H:%M:%S', time.gmtime(time.time() - start))
    print(f'Total time of converting checkpoints: {elapsed}')
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/scripts/convert_checkpoint.py",
"license": "Apache License 2.0",
"lines": 298,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/scripts/fill_template.py | #!/usr/bin/env python3
from argparse import ArgumentParser
from string import Template
def split(string, delimiter):
    """Split a string on a delimiter, honoring backslash escapes.

    A backslash makes the following character literal (so an escaped
    delimiter does not split); a trailing lone backslash is dropped.

    Args:
        string (str): The string to split.
        delimiter (str): The delimiter character.

    Returns:
        list: The split pieces (always at least one element).
    """
    parts = []
    buf = []
    chars = iter(string)
    for ch in chars:
        if ch == "\\":
            # Consume the escaped character verbatim (empty at end of string).
            buf.append(next(chars, ""))
        elif ch == delimiter:
            parts.append("".join(buf))
            buf = []
        else:
            buf.append(ch)
    parts.append("".join(buf))
    return parts
def main(file_path, substitutions, in_place):
    """Fill $-style template variables in a Triton .pbtxt config file.

    Args:
        file_path: path of the template .pbtxt.
        substitutions: "name1:value1,name2:value2..." pairs; delimiters can
            be backslash-escaped (see split()).
        in_place: overwrite file_path instead of printing to stdout.
    """
    with open(file_path) as fp:
        template = Template(fp.read())

    # Defaults that any config may reference without passing explicitly.
    sub_dict = {
        "max_queue_size": 0,
        'max_queue_delay_microseconds': 0,
    }
    for pair in split(substitutions, ","):
        key, value = split(pair, ":")
        sub_dict[key] = value
        assert key in template.template, f"key '{key}' does not exist in the file {file_path}."

    rendered = template.safe_substitute(sub_dict)
    if in_place:
        with open(file_path, "w") as fp:
            fp.write(rendered)
    else:
        print(rendered)
# CLI: fill_template.py <file.pbtxt> name1:value1,name2:value2... [--in_place]
if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("file_path", help="path of the .pbtxt to modify")
    parser.add_argument(
        "substitutions",
        help="substitutions to perform, in the format variable_name_1:value_1,variable_name_2:value_2..."
    )
    parser.add_argument("--in_place",
                        "-i",
                        action="store_true",
                        help="do the operation in-place")
    args = parser.parse_args()
    main(**vars(args))
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/scripts/fill_template.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FunAudioLLM/CosyVoice:runtime/triton_trtllm/scripts/test_llm.py |
# SPDX-FileCopyrightText: Copyright (c) 2022-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import torch
import tensorrt_llm
from tensorrt_llm.logger import logger
from tensorrt_llm.runtime import ModelRunnerCpp
from transformers import AutoTokenizer
def parse_arguments(args=None):
    """Build the CLI parser and parse *args* (sys.argv when None)."""
    ap = argparse.ArgumentParser()
    ap.add_argument('--input_text', type=str, nargs='+',
                    default=["Born in north-east France, Soyer trained as a"])
    ap.add_argument('--tokenizer_dir', type=str,
                    default="meta-llama/Meta-Llama-3-8B-Instruct")
    ap.add_argument('--engine_dir', type=str,
                    default="meta-llama/Meta-Llama-3-8B-Instruct")
    ap.add_argument('--log_level', type=str, default="debug")
    # Sampling / runtime knobs.
    ap.add_argument('--kv_cache_free_gpu_memory_fraction', type=float, default=0.6)
    ap.add_argument('--temperature', type=float, default=0.8)
    ap.add_argument('--top_k', type=int, default=50)
    ap.add_argument('--top_p', type=float, default=0.95)
    return ap.parse_args(args=args)
def parse_input(tokenizer,
                input_text=None,
                prompt_template=None):
    """Tokenize each prompt and return a list of int32 id tensors.

    When prompt_template is given, each text is first inserted into it via
    str.format(input_text=...).
    """
    encoded = []
    for text in input_text:
        if prompt_template is not None:
            text = prompt_template.format(input_text=text)
        encoded.append(tokenizer.encode(text))
    batch_input_ids = [torch.tensor(ids, dtype=torch.int32) for ids in encoded]
    logger.debug(f"Input token ids (batch_size = {len(batch_input_ids)}):")
    for i, ids in enumerate(batch_input_ids):
        logger.debug(f"Request {i}: {ids.tolist()}")
    return batch_input_ids
def main(args):
    """Run one batched generation through a TensorRT-LLM engine and print it.

    Loads the tokenizer, wraps each input in the CosyVoice2 LLM prompt
    format, generates with ModelRunnerCpp, then decodes and prints the
    input and the generated continuation for every request.
    """
    runtime_rank = tensorrt_llm.mpi_rank()
    logger.set_level(args.log_level)
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
    # CosyVoice2 LLM prompt format; generation stops at <|eos1|>.
    prompt_template = "<|sos|>{input_text}<|task_id|>"
    end_id = tokenizer.convert_tokens_to_ids("<|eos1|>")
    batch_input_ids = parse_input(tokenizer=tokenizer,
                                  input_text=args.input_text,
                                  prompt_template=prompt_template)
    input_lengths = [x.size(0) for x in batch_input_ids]
    runner_kwargs = dict(
        engine_dir=args.engine_dir,
        rank=runtime_rank,
        max_output_len=1024,
        enable_context_fmha_fp32_acc=False,
        max_batch_size=len(batch_input_ids),
        max_input_len=max(input_lengths),
        kv_cache_free_gpu_memory_fraction=args.kv_cache_free_gpu_memory_fraction,
        cuda_graph_mode=False,
        gather_generation_logits=False,
    )
    runner = ModelRunnerCpp.from_dir(**runner_kwargs)
    with torch.no_grad():
        outputs = runner.generate(
            batch_input_ids=batch_input_ids,
            max_new_tokens=1024,
            end_id=end_id,
            pad_id=end_id,
            temperature=args.temperature,
            top_k=args.top_k,
            top_p=args.top_p,
            num_return_sequences=1,
            repetition_penalty=1.1,
            random_seed=42,
            streaming=False,
            output_sequence_lengths=True,
            output_generation_logits=False,
            return_dict=True,
            return_all_generated_tokens=False)
    torch.cuda.synchronize()
    # output_ids: (batch * num_return_sequences, num_beams, seq_len); this
    # script only supports a single beam and a single returned sequence.
    output_ids, sequence_lengths = outputs["output_ids"], outputs["sequence_lengths"]
    num_output_sents, num_beams, _ = output_ids.size()
    assert num_beams == 1
    beam = 0
    batch_size = len(input_lengths)
    num_return_sequences = num_output_sents // batch_size
    assert num_return_sequences == 1
    for i in range(batch_size * num_return_sequences):
        batch_idx = i // num_return_sequences
        seq_idx = i % num_return_sequences
        # Echo the decoded input, then the generated continuation (the
        # output tensor contains the prompt followed by new tokens).
        inputs = output_ids[i][0][:input_lengths[batch_idx]].tolist()
        input_text = tokenizer.decode(inputs)
        print(f'Input [Text {batch_idx}]: \"{input_text}\"')
        output_begin = input_lengths[batch_idx]
        output_end = sequence_lengths[i][beam]
        outputs = output_ids[i][beam][output_begin:output_end].tolist()
        output_text = tokenizer.decode(outputs)
        print(f'Output [Text {batch_idx}]: \"{output_text}\"')
        logger.debug(str(outputs))
# Script entry point.
if __name__ == '__main__':
    args = parse_arguments()
    main(args)
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "runtime/triton_trtllm/scripts/test_llm.py",
"license": "Apache License 2.0",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
FunAudioLLM/CosyVoice:examples/libritts/cosyvoice/local/prepare_reject_sample.py | import argparse
import logging
import os
from tqdm import tqdm
import torch
import torchaudio
from cosyvoice.cli.cosyvoice import CosyVoice2
from cosyvoice.utils.file_utils import load_wav
logger = logging.getLogger()
def main():
    """Generate 'reject' (negative) samples by re-synthesizing each utterance.

    Reads Kaldi-style wav.scp / text manifests from args.src_dir, runs
    zero-shot inference with the reference CosyVoice2 model using each
    utterance as its own prompt, and writes the synthesized wavs plus a new
    wav.scp to args.des_dir. Relies on the module-level `args` namespace
    populated in the __main__ block.
    """
    cosyvoice = CosyVoice2(args.ref_model)
    utt2wav, utt2text = {}, {}
    # "utt path" lines -> utt2wav.
    with open('{}/wav.scp'.format(args.src_dir)) as f:
        for l in f:
            l = l.split('\n')[0].split()
            utt2wav[l[0]] = l[1]
    # "utt word word ..." lines -> utt2text.
    with open('{}/text'.format(args.src_dir)) as f:
        for l in f:
            l = l.split('\n')[0].split()
            utt2text[l[0]] = ' '.join(l[1:])
    os.makedirs('{}/wav'.format(args.des_dir), exist_ok=True)
    with open('{}/wav.scp'.format(args.des_dir), 'w') as f:
        for utt, wav in tqdm(utt2wav.items()):
            prompt_speech_16k = load_wav(wav, 16000)
            # Skip prompts of 30 s or longer (samples at 16 kHz).
            if prompt_speech_16k.shape[1] >= 30 * 16000:
                continue
            speech_list = []
            # Use the utterance's own text as both prompt text and target text.
            for _, j in enumerate(cosyvoice.inference_zero_shot(utt2text[utt], utt2text[utt], prompt_speech_16k, stream=False, text_frontend=False)):
                speech_list.append(j['tts_speech'])
            negative_wav = os.path.abspath('{}/wav/{}'.format(args.des_dir, os.path.basename(wav)))
            torchaudio.save(negative_wav, torch.concat(speech_list, dim=1), cosyvoice.sample_rate, backend='soundfile')
            f.write('{} {}\n'.format(utt, negative_wav))
# CLI arguments land in a module-level `args`, which main() reads directly.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--src_dir',
                        type=str)
    parser.add_argument('--des_dir',
                        type=str)
    parser.add_argument('--ref_model',
                        type=str)
    args = parser.parse_args()
    main()
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "examples/libritts/cosyvoice/local/prepare_reject_sample.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
FunAudioLLM/CosyVoice:cosyvoice/vllm/cosyvoice2.py | # SPDX-License-Identifier: Apache-2.0
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/qwen2/modeling_qwen2.py
# Copyright 2024 The Qwen team.
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only Qwen2 model compatible with HuggingFace weights."""
from typing import Optional
from packaging.version import parse as vparse
import vllm
# vLLM-0.11.0+ only support V1 engine
VLLM_V1_ENGINE_ONLY: bool = vparse(vllm.__version__) >= vparse("0.11.0")
if VLLM_V1_ENGINE_ONLY:
from vllm.v1.sample.metadata import SamplingMetadata
from vllm.model_executor.models.qwen2 import *
class CosyVoice2ForCausalLM(nn.Module, SupportsLoRA, SupportsPP):
    """Qwen2-based causal LM serving as the CosyVoice2 speech-token LLM in vLLM.

    Mirrors vLLM's Qwen2ForCausalLM, with a biased lm_head and a
    compute_logits that adapts to both pre- and post-0.11.0 vLLM APIs.
    """

    # Maps fused parameter names to the original per-projection weights so
    # checkpoint loading can pack q/k/v and gate/up into single GEMMs.
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    def __init__(self, *, vllm_config: VllmConfig, prefix: str = ""):
        super().__init__()
        config = vllm_config.model_config.hf_config
        quant_config = vllm_config.quant_config
        lora_config = vllm_config.lora_config
        self.config = config
        self.lora_config = lora_config
        self.quant_config = quant_config
        self.model = Qwen2Model(vllm_config=vllm_config,
                                prefix=maybe_prefix(prefix, "model"))
        if get_pp_group().is_last_rank:
            if config.tie_word_embeddings:
                # Reuse the input embedding matrix as the output projection.
                self.lm_head = self.model.embed_tokens
            else:
                # bias=True here — presumably because the CosyVoice2 lm_head
                # checkpoint carries a bias; confirm against the checkpoint.
                self.lm_head = ParallelLMHead(config.vocab_size,
                                              config.hidden_size,
                                              True,
                                              quant_config=quant_config,
                                              prefix=maybe_prefix(
                                                  prefix, "lm_head"))
        else:
            # Intermediate pipeline-parallel ranks produce no logits.
            self.lm_head = PPMissingLayer()
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.make_empty_intermediate_tensors = (
            self.model.make_empty_intermediate_tensors)

    def get_input_embeddings(self, input_ids: torch.Tensor) -> torch.Tensor:
        """Embed token ids via the underlying Qwen2 model."""
        return self.model.get_input_embeddings(input_ids)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
    ) -> Union[torch.Tensor, IntermediateTensors]:
        """Run the transformer stack; returns hidden states (or PP tensors)."""
        hidden_states = self.model(input_ids, positions, intermediate_tensors,
                                   inputs_embeds)
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: Optional[SamplingMetadata] = None,
    ) -> Optional[torch.Tensor]:
        """Project hidden states to vocabulary logits.

        vLLM >= 0.11.0 (V1-engine only) dropped the sampling_metadata
        argument from the logits processor call, hence the version switch.
        """
        if VLLM_V1_ENGINE_ONLY:
            logits = self.logits_processor(self.lm_head, hidden_states,
                                           self.lm_head.bias)
        else:
            logits = self.logits_processor(self.lm_head, hidden_states,
                                           sampling_metadata, self.lm_head.bias)
        return logits

    def load_weights(self, weights: Iterable[tuple[str,
                                                   torch.Tensor]]) -> set[str]:
        """Load checkpoint weights; skip lm_head when embeddings are tied."""
        loader = AutoWeightsLoader(
            self,
            skip_prefixes=(["lm_head."]
                           if self.config.tie_word_embeddings else None),
        )
        return loader.load_weights(weights)
| {
"repo_id": "FunAudioLLM/CosyVoice",
"file_path": "cosyvoice/vllm/cosyvoice2.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
Genesis-Embodied-AI/Genesis:examples/kinematic/go2_kinematic.py | """
Example: KinematicEntity as a ghost reference motion.
Creates a Go2 quadruped as a physics-simulated entity alongside a second Go2 loaded as a KinematicEntity
(visualization-only ghost). The kinematic entity follows a simple sinusoidal joint trajectory while the physics robot
is free to fall and interact with the ground. This demonstrates how KinematicEntity can display a reference motion
without affecting simulation speed or physics.
"""
import argparse
import math
import os
import genesis as gs
AMPLITUDE = 0.3
FREQ = 2.0
def main():
    """Run the Go2 ghost-reference demo: a physics robot plus a kinematic ghost."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-v", "--vis", action="store_true", default=True)
    arg_parser.add_argument("-nv", "--no-vis", action="store_false", dest="vis")
    cli = arg_parser.parse_args()

    gs.init()
    scene = gs.Scene(show_viewer=cli.vis)

    # Ground plane.
    scene.add_entity(gs.morphs.Plane())

    # Physics-simulated Go2 (free to fall and contact the ground).
    robot = scene.add_entity(
        gs.morphs.URDF(
            file="urdf/go2/urdf/go2.urdf",
            pos=(0.0, 0.0, 0.42),
        ),
    )

    # Ghost Go2: kinematic material (visualization only), translucent blue.
    ghost = scene.add_entity(
        gs.morphs.URDF(
            file="urdf/go2/urdf/go2.urdf",
            pos=(0.0, 0.0, 0.42),
        ),
        material=gs.materials.Kinematic(),
        surface=gs.surfaces.Default(
            color=(0.4, 0.7, 1.0),
            opacity=0.5,
        ),
    )

    scene.build()

    # 12 actuated joints, grouped per leg (FR / FL / RR / RL).
    joint_names = [
        "FR_hip_joint",
        "FR_thigh_joint",
        "FR_calf_joint",
        "FL_hip_joint",
        "FL_thigh_joint",
        "FL_calf_joint",
        "RR_hip_joint",
        "RR_thigh_joint",
        "RR_calf_joint",
        "RL_hip_joint",
        "RL_thigh_joint",
        "RL_calf_joint",
    ]
    # Default standing pose:  FR              FL              RR              RL
    joint_angles = [0.0, 0.8, -1.5, 0.0, 0.8, -1.5, 0.0, 1.0, -1.5, 0.0, 1.0, -1.5]
    dofs_idx = [robot.get_joint(n).dofs_idx_local[0] for n in joint_names]

    robot.set_dofs_position(joint_angles, dofs_idx)
    ghost.set_dofs_position(joint_angles, dofs_idx)

    # Short run under pytest, full demo otherwise.
    n_steps = 5 if "PYTEST_VERSION" in os.environ else 500
    for step in range(n_steps):
        t = step * scene.sim_options.dt
        # Sinusoidal reference trajectory for the ghost's thigh joints.
        offset = AMPLITUDE * math.sin(2.0 * math.pi * FREQ * t)
        ref_angles = list(joint_angles)
        for idx, sign in ((1, 1.0), (4, 1.0), (7, -1.0), (10, -1.0)):
            ref_angles[idx] += sign * offset
        ghost.set_dofs_position(ref_angles, dofs_idx)
        scene.step()
# Script entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/kinematic/go2_kinematic.py",
"license": "Apache License 2.0",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:genesis/engine/materials/kinematic.py | from .base import Material
class Kinematic(Material):
    """Material marking an entity as display-only (ghost/reference).

    Entities carrying this material are rendered but excluded from physics
    simulation: no collision detection, no constraint solving, no dynamics.
    """
    pass
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/materials/kinematic.py",
"license": "Apache License 2.0",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/kinematic_solver.py | from typing import TYPE_CHECKING, Literal
import numpy as np
import torch
import genesis as gs
import genesis.utils.array_class as array_class
from genesis.engine.entities.rigid_entity import KinematicEntity
from genesis.engine.states import QueriedStates
from genesis.engine.states.solvers import KinematicSolverState
from genesis.options.solvers import RigidOptions, KinematicOptions
from genesis.utils.misc import (
qd_to_torch,
sanitize_indexed_tensor,
indices_to_mask,
broadcast_tensor,
assign_indexed_tensor,
)
from .base_solver import Solver
from .rigid.abd.misc import (
kernel_init_dof_fields,
kernel_init_link_fields,
kernel_init_joint_fields,
kernel_init_vvert_fields,
kernel_init_vgeom_fields,
kernel_init_entity_fields,
kernel_update_heterogeneous_links_vgeom,
kernel_update_vgeoms_render_T,
)
from .rigid.abd.forward_kinematics import (
kernel_forward_kinematics,
kernel_forward_velocity,
kernel_masked_forward_kinematics,
kernel_masked_forward_velocity,
kernel_update_vgeoms,
)
from .rigid.abd.accessor import (
kernel_get_kinematic_state,
kernel_get_state_grad,
kernel_set_kinematic_state,
kernel_set_links_pos_grad,
kernel_set_links_quat_grad,
kernel_set_dofs_position,
kernel_set_dofs_velocity,
kernel_set_dofs_velocity_grad,
kernel_set_dofs_zero_velocity,
kernel_set_links_pos,
kernel_set_links_quat,
kernel_set_qpos,
kernel_get_links_vel,
)
if TYPE_CHECKING:
from genesis.engine.scene import Scene
from genesis.engine.simulator import Simulator
IS_OLD_TORCH = tuple(map(int, torch.__version__.split(".")[:2])) < (2, 8)
class KinematicSolver(Solver):
"""
Base solver for articulated kinematic entities (FK, rendering, state get/set).
Provides the full build pipeline, field init methods, counter properties,
render methods, and IO sanitization shared by both KinematicSolver and RigidSolver.
RigidSolver extends this with physics (collision, constraints, dynamics).
"""
def __init__(self, scene: "Scene", sim: "Simulator", options: "KinematicOptions") -> None:
    """Initialize a kinematics-only solver.

    Accepts either RigidOptions (used as-is) or KinematicOptions, which is
    translated into a RigidOptions with every physics feature disabled.
    Any other options type raises via gs.raise_exception.
    """
    super().__init__(scene, sim, options)
    if isinstance(options, RigidOptions):
        self._options = options
    elif isinstance(options, KinematicOptions):
        # Kinematic entities never collide or solve constraints, so turn
        # every physics feature off when translating the options.
        self._options = RigidOptions(
            dt=options.dt,
            enable_collision=False,
            enable_joint_limit=False,
            enable_self_collision=False,
            enable_neutral_collision=False,
            enable_adjacent_collision=False,
            disable_constraint=True,
            max_collision_pairs=0,
            enable_multi_contact=False,
            enable_mujoco_compatibility=False,
            use_contact_island=False,
            use_hibernation=False,
            max_dynamic_constraints=0,
            iterations=0,
        )
    else:
        gs.raise_exception(f"Invalid options type: {type(options)}")
    self._enable_collision = False
    self._enable_mujoco_compatibility = False
    self._requires_grad = False
    self._enable_heterogeneous = False  # Set to True when any entity has heterogeneous morphs
    # No collider / constraint solver in the kinematic base.
    self.collider = None
    self.constraint_solver = None
    self.qpos = None
    # Lazy FK flags: positions/velocities are recomputed on demand.
    self._is_forward_pos_updated: bool = False
    self._is_forward_vel_updated: bool = False
    self._queried_states = QueriedStates()
# ------------------------------------------------------------------------------------
# ----------------------------------- add_entity -------------------------------------
# ------------------------------------------------------------------------------------
def add_entity(self, idx, material, morph, surface, visualize_contact=False, name=None):
    """Create a KinematicEntity for *morph* and register it with this solver.

    A tuple/list morph is unpacked into a primary morph plus heterogeneous
    variants; the presence of variants enables heterogeneous mode.
    """
    extra_morphs = []
    if isinstance(morph, (tuple, list)):
        morph, *extra_morphs = morph
    self._enable_heterogeneous |= bool(extra_morphs)

    morph._enable_mujoco_compatibility = self._enable_mujoco_compatibility

    # Offsets into the solver-wide element arrays start at the current counts.
    new_entity = KinematicEntity(
        scene=self._scene,
        solver=self,
        material=material,
        morph=morph,
        surface=surface,
        idx=idx,
        idx_in_solver=self.n_entities,
        link_start=self.n_links,
        joint_start=self.n_joints,
        q_start=self.n_qs,
        dof_start=self.n_dofs,
        vgeom_start=self.n_vgeoms,
        vvert_start=self.n_vverts,
        vface_start=self.n_vfaces,
        morph_heterogeneous=extra_morphs,
        name=name,
    )
    self._entities.append(new_entity)
    return new_entity
# ------------------------------------------------------------------------------------
# ------------------------------------ build -----------------------------------------
# ------------------------------------------------------------------------------------
def build(self):
    """Build every entity, freeze element counts, and initialize fields.

    Called once after the scene is assembled. The _init_* calls at the end
    must run after counts are frozen and the data manager exists.
    """
    super().build()
    self.n_envs = self.sim.n_envs
    self._B = self.sim._B
    self._para_level = self.sim._para_level
    for entity in self._entities:
        entity._build()
    # Snapshot element counts now that every entity is built.
    self._n_qs = self.n_qs
    self._n_dofs = self.n_dofs
    self._n_links = self.n_links
    self._n_joints = self.n_joints
    self._n_vgeoms = self.n_vgeoms
    self._n_vfaces = self.n_vfaces
    self._n_vverts = self.n_vverts
    self._n_entities = self.n_entities
    self._vgeoms = self.vgeoms
    self._links = self.links
    self._joints = self.joints
    # Collect "base" links: fixed root links and links attached via a free joint.
    base_links_idx = []
    for link in self.links:
        if link.parent_idx == -1 and link.is_fixed:
            base_links_idx.append(link.idx)
    for joint in self.joints:
        if joint.type == gs.JOINT_TYPE.FREE:
            base_links_idx.append(joint.link.idx)
    self._base_links_idx = torch.tensor(base_links_idx, dtype=gs.tc_int, device=gs.device)
    # Clamp counts to >= 1 so zero-sized element groups still allocate fields.
    self.n_qs_ = max(1, self.n_qs)
    self.n_dofs_ = max(1, self.n_dofs)
    self.n_links_ = max(1, self.n_links)
    self.n_joints_ = max(1, self.n_joints)
    self.n_vgeoms_ = max(1, self.n_vgeoms)
    self.n_vfaces_ = max(1, self.n_vfaces)
    self.n_vverts_ = max(1, self.n_vverts)
    self.n_entities_ = max(1, self.n_entities)
    # batch_links_info is required when heterogeneous simulation is used.
    # We must update options because get_links_info reads from solver._options.batch_links_info.
    if self._enable_heterogeneous:
        self._options.batch_links_info = True
    self._build_static_config()
    self._create_data_manager()
    self._init_dof_fields()
    self._init_vvert_fields()
    self._init_vgeom_fields()
    self._init_link_fields()
    self._process_heterogeneous_link_info()
    self._init_entity_fields()
    self._init_envs_offset()
    def _build_static_config(self):
        """Create the kernel-facing static config with every dynamics feature disabled."""
        # Static config with all physics disabled
        self._static_rigid_sim_config = array_class.StructRigidSimStaticConfig(
            backend=gs.backend,
            para_level=self.sim._para_level,
            requires_grad=False,
            use_hibernation=False,
            batch_links_info=False,
            batch_dofs_info=False,
            batch_joints_info=False,
            enable_heterogeneous=False,
            enable_mujoco_compatibility=False,
            enable_multi_contact=False,
            enable_collision=False,
            enable_joint_limit=False,
            box_box_detection=False,
            sparse_solve=False,
            integrator=gs.integrator.approximate_implicitfast,
            solver_type=0,
        )
    def _create_data_manager(self):
        """Allocate the kinematic-only data manager and expose its global buffers."""
        self.data_manager = array_class.DataManager(self, kinematic_only=True)
        self._rigid_global_info = self.data_manager.rigid_global_info
        self._rigid_adjoint_cache = self.data_manager.rigid_adjoint_cache
# ------------------------------------------------------------------------------------
# --------------------------------- hook methods -------------------------------------
# ------------------------------------------------------------------------------------
def _sanitize_joint_sol_params(self, sol_params):
"""Hook: sanitize joint constraint solver params. No-op in base (no constraints)."""
return sol_params
def _sanitize_geom_sol_params(self, sol_params):
"""Hook: sanitize geom constraint solver params. No-op in base (no constraints)."""
return sol_params
# ------------------------------------------------------------------------------------
# --------------------------------- init methods -------------------------------------
# ------------------------------------------------------------------------------------
    def _init_dof_fields(self):
        """Populate per-DOF info/state buffers from all joints, then zero DOF forces."""
        self.dofs_info = self.data_manager.dofs_info
        self.dofs_state = self.data_manager.dofs_state
        joints = self.joints
        has_dofs = sum(joint.n_dofs for joint in joints) > 0
        if has_dofs:
            kernel_init_dof_fields(
                # One entry per DOF; joints with zero DOFs contribute nothing.
                entity_idx=np.concatenate(
                    [(joint.link._entity_idx_in_solver,) * joint.n_dofs for joint in joints if joint.n_dofs],
                    dtype=gs.np_int,
                ),
                dofs_motion_ang=np.concatenate([joint.dofs_motion_ang for joint in joints], dtype=gs.np_float),
                dofs_motion_vel=np.concatenate([joint.dofs_motion_vel for joint in joints], dtype=gs.np_float),
                dofs_limit=np.concatenate([joint.dofs_limit for joint in joints], dtype=gs.np_float),
                dofs_invweight=np.concatenate([joint.dofs_invweight for joint in joints], dtype=gs.np_float),
                dofs_stiffness=np.concatenate([joint.dofs_stiffness for joint in joints], dtype=gs.np_float),
                dofs_damping=np.concatenate([joint.dofs_damping for joint in joints], dtype=gs.np_float),
                dofs_frictionloss=np.concatenate([joint.dofs_frictionloss for joint in joints], dtype=gs.np_float),
                dofs_armature=np.concatenate([joint.dofs_armature for joint in joints], dtype=gs.np_float),
                dofs_kp=np.concatenate([joint.dofs_kp for joint in joints], dtype=gs.np_float),
                dofs_kv=np.concatenate([joint.dofs_kv for joint in joints], dtype=gs.np_float),
                dofs_force_range=np.concatenate([joint.dofs_force_range for joint in joints], dtype=gs.np_float),
                dofs_info=self.dofs_info,
                dofs_state=self.dofs_state,
                rigid_global_info=self._rigid_global_info,
                static_rigid_sim_config=self._static_rigid_sim_config,
            )
        # Forces always start at zero, even when there are no DOFs to initialize.
        self.dofs_state.force.fill(0)
    def _init_link_fields(self):
        """Populate link and joint info/state buffers and set the initial qpos.

        Also warns when the neutral configuration (qpos0) violates joint limits
        for revolute/prismatic joints.
        """
        self.links_info = self.data_manager.links_info
        self.links_state = self.data_manager.links_state
        if self.links:
            links = self.links
            kernel_init_link_fields(
                links_parent_idx=np.array([link.parent_idx for link in links], dtype=gs.np_int),
                links_root_idx=np.array([link.root_idx for link in links], dtype=gs.np_int),
                links_q_start=np.array([link.q_start for link in links], dtype=gs.np_int),
                links_dof_start=np.array([link.dof_start for link in links], dtype=gs.np_int),
                links_joint_start=np.array([link.joint_start for link in links], dtype=gs.np_int),
                links_q_end=np.array([link.q_end for link in links], dtype=gs.np_int),
                links_dof_end=np.array([link.dof_end for link in links], dtype=gs.np_int),
                links_joint_end=np.array([link.joint_end for link in links], dtype=gs.np_int),
                links_invweight=np.array([link.invweight for link in links], dtype=gs.np_float),
                links_is_fixed=np.array([link.is_fixed for link in links], dtype=gs.np_bool),
                links_pos=np.array([link.pos for link in links], dtype=gs.np_float),
                links_quat=np.array([link.quat for link in links], dtype=gs.np_float),
                links_inertial_pos=np.array([link.inertial_pos for link in links], dtype=gs.np_float),
                links_inertial_quat=np.array([link.inertial_quat for link in links], dtype=gs.np_float),
                links_inertial_i=np.array([link.inertial_i for link in links], dtype=gs.np_float),
                links_inertial_mass=np.array([link.inertial_mass for link in links], dtype=gs.np_float),
                links_entity_idx=np.array([link._entity_idx_in_solver for link in links], dtype=gs.np_int),
                links_geom_start=np.array([link.geom_start for link in links], dtype=gs.np_int),
                links_geom_end=np.array([link.geom_end for link in links], dtype=gs.np_int),
                links_vgeom_start=np.array([link.vgeom_start for link in links], dtype=gs.np_int),
                links_vgeom_end=np.array([link.vgeom_end for link in links], dtype=gs.np_int),
                links_info=self.links_info,
                links_state=self.links_state,
                rigid_global_info=self._rigid_global_info,
                static_rigid_sim_config=self._static_rigid_sim_config,
            )
        self.joints_info = self.data_manager.joints_info
        self.joints_state = self.data_manager.joints_state
        if self.joints:
            joints = self.joints
            # Solver params go through the subclass hook before reaching the kernel.
            joints_sol_params = np.array([joint.sol_params for joint in joints], dtype=gs.np_float)
            joints_sol_params = self._sanitize_joint_sol_params(joints_sol_params)
            kernel_init_joint_fields(
                joints_type=np.array([joint.type for joint in joints], dtype=gs.np_int),
                joints_sol_params=joints_sol_params,
                joints_q_start=np.array([joint.q_start for joint in joints], dtype=gs.np_int),
                joints_dof_start=np.array([joint.dof_start for joint in joints], dtype=gs.np_int),
                joints_q_end=np.array([joint.q_end for joint in joints], dtype=gs.np_int),
                joints_dof_end=np.array([joint.dof_end for joint in joints], dtype=gs.np_int),
                joints_pos=np.array([joint.pos for joint in joints], dtype=gs.np_float),
                joints_info=self.joints_info,
                static_rigid_sim_config=self._static_rigid_sim_config,
            )
        # Set initial qpos
        self.qpos = self._rigid_global_info.qpos
        self.qpos0 = self._rigid_global_info.qpos0
        if self.n_qs > 0:
            # Broadcast the shared neutral pose across the batch dimension (n_qs, B).
            init_qpos = np.tile(np.expand_dims(self.init_qpos, -1), (1, self._B))
            self.qpos0.from_numpy(init_qpos)
            is_init_qpos_out_of_bounds = False
            for joint in self.joints:
                if joint.type in (gs.JOINT_TYPE.REVOLUTE, gs.JOINT_TYPE.PRISMATIC):
                    is_init_qpos_out_of_bounds |= (joint.dofs_limit[0, 0] > init_qpos[joint.q_start]).any()
                    is_init_qpos_out_of_bounds |= (init_qpos[joint.q_start] > joint.dofs_limit[0, 1]).any()
            if is_init_qpos_out_of_bounds:
                gs.logger.warning("Neutral robot position (qpos0) exceeds joint limits.")
            self.qpos.from_numpy(init_qpos)
        self.links_T = self._rigid_global_info.links_T
    def _process_heterogeneous_link_info(self):
        """
        Process heterogeneous link info: dispatch geoms per environment and compute per-env inertial properties.
        This method is called after _init_link_fields to update the per-environment inertial properties
        for entities with heterogeneous morphs.
        """
        for entity in self._entities:
            # Skip non-heterogeneous entities
            if not entity._enable_heterogeneous:
                continue
            # Get the number of variants for this entity
            n_variants = len(entity.variants_vgeom_start)
            # Distribute variants across environments using balanced block assignment:
            # - If B >= n_variants: first B/n_variants environments get variant 0, next get variant 1, etc.
            # - If B < n_variants: each environment gets a different variant (some variants unused)
            if self._B >= n_variants:
                base = self._B // n_variants
                extra = self._B % n_variants  # first `extra` chunks get one more
                sizes = np.r_[np.full(extra, base + 1), np.full(n_variants - extra, base)]
                variant_idx = np.repeat(np.arange(n_variants), sizes)
            else:
                # Each environment gets a unique variant; variants beyond B are unused
                variant_idx = np.arange(self._B)
            # Get arrays from entity
            np_vgeom_start = np.array(entity.variants_vgeom_start, dtype=gs.np_int)
            np_vgeom_end = np.array(entity.variants_vgeom_end, dtype=gs.np_int)
            # Process each link in this heterogeneous entity (currently only single-link supported)
            for link in entity.links:
                i_l = link.idx
                # Build per-env arrays for vgeom ranges
                links_vgeom_start = np_vgeom_start[variant_idx]
                links_vgeom_end = np_vgeom_end[variant_idx]
                # Update links vgeoms with per-environment values
                # Note: when batch_links_info is True, the shape is (n_links, B)
                kernel_update_heterogeneous_links_vgeom(i_l, links_vgeom_start, links_vgeom_end, self.links_info)
                # Update active_envs_idx for vgeoms - indicates which environments each geom is active in
                for vgeom in link.vgeoms:
                    # Boolean mask of length B: env is active iff vgeom.idx falls in [start, end).
                    active_envs_mask = (links_vgeom_start <= vgeom.idx) & (vgeom.idx < links_vgeom_end)
                    vgeom.active_envs_mask = torch.tensor(active_envs_mask, device=gs.device)
                    (vgeom.active_envs_idx,) = np.where(active_envs_mask)
    def _init_vvert_fields(self):
        """Populate visual vertex/face buffers, offsetting face indices per vgeom."""
        self.vverts_info = self.data_manager.vverts_info
        self.vfaces_info = self.data_manager.vfaces_info
        if self.n_vverts > 0:
            vgeoms = self.vgeoms
            kernel_init_vvert_fields(
                vverts=np.concatenate([vgeom.init_vverts for vgeom in vgeoms], dtype=gs.np_float),
                # Face indices are local per-vgeom; shift by vvert_start to make them global.
                vfaces=np.concatenate([vgeom.init_vfaces + vgeom.vvert_start for vgeom in vgeoms], dtype=gs.np_int),
                vnormals=np.concatenate([vgeom.init_vnormals for vgeom in vgeoms], dtype=gs.np_float),
                vverts_vgeom_idx=np.concatenate(
                    [np.full(vgeom.n_vverts, vgeom.idx) for vgeom in vgeoms], dtype=gs.np_int
                ),
                vverts_info=self.vverts_info,
                vfaces_info=self.vfaces_info,
                static_rigid_sim_config=self._static_rigid_sim_config,
            )
    def _init_vgeom_fields(self):
        """Populate visual-geometry buffers and allocate the render-transform scratch array."""
        self.vgeoms_info = self.data_manager.vgeoms_info
        self.vgeoms_state = self.data_manager.vgeoms_state
        # Host-side 4x4 transforms consumed by the renderer; sized with the clamped count.
        self._vgeoms_render_T = np.empty((self.n_vgeoms_, self._B, 4, 4), dtype=np.float32)
        if self.n_vgeoms > 0:
            vgeoms = self.vgeoms
            kernel_init_vgeom_fields(
                vgeoms_pos=np.array([vgeom.init_pos for vgeom in vgeoms], dtype=gs.np_float),
                vgeoms_quat=np.array([vgeom.init_quat for vgeom in vgeoms], dtype=gs.np_float),
                vgeoms_link_idx=np.array([vgeom.link.idx for vgeom in vgeoms], dtype=gs.np_int),
                vgeoms_vvert_start=np.array([vgeom.vvert_start for vgeom in vgeoms], dtype=gs.np_int),
                vgeoms_vface_start=np.array([vgeom.vface_start for vgeom in vgeoms], dtype=gs.np_int),
                vgeoms_vvert_end=np.array([vgeom.vvert_end for vgeom in vgeoms], dtype=gs.np_int),
                vgeoms_vface_end=np.array([vgeom.vface_end for vgeom in vgeoms], dtype=gs.np_int),
                vgeoms_color=np.array([vgeom._color for vgeom in vgeoms], dtype=gs.np_float),
                vgeoms_info=self.vgeoms_info,
                static_rigid_sim_config=self._static_rigid_sim_config,
            )
def _init_entity_fields(self):
self.entities_info = self.data_manager.entities_info
self.entities_state = self.data_manager.entities_state
if self._entities:
entities = self._entities
kernel_init_entity_fields(
entities_dof_start=np.array([entity.dof_start for entity in entities], dtype=gs.np_int),
entities_dof_end=np.array([entity.dof_end for entity in entities], dtype=gs.np_int),
entities_link_start=np.array([entity.link_start for entity in entities], dtype=gs.np_int),
entities_link_end=np.array([entity.link_end for entity in entities], dtype=gs.np_int),
entities_geom_start=np.array([0 for entity in entities], dtype=gs.np_int),
entities_geom_end=np.array([0 for entity in entities], dtype=gs.np_int),
entities_gravity_compensation=np.array([0.0 for entity in entities], dtype=gs.np_float),
entities_is_local_collision_mask=np.array([False for entity in entities], dtype=gs.np_bool),
entities_info=self.entities_info,
entities_state=self.entities_state,
links_info=self.links_info,
dofs_info=self.dofs_info,
rigid_global_info=self._rigid_global_info,
static_rigid_sim_config=self._static_rigid_sim_config,
)
    def _init_envs_offset(self):
        """Upload per-environment spatial offsets from the scene into the global buffer."""
        self.envs_offset = self._rigid_global_info.envs_offset
        self.envs_offset.from_numpy(self._scene.envs_offset)
# ------------------------------------------------------------------------------------
# -------------------------------- simulation no-ops ----------------------------------
# ------------------------------------------------------------------------------------
def substep_pre_coupling(self, f):
pass
def substep_pre_coupling_grad(self, f):
pass
    def substep_post_coupling(self, f):
        """Run forward kinematics for all environments, unless poses and velocities are already fresh."""
        if not self._is_forward_pos_updated or not self._is_forward_vel_updated:
            kernel_forward_kinematics(
                self.scene._envs_idx,
                links_state=self.links_state,
                links_info=self.links_info,
                joints_state=self.joints_state,
                joints_info=self.joints_info,
                dofs_state=self.dofs_state,
                dofs_info=self.dofs_info,
                entities_info=self.entities_info,
                rigid_global_info=self._rigid_global_info,
                static_rigid_sim_config=self._static_rigid_sim_config,
            )
            self._is_forward_pos_updated = True
            self._is_forward_vel_updated = True
def substep_post_coupling_grad(self, f):
pass
    def add_grad_from_state(self, state):
        """Accumulate gradients from a queried `state` back into solver buffers.

        Fields whose `.grad` is None are treated as zero gradients.
        """
        if self.is_active:
            qpos_grad = gs.zeros_like(state.qpos)
            dofs_vel_grad = gs.zeros_like(state.dofs_vel)
            links_pos_grad = gs.zeros_like(state.links_pos)
            links_quat_grad = gs.zeros_like(state.links_quat)
            if state.qpos.grad is not None:
                qpos_grad = state.qpos.grad
            if state.dofs_vel.grad is not None:
                dofs_vel_grad = state.dofs_vel.grad
            if state.links_pos.grad is not None:
                links_pos_grad = state.links_pos.grad
            if state.links_quat.grad is not None:
                links_quat_grad = state.links_quat.grad
            kernel_get_state_grad(
                qpos_grad=qpos_grad,
                vel_grad=dofs_vel_grad,
                links_pos_grad=links_pos_grad,
                links_quat_grad=links_quat_grad,
                links_state=self.links_state,
                dofs_state=self.dofs_state,
                rigid_global_info=self._rigid_global_info,
                static_rigid_sim_config=self._static_rigid_sim_config,
            )
def collect_output_grads(self):
"""
Collect gradients from downstream queried states.
"""
if self._sim.cur_step_global in self._queried_states:
# one step could have multiple states
assert len(self._queried_states[self._sim.cur_step_global]) == 1
state = self._queried_states[self._sim.cur_step_global][0]
self.add_grad_from_state(state)
def reset_grad(self):
for entity in self._entities:
entity.reset_grad()
self._queried_states.clear()
# ------------------------------------------------------------------------------------
# ----------------------------------- render -----------------------------------------
# ------------------------------------------------------------------------------------
    def update_vgeoms_render_T(self):
        """Refresh the host-side 4x4 render transforms of all visual geoms."""
        kernel_update_vgeoms_render_T(
            self._vgeoms_render_T,
            vgeoms_info=self.vgeoms_info,
            vgeoms_state=self.vgeoms_state,
            links_state=self.links_state,
            rigid_global_info=self._rigid_global_info,
            static_rigid_sim_config=self._static_rigid_sim_config,
        )
# ------------------------------------------------------------------------------------
# -------------------------------- state get/set -------------------------------------
# ------------------------------------------------------------------------------------
    def get_state(self, f=None):
        """Return (and memoize per global step) the current kinematic state.

        Returns None when the solver is inactive (no links).
        """
        if self.is_active:
            s_global = self.sim.cur_step_global
            # Reuse the state already queried at this step, if any.
            if s_global in self._queried_states:
                return self._queried_states[s_global][0]
            state = KinematicSolverState(self._scene, s_global)
            kernel_get_kinematic_state(
                qpos=state.qpos,
                vel=state.dofs_vel,
                links_pos=state.links_pos,
                links_quat=state.links_quat,
                i_pos_shift=state.i_pos_shift,
                links_state=self.links_state,
                dofs_state=self.dofs_state,
                rigid_global_info=self._rigid_global_info,
                static_rigid_sim_config=self._static_rigid_sim_config,
            )
            self._queried_states.append(state)
        else:
            state = None
        return state
    def set_state(self, f, state, envs_idx=None):
        """Restore a previously queried kinematic state, then re-run forward kinematics."""
        if self.is_active:
            envs_idx = self._scene._sanitize_envs_idx(envs_idx)
            kernel_set_kinematic_state(
                envs_idx=envs_idx,
                qpos=state.qpos,
                dofs_vel=state.dofs_vel,
                links_pos=state.links_pos,
                links_quat=state.links_quat,
                i_pos_shift=state.i_pos_shift,
                links_state=self.links_state,
                dofs_state=self.dofs_state,
                rigid_global_info=self._rigid_global_info,
                static_rigid_sim_config=self._static_rigid_sim_config,
            )
            # Derived link poses/velocities must be recomputed from the restored qpos.
            kernel_forward_kinematics(
                envs_idx,
                links_state=self.links_state,
                links_info=self.links_info,
                joints_state=self.joints_state,
                joints_info=self.joints_info,
                dofs_state=self.dofs_state,
                dofs_info=self.dofs_info,
                entities_info=self.entities_info,
                rigid_global_info=self._rigid_global_info,
                static_rigid_sim_config=self._static_rigid_sim_config,
            )
            self._is_forward_pos_updated = True
            self._is_forward_vel_updated = True
# ------------------------------------------------------------------------------------
# -------------------------------- process_input -------------------------------------
# ------------------------------------------------------------------------------------
def process_input(self, in_backward=False):
"""Process input for entities (set qpos from user commands)."""
for entity in self._entities:
entity.process_input()
def process_input_grad(self):
"""No-op: kinematic solver does not support gradients."""
pass
def save_ckpt(self, ckpt_name):
"""No-op: kinematic solver does not save checkpoints."""
pass
def load_ckpt(self, ckpt_name):
"""No-op: kinematic solver does not load checkpoints."""
pass
@property
def is_active(self):
return self.n_links > 0
# ------------------------------------------------------------------------------------
# ------------------------------------ control ---------------------------------------
# ------------------------------------------------------------------------------------
    def _sanitize_io_variables(
        self,
        tensor,
        inputs_idx,
        input_size,
        idx_name,
        envs_idx=None,
        element_shape=(),
        *,
        batched=True,
        skip_allocation=False,
    ):
        """Normalize a user tensor plus its index arguments for kernel consumption.

        Returns (tensor_, inputs_idx_, envs_idx_). The tensor is validated (and
        allocated unless `skip_allocation`); the env dimension is included only
        when the scene is batched and `batched` is True.
        """
        envs_idx_ = self._scene._sanitize_envs_idx(envs_idx) if batched else self._scene._envs_idx[:0]
        if self.n_envs == 0 or not batched:
            # Unbatched layout: (input, *element_shape).
            tensor_, (inputs_idx_,) = sanitize_indexed_tensor(
                tensor,
                gs.tc_float,
                (inputs_idx,),
                (-1, *element_shape),
                (input_size, *element_shape),
                (idx_name, *("" for _ in element_shape)),
                skip_allocation=skip_allocation,
            )
        else:
            # Batched layout: (env, input, *element_shape).
            tensor_, (envs_idx_, inputs_idx_) = sanitize_indexed_tensor(
                tensor,
                gs.tc_float,
                (envs_idx_, inputs_idx),
                (-1, -1, *element_shape),
                (self.n_envs, input_size, *element_shape),
                ("envs_idx", idx_name, *("" for _ in element_shape)),
                skip_allocation=skip_allocation,
            )
        return tensor_, inputs_idx_, envs_idx_
    def set_base_links_pos(self, pos, links_idx=None, envs_idx=None, *, relative=False):
        """Set world positions of base links (all base links by default), then refresh kinematics."""
        if links_idx is None:
            links_idx = self._base_links_idx
        pos, links_idx, envs_idx = self._sanitize_io_variables(
            pos, links_idx, self.n_links, "links_idx", envs_idx, (3,), skip_allocation=True
        )
        if self.n_envs == 0:
            # Kernels always expect a leading batch dimension.
            pos = pos[None]
        kernel_set_links_pos(
            relative,
            pos,
            links_idx,
            envs_idx,
            links_info=self.links_info,
            links_state=self.links_state,
            rigid_global_info=self._rigid_global_info,
            static_rigid_sim_config=self._static_rigid_sim_config,
        )
        kernel_forward_kinematics(
            envs_idx,
            links_state=self.links_state,
            links_info=self.links_info,
            joints_state=self.joints_state,
            joints_info=self.joints_info,
            dofs_state=self.dofs_state,
            dofs_info=self.dofs_info,
            entities_info=self.entities_info,
            rigid_global_info=self._rigid_global_info,
            static_rigid_sim_config=self._static_rigid_sim_config,
        )
        self._is_forward_pos_updated = True
        self._is_forward_vel_updated = True
    def set_base_links_pos_grad(self, links_idx, envs_idx, relative, pos_grad):
        """Backward pass of `set_base_links_pos`: scatter `pos_grad` into link state grads."""
        if links_idx is None:
            links_idx = self._base_links_idx
        pos_grad_, links_idx, envs_idx = self._sanitize_io_variables(
            pos_grad.unsqueeze(-2), links_idx, self.n_links, "links_idx", envs_idx, (3,), skip_allocation=True
        )
        if self.n_envs == 0:
            pos_grad_ = pos_grad_.unsqueeze(0)
        kernel_set_links_pos_grad(
            relative,
            pos_grad_,
            links_idx,
            envs_idx,
            links_info=self.links_info,
            links_state=self.links_state,
            rigid_global_info=self._rigid_global_info,
            static_rigid_sim_config=self._static_rigid_sim_config,
        )
    def set_base_links_quat(self, quat, links_idx=None, envs_idx=None, *, relative=False):
        """Set orientations (quaternions) of base links, then refresh kinematics."""
        if links_idx is None:
            links_idx = self._base_links_idx
        quat, links_idx, envs_idx = self._sanitize_io_variables(
            quat, links_idx, self.n_links, "links_idx", envs_idx, (4,), skip_allocation=True
        )
        if self.n_envs == 0:
            # Kernels always expect a leading batch dimension.
            quat = quat[None]
        kernel_set_links_quat(
            relative,
            quat,
            links_idx,
            envs_idx,
            links_info=self.links_info,
            links_state=self.links_state,
            rigid_global_info=self._rigid_global_info,
            static_rigid_sim_config=self._static_rigid_sim_config,
        )
        kernel_forward_kinematics(
            envs_idx,
            links_state=self.links_state,
            links_info=self.links_info,
            joints_state=self.joints_state,
            joints_info=self.joints_info,
            dofs_state=self.dofs_state,
            dofs_info=self.dofs_info,
            entities_info=self.entities_info,
            rigid_global_info=self._rigid_global_info,
            static_rigid_sim_config=self._static_rigid_sim_config,
        )
        self._is_forward_pos_updated = True
        self._is_forward_vel_updated = True
def set_base_links_quat_grad(self, links_idx, envs_idx, relative, quat_grad):
if links_idx is None:
links_idx = self._base_links_idx
quat_grad_, links_idx, envs_idx = self._sanitize_io_variables(
quat_grad.unsqueeze(-2), links_idx, self.n_links, "links_idx", envs_idx, (4,), skip_allocation=True
)
if self.n_envs == 0:
quat_grad_ = quat_grad_.unsqueeze(0)
assert relative == False, "Backward pass for relative quaternion is not supported yet."
kernel_set_links_quat_grad(
relative,
quat_grad_,
links_idx,
envs_idx,
links_info=self.links_info,
links_state=self.links_state,
rigid_global_info=self._rigid_global_info,
static_rigid_sim_config=self._static_rigid_sim_config,
)
    def set_qpos(self, qpos, qs_idx=None, envs_idx=None, *, skip_forward=False):
        """Write generalized coordinates, optionally deferring forward kinematics.

        Uses an in-place zero-copy write when enabled, otherwise falls back to
        a sanitize-then-kernel path. With `skip_forward`, kinematics are marked
        stale and recomputed lazily at the next substep.
        """
        if gs.use_zerocopy:
            data = qd_to_torch(self._rigid_global_info.qpos, transpose=True, copy=False)
            qs_mask = indices_to_mask(qs_idx)
            # Fast path: full/sliced q range with a boolean env mask.
            if (
                (not qs_mask or isinstance(qs_mask[0], slice))
                and isinstance(envs_idx, torch.Tensor)
                and envs_idx.dtype == torch.bool
            ):
                qs_data = data[(slice(None), *qs_mask)]
                if qpos.ndim == 2:
                    # Note that it is necessary to create a new temporary view because it will be modified in-place
                    qs_data.masked_scatter_(envs_idx[:, None], qpos.view_as(qpos))
                else:
                    qpos = broadcast_tensor(qpos, gs.tc_float, qs_data.shape)
                    torch.where(envs_idx[:, None], qpos, qs_data, out=qs_data)
            else:
                mask = (0, *qs_mask) if self.n_envs == 0 else indices_to_mask(envs_idx, *qs_mask)
                assign_indexed_tensor(data, mask, qpos)
                if mask and isinstance(mask[0], torch.Tensor):
                    envs_idx = mask[0].reshape((-1,))
        else:
            qpos, qs_idx, envs_idx = self._sanitize_io_variables(
                qpos, qs_idx, self.n_qs, "qs_idx", envs_idx, skip_allocation=True
            )
            if self.n_envs == 0:
                qpos = qpos[None]
            kernel_set_qpos(qpos, qs_idx, envs_idx, self._rigid_global_info, self._static_rigid_sim_config)
        if not skip_forward:
            if not isinstance(envs_idx, torch.Tensor):
                envs_idx = self._scene._sanitize_envs_idx(envs_idx)
            # Boolean env masks use the masked kernel variant.
            if envs_idx.dtype == torch.bool:
                fn = kernel_masked_forward_kinematics
            else:
                fn = kernel_forward_kinematics
            fn(
                envs_idx,
                links_state=self.links_state,
                links_info=self.links_info,
                joints_state=self.joints_state,
                joints_info=self.joints_info,
                dofs_state=self.dofs_state,
                dofs_info=self.dofs_info,
                entities_info=self.entities_info,
                rigid_global_info=self._rigid_global_info,
                static_rigid_sim_config=self._static_rigid_sim_config,
            )
            self._is_forward_pos_updated = True
            self._is_forward_vel_updated = True
        else:
            self._is_forward_pos_updated = False
            self._is_forward_vel_updated = False
    def set_dofs_velocity(self, velocity, dofs_idx=None, envs_idx=None, *, skip_forward=False):
        """Write DOF velocities (`velocity=None` zeroes them), optionally deferring velocity FK.

        Mirrors `set_qpos`: an in-place zero-copy path when available, otherwise
        sanitize-then-kernel; with `skip_forward` velocities are marked stale.
        """
        if gs.use_zerocopy:
            vel = qd_to_torch(self.dofs_state.vel, transpose=True, copy=False)
            dofs_mask = indices_to_mask(dofs_idx)
            # Fast path: full/sliced dof range with tensor env indices.
            if (
                (not dofs_mask or isinstance(dofs_mask[0], slice))
                and isinstance(envs_idx, torch.Tensor)
                and (
                    (velocity is None and (not IS_OLD_TORCH or envs_idx.dtype == torch.bool))
                    or (velocity is not None and velocity.ndim == 2 and envs_idx.dtype == torch.bool)
                )
            ):
                dofs_vel = vel[(slice(None), *dofs_mask)]
                if velocity is None:
                    if envs_idx.dtype == torch.bool:
                        dofs_vel.masked_fill_(envs_idx[:, None], 0.0)
                    else:
                        dofs_vel.scatter_(0, envs_idx[:, None].expand((-1, dofs_vel.shape[1])), 0.0)
                else:
                    if velocity.ndim == 2:
                        # Note that it is necessary to create a new temporary view because it will be modified in-place
                        dofs_vel.masked_scatter_(envs_idx[:, None], velocity.view_as(velocity))
                    else:
                        velocity = broadcast_tensor(velocity, gs.tc_float, dofs_vel.shape)
                        torch.where(envs_idx[:, None], velocity, dofs_vel, out=dofs_vel)
            else:
                mask = (0, *dofs_mask) if self.n_envs == 0 else indices_to_mask(envs_idx, *dofs_mask)
                if velocity is None:
                    vel[mask] = 0.0
                else:
                    assign_indexed_tensor(vel, mask, velocity)
                if mask and isinstance(mask[0], torch.Tensor):
                    envs_idx = mask[0].reshape((-1,))
            if not skip_forward and not isinstance(envs_idx, torch.Tensor):
                envs_idx = self._scene._sanitize_envs_idx(envs_idx)
        else:
            velocity, dofs_idx, envs_idx = self._sanitize_io_variables(
                velocity, dofs_idx, self.n_dofs, "dofs_idx", envs_idx, skip_allocation=True
            )
            if velocity is None:
                kernel_set_dofs_zero_velocity(dofs_idx, envs_idx, self.dofs_state, self._static_rigid_sim_config)
            else:
                if self.n_envs == 0:
                    velocity = velocity[None]
                kernel_set_dofs_velocity(velocity, dofs_idx, envs_idx, self.dofs_state, self._static_rigid_sim_config)
        if not skip_forward:
            # Boolean env masks use the masked kernel variant.
            if envs_idx.dtype == torch.bool:
                fn = kernel_masked_forward_velocity
            else:
                fn = kernel_forward_velocity
            fn(
                envs_idx,
                links_state=self.links_state,
                links_info=self.links_info,
                joints_info=self.joints_info,
                dofs_state=self.dofs_state,
                entities_info=self.entities_info,
                rigid_global_info=self._rigid_global_info,
                static_rigid_sim_config=self._static_rigid_sim_config,
                is_backward=False,
            )
            self._is_forward_vel_updated = True
        else:
            self._is_forward_vel_updated = False
    def set_dofs_velocity_grad(self, dofs_idx, envs_idx, velocity_grad):
        """Backward pass of `set_dofs_velocity`: scatter `velocity_grad` into DOF state grads."""
        velocity_grad_, dofs_idx, envs_idx = self._sanitize_io_variables(
            velocity_grad, dofs_idx, self.n_dofs, "dofs_idx", envs_idx, skip_allocation=True
        )
        if self.n_envs == 0:
            velocity_grad_ = velocity_grad_.unsqueeze(0)
        kernel_set_dofs_velocity_grad(
            velocity_grad_, dofs_idx, envs_idx, self.dofs_state, self._static_rigid_sim_config
        )
    def set_dofs_position(self, position, dofs_idx=None, envs_idx=None):
        """Write DOF positions (updating qpos accordingly), then refresh kinematics."""
        position, dofs_idx, envs_idx = self._sanitize_io_variables(
            position, dofs_idx, self.n_dofs, "dofs_idx", envs_idx, skip_allocation=True
        )
        if self.n_envs == 0:
            # Kernels always expect a leading batch dimension.
            position = position[None]
        kernel_set_dofs_position(
            position,
            dofs_idx,
            envs_idx,
            self.dofs_state,
            self.links_info,
            self.joints_info,
            self.entities_info,
            self._rigid_global_info,
            self._static_rigid_sim_config,
        )
        kernel_forward_kinematics(
            envs_idx,
            links_state=self.links_state,
            links_info=self.links_info,
            joints_state=self.joints_state,
            joints_info=self.joints_info,
            dofs_state=self.dofs_state,
            dofs_info=self.dofs_info,
            entities_info=self.entities_info,
            rigid_global_info=self._rigid_global_info,
            static_rigid_sim_config=self._static_rigid_sim_config,
        )
        self._is_forward_pos_updated = True
        self._is_forward_vel_updated = True
    def get_links_pos(self, links_idx=None, envs_idx=None):
        """Return world-frame link positions; leading env dim dropped when unbatched."""
        if not gs.use_zerocopy:
            # Indices are sanitized only on this path; the zero-copy path passes them
            # through raw. NOTE(review): confirm qd_to_torch tolerates unsanitized
            # indices when zero-copy is enabled.
            _, links_idx, envs_idx = self._sanitize_io_variables(
                None, links_idx, self.n_links, "links_idx", envs_idx, (3,), skip_allocation=True
            )
        tensor = qd_to_torch(self.links_state.pos, envs_idx, links_idx, transpose=True, copy=True)
        return tensor[0] if self.n_envs == 0 else tensor
def get_links_quat(self, links_idx=None, envs_idx=None):
tensor = qd_to_torch(self.links_state.quat, envs_idx, links_idx, transpose=True, copy=True)
return tensor[0] if self.n_envs == 0 else tensor
    def get_links_vel(self, links_idx=None, envs_idx=None):
        """Return linear velocity at link origins (COM velocity plus angular cross arm)."""
        if gs.use_zerocopy:
            mask = (0, *indices_to_mask(links_idx)) if self.n_envs == 0 else indices_to_mask(envs_idx, links_idx)
            cd_vel = qd_to_torch(self.links_state.cd_vel, transpose=True)
            cd_ang = qd_to_torch(self.links_state.cd_ang, transpose=True)
            pos = qd_to_torch(self.links_state.pos, transpose=True)
            root_COM = qd_to_torch(self.links_state.root_COM, transpose=True)
            # Rigid-body velocity transport: v_link = v_com + w x (p_link - p_com).
            return cd_vel[mask] + cd_ang[mask].cross(pos[mask] - root_COM[mask], dim=-1)
        _tensor, links_idx, envs_idx = self._sanitize_io_variables(
            None, links_idx, self.n_links, "links_idx", envs_idx, (3,)
        )
        assert _tensor is not None
        tensor = _tensor[None] if self.n_envs == 0 else _tensor
        # NOTE(review): the literal `2` is assumed to select the link-origin reference
        # point — confirm against kernel_get_links_vel.
        kernel_get_links_vel(tensor, links_idx, envs_idx, 2, self.links_state, self._static_rigid_sim_config)
        return _tensor
def get_links_ang(self, links_idx=None, envs_idx=None):
tensor = qd_to_torch(self.links_state.cd_ang, envs_idx, links_idx, transpose=True, copy=True)
return tensor[0] if self.n_envs == 0 else tensor
    def _build_dof_to_q_map(self, dofs_idx_t):
        """Build a mapping from DOF indices to qpos indices for revolute/prismatic joints."""
        # NOTE(review): indices are computed entity-locally (dof_start - entity.dof_start),
        # so with multiple entities their entries overwrite the same slots of `dof_to_q`.
        # This is only consistent if `dofs_idx_t` is entity-local too — confirm at call sites.
        dof_to_q = torch.zeros(self.n_dofs, dtype=torch.long, device=gs.device)
        for entity in self._entities:
            for joint in entity.joints:
                if joint.n_dofs == 0:
                    continue
                for i in range(joint.n_dofs):
                    dof_to_q[joint.dof_start - entity.dof_start + i] = joint.q_start - entity.q_start + i
        return dof_to_q[dofs_idx_t]
def get_qpos(self, qs_idx=None, envs_idx=None):
tensor = qd_to_torch(self.qpos, envs_idx, qs_idx, transpose=True, copy=True)
return tensor[0] if self.n_envs == 0 else tensor
def get_dofs_velocity(self, dofs_idx=None, envs_idx=None):
tensor = qd_to_torch(self.dofs_state.vel, envs_idx, dofs_idx, transpose=True, copy=True)
return tensor[0] if self.n_envs == 0 else tensor
def get_dofs_position(self, dofs_idx=None, envs_idx=None):
"""Read current DOF positions."""
tensor = qd_to_torch(self.dofs_state.pos, envs_idx, dofs_idx, transpose=True, copy=True)
return tensor[0] if self.n_envs == 0 else tensor
def get_dofs_limit(self, dofs_idx=None, envs_idx=None):
if not self._options.batch_dofs_info and envs_idx is not None:
gs.raise_exception("`envs_idx` cannot be specified for non-batched dofs info.")
tensor = qd_to_torch(self.dofs_info.limit, envs_idx, dofs_idx, transpose=True, copy=True)
if self.n_envs == 0 and self._options.batch_dofs_info:
tensor = tensor[0]
return tensor[..., 0], tensor[..., 1]
def clear_external_force(self):
"""No-op: kinematic entities have no external forces."""
pass
    def update_vgeoms(self):
        """Refresh visual-geometry states from current link states."""
        kernel_update_vgeoms(self.vgeoms_info, self.vgeoms_state, self.links_state, self._static_rigid_sim_config)
# ------------------------------------------------------------------------------------
# ----------------------------------- properties -------------------------------------
# ------------------------------------------------------------------------------------
@property
def links(self):
if self.is_built:
return self._links
return gs.List(link for entity in self._entities for link in entity.links)
@property
def joints(self):
if self.is_built:
return self._joints
return gs.List(joint for entity in self._entities for joint in entity.joints)
@property
def geoms(self):
if self.is_built:
return self._geoms
return gs.List(geom for entity in self._entities for geom in entity.geoms)
@property
def vgeoms(self):
if self.is_built:
return self._vgeoms
return gs.List(vgeom for entity in self._entities for vgeom in entity.vgeoms)
@property
def n_links(self):
if self.is_built:
return self._n_links
return len(self.links)
@property
def n_joints(self):
if self.is_built:
return self._n_joints
return len(self.joints)
@property
def n_vgeoms(self):
if self.is_built:
return self._n_vgeoms
return len(self.vgeoms)
@property
def n_vverts(self):
if self.is_built:
return self._n_vverts
return sum(entity.n_vverts for entity in self._entities)
@property
def n_vfaces(self):
if self.is_built:
return self._n_vfaces
return sum(entity.n_vfaces for entity in self._entities)
@property
def n_qs(self):
if self.is_built:
return self._n_qs
return sum(entity.n_qs for entity in self._entities)
@property
def n_dofs(self):
if self.is_built:
return self._n_dofs
return sum(entity.n_dofs for entity in self._entities)
@property
def init_qpos(self):
if self._entities:
return np.concatenate([entity.init_qpos for entity in self._entities], dtype=gs.np_float)
return np.array([], dtype=gs.np_float)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/kinematic_solver.py",
"license": "Apache License 2.0",
"lines": 961,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:tests/test_kinematic.py | import pytest
import genesis as gs
from genesis.utils.misc import tensor_to_array
from tests.utils import assert_allclose
@pytest.mark.required
def test_setters(show_viewer, tol):
scene = gs.Scene(
show_viewer=show_viewer,
show_FPS=False,
)
ghost_box = scene.add_entity(
morph=gs.morphs.Box(
size=(0.4, 0.2, 0.1),
),
material=gs.materials.Kinematic(),
surface=gs.surfaces.Default(
color=(1.0, 0.0, 0.0, 0.7),
),
)
ghost_robot = scene.add_entity(
morph=gs.morphs.URDF(
file="urdf/simple/two_cube_prismatic.urdf",
fixed=False,
),
material=gs.materials.Kinematic(),
surface=gs.surfaces.Default(
color=(0.0, 1.0, 0.0, 0.7),
),
)
scene.build(n_envs=2)
assert_allclose(ghost_box.get_vAABB(), ((-0.20, -0.10, -0.05), (0.20, 0.10, 0.05)), tol=tol)
assert_allclose(ghost_robot.get_vAABB(), ((-0.05, -0.05, -0.05), (0.15, 0.05, 0.05)), tol=tol)
ghost_box.set_pos([1.0, 2.0, 3.0])
assert_allclose(ghost_box.get_vAABB(), ((0.80, 1.90, 2.95), (1.20, 2.10, 3.05)), tol=tol)
ghost_box.set_pos([0.0, 0.0, 0.0])
ghost_box.set_quat([[1.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0]])
assert_allclose(ghost_box.get_vAABB()[0], ((-0.20, -0.05, -0.10), (0.20, 0.05, 0.1)), tol=tol)
assert_allclose(ghost_box.get_vAABB()[1], ((-0.05, -0.10, -0.2), (0.05, 0.10, 0.2)), tol=tol)
ghost_robot.set_dofs_position([0.1, -0.1], dofs_idx_local=-1)
assert_allclose(ghost_robot.get_vAABB()[0], ((-0.05, -0.05, -0.05), (0.25, 0.05, 0.05)), tol=tol)
assert_allclose(ghost_robot.get_vAABB()[1], ((-0.05, -0.05, -0.05), (0.05, 0.05, 0.05)), tol=tol)
ghost_robot.set_qpos([1.0, 2.0, 3.0, 1.0, 1.0, 0.0, 0.0, 1.0])
assert_allclose(ghost_robot.get_vAABB(), ((0.95, 1.95, 2.95), (2.15, 2.05, 3.05)), tol=tol)
frozen_vaabb = [tensor_to_array(entity.get_vAABB()) for entity in scene.entities]
for _ in range(5):
scene.step()
assert_allclose([tensor_to_array(entity.get_vAABB()) for entity in scene.entities], frozen_vaabb, tol=gs.EPS)
@pytest.mark.required
def test_track_rigid(show_viewer, tol):
scene = gs.Scene(
show_viewer=show_viewer,
show_FPS=False,
)
scene.add_entity(gs.morphs.Plane())
robot = scene.add_entity(
morph=gs.morphs.URDF(
file="urdf/go2/urdf/go2.urdf",
pos=(0, 0.5, 0.42),
),
)
ghost = scene.add_entity(
morph=gs.morphs.URDF(
file="urdf/go2/urdf/go2.urdf",
pos=(0, 0.5, 0.42),
),
material=gs.materials.Kinematic(),
surface=gs.surfaces.Default(
color=(0.2, 0.0, 1.0, 0.7),
),
)
scene.build(n_envs=2, env_spacing=(0.5, 0.5))
for _ in range(20):
scene.step()
ghost.set_qpos(robot.get_qpos())
assert_allclose(ghost.get_vAABB(), robot.get_vAABB(), tol=tol)
assert_allclose(ghost.get_links_pos(), robot.get_links_pos(), tol=tol)
ghost.set_dofs_velocity(robot.get_dofs_velocity())
assert_allclose(ghost.get_links_vel(), robot.get_links_vel(), tol=tol)
frozen_ghost_vaabb = ghost.get_vAABB()
frozen_robot_vaabb = robot.get_vAABB()
for _ in range(20):
scene.step()
assert_allclose(ghost.get_vAABB(), frozen_ghost_vaabb, tol=gs.EPS)
with pytest.raises(AssertionError):
assert_allclose(robot.get_vAABB(), frozen_robot_vaabb, atol=0.1)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_kinematic.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:examples/IPC_Solver/ipc_robot_grasp_cube.py | import argparse
import os
import genesis as gs
def main():
gs.init(backend=gs.cpu, logging_level="info")
parser = argparse.ArgumentParser()
parser.add_argument("--no-ipc", action="store_true", default=False)
parser.add_argument("-v", "--vis", action="store_true", default=False)
parser.add_argument(
"--coup_type",
type=str,
default="two_way_soft_constraint",
choices=["two_way_soft_constraint", "external_articulation"],
)
args = parser.parse_args()
coupler_options = None
if not args.no_ipc:
coupler_options = gs.options.IPCCouplerOptions(
constraint_strength_translation=10.0,
constraint_strength_rotation=10.0,
enable_rigid_rigid_contact=False,
enable_rigid_ground_contact=False,
newton_translation_tolerance=10.0,
)
scene = gs.Scene(
sim_options=gs.options.SimOptions(
dt=0.01,
),
viewer_options=gs.options.ViewerOptions(
camera_pos=(2.0, 1.0, 1.0),
camera_lookat=(0.3, 0.0, 0.5),
),
coupler_options=coupler_options,
show_viewer=args.vis,
)
scene.add_entity(gs.morphs.Plane())
franka_material_kwargs = dict(
coup_friction=0.8,
coup_type=args.coup_type,
)
if args.coup_type == "two_way_soft_constraint":
franka_material_kwargs["coup_links"] = ("left_finger", "right_finger")
franka_material = gs.materials.Rigid(**franka_material_kwargs) if not args.no_ipc else None
franka = scene.add_entity(
gs.morphs.MJCF(
file="xml/franka_emika_panda/panda_non_overlap.xml",
),
material=franka_material,
)
if not args.no_ipc:
cube_material = gs.materials.FEM.Elastic(
E=5.0e4,
nu=0.45,
rho=1000.0,
friction_mu=0.5,
model="stable_neohookean",
)
else:
cube_material = gs.materials.Rigid()
scene.add_entity(
morph=gs.morphs.Box(
pos=(0.65, 0.0, 0.03),
size=(0.05, 0.05, 0.05),
),
material=cube_material,
surface=gs.surfaces.Plastic(
color=(0.2, 0.8, 0.2, 0.5),
),
)
scene.build()
motors_dof, fingers_dof = slice(0, 7), slice(7, 9)
end_effector = franka.get_link("hand")
franka.set_dofs_kp([4500.0, 4500.0, 3500.0, 3500.0, 2000.0, 2000.0, 2000.0, 500.0, 500.0])
qpos = franka.inverse_kinematics(link=end_effector, pos=[0.65, 0.0, 0.4], quat=[0.0, 1.0, 0.0, 0.0])
if not args.no_ipc or args.coup_type == "external_articulation":
franka.control_dofs_position(qpos[motors_dof], dofs_idx_local=motors_dof)
franka.control_dofs_position(0.04, dofs_idx_local=fingers_dof)
for _ in range(200 if "PYTEST_VERSION" not in os.environ else 1):
scene.step()
else:
franka.set_dofs_position(qpos)
# Lower the grapper half way to grasping position
qpos = franka.inverse_kinematics(link=end_effector, pos=[0.65, 0.0, 0.25], quat=[0.0, 1.0, 0.0, 0.0])
franka.control_dofs_position(qpos[motors_dof], dofs_idx_local=motors_dof)
for _ in range(100 if "PYTEST_VERSION" not in os.environ else 1):
scene.step()
# Reach grasping position
qpos = franka.inverse_kinematics(link=end_effector, pos=[0.65, 0.0, 0.135], quat=[0.0, 1.0, 0.0, 0.0])
franka.control_dofs_position(qpos[motors_dof], dofs_idx_local=motors_dof)
for _ in range(50 if "PYTEST_VERSION" not in os.environ else 1):
scene.step()
# Grasp the cube
franka.control_dofs_position(qpos[motors_dof], dofs_idx_local=motors_dof)
franka.control_dofs_position(0.0, dofs_idx_local=fingers_dof)
for _ in range(10 if "PYTEST_VERSION" not in os.environ else 1):
scene.step()
# Lift the cube
qpos = franka.inverse_kinematics(link=end_effector, pos=[0.65, 0.0, 0.3], quat=[0.0, 1.0, 0.0, 0.0])
franka.control_dofs_position(qpos[motors_dof], dofs_idx_local=motors_dof)
for _ in range(50 if "PYTEST_VERSION" not in os.environ else 1):
scene.step()
if __name__ == "__main__":
main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/IPC_Solver/ipc_robot_grasp_cube.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/constraint/solver_breakdown.py | import quadrants as ti
import genesis as gs
import genesis.utils.array_class as array_class
from genesis.engine.solvers.rigid.constraint import solver
@ti.kernel(fastcache=gs.use_fastcache)
def _kernel_linesearch(
entities_info: array_class.EntitiesInfo,
dofs_state: array_class.DofsState,
constraint_state: array_class.ConstraintState,
rigid_global_info: array_class.RigidGlobalInfo,
static_rigid_sim_config: ti.template(),
):
_B = constraint_state.grad.shape[1]
ti.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL, block_dim=32)
for i_b in range(_B):
if constraint_state.n_constraints[i_b] > 0 and constraint_state.improved[i_b]:
solver.func_linesearch_and_apply_alpha(
i_b,
entities_info=entities_info,
dofs_state=dofs_state,
rigid_global_info=rigid_global_info,
constraint_state=constraint_state,
static_rigid_sim_config=static_rigid_sim_config,
)
else:
constraint_state.improved[i_b] = False
@ti.kernel(fastcache=gs.use_fastcache)
def _kernel_cg_only_save_prev_grad(
constraint_state: array_class.ConstraintState,
static_rigid_sim_config: ti.template(),
):
"""Save prev_grad and prev_Mgrad (CG only)"""
_B = constraint_state.grad.shape[1]
ti.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL, block_dim=32)
for i_b in range(_B):
if constraint_state.n_constraints[i_b] > 0 and constraint_state.improved[i_b]:
solver.func_save_prev_grad(i_b, constraint_state=constraint_state)
@ti.kernel(fastcache=gs.use_fastcache)
def _kernel_update_constraint(
dofs_state: array_class.DofsState,
constraint_state: array_class.ConstraintState,
static_rigid_sim_config: ti.template(),
):
_B = constraint_state.grad.shape[1]
ti.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL, block_dim=32)
for i_b in range(_B):
if constraint_state.n_constraints[i_b] > 0 and constraint_state.improved[i_b]:
solver.func_update_constraint_batch(
i_b,
qacc=constraint_state.qacc,
Ma=constraint_state.Ma,
cost=constraint_state.cost,
dofs_state=dofs_state,
constraint_state=constraint_state,
static_rigid_sim_config=static_rigid_sim_config,
)
@ti.kernel(fastcache=gs.use_fastcache)
def _kernel_newton_only_nt_hessian(
constraint_state: array_class.ConstraintState,
rigid_global_info: array_class.RigidGlobalInfo,
static_rigid_sim_config: ti.template(),
):
"""Step 4: Newton Hessian update (Newton only)"""
solver.func_hessian_direct_tiled(constraint_state=constraint_state, rigid_global_info=rigid_global_info)
if ti.static(static_rigid_sim_config.enable_tiled_cholesky_hessian):
solver.func_cholesky_factor_direct_tiled(
constraint_state=constraint_state,
rigid_global_info=rigid_global_info,
static_rigid_sim_config=static_rigid_sim_config,
)
else:
_B = constraint_state.jac.shape[2]
ti.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL, block_dim=32)
for i_b in range(_B):
if constraint_state.n_constraints[i_b] > 0 and constraint_state.improved[i_b]:
solver.func_cholesky_factor_direct_batch(
i_b=i_b, constraint_state=constraint_state, rigid_global_info=rigid_global_info
)
@ti.kernel(fastcache=gs.use_fastcache)
def _kernel_update_gradient(
entities_info: array_class.EntitiesInfo,
dofs_state: array_class.DofsState,
constraint_state: array_class.ConstraintState,
rigid_global_info: array_class.RigidGlobalInfo,
static_rigid_sim_config: ti.template(),
):
"""Step 5: Update gradient"""
_B = constraint_state.grad.shape[1]
ti.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL, block_dim=32)
for i_b in range(_B):
if constraint_state.n_constraints[i_b] > 0 and constraint_state.improved[i_b]:
solver.func_update_gradient_batch(
i_b,
dofs_state=dofs_state,
entities_info=entities_info,
rigid_global_info=rigid_global_info,
constraint_state=constraint_state,
static_rigid_sim_config=static_rigid_sim_config,
)
@ti.kernel(fastcache=gs.use_fastcache)
def _kernel_update_search_direction(
constraint_state: array_class.ConstraintState,
rigid_global_info: array_class.RigidGlobalInfo,
static_rigid_sim_config: ti.template(),
):
"""Step 6: Check convergence and update search direction"""
_B = constraint_state.grad.shape[1]
ti.loop_config(serialize=static_rigid_sim_config.para_level < gs.PARA_LEVEL.ALL, block_dim=32)
for i_b in range(_B):
if constraint_state.n_constraints[i_b] > 0 and constraint_state.improved[i_b]:
solver.func_terminate_or_update_descent_batch(
i_b,
rigid_global_info=rigid_global_info,
constraint_state=constraint_state,
static_rigid_sim_config=static_rigid_sim_config,
)
@solver.func_solve_body.register(is_compatible=lambda *args, **kwargs: gs.backend in {gs.cuda})
def func_solve_decomposed(
entities_info,
dofs_state,
constraint_state,
rigid_global_info,
static_rigid_sim_config,
):
"""
Uses separate kernels for each solver step per iteration.
This maximizes kernel granularity, potentially allowing better GPU scheduling
and more flexibility in execution, at the cost of more Python→C++ boundary crossings.
"""
iterations = rigid_global_info.iterations[None]
for _it in range(iterations):
_kernel_linesearch(
entities_info,
dofs_state,
constraint_state,
rigid_global_info,
static_rigid_sim_config,
)
if static_rigid_sim_config.solver_type == gs.constraint_solver.CG:
_kernel_cg_only_save_prev_grad(
constraint_state,
static_rigid_sim_config,
)
_kernel_update_constraint(
dofs_state,
constraint_state,
static_rigid_sim_config,
)
if static_rigid_sim_config.solver_type == gs.constraint_solver.Newton:
_kernel_newton_only_nt_hessian(
constraint_state,
rigid_global_info,
static_rigid_sim_config,
)
_kernel_update_gradient(
entities_info,
dofs_state,
constraint_state,
rigid_global_info,
static_rigid_sim_config,
)
_kernel_update_search_direction(
constraint_state,
rigid_global_info,
static_rigid_sim_config,
)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/constraint/solver_breakdown.py",
"license": "Apache License 2.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:tests/profiling.py | import pytest
def parser_add_options(parser: pytest.Parser) -> None:
parser.addoption(
"--profile-ref",
type=str,
default="",
help="Added to output filename.",
)
parser.addoption(
"--profile-wait",
type=int,
required=True,
help="Number of steps to wait before profiling. Depends on what you want to view, since the profile will likely vary throughout the benchmark.",
)
parser.addoption(
"--profile-warmup", type=int, default=0, help="Number of warmup steps for profiling (default 0 is ok)."
)
parser.addoption(
"--profile-active",
type=int,
default=1,
help="Number of active profiling steps. (default 1 is ok; more than 1 will create large trace files)",
)
parser.addoption(
"--profile-repeat",
type=int,
default=1,
help="Number of times to repeat profiling. (default 1 is ok, unless you want to profile at multiple points during the simulation)",
)
def pytorch_profiler(pytestconfig):
"""Session-scoped fixture providing a PyTorch profiler context manager.
Activated by env var GS_PROFILING=1. Yields a (profiler, step_fn) tuple where step_fn
must be called after each simulation step.
The profiler uses a schedule so that only a window of steps is actively
traced, keeping the overhead minimal. On exit, a Chrome trace is written to
``profile_trace.json``.
"""
import torch
from torch.profiler import ProfilerActivity
wait = pytestconfig.getoption("--profile-wait")
warmup = pytestconfig.getoption("--profile-warmup")
active = pytestconfig.getoption("--profile-active")
repeat = pytestconfig.getoption("--profile-repeat")
ref = pytestconfig.getoption("--profile-ref")
schedule = torch.profiler.schedule(wait=wait, warmup=warmup, active=active, repeat=repeat)
activities = [ProfilerActivity.CPU]
if torch.cuda.is_available():
activities.append(ProfilerActivity.CUDA)
trace_counter = [0]
def trace_handler(prof):
trace_path = f"trace_{ref}_{trace_counter[0]}.json"
prof.export_chrome_trace(trace_path)
trace_counter[0] += 1
print(f"Exported trace cycle {trace_counter[0]} to {trace_path}")
prof = torch.profiler.profile(
activities=activities,
schedule=schedule,
record_shapes=False,
profile_memory=False,
with_stack=True,
with_flops=False,
on_trace_ready=trace_handler,
)
print(f"PyTorch profiling enabled (wait={wait}, warmup={warmup}, active={active})")
with prof:
yield prof.step
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/profiling.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:genesis/engine/couplers/ipc_coupler/coupler.py | import logging
import os
import sys
import tempfile
import weakref
from functools import partial
from typing import TYPE_CHECKING, cast
import numpy as np
import genesis as gs
import genesis.utils.geom as gu
from genesis.engine.materials.FEM.cloth import Cloth
from genesis.options.solvers import IPCCouplerOptions, RigidOptions
from genesis.repr_base import RBC
from genesis.utils.misc import geometric_mean, harmonic_mean, qd_to_numpy, tensor_to_array
if TYPE_CHECKING:
from genesis.engine.entities import FEMEntity, RigidEntity
from genesis.engine.entities.rigid_entity import RigidJoint, RigidLink
from genesis.engine.simulator import Simulator
from genesis.engine.solvers import FEMSolver, RigidSolver
# Check if libuipc is available
try:
import uipc
UIPC_AVAILABLE = True
except ImportError:
UIPC_AVAILABLE = False
if TYPE_CHECKING or UIPC_AVAILABLE:
import polyscope as ps
from uipc.backend import SceneVisitor
from uipc.constitution import (
AffineBodyConstitution,
AffineBodyPrismaticJoint,
AffineBodyRevoluteJoint,
DiscreteShellBending,
ElasticModuli,
ElasticModuli2D,
ExternalArticulationConstraint,
SoftTransformConstraint,
StableNeoHookean,
StrainLimitingBaraffWitkinShell,
)
from uipc.core import Engine, World, Scene, AffineBodyStateAccessorFeature, ContactElement, SubsceneElement
from uipc.geometry import GeometrySlot, SimplicialComplex, SimplicialComplexSlot
from uipc.gui import SceneGUI
from .data import COUPLING_TYPE, ABDLinkEntry, ArticulatedEntityData, IPCCouplingData
from .utils import (
build_ipc_scene_config,
compute_link_to_link_transform,
find_target_link_for_fixed_merge,
read_ipc_geometry_metadata,
update_coupling_forces,
)
ABD_KAPPA = 100.0 # MPa unit
# TODO: consider deriving from Genesis joint properties instead of hardcoding.
STIFFNESS_DEFAULT = 1e4
JOINT_STRENGTH_RATIO = 100.0
def _animate_rigid_link(coupler_ref, link, env_idx, info):
"""Animator callback for a soft-constraint coupled rigid link.
Uses a weakref to the coupler to avoid preventing garbage collection.
"""
coupler = coupler_ref()
if coupler is None:
gs.raise_exception("IPCCoupler was garbage collected while animator callback is still active.")
geom_slots = info.geo_slots()
if not geom_slots:
return
geom = geom_slots[0].geometry()
# Enable constraint and set target transform (q_genesis^n)
is_constrained_attr = geom.instances().find(uipc.builtin.is_constrained)
aim_transform_attr = geom.instances().find(uipc.builtin.aim_transform)
assert is_constrained_attr and aim_transform_attr
uipc.view(is_constrained_attr)[0] = 1
uipc.view(aim_transform_attr)[:] = coupler._abd_transforms_by_link[link][env_idx]
class IPCCoupler(RBC):
"""
Coupler class for handling Incremental Potential Contact (IPC) simulation coupling.
This coupler manages the communication between Genesis solvers and the IPC system,
including rigid bodies (as ABD objects) and FEM bodies in a unified contact framework.
"""
def __init__(self, simulator: "Simulator", options: IPCCouplerOptions) -> None:
"""
Initialize IPC Coupler.
Parameters
----------
simulator : Simulator
The simulator containing all solvers
options : IPCCouplerOptions
IPC configuration options
"""
# Check if uipc is available
if not UIPC_AVAILABLE:
raise ImportError(
"Python module 'uipc' is required by IPCCoupler but is not installed. Please install it via "
"`pip install pyuipc`."
)
self.sim = simulator
self.options = options
# Define some proxies for convenience
self.rigid_solver: "RigidSolver" = self.sim.rigid_solver
self.fem_solver: "FEMSolver" = self.sim.fem_solver
self._constraint_strength_translation_scaled = self.options.constraint_strength_translation / self.sim.dt**2
self._constraint_strength_rotation_scaled = self.options.constraint_strength_rotation / self.sim.dt**2
# ==== IPC System Infrastructure ====
self._ipc_engine: Engine | None = None
self._ipc_world: World | None = None
self._ipc_scene = Scene(build_ipc_scene_config(self.options, self.sim.options))
self._ipc_subscenes: list[SubsceneElement] = []
self._ipc_constitution_tabular = self._ipc_scene.constitution_tabular()
self._ipc_contact_tabular = self._ipc_scene.contact_tabular()
self._ipc_subscene_tabular = self._ipc_scene.subscene_tabular()
self._ipc_objects = self._ipc_scene.objects()
self._ipc_animator = self._ipc_scene.animator()
# ==== IPC Constitutions ====
self._ipc_abd: AffineBodyConstitution | None = None
self._ipc_stk: StableNeoHookean | None = None
self._ipc_stc: SoftTransformConstraint | None = None
self._ipc_nks: StrainLimitingBaraffWitkinShell | None = None
self._ipc_dsb: DiscreteShellBending | None = None
self._ipc_eac: ExternalArticulationConstraint | None = None
# ==== IPC Contact Elements ====
self._ipc_no_collision_contact: ContactElement = self._ipc_contact_tabular.create("no_collision_contact")
self._ipc_fem_contacts: dict["FEMEntity", ContactElement] = {}
self._ipc_cloth_contacts: dict["FEMEntity", ContactElement] = {}
self._ipc_abd_contacts: dict["RigidEntity", ContactElement] = {}
self._ipc_ground_contacts: dict["RigidEntity", ContactElement] = {}
# ==== Entity Coupling Configuration ====
self._coup_type_by_entity: dict["RigidEntity", COUPLING_TYPE] = {}
self._coup_links: dict["RigidEntity", set["RigidLink"]] = {} # Used for "two_way_soft_constraint"
self._coupling_collision_settings: dict["RigidEntity", dict["RigidLink", bool]] = {}
self._entities_by_coup_type: dict[COUPLING_TYPE, list["RigidEntity"]] = {}
# ==== ABD Geometry & State ====
self._abd_slots_by_link: dict["RigidLink", list[GeometrySlot]] = {}
self._abd_state_feature: AffineBodyStateAccessorFeature | None = None
self._abd_state_geom: SimplicialComplex | None = None # Geometry for batch data transfer
self._abd_data_by_link: dict["RigidLink", list[ABDLinkEntry]] = {}
# ==== Two-Way Coupling State ====
self._abd_transforms_by_link: dict["RigidLink", list[np.ndarray]] = {}
self._coupling_data: IPCCouplingData | None = None
# ==== GUI ====
self._ipc_gui: SceneGUI | None = None # polyscope viewer, only when _show_ipc_gui=True
# ==== External Articulation ====
self._articulation_non_fixed_base_entities: list["RigidEntity"] = [] # entities with non-fixed base
self._articulation_data_by_entity: dict["RigidEntity", ArticulatedEntityData] = {}
# ============================================================
# Section 1: Configuration API
# ============================================================
def build(self) -> None:
"""Build IPC system"""
# IPC coupler builds a single IPC scene shared across all envs, so it requires
# identical geometry topology (links, joints, geoms) across environments.
# Batched info options allow per-env topology which is incompatible.
if self.rigid_solver.is_active:
rigid_options = cast(RigidOptions, self.rigid_solver._options)
if rigid_options.batch_links_info or rigid_options.batch_dofs_info or rigid_options.batch_joints_info:
gs.raise_exception(
"IPC coupler does not support batched rigid info (batch_links_info, batch_dofs_info, "
"batch_joints_info). Please disable these options when using IPC coupling."
)
self._init_ipc()
self._setup_coupling_config()
self._add_objects_to_ipc()
self._finalize_ipc()
self._init_accessors()
if self.options._show_ipc_gui:
self._init_ipc_gui()
def _setup_coupling_config(self):
"""Read coup_type, coup_links, and collision settings from entity materials."""
assert gs.logger is not None
entity: "RigidEntity"
for i_e, entity in enumerate(cast(list["RigidEntity"], self.rigid_solver.entities)):
if not entity.material.needs_coup:
continue
coup_type = entity.material.coup_type
if coup_type is None:
# Auto-select based on entity type
if entity.n_joints > 0:
coup_type = "external_articulation" if entity.base_link.is_fixed else "two_way_soft_constraint"
else:
coup_type = "ipc_only"
self._coup_type_by_entity[entity] = coup_type = getattr(COUPLING_TYPE, coup_type.upper())
if coup_type == COUPLING_TYPE.EXTERNAL_ARTICULATION:
if not entity.base_link.is_fixed:
gs.raise_exception(
f"Rigid entity {i_e} is not fixed. Coupling type 'external_articulation' is not supported."
)
if entity.n_joints == 0:
gs.raise_exception(
f"Rigid entity {i_e} has no joint. Coupling type 'external_articulation' is not supported."
)
gs.logger.debug(f"Rigid entity {i_e}: coupling type '{coup_type.name.lower()}'")
# Resolve link filter from material
link_filter_names = entity.material.coup_links
if link_filter_names is not None:
self._coup_links[entity] = set(map(entity.get_link, link_filter_names))
gs.logger.debug(f"Rigid entity {i_e}: IPC link filter set to {len(link_filter_names)} link(s)")
# Resolve collision settings from material
if not entity.material.enable_coup_collision:
# Disable collision for all links
self._coupling_collision_settings[entity] = {link: False for link in entity.links}
gs.logger.debug(f"Rigid entity {i_e}: IPC collision disabled for all links")
elif entity.material.coup_collision_links is not None:
# Positive filter: only named links get collision, others disabled
allowed = set(entity.material.coup_collision_links)
self._coupling_collision_settings[entity] = {
link: False for link in entity.links if link.name not in allowed
}
gs.logger.debug(f"Rigid entity {i_e}: IPC collision limited to {allowed}")
# Categorize entities by coupling type
for entity, coup_type in self._coup_type_by_entity.items():
self._entities_by_coup_type.setdefault(coup_type, []).append(entity)
def _init_ipc(self) -> None:
"""Initialize IPC system components"""
assert gs.logger is not None
if gs.logger.level <= logging.DEBUG:
uipc.Logger.set_level(uipc.Logger.Level.Info)
uipc.Timer.enable_all()
else:
uipc.Logger.set_level(uipc.Logger.Level.Error)
uipc.Timer.disable_all()
# Create workspace directory for IPC output, named after scene UID.
workspace = os.path.join(tempfile.gettempdir(), f"genesis_ipc_{self.sim.scene.uid.full()}")
os.makedirs(workspace, exist_ok=False)
# Note: gpu_device option may need to be set via CUDA environment variables (CUDA_VISIBLE_DEVICES)
# before Genesis initialization, as libuipc Engine does not expose device selection in constructor
self._ipc_engine = Engine("cuda", workspace)
self._ipc_world = World(self._ipc_engine)
# Set up sub-scenes for multi-environment to isolate per-environment contacts if batched
for env_idx in range(self.sim._B):
ipc_subscene = self._ipc_subscene_tabular.create(f"subscene_{env_idx}")
for other_ipc_subscene in self._ipc_subscenes:
self._ipc_subscene_tabular.insert(other_ipc_subscene, ipc_subscene, False)
self._ipc_subscenes.append(ipc_subscene)
def _add_objects_to_ipc(self) -> None:
"""Add objects from solvers to IPC system"""
# Add FEM entities to IPC
if self.fem_solver.is_active:
self._add_fem_entities_to_ipc()
# Add rigid geoms and articulated entities to IPC based on per-entity coupling types
if self.rigid_solver.is_active:
self._add_rigid_geoms_to_ipc()
self._add_articulation_entities_to_ipc()
# Register all per-entity contact pair models with per-material friction
self._register_contact_pairs()
def _add_fem_entities_to_ipc(self) -> None:
"""Add FEM entities to the existing IPC scene (includes both volumetric FEM and cloth)"""
# Create constitutions based on entity types present
entity: "FEMEntity"
for env_idx in range(self.sim._B):
for i_e, entity in enumerate(cast(list["FEMEntity"], self.fem_solver.entities)):
is_cloth = isinstance(entity.material, Cloth)
solver_type = "cloth" if is_cloth else "fem"
# Create object in IPC
fem_obj = self._ipc_objects.create(f"{solver_type}_{i_e}_{env_idx}")
# ---- Create mesh ----
# trimesh for cloth (2D shell), tetmesh for volumetric FEM (3D)
if is_cloth:
verts = tensor_to_array(entity.init_positions).astype(np.float64, copy=False)
faces = entity.surface_triangles.astype(np.int32, copy=False)
mesh = uipc.geometry.trimesh(verts, faces)
else:
mesh = uipc.geometry.tetmesh(tensor_to_array(entity.init_positions), entity.elems)
uipc.geometry.label_surface(mesh)
# ---- Apply constitutions ----
# Add to contact subscene (only for multi-environment)
if self.sim.n_envs > 0:
self._ipc_subscenes[env_idx].apply_to(mesh)
# Apply per-entity contact element (created once per entity on first env iteration)
if is_cloth:
if entity not in self._ipc_cloth_contacts:
self._ipc_cloth_contacts[entity] = self._ipc_contact_tabular.create(f"cloth_contact_{i_e}")
self._ipc_cloth_contacts[entity].apply_to(mesh)
else:
if entity not in self._ipc_fem_contacts:
self._ipc_fem_contacts[entity] = self._ipc_contact_tabular.create(f"fem_contact_{i_e}")
self._ipc_fem_contacts[entity].apply_to(mesh)
# Apply material constitution based on type
if is_cloth:
if self._ipc_nks is None:
self._ipc_nks = StrainLimitingBaraffWitkinShell()
self._ipc_constitution_tabular.insert(self._ipc_nks)
# Apply shell material for cloth
moduli = ElasticModuli2D.youngs_poisson(entity.material.E, entity.material.nu)
self._ipc_nks.apply_to(
mesh, moduli=moduli, mass_density=entity.material.rho, thickness=entity.material.thickness
)
# Apply bending stiffness if specified
if entity.material.bending_stiffness is not None:
if self._ipc_dsb is None:
self._ipc_dsb = DiscreteShellBending()
self._ipc_constitution_tabular.insert(self._ipc_dsb)
self._ipc_dsb.apply_to(mesh, bending_stiffness=entity.material.bending_stiffness)
else:
if self._ipc_stk is None:
self._ipc_stk = StableNeoHookean()
self._ipc_constitution_tabular.insert(self._ipc_stk)
# Apply volumetric material for FEM
moduli = ElasticModuli.youngs_poisson(entity.material.E, entity.material.nu)
self._ipc_stk.apply_to(mesh, moduli, mass_density=entity.material.rho)
# ---- Apply subscene and metadata ----
meta_attrs = mesh.meta()
meta_attrs.create("solver_type", solver_type)
meta_attrs.create("entity_idx", str(i_e))
meta_attrs.create("env_idx", str(env_idx))
# ---- Create IPC object and geometry slot ----
fem_obj.geometries().create(mesh)
def _add_rigid_geoms_to_ipc(self) -> None:
    """Add rigid geoms to the IPC scene as ABD objects, merging geoms by link.

    Links joined by FIXED joints are merged into a single ABD body (matching the
    mjcf.py merge behavior), one IPC object per (merged target link, environment).
    Plane geoms are registered separately as IPC ground half-spaces and are only
    allowed for 'ipc_only' entities.
    """
    assert gs.logger is not None
    gs.logger.debug(f"Registered entity coupling types: {set(self._coup_type_by_entity.values())}")
    # ========== Pre-compute link groups (env-independent) ==========
    # Group links by fixed-joint merge target, matching mjcf.py behavior where geoms from fixed-joint children are
    # merged into the parent body's mesh.
    target_groups: dict["RigidLink", list["RigidLink"]] = {}  # target link -> [source links merged into it]
    merge_transforms: dict["RigidLink", tuple[np.ndarray, np.ndarray]] = {
        # source link -> (trans, quat) of the source frame relative to the target frame
        # (consumed below via gu.transform_by_trans_quat)
    }
    for link in self.rigid_solver.links:
        entity = link.entity
        coup_type = self._coup_type_by_entity.get(entity)
        if coup_type is None:
            # Entity not coupled to IPC at all.
            continue
        # Link filter for two_way_soft_constraint
        if coup_type == COUPLING_TYPE.TWO_WAY_SOFT_CONSTRAINT:
            link_filter = self._coup_links.get(entity)
            if link_filter is not None and link not in link_filter:
                continue
        target_link = find_target_link_for_fixed_merge(link)
        target_groups.setdefault(target_link, []).append(link)
        if target_link is not link:
            merge_transforms[link] = compute_link_to_link_transform(link, target_link)
            gs.logger.debug(f"Fixed-merge: link {link.idx} ({link.name}) -> {target_link.idx} ({target_link.name})")
    # ========== Process each environment ==========
    links_pos = qd_to_numpy(self.rigid_solver.links_state.pos, transpose=True)
    links_quat = qd_to_numpy(self.rigid_solver.links_state.quat, transpose=True)
    for env_idx in range(self.sim._B):
        for target_link, source_links in target_groups.items():
            entity = target_link.entity
            entity_coup_type = self._coup_type_by_entity[entity]
            i_e = entity._idx_in_solver
            # ---- Collect geom meshes ----
            meshes = []
            for source_link in source_links:
                for geom in source_link.geoms:
                    if geom.type == gs.GEOM_TYPE.PLANE:
                        # Planes become IPC ground half-spaces, not ABD meshes.
                        if entity_coup_type != COUPLING_TYPE.IPC_ONLY:
                            gs.raise_exception(
                                f"Plane entity (solver idx={i_e}) has coup_type='{entity_coup_type}', "
                                f"but only 'ipc_only' is supported for plane geoms."
                            )
                        # Plane normal/offset expressed in world frame from the geom's init pose.
                        local_normal = geom.data[:3].astype(np.float64, copy=False)
                        normal = gu.transform_by_quat(local_normal, geom.init_quat)
                        normal = normal / np.linalg.norm(normal)
                        height = np.dot(geom.init_pos, normal)
                        plane_geom = uipc.geometry.ground(height, normal)
                        # Ground contact element created once per entity (first env iteration).
                        if entity not in self._ipc_ground_contacts:
                            plane_contact = self._ipc_contact_tabular.create(f"ground_contact_{i_e}")
                            self._ipc_ground_contacts[entity] = plane_contact
                        self._ipc_ground_contacts[entity].apply_to(plane_geom)
                        plane_obj = self._ipc_objects.create(f"rigid_plane_{geom.idx}_{env_idx}")
                        # Add to contact subscene (only for multi-environment)
                        if self.sim.n_envs > 0:
                            self._ipc_subscenes[env_idx].apply_to(plane_geom)
                        plane_obj.geometries().create(plane_geom)
                    elif geom.n_verts:
                        # Apply geom transform to vertices
                        geom_verts = gu.transform_by_trans_quat(geom.init_verts, geom.init_pos, geom.init_quat)
                        # Apply additional transform for fixed joint merging
                        if source_link is not target_link:
                            geom_verts = gu.transform_by_trans_quat(geom_verts, *merge_transforms[source_link])
                        try:
                            mesh = uipc.geometry.trimesh(
                                geom_verts.astype(np.float64, copy=False),
                                geom.init_faces.astype(np.int32, copy=False),
                            )
                        except RuntimeError as e:
                            gs.raise_exception_from(f"Failed to process geom {geom.idx} for IPC.", e)
                        meshes.append(mesh)
            if not meshes:
                # Link group has no collision geometry (e.g. plane-only); nothing to register.
                continue
            # ---- Merge meshes and apply world transform ----
            rigid_link_geom = meshes[0] if len(meshes) == 1 else uipc.geometry.merge(meshes)
            uipc.geometry.label_surface(rigid_link_geom)
            link_T = gu.trans_quat_to_T(links_pos[env_idx, target_link.idx], links_quat[env_idx, target_link.idx])
            trans_view = uipc.view(rigid_link_geom.transforms())
            trans_view[0] = link_T
            # ---- Determine coupling behavior ----
            is_ipc_only = entity_coup_type == COUPLING_TYPE.IPC_ONLY
            # Non-fixed base link of an articulated entity is handled specially.
            is_free_base = (
                entity_coup_type == COUPLING_TYPE.EXTERNAL_ARTICULATION
                and target_link is entity.base_link
                and not entity.base_link.is_fixed
            )
            is_soft_constraint_target = entity_coup_type == COUPLING_TYPE.TWO_WAY_SOFT_CONSTRAINT or (
                is_free_base and not self.options.free_base_driven_by_ipc
            )
            is_free_base_ipc_driven = is_free_base and self.options.free_base_driven_by_ipc
            # ---- Apply constitutions ----
            abd_obj = self._ipc_objects.create(f"rigid_link_{target_link.idx}_{env_idx}")
            if self.sim.n_envs > 0:
                self._ipc_subscenes[env_idx].apply_to(rigid_link_geom)
            # Apply per-entity contact element or no-collision marker
            # (collision can be disabled per (entity, link) via _coupling_collision_settings).
            if self._coupling_collision_settings.get(entity, {}).get(target_link, True):
                if entity not in self._ipc_abd_contacts:
                    abd_contact = self._ipc_contact_tabular.create(f"abd_contact_{i_e}")
                    self._ipc_abd_contacts[entity] = abd_contact
                self._ipc_abd_contacts[entity].apply_to(rigid_link_geom)
            else:
                self._ipc_no_collision_contact.apply_to(rigid_link_geom)
            # Apply ABD constitution (constitution object created lazily, shared by all links)
            if self._ipc_abd is None:
                self._ipc_abd = AffineBodyConstitution()
                self._ipc_constitution_tabular.insert(self._ipc_abd)
            self._ipc_abd.apply_to(
                rigid_link_geom, kappa=ABD_KAPPA * uipc.unit.MPa, mass_density=entity.material.rho
            )
            # Apply SoftTransformConstraint and animator for coupled links
            if is_soft_constraint_target:
                if self._ipc_stc is None:
                    self._ipc_stc = SoftTransformConstraint()
                    self._ipc_constitution_tabular.insert(self._ipc_stc)
                constraint_strength = np.array(
                    [
                        self.options.constraint_strength_translation,
                        self.options.constraint_strength_rotation,
                    ],
                    dtype=np.float64,
                )
                self._ipc_stc.apply_to(rigid_link_geom, constraint_strength)
                # weakref avoids a reference cycle between the coupler and the animator callback.
                self._ipc_animator.insert(
                    abd_obj, partial(_animate_rigid_link, weakref.ref(self), target_link, env_idx)
                )
            # ---- Set geometry attributes ----
            # external_kinetic: 1 = driven by rigid solver, 0 = IPC-only or IPC-driven free base
            external_kinetic_attr = rigid_link_geom.instances().find(uipc.builtin.external_kinetic)
            uipc.view(external_kinetic_attr)[:] = int(not is_free_base_ipc_driven and not is_ipc_only)
            is_fixed_attr = rigid_link_geom.instances().find(uipc.builtin.is_fixed)
            uipc.view(is_fixed_attr)[:] = int(target_link.is_fixed)
            # For external_articulation, store reference DOF for articulation constraint sync
            if entity_coup_type == COUPLING_TYPE.EXTERNAL_ARTICULATION and self.options.enable_rigid_dofs_sync:
                ref_dof_prev_attr = rigid_link_geom.instances().create("ref_dof_prev", uipc.Vector12.Zero())
                uipc.view(ref_dof_prev_attr)[:] = uipc.geometry.affine_body.transform_to_q(link_T)
            # Set metadata attributes used later for state retrieval/filtering.
            meta_attrs = rigid_link_geom.meta()
            meta_attrs.create("solver_type", "rigid")
            meta_attrs.create("link_idx", str(target_link.idx))
            meta_attrs.create("env_idx", str(env_idx))
            # ---- Create IPC object and geometry slot ----
            abd_geom_slot, _ = abd_obj.geometries().create(rigid_link_geom)
            # ---- Store slot mappings ---- (list is env-ordered: one slot per env)
            self._abd_slots_by_link.setdefault(target_link, []).append(abd_geom_slot)
def _add_articulation_entities_to_ipc(self) -> None:
    """
    Add articulated robot entities to IPC using ExternalArticulationConstraint.
    This enables joint-level coupling between Genesis and IPC.

    For each coupled entity, every non-fixed joint is represented in IPC by a
    two-vertex line geometry linking the ABD bodies of its parent and child
    links; the per-entity articulation geometry aggregates those joints.
    """
    assert gs.logger is not None
    if COUPLING_TYPE.EXTERNAL_ARTICULATION not in self._coup_type_by_entity.values():
        return
    self._ipc_eac = ExternalArticulationConstraint()
    self._ipc_constitution_tabular.insert(self._ipc_eac)
    joints_xaxis = qd_to_numpy(self.rigid_solver.joints_state.xaxis, transpose=True)
    joints_xanchor = qd_to_numpy(self.rigid_solver.joints_state.xanchor, transpose=True)
    # Process each rigid entity with external_articulation coupling type
    for i_e, entity in enumerate(cast(list["RigidEntity"], self.rigid_solver.entities)):
        # Only process entities with external_articulation coupling type
        if self._coup_type_by_entity.get(entity) != COUPLING_TYPE.EXTERNAL_ARTICULATION:
            continue
        # Detect non-fixed base for handling base link separately via SoftTransformConstraint
        gs.logger.debug(f"Adding articulated entity {i_e} with {entity.n_joints} joints")
        # Initial (placeholder) joint-space mass matrix; overwritten each step in
        # _pre_advance_external_articulation with the Genesis mass matrix.
        mass_matrix = np.diag(np.full((entity.n_dofs,), fill_value=STIFFNESS_DEFAULT, dtype=np.float64))
        # ---- Collect joint info (env-independent) ----
        joints: list[tuple["RigidJoint", type, bool, "RigidLink", "RigidLink"]] = []
        # Skip the free-base joint (index 0) when the base link is not fixed.
        for joint in entity.joints[(0 if entity.base_link.is_fixed else 1) :]:
            if joint.type == gs.JOINT_TYPE.FIXED:
                continue
            elif joint.type == gs.constants.JOINT_TYPE.REVOLUTE:
                joint_constitution = AffineBodyRevoluteJoint
                reverse_verts = True
            elif joint.type == gs.constants.JOINT_TYPE.PRISMATIC:
                joint_constitution = AffineBodyPrismaticJoint
                reverse_verts = False
            else:
                gs.raise_exception(f"Unsupported joint type: {joint.type}")
            child_link = joint.link
            # Resolve the parent through fixed-joint merging so it maps to an actual ABD body.
            parent_link = entity.links[max(joint.link.parent_idx, 0) - entity.link_start]
            parent_link = find_target_link_for_fixed_merge(parent_link)
            if parent_link not in self._abd_slots_by_link or child_link not in self._abd_slots_by_link:
                gs.raise_exception(
                    "Rigid link has no collision geometry. Coupling type 'external_articulation' is not supported."
                )
            joints.append((joint, joint_constitution, reverse_verts, parent_link, child_link))
        # ---- Create joint geometries per environment ----
        articulation_geom_slots: list[GeometrySlot] = []
        for env_idx in range(self.sim._B):
            joint_geom_slots: list[GeometrySlot] = []
            for joint, joint_constitution, reverse_verts, parent_link, child_link in joints:
                joint_axis = joints_xaxis[env_idx, joint.idx]
                joint_pos = joints_xanchor[env_idx, joint.idx]
                # Unit-length segment centered on the joint anchor, oriented along the joint axis.
                v1 = joint_pos - 0.5 * joint_axis
                v2 = joint_pos + 0.5 * joint_axis
                # Vertex order encodes the joint sign convention (reversed for revolute joints).
                vertices = np.array([v2, v1] if reverse_verts else [v1, v2], dtype=np.float64)
                edges = np.array([[0, 1]], dtype=np.int32)
                joint_geom = uipc.geometry.linemesh(vertices, edges)
                if self.sim.n_envs > 0:
                    self._ipc_subscenes[env_idx].apply_to(joint_geom)
                parent_abd_slot = self._abd_slots_by_link[parent_link][env_idx]
                child_abd_slot = self._abd_slots_by_link[child_link][env_idx]
                joint_constitution().apply_to(
                    joint_geom, [parent_abd_slot], [0], [child_abd_slot], [0], [JOINT_STRENGTH_RATIO]
                )
                joint_obj = self._ipc_objects.create(f"joint_{joint.idx}_{env_idx}")
                joint_geom_slot, _ = joint_obj.geometries().create(joint_geom)
                joint_geom_slots.append(joint_geom_slot)
            articulation_geom = self._ipc_eac.create_geometry(joint_geom_slots, [0] * len(joint_geom_slots))
            if self.sim.n_envs > 0:
                self._ipc_subscenes[env_idx].apply_to(articulation_geom)
            mass_attr = articulation_geom["joint_joint"].find("mass")
            uipc.view(mass_attr).flat[:] = mass_matrix
            articulation_obj = self._ipc_objects.create(f"articulation_entity_{i_e}_{env_idx}")
            articulation_geom_slot, _ = articulation_obj.geometries().create(articulation_geom)
            articulation_geom_slots.append(articulation_geom_slot)
        # Store articulation data (pre-allocated per-env buffers used in pre/post advance)
        self._articulation_data_by_entity[entity] = ArticulatedEntityData(
            joints_child_link=[j.link for j, *_ in joints],
            joints_q_idx_local=[j.qs_idx_local[0] for j, *_ in joints],
            articulation_slots=articulation_geom_slots,
            ref_dof_prev=np.zeros((self.sim._B, entity.n_qs), dtype=np.float64),
            qpos_stored=np.zeros((self.sim._B, entity.n_qs), dtype=np.float64),
            qpos_current=np.zeros((self.sim._B, entity.n_qs), dtype=np.float64),
            qpos_new=np.zeros((self.sim._B, entity.n_qs), dtype=np.float64),
            delta_theta_tilde=np.zeros((self.sim._B, len(joints)), dtype=np.float64),
            delta_theta_ipc=np.zeros((self.sim._B, len(joints)), dtype=np.float64),
            prev_links_transform=[[None for _ in range(self.sim._B)] for _ in joints],
        )
        # Add to cache list if non-fixed base for '_retrieve_rigid_states' in couple()
        if not entity.base_link.is_fixed:
            self._articulation_non_fixed_base_entities.append(entity)
        gs.logger.debug(f"Successfully added articulated rigid entity {i_e} to IPC.")
def _register_contact_pairs(self) -> None:
    """Register pairwise contact models for all entity contact elements.

    Friction is combined by geometric mean, resistance by harmonic mean (series spring).
    When an entity material does not define ``contact_resistance``,
    ``options.contact_resistance`` is used as the per-entity fallback.
    Ground pairs combine entity parameters with the plane entity's material friction.
    """

    def resolve_resistance(material) -> float:
        # Fall back only when the material leaves the value unset (None).
        # A plain `x or fallback` would also discard an explicitly configured
        # resistance of 0.0, contradicting the contract above.
        resistance = material.contact_resistance
        return self.options.contact_resistance if resistance is None else resistance

    # Collect (ContactElement, friction_mu, resistance, is_abd) for all entity contact elements
    contact_infos: list[tuple[ContactElement, float, float, bool]] = []
    for entity, elem in (*self._ipc_cloth_contacts.items(), *self._ipc_fem_contacts.items()):
        contact_infos.append((elem, entity.material.friction_mu, resolve_resistance(entity.material), False))
    for entity, elem in self._ipc_abd_contacts.items():
        contact_infos.append((elem, entity.material.coup_friction, resolve_resistance(entity.material), True))
    # Register entity-entity pairs (upper triangle including self-pairs)
    for i, (elem_i, friction_i, resistance_i, is_abd_i) in enumerate(contact_infos):
        for elem_j, friction_j, resistance_j, is_abd_j in contact_infos[i:]:
            friction_ij = geometric_mean(friction_i, friction_j)
            resistance_ij = harmonic_mean(resistance_i, resistance_j)
            # Rigid-rigid (ABD-ABD) contact is optional; every other combination is always on.
            enabled = not (is_abd_i and is_abd_j) or self.options.enable_rigid_rigid_contact
            self._ipc_contact_tabular.insert(elem_i, elem_j, friction_ij, resistance_ij, enabled)
    # Register per-plane ground contact pairs
    for entity, ground_elem in self._ipc_ground_contacts.items():
        plane_friction = entity.material.coup_friction
        plane_resistance = resolve_resistance(entity.material)
        for elem, friction, resistance, is_abd in contact_infos:
            friction_ground = geometric_mean(friction, plane_friction)
            resistance_ground = harmonic_mean(resistance, plane_resistance)
            enabled = not is_abd or self.options.enable_rigid_ground_contact
            self._ipc_contact_tabular.insert(ground_elem, elem, friction_ground, resistance_ground, enabled)
        # Non-colliding geoms never interact with the ground plane either.
        self._ipc_contact_tabular.insert(self._ipc_no_collision_contact, ground_elem, 0.0, 0.0, False)
    # Register no_collision pairs (always disabled)
    for elem, *_ in contact_infos:
        self._ipc_contact_tabular.insert(self._ipc_no_collision_contact, elem, 0.0, 0.0, False)
    self._ipc_contact_tabular.insert(
        self._ipc_no_collision_contact, self._ipc_no_collision_contact, 0.0, 0.0, False
    )
def _finalize_ipc(self):
    """Finalize IPC setup: initialize the world from the scene and checkpoint frame 0."""
    assert gs.logger is not None
    assert self._ipc_world is not None
    self._ipc_world.init(self._ipc_scene)
    # Checkpoint frame 0 so that recover(0) works in reset().
    self._ipc_world.dump()
    gs.logger.info("IPC world initialized successfully")
def _init_accessors(self):
    """Set up the AffineBodyStateAccessorFeature and pre-allocate per-link coupling buffers.

    Creates the batch state-transfer geometry used by _retrieve_rigid_states and the
    IPCCouplingData scratch arrays used by _apply_abd_coupling_forces.
    """
    assert gs.logger is not None
    assert self._ipc_world is not None
    abd_links = list(self._abd_slots_by_link.keys())
    n_abd_links = len(abd_links)
    if not abd_links:
        # No ABD bodies registered, so the accessor feature is not needed.
        return
    self._abd_state_feature = cast(
        AffineBodyStateAccessorFeature, self._ipc_world.features().find(AffineBodyStateAccessorFeature)
    )
    # Verify the count matches IPC's ABD body count (one body per link per env).
    body_count = self._abd_state_feature.body_count()
    if body_count != n_abd_links * self.sim._B:
        gs.raise_exception(f"ABD body count mismatch: got {body_count}.")
    # Pre-allocate rigid link transform buffers (filled in _store_gs_rigid_states).
    for link in abd_links:
        self._abd_transforms_by_link[link] = [np.eye(4, dtype=gs.np_float) for _ in range(self.sim._B)]
    # Create state geometry for batch data transfer
    self._abd_state_geom = self._abd_state_feature.create_geometry()
    self._abd_state_geom.instances().create(uipc.builtin.transform, np.eye(4, dtype=np.float64))
    self._abd_state_geom.instances().create(uipc.builtin.velocity, np.zeros((4, 4), dtype=np.float64))
    # Only these entities need their ABD states read back from IPC every step.
    rigid_retrieve_entities = set(
        self._entities_by_coup_type.get(COUPLING_TYPE.TWO_WAY_SOFT_CONSTRAINT, [])
        + self._entities_by_coup_type.get(COUPLING_TYPE.IPC_ONLY, [])
        + self._articulation_non_fixed_base_entities
    )
    self._abd_data_by_link = {
        link: [
            ABDLinkEntry(
                transform=np.eye(4, dtype=gs.np_float),
                velocity=np.zeros((4, 4), dtype=gs.np_float),
            )
            for _ in range(self.sim._B)
        ]
        for link in abd_links
        if link.entity in rigid_retrieve_entities
    }
    # Pre-allocate coupling data. IPC lays bodies out env-major:
    # body_idx = env_idx * n_abd_links + local link index.
    coupling_links = list(self._abd_data_by_link.keys())
    # O(1) lookup table instead of repeated abd_links.index() calls (O(n) each,
    # i.e. O(n_links^2 * n_envs) overall in the comprehension below).
    local_idx_by_link = {link: i for i, link in enumerate(abd_links)}
    abd_body_idx_by_link = {
        link: [env_idx * n_abd_links + local_idx_by_link[link] for env_idx in range(self.sim._B)]
        for link in coupling_links
    }
    self._coupling_data = IPCCouplingData(coupling_links, abd_body_idx_by_link, self.sim._B)
    gs.logger.debug(f"IPC coupling data created: {len(coupling_links)} links.")
def _init_ipc_gui(self):
    """Initialize polyscope-based IPC GUI viewer.

    Best effort: any failure (missing display, driver issues, ...) is logged and the
    coupler continues with the GUI disabled, hence the deliberately broad except.
    """
    try:
        if not ps.is_initialized():
            # Use EGL on Linux to match Genesis offscreen renderer and avoid context conflicts.
            ps.init("openGL3_egl" if sys.platform == "linux" else "")
        self._ipc_gui = SceneGUI(self._ipc_scene, "split")
        self._ipc_gui.register()  # also sets up_dir and ground_plane_height from scene
        ps.show(forFrames=1)
        gs.logger.info("IPC GUI initialized successfully")
    except Exception as e:
        gs.logger.warning(f"IPC GUI unavailable: {e}. Continuing without IPC GUI.")
        self._ipc_gui = None
# ============================================================
# Section 2: Core implementation
# ============================================================
def preprocess(self, f):
    """Hook called before the coupling step; IPC requires no pre-processing.

    Intentionally a no-op — all IPC work happens inside ``couple``.
    """
    pass
def couple(self, f):
    """
    Execute IPC coupling step with per-entity coupling types.

    This unified coupling flow handles all entity types:
    - 'two_way_soft_constraint': Uses Animator + SoftTransformConstraint
    - 'external_articulation': Uses ExternalArticulationConstraint at joint level
    - 'ipc_only': One-way coupling, IPC controls rigid body transforms

    Flow:
    1. Store Genesis rigid states (common)
    2. Pre-advance processing (per entity type)
    3. IPC advance + retrieve (common, only once)
    4. Retrieve FEM states (common)
    5. Post-advance processing (per entity type)
    6. Update GUI if enabled
    """
    assert self._ipc_world is not None
    if not self.is_active:
        return
    # Step 1: Store Genesis rigid states (common)
    self._store_gs_rigid_states()
    # Step 2: Pre-advance processing (per entity type)
    self._pre_advance_external_articulation()
    # Step 3: IPC advance + retrieve (common)
    self._ipc_world.advance()
    self._ipc_world.retrieve()
    # Step 4: Retrieve states
    self._retrieve_fem_states()
    self._retrieve_rigid_states()
    # Step 5: Post-advance processing (per entity type)
    self._apply_abd_coupling_forces()
    self._post_advance_external_articulation()
    self._post_advance_ipc_only()
    # Step 6: Update GUI if enabled
    if self._ipc_gui is not None:
        ps.frame_tick()
        self._ipc_gui.update()
def couple_grad(self, f):
    """Backward counterpart of ``couple``.

    No-op: IPC does not support gradient computation yet.
    """
    pass
def reset(self, envs_idx=None):
    """Reset coupling state by restoring the IPC world checkpoint taken at frame 0.

    Per-environment reset is not supported: ``envs_idx`` must be None (full reset only).
    """
    assert gs.logger is not None
    assert self._ipc_world is not None
    assert envs_idx is None
    gs.logger.debug("Resetting IPC coupler state")
    # recover(0) restores the frame-0 checkpoint created by _finalize_ipc's dump().
    self._ipc_world.recover(0)
    self._ipc_world.retrieve()
@property
def is_active(self) -> bool:
    """Check if IPC coupling is active (i.e. an IPC world has been created)."""
    return self._ipc_world is not None
@property
def has_any_rigid_coupling(self) -> bool:
    """
    Check if any rigid entity is coupled to IPC.

    Returns
    -------
    bool
        True if at least one rigid entity has a coupling type (two_way_soft_constraint,
        external_articulation, or ipc_only).
    """
    return bool(self._coup_type_by_entity)
# ============================================================
# Section 3: Helpers
# ============================================================
def _apply_base_link_velocity_from_ipc(self, entity):
    """Write the IPC base-link velocity (linear + angular) into the entity's first 6 DOFs.

    Assumes the entity's first 6 DOFs are the free-base translational and rotational
    DOFs (consistent with the slice used below) — confirm against the rigid solver layout.
    """
    envs_vel = np.empty((self.sim._B, 6), dtype=gs.np_float)
    for env_idx in range(self.sim._B):
        abd_entry = self._abd_data_by_link[entity.base_link][env_idx]
        # Linear velocity is the translation column of the 4x4 transform derivative.
        envs_vel[env_idx, :3] = abd_entry.velocity[:3, 3]
        # omega_skew = dR/dt @ R^T
        omega_skew = abd_entry.velocity[:3, :3] @ abd_entry.transform[:3, :3].T
        # Extract the angular velocity vector from the (anti-symmetrized) skew matrix.
        envs_vel[env_idx, 3:] = (
            (omega_skew[2, 1] - omega_skew[1, 2]) / 2.0,
            (omega_skew[0, 2] - omega_skew[2, 0]) / 2.0,
            (omega_skew[1, 0] - omega_skew[0, 1]) / 2.0,
        )
    self.rigid_solver.set_dofs_velocity(
        envs_vel if self.sim.n_envs > 0 else envs_vel[0],
        dofs_idx=slice(entity.dof_start, entity.dof_start + 6),
        skip_forward=True,
    )
def _pre_advance_external_articulation(self):
    """
    Pre-advance processing for external_articulation entities.
    Prepares articulation data and updates IPC geometry before advance().
    """
    if COUPLING_TYPE.EXTERNAL_ARTICULATION not in self._entities_by_coup_type:
        return
    mass_matrix = qd_to_numpy(self.rigid_solver.mass_mat, transpose=True)
    for entity, ad in self._articulation_data_by_entity.items():
        # Copy stored qpos (snapshot taken in _store_gs_rigid_states) to qpos_current.
        ad.qpos_current[:] = ad.qpos_stored
        # Compute delta_theta_tilde = qpos_current - ref_dof_prev (per joint)
        ad.delta_theta_tilde[:] = (
            ad.qpos_current[..., ad.joints_q_idx_local] - ad.ref_dof_prev[..., ad.joints_q_idx_local]
        )
        # Update IPC geometry for each articulated entity
        for env_idx in range(self.sim._B):
            articulation_slot = ad.articulation_slots[env_idx]
            articulation_geom = articulation_slot.geometry()
            # Update ref_dof_prev on all ABD instances
            if self.options.enable_rigid_dofs_sync:
                for child_link, prev_link_transform in zip(ad.joints_child_link, ad.prev_links_transform):
                    link_transform = prev_link_transform[env_idx]
                    if link_transform is None:
                        # First step: no previous transform yet, use the stored Genesis one.
                        link_transform = self._abd_transforms_by_link[child_link][env_idx]
                    abd_geom_slot = self._abd_slots_by_link[child_link][env_idx]
                    abd_geom = abd_geom_slot.geometry()
                    ref_dof_prev_attr = abd_geom.instances().find("ref_dof_prev")
                    uipc.view(ref_dof_prev_attr)[:] = uipc.geometry.affine_body.transform_to_q(link_transform)
            # Set delta_theta_tilde to IPC geometry
            delta_theta_tilde_attr = articulation_geom["joint"].find("delta_theta_tilde")
            uipc.view(delta_theta_tilde_attr)[:] = ad.delta_theta_tilde[env_idx]
            # Extract and transfer mass matrix from Genesis to IPC.
            # Two identical slices select the entity's square diagonal sub-block.
            dofs_idx = slice(entity.dof_start, entity.dof_end)
            mass_matrix_attr = articulation_geom["joint_joint"].find("mass")
            uipc.view(mass_matrix_attr).flat[:] = mass_matrix[env_idx, dofs_idx, dofs_idx]
def _post_advance_external_articulation(self):
    """
    Post-advance processing for external_articulation entities.
    Reads delta_theta from IPC and updates Genesis qpos.
    """
    if COUPLING_TYPE.EXTERNAL_ARTICULATION not in self._entities_by_coup_type:
        return
    for entity, ad in self._articulation_data_by_entity.items():
        # Read 'delta_theta_ipc' from IPC
        for env_idx in range(self.sim._B):
            scene_art_geom = ad.articulation_slots[env_idx].geometry()
            delta_theta_attr = scene_art_geom["joint"].find("delta_theta")
            ad.delta_theta_ipc[env_idx] = delta_theta_attr.view()
        # Compute qpos_new: copy ref_dof_prev then scatter joint deltas
        ad.qpos_new[:] = ad.ref_dof_prev
        ad.qpos_new[..., ad.joints_q_idx_local] += ad.delta_theta_ipc
        # Set qpos for all DOFs.
        # For non-fixed base robots, apply base link transform from IPC
        # (copy only when we are about to overwrite the base rows below).
        qpos_new = ad.qpos_new.astype(dtype=gs.np_float, copy=(not entity.base_link.is_fixed))
        if not entity.base_link.is_fixed:
            abd_entry = self._abd_data_by_link[entity.base_link]
            for env_idx in range(self.sim._B):
                qpos_new[env_idx, :3], qpos_new[env_idx, 3:7] = gu.T_to_trans_quat(abd_entry[env_idx].transform)
        self.rigid_solver.set_qpos(
            qpos_new if self.sim.n_envs > 0 else qpos_new[0],
            qs_idx=slice(entity.q_start, entity.q_end),
            skip_forward=False,
        )
        # Set base link velocities from IPC if available
        if not entity.base_link.is_fixed:
            self._apply_base_link_velocity_from_ipc(entity)
        # Update ref_dof_prev for next timestep (note: from ad.qpos_new, without the base overwrite)
        ad.ref_dof_prev[:] = ad.qpos_new
        # Store current link transforms to prev_links_transform
        for env_idx in range(self.sim._B):
            for child_link, prev_link_transform in zip(ad.joints_child_link, ad.prev_links_transform):
                link_transform = self._abd_transforms_by_link[child_link][env_idx]
                prev_link_transform[env_idx] = link_transform.copy()
def _post_advance_ipc_only(self):
    """
    Post-advance processing for 'ipc_only' entities.
    This method directly sets Genesis transforms from IPC results. It only handles rigid objects.
    """
    if COUPLING_TYPE.IPC_ONLY not in self._entities_by_coup_type:
        return
    # Scratch buffer reused across entities (overwritten per entity before use).
    envs_qpos = np.empty((self.sim._B, 7), dtype=gs.np_float)
    for entity in self._entities_by_coup_type[COUPLING_TYPE.IPC_ONLY]:
        if entity.base_link.is_fixed:
            # Fixed entities (e.g. ground planes) never move; nothing to write back.
            continue
        for env_idx in range(self.sim._B):
            abd_entry = self._abd_data_by_link[entity.base_link][env_idx]
            # qpos layout: [x, y, z, qw?, ...] — first 3 translation, next 4 quaternion.
            envs_qpos[env_idx, :3], envs_qpos[env_idx, 3:7] = gu.T_to_trans_quat(abd_entry.transform)
        self.rigid_solver.set_qpos(
            envs_qpos if self.sim.n_envs > 0 else envs_qpos[0],
            qs_idx=slice(entity.q_start, entity.q_start + 7),
            skip_forward=True,
        )
        # FIXME: It is currently necessary to enforce zero velocity to avoid double time integration by Rigid solver
        # self._apply_base_link_velocity_from_ipc(entity)
        self.rigid_solver.set_dofs_velocity(
            velocity=None,
            dofs_idx=slice(entity.dof_start, entity.dof_start + 6),
            skip_forward=True,
        )
def _retrieve_fem_states(self):
    """Read FEM/cloth vertex positions back from IPC and push them to Genesis entities."""
    # IPC world advance/retrieve is handled at Scene level
    # This method handles both volumetric FEM (3D) and cloth (2D) post-processing
    if not self.fem_solver.is_active:
        return
    # Gather FEM states (both volumetric and cloth) using metadata filtering
    visitor = SceneVisitor(self._ipc_scene)
    # Collect FEM and cloth geometries using metadata.
    # Placeholder empty arrays; np.stack below assumes every env slot gets filled
    # by a matching geometry — TODO confirm this invariant.
    fem_positions_by_entity: dict["FEMEntity", list[np.ndarray]] = {
        entity: [np.array([]) for _ in range(self.sim._B)] for entity in self.fem_solver.entities
    }
    for fem_geom_slot in visitor.geometries():
        if not isinstance(fem_geom_slot, SimplicialComplexSlot):
            continue
        fem_geom = fem_geom_slot.geometry()
        # dim 2 = cloth shells, dim 3 = volumetric tetmeshes.
        if fem_geom.dim() not in (2, 3):
            continue
        meta = read_ipc_geometry_metadata(fem_geom)
        if meta is None:
            continue
        solver_type, env_idx, i_e = meta
        if solver_type not in ("fem", "cloth"):
            continue
        entity = cast("FEMEntity", self.fem_solver.entities[i_e])
        # Bake the geometry transform into vertex positions before reading them.
        (transformed_geom,) = uipc.geometry.apply_transform(fem_geom)
        fem_positions_by_entity[entity][env_idx] = transformed_geom.positions().view().reshape(-1, 3)
    # Update FEM entities using filtered geometries
    for entity, geom_positions in fem_positions_by_entity.items():
        geom_positions = np.stack(geom_positions, axis=0, dtype=gs.np_float)
        entity.set_pos(0, geom_positions)
def _retrieve_rigid_states(self):
    """
    Retrieve ABD transforms/affine matrices after IPC step using AffineBodyStateAccessorFeature.
    O(num_rigid_bodies) instead of O(total_geometries).
    Also populates data arrays for force computation.
    """
    if self._abd_state_feature is None:
        # _init_accessors found no ABD bodies; nothing to retrieve.
        return
    # Single batch copy of ALL ABD states from IPC
    assert self._abd_state_geom is not None
    self._abd_state_feature.copy_to(self._abd_state_geom)
    # Get all transforms at once (array view)
    trans_attr = self._abd_state_geom.instances().find(uipc.builtin.transform)
    transforms = trans_attr.view()  # Shape: (num_bodies, 4, 4)
    # Get velocities (4x4 matrix representing transform derivative)
    vel_attr = self._abd_state_geom.instances().find(uipc.builtin.velocity)
    velocities = vel_attr.view()  # Shape: (num_bodies, 4, 4)
    assert self._coupling_data is not None
    # Scatter batched IPC states into per-link/per-env entries and the coupling
    # scratch arrays consumed by _apply_abd_coupling_forces.
    for i_link, link in enumerate(self._coupling_data.links):
        for env_idx, abd_body_idx in enumerate(self._coupling_data.abd_body_idx_by_link[link]):
            self._abd_data_by_link[link][env_idx].transform[:] = transforms[abd_body_idx]
            self._abd_data_by_link[link][env_idx].velocity[:] = velocities[abd_body_idx]
            self._coupling_data.ipc_transforms[env_idx, i_link] = transforms[abd_body_idx]
            # Aim transform is the Genesis-side pose stored before the advance.
            self._coupling_data.aim_transforms[env_idx, i_link] = self._abd_transforms_by_link[link][env_idx]
def _store_gs_rigid_states(self):
    """
    Store current Genesis rigid body states before IPC advance.
    These stored states will be used by:
    1. Animator: to set aim_transform for IPC soft constraints
    2. Force computation: to ensure action-reaction force consistency
    3. User modification detection: to detect if user called set_qpos
    """
    if not self.rigid_solver.is_active:
        return
    # Store qpos for all entities. It will be used by 'external_articulation' coupling mode
    assert self.rigid_solver.qpos is not None
    entities_qpos = qd_to_numpy(self.rigid_solver.qpos, transpose=True)
    for entity, articulation_data in self._articulation_data_by_entity.items():
        articulation_data.qpos_stored[:] = entities_qpos[..., entity.q_start : entity.q_end]
    # Store transforms for all rigid links registered as ABD bodies
    links_pos = qd_to_numpy(self.rigid_solver.links_state.pos, transpose=True)
    links_quat = qd_to_numpy(self.rigid_solver.links_state.quat, transpose=True)
    links_transform = cast(np.ndarray, gu.trans_quat_to_T(links_pos, links_quat))
    # In-place copy into the pre-allocated buffers from _init_accessors.
    for link, transforms in self._abd_transforms_by_link.items():
        for env_idx in range(self.sim._B):
            transforms[env_idx][:] = links_transform[env_idx, link.idx]
def _apply_abd_coupling_forces(self):
    """
    Apply coupling forces from IPC ABD constraint to Genesis rigid bodies.
    Data has already been populated in data by _retrieve_rigid_states, so this function computes forces and applies
    the results.
    This ensures action-reaction force consistency:
    - IPC constraint force: G_ipc = M * (q_ipc^{n+1} - q_genesis^n)
    - Genesis reaction force: F_genesis = M * (q_ipc^{n+1} - q_genesis^n) = G_ipc
    """
    # Only relevant for two-way soft-constraint coupling with at least one tracked link.
    if (
        not self.options.two_way_coupling
        or COUPLING_TYPE.TWO_WAY_SOFT_CONSTRAINT not in self._entities_by_coup_type
        or not self._abd_data_by_link
    ):
        return
    assert self._coupling_data is not None
    # Fills out_forces/out_torques in place from the ipc/aim transform difference.
    update_coupling_forces(
        self._coupling_data.ipc_transforms,
        self._coupling_data.aim_transforms,
        self._coupling_data.links_mass,
        self._coupling_data.links_inertia_i,
        self._constraint_strength_translation_scaled,
        self._constraint_strength_rotation_scaled,
        self._coupling_data.out_forces,
        self._coupling_data.out_torques,
    )
    if np.isnan(self._coupling_data.out_forces).any() or np.isnan(self._coupling_data.out_torques).any():
        gs.raise_exception(
            "Invalid coupling forces/torques causing 'nan'. This indicates numerical instability. Please decrease "
            "the simulation timestep."
        )
    self.rigid_solver.apply_links_external_force(
        self._coupling_data.out_forces if self.sim.n_envs > 0 else self._coupling_data.out_forces[0],
        links_idx=self._coupling_data.links_idx,
        local=False,
    )
    self.rigid_solver.apply_links_external_torque(
        self._coupling_data.out_torques if self.sim.n_envs > 0 else self._coupling_data.out_torques[0],
        links_idx=self._coupling_data.links_idx,
        local=False,
    )
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/couplers/ipc_coupler/coupler.py",
"license": "Apache License 2.0",
"lines": 948,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/couplers/ipc_coupler/data.py | """
Data classes for IPC coupler.
"""
from enum import IntEnum
from dataclasses import dataclass
from typing import NamedTuple, TYPE_CHECKING
import numpy as np
from uipc.geometry import GeometrySlot
import genesis as gs
if TYPE_CHECKING:
from genesis.engine.entities.rigid_entity import RigidLink
class COUPLING_TYPE(IntEnum):
    """How a rigid entity is coupled to the IPC solver."""

    TWO_WAY_SOFT_CONSTRAINT = 0  # Animator + SoftTransformConstraint, forces fed back to Genesis
    EXTERNAL_ARTICULATION = 1  # joint-level coupling via ExternalArticulationConstraint
    IPC_ONLY = 2  # one-way: IPC drives the rigid body transforms
    NONE = 3  # not coupled to IPC
class ABDLinkEntry(NamedTuple):
    """Per-link, per-env ABD state retrieved from IPC after advance."""

    transform: np.ndarray  # (4, 4) IPC homogeneous transform of the link's ABD body
    velocity: np.ndarray  # (4, 4) velocity matrix (time derivative of the transform)
@dataclass
class ArticulatedEntityData:
    """Typed container for per-entity articulation coupling data.

    All array attributes are pre-allocated with a leading n_envs dimension and
    are filled in place each coupling step.
    """

    joints_child_link: list["RigidLink"]  # child link of each coupled (non-fixed) joint
    joints_q_idx_local: list[int]  # entity-local qpos index of each coupled joint
    articulation_slots: list[GeometrySlot]  # per-env articulation geometry slots in IPC
    ref_dof_prev: np.ndarray  # (n_envs, n_qs) reference qpos from the previous step
    qpos_stored: np.ndarray  # (n_envs, n_qs) qpos snapshot taken before IPC advance
    qpos_current: np.ndarray  # (n_envs, n_qs) working copy of qpos_stored
    qpos_new: np.ndarray  # (n_envs, n_qs) qpos assembled from IPC joint deltas
    delta_theta_tilde: np.ndarray  # (n_envs, n_joints) predicted joint deltas sent to IPC
    delta_theta_ipc: np.ndarray  # (n_envs, n_joints) joint deltas read back from IPC
    # Previous timestep link transforms for ref_dof_prev computation,
    # indexed [joint][env_idx]; entries are None until the first step.
    prev_links_transform: list[list[np.ndarray | None]]
class IPCCouplingData:
    """Pre-allocated arrays for coupling force computation."""

    def __init__(
        self,
        links: list["RigidLink"],
        abd_body_idx_by_link: dict["RigidLink", list[int]],
        n_envs: int,
    ):
        """
        Parameters
        ----------
        links : list[RigidLink]
            Rigid links participating in force coupling.
        abd_body_idx_by_link : dict[RigidLink, list[int]]
            Per-link list of ABD body indices inside IPC, one per environment.
        n_envs : int
            Number of simulated environments (batch size).
        """
        n_links = len(links)
        # Every coupled link must come with its ABD body indices.
        assert set(abd_body_idx_by_link.keys()) == set(links)
        self.links = links
        self.abd_body_idx_by_link = abd_body_idx_by_link
        self.links_idx = [link.idx for link in links]
        self.link_to_idx_local = {link: i for i, link in enumerate(links)}
        self.links_mass = np.array([link.inertial_mass for link in links], dtype=gs.np_float)
        if links:
            self.links_inertia_i = np.stack([link.inertial_i for link in links], axis=0, dtype=gs.np_float)
        else:
            # NOTE(review): this empty fallback is rank-4, while np.stack of 2-D (3, 3)
            # inertia tensors would yield rank-3 (n, 3, 3) — confirm the expected
            # rank of link.inertial_i (per-env batch dimension or not).
            self.links_inertia_i = np.empty((0, 0, 3, 3), dtype=gs.np_float)
        # Per-env, per-link scratch buffers filled each step by the coupler.
        self.ipc_transforms = np.empty((n_envs, n_links, 4, 4), dtype=gs.np_float)
        self.aim_transforms = np.empty((n_envs, n_links, 4, 4), dtype=gs.np_float)
        self.out_forces = np.empty((n_envs, n_links, 3), dtype=gs.np_float)
        self.out_torques = np.empty((n_envs, n_links, 3), dtype=gs.np_float)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/couplers/ipc_coupler/data.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:genesis/engine/couplers/ipc_coupler/utils.py | """
Utility functions for IPC coupler.
Stateless helper functions extracted from IPCCoupler for clarity.
"""
import numba as nb
import numpy as np
import genesis as gs
import genesis.utils.geom as gu
from uipc.core import Scene
def find_target_link_for_fixed_merge(link):
    """
    Walk up the kinematic tree to find the link that fixed joints should merge into.

    Starting from ``link``, keep climbing to the parent as long as the current link
    is attached exclusively through FIXED joints. Stop at the root, or at the first
    link that has at least one non-FIXED joint. This mirrors ``_merge_target_id``
    in mjcf.py.

    Returns
    -------
    RigidLink
        The link to merge into (may be ``link`` itself).
    """
    entity = link.entity
    current = link
    while current.parent_idx >= 0:
        # A link with any movable (non-FIXED) joint terminates the walk.
        if any(joint.type != gs.JOINT_TYPE.FIXED for joint in current.joints):
            break
        current = entity.links[current.parent_idx - entity.link_start]
    return current
def compute_link_to_link_transform(from_link, to_link):
    """
    Compute the relative transform mapping points from ``from_link`` frame to
    ``to_link`` frame by walking up the kinematic chain.

    Both links must belong to the same entity; an exception is raised when the
    root is reached before ``to_link`` (i.e. ``to_link`` is not an ancestor).

    Returns
    -------
    tuple
        (pos, quat) transforming points from from_link frame to to_link frame
    """
    assert from_link.entity is to_link.entity
    entity = from_link.entity
    # Start from the identity transform and compose each link's local transform
    # while climbing toward the ancestor.
    pos = np.zeros(3, dtype=gs.np_float)
    quat = np.array([1.0, 0.0, 0.0, 0.0], dtype=gs.np_float)
    link = from_link
    while link is not to_link:
        if link.parent_idx < 0:
            gs.raise_exception(f"Cannot compute transform from link {from_link} to {to_link}")
        pos, quat = gu.transform_pos_quat_by_trans_quat(pos, quat, link.pos, link.quat)
        link = entity.links[link.parent_idx - entity.link_start]
    return pos, quat
def build_ipc_scene_config(options, sim_options):
    """
    Translate IPCCouplerOptions and SimOptions into a UIPC Scene config dict.

    Parameters
    ----------
    options : IPCCouplerOptions
        The coupler options
    sim_options : SimOptions
        The simulation options (provides dt, gravity, requires_grad)

    Returns
    -------
    dict
        Scene config dict ready to pass to Scene(config)
    """
    config = Scene.default_config()
    # Core integration parameters always come from SimOptions.
    config["dt"] = sim_options.dt
    config["gravity"] = [[float(component)] for component in sim_options.gravity]
    # Every optional entry is written only when the corresponding option is set.
    optional_entries = (
        # Newton solver
        (["newton", "max_iter"], options.newton_max_iterations),
        (["newton", "min_iter"], options.newton_min_iterations),
        (["newton", "velocity_tol"], options.newton_tolerance),
        (["newton", "ccd_tol"], options.newton_ccd_tolerance),
        (["newton", "use_adaptive_tol"], options.newton_use_adaptive_tolerance),
        (["newton", "transrate_tol"], options.newton_translation_tolerance),
        (["newton", "semi_implicit", "enable"], options.newton_semi_implicit_enable),
        (["newton", "semi_implicit", "beta_tol"], options.newton_semi_implicit_beta_tolerance),
        # Line search
        (["line_search", "max_iter"], options.n_linesearch_iterations),
        (["line_search", "report_energy"], options.linesearch_report_energy),
        # Linear system
        (["linear_system", "solver"], options.linear_system_solver),
        (["linear_system", "tol_rate"], options.linear_system_tolerance),
        # Contact
        (["contact", "enable"], options.contact_enable),
        (["contact", "d_hat"], options.contact_d_hat),
        (["contact", "friction", "enable"], options.contact_friction_enable),
        (["contact", "eps_velocity"], options.contact_eps_velocity),
        (["contact", "constitution"], options.contact_constitution),
        # Collision detection
        (["collision_detection", "method"], options.collision_detection_method),
        # CFL
        (["cfl", "enable"], options.cfl_enable),
        # Sanity check
        (["sanity_check", "enable"], options.sanity_check_enable),
        # Differential simulation (derived from SimOptions)
        (["diff_sim", "enable"], sim_options.requires_grad),
    )
    for keys, value in optional_entries:
        _set_if_not_none(config, keys, value)
    return config
def _set_if_not_none(config, keys, value):
"""Set a nested config value only if it's not None."""
if value is None:
return
# Cast to native Python types — UIPC pybind11 rejects numpy scalars and Python bool.
# bool check must come before int since bool is a subclass of int.
if isinstance(value, (bool, np.bool_)):
value = int(value)
elif isinstance(value, (int, np.integer)):
value = int(value)
elif isinstance(value, (float, np.floating)):
value = float(value)
d = config
for key in keys[:-1]:
d = d[key]
d[keys[-1]] = value
def read_ipc_geometry_metadata(geo):
    """
    Extract Genesis metadata (solver type, env index, entity/link index) from an IPC geometry.

    Returns
    -------
    tuple | None
        ``(solver_type, env_idx, idx)`` where ``idx`` is the link index for
        "rigid" geometries and the entity index for "fem"/"cloth" geometries.
        ``None`` when the geometry carries no ``solver_type`` metadata
        (i.e. it was not created by Genesis).
    """
    meta = geo.meta()
    solver_type_attr = meta.find("solver_type")
    # Geometries not created by Genesis carry no solver_type attribute.
    if solver_type_attr is None:
        return None
    (solver_type,) = solver_type_attr.view()
    (env_idx,) = (int(e) for e in meta.find("env_idx").view())
    if solver_type == "rigid":
        idx_attr = "link_idx"
    elif solver_type in ("fem", "cloth"):
        idx_attr = "entity_idx"
    else:
        gs.raise_exception(f"Unknown IPC geometry solver_type: {solver_type!r}")
    (idx,) = (int(e) for e in meta.find(idx_attr).view())
    return (solver_type, env_idx, idx)
@nb.jit(nopython=True, cache=True)
def update_coupling_forces(
    ipc_transforms,
    aim_transforms,
    links_mass,
    links_inertia_i,
    translation_strength,
    rotation_strength,
    out_forces,
    out_torques,
):
    """Compute coupling forces and torques for all links.

    Spring-like coupling: the linear force is proportional to the positional
    offset between the IPC pose and the aim pose (scaled by link mass); the
    torque is proportional to their relative rotation (scaled by the link's
    world-frame inertia). Results are written in place into ``out_forces`` and
    ``out_torques`` (shape ``(*batch, 3)``), which are fully overwritten.

    ``ipc_transforms`` / ``aim_transforms`` are homogeneous 4x4 transforms with
    shape ``(*batch, 4, 4)``.
    """
    batch_shape = out_forces.shape[:-1]
    # Split each homogeneous transform into its translation and rotation parts.
    pos_current, R_current = ipc_transforms[..., :3, 3], ipc_transforms[..., :3, :3]
    pos_aim, R_aim = aim_transforms[..., :3, 3], aim_transforms[..., :3, :3]
    # Linear force
    out_forces[:] = (translation_strength * links_mass[..., None]) * (pos_current - pos_aim)
    # Relative rotation matrix — explicit loops because numba's nopython mode
    # does not support matmul batched over leading axes.
    R_rel = np.empty((*batch_shape, 3, 3), dtype=ipc_transforms.dtype)
    for idx in np.ndindex(batch_shape):
        R_rel[idx] = R_current[idx] @ R_aim[idx].T
    # Relative rotation in angle-axis representation
    rotvec = gu.R_to_rotvec(R_rel)
    # Transform inertia to world frame: I_world = R * I_body * R^T.
    # NOTE(review): idx[-1:] indexes links_inertia_i by the trailing batch axis
    # (the link index) only; expected leading shape (n_links, ...) — confirm
    # against IPCCouplingData.links_inertia_i.
    I_world = np.empty((*batch_shape, 3, 3), dtype=ipc_transforms.dtype)
    for idx in np.ndindex(batch_shape):
        I_world[idx] = R_current[idx] @ links_inertia_i[idx[-1:]] @ R_current[idx].T
    # Torque
    for idx in np.ndindex(batch_shape):
        out_torques[idx] = rotation_strength * (I_world[idx] @ rotvec[idx])
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/couplers/ipc_coupler/utils.py",
"license": "Apache License 2.0",
"lines": 167,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:examples/sensors/kinematic_contact_probe.py | """
Interactive KinematicContactProbe visualization with keyboard teleop.
A platform with a grid of contact probes sits in the scene.
Use keyboard controls to move the "pusher" cylinder across the probe surface and push around objects.
"""
import argparse
import os
import numpy as np
import genesis as gs
from genesis.vis.keybindings import Key, KeyAction, Keybind
# Teleop
KEY_DPOS = 0.05
PUSHER_SIZE = 0.1
# Probe sensors
GRID_SIZE = 5
PROBE_RADIUS = 0.05
# Objects
PLATFORM_SIZE = 1.5
PLATFORM_HEIGHT = 0.3
OBJ_Z = PLATFORM_HEIGHT * 1.4
OBJ_SIZE = PLATFORM_SIZE / 8.0
def _build_probe_grid(grid_n: int, platform_size: float, platform_height: float):
spacing = platform_size / (grid_n + 1)
centre = (grid_n - 1) / 2.0
i = np.repeat(np.arange(grid_n), grid_n)
j = np.tile(np.arange(grid_n), grid_n)
x = (i - centre) * spacing
y = (j - centre) * spacing
z = np.full_like(x, platform_height / 2) # top surface in link-local frame
positions = np.stack([x, y, z], axis=-1)
normals = np.tile([0.0, 0.0, 1.0], (grid_n * grid_n, 1))
radii = PROBE_RADIUS + i * (PROBE_RADIUS / 10.0)
return positions, normals, radii
def main():
    """Run the interactive (or headless) kinematic contact probe demo.

    Builds a fixed platform covered by a grid of contact probes, a keyboard-driven
    "pusher" cylinder, and three droppable objects. With ``--vis`` the pusher is
    teleoperated via registered keybindings; otherwise the scene simulates for
    ``--seconds`` and prints any probe contacts at each step.
    """
    parser = argparse.ArgumentParser(description="Interactive KinematicContactProbe Visualization")
    parser.add_argument("--vis", "-v", action="store_true", default=False, help="Show visualization GUI")
    parser.add_argument("--cpu", action="store_true", help="Run on CPU instead of GPU")
    parser.add_argument("--seconds", "-t", type=float, default=3.0, help="Seconds to simulate (headless mode)")
    args = parser.parse_args()
    gs.init(backend=gs.cpu if args.cpu else gs.gpu, precision="32", logging_level="info")
    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(-PLATFORM_SIZE * 2, 0.0, PLATFORM_HEIGHT + 1.5),
            camera_lookat=(0.0, 0.0, PLATFORM_HEIGHT),
            max_FPS=60,
        ),
        show_viewer=args.vis,
    )
    scene.add_entity(gs.morphs.Plane())
    # Fixed platform whose top surface carries the probe grid.
    platform = scene.add_entity(
        gs.morphs.Box(
            size=(PLATFORM_SIZE, PLATFORM_SIZE, PLATFORM_HEIGHT),
            pos=(0.0, 0.0, PLATFORM_HEIGHT / 2),
            fixed=True,
        ),
    )
    probe_positions, probe_normals, probe_radii = _build_probe_grid(GRID_SIZE, PLATFORM_SIZE, PLATFORM_HEIGHT)
    n_probes = len(probe_positions)
    probe = scene.add_sensor(
        gs.sensors.KinematicContactProbe(
            entity_idx=platform.idx,
            link_idx_local=0,
            probe_local_pos=probe_positions,
            probe_local_normal=probe_normals,
            radius=probe_radii,
            stiffness=5000.0,
            draw_debug=args.vis,
        )
    )
    # Keyboard-driven pusher, spawned slightly embedded into the platform top.
    pusher_start = np.array([0.0, 0.0, PLATFORM_HEIGHT + PUSHER_SIZE / 2 - 0.02], dtype=np.float32)
    pusher = scene.add_entity(
        gs.morphs.Cylinder(
            radius=PUSHER_SIZE,
            height=PUSHER_SIZE,
            pos=pusher_start,
        ),
        surface=gs.surfaces.Default(
            color=(0.15, 0.55, 0.95, 1.0),
        ),
    )
    # Add objects
    rect = scene.add_entity(
        gs.morphs.Box(
            size=(OBJ_SIZE, OBJ_SIZE * 2, OBJ_SIZE),
            pos=(PLATFORM_SIZE / 4, 0, OBJ_Z),
        ),
        surface=gs.surfaces.Default(color=(1.0, 0.3, 0.3, 1.0)),
    )
    cylinder = scene.add_entity(
        gs.morphs.Cylinder(
            radius=OBJ_SIZE / 2,
            height=OBJ_SIZE * 1.2,
            pos=(0, PLATFORM_SIZE / 4, OBJ_Z),
        ),
        surface=gs.surfaces.Default(color=(0.3, 1.0, 0.3, 1.0)),
    )
    sphere = scene.add_entity(
        gs.morphs.Sphere(
            radius=OBJ_SIZE / 2,
            pos=(-PLATFORM_SIZE / 4, -PLATFORM_SIZE / 4, OBJ_Z),
        ),
        surface=gs.surfaces.Default(color=(0.3, 0.3, 1.0, 1.0)),
    )
    objects = [rect, cylinder, sphere]
    scene.build()
    is_running = True
    # Register keybindings
    if args.vis:
        target_pos = pusher_start.copy()
        next_obj_idx = 0
        def stop():
            nonlocal is_running
            is_running = False
        def reset_pose():
            target_pos[:] = pusher_start
        def translate(index: int, is_negative: bool):
            # Move the pusher target by one key-step along the given axis.
            target_pos[index] += (-1 if is_negative else 1) * KEY_DPOS
        def drop_object():
            # Cycle through the three objects, respawning one above the pusher.
            nonlocal next_obj_idx
            idx = next_obj_idx % len(objects)
            drop_pos = target_pos.copy()
            drop_pos[2] = PLATFORM_HEIGHT * 2
            objects[idx].set_pos(drop_pos)
            objects[idx].set_quat(np.array([1, 0, 0, 0], dtype=np.float32))
            next_obj_idx += 1
        scene.viewer.register_keybinds(
            Keybind("move_forward", Key.UP, KeyAction.HOLD, callback=translate, args=(0, False)),
            Keybind("move_backward", Key.DOWN, KeyAction.HOLD, callback=translate, args=(0, True)),
            Keybind("move_right", Key.RIGHT, KeyAction.HOLD, callback=translate, args=(1, True)),
            Keybind("move_left", Key.LEFT, KeyAction.HOLD, callback=translate, args=(1, False)),
            Keybind("move_down", Key.J, KeyAction.HOLD, callback=translate, args=(2, True)),
            Keybind("move_up", Key.K, KeyAction.HOLD, callback=translate, args=(2, False)),
            Keybind("drop_object", Key.SPACE, KeyAction.PRESS, callback=drop_object),
            Keybind("reset", Key.BACKSLASH, KeyAction.PRESS, callback=reset_pose),
            Keybind("quit", Key.ESCAPE, KeyAction.PRESS, callback=stop),
        )
    # ── Print info ─────────────────────────────────────────────────────
    print("\n=== Interactive KinematicContactProbe ===")
    print(f"Platform {PLATFORM_SIZE}m × {PLATFORM_SIZE}m with {GRID_SIZE}×{GRID_SIZE} probes ({n_probes} total)")
    print(f"Probe radii range: {min(probe_radii):.4f} – {max(probe_radii):.4f} m")
    if args.vis:
        print()
        print("Keyboard Controls:")
        print(" [↑/↓/←/→] Move pusher box in XY")
        print(" [j / k] Lower / raise pusher box")
        print(" [SPACE] Drop an object at pusher location")
        print(" [\\] Reset pusher position")
    else:
        print(f"Running headless for {args.seconds}s ...")
    print()
    # Simulation loop
    steps = int(args.seconds / scene.sim_options.dt) if not args.vis else None
    step = 0
    try:
        while is_running:
            if args.vis:
                pusher.set_pos(target_pos)
                pusher.set_quat(np.array([1, 0, 0, 0], dtype=np.float32))
            scene.step()
            # Read probe data and print any active contacts
            data = probe.read()
            active = (data.penetration > 0).nonzero(as_tuple=False)
            if active.numel() > 0:
                # .tolist() yields a scalar for a single hit — normalize to lists.
                idxs = active.squeeze(-1).tolist()
                if isinstance(idxs, int):
                    idxs = [idxs]
                depths = data.penetration[active.squeeze(-1)].tolist()
                if isinstance(depths, float):
                    depths = [depths]
                parts = [f"probe{i}={d:.4f}" for i, d in zip(idxs, depths)]
                print(f"Step {step}: Contact: {', '.join(parts)}")
            step += 1
            # Run a single step under pytest so the example stays fast in CI.
            if "PYTEST_VERSION" in os.environ:
                break
            if not args.vis and step >= steps:
                break
    except KeyboardInterrupt:
        gs.logger.info("Simulation interrupted.")
    finally:
        gs.logger.info("Simulation finished.")
if __name__ == "__main__":
main()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/sensors/kinematic_contact_probe.py",
"license": "Apache License 2.0",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/sensors/kinematic_contact_probe.py | from dataclasses import dataclass
from typing import TYPE_CHECKING, NamedTuple, Type
import numpy as np
import quadrants as qd
import torch
import genesis as gs
import genesis.utils.array_class as array_class
import genesis.utils.geom as gu
from genesis.engine.solvers.rigid.abd.forward_kinematics import func_update_all_verts
from genesis.engine.solvers.rigid.collider.utils import func_point_in_geom_aabb
from genesis.options.sensors import KinematicContactProbe as KinematicContactProbeOptions
from genesis.utils.misc import concat_with_tensor, make_tensor_field, tensor_to_array
from genesis.utils.raycast_qd import get_triangle_vertices, ray_triangle_intersection
from .base_sensor import (
NoisySensorMetadataMixin,
NoisySensorMixin,
RigidSensorMetadataMixin,
RigidSensorMixin,
Sensor,
SharedSensorMetadata,
)
from .sensor_manager import register_sensor
if TYPE_CHECKING:
from genesis.ext.pyrender.mesh import Mesh
from genesis.utils.ring_buffer import TensorRingBuffer
from genesis.vis.rasterizer_context import RasterizerContext
from .sensor_manager import SensorManager
@qd.func
def _probe_geom_penetration(
    probe_pos: gs.qd_vec3,
    probe_normal: gs.qd_vec3,
    radius: gs.qd_float,
    max_range: gs.qd_float,
    i_g: gs.qd_int,
    i_b: gs.qd_int,
    geoms_info: array_class.GeomsInfo,
    faces_info: array_class.FacesInfo,
    verts_info: array_class.VertsInfo,
    fixed_verts_state: array_class.VertsState,
    free_verts_state: array_class.VertsState,
    eps: gs.qd_float,
):
    """Deepest probe penetration into geometry ``i_g`` for batch ``i_b``.

    Every triangle of the geometry is tested twice: a sphere test (closest point
    within ``radius`` of ``probe_pos``, depth measured along ``-probe_normal``)
    and a raycast from ``probe_pos`` along ``-probe_normal`` capped at
    ``max_range``. Returns 0.0 when nothing is hit.

    NOTE(review): the raycast branch folds the hit distance ``t`` into the same
    ``best`` accumulator that holds sphere-test depths (keeping the minimum
    distance but the maximum depth) — confirm both quantities are meant to
    share one scalar.
    """
    best = gs.qd_float(0.0)
    neg_normal = -probe_normal
    face_start = geoms_info.face_start[i_g]
    face_end = geoms_info.face_end[i_g]
    radius_sq = radius * radius
    for i_f in range(face_start, face_end):
        tri_verts = get_triangle_vertices(i_f, i_b, faces_info, verts_info, fixed_verts_state, free_verts_state)
        v0 = tri_verts[:, 0]
        v1 = tri_verts[:, 1]
        v2 = tri_verts[:, 2]
        if radius > gs.EPS:
            # Sphere-triangle test (closest point)
            closest_point = _closest_point_on_triangle(probe_pos, v0, v1, v2)
            diff = closest_point - probe_pos
            dist_sq = diff.dot(diff)
            if dist_sq <= radius_sq:
                # Project the offset onto the inward direction to get the depth.
                penetration = diff.dot(neg_normal)
                if penetration > best:
                    best = penetration
        # Raycast test (ray along -normal)
        result = ray_triangle_intersection(probe_pos, neg_normal, v0, v1, v2, eps)
        # result[3] > 0.5 flags a hit; result[0] holds the hit distance.
        if result[3] > 0.5 and result[0] <= max_range:
            t = result[0]
            if best == 0.0 or t < best:
                best = t
    return best
@qd.func
def _closest_point_on_triangle(
    point: gs.qd_vec3,
    v0: gs.qd_vec3,
    v1: gs.qd_vec3,
    v2: gs.qd_vec3,
) -> gs.qd_vec3:
    """
    Find the point on the surface of a triangle closest to a given point.

    Classifies ``point`` into one of the seven Voronoi regions of the triangle
    (three vertices, three edges, face interior) using signed dot products, then
    reconstructs the closest point on that feature.

    Reference: Christer Ericson, *Real-Time Collision Detection*, §5.1.5.
    """
    # NOTE(review): the single-assignment cascade (no early returns) is
    # presumably required by the qd kernel language — confirm before restructuring.
    ab = v1 - v0
    ac = v2 - v0
    ap = point - v0
    d1 = ab.dot(ap)
    d2 = ac.dot(ap)
    # Region A (vertex v0)
    closest = v0
    if not (d1 <= 0.0 and d2 <= 0.0):
        bp = point - v1
        d3 = ab.dot(bp)
        d4 = ac.dot(bp)
        # Region B (vertex v1)
        if d3 >= 0.0 and d4 <= d3:
            closest = v1
        else:
            cp = point - v2
            d5 = ab.dot(cp)
            d6 = ac.dot(cp)
            # Region C (vertex v2)
            if d6 >= 0.0 and d5 <= d6:
                closest = v2
            else:
                vc = d1 * d4 - d3 * d2
                # Region AB (edge v0-v1)
                if vc <= 0.0 and d1 >= 0.0 and d3 <= 0.0:
                    w = d1 / (d1 - d3)
                    closest = v0 + w * ab
                else:
                    vb = d5 * d2 - d1 * d6
                    # Region AC (edge v0-v2)
                    if vb <= 0.0 and d2 >= 0.0 and d6 <= 0.0:
                        w = d2 / (d2 - d6)
                        closest = v0 + w * ac
                    else:
                        va = d3 * d6 - d5 * d4
                        # Region BC (edge v1-v2)
                        if va <= 0.0 and (d4 - d3) >= 0.0 and (d5 - d6) >= 0.0:
                            w = (d4 - d3) / ((d4 - d3) + (d5 - d6))
                            closest = v1 + w * (v2 - v1)
                        else:
                            # Inside the triangle face: barycentric interpolation.
                            denom = 1.0 / (va + vb + vc)
                            v = vb * denom
                            w = vc * denom
                            closest = v0 + v * ab + w * ac
    return closest
@qd.kernel
def _kernel_kinematic_contact_probe(
    probe_positions_local: qd.types.ndarray(),
    probe_normals_local: qd.types.ndarray(),
    probe_sensor_idx: qd.types.ndarray(),
    probe_max_raycast_range: gs.qd_float,
    links_state: array_class.LinksState,
    radii: qd.types.ndarray(),
    stiffness: qd.types.ndarray(),
    links_idx: qd.types.ndarray(),
    n_probes_per_sensor: qd.types.ndarray(),
    sensor_cache_start: qd.types.ndarray(),
    sensor_probe_start: qd.types.ndarray(),
    collider_state: array_class.ColliderState,
    geoms_state: array_class.GeomsState,
    geoms_info: array_class.GeomsInfo,
    fixed_verts_state: array_class.VertsState,
    free_verts_state: array_class.VertsState,
    static_rigid_sim_config: qd.template(),
    verts_info: array_class.VertsInfo,
    faces_info: array_class.FacesInfo,
    output: qd.types.ndarray(),
    eps: gs.qd_float,
):
    """Fill ``output`` with penetration depths and link-frame forces for all probes.

    Per-sensor layout inside ``output[i_b]``, starting at
    ``sensor_cache_start[i_s]``: ``n_probes`` penetration scalars followed by
    ``n_probes`` force triples.
    """
    total_n_probes = probe_positions_local.shape[0]
    n_batches = output.shape[0]
    # Refresh vertex positions before sampling triangle geometry below.
    func_update_all_verts(
        geoms_info, geoms_state, verts_info, free_verts_state, fixed_verts_state, static_rigid_sim_config
    )
    for i_b, i_p in qd.ndrange(n_batches, total_n_probes):
        i_s = probe_sensor_idx[i_p]
        probe_pos_local = qd.Vector(
            [probe_positions_local[i_p, 0], probe_positions_local[i_p, 1], probe_positions_local[i_p, 2]]
        )
        probe_normal_local = qd.Vector(
            [probe_normals_local[i_p, 0], probe_normals_local[i_p, 1], probe_normals_local[i_p, 2]]
        )
        radius = radii[i_p]
        stiff = stiffness[i_s]
        sensor_link_idx = links_idx[i_s]
        # Transform probe position and normal from link-local to world frame.
        link_pos = links_state.pos[sensor_link_idx, i_b]
        link_quat = links_state.quat[sensor_link_idx, i_b]
        probe_pos = link_pos + gu.qd_transform_by_quat(probe_pos_local, link_quat)
        probe_normal = gu.qd_transform_by_quat(probe_normal_local, link_quat)
        max_penetration = gs.qd_float(0.0)
        # Iterate over contacts directly from collider state
        n_contacts = collider_state.n_contacts[i_b]
        for i_c in range(n_contacts):
            c_link_a = collider_state.contact_data.link_a[i_c, i_b]
            c_link_b = collider_state.contact_data.link_b[i_c, i_b]
            c_geom_a = collider_state.contact_data.geom_a[i_c, i_b]
            c_geom_b = collider_state.contact_data.geom_b[i_c, i_b]
            # Check if either side of this contact involves one of our sensor links;
            # when it does, probe against the OTHER side's geometry.
            for side in qd.static(range(2)):
                contact_link = c_link_a if side == 0 else c_link_b
                i_g = c_geom_b if side == 0 else c_geom_a
                # Is this contact relevant to this sensor?
                # Cheap AABB rejection before the per-triangle tests.
                if contact_link == sensor_link_idx and func_point_in_geom_aabb(
                    geoms_state, i_g, i_b, probe_pos, radius
                ):
                    # Raycast + sphere penetration test per geom
                    penetration = _probe_geom_penetration(
                        probe_pos,
                        probe_normal,
                        radius,
                        probe_max_raycast_range,
                        i_g,
                        i_b,
                        geoms_info,
                        faces_info,
                        verts_info,
                        fixed_verts_state,
                        free_verts_state,
                        eps,
                    )
                    if penetration > max_penetration:
                        max_penetration = penetration
        # Pseudo-force along the inward link-frame normal, scaled by stiffness.
        force_local = qd.Vector.zero(gs.qd_float, 3)
        if max_penetration > 0:
            force_local = stiff * max_penetration * -probe_normal_local
        # Scatter into the shared cache: penetrations first, then force triples.
        probe_idx_in_sensor = i_p - sensor_probe_start[i_s]
        n_probes = n_probes_per_sensor[i_s]
        cache_start = sensor_cache_start[i_s]
        output[i_b, cache_start + probe_idx_in_sensor] = max_penetration
        output[i_b, cache_start + n_probes + probe_idx_in_sensor * 3 + 0] = force_local[0]
        output[i_b, cache_start + n_probes + probe_idx_in_sensor * 3 + 1] = force_local[1]
        output[i_b, cache_start + n_probes + probe_idx_in_sensor * 3 + 2] = force_local[2]
class KinematicContactProbeData(NamedTuple):
    """
    Data returned by the kinematic contact probe.

    Structured view over the sensor's flat cache; the shapes match
    ``KinematicContactProbe._get_return_format()``.

    Parameters
    ----------
    penetration: torch.Tensor, shape ([n_envs,] n_probes)
        Depth of penetration in meters (0 if no contact).
    force: torch.Tensor, shape ([n_envs,] n_probes, 3)
        Estimated contact force based on penetration and stiffness (non-physical) in the link frame.
    """

    penetration: torch.Tensor
    force: torch.Tensor
@dataclass
class KinematicContactProbeMetadata(RigidSensorMetadataMixin, NoisySensorMetadataMixin, SharedSensorMetadata):
    """Shared metadata for all kinematic contact probes."""

    # Per-probe contact sphere radii, flattened across every registered sensor.
    radii: torch.Tensor = make_tensor_field((0,))
    # Per-sensor stiffness scaling penetration depth into the reported force.
    stiffness: torch.Tensor = make_tensor_field((0,))
    # For each probe, the index of the sensor it belongs to.
    probe_sensor_idx: torch.Tensor = make_tensor_field((0,), dtype=gs.tc_int)
    # Probe positions/normals in the link-local frame, flattened across sensors.
    probe_positions: torch.Tensor = make_tensor_field((0, 3))
    probe_normals: torch.Tensor = make_tensor_field((0, 3))
    # Raycast cutoff shared by all probes; derived from a link AABB in build().
    probe_max_raycast_range: float = 0.0
    # Per-sensor probe counts plus start offsets into the shared cache and the
    # flattened per-probe arrays above.
    n_probes_per_sensor: torch.Tensor = make_tensor_field((0,), dtype=gs.tc_int)
    sensor_cache_start: torch.Tensor = make_tensor_field((0,), dtype=gs.tc_int)
    sensor_probe_start: torch.Tensor = make_tensor_field((0,), dtype=gs.tc_int)
    total_n_probes: int = 0
@register_sensor(KinematicContactProbeOptions, KinematicContactProbeMetadata, KinematicContactProbeData)
@qd.data_oriented
class KinematicContactProbe(
    RigidSensorMixin[KinematicContactProbeMetadata],
    NoisySensorMixin[KinematicContactProbeMetadata],
    Sensor[KinematicContactProbeMetadata],
):
    """Kinematic contact probe measuring penetration depth along the probe normal on collisions."""

    def __init__(
        self,
        sensor_options: KinematicContactProbeOptions,
        sensor_idx: int,
        data_cls: Type[KinematicContactProbeData],
        sensor_manager: "SensorManager",
    ):
        # Store n_probes before super().__init__() since _get_return_format() is called there
        self._n_probes = len(sensor_options.probe_local_pos)
        super().__init__(sensor_options, sensor_idx, data_cls, sensor_manager)
        # Debug spheres created by _draw_debug(); cleared and redrawn on every call.
        self._debug_objects: list["Mesh | None"] = []
        self._probe_local_pos = torch.tensor(self._options.probe_local_pos, dtype=gs.tc_float, device=gs.device)
        self._probe_local_normal = torch.tensor(self._options.probe_local_normal, dtype=gs.tc_float, device=gs.device)
        # Normalize the probe normals; the clamp guards against zero-length inputs.
        self._probe_local_normal /= self._probe_local_normal.norm(dim=1, keepdim=True).clamp(min=gs.EPS)

    def build(self):
        """Append this sensor's probes to the shared, flattened metadata arrays."""
        super().build()
        n_probes = len(self._probe_local_pos)
        sensor_idx = self._idx
        self._shared_metadata.n_probes_per_sensor = concat_with_tensor(
            self._shared_metadata.n_probes_per_sensor, n_probes, expand=(1,), dim=0
        )
        # Offset of this sensor's block in the shared cache: sum of earlier sensors' sizes.
        current_cache_start = sum(self._shared_metadata.cache_sizes[:-1]) if self._shared_metadata.cache_sizes else 0
        self._shared_metadata.sensor_cache_start = concat_with_tensor(
            self._shared_metadata.sensor_cache_start, current_cache_start, expand=(1,), dim=0
        )
        # Offset of this sensor's first probe in the flattened per-probe arrays.
        current_probe_start = self._shared_metadata.total_n_probes
        self._shared_metadata.sensor_probe_start = concat_with_tensor(
            self._shared_metadata.sensor_probe_start, current_probe_start, expand=(1,), dim=0
        )
        self._shared_metadata.probe_sensor_idx = concat_with_tensor(
            self._shared_metadata.probe_sensor_idx,
            torch.full((n_probes,), sensor_idx, dtype=gs.tc_int, device=gs.device),
            expand=(n_probes,),
            dim=0,
        )
        self._shared_metadata.probe_positions = concat_with_tensor(
            self._shared_metadata.probe_positions, self._probe_local_pos, expand=(n_probes, 3), dim=0
        )
        self._shared_metadata.probe_normals = concat_with_tensor(
            self._shared_metadata.probe_normals, self._probe_local_normal, expand=(n_probes, 3), dim=0
        )
        # Initialize the shared raycast cutoff once, from this link's AABB diagonal.
        if self._shared_metadata.probe_max_raycast_range < gs.EPS:
            link_aabb = self._link.get_vAABB()
            max_range = torch.linalg.norm(link_aabb[1] - link_aabb[0], dim=-1).max()
            self._shared_metadata.probe_max_raycast_range = max_range.item()
        self._shared_metadata.total_n_probes += n_probes
        # A scalar radius applies to every probe; a sequence gives per-probe radii.
        if isinstance(self._options.radius, float):
            radii_tensor = torch.full((n_probes,), self._options.radius, dtype=gs.tc_float, device=gs.device)
        else:
            radii_tensor = torch.tensor(self._options.radius, dtype=gs.tc_float, device=gs.device)
        self._shared_metadata.radii = concat_with_tensor(
            self._shared_metadata.radii, radii_tensor, expand=(n_probes,), dim=0
        )
        self._shared_metadata.stiffness = concat_with_tensor(
            self._shared_metadata.stiffness, self._options.stiffness, expand=(1,), dim=0
        )

    def _get_return_format(self) -> tuple[tuple[int, ...], ...]:
        # Matches KinematicContactProbeData: penetration (n,), force (n, 3).
        n = self._n_probes
        return (n,), (n, 3)

    @classmethod
    def _get_cache_dtype(cls) -> torch.dtype:
        return gs.tc_float

    @classmethod
    def _update_shared_ground_truth_cache(
        cls, shared_metadata: KinematicContactProbeMetadata, shared_ground_truth_cache: torch.Tensor
    ):
        """Recompute all probes' penetrations and forces into the shared ground-truth cache."""
        solver = shared_metadata.solver
        collider_state = solver.collider._collider_state
        # Clear the cache before the kernel scatters fresh values into it.
        shared_ground_truth_cache.zero_()
        _kernel_kinematic_contact_probe(
            shared_metadata.probe_positions,
            shared_metadata.probe_normals,
            shared_metadata.probe_sensor_idx,
            shared_metadata.probe_max_raycast_range,
            solver.links_state,
            shared_metadata.radii,
            shared_metadata.stiffness,
            shared_metadata.links_idx,
            shared_metadata.n_probes_per_sensor,
            shared_metadata.sensor_cache_start,
            shared_metadata.sensor_probe_start,
            collider_state,
            solver.geoms_state,
            solver.geoms_info,
            solver.fixed_verts_state,
            solver.free_verts_state,
            solver._static_rigid_sim_config,
            solver.verts_info,
            solver.faces_info,
            shared_ground_truth_cache,
            gs.EPS,
        )

    @classmethod
    def _update_shared_cache(
        cls,
        shared_metadata: KinematicContactProbeMetadata,
        shared_ground_truth_cache: torch.Tensor,
        shared_cache: torch.Tensor,
        buffered_data: "TensorRingBuffer",
    ):
        """Apply the measurement pipeline: delay with jitter, then noise/drift/bias, then quantization."""
        buffered_data.set(shared_ground_truth_cache)
        torch.normal(0.0, shared_metadata.jitter_ts, out=shared_metadata.cur_jitter_ts)
        cls._apply_delay_to_shared_cache(
            shared_metadata,
            shared_cache,
            buffered_data,
            shared_metadata.cur_jitter_ts,
            shared_metadata.interpolate,
        )
        cls._add_noise_drift_bias(shared_metadata, shared_cache)
        cls._quantize_to_resolution(shared_metadata.resolution, shared_cache)

    def _draw_debug(self, context: "RasterizerContext", buffer_updates: dict[str, np.ndarray]):
        """Draw one debug sphere per probe, colored by whether it is in contact."""
        env_idx = context.rendered_envs_idx[0] if self._manager._sim.n_envs > 0 else None
        # Remove the spheres drawn on the previous call before redrawing.
        for obj in self._debug_objects:
            if obj is not None:
                context.clear_debug_object(obj)
        self._debug_objects = []
        if self._link is None:
            return
        link_pos = self._link.get_pos(env_idx).reshape((3,))
        link_quat = self._link.get_quat(env_idx).reshape((4,))
        data = self.read_ground_truth(env_idx)
        for i, pos in enumerate(self._probe_local_pos):
            probe_world = link_pos + gu.transform_by_quat(pos, link_quat)
            # Look up this probe's radius in the flattened shared array.
            probe_global_idx = self._shared_metadata.sensor_probe_start[self._idx].item() + i
            probe_radius = self._shared_metadata.radii[probe_global_idx].item()
            penetration = data.penetration[i].item() if data.penetration.dim() > 0 else data.penetration.item()
            sphere_obj = context.draw_debug_sphere(
                pos=tensor_to_array(probe_world),
                radius=probe_radius,
                color=self._options.debug_sphere_color if penetration <= gs.EPS else self._options.debug_contact_color,
            )
            self._debug_objects.append(sphere_obj)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/sensors/kinematic_contact_probe.py",
"license": "Apache License 2.0",
"lines": 378,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:tests/test_rigid_physics_analytical_vs_gjk.py | """
Unit test comparing analytical capsule-capsule contact detection with GJK.
This test creates a modified version of narrowphase.py in a temporary file that
forces capsule-capsule and sphere-capsule collisions to use GJK instead of
analytical methods, allowing direct comparison between the two approaches.
# errno
We abuse errno in this test, because it is considerably easier, and needs much less code, than
attempting to add a new tensor into one of the existing structures, and have that work for both
ndarray and field, via monkey-patching.
errno is NOT designed for how we use it. Nevertheless with a couple of reasonable-ish assumptions
we can work with it.
Assumption 1: when code runs normally and correctly, nothing in Genesis production code (not including
test code) will ever set bit 16 of errno to any value except 0.
Assumption 2: when taking a step, nothing in Genesis production code will set bit 16 of errno to any value
at all - including 0 - when running normally.
Both of these assumptions are implicitly tested by our code, in that should Genesis code violate them,
our tests will almost certainly fail.
Note that as part of our use of errno, we take full responsibility ourselves for resetting it to 0 before each
test scenario. We do not assume - nor require - any existing Genesis code to handle this for us, for example
by setting errno to 0 in set_qpos.
Note that, for completeness, Genesis code does handle resetting errno to 0, inside set_qpos, but for simplicity,
we make resetting errno explicit in this test.
"""
import copy
import importlib.util
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import TYPE_CHECKING, Callable, cast
import numpy as np
import pytest
import genesis as gs
from .utils import assert_allclose
from .conftest import TOL_SINGLE
if TYPE_CHECKING:
from genesis.engine.entities.rigid_entity import RigidGeom
ERRNO_CALLED_GJK = 1 << 16
POS_TOL = 1e-2 # otherwise tests fail
# Tolerances for checking results against hand-computed expected values.
# Analytical solutions should be near-exact; GJK needs more slack; reason unclear.
#
# Penetration tolerance: absolute error in metres.
# Normal tolerance: maximum allowed value of (1 - |dot(actual, expected)|).
# e.g. 1e-5 means the normal must agree to within ~0.26 degrees,
# 1e-2 means within ~8 degrees.
ANALYTICAL_PEN_TOL = TOL_SINGLE
ANALYTICAL_NORMAL_TOL = TOL_SINGLE
GJK_PEN_TOL = 1e-2
GJK_NORMAL_TOL = 1e-2
def _check_expected_values(contacts, description, exp_pen, exp_normal, method_name, pen_tol, normal_tol):
"""Check that contacts match the expected penetration and/or normal, when provided.
Parameters
----------
pen_tol : float
Maximum absolute penetration error (metres).
normal_tol : float
Maximum allowed ``1 - |dot(actual, expected)|``.
"""
if not contacts or len(contacts["geom_a"]) == 0:
return
if exp_pen is not None:
pen = contacts["penetration"][0]
assert abs(pen - exp_pen) < pen_tol, (
f"[{method_name}] {description}: penetration {pen:.6f} != expected {exp_pen:.6f} (tol={pen_tol})"
)
if exp_normal is not None:
normal = np.array(contacts["normal"][0])
exp_n = np.array(exp_normal, dtype=float)
exp_n_len = np.linalg.norm(exp_n)
assert gs.EPS is not None
if exp_n_len > gs.EPS:
dot_err = 1.0 - abs(np.dot(normal, exp_n / exp_n_len))
assert dot_err < normal_tol, (
f"[{method_name}] {description}: normal {normal} vs expected {exp_n / exp_n_len}, "
f"1-|dot|={dot_err:.6e} >= {normal_tol}"
)
def create_capsule_mjcf(name, pos, euler, radius, half_length):
    """Build an in-memory MJCF model containing one free-floating capsule.

    Returns the root ``mujoco`` XML element (not yet serialized to disk).
    """
    root = ET.Element("mujoco", model=name)
    ET.SubElement(root, "compiler", angle="degree")
    ET.SubElement(root, "option", timestep="0.01")
    worldbody = ET.SubElement(root, "worldbody")
    # MJCF expects whitespace-separated scalar lists for pos/euler attributes.
    body = ET.SubElement(
        worldbody,
        "body",
        name=name,
        pos=" ".join(str(c) for c in pos),
        euler=" ".join(str(c) for c in euler),
    )
    ET.SubElement(body, "geom", type="capsule", size=f"{radius} {half_length}")
    # A free joint lets the tests reposition the capsule via set_qpos.
    ET.SubElement(body, "joint", name=f"{name}_joint", type="free")
    return root
def find_and_disable_condition(lines, function_name):
    """Locate a guarded call and short-circuit its governing if/elif with ``False and``.

    Mutates ``lines`` in place and returns it. Raises ``ValueError`` when either
    the call or its governing condition cannot be found.
    """
    # Locate the line holding the guarded call.
    call_idx = next(
        (i for i, text in enumerate(lines) if function_name in text and "(" in text),
        None,
    )
    if call_idx is None:
        raise ValueError(f"Could not find function call: {function_name}")
    # Walk upwards to the governing if/elif; hitting an else: first means there is none.
    cond_idx = None
    for i in reversed(range(call_idx)):
        head = lines[i].strip()
        if head.startswith(("if ", "elif ")):
            cond_idx = i
            break
        if head.startswith("else:"):
            break
    if cond_idx is None:
        raise ValueError(f"Could not find if/elif for {function_name}")
    # The condition may span several lines; it ends at the first line carrying a colon.
    cond_end_idx = cond_idx
    for i in range(cond_idx, call_idx):
        if ":" in lines[i]:
            cond_end_idx = i
            break
    original = lines[cond_idx]
    indent_str = original[: len(original) - len(original.lstrip())]
    head = original.strip()
    # Split the keyword from the condition expression.
    if head.startswith("if "):
        prefix, rest = "if ", head[3:]
    elif head.startswith("elif "):
        prefix, rest = "elif ", head[5:]
    else:
        raise ValueError(f"Expected if/elif but got: {original}")
    if cond_end_idx == cond_idx:
        # Single-line condition: a leading ``False and`` short-circuits it.
        lines[cond_idx] = f"{indent_str}{prefix}False and {rest}"
    else:
        # Multi-line condition: wrap the whole expression as ``False and (...)``
        # and close the parenthesis just before the terminating colon.
        lines[cond_idx] = f"{indent_str}{prefix}False and ({rest.rstrip(':').rstrip()}"
        lines[cond_end_idx] = lines[cond_end_idx].replace(":", "):", 1)
    return lines
def insert_errno_before_call(lines, function_call_pattern, errno_value, comment):
    """Insert an errno-marker line directly above the first clean match of a call.

    Mutates ``lines`` in place and returns it. Raises ``ValueError`` when no
    acceptable occurrence of the pattern exists.
    """
    target_idx = None
    for i, text in enumerate(lines):
        pos = text.find(function_call_pattern)
        if pos < 0:
            continue
        # Reject matches that are merely the tail of a longer identifier
        # (the character preceding the match must not be alphanumeric or '_').
        if pos == 0 or not (text[pos - 1].isalnum() or text[pos - 1] == "_"):
            target_idx = i
            break
    if target_idx is None:
        raise ValueError(f"Could not find function call: {function_call_pattern}")
    # Mirror the call line's indentation so the inserted statement stays valid Python.
    call_line = lines[target_idx]
    indent_size = len(call_line) - len(call_line.lstrip())
    lines.insert(target_idx, f"{' ' * indent_size}errno[i_b] |= {errno_value} # {comment}")
    return lines
def create_modified_narrowphase_file(tmp_path: Path):
    """
    Write a patched copy of narrowphase.py that forces capsule collisions through GJK.

    Returns:
        Path to the temporary modified narrowphase.py file.
    """
    # Locate the installed narrowphase.py source.
    import genesis.engine.solvers.rigid.collider.narrowphase as narrowphase_module

    with open(narrowphase_module.__file__, "r") as f:
        content = f.read()
    # Rewrite relative imports so the copy can be loaded from an arbitrary path.
    content = content.replace("from . import ", "from genesis.engine.solvers.rigid.collider import ")
    content = content.replace("from .", "from genesis.engine.solvers.rigid.collider.")
    lines = content.split("\n")
    # Short-circuit both analytical capsule paths so GJK is the only option left.
    for analytical_call in (
        "capsule_contact.func_capsule_capsule_contact",
        "capsule_contact.func_sphere_capsule_contact",
    ):
        lines = find_and_disable_condition(lines, analytical_call)
    # Tag every GJK entry point so tests can detect (via errno) that GJK actually ran.
    for gjk_call in ("diff_gjk.func_gjk_contact(", "gjk.func_gjk_contact("):
        lines = insert_errno_before_call(
            lines, gjk_call, ERRNO_CALLED_GJK, "MODIFIED: GJK called for collision detection"
        )
    content = "\n".join(lines)
    # Sanity check: at least one errno marker must have been inserted.
    assert content.count(f"errno[i_b] |= {ERRNO_CALLED_GJK}") >= 1
    temp_narrowphase_path = tmp_path / "narrow.py"
    with open(temp_narrowphase_path, "w") as f:
        f.write(content)
    return temp_narrowphase_path
def scene_add_sphere(tmp_path: Path, scene: gs.Scene, radius: float) -> "RigidGeom":
    """Write a one-sphere MJCF to disk and add it to the scene at the origin."""
    sphere_path = tmp_path / "sphere.xml"
    ET.ElementTree(create_sphere_mjcf("sphere", (0, 0, 0), radius)).write(sphere_path)
    return cast("RigidGeom", scene.add_entity(gs.morphs.MJCF(file=sphere_path)))
def scene_add_capsule(tmp_path: Path, scene: gs.Scene, half_length: float, radius: float) -> "RigidGeom":
    """Write a one-capsule MJCF to disk and add it to the scene.

    The capsule is created at the origin with identity orientation; tests
    reposition it afterwards via ``set_qpos``.
    """
    capsule_mjcf = create_capsule_mjcf("capsule", (0, 0, 0), (0, 0, 0), radius, half_length)
    # Fix: this previously wrote to "sphere.xml", clobbering the file produced by
    # scene_add_sphere when both helpers share the same tmp_path (mixed
    # sphere/capsule scenes). The MJCF appears to be read eagerly by add_entity
    # (the capsule-capsule scenes already overwrite the file between calls), so
    # the rename is otherwise behavior-preserving — confirm with gs.morphs.MJCF.
    capsule_path = tmp_path / "capsule.xml"
    ET.ElementTree(capsule_mjcf).write(capsule_path)
    entity_capsule = cast("RigidGeom", scene.add_entity(gs.morphs.MJCF(file=capsule_path)))
    return entity_capsule
class AnalyticalVsGJKSceneCreator:
    """Builds twin scenes (analytical vs GJK) and drives them through identical poses.

    The GJK scene is built normally and only later has its narrowphase kernel
    swapped via ``apply_gjk_patch``; both scenes are stepped with errno-based
    verification that the intended collision path actually executed.
    """

    def __init__(self, monkeypatch, build_scene: Callable, tmp_path: Path, show_viewer: bool) -> None:
        self.monkeypatch = monkeypatch
        self.build_scene = build_scene
        self.tmp_path = tmp_path
        self.show_viewer = show_viewer
        self.scene_analytical: gs.Scene
        self.scene_gjk: gs.Scene
        self.entities_analytical = []
        self.entities_gjk = []

    def setup_scenes(self) -> tuple[gs.Scene, gs.Scene]:
        """Build both scenes WITHOUT any monkey-patching."""
        # Scene 1: original analytical collision detection.
        self.scene_analytical = gs.Scene(show_viewer=self.show_viewer)
        self.build_scene(scene=self.scene_analytical, tmp_path=self.tmp_path, entities=self.entities_analytical)
        # Scene 2: built with use_gjk_collision=True; the kernel swap happens later.
        self.scene_gjk = gs.Scene(
            show_viewer=self.show_viewer,
            rigid_options=gs.options.RigidOptions(use_gjk_collision=True),
        )
        self.build_scene(scene=self.scene_gjk, tmp_path=self.tmp_path, entities=self.entities_gjk)
        return self.scene_analytical, self.scene_gjk

    def apply_gjk_patch(self) -> None:
        """
        Monkey-patch the @qd.kernel for narrowphase with the modified version from a tmp file.

        Replacing the entire kernel object means:
        - the new kernel has its own empty materialized_kernels cache, and
        - fastcache sees a different filepath in the cache key (the tmp file),
          so it won't find a stale on-disk cache hit.
        """
        patched_path = create_modified_narrowphase_file(tmp_path=self.tmp_path)
        spec = importlib.util.spec_from_file_location("narrowphase_modified", patched_path)
        patched_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(patched_module)
        from genesis.engine.solvers.rigid.collider import narrowphase

        self.monkeypatch.setattr(
            narrowphase,
            "func_narrow_phase_convex_vs_convex",
            patched_module.func_narrow_phase_convex_vs_convex,
        )

    @staticmethod
    def _set_pose(entity, pos, euler) -> None:
        """Teleport an entity to (pos, euler-in-degrees) and zero its velocities."""
        quat = gs.utils.geom.xyz_to_quat(xyz=np.array(euler, dtype=gs.np_float), degrees=True)
        entity.set_qpos((*pos, *quat))
        entity.zero_all_dofs_velocity()

    def update_pos_quat_analytical(self, entity_idx: int, pos, euler) -> None:
        self._set_pose(self.entities_analytical[entity_idx], pos, euler)

    def update_pos_quat_gjk(self, entity_idx: int, pos, euler) -> None:
        self._set_pose(self.entities_gjk[entity_idx], pos, euler)

    def step_analytical(self):
        # see section '# errno' above for discussion on our abusing errno, and the assumptions which we make.
        solver = self.scene_analytical._sim.rigid_solver
        solver._errno.fill(0)
        self.scene_analytical.step()
        assert (solver._errno[0] & ERRNO_CALLED_GJK) == 0, "Analytical scene should not use GJK."

    def step_gjk(self):
        # see section '# errno' above for discussion on our abusing errno, and the assumptions which we make.
        solver = self.scene_gjk._sim.rigid_solver
        solver._errno.fill(0)
        self.scene_gjk.step()
        assert (solver._errno[0] & ERRNO_CALLED_GJK) != 0, "GJK scene should use GJK."
@pytest.mark.required
@pytest.mark.parametrize("backend", [gs.cpu, gs.gpu])
def test_capsule_capsule_vs_gjk(backend, monkeypatch, tmp_path: Path, show_viewer: bool, tol: float) -> None:
    """
    Compare analytical capsule-capsule collision with GJK by monkey-patching narrowphase.
    Tests multiple configurations with a single scene build (moving objects between tests).
    Two-phase approach to avoid kernel caching interference:
    1. Run ALL analytical scenarios first (original kernel)
    2. Apply monkey-patch (replaces the @qd.kernel with a new object from a tmp file)
    3. Run ALL GJK scenarios (patched kernel with its own empty cache)
    """
    test_cases = [
        # (pos0, euler0, pos1, euler1, should_collide, description, exp_pen, exp_normal)
        # Segments cross at origin (distance=0), pen = sum of radii, normal is degenerate
        ((0, 0, 0), (0, 0, 0), (0.15, 0, 0), (0, 90, 0), True, "perpendicular_close", 0.2, None),
        # Parallel vertical, seg distance = 0.18, pen = 0.2 - 0.18 = 0.02
        ((0, 0, 0), (0, 0, 0), (0.18, 0, 0), (0, 0, 0), True, "parallel_light", 0.02, (-1, 0, 0)),
        ((0, 0, 0), (0, 90, 0), (0, 0.17, 0.17), (0, 90, 0), False, "horizontal_displaced", None, None),
        # Parallel vertical, seg distance = 0.15, pen = 0.2 - 0.15 = 0.05
        ((0, 0, 0), (0, 0, 0), (0.15, 0, 0), (0, 0, 0), True, "parallel_deep", 0.05, (-1, 0, 0)),
        # Segments cross at origin (distance=0), pen = sum of radii, normal is degenerate
        ((0, 0, 0), (0, 0, 0), (0, 0, 0), (90, 0, 0), True, "perpendicular_center", 0.2, None),
        # 45° capsule segment crosses the vertical segment at (0, 0, -0.15), so dist=0, pen = sum of radii
        ((0, 0, 0), (0, 0, 0), (0.15, 0, 0), (0, 45, 0), True, "diagonal_rotated", 0.2, None),
    ]
    # Shared capsule dimensions; sum of radii (0.2 m) drives the expected penetrations above.
    radius = 0.1
    half_length = 0.25
    def build_scene(scene: gs.Scene, tmp_path: Path, entities: list):
        # Two identical capsules; tests reposition them between scenarios.
        entities.append(scene_add_capsule(tmp_path, scene, half_length=half_length, radius=radius))
        entities.append(scene_add_capsule(tmp_path, scene, half_length=half_length, radius=radius))
        scene.build()
    scene_creator = AnalyticalVsGJKSceneCreator(
        monkeypatch=monkeypatch, build_scene=build_scene, tmp_path=tmp_path, show_viewer=show_viewer
    )
    scene_analytical, scene_gjk = scene_creator.setup_scenes()
    # Phase 1: Run all analytical scenarios (original, unpatched kernel)
    # Results are keyed by scenario description and consumed in phase 3 for cross-checking.
    analytical_results = {}
    for pos0, euler0, pos1, euler1, should_collide, description, exp_pen, exp_normal in test_cases:
        try:
            scene_creator.update_pos_quat_analytical(entity_idx=0, pos=pos0, euler=euler0)
            scene_creator.update_pos_quat_analytical(entity_idx=1, pos=pos1, euler=euler1)
            scene_creator.step_analytical()
            contacts = scene_analytical.rigid_solver.collider.get_contacts(as_tensor=False, to_torch=False)
            has_collision = len(contacts["geom_a"]) > 0
            assert has_collision == should_collide, "Analytical collision mismatch!"
            _check_expected_values(
                contacts, description, exp_pen, exp_normal, "analytical", ANALYTICAL_PEN_TOL, ANALYTICAL_NORMAL_TOL
            )
            # Deep-copy so subsequent steps can't corrupt stored data
            analytical_results[description] = copy.deepcopy(contacts)
        except AssertionError as e:
            # Re-raise with the full scenario parameters for easier triage.
            raise AssertionError(
                f"\nFAILED TEST SCENARIO (analytical phase): {description}\n"
                f"Capsule 0: pos={pos0}, euler={euler0}\n"
                f"Capsule 1: pos={pos1}, euler={euler1}\n"
                f"Expected collision: {should_collide}\n"
                f"Backend: {backend}\n"
                f"Radius: {radius}, Half-length: {half_length}\n"
            ) from e
    # Phase 2: Apply monkey-patch (replace @qd.kernel with version from tmp file)
    scene_creator.apply_gjk_patch()
    # Phase 3: Run all GJK scenarios (patched kernel, fresh cache)
    for pos0, euler0, pos1, euler1, should_collide, description, exp_pen, exp_normal in test_cases:
        try:
            scene_creator.update_pos_quat_gjk(entity_idx=0, pos=pos0, euler=euler0)
            scene_creator.update_pos_quat_gjk(entity_idx=1, pos=pos1, euler=euler1)
            scene_creator.step_gjk()
            contacts_gjk = scene_gjk.rigid_solver.collider.get_contacts(as_tensor=False, to_torch=False)
            contacts_analytical = analytical_results[description]
            has_collision_analytical = contacts_analytical is not None and len(contacts_analytical["geom_a"]) > 0
            has_collision_gjk = contacts_gjk is not None and len(contacts_gjk["geom_a"]) > 0
            assert has_collision_analytical == has_collision_gjk, "Collision detection mismatch!"
            assert has_collision_gjk == should_collide
            _check_expected_values(contacts_gjk, description, exp_pen, exp_normal, "GJK", GJK_PEN_TOL, GJK_NORMAL_TOL)
            # If both detected a collision, compare the contact details
            if has_collision_analytical and has_collision_gjk:
                pen_analytical = contacts_analytical["penetration"][0]
                pen_gjk = contacts_gjk["penetration"][0]
                normal_analytical = np.array(contacts_analytical["normal"][0])
                normal_gjk = np.array(contacts_gjk["normal"][0])
                pos_analytical = np.array(contacts_analytical["position"][0])
                pos_gjk = np.array(contacts_gjk["position"][0])
                assert_allclose(pen_analytical, pen_gjk, atol=POS_TOL, rtol=0.1, err_msg="Penetration mismatch!")
                normal_agreement = abs(np.dot(normal_analytical, normal_gjk))
                assert normal_agreement > 0.95, "Normal mismatch!"
                # Parallel-axis capsules may legitimately produce several contacts;
                # cross-check contact counts and positions for those scenarios only.
                if description in ["parallel_light", "parallel_deep"]:
                    n_analytical = len(contacts_analytical["geom_a"])
                    n_gjk = len(contacts_gjk["geom_a"])
                    # When GJK has multicontact, verify analytical also generates sufficient contacts
                    if n_gjk >= 2:
                        assert n_analytical >= 2, (
                            f"GJK found {n_gjk} contacts, but analytical only found {n_analytical} "
                            f"(expected at least 2)"
                        )
                        assert n_analytical >= (n_gjk - 1), (
                            f"GJK found {n_gjk} contacts, but analytical only found {n_analytical} "
                            f"(expected at least {n_gjk - 1})"
                        )
                    if n_analytical >= 2 or n_gjk >= 2:
                        # Every analytical contact must have a nearby GJK counterpart.
                        all_analytical_positions = np.array(
                            [contacts_analytical["position"][i] for i in range(n_analytical)]
                        )
                        all_gjk_positions = np.array([contacts_gjk["position"][i] for i in range(n_gjk)])
                        for pos_a in all_analytical_positions:
                            min_dist = min(np.linalg.norm(pos_a - pos_g) for pos_g in all_gjk_positions)
                            assert min_dist < POS_TOL
                        # For parallel vertical capsules, verify contacts are on the line between axes
                        if euler0 == (0, 0, 0) and euler1 == (0, 0, 0):  # Both vertical
                            expected_xy = np.array([pos1[0] / 2, 0.0])  # Midpoint between capsules
                            for pos_a in all_analytical_positions:
                                assert_allclose(pos_a[:2], expected_xy, tol=POS_TOL)
                                assert_allclose(pos_a[2], 0.0, tol=0.26)
                            for pos_g in all_gjk_positions:
                                assert_allclose(pos_g[:2], expected_xy, tol=POS_TOL)
                                assert -0.26 < pos_g[2] < 0.26
                    else:
                        assert_allclose(pos_analytical, pos_gjk, tol=POS_TOL)
                else:
                    assert_allclose(pos_analytical, pos_gjk, tol=POS_TOL)
        except AssertionError as e:
            # Re-raise with the full scenario parameters for easier triage.
            raise AssertionError(
                f"\nFAILED TEST SCENARIO (GJK phase): {description}\n"
                f"Capsule 0: pos={pos0}, euler={euler0}\n"
                f"Capsule 1: pos={pos1}, euler={euler1}\n"
                f"Expected collision: {should_collide}\n"
                f"Backend: {backend}\n"
                f"Radius: {radius}, Half-length: {half_length}\n"
            ) from e
@pytest.mark.required
@pytest.mark.parametrize("backend", [gs.cpu, gs.gpu])
def test_capsule_analytical_accuracy(backend, tmp_path: Path, show_viewer: bool, tol: float):
    """
    Test that analytical capsule-capsule gives exact results for simple cases.

    Geometry of the check:
        Capsule 1: centred at the origin, radius=0.1, half_length=0.25
        Capsule 2: centred at (0.15, 0, 0), same size
        Both segments are vertical, so the closest points lie at the centres:
        segment distance = 0.15, sum of radii = 0.2,
        expected penetration = 0.2 - 0.15 = 0.05,
        expected contact normal = (-1, 0, 0).
    """
    # Fix: the `backend` parametrization previously had no matching function
    # parameter, unlike the sibling tests in this file which accept it
    # explicitly; accept it here as well for consistency.
    scene = gs.Scene(show_viewer=show_viewer)
    _cap1 = scene_add_capsule(tmp_path=tmp_path, scene=scene, half_length=0.25, radius=0.1)
    cap2 = scene_add_capsule(tmp_path=tmp_path, scene=scene, half_length=0.25, radius=0.1)
    scene.build()
    # Place capsule 2 at (0.15, 0, 0) with identity quaternion (w, x, y, z).
    cap2.set_qpos(np.array([*(0.15, 0, 0), *(1, 0, 0, 0)], dtype=gs.np_float))
    scene.step()
    contacts = scene.rigid_solver.collider.get_contacts(as_tensor=False, to_torch=False)
    assert len(contacts["geom_a"]) > 0
    penetration = contacts["penetration"][0]
    expected_pen = 0.05
    assert_allclose(penetration, expected_pen, tol=POS_TOL, err_msg="Analytical solution not exact!")
    assert_allclose(contacts["normal"][0], (-1.0, 0.0, 0.0), tol=tol)
def create_sphere_mjcf(name, pos, radius):
    """Build an in-memory MJCF model containing one free-floating sphere.

    Returns the root ``mujoco`` XML element (not yet serialized to disk).
    """
    root = ET.Element("mujoco", model=name)
    ET.SubElement(root, "compiler", angle="degree")
    ET.SubElement(root, "option", timestep="0.01")
    worldbody = ET.SubElement(root, "worldbody")
    # MJCF expects a whitespace-separated scalar list for the pos attribute.
    body = ET.SubElement(
        worldbody,
        "body",
        name=name,
        pos=" ".join(str(c) for c in pos),
    )
    ET.SubElement(body, "geom", type="sphere", size=f"{radius}")
    # A free joint lets the tests reposition the sphere via set_qpos.
    ET.SubElement(body, "joint", name=f"{name}_joint", type="free")
    return root
@pytest.mark.required
@pytest.mark.parametrize("backend", [gs.cpu, gs.gpu])
def test_sphere_capsule_vs_gjk(backend, monkeypatch, tmp_path: Path, show_viewer: bool, tol: float) -> None:
    """
    Compare analytical sphere-capsule collision with GJK by monkey-patching narrowphase.
    Tests multiple configurations with a single scene build (moving objects between tests).
    Two-phase approach to avoid kernel caching interference:
    1. Run ALL analytical scenarios first (original kernel)
    2. Apply monkey-patch (replaces the @qd.kernel with a new object from a tmp file)
    3. Run ALL GJK scenarios (patched kernel with its own empty cache)
    Note that these can be visualized, for verification purposes, using the script at:
    https://github.com/Genesis-Embodied-AI/perso_hugh/blob/main/genesis/visualize_sphere_capsule.py
    (note: only accessible internally)
    """
    test_cases = [
        # (sphere_pos, capsule_pos, capsule_euler, should_collide, description, exp_pen, exp_normal)
        # Sphere above top cap: dist to segment endpoint (0,0,0.25) = 0.15, pen = 0.05
        ((0, 0, 0.4), (0, 0, 0), (0, 0, 0), True, "sphere_above_capsule_top", 0.05, (0, 0, 1)),
        # Sphere beside cylinder: dist to axis = 0.18, pen = 0.02
        ((0.18, 0, 0), (0, 0, 0), (0, 0, 0), True, "sphere_close_to_capsule", 0.02, (1, 0, 0)),
        # dist to axis = sqrt(0.17^2+0.17^2) ≈ 0.24 > 0.2, no collision
        ((0.17, 0.17, 0), (0, 0, 0), (0, 0, 0), False, "sphere_near_cylinder", None, None),
        ((0.35, 0, 0.35), (0, 0, 0), (0, 45, 0), False, "sphere_near_cap", None, None),
        # Sphere beside cylinder: dist to axis = 0.15, pen = 0.05
        ((0.15, 0, 0), (0, 0, 0), (0, 0, 0), True, "sphere_touching_cylinder", 0.05, (1, 0, 0)),
        # Sphere at capsule centre: dist = 0, pen = sum of radii = 0.2, normal is degenerate
        ((0, 0, 0), (0, 0, 0), (0, 0, 0), True, "sphere_at_capsule_center", 0.2, None),
        # Sphere near top cap: nearest segment pt = (0,0,0.25), dist = sqrt(0.15²+0.05²) ≈ 0.1581
        # pen = 0.2 - sqrt(0.025) ≈ 0.041886, normal along (3, 0, 1)
        ((0.15, 0, 0.3), (0, 0, 0), (0, 0, 0), True, "sphere_near_capsule_cap", 0.041886, (3, 0, 1)),
        # Horizontal capsule (axis along X after 90° Y rotation), sphere offset in Y: pen = 0.05
        ((0, 0.15, 0), (0, 0, 0), (0, 90, 0), True, "sphere_horizontal_capsule", 0.05, (0, 1, 0)),
    ]
    # Shared dimensions; sum of radii (0.2 m) drives the expected penetrations above.
    sphere_radius = 0.1
    capsule_radius = 0.1
    capsule_half_length = 0.25
    def build_scene(scene: gs.Scene, tmp_path: Path, entities: list) -> None:
        # Entity 0 is the sphere, entity 1 the capsule (order matters to the phases below).
        entities.append(scene_add_sphere(tmp_path, scene, radius=sphere_radius))
        entities.append(scene_add_capsule(tmp_path, scene, half_length=capsule_half_length, radius=capsule_radius))
        scene.build()
    scene_creator = AnalyticalVsGJKSceneCreator(
        monkeypatch=monkeypatch,
        build_scene=build_scene,
        tmp_path=tmp_path,
        show_viewer=show_viewer,
    )
    scene_analytical, scene_gjk = scene_creator.setup_scenes()
    # Phase 1: Run all analytical scenarios (original, unpatched kernel)
    # Results are keyed by scenario description and consumed in phase 3 for cross-checking.
    analytical_results = {}
    for sphere_pos, capsule_pos, capsule_euler, should_collide, description, exp_pen, exp_normal in test_cases:
        try:
            scene_creator.update_pos_quat_analytical(entity_idx=0, pos=sphere_pos, euler=[0, 0, 0])
            scene_creator.update_pos_quat_analytical(entity_idx=1, pos=capsule_pos, euler=capsule_euler)
            scene_creator.step_analytical()
            contacts = scene_analytical.rigid_solver.collider.get_contacts(as_tensor=False, to_torch=False)
            has_collision = len(contacts["geom_a"]) > 0
            assert has_collision == should_collide, "Analytical collision mismatch"
            _check_expected_values(
                contacts, description, exp_pen, exp_normal, "analytical", ANALYTICAL_PEN_TOL, ANALYTICAL_NORMAL_TOL
            )
            # Deep-copy so subsequent steps can't corrupt stored data
            analytical_results[description] = copy.deepcopy(contacts)
        except AssertionError as e:
            # Re-raise with the full scenario parameters for easier triage.
            raise AssertionError(
                f"\nFAILED TEST SCENARIO (analytical phase): {description}\n"
                f"Sphere: pos={sphere_pos}\n"
                f"Capsule: pos={capsule_pos}, euler={capsule_euler}\n"
                f"Expected collision: {should_collide}\n"
                f"Backend: {backend}\n"
                f"Sphere radius: {sphere_radius}\n"
                f"Capsule radius: {capsule_radius}, Half-length: {capsule_half_length}\n"
            ) from e
    # Phase 2: Apply monkey-patch (replace @qd.kernel with version from tmp file)
    scene_creator.apply_gjk_patch()
    # Phase 3: Run all GJK scenarios (patched kernel, fresh cache)
    for sphere_pos, capsule_pos, capsule_euler, should_collide, description, exp_pen, exp_normal in test_cases:
        try:
            scene_creator.update_pos_quat_gjk(entity_idx=0, pos=sphere_pos, euler=[0, 0, 0])
            scene_creator.update_pos_quat_gjk(entity_idx=1, pos=capsule_pos, euler=capsule_euler)
            scene_creator.step_gjk()
            contacts_gjk = scene_gjk.rigid_solver.collider.get_contacts(as_tensor=False, to_torch=False)
            contacts_analytical = analytical_results[description]
            has_collision_analytical = len(contacts_analytical["geom_a"]) > 0
            has_collision_gjk = len(contacts_gjk["geom_a"]) > 0
            assert has_collision_analytical == has_collision_gjk, "Collision detection mismatch!"
            assert has_collision_gjk == should_collide
            _check_expected_values(contacts_gjk, description, exp_pen, exp_normal, "GJK", GJK_PEN_TOL, GJK_NORMAL_TOL)
            # If both detected a collision, compare the contact details
            if has_collision_analytical and has_collision_gjk:
                pen_analytical = contacts_analytical["penetration"][0]
                pen_gjk = contacts_gjk["penetration"][0]
                normal_analytical = np.array(contacts_analytical["normal"][0])
                normal_gjk = np.array(contacts_gjk["normal"][0])
                pos_analytical = np.array(contacts_analytical["position"][0])
                pos_gjk = np.array(contacts_gjk["position"][0])
                assert_allclose(pen_analytical, pen_gjk, atol=POS_TOL, rtol=0.1, err_msg="Penetration mismatch!")
                normal_agreement = abs(np.dot(normal_analytical, normal_gjk))
                # The concentric case has a degenerate normal, so accept a much looser agreement there.
                normal_tol = 0.5 if description == "sphere_at_capsule_center" else 0.95
                assert normal_agreement > normal_tol, "Normal mismatch!"
                assert_allclose(pos_analytical, pos_gjk, tol=POS_TOL)
        except AssertionError as e:
            # Re-raise with the full scenario parameters for easier triage.
            raise AssertionError(
                f"\nFAILED TEST SCENARIO (GJK phase): {description}\n"
                f"Sphere: pos={sphere_pos}\n"
                f"Capsule: pos={capsule_pos}, euler={capsule_euler}\n"
                f"Expected collision: {should_collide}\n"
                f"Backend: {backend}\n"
                f"Sphere radius: {sphere_radius}\n"
                f"Capsule radius: {capsule_radius}, Half-length: {capsule_half_length}\n"
            ) from e
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_rigid_physics_analytical_vs_gjk.py",
"license": "Apache License 2.0",
"lines": 552,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/collider/constants.py | """
Constants and enums for the collider module.
"""
from enum import IntEnum
class RETURN_CODE(IntEnum):
    """
    Return codes for the general subroutines used in GJK and EPA algorithms.
    """
    SUCCESS = 0  # subroutine completed with a valid result
    FAIL = 1  # subroutine failed
class GJK_RETURN_CODE(IntEnum):
    """
    Return codes for the GJK algorithm.
    """
    SEPARATED = 0  # the two shapes do not intersect
    INTERSECT = 1  # the two shapes intersect
    NUM_ERROR = 2  # a numerical error occurred (see the GJK implementation)
class EPA_POLY_INIT_RETURN_CODE(IntEnum):
    """
    Return codes for the EPA polytope initialization.

    Non-SUCCESS members identify which stage rejected the initial polytope.
    NOTE(review): the P2/P3/P4 prefixes appear to refer to the 2-, 3- and
    4-vertex simplex cases handled by the initializer — confirm against the
    EPA implementation, which defines the exact semantics of each code.
    """
    SUCCESS = 0
    P2_NONCONVEX = 1
    P2_FALLBACK3 = 2
    P3_BAD_NORMAL = 3
    P3_INVALID_V4 = 4
    P3_INVALID_V5 = 5
    P3_MISSING_ORIGIN = 6
    P3_ORIGIN_ON_FACE = 7
    P4_MISSING_ORIGIN = 8
    P4_FALLBACK3 = 9
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/collider/constants.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/collider/gjk_support.py | """
Support function utilities for GJK algorithm.
This module contains support point computation functions used by both GJK and EPA algorithms.
"""
import quadrants as qd
import genesis as gs
import genesis.utils.geom as gu
import genesis.utils.array_class as array_class
from . import support_field
@qd.func
def support_mesh(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    direction,
    i_g,
    pos: qd.types.vector(3, dtype=gs.qd_float),
    quat: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
    i_o,
):
    """
    Find the support point on a mesh in the given direction.

    Scans all vertices of geom [i_g] and returns the one with the largest dot
    product against [direction], together with its global vertex index.

    Parameters
    ----------
    direction : vec3
        Search direction (world frame).
    i_g : int
        Geom index; selects the vertex range [vert_start, vert_end).
    pos, quat : vec3, vec4
        World-frame pose of the geom.
    i_b : int
        Batch (environment) index.
    i_o : int
        Object slot of the pair (0 or 1, see func_support); selects the
        warm-start cache entry.

    Returns
    -------
    (vec3, int)
        Support point in world frame and its global vertex id.
    """
    # Work in the geom's local frame: rotate the direction by the inverse quaternion.
    d_mesh = gu.qd_transform_by_quat(direction, gu.qd_inv_quat(quat))
    # Exhaustively search for the vertex with maximum dot product
    fmax = -gjk_info.FLOAT_MAX[None]
    imax = 0
    vert_start = geoms_info.vert_start[i_g]
    vert_end = geoms_info.vert_end[i_g]
    # Use the previous maximum vertex if it is within the current range
    # (the full scan below still runs; seeding only biases ties toward the
    # previously returned vertex, keeping results temporally coherent).
    prev_imax = gjk_state.support_mesh_prev_vertex_id[i_b, i_o]
    if (prev_imax >= vert_start) and (prev_imax < vert_end):
        pos_local = verts_info.init_pos[prev_imax]
        fmax = d_mesh.dot(pos_local)
        imax = prev_imax
    for i in range(vert_start, vert_end):
        pos_local = verts_info.init_pos[i]
        vdot = d_mesh.dot(pos_local)
        if vdot > fmax:
            fmax = vdot
            imax = i
    v = verts_info.init_pos[imax]
    vid = imax
    # Cache the winner as the warm start for the next query.
    gjk_state.support_mesh_prev_vertex_id[i_b, i_o] = vid
    # Transform the winning local-frame vertex back to world frame.
    v_world = gu.qd_transform_by_trans_quat(v, pos, quat)
    return v_world, vid
@qd.func
def support_driver(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    support_field_info: array_class.SupportFieldInfo,
    direction,
    i_g,
    pos: qd.types.vector(3, dtype=gs.qd_float),
    quat: qd.types.vector(4, dtype=gs.qd_float),
    i_b,
    i_o,
    shrink_sphere,
):
    """
    Find the support point of geom [i_g] along [direction], dispatching on geom type.

    @ shrink_sphere: If True, use point and line support for sphere and capsule.

    Returns ``(v, v_, vid)``: ``v`` is the support point, ``v_`` a secondary
    point only filled by the sphere/box branches (zero otherwise — presumably
    the local-frame support point; confirm against support_field), and ``vid``
    the supporting vertex id, or -1 for branches with no discrete vertex.
    """
    v = qd.Vector.zero(gs.qd_float, 3)
    v_ = qd.Vector.zero(gs.qd_float, 3)
    vid = -1
    geom_type = geoms_info.type[i_g]
    if geom_type == gs.GEOM_TYPE.SPHERE:
        v, v_, vid = support_field._func_support_sphere(geoms_info, direction, i_g, pos, quat, shrink_sphere)
    elif geom_type == gs.GEOM_TYPE.ELLIPSOID:
        v = support_field._func_support_ellipsoid(geoms_info, direction, i_g, pos, quat)
    elif geom_type == gs.GEOM_TYPE.CAPSULE:
        v = support_field._func_support_capsule(geoms_info, direction, i_g, pos, quat, shrink_sphere)
    elif geom_type == gs.GEOM_TYPE.BOX:
        v, v_, vid = support_field._func_support_box(geoms_info, direction, i_g, pos, quat)
    elif geom_type == gs.GEOM_TYPE.TERRAIN:
        # Compile-time guard: the prism branch only exists when the scene has terrain.
        if qd.static(collider_static_config.has_terrain):
            v, vid = support_field._func_support_prism(collider_state, direction, i_b)
    elif geom_type == gs.GEOM_TYPE.MESH and static_rigid_sim_config.enable_mujoco_compatibility:
        # If mujoco-compatible, do exhaustive search for the vertex
        v, vid = support_mesh(geoms_info, verts_info, gjk_state, gjk_info, direction, i_g, pos, quat, i_b, i_o)
    else:
        # Generic fallback: precomputed support-field lookup.
        v, v_, vid = support_field._func_support_world(support_field_info, direction, i_g, pos, quat)
    return v, v_, vid
@qd.func
def func_support(
    geoms_info: array_class.GeomsInfo,
    verts_info: array_class.VertsInfo,
    static_rigid_sim_config: qd.template(),
    collider_state: array_class.ColliderState,
    collider_static_config: qd.template(),
    gjk_state: array_class.GJKState,
    gjk_info: array_class.GJKInfo,
    support_field_info: array_class.SupportFieldInfo,
    i_ga,
    i_gb,
    i_b,
    dir,
    pos_a: qd.types.vector(3, dtype=gs.qd_float),
    quat_a: qd.types.vector(4, dtype=gs.qd_float),
    pos_b: qd.types.vector(3, dtype=gs.qd_float),
    quat_b: qd.types.vector(4, dtype=gs.qd_float),
    shrink_sphere,
):
    """
    Find support points on the two objects using [dir].

    Parameters:
    ----------
    dir: gs.qd_vec3
        The direction in which to find the support points, from [ga] (obj 1) to [gb] (obj 2).
    shrink_sphere: bool
        Forwarded to support_driver (point/line support for sphere and capsule).

    Returns
    -------
    tuple
        (support_point_obj1, support_point_obj2,
         support_point_localpos1, support_point_localpos2,
         support_point_id_obj1, support_point_id_obj2,
         support_point_minkowski)
        where the last entry is the Minkowski-difference point obj1 - obj2.
    """
    support_point_obj1 = gs.qd_vec3(0, 0, 0)
    support_point_obj2 = gs.qd_vec3(0, 0, 0)
    support_point_localpos1 = gs.qd_vec3(0, 0, 0)
    support_point_localpos2 = gs.qd_vec3(0, 0, 0)
    support_point_id_obj1 = -1
    support_point_id_obj2 = -1
    # Pass 0: object A queried along dir; pass 1: object B queried along -dir.
    for i in range(2):
        d = dir if i == 0 else -dir
        i_g = i_ga if i == 0 else i_gb
        pos = pos_a if i == 0 else pos_b
        quat = quat_a if i == 0 else quat_b
        sp, sp_, si = support_driver(
            geoms_info,
            verts_info,
            static_rigid_sim_config,
            collider_state,
            collider_static_config,
            gjk_state,
            gjk_info,
            support_field_info,
            d,
            i_g,
            pos,
            quat,
            i_b,
            i,
            shrink_sphere,
        )
        if i == 0:
            support_point_obj1 = sp
            support_point_id_obj1 = si
            support_point_localpos1 = sp_
        else:
            support_point_obj2 = sp
            support_point_id_obj2 = si
            support_point_localpos2 = sp_
    # Minkowski-difference support point used by GJK/EPA.
    support_point_minkowski = support_point_obj1 - support_point_obj2
    return (
        support_point_obj1,
        support_point_obj2,
        support_point_localpos1,
        support_point_localpos2,
        support_point_id_obj1,
        support_point_id_obj2,
        support_point_minkowski,
    )
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/collider/gjk_support.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/collider/gjk_utils.py | """
Utility functions for GJK/EPA algorithms.
This module contains shared utility functions used by both GJK and EPA algorithms.
"""
import quadrants as qd
import genesis as gs
import genesis.utils.array_class as array_class
from .constants import RETURN_CODE
from .utils import func_det3
@qd.func
def func_ray_triangle_intersection(
    ray_v1,
    ray_v2,
    tri_v1,
    tri_v2,
    tri_v3,
):
    """
    Check if the ray intersects the triangle.

    Uses the signed-volume test: for each triangle edge, compute the signed
    volume of the tetrahedron spanned by the two edge endpoints (taken relative
    to ray_v1) and the ray direction. The line through ray_v1 and ray_v2 pierces
    the triangle iff all three signed volumes share a sign (zeros allowed).
    Note the test is sign-symmetric in the ray direction, so the orientation of
    (ray_v1, ray_v2) does not matter.

    Returns
    -------
    bool
        True if the ray intersects the triangle, otherwise False.
    """
    ray = ray_v2 - ray_v1
    # Signed volumes of the tetrahedrons formed by the ray and triangle edges
    vols = gs.qd_vec3(0.0, 0.0, 0.0)
    for i in range(3):
        v1, v2 = gs.qd_vec3(0.0, 0.0, 0.0), gs.qd_vec3(0.0, 0.0, 0.0)
        if i == 0:
            v1, v2 = tri_v1 - ray_v1, tri_v2 - ray_v1
        # NOTE(review): plain `if` here (vs the `elif` below) is harmless since the
        # branches are mutually exclusive, but `elif` would be consistent with the
        # sibling helpers in this module.
        if i == 1:
            v1, v2 = tri_v2 - ray_v1, tri_v3 - ray_v1
        elif i == 2:
            v1, v2 = tri_v3 - ray_v1, tri_v1 - ray_v1
        vols[i] = func_det3(v1, v2, ray)
    return (vols >= 0.0).all() or (vols <= 0.0).all()
@qd.func
def func_triangle_affine_coords(
    point,
    tri_v1,
    tri_v2,
    tri_v3,
):
    """
    Compute the affine coordinates of the point with respect to the triangle.

    Solves for [l1, l2, l3] such that l1*tri_v1 + l2*tri_v2 + l3*tri_v3
    reconstructs the point's projection onto the triangle's plane (see how
    func_point_triangle_intersection uses the result). To keep the 3x3 system
    well-conditioned, the coordinate axis with the largest minor is dropped
    before solving by Cramer's rule.

    Returns
    -------
    vec3
        The affine coordinates [l1, l2, l3].
    """
    # Compute minors of the triangle vertices
    ms = gs.qd_vec3(0.0, 0.0, 0.0)
    for i in qd.static(range(3)):
        # Index pair for the 2x2 minors; the swap at i == 1 keeps the sign convention.
        i1, i2 = (i + 1) % 3, (i + 2) % 3
        if i == 1:
            i1, i2 = i2, i1
        ms[i] = (
            tri_v2[i1] * tri_v3[i2]
            - tri_v2[i2] * tri_v3[i1]
            - tri_v1[i1] * tri_v3[i2]
            + tri_v1[i2] * tri_v3[i1]
            + tri_v1[i1] * tri_v2[i2]
            - tri_v1[i2] * tri_v2[i1]
        )
    # Exclude one of the axes with the largest projection using the minors of the above linear system.
    m_max = gs.qd_float(0.0)
    i_x, i_y = gs.qd_int(0), gs.qd_int(0)
    absms = qd.abs(ms)
    for i in range(3):
        if absms[i] >= absms[(i + 1) % 3] and absms[i] >= absms[(i + 2) % 3]:
            # Remove the i-th row
            m_max = ms[i]
            i_x, i_y = (i + 1) % 3, (i + 2) % 3
            if i == 1:
                i_x, i_y = i_y, i_x
            break
    cs = gs.qd_vec3(0.0, 0.0, 0.0)
    for i in range(3):
        # Select the opposite edge (tv1, tv2) for each vertex's coordinate.
        tv1, tv2 = tri_v2, tri_v3
        if i == 1:
            tv1, tv2 = tri_v3, tri_v1
        elif i == 2:
            tv1, tv2 = tri_v1, tri_v2
        # Corresponds to the signed area of 2-simplex (triangle): (point, tv1, tv2)
        cs[i] = (
            point[i_x] * tv1[i_y]
            + point[i_y] * tv2[i_x]
            + tv1[i_x] * tv2[i_y]
            - point[i_x] * tv2[i_y]
            - point[i_y] * tv1[i_x]
            - tv2[i_x] * tv1[i_y]
        )
    # Affine coordinates are computed as: [ l1, l2, l3 ] = [ C1 / m_max, C2 / m_max, C3 / m_max ]
    return cs / m_max
@qd.func
def func_point_triangle_intersection(
    gjk_info: array_class.GJKInfo,
    point,
    tri_v1,
    tri_v2,
    tri_v3,
):
    """
    Check if the point is inside the triangle.

    The point is inside iff (a) all of its affine (barycentric) coordinates
    w.r.t. the triangle are non-negative, and (b) the point reconstructed from
    those coordinates matches the query point within the squared tolerance
    gjk_info.FLOAT_MIN_SQ (i.e. the point also lies on the triangle's plane).
    """
    is_inside = False
    # Compute the affine coordinates of the point with respect to the triangle
    _lambda = func_triangle_affine_coords(point, tri_v1, tri_v2, tri_v3)
    # If any of the affine coordinates is negative, the point is outside the triangle
    if (_lambda >= 0).all():
        # Check if the point predicted by the affine coordinates is equal to the point itself
        pred = tri_v1 * _lambda[0] + tri_v2 * _lambda[1] + tri_v3 * _lambda[2]
        diff = pred - point
        is_inside = diff.norm_sqr() < gjk_info.FLOAT_MIN_SQ[None]
    return is_inside
@qd.func
def func_point_plane_same_side(
    point,
    plane_v1,
    plane_v2,
    plane_v3,
):
    """
    Check if the point is on the same side of the plane as the origin.

    The plane is spanned by (plane_v1, plane_v2, plane_v3). Both the point and
    the origin are dotted against the plane normal; a positive product of the
    two dot products means they lie strictly on the same side.

    Returns
    -------
    RETURN_CODE.SUCCESS if strictly on the same side, RETURN_CODE.FAIL
    otherwise (including when either the point or the origin is on the plane).
    """
    # Compute the normal of the plane
    edge1 = plane_v2 - plane_v1
    edge2 = plane_v3 - plane_v1
    normal = edge1.cross(edge2)
    diff1 = point - plane_v1
    dot1 = normal.dot(diff1)
    # origin - plane_v1
    diff2 = -plane_v1
    dot2 = normal.dot(diff2)
    return RETURN_CODE.SUCCESS if dot1 * dot2 > 0 else RETURN_CODE.FAIL
@qd.func
def func_origin_tetra_intersection(
    tet_v1,
    tet_v2,
    tet_v3,
    tet_v4,
):
    """
    Check if the origin is inside the tetrahedron.

    For each of the four faces, checks that the remaining (opposite) vertex and
    the origin lie on the same side of the face plane; the origin is inside iff
    this holds for all faces.

    Returns
    -------
    RETURN_CODE.SUCCESS if the origin is inside, RETURN_CODE.FAIL otherwise.
    """
    flag = RETURN_CODE.SUCCESS
    for i in range(4):
        # Rotate the vertices so v1 is the vertex opposite the face (v2, v3, v4).
        v1, v2, v3, v4 = tet_v1, tet_v2, tet_v3, tet_v4
        if i == 1:
            v1, v2, v3, v4 = tet_v2, tet_v3, tet_v4, tet_v1
        elif i == 2:
            v1, v2, v3, v4 = tet_v3, tet_v4, tet_v1, tet_v2
        elif i == 3:
            v1, v2, v3, v4 = tet_v4, tet_v1, tet_v2, tet_v3
        flag = func_point_plane_same_side(v1, v2, v3, v4)
        # Early out: one failing face already proves the origin is outside.
        if flag == RETURN_CODE.FAIL:
            break
    return flag
@qd.func
def func_project_origin_to_plane(
    gjk_info: array_class.GJKInfo,
    v1,
    v2,
    v3,
):
    """
    Project the origin onto the plane defined by the simplex vertices.

    Three equivalent normal constructions (using different vertex pairs) are
    tried in turn, and the first with a sufficiently large squared norm is
    used; if none is clearly reliable, the last candidate is used as a
    fallback when it is at least non-degenerate.

    Returns
    -------
    (vec3, flag)
        The projected point and RETURN_CODE.SUCCESS, or a zero point and
        RETURN_CODE.FAIL when every candidate normal is degenerate.
    """
    point, flag = gs.qd_vec3(0, 0, 0), RETURN_CODE.SUCCESS
    d21 = v2 - v1
    d31 = v3 - v1
    d32 = v3 - v2
    for i in range(3):
        n = gs.qd_vec3(0, 0, 0)
        v = gs.qd_vec3(0, 0, 0)
        if i == 0:
            # Normal = (v1 - v2) x (v3 - v2)
            n = d32.cross(d21)
            v = v2
        elif i == 1:
            # Normal = (v2 - v1) x (v3 - v1)
            n = d21.cross(d31)
            v = v1
        else:
            # Normal = (v1 - v3) x (v2 - v3)
            n = d31.cross(d32)
            v = v3
        nv = n.dot(v)
        nn = n.norm_sqr()
        if nn == 0:
            # Zero normal, cannot project.
            flag = RETURN_CODE.FAIL
            break
        elif nn > gjk_info.FLOAT_MIN[None]:
            # Projection of the origin: n * (n . v) / |n|^2
            point = n * (nv / nn)
            flag = RETURN_CODE.SUCCESS
            break
        # Last fallback if no valid normal was found
        if i == 2:
            # If the normal is still unreliable, cannot project.
            if nn < gjk_info.FLOAT_MIN[None]:
                flag = RETURN_CODE.FAIL
            else:
                point = n * (nv / nn)
                flag = RETURN_CODE.SUCCESS
    return point, flag
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/collider/gjk_utils.py",
"license": "Apache License 2.0",
"lines": 203,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/engine/solvers/rigid/collider/utils.py | import quadrants as qd
import genesis as gs
import genesis.utils.array_class as array_class
from genesis.constants import GEOM_TYPE
@qd.func
def func_closest_points_on_segments(
    seg_a_p1,
    seg_a_p2,
    seg_b_p1,
    seg_b_p2,
    EPS,
):
    """
    Compute closest points on two line segments using analytical solution.

    Parameters
    ----------
    seg_a_p1, seg_a_p2
        Endpoints of segment A.
    seg_b_p1, seg_b_p2
        Endpoints of segment B.
    EPS
        Tolerance below which a denominator is treated as zero (parallel or
        degenerate segments).

    Returns
    -------
    (vec3, vec3)
        The closest point on segment A, then the closest point on segment B.

    References
    ----------
    Real-Time Collision Detection by Christer Ericson, Chapter 5.1.9
    """
    segment_a_dir = seg_a_p2 - seg_a_p1
    segment_b_dir = seg_b_p2 - seg_b_p1
    vec_between_segment_origins = seg_a_p1 - seg_b_p1
    a_squared_len = segment_a_dir.dot(segment_a_dir)
    dot_product_dir = segment_a_dir.dot(segment_b_dir)
    b_squared_len = segment_b_dir.dot(segment_b_dir)
    d = segment_a_dir.dot(vec_between_segment_origins)
    e = segment_b_dir.dot(vec_between_segment_origins)
    # Denominator of the 2x2 linear system for (s, t); ~0 means parallel lines.
    denom = a_squared_len * b_squared_len - dot_product_dir * dot_product_dir
    s = gs.qd_float(0.0)
    t = gs.qd_float(0.0)
    if denom < EPS:
        # Segments are parallel or one/both are degenerate
        s = 0.0
        if b_squared_len > EPS:
            t = qd.math.clamp(e / b_squared_len, 0.0, 1.0)
        else:
            t = 0.0
    else:
        # General case: solve for optimal parameters
        s = (dot_product_dir * e - b_squared_len * d) / denom
        t = (a_squared_len * e - dot_product_dir * d) / denom
        s = qd.math.clamp(s, 0.0, 1.0)
        # Recompute t for clamped s
        t = qd.math.clamp((dot_product_dir * s + e) / b_squared_len if b_squared_len > EPS else 0.0, 0.0, 1.0)
        # Recompute s for clamped t (ensures we're on segment boundaries)
        s_new = qd.math.clamp((dot_product_dir * t - d) / a_squared_len if a_squared_len > EPS else 0.0, 0.0, 1.0)
        # Use refined s if it improves the solution
        if a_squared_len > EPS:
            s = s_new
    seg_a_closest = seg_a_p1 + s * segment_a_dir
    seg_b_closest = seg_b_p1 + t * segment_b_dir
    return seg_a_closest, seg_b_closest
@qd.func
def func_det3(
    v1,
    v2,
    v3,
):
    """
    Compute the determinant of a 3x3 matrix M = [v1 | v2 | v3].

    Equivalent to the scalar triple product v1 . (v2 x v3); the sign encodes
    the orientation of the (v1, v2, v3) basis.
    """
    return (
        v1[0] * (v2[1] * v3[2] - v2[2] * v3[1])
        - v1[1] * (v2[0] * v3[2] - v2[2] * v3[0])
        + v1[2] * (v2[0] * v3[1] - v2[1] * v3[0])
    )
@qd.func
def func_point_in_geom_aabb(
    geoms_state: array_class.GeomsState,
    i_g: qd.i32,
    i_b: qd.i32,
    point: qd.types.vector(3, qd.f32),
    expansion: qd.f32 = 0.0,
):
    """
    Check if the point lies strictly inside the AABB of geom i_g in env i_b,
    with the box optionally enlarged by `expansion` on every side.
    """
    aabb_min = geoms_state.aabb_min[i_g, i_b] - expansion
    aabb_max = geoms_state.aabb_max[i_g, i_b] + expansion
    # Strict inequalities: a point exactly on a face does not count as inside.
    return (point > aabb_min).all() and (point < aabb_max).all()
@qd.func
def func_is_geom_aabbs_overlap(geoms_state: array_class.GeomsState, i_ga, i_gb, i_b):
    """
    Check if the AABBs of geoms i_ga and i_gb overlap in env i_b.

    Two boxes are disjoint iff they are separated along at least one axis;
    overlap is the negation of that test (boxes that merely touch count as
    disjoint because the comparisons are non-strict).
    """
    return not (
        (geoms_state.aabb_max[i_ga, i_b] <= geoms_state.aabb_min[i_gb, i_b]).any()
        or (geoms_state.aabb_min[i_ga, i_b] >= geoms_state.aabb_max[i_gb, i_b]).any()
    )
@qd.func
def func_is_discrete_geom(
    geoms_info: array_class.GeomsInfo,
    i_g,
):
    """
    Check if the given geom is a discrete geometry.

    Only meshes and boxes are treated as discrete here.
    """
    geom_type = geoms_info.type[i_g]
    return geom_type == GEOM_TYPE.MESH or geom_type == GEOM_TYPE.BOX
@qd.func
def func_is_discrete_geoms(
    geoms_info: array_class.GeomsInfo,
    i_ga,
    i_gb,
):
    """
    Check if the given geoms are discrete geometries.

    True only when BOTH geoms are discrete (mesh or box).
    """
    return func_is_discrete_geom(geoms_info, i_ga) and func_is_discrete_geom(geoms_info, i_gb)
@qd.func
def func_is_equal_vec(a, b, eps):
    """
    Check if two vectors are equal within a small tolerance.

    True iff every component of |a - b| is strictly below eps.
    """
    return (qd.abs(a - b) < eps).all()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/engine/solvers/rigid/collider/utils.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:examples/viewer_plugin/mesh_point_selector.py | import csv
import os
from typing import TYPE_CHECKING, NamedTuple
import numpy as np
from typing_extensions import override
import genesis as gs
import genesis.utils.geom as gu
import genesis.vis.keybindings as kb
from genesis.utils.misc import tensor_to_array
from genesis.vis.viewer_plugins import EVENT_HANDLE_STATE, EVENT_HANDLED, RaycasterViewerPlugin
if TYPE_CHECKING:
from genesis.engine.entities.rigid_entity import RigidLink
from genesis.engine.scene import Scene
from genesis.ext.pyrender.node import Node
class SelectedPoint(NamedTuple):
    """
    Represents a selected point on a rigid mesh surface.

    Stored in link-local coordinates so the point stays attached to the link
    as it moves.

    Attributes
    ----------
    link : RigidLink
        The rigid link that the point belongs to.
    local_position : np.ndarray, shape (3,)
        The position of the point in the link's local coordinate frame.
    local_normal : np.ndarray, shape (3,)
        The surface normal at the point in the link's local coordinate frame.
    """

    link: "RigidLink"
    local_position: np.ndarray  # shape (3,)
    local_normal: np.ndarray  # shape (3,)
class MeshPointSelectorPlugin(RaycasterViewerPlugin):
    """
    Interactive viewer plugin that enables using mouse clicks to select points on rigid meshes.

    Selected points are stored in local coordinates relative to their link's frame and
    de-duplicated by a position hash; clicking an already-selected point deselects it.
    On viewer close, all selected points are written to a CSV file.
    """

    def __init__(
        self,
        sphere_radius: float = 0.005,
        sphere_color: tuple = (0.1, 0.3, 1.0, 1.0),
        hover_color: tuple = (0.3, 0.5, 1.0, 1.0),
        grid_snap: tuple[float, float, float] = (-1.0, -1.0, -1.0),
        output_file: str = "selected_points.csv",
    ) -> None:
        """
        Parameters
        ----------
        sphere_radius : float
            Radius of the debug spheres drawn at selected/hovered points.
        sphere_color : tuple
            RGBA color for selected points.
        hover_color : tuple
            RGBA color for the hover preview.
        grid_snap : tuple[float, float, float]
            Per-axis snap spacing for selected points. A non-positive value
            disables snapping along that axis.
        output_file : str
            Path of the CSV file written when the viewer closes.
        """
        super().__init__()
        self.sphere_radius = sphere_radius
        self.sphere_color = sphere_color
        self.hover_color = hover_color
        self.grid_snap = grid_snap
        self.output_file = output_file
        # Selected points keyed by a hash of their (snapped) local position.
        self.selected_points: dict[int, SelectedPoint] = {}
        self._prev_mouse_pos: tuple[int, int] = (0, 0)

    def build(self, viewer, camera: "Node", scene: "Scene"):
        super().build(viewer, camera, scene)
        # Start the virtual cursor at the viewport center until a real motion event arrives.
        self._prev_mouse_pos: tuple[int, int] = (self.viewer._viewport_size[0] // 2, self.viewer._viewport_size[1] // 2)

    def _get_pos_hash(self, pos: np.ndarray) -> int:
        """
        Generate a hash for a given position to use as a unique identifier.

        Coordinates are rounded to 6 decimals so that floating-point noise does
        not produce distinct hashes for the same selected point.

        Parameters
        ----------
        pos : np.ndarray, shape (3,)
            The position to hash.

        Returns
        -------
        int
            The hash of the position.
        """
        return hash((round(pos[0], 6), round(pos[1], 6), round(pos[2], 6)))

    def _snap_to_grid(self, point: np.ndarray) -> np.ndarray:
        """
        Snap a point to the grid based on grid_snap settings.

        Axes whose snap spacing is not strictly positive are left untouched.

        Parameters
        ----------
        point : np.ndarray, shape (3,)
            The point to snap.

        Returns
        -------
        np.ndarray, shape (3,)
            The point snapped to the grid.
        """
        grid_snap = np.asarray(self.grid_snap)
        # FIX: the previous `grid_snap >= 0` test divided by a spacing of exactly 0.0,
        # producing NaN coordinates (and numpy warnings for negative spacings, since
        # np.where evaluates both branches). Only strictly positive spacings snap, and
        # a safe divisor of 1.0 is substituted on non-snapped axes.
        snap_axes = grid_snap > 0
        safe_snap = np.where(snap_axes, grid_snap, 1.0)
        return np.where(snap_axes, np.round(point / safe_snap) * safe_snap, point)

    @override
    def on_mouse_motion(self, x: int, y: int, dx: int, dy: int) -> EVENT_HANDLE_STATE:
        # Only track the cursor; the hover preview is drawn in on_draw.
        self._prev_mouse_pos = (x, y)

    @override
    def on_mouse_press(self, x: int, y: int, button: int, modifiers: int) -> EVENT_HANDLE_STATE:
        """Toggle selection of the mesh point under the cursor on left click."""
        if button == 1:  # left click
            ray = self._screen_position_to_ray(x, y)
            ray_hit = self._raycaster.cast(*ray)
            if ray_hit is not None and ray_hit.geom:
                link = ray_hit.geom.link
                world_pos = ray_hit.position
                world_normal = ray_hit.normal
                # Get link pose
                link_pos = tensor_to_array(link.get_pos())
                link_quat = tensor_to_array(link.get_quat())
                # Convert the hit into the link's local frame so it follows the link.
                local_pos = gu.inv_transform_by_trans_quat(world_pos, link_pos, link_quat)
                local_normal = gu.inv_transform_by_quat(world_normal, link_quat)
                # Apply grid snapping to local position
                local_pos = self._snap_to_grid(local_pos)
                pos_hash = self._get_pos_hash(local_pos)
                if pos_hash in self.selected_points:
                    # Deselect point if already selected
                    del self.selected_points[pos_hash]
                else:
                    selected_point = SelectedPoint(link, local_pos, local_normal)
                    self.selected_points[pos_hash] = selected_point
                return EVENT_HANDLED
        return None

    @override
    def on_draw(self) -> None:
        """Draw the hover preview and all currently selected points."""
        super().on_draw()
        if self.scene._visualizer is not None and self.scene._visualizer.is_built:
            self.scene.clear_debug_objects()
            mouse_ray = self._screen_position_to_ray(*self._prev_mouse_pos)
            closest_hit = self._raycaster.cast(*mouse_ray)
            if closest_hit is not None:
                snap_pos = self._snap_to_grid(closest_hit.position)
                # Draw hover preview
                self.scene.draw_debug_sphere(
                    snap_pos,
                    self.sphere_radius,
                    self.hover_color,
                )
                self.scene.draw_debug_arrow(
                    snap_pos,
                    tuple(n * 0.05 for n in closest_hit.normal),
                    self.sphere_radius / 2,
                    self.hover_color,
                )
            if self.selected_points:
                # Re-project every selected point into world space from its link's
                # current pose, so selections track moving links.
                world_positions = []
                for point in self.selected_points.values():
                    link_pos = tensor_to_array(point.link.get_pos())
                    link_quat = tensor_to_array(point.link.get_quat())
                    local_pos_arr = np.array(point.local_position, dtype=np.float32)
                    current_world_pos = gu.transform_by_trans_quat(local_pos_arr, link_pos, link_quat)
                    world_positions.append(current_world_pos)
                if len(world_positions) == 1:
                    self.scene.draw_debug_sphere(
                        world_positions[0],
                        self.sphere_radius,
                        self.sphere_color,
                    )
                else:
                    positions_array = np.array(world_positions)
                    self.scene.draw_debug_spheres(positions_array, self.sphere_radius, self.sphere_color)

    @override
    def on_close(self) -> None:
        """Write all selected points to the configured CSV file (best effort)."""
        super().on_close()
        if not self.selected_points:
            print("[MeshPointSelectorPlugin] No points selected.")
            return
        output_file = self.output_file
        try:
            with open(output_file, "w", newline="") as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(
                    [
                        "point_idx",
                        "link_idx",
                        "local_pos_x",
                        "local_pos_y",
                        "local_pos_z",
                        "local_normal_x",
                        "local_normal_y",
                        "local_normal_z",
                    ]
                )
                for i, point in enumerate(self.selected_points.values(), 1):
                    writer.writerow(
                        [
                            i,
                            point.link.idx,
                            point.local_position[0],
                            point.local_position[1],
                            point.local_position[2],
                            point.local_normal[0],
                            point.local_normal[1],
                            point.local_normal[2],
                        ]
                    )
            gs.logger.info(
                f"[MeshPointSelectorPlugin] Wrote {len(self.selected_points)} selected points to '{output_file}'"
            )
        except Exception as e:
            # Best effort: a failed export should not crash viewer shutdown.
            gs.logger.error(f"[MeshPointSelectorPlugin] Error writing to '{output_file}': {e}")
if __name__ == "__main__":
    # Demo: click points on a Shadow Hand mesh; the selections are exported to
    # CSV when the viewer closes.
    gs.init(backend=gs.gpu)
    # Zero gravity keeps the (fixed-base) hand scene static while selecting.
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            gravity=(0.0, 0.0, 0.0),
        ),
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(0.6, 0.6, 0.6),
            camera_lookat=(0.0, 0.0, 0.2),
            camera_fov=40,
        ),
        vis_options=gs.options.VisOptions(
            show_world_frame=True,
        ),
        profiling_options=gs.options.ProfilingOptions(
            show_FPS=False,
        ),
        show_viewer=True,
    )
    hand = scene.add_entity(
        morph=gs.morphs.URDF(
            file="urdf/shadow_hand/shadow_hand.urdf",
            collision=True,
            pos=(0.0, 0.0, 0.0),
            euler=(0.0, 0.0, 180.0),
            fixed=True,
            # Keep every link distinct so points attach to the right link.
            merge_fixed_links=False,
        ),
    )
    scene.viewer.add_plugin(
        MeshPointSelectorPlugin(
            sphere_radius=0.004,
            # Snap Y/Z to a 1 cm grid; X is left free (negative disables snapping).
            grid_snap=(-1.0, 0.01, 0.01),
            output_file="selected_points.csv",
        )
    )
    scene.build()
    is_running = True

    def stop():
        # Keybind callback: flips the main-loop flag to exit cleanly.
        global is_running
        is_running = False

    scene.viewer.register_keybinds(
        kb.Keybind("quit", kb.Key.ESCAPE, kb.KeyAction.PRESS, callback=stop),
    )
    try:
        while is_running:
            scene.step()
            # Under pytest, run a single step so the example doubles as a smoke test.
            if "PYTEST_VERSION" in os.environ:
                break
    except KeyboardInterrupt:
        gs.logger.info("Simulation interrupted, exiting.")
    finally:
        gs.logger.info("Simulation finished.")
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/viewer_plugin/mesh_point_selector.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:examples/viewer_plugin/mouse_interaction.py | import argparse
import math
import os
import genesis as gs
import genesis.vis.keybindings as kb
if __name__ == "__main__":
    # Demo: drag rigid bodies around with the mouse, either via spring forces
    # (--use_force) or by setting positions directly.
    parser = argparse.ArgumentParser(description="Mouse interaction viewer plugin example.")
    parser.add_argument(
        "--use_force", "-f", action="store_true", help="Apply spring forces instead of setting position"
    )
    args = parser.parse_args()
    gs.init(backend=gs.gpu)
    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(3.5, 0.0, 2.5),
            camera_lookat=(0.0, 0.0, 0.5),
            camera_fov=40,
        ),
        profiling_options=gs.options.ProfilingOptions(
            show_FPS=False,
        ),
        show_viewer=True,
    )
    scene.add_entity(gs.morphs.Plane())
    sphere = scene.add_entity(
        morph=gs.morphs.Sphere(
            pos=(-0.3, -0.3, 0),
            radius=0.1,
        ),
    )
    # Place six cubes on a spiral (increasing radius and height) around the origin.
    for i in range(6):
        angle = i * (2 * math.pi / 6)
        radius = 0.5 + i * 0.1
        cube = scene.add_entity(
            morph=gs.morphs.Box(
                pos=(radius * math.cos(angle), radius * math.sin(angle), 0.1 + i * 0.1),
                size=(0.2, 0.2, 0.2),
            ),
        )
    scene.viewer.add_plugin(
        gs.vis.viewer_plugins.MouseInteractionPlugin(
            use_force=args.use_force,
            color=(0.1, 0.6, 0.8, 0.6),
        )
    )
    scene.build()
    is_running = True

    def stop():
        # Keybind callback: flips the main-loop flag to exit cleanly.
        global is_running
        is_running = False

    scene.viewer.register_keybinds(
        kb.Keybind("quit", kb.Key.ESCAPE, kb.KeyAction.PRESS, callback=stop),
    )
    try:
        while is_running:
            scene.step()
            # Under pytest, run a single step so the example doubles as a smoke test.
            if "PYTEST_VERSION" in os.environ:
                break
    except KeyboardInterrupt:
        gs.logger.info("Simulation interrupted, exiting.")
    finally:
        gs.logger.info("Simulation finished.")
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "examples/viewer_plugin/mouse_interaction.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:genesis/utils/raycast.py | from typing import TYPE_CHECKING, NamedTuple
import numpy as np
import genesis as gs
if TYPE_CHECKING:
from genesis.engine.entities.rigid_entity.rigid_geom import RigidGeom
class Ray(NamedTuple):
    """A ray in world space, defined by an origin point and a direction vector."""

    origin: np.ndarray  # (3,)
    direction: np.ndarray  # (3,)
class RayHit(NamedTuple):
    """Result of a raycast query."""

    distance: float  # distance from the ray origin along the ray direction
    position: np.ndarray  # (3,) hit position in world space
    normal: np.ndarray  # (3,) surface normal at the hit
    geom: "RigidGeom | None"  # hit geometry; None for analytic hits (see plane_raycast)
def plane_raycast(normal: np.ndarray, distance: float, ray: Ray) -> RayHit | None:
    """Intersect *ray* with the plane {x : dot(normal, x) + distance = 0}.

    Returns a RayHit (with ``geom=None``) at the intersection in front of the
    ray origin, or None when the ray is parallel to the plane or the
    intersection lies behind the origin.
    """
    assert normal.shape == ray.direction.shape == ray.origin.shape == (3,)
    denom = np.dot(ray.direction, normal)
    # A near-zero denominator means the direction lies in the plane: no unique hit.
    if abs(denom) < gs.EPS:
        return None
    t = -(np.dot(ray.origin, normal) + distance) / denom
    # Negative parameter: the plane is behind the ray origin.
    if t < 0:
        return None
    return RayHit(distance=t, position=ray.origin + ray.direction * t, normal=normal, geom=None)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/utils/raycast.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:genesis/vis/viewer_plugins/plugins/mouse_interaction.py | from functools import wraps
from threading import Lock
from typing import TYPE_CHECKING, Any, Callable, Type
import numpy as np
from typing_extensions import override
import genesis as gs
import genesis.utils.geom as gu
from genesis.utils.mesh import create_plane
from genesis.utils.misc import tensor_to_array
from genesis.utils.raycast import Ray, RayHit, plane_raycast
from genesis.vis.keybindings import MouseButton
from ..viewer_plugin import EVENT_HANDLE_STATE, EVENT_HANDLED, RaycasterViewerPlugin
if TYPE_CHECKING:
from genesis.engine.entities.rigid_entity import RigidLink
from genesis.engine.scene import Scene
from genesis.ext.pyrender.node import Node
def with_lock(fun: Callable[..., Any]) -> Callable[..., Any]:
    """Method decorator: execute the wrapped method while holding ``self._lock``."""

    @wraps(fun)
    def locked(self: "MouseInteractionPlugin", *args: Any, **kwargs: Any) -> Any:
        self._lock.acquire()
        try:
            return fun(self, *args, **kwargs)
        finally:
            self._lock.release()

    return locked
class MouseInteractionPlugin(RaycasterViewerPlugin):
    """
    Basic interactive viewer plugin that enables using mouse to apply spring force on rigid entities.

    Left-click grabs a non-fixed rigid link under the cursor; while dragging, the grabbed
    point is either pulled toward the mouse with a critically-damped spring (``use_force=True``)
    or the entity is repositioned so the point follows the mouse. Scrolling while holding
    rotates the drag plane around the grabbed surface normal.
    """

    def __init__(
        self,
        use_force: bool = True,
        spring_const: float = 1000.0,
        color: tuple[float, float, float, float] = (0.2, 0.8, 0.8, 0.6),
    ) -> None:
        """
        Parameters
        ----------
        use_force : bool
            If True, pull the link with a spring force; otherwise set its position directly.
        spring_const : float
            Spring stiffness used when ``use_force`` is True.
        color : tuple[float, float, float, float]
            RGBA color of the debug visuals; the drag plane uses half the alpha.
        """
        super().__init__()
        self.use_force = bool(use_force)
        self.spring_const = float(spring_const)
        self.color = tuple(color)
        self.plane_color = (color[0], color[1], color[2], color[3] * 0.5)
        # Serializes viewer-thread event handlers against the sim-step update.
        self._lock: Lock = Lock()
        self._held_link: "RigidLink | None" = None
        self._held_point_local: np.ndarray | None = None  # Held point in link-local frame
        self._mouse_drag_plane: tuple[np.ndarray, float] | None = None  # (normal, plane offset)
        self._prev_mouse_screen_pos: tuple[int, int] = (0, 0)
        self._prev_mouse_scene_pos: np.ndarray | None = None
        self._surface_normal: np.ndarray | None = None  # Normal at the grab point (world frame)
        self._plane_rotation_angle: float = 0.0  # Scroll-controlled drag-plane rotation (rad)

    def build(self, viewer, camera: "Node", scene: "Scene"):
        super().build(viewer, camera, scene)
        # Start the virtual cursor at the viewport center until a real motion event arrives.
        self._prev_mouse_screen_pos = (self.viewer._viewport_size[0] // 2, self.viewer._viewport_size[1] // 2)

    @override
    def on_mouse_motion(self, x: int, y: int, dx: int, dy: int) -> EVENT_HANDLE_STATE:
        # Only track the cursor; dragging logic runs in update_on_sim_step.
        self._prev_mouse_screen_pos = (x, y)

    @with_lock
    @override
    def on_mouse_drag(self, x: int, y: int, dx: int, dy: int, buttons: int, modifiers: int) -> EVENT_HANDLE_STATE:
        self._prev_mouse_screen_pos = (x, y)
        # Consume the drag while holding a link so the camera does not move.
        if self._held_link:
            return EVENT_HANDLED

    @with_lock
    @override
    def on_mouse_scroll(self, x: int, y: int, scroll_x: float, scroll_y: float) -> EVENT_HANDLE_STATE:
        if self._held_link and self._surface_normal is not None:
            # Rotate the drag plane around the surface normal
            self._plane_rotation_angle += scroll_y * 0.1  # 0.1 radians per scroll unit
            self._update_drag_plane()
            return EVENT_HANDLED

    @with_lock
    @override
    def on_mouse_press(self, x: int, y: int, button: int, modifiers: int) -> EVENT_HANDLE_STATE:
        """Grab the non-fixed rigid link under the cursor on left click."""
        if button == MouseButton.LEFT:  # left mouse button
            ray = self._screen_position_to_ray(x, y)
            ray_hit = self._raycaster.cast(ray[0], ray[1])
            # FIX: the raycaster returns None on a miss (see on_draw and
            # MeshPointSelectorPlugin); dereferencing `.geom` unguarded raised
            # AttributeError when clicking empty space.
            if (
                ray_hit is not None
                and ray_hit.geom
                and ray_hit.geom.link is not None
                and not ray_hit.geom.link.is_fixed
            ):
                link = ray_hit.geom.link
                # Validate mass is not too small to prevent numerical instability
                if link.get_mass() < 1e-3:
                    gs.logger.warning(
                        f"Link '{link.name}' has very small mass ({link.get_mass():.2e}). "
                        "Skipping interaction to avoid numerical instability."
                    )
                    return
                self._held_link = link
                # Store the surface normal for rotation
                self._surface_normal = ray_hit.normal
                self._plane_rotation_angle = 0.0
                self._prev_mouse_scene_pos = ray_hit.position
                # Create drag plane perpendicular to surface normal
                self._update_drag_plane()
                # Store held point in link-local frame
                link_pos = tensor_to_array(link.get_pos())
                link_quat = tensor_to_array(link.get_quat())
                self._held_point_local = gu.inv_transform_by_trans_quat(ray_hit.position, link_pos, link_quat)

    @with_lock
    @override
    def on_mouse_release(self, x: int, y: int, button: int, modifiers: int) -> EVENT_HANDLE_STATE:
        """Drop the held link and reset all grab state."""
        if button == MouseButton.LEFT:
            self._held_link = None
            self._held_point_local = None
            self._mouse_drag_plane = None
            self._prev_mouse_scene_pos = None
            self._surface_normal = None
            self._plane_rotation_angle = 0.0

    @with_lock
    @override
    def update_on_sim_step(self) -> None:
        """Per-step: project the mouse onto the drag plane and move/pull the held link."""
        super().update_on_sim_step()
        if self._held_link:
            mouse_ray: Ray = self._screen_position_to_ray(*self._prev_mouse_screen_pos)
            assert self._mouse_drag_plane is not None
            ray_hit: RayHit = plane_raycast(*self._mouse_drag_plane, mouse_ray)
            # If ray doesn't hit the plane, skip this update
            if ray_hit is None:
                return
            self._prev_mouse_scene_pos = ray_hit.position
            if self.use_force:
                self._apply_spring_force(ray_hit.position, self.scene.sim.dt)
            else:
                assert self._held_point_local is not None
                # Teleport the entity so the grabbed point lands on the mouse position.
                link_quat = tensor_to_array(self._held_link.get_quat())
                offset_world = gu.transform_by_quat(self._held_point_local, link_quat)
                self._held_link.entity.set_pos(ray_hit.position - offset_world)

    @with_lock
    @override
    def on_draw(self) -> None:
        """Draw grab visuals: held point, spring line, drag plane, or hover normal."""
        # NOTE(review): unlike MeshPointSelectorPlugin.on_draw, this does not call
        # super().on_draw() — confirm the base class needs no per-frame work here.
        if self.scene._visualizer is not None and self.scene._visualizer.is_built:
            self.scene.clear_debug_objects()
            mouse_ray: Ray = self._screen_position_to_ray(*self._prev_mouse_screen_pos)
            closest_hit: RayHit = self._raycaster.cast(mouse_ray[0], mouse_ray[1])
            if self._held_link:
                assert self._mouse_drag_plane is not None
                assert self._held_point_local is not None
                # Draw held point
                link_pos = tensor_to_array(self._held_link.get_pos())
                link_quat = tensor_to_array(self._held_link.get_quat())
                held_point_world = gu.transform_by_trans_quat(self._held_point_local, link_pos, link_quat)
                plane_hit: RayHit | None = plane_raycast(*self._mouse_drag_plane, mouse_ray)
                if plane_hit is not None:
                    self.scene.draw_debug_sphere(
                        plane_hit.position,
                        radius=0.01,
                        color=self.color,
                    )
                    self.scene.draw_debug_line(
                        held_point_world,
                        plane_hit.position,
                        radius=0.005,
                        color=self.color,
                    )
                    # draw the mouse drag plane as a flat box around the mouse position
                    plane_normal, _plane_dist = self._mouse_drag_plane
                    self._draw_plane(
                        plane_normal,
                        plane_hit.position,
                        size=1.0,
                        color=self.plane_color,
                    )
            else:
                if closest_hit is not None:
                    self.scene.draw_debug_arrow(
                        closest_hit.position,
                        closest_hit.normal * 0.25,
                        color=self.color,
                    )

    def _draw_plane(
        self,
        normal: np.ndarray,
        center: np.ndarray | tuple[float, float, float],
        size: float = 1.0,
        color: tuple[float, float, float, float] = (0.5, 0.5, 1.0, 0.2),
    ) -> None:
        """Draw a square debug plane of the given size centered at `center` with `normal`."""
        vmesh, _ = create_plane(plane_size=(size, size), color_or_texture=color, double_sided=True)
        normal_arr = np.ascontiguousarray(normal, dtype=gs.np_float)
        T = gu.trans_R_to_T(center, gu.z_up_to_R(normal_arr))
        self.scene.draw_debug_mesh(vmesh, T=T)

    def _update_drag_plane(self) -> None:
        """Update the drag plane based on surface normal and rotation angle."""
        if self._surface_normal is None or self._prev_mouse_scene_pos is None:
            return
        # Get camera direction
        cam_forward = np.ascontiguousarray(-self.camera.matrix[:3, 2], dtype=gs.np_float)
        surface_normal_contig = np.ascontiguousarray(self._surface_normal, dtype=gs.np_float)
        # Create orthonormal basis with surface_normal as z-axis
        R = gu.z_up_to_R(surface_normal_contig, up=cam_forward)
        # Project the camera forward direction into the plane spanned by the two
        # tangent axes, so the drag plane faces the camera as much as possible.
        plane_normal = R[:, 0] * np.dot(R[:, 0], cam_forward) + R[:, 1] * np.dot(R[:, 1], cam_forward)
        plane_normal = plane_normal / (np.linalg.norm(plane_normal) + gs.EPS)
        if abs(self._plane_rotation_angle) > gs.EPS:
            rotation_matrix = gu.axis_angle_to_R(surface_normal_contig, self._plane_rotation_angle)
            plane_normal = gu.transform_by_R(plane_normal, rotation_matrix)
        # Set the drag plane (perpendicular to surface normal)
        self._mouse_drag_plane = (plane_normal, -np.dot(plane_normal, self._prev_mouse_scene_pos))

    def _apply_spring_force(self, control_point: np.ndarray, dt: float) -> None:
        """Pull the held point toward `control_point` with a critically-damped spring.

        The spring-damper impulse is computed per world axis against the
        constraint's effective (virtual) mass, then converted back into a
        force/torque pair applied at the link's center of mass.
        """
        if not self._held_link:
            return
        # Get current link state
        link_pos = tensor_to_array(self._held_link.get_pos())
        link_quat = tensor_to_array(self._held_link.get_quat())
        lin_vel = tensor_to_array(self._held_link.get_vel())
        ang_vel = tensor_to_array(self._held_link.get_ang())
        # Compute current world position of held point
        held_point_world = gu.transform_by_trans_quat(self._held_point_local, link_pos, link_quat)
        # Compute inertial frame properties
        inertial_pos = tensor_to_array(self._held_link.inertial_pos)
        inertial_quat = tensor_to_array(self._held_link.inertial_quat)
        world_principal_quat = gu.transform_quat_by_quat(inertial_quat, link_quat)
        # Compute arm from COM to held point in world frame
        arm_in_principal = gu.inv_transform_by_trans_quat(self._held_point_local, inertial_pos, inertial_quat)
        arm_in_world = gu.transform_by_quat(arm_in_principal, world_principal_quat)
        # Compute inverse inertia in world frame
        R_world = gu.quat_to_R(world_principal_quat)
        inertia_world = R_world @ self._held_link.inertial_i @ R_world.T
        inv_inertia_world = np.linalg.inv(inertia_world)
        pos_err_v = control_point - held_point_world
        inv_mass = float(1.0 / self._held_link.get_mass())
        total_impulse = np.zeros(3, dtype=gs.np_float)
        total_torque_impulse = np.zeros(3, dtype=gs.np_float)
        # Approximate spring-damper in each axis
        for i in range(3):
            # Recompute the point velocity each iteration: earlier axes already
            # updated lin_vel/ang_vel (sequential impulse style).
            body_point_vel = lin_vel + np.cross(ang_vel, arm_in_world)
            vel_err_v = -body_point_vel
            direction = np.zeros(3, dtype=gs.np_float)
            direction[i] = 1.0
            pos_err = np.dot(direction, pos_err_v)
            vel_err = np.dot(direction, vel_err_v)
            # Compute virtual mass (effective inertia for this constraint direction)
            arm_x_dir = np.cross(arm_in_world, direction)
            rot_mass = np.dot(arm_x_dir, inv_inertia_world @ arm_x_dir)
            virtual_mass = 1.0 / (inv_mass + rot_mass + gs.EPS)
            # Critical damping
            damping_coeff = 2.0 * np.sqrt(self.spring_const * virtual_mass)
            # Impulse: J = F*dt = k*x*dt + c*v*dt
            impulse = (self.spring_const * pos_err + damping_coeff * vel_err) * dt
            lin_vel += direction * impulse * inv_mass
            ang_vel += inv_inertia_world @ (arm_x_dir * impulse)
            total_impulse[i] += impulse
            total_torque_impulse += arm_x_dir * impulse
        # Apply the new force
        self._held_link.solver.apply_links_external_force(
            total_impulse / dt, (self._held_link.idx,), ref="link_com", local=False
        )
        self._held_link.solver.apply_links_external_torque(
            total_torque_impulse / dt, (self._held_link.idx,), ref="link_com", local=False
        )
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/vis/viewer_plugins/plugins/mouse_interaction.py",
"license": "Apache License 2.0",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/vis/keybindings.py | from dataclasses import dataclass, field
from enum import IntEnum
from typing import Callable, Any
class LabeledIntEnum(IntEnum):
    """IntEnum whose members carry a human-readable label.

    Members are declared as ``NAME = value, label``; ``str(member)`` returns
    the label while the member still compares and hashes as its int value.
    """

    def __new__(cls, value, label):
        member = int.__new__(cls, value)
        member._value_ = value
        member._label = label
        return member

    def __str__(self) -> str:
        return self._label
class Key(LabeledIntEnum):
    """
    Key codes for keyboard keys.

    Values and names mirror the pyglet key codes so they can be compared
    directly against pyglet window events; the second tuple element is the
    human-readable label returned by ``str()``.
    https://github.com/pyglet/pyglet/blob/master/pyglet/window/key.py
    """

    # fmt: off
    # ASCII commands
    BACKSPACE = 0xff08, "backspace"
    TAB = 0xff09, "tab"
    LINEFEED = 0xff0a, "linefeed"
    CLEAR = 0xff0b, "clear"
    RETURN = 0xff0d, "return"
    ENTER = 0xff0d, "enter"  # synonym
    PAUSE = 0xff13, "pause"
    SCROLLLOCK = 0xff14, "scrolllock"
    SYSREQ = 0xff15, "sysreq"
    ESCAPE = 0xff1b, "escape"
    # Cursor control and motion
    HOME = 0xff50, "home"
    LEFT = 0xff51, "left"
    UP = 0xff52, "up"
    RIGHT = 0xff53, "right"
    DOWN = 0xff54, "down"
    PAGEUP = 0xff55, "pageup"
    PAGEDOWN = 0xff56, "pagedown"
    END = 0xff57, "end"
    BEGIN = 0xff58, "begin"
    # Misc functions
    DELETE = 0xffff, "delete"
    SELECT = 0xff60, "select"
    PRINT = 0xff61, "print"
    EXECUTE = 0xff62, "execute"
    INSERT = 0xff63, "insert"
    UNDO = 0xff65, "undo"
    REDO = 0xff66, "redo"
    MENU = 0xff67, "menu"
    FIND = 0xff68, "find"
    CANCEL = 0xff69, "cancel"
    HELP = 0xff6a, "help"
    BREAK = 0xff6b, "break"
    MODESWITCH = 0xff7e, "modeswitch"
    SCRIPTSWITCH = 0xff7e, "scriptswitch"
    FUNCTION = 0xffd2, "function"
    # Number pad
    NUMLOCK = 0xff7f, "numlock"
    NUM_SPACE = 0xff80, "num_space"
    NUM_TAB = 0xff89, "num_tab"
    NUM_ENTER = 0xff8d, "num_enter"
    NUM_F1 = 0xff91, "num_f1"
    NUM_F2 = 0xff92, "num_f2"
    NUM_F3 = 0xff93, "num_f3"
    NUM_F4 = 0xff94, "num_f4"
    NUM_HOME = 0xff95, "num_home"
    NUM_LEFT = 0xff96, "num_left"
    NUM_UP = 0xff97, "num_up"
    NUM_RIGHT = 0xff98, "num_right"
    NUM_DOWN = 0xff99, "num_down"
    NUM_PRIOR = 0xff9a, "num_prior"
    NUM_PAGE_UP = 0xff9a, "num_page_up"
    NUM_NEXT = 0xff9b, "num_next"
    NUM_PAGE_DOWN = 0xff9b, "num_page_down"
    NUM_END = 0xff9c, "num_end"
    NUM_BEGIN = 0xff9d, "num_begin"
    NUM_INSERT = 0xff9e, "num_insert"
    NUM_DELETE = 0xff9f, "num_delete"
    NUM_EQUAL = 0xffbd, "num_equal"
    NUM_MULTIPLY = 0xffaa, "num_multiply"
    NUM_ADD = 0xffab, "num_add"
    NUM_SEPARATOR = 0xffac, "num_separator"
    NUM_SUBTRACT = 0xffad, "num_subtract"
    NUM_DECIMAL = 0xffae, "num_decimal"
    NUM_DIVIDE = 0xffaf, "num_divide"
    NUM_0 = 0xffb0, "num_0"
    NUM_1 = 0xffb1, "num_1"
    NUM_2 = 0xffb2, "num_2"
    NUM_3 = 0xffb3, "num_3"
    NUM_4 = 0xffb4, "num_4"
    NUM_5 = 0xffb5, "num_5"
    NUM_6 = 0xffb6, "num_6"
    NUM_7 = 0xffb7, "num_7"
    NUM_8 = 0xffb8, "num_8"
    NUM_9 = 0xffb9, "num_9"
    # Function keys
    F1 = 0xffbe, "f1"
    F2 = 0xffbf, "f2"
    F3 = 0xffc0, "f3"
    F4 = 0xffc1, "f4"
    F5 = 0xffc2, "f5"
    F6 = 0xffc3, "f6"
    F7 = 0xffc4, "f7"
    F8 = 0xffc5, "f8"
    F9 = 0xffc6, "f9"
    F10 = 0xffc7, "f10"
    F11 = 0xffc8, "f11"
    F12 = 0xffc9, "f12"
    F13 = 0xffca, "f13"
    F14 = 0xffcb, "f14"
    F15 = 0xffcc, "f15"
    F16 = 0xffcd, "f16"
    F17 = 0xffce, "f17"
    F18 = 0xffcf, "f18"
    F19 = 0xffd0, "f19"
    F20 = 0xffd1, "f20"
    F21 = 0xffd2, "f21"
    F22 = 0xffd3, "f22"
    F23 = 0xffd4, "f23"
    F24 = 0xffd5, "f24"
    # Modifiers
    LSHIFT = 0xffe1, "left_shift"
    RSHIFT = 0xffe2, "right_shift"
    LCTRL = 0xffe3, "left_ctrl"
    RCTRL = 0xffe4, "right_ctrl"
    CAPSLOCK = 0xffe5, "capslock"
    LMETA = 0xffe7, "left_meta"
    RMETA = 0xffe8, "right_meta"
    LALT = 0xffe9, "left_alt"
    RALT = 0xffea, "right_alt"
    LWINDOWS = 0xffeb, "left_windows"
    RWINDOWS = 0xffec, "right_windows"
    LCOMMAND = 0xffed, "left_command"
    RCOMMAND = 0xffee, "right_command"
    LOPTION = 0xffef, "left_option"
    ROPTION = 0xfff0, "right_option"
    # Latin-1
    SPACE = 0x020, "space"
    EXCLAMATION = 0x021, "!"
    DOUBLEQUOTE = 0x022, "\""
    HASH = 0x023, "#"
    POUND = 0x023, "#"  # synonym
    DOLLAR = 0x024, "$"
    PERCENT = 0x025, "%"
    AMPERSAND = 0x026, "&"
    APOSTROPHE = 0x027, "'"
    PARENLEFT = 0x028, "("
    PARENRIGHT = 0x029, ")"
    ASTERISK = 0x02a, "*"
    PLUS = 0x02b, "+"
    COMMA = 0x02c, ","
    MINUS = 0x02d, "-"
    PERIOD = 0x02e, "."
    SLASH = 0x02f, "/"
    _0 = 0x030, "0"
    _1 = 0x031, "1"
    _2 = 0x032, "2"
    _3 = 0x033, "3"
    _4 = 0x034, "4"
    _5 = 0x035, "5"
    _6 = 0x036, "6"
    _7 = 0x037, "7"
    _8 = 0x038, "8"
    _9 = 0x039, "9"
    COLON = 0x03a, ":"
    SEMICOLON = 0x03b, ";"
    LESS = 0x03c, "<"
    EQUAL = 0x03d, "="
    GREATER = 0x03e, ">"
    QUESTION = 0x03f, "?"
    AT = 0x040, "@"
    BRACKETLEFT = 0x05b, "["
    BACKSLASH = 0x05c, "\\"
    BRACKETRIGHT = 0x05d, "]"
    ASCIICIRCUM = 0x05e, "^"
    UNDERSCORE = 0x05f, "_"
    GRAVE = 0x060, "`"
    QUOTELEFT = 0x060, "`"
    A = 0x061, "a"
    B = 0x062, "b"
    C = 0x063, "c"
    D = 0x064, "d"
    E = 0x065, "e"
    F = 0x066, "f"
    G = 0x067, "g"
    H = 0x068, "h"
    I = 0x069, "i"
    J = 0x06a, "j"
    K = 0x06b, "k"
    L = 0x06c, "l"
    M = 0x06d, "m"
    N = 0x06e, "n"
    O = 0x06f, "o"
    P = 0x070, "p"
    Q = 0x071, "q"
    R = 0x072, "r"
    S = 0x073, "s"
    T = 0x074, "t"
    U = 0x075, "u"
    V = 0x076, "v"
    W = 0x077, "w"
    X = 0x078, "x"
    Y = 0x079, "y"
    Z = 0x07a, "z"
    BRACELEFT = 0x07b, "{"
    BAR = 0x07c, "|"
    BRACERIGHT = 0x07d, "}"
    ASCIITILDE = 0x07e, "~"
    # fmt: on
class KeyMod(LabeledIntEnum):
    """Modifier-key bit flags; members can be OR-ed together into a modifier mask."""
    # fmt: off
    SHIFT = 1 << 0, "shift"
    CTRL = 1 << 1, "ctrl"
    ALT = 1 << 2, "alt"
    CAPSLOCK = 1 << 3, "capslock"
    NUMLOCK = 1 << 4, "numlock"
    WINDOWS = 1 << 5, "windows"
    COMMAND = 1 << 6, "command"
    OPTION = 1 << 7, "option"
    SCROLLLOCK = 1 << 8, "scrolllock"
    FUNCTION = 1 << 9, "function"
    # fmt: on
class KeyAction(LabeledIntEnum):
    """Kind of key event a keybind reacts to."""
    PRESS = 0, "press"
    HOLD = 1, "hold"
    RELEASE = 2, "release"
class MouseButton(LabeledIntEnum):
    """Mouse-button bit flags; members can be OR-ed together into a button mask."""
    LEFT = 1 << 0, "left"
    MIDDLE = 1 << 1, "middle"
    RIGHT = 1 << 2, "right"
def get_key_hash(key_code: int, modifiers: int | None, action: KeyAction) -> int:
    """Generate a unique hash for a key combination.

    Parameters
    ----------
    key_code : int
        The key code as an int.
    modifiers : int | None
        The modifier keys pressed, as an int with bit flags, or None to ignore modifiers.
    action : KeyAction
        The type of key action (press, hold, release).

    Returns
    -------
    int
        A unique hash for this key combination.
    """
    combo = (key_code, modifiers, action)
    return hash(combo)
@dataclass
class Keybind:
    """
    A named key combination bound to an optional callback.

    Parameters
    ----------
    name : str
        The name of the keybind.
    key : Key
        The key code for the keybind.
    key_action : KeyAction
        The type of key action (press, hold, release).
    key_mods : tuple[KeyMod] | None
        The modifier keys required for the keybind. If None, modifiers are ignored.
    callback : Callable[[], None] | None
        The function invoked when the keybind is activated.
    args : tuple
        Positional arguments passed to the callback.
    kwargs : dict
        Keyword arguments passed to the callback.
    """

    name: str
    key: Key
    key_action: KeyAction = KeyAction.PRESS
    key_mods: tuple[KeyMod] | None = None
    callback: Callable[..., None] | None = None
    args: tuple[Any, ...] = ()
    kwargs: dict = field(default_factory=dict)
    _modifiers: int | None = field(default=None, init=False, repr=False)

    def __post_init__(self):
        # Collapse the modifier tuple into a single bit mask; None means
        # "match regardless of modifiers".
        if self.key_mods is not None:
            mask = 0
            for mod in self.key_mods:
                mask |= mod
            self._modifiers = mask
        # Guard against callers explicitly passing kwargs=None.
        if self.kwargs is None:
            self.kwargs = {}

    def key_hash(self) -> int:
        """Generate a unique hash for the keybind based on key code and modifiers."""
        return get_key_hash(self.key, self._modifiers, self.key_action)
class Keybindings:
    """
    Registry of Keybind objects, indexed both by key-combination hash and by name.
    """

    def __init__(self, keybinds: tuple[Keybind] = ()):
        """
        Parameters
        ----------
        keybinds : tuple[Keybind]
            Initial keybinds to register (no conflict checking is performed here).
        """
        self._keybinds_map: dict[int, Keybind] = {}
        self._name_to_hash: dict[str, int] = {}
        for kb in keybinds:
            key_hash = kb.key_hash()
            self._keybinds_map[key_hash] = kb
            self._name_to_hash[kb.name] = key_hash

    def register(self, keybind: Keybind) -> None:
        """
        Register a new keybind.

        Raises
        ------
        ValueError
            If the key combination or the keybind name is already assigned.
        """
        key_hash = keybind.key_hash()
        if key_hash in self._keybinds_map:
            existing_kb = self._keybinds_map[key_hash]
            raise ValueError(f"Key [{keybind.key}] is already assigned to '{existing_kb.name}'.")
        if keybind.name and keybind.name in self._name_to_hash:
            raise ValueError(f"Name '{keybind.name}' is already assigned to another keybind.")
        self._keybinds_map[key_hash] = keybind
        self._name_to_hash[keybind.name] = key_hash

    def remove(self, name: str) -> None:
        """
        Remove the keybind with the given name.

        Raises
        ------
        ValueError
            If no keybind with that name exists.
        """
        if name not in self._name_to_hash:
            raise ValueError(f"No keybind found with name '{name}'.")
        key_hash = self._name_to_hash[name]
        del self._keybinds_map[key_hash]
        del self._name_to_hash[name]

    def rebind(
        self,
        name: str,
        new_key: Key | None,
        new_key_mods: tuple[KeyMod] | None,
        new_key_action: KeyAction | None = None,
    ) -> None:
        """
        Re-assign an existing keybind to a new key combination.

        Falsy `new_key` / `new_key_action` keep the previous value, while
        `new_key_mods` always replaces the previous modifiers.

        Raises
        ------
        ValueError
            If no keybind with that name exists, or the new key combination is
            already assigned to a different keybind.
        """
        if name not in self._name_to_hash:
            raise ValueError(f"No keybind found with name '{name}'.")
        old_hash = self._name_to_hash[name]
        kb = self._keybinds_map[old_hash]
        new_kb = Keybind(
            name=kb.name,
            key=new_key or kb.key,
            key_action=new_key_action or kb.key_action,
            key_mods=new_key_mods,
            callback=kb.callback,
            args=kb.args,
            kwargs=kb.kwargs,
        )
        new_hash = new_kb.key_hash()
        # Fail before mutating any state: silently overwriting another keybind
        # here would leave its name dangling in `_name_to_hash`.
        if new_hash != old_hash and new_hash in self._keybinds_map:
            existing_kb = self._keybinds_map[new_hash]
            raise ValueError(f"Key [{new_kb.key}] is already assigned to '{existing_kb.name}'.")
        del self._keybinds_map[old_hash]
        self._keybinds_map[new_hash] = new_kb
        self._name_to_hash[name] = new_hash

    def get(self, key: int, modifiers: int, key_action: KeyAction) -> Keybind | None:
        """
        Look up the keybind matching a key event, or None if there is none.
        """
        key_hash = get_key_hash(key, modifiers, key_action)
        if key_hash in self._keybinds_map:
            return self._keybinds_map[key_hash]
        # Try ignoring modifiers (for keybinds where modifiers=None)
        key_hash_no_mods = get_key_hash(key, None, key_action)
        if key_hash_no_mods in self._keybinds_map:
            return self._keybinds_map[key_hash_no_mods]
        return None

    def get_by_name(self, name: str) -> Keybind | None:
        """
        Look up a keybind by its name, or None if there is none.
        """
        if name in self._name_to_hash:
            key_hash = self._name_to_hash[name]
            return self._keybinds_map[key_hash]
        return None

    def __len__(self) -> int:
        return len(self._keybinds_map)

    @property
    def keybinds(self) -> tuple[Keybind]:
        """Return a tuple of all registered Keybinds."""
        return tuple(self._keybinds_map.values())
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/vis/keybindings.py",
"license": "Apache License 2.0",
"lines": 352,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/vis/viewer_plugins/plugins/default_controls.py | import os
from typing import TYPE_CHECKING
import genesis as gs
from genesis.vis.keybindings import Key, Keybind
from ..viewer_plugin import ViewerPlugin
if TYPE_CHECKING:
from genesis.engine.scene import Scene
from genesis.ext.pyrender.node import Node
class DefaultControlsPlugin(ViewerPlugin):
    """
    Default keyboard controls for the Genesis viewer.

    Registers the standard viewer shortcuts (recording, render-mode toggles,
    camera reset, fullscreen, ...) when the plugin is built.
    """

    def __init__(self):
        super().__init__()

    def build(self, viewer, camera: "Node", scene: "Scene"):
        """Register the default keybinds on the viewer."""
        super().build(viewer, camera, scene)
        default_bindings = (
            ("record_video", Key.R, self._toggle_record_video),
            ("save_image", Key.S, self._save_image),
            ("reset_camera", Key.Z, self._reset_camera),
            ("camera_rotation", Key.A, self._toggle_cam_rotation),
            ("shadow", Key.H, self._toggle_shadow),
            ("face_normals", Key.F, self._toggle_face_normals),
            ("vertex_normals", Key.V, self._toggle_vertex_normals),
            ("world_frame", Key.W, self._toggle_world_frame),
            ("link_frame", Key.L, self._toggle_link_frame),
            ("wireframe", Key.D, self._toggle_wireframe),
            ("camera_frustum", Key.C, self._toggle_camera_frustum),
            ("reload_shader", Key.P, self._reload_shader),
            ("fullscreen_mode", Key.F11, self._toggle_fullscreen),
        )
        self.viewer.register_keybinds(
            *(Keybind(name, key, callback=cb) for name, key, cb in default_bindings)
        )

    def _flip_render_flag(self, flag: str, on_message: str, off_message: str):
        # Internal helper: negate a boolean render flag and report its new state.
        flags = self.viewer.render_flags
        flags[flag] = not flags[flag]
        self.viewer.set_message_text(on_message if flags[flag] else off_message)

    def _toggle_cam_rotation(self):
        flags = self.viewer.viewer_flags
        flags["rotate"] = not flags["rotate"]
        self.viewer.set_message_text("Rotation On" if flags["rotate"] else "Rotation Off")

    def _toggle_fullscreen(self):
        flags = self.viewer.viewer_flags
        flags["fullscreen"] = not flags["fullscreen"]
        self.viewer.set_fullscreen(flags["fullscreen"])
        self.viewer.activate()
        self.viewer.set_message_text("Fullscreen On" if flags["fullscreen"] else "Fullscreen Off")

    def _toggle_shadow(self):
        self._flip_render_flag("shadows", "Shadows On", "Shadows Off")

    def _toggle_world_frame(self):
        ctx = self.viewer.gs_context
        if ctx.world_frame_shown:
            ctx.off_world_frame()
            self.viewer.set_message_text("World Frame Off")
        else:
            ctx.on_world_frame()
            self.viewer.set_message_text("World Frame On")

    def _toggle_link_frame(self):
        ctx = self.viewer.gs_context
        if ctx.link_frame_shown:
            ctx.off_link_frame()
            self.viewer.set_message_text("Link Frame Off")
        else:
            ctx.on_link_frame()
            self.viewer.set_message_text("Link Frame On")

    def _toggle_camera_frustum(self):
        ctx = self.viewer.gs_context
        if ctx.camera_frustum_shown:
            ctx.off_camera_frustum()
            self.viewer.set_message_text("Camera Frustum Off")
        else:
            ctx.on_camera_frustum()
            self.viewer.set_message_text("Camera Frustum On")

    def _toggle_face_normals(self):
        self._flip_render_flag("face_normals", "Face Normals On", "Face Normals Off")

    def _toggle_vertex_normals(self):
        self._flip_render_flag("vertex_normals", "Vert Normals On", "Vert Normals Off")

    def _toggle_record_video(self):
        if self.viewer.viewer_flags["record"]:
            # Recording in progress: finalize the video and restore the title.
            self.viewer.save_video()
            self.viewer.set_caption(self.viewer.viewer_flags["window_title"])
        else:
            # Importing moviepy is very slow and not used very often. Let's delay import.
            from moviepy.video.io.ffmpeg_writer import FFMPEG_VideoWriter

            self.viewer._video_recorder = FFMPEG_VideoWriter(
                filename=os.path.join(gs.utils.misc.get_cache_dir(), "tmp_video.mp4"),
                fps=self.viewer.viewer_flags["refresh_rate"],
                size=self.viewer.viewport_size,
            )
            self.viewer.set_caption("{} (RECORDING)".format(self.viewer.viewer_flags["window_title"]))
        self.viewer.viewer_flags["record"] = not self.viewer.viewer_flags["record"]

    def _save_image(self):
        self.viewer._save_image()

    def _toggle_wireframe(self):
        # Cycle: flip wireframe -> all wireframe -> all solid -> default -> flip ...
        flags = self.viewer.render_flags
        if flags["flip_wireframe"]:
            next_state, message = (False, True, False), "All Wireframe"
        elif flags["all_wireframe"]:
            next_state, message = (False, False, True), "All Solid"
        elif flags["all_solid"]:
            next_state, message = (False, False, False), "Default Wireframe"
        else:
            next_state, message = (True, False, False), "Flip Wireframe"
        flags["flip_wireframe"], flags["all_wireframe"], flags["all_solid"] = next_state
        self.viewer.set_message_text(message)

    def _reset_camera(self):
        self.viewer._reset_view()

    def _reload_shader(self):
        self.viewer._renderer.reload_program()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/vis/viewer_plugins/plugins/default_controls.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/vis/viewer_plugins/viewer_plugin.py | from typing import TYPE_CHECKING, Literal
import numpy as np
from typing_extensions import override
from genesis.utils.raycast import Ray
if TYPE_CHECKING:
from genesis.engine.scene import Scene
from genesis.ext.pyrender.node import Node
from genesis.ext.pyrender.viewer import Viewer
from genesis.utils.raycast_qd import Raycaster
# Return type of event handlers: EVENT_HANDLED (True) marks the event as
# consumed; None lets it propagate to the next handler.
EVENT_HANDLE_STATE = Literal[True] | None
EVENT_HANDLED: Literal[True] = True
class ViewerPlugin:
    """
    Base class for handling pyglet.window.Window events.

    Subclasses override only the hooks they need; every default hook is a
    no-op that returns None (i.e. the event is not handled).
    """

    def __init__(self):
        self.viewer = None
        self.camera: "Node | None" = None
        self.scene: "Scene | None" = None
        self._camera_yfov: float = 0.0
        self._tan_half_fov: float = 0.0

    def build(self, viewer: "Viewer", camera: "Node", scene: "Scene"):
        """Build and initialize the plugin with pyrender viewer context."""
        self.viewer = viewer
        self.camera = camera
        self.scene = scene
        # Cache the camera's vertical field of view for screen-space math.
        self._camera_yfov = camera.camera.yfov
        self._tan_half_fov = np.tan(0.5 * self._camera_yfov)

    def on_mouse_motion(self, x: int, y: int, dx: int, dy: int) -> EVENT_HANDLE_STATE:
        """Hook for mouse motion events."""

    def on_mouse_drag(self, x: int, y: int, dx: int, dy: int, buttons: int, modifiers: int) -> EVENT_HANDLE_STATE:
        """Hook for mouse drag events."""

    def on_mouse_press(self, x: int, y: int, button: int, modifiers: int) -> EVENT_HANDLE_STATE:
        """Hook for mouse button press events."""

    def on_mouse_release(self, x: int, y: int, button: int, modifiers: int) -> EVENT_HANDLE_STATE:
        """Hook for mouse button release events."""

    def on_mouse_scroll(self, x: int, y: int, dx: int, dy: int) -> EVENT_HANDLE_STATE:
        """Hook for mouse scroll events."""

    def on_key_press(self, symbol: int, modifiers: int) -> EVENT_HANDLE_STATE:
        """Hook for key press events."""

    def on_key_release(self, symbol: int, modifiers: int) -> EVENT_HANDLE_STATE:
        """Hook for key release events."""

    def on_resize(self, width: int, height: int) -> EVENT_HANDLE_STATE:
        """Hook for window resize events."""

    def update_on_sim_step(self) -> None:
        """Hook invoked once per simulation step."""

    def on_draw(self) -> None:
        """Hook invoked on every draw."""

    def on_close(self) -> None:
        """Hook invoked when the window closes."""
class RaycasterViewerPlugin(ViewerPlugin):
    """
    Base class for viewer plugins that cast rays from mouse positions into the scene.
    """

    def __init__(self) -> None:
        super().__init__()
        self._camera_tan_half_fov: float = 0.0
        self._raycaster: "Raycaster | None" = None

    def build(self, viewer, camera: "Node", scene: "Scene"):
        """Attach the plugin and create the raycaster for the scene."""
        super().build(viewer, camera, scene)
        # NOTE: delayed import to avoid array_class import before gs is fully initialized
        from genesis.utils.raycast_qd import Raycaster

        self._raycaster = Raycaster(self.scene)
        self._camera_tan_half_fov = np.tan(0.5 * self.camera.camera.yfov)

    @override
    def update_on_sim_step(self) -> None:
        super().update_on_sim_step()
        self._raycaster.update()

    def _screen_position_to_ray(self, x: float, y: float) -> Ray:
        """
        Convert a 2D screen position into a world-space ray.

        Parameters
        ----------
        x : float
            The x coordinate on the screen, in pixels.
        y : float
            The y coordinate on the screen, in pixels.

        Returns
        -------
        Ray
            Ray whose origin is the camera position and whose unit direction
            passes through the given screen position.
        """
        viewport = self.viewer._viewport_size
        # Re-center on the viewport middle, then scale by the vertical FOV.
        # NOTE: both axes are divided by the height, i.e. the pixel aspect
        # ratio is deliberately ignored.
        sx = 2.0 * (x - 0.5 * viewport[0]) / viewport[1] * self._camera_tan_half_fov
        sy = 2.0 * (y - 0.5 * viewport[1]) / viewport[1] * self._camera_tan_half_fov
        cam_mtx = self.camera.matrix
        origin = cam_mtx[:3, 3]
        # Camera looks along -Z; offset along the right (+X) and up (+Y) axes.
        direction = -cam_mtx[:3, 2] + cam_mtx[:3, 0] * sx + cam_mtx[:3, 1] * sy
        direction /= np.linalg.norm(direction)
        return Ray(origin, direction)
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/vis/viewer_plugins/viewer_plugin.py",
"license": "Apache License 2.0",
"lines": 98,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Genesis-Embodied-AI/Genesis:tests/test_viewer.py | import sys
import time
import numpy as np
import OpenGL.error
import pytest
import genesis as gs
from genesis.utils.misc import tensor_to_array
from genesis.vis.keybindings import Key, KeyAction, Keybind, KeyMod, MouseButton
from .conftest import IS_INTERACTIVE_VIEWER_AVAILABLE
from .utils import assert_allclose
# Default camera/viewer resolution (width, height) used by the tests below.
CAM_RES = (480, 320)
# Note that software emulation is so slow that it may take minutes to render a single frame...
def wait_for_viewer_events(viewer, condition_fn, timeout=300.0, sleep_interval=0.1):
    """Utility function to wait for viewer events to be processed in a threaded viewer.

    Polls `condition_fn` every `sleep_interval` seconds and raises AssertionError
    if it is still falsy after roughly `timeout` seconds.
    """
    if not viewer.run_in_thread:
        # Non-threaded viewers need an explicit pump of the event queue.
        viewer.dispatch_pending_events()
        viewer.dispatch_events()
    max_attempts = int(timeout / sleep_interval)
    for _ in range(max_attempts):
        if condition_fn():
            return
        time.sleep(sleep_interval)
    raise AssertionError("Keyboard event not processed before timeout")
@pytest.mark.required
@pytest.mark.skipif(not IS_INTERACTIVE_VIEWER_AVAILABLE, reason="Interactive viewer not supported on this platform.")
@pytest.mark.xfail(sys.platform == "win32", raises=OpenGL.error.Error, reason="Invalid OpenGL context.")
def test_interactive_viewer_disable_viewer_defaults():
    """Test that keyboard shortcuts can be disabled in the interactive viewer."""
    # Build a scene with help text and default keybinds explicitly disabled.
    viewer_options = gs.options.ViewerOptions(
        run_in_thread=(sys.platform == "linux"),
        enable_help_text=False,
        enable_default_keybinds=False,
    )
    profiling_options = gs.options.ProfilingOptions(
        show_FPS=False,
    )
    scene = gs.Scene(
        viewer_options=viewer_options,
        profiling_options=profiling_options,
        show_viewer=True,
    )
    scene.build()
    pyrender_viewer = scene.visualizer.viewer._pyrender_viewer
    assert pyrender_viewer.is_active
    # The help-text flag must be off and no keybindings registered.
    assert pyrender_viewer._enable_help_text is False
    assert len(pyrender_viewer._keybindings) == 0
@pytest.mark.required
@pytest.mark.skipif(not IS_INTERACTIVE_VIEWER_AVAILABLE, reason="Interactive viewer not supported on this platform.")
def test_default_viewer_plugin():
    """Exercise default keybinds and the register/remove/remap lifecycle of custom keybinds."""
    scene = gs.Scene(
        viewer_options=gs.options.ViewerOptions(
            camera_pos=(2.0, 0.0, 1.0),
            camera_lookat=(0.0, 0.0, 0.0),
            camera_fov=30,
            res=CAM_RES,
            run_in_thread=(sys.platform == "linux"),
            enable_help_text=True,
            enable_default_keybinds=True,
        ),
        profiling_options=gs.options.ProfilingOptions(
            show_FPS=False,
        ),
        show_viewer=True,
    )
    scene.add_entity(morph=gs.morphs.Plane())
    scene.add_entity(
        morph=gs.morphs.Box(
            pos=(0.0, 0.0, 0.2),
            size=(0.2, 0.2, 0.2),
            euler=(30, 40, 0),
        )
    )
    scene.build()
    pyrender_viewer = scene.visualizer.viewer._pyrender_viewer
    assert pyrender_viewer.is_active
    assert len(pyrender_viewer._keybindings) > 0, "Expected default keybindings to be registered."
    # Add custom keybinds: one without modifiers, one requiring SHIFT+CTRL.
    flags = [False, False, False]
    def toggle_flag(idx):
        # Callback shared by the custom keybinds below.
        flags[idx] = not flags[idx]
    scene.viewer.register_keybinds(
        Keybind(
            name="toggle_flag_0",
            key=Key._0,
            key_action=KeyAction.PRESS,
            callback=lambda: toggle_flag(0),
        ),
        Keybind(
            name="toggle_flag_1",
            key=Key._1,
            key_action=KeyAction.PRESS,
            key_mods=(KeyMod.SHIFT, KeyMod.CTRL),
            callback=toggle_flag,
            args=(1,),
        ),
    )
    # Press key to toggle flag on
    pyrender_viewer.dispatch_event("on_key_press", Key._0, 0)
    # Press key with modifiers to toggle flag off
    pyrender_viewer.dispatch_event("on_key_press", Key._1, KeyMod.SHIFT | KeyMod.CTRL)
    # Press key to toggle the world frame (a default keybind)
    pyrender_viewer.dispatch_event("on_key_press", Key.W, 0)
    # Events may be processed on another thread; wait for both callbacks.
    wait_for_viewer_events(pyrender_viewer, lambda: flags[0] and flags[1])
    assert flags[0], "Expected custom keybind callback to toggle flag on."
    assert flags[1], "Expected custom keybind with key modifiers to toggle flag on."
    assert pyrender_viewer.gs_context.world_frame_shown, "Expected world frame to be shown after pressing 'W' key."
    # Remove the keybind and press key to verify it no longer works
    scene.viewer.remove_keybind("toggle_flag_0")
    pyrender_viewer.dispatch_event("on_key_press", Key._0, 0)
    # Remap the keybind and check it works
    scene.viewer.remap_keybind("toggle_flag_1", new_key=Key._2, new_key_mods=None)
    pyrender_viewer.dispatch_event("on_key_press", Key._2, 0)
    wait_for_viewer_events(pyrender_viewer, lambda: not flags[1])
    assert flags[0], "Keybind was not removed properly."
    assert not flags[1], "Expected rebinded keybind to toggle flag off."
    # Error when remapping non-existent keybind
    with pytest.raises(ValueError):
        scene.viewer.remap_keybind("non_existent_keybind", new_key=Key._3, new_key_mods=None)
    # Error when adding a keybind with same key
    with pytest.raises(ValueError):
        scene.viewer.register_keybinds(
            Keybind(name="conflicting_keybind", key=Key._2, key_action=KeyAction.PRESS, callback=lambda: None),
        )
@pytest.mark.required
@pytest.mark.skipif(not IS_INTERACTIVE_VIEWER_AVAILABLE, reason="Interactive viewer not supported on this platform.")
def test_mouse_interaction_plugin():
    """Drag a box upward with the mouse-spring plugin and check the resulting dynamics."""
    DT = 0.01
    MASS = 100.0
    BOX_LENGTH = 0.2
    STEPS = 20
    DRAG_DY = 8
    SPRING_CONST = 1000.0
    CAM_FOV = 30
    CAM_POS = (0.0, 0.6, 1.2)
    scene = gs.Scene(
        sim_options=gs.options.SimOptions(
            dt=DT,
            # Gravity off so only the mouse spring moves the box.
            gravity=(0.0, 0.0, 0.0),
        ),
        viewer_options=gs.options.ViewerOptions(
            # Forces odd resolution so that mouse clicks are centered on pixels
            res=(2 * (CAM_RES[0] // 2) + 1, 2 * (CAM_RES[0] // 2) + 1),
            camera_pos=CAM_POS,
            # looking to the top of the box
            camera_lookat=(0.0, 0.0, BOX_LENGTH),
            camera_fov=CAM_FOV,
            run_in_thread=(sys.platform == "linux"),
        ),
        show_viewer=True,
        show_FPS=False,
    )
    scene.add_entity(morph=gs.morphs.Plane())
    box = scene.add_entity(
        morph=gs.morphs.Box(
            pos=(0.0, 0.0, BOX_LENGTH / 2),
            size=(BOX_LENGTH, BOX_LENGTH, BOX_LENGTH),
        ),
        material=gs.materials.Rigid(
            # Density chosen so the box mass equals MASS.
            rho=MASS / (BOX_LENGTH**3),
        ),
    )
    _mouse_plugin = scene.viewer.add_plugin(
        gs.vis.viewer_plugins.MouseInteractionPlugin(
            use_force=True,
            spring_const=SPRING_CONST,
        )
    )
    scene.build()
    pyrender_viewer = scene.visualizer.viewer._pyrender_viewer
    assert pyrender_viewer.is_active
    class EventCounterHandler:
        # Counts dispatched mouse events so the test can wait for processing.
        def __init__(self):
            self.count = 0
        def on_mouse_press(self, x: int, y: int, button: int, modifiers: int):
            self.count += 1
        def on_mouse_drag(self, x: int, y: int, dx: int, dy: int, buttons: int, modifiers: int):
            self.count += 1
        def on_mouse_release(self, x: int, y: int, buttons: int, modifiers: int):
            self.count += 1
    event_counter = EventCounterHandler()
    expected_count = 0
    def check_event_count():
        # Each call bumps the expectation and returns a condition for it.
        nonlocal expected_count
        expected_count += 1
        return lambda: event_counter.count == expected_count
    pyrender_viewer.push_handlers(event_counter)
    scene.step()
    assert_allclose(box.get_vel(), 0, tol=gs.EPS)
    initial_pos = box.get_pos().clone()
    viewport_size = pyrender_viewer._viewport_size
    # Click at the viewport center (the box top is centered there).
    x, y = viewport_size[0] // 2, viewport_size[1] // 2
    # Press mouse to grab the box
    pyrender_viewer.dispatch_event("on_mouse_press", x, y, MouseButton.LEFT, 0)
    # Ensure event is processed
    wait_for_viewer_events(pyrender_viewer, check_event_count())
    rgb_arrs = []
    for i in range(STEPS):
        y += DRAG_DY
        pyrender_viewer.dispatch_event("on_mouse_drag", x, y, 0, DRAG_DY, MouseButton.LEFT, 0)
        wait_for_viewer_events(pyrender_viewer, check_event_count())
        scene.step()
        # Snapshot the view at the halfway point and at the end of the drag.
        if (i + 1) % (STEPS // 2) == 0:
            rgb_arr, *_ = pyrender_viewer.render_offscreen(
                pyrender_viewer._camera_node, pyrender_viewer._renderer, rgb=True, depth=False, seg=False, normal=False
            )
            rgb_arrs.append(rgb_arr)
    assert not np.array_equal(rgb_arrs[0], rgb_arrs[1]), "Expected images to be different after dragging the object."
    final_pos = box.get_pos()
    final_vel = box.get_vel()
    assert_allclose(
        final_vel[:2],
        0.0,
        tol=0.002,
        err_msg="Final x and y velocities should be near zero since dragging only in z direction.",
    )
    # Convert the pixel drag into an upper bound on world-space displacement.
    distance_to_box = np.linalg.norm(tensor_to_array(initial_pos) - CAM_POS)
    pixels_to_world = 2.0 * distance_to_box * np.tan(np.radians(CAM_FOV) / 2.0) / viewport_size[1]
    total_world_displacement = STEPS * DRAG_DY * pixels_to_world
    displacement_z = final_pos[2] - initial_pos[2]
    assert displacement_z > gs.EPS, "Box should have moved upward"
    assert displacement_z < total_world_displacement, (
        "Box displacement should be less than mouse displacement from spring lag"
    )
    pyrender_viewer.dispatch_event("on_mouse_release", x, y, MouseButton.LEFT, 0)
    scene.step()
    wait_for_viewer_events(pyrender_viewer, check_event_count())
    rgb_arr, *_ = pyrender_viewer.render_offscreen(
        pyrender_viewer._camera_node, pyrender_viewer._renderer, rgb=True, depth=False, seg=False, normal=False
    )
    assert not np.array_equal(rgb_arrs[-1], rgb_arr), "Expected visualization to change after releasing the object."
    # The forces from mouse spring are approximate, so use a large tolerance.
    # FIXME: Use a more accurate model to predict final velocity.
    total_sim_time = STEPS * DT
    avg_mouse_velocity = total_world_displacement / total_sim_time
    num_tau = total_sim_time * np.sqrt(SPRING_CONST / MASS)
    velocity_fraction = 1.0 - (1.0 + num_tau) * np.exp(-num_tau)
    expected_vel_z = avg_mouse_velocity * velocity_fraction
    assert_allclose(
        final_vel[2],
        expected_vel_z,
        rtol=0.5,
        err_msg="Final z velocity does not match expected value based on spring dynamics.",
    )
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "tests/test_viewer.py",
"license": "Apache License 2.0",
"lines": 244,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Genesis-Embodied-AI/Genesis:genesis/utils/usd/usd_context.py | import io
import logging
import os
import shutil
import subprocess
import sys
from pathlib import Path
import numpy as np
import torch
from pxr import Sdf, Usd, UsdGeom, UsdPhysics, UsdShade
import genesis as gs
import genesis.utils.mesh as mu
from .usd_material import parse_material_preview_surface
from .usd_utils import extract_scale
# Check for Omniverse Kit support (required for USD baking)
# Note: CI workflows should set OMNI_KIT_ACCEPT_EULA=yes in their env section
try:
import omni.kit_app
HAS_OMNIVERSE_KIT_SUPPORT = True
except ImportError:
HAS_OMNIVERSE_KIT_SUPPORT = False
def decompress_usdz(usdz_path: str):
    """Extract a .usdz archive (if not already extracted) and return the path to its root USD layer."""
    extract_dir = mu.get_usd_zip_path(usdz_path)
    # The first file in the package must be a native usd file.
    # See https://openusd.org/docs/Usdz-File-Format-Specification.html
    archive = Sdf.ZipFile.Open(usdz_path)
    file_names = archive.GetFileNames()
    root_file = file_names[0]
    if not root_file.lower().endswith(gs.options.morphs.USD_FORMATS[:-1]):
        gs.raise_exception(f"Invalid usdz root file: {root_file}")
    root_path = os.path.join(extract_dir, root_file)
    if os.path.exists(root_path):
        # Root layer already on disk: reuse the previous extraction.
        gs.logger.info(f"Decompressed assets detected and used: {root_path}.")
    else:
        for file_name in file_names:
            file_data = io.BytesIO(archive.GetFile(file_name))
            target_path = os.path.join(extract_dir, file_name)
            os.makedirs(os.path.dirname(target_path), exist_ok=True)
            with open(target_path, "wb") as out:
                out.write(file_data.read())
        gs.logger.warning(f"USDZ file {usdz_path} decompressed to {root_path}.")
    return root_path
class UsdContext:
"""
Context manager for USD stage parsing and material processing.
This class provides a centralized context for parsing USD files, managing materials,
computing transforms, and handling asset preprocessing. It supports USDZ decompression,
material baking, coordinate system conversion, and asset symlink resolution.
Parameters
----------
stage_file : str
Path to the USD stage file (.usd, .usda, .usdc) or USDZ archive (.usdz).
If a USDZ file is provided, it will be automatically decompressed.
usd_bake_cache : bool, optional
If True, enables material baking and uses last time baked assets if available.
Otherwise, will re-bake materials every time.
Default is True.
Notes
-----
- USDZ files are automatically decompressed to a temporary directory
- The stage's up-axis and meter scale are detected and stored for transform computations
- Material parsing is lazy (only when find_all_materials() is called)
"""
def __init__(self, stage_file: str, use_bake_cache: bool = True):
    """Open the USD stage, resolving USDZ archives and bake caches first."""
    # decompress usdz
    if stage_file.lower().endswith(gs.options.morphs.USD_FORMATS[-1]):
        stage_file = decompress_usdz(stage_file)
    # detect if baking is needed: requires omniverse-kit AND a CUDA GPU
    self._need_bake = HAS_OMNIVERSE_KIT_SUPPORT
    if HAS_OMNIVERSE_KIT_SUPPORT:
        if not torch.cuda.is_available():
            gs.logger.warning("USD baking requires CUDA GPU. USD baking will be disabled.")
            self._need_bake = False
    else:
        gs.logger.warning(
            "omniverse-kit not found. USD baking will be disabled. "
            "Please install it with `pip install --extra-index-url https://pypi.nvidia.com omniverse-kit`. "
            "See https://genesis-world.readthedocs.io/en/latest/user_guide/getting_started/usd_import.html."
        )
    # detect bake file caches: reuse a previous bake when allowed, otherwise
    # wipe any stale bake folder so it gets regenerated
    self._bake_folder = mu.get_usd_bake_path(stage_file)
    self._bake_stage_file = os.path.join(self._bake_folder, os.path.basename(stage_file))
    if use_bake_cache:
        if os.path.exists(self._bake_stage_file):
            self._need_bake = False
            gs.logger.info(f"Baked assets detected and used: {self._bake_stage_file}")
            stage_file = self._bake_stage_file
    else:
        if os.path.exists(self._bake_stage_file):
            shutil.rmtree(self._bake_folder)
    self._stage_file = stage_file
    if not os.path.isfile(self._stage_file):
        gs.raise_exception(
            f"USD file not found: {self._stage_file}. Check that the path is correct and the file exists."
        )
    try:
        self._stage = Usd.Stage.Open(self._stage_file)
    except Exception as e:
        gs.raise_exception_from(
            f"Failed to open USD stage: {self._stage_file}. Ensure the file exists and is a valid USD file.", e
        )
    # Material state is filled lazily by find_all_materials().
    self._material_properties: dict[str, tuple[dict, str]] = {}  # material_id -> (material_dict, uv_name)
    self._material_parsed = False
    self._bake_material_paths: dict[str, str] = {}  # material_id -> bake_material_path
    self._prim_material_bindings: dict[str, str] = {}  # prim_path -> material_path
    # Cache of local-to-world transforms at the default time code.
    self._xform_cache = UsdGeom.XformCache(Usd.TimeCode.Default())
    # Stage metrics used by compute_transform().
    self._is_yup = UsdGeom.GetStageUpAxis(self._stage) == "Y"
    self._meter_scale = UsdGeom.GetStageMetersPerUnit(self._stage)
@property
def stage(self) -> Usd.Stage:
    """
    Get the USD stage object opened from ``stage_file``.
    """
    return self._stage
@property
def stage_file(self) -> str:
    """
    Get the path to the USD stage file (possibly the decompressed or baked variant).
    """
    return self._stage_file
def get_prim_id(self, prim: Usd.Prim) -> str:
    """
    Get a unique identifier for a prim based on its layer specification.

    The identifier is the layer file path concatenated with the prim's path
    string, so the same prim is identified consistently across layers and
    when using a baked stage (the bake layer maps back to the original file).
    """
    prim_stack = prim.GetPrimStack()
    # Prefer the first "over" spec; fall back to the weakest spec otherwise.
    spec = prim_stack[-1]
    for candidate in prim_stack:
        if candidate.specifier == Sdf.SpecifierOver:
            spec = candidate
            break
    layer_id = spec.layer.identifier
    if layer_id == self._bake_stage_file:
        layer_id = self._stage_file
    return layer_id + spec.path.pathString
def get_binding_material(self, prim: Usd.Prim) -> UsdShade.Material | None:
    """
    Get the material bound to a geometry prim, or None if no binding was
    recorded (bindings are collected by find_all_materials()).
    """
    binding_path = self._prim_material_bindings.get(str(prim.GetPath()))
    if binding_path is None:
        return None
    return UsdShade.Material(self._stage.GetPrimAtPath(binding_path))
def compute_transform(self, prim: Usd.Prim) -> np.ndarray:
    """
    Compute the local-to-world transformation matrix for a prim.

    Returns
    -------
    np.ndarray, shape (4, 4), dtype float32
        The transposed (column-vector convention) local-to-world transform,
        adjusted for the stage's up-axis and meters-per-unit scale.
    """
    transform = self._xform_cache.GetLocalToWorldTransform(prim)
    T_usd = np.asarray(transform, dtype=np.float32)  # translation on the bottom row
    # Reorient Y-up stages (see mu.Y_UP_TRANSFORM for the convention used).
    if self._is_yup:
        T_usd @= mu.Y_UP_TRANSFORM
    # Scale columns 0-2 of every row by meters-per-unit: this scales both the
    # linear block and the bottom-row translation. NOTE(review): it therefore
    # also scales the rotation/scale part — presumably intended so that the
    # scale later extracted via extract_scale() is in meters; confirm.
    T_usd[:, :3] *= self._meter_scale
    # USD matrices use row-vector convention; transpose to column-vector form.
    return T_usd.transpose()
def compute_gs_transform(self, prim: Usd.Prim, ref_prim: Usd.Prim = None) -> tuple[np.ndarray, np.ndarray]:
    """
    Compute the Genesis transform (pose and scale) for a prim.

    Parameters
    ----------
    prim : Usd.Prim
        The prim whose transform is computed.
    ref_prim : Usd.Prim, optional
        If given, the returned pose is expressed relative to this prim's
        (scale-free) pose; the scale is still that of `prim`.

    Returns
    -------
    tuple[np.ndarray, np.ndarray]
        ``(Q, S)`` as produced by `extract_scale`: the scale-free transform
        and the extracted scale.
    """
    Q, S = extract_scale(self.compute_transform(prim))
    if ref_prim is None:
        return Q, S
    # Re-express Q in the reference prim's frame (reference scale is unused).
    Q_ref, S_ref = self.compute_gs_transform(ref_prim)
    Q_rel = np.linalg.inv(Q_ref) @ Q
    return Q_rel, S
def apply_surface(self, geom_prim: Usd.Prim, surface: gs.surfaces.Surface):
    """
    Apply material properties from USD to a Genesis surface object.

    Parameters
    ----------
    geom_prim : Usd.Prim
        The geometry prim whose recorded material binding should be applied.
    surface : gs.surfaces.Surface
        Template surface; a copy is updated and returned (the input is not mutated).

    Returns
    -------
    tuple
        ``(applied_surface, uv_name, surface_id, bake_success)``.
        ``surface_id`` is None when the prim has no recorded binding, and
        ``bake_success`` is True/False only for materials queued for baking
        (None otherwise).
    """
    geom_path = str(geom_prim.GetPath())
    applied_surface = surface.copy()
    # No recorded binding: return the unmodified copy with defaults.
    if geom_path not in self._prim_material_bindings:
        return applied_surface, "st", None, None
    surface_id = self._prim_material_bindings[geom_path]
    surface_dict, uv_name = self._material_properties.get(surface_id, ({}, "st"))
    # accepted keys: color_texture, opacity_texture, roughness_texture, metallic_texture, normal_texture, emissive_texture, ior
    applied_surface.update_texture(**surface_dict)
    # For materials queued for baking, success means the parsed properties
    # were non-empty; other materials report no bake status.
    bake_success = bool(surface_dict) if surface_id in self._bake_material_paths else None
    return applied_surface, uv_name, surface_id, bake_success
def find_all_rigid_entities(self) -> list[Usd.Prim]:
    """
    Find all rigid body entities in the USD stage.

    Traverses the stage and collects prims carrying the ArticulationRoot,
    RigidBody, or Collision physics API; descendants of a collected prim
    are pruned from the traversal.
    """
    entity_prims = []
    prim_iter = iter(Usd.PrimRange(self._stage.GetPseudoRoot()))
    for prim in prim_iter:
        if (
            prim.HasAPI(UsdPhysics.ArticulationRootAPI)
            or prim.HasAPI(UsdPhysics.RigidBodyAPI)
            or prim.HasAPI(UsdPhysics.CollisionAPI)
        ):
            entity_prims.append(prim)
            prim_iter.PruneChildren()
    return entity_prims
def find_all_materials(self):
    """
    Parse all materials in the USD stage and optionally bake complex materials.

    Populates ``self._material_properties`` (material id -> (property dict, uv
    name)), ``self._prim_material_bindings`` (geom path -> material id) and,
    when ``self._need_bake`` is set, ``self._bake_material_paths`` for
    materials whose preview surface could not be parsed. Those materials are
    then baked via an external subprocess and re-parsed from the baked stage.
    Idempotent: a second call returns immediately.
    """
    if self._material_parsed:
        return
    # parse materials
    bound_prims = []
    for prim in self._stage.Traverse():
        if prim.IsA(UsdGeom.Gprim) or prim.IsA(UsdGeom.Subset):
            bound_prims.append(prim)
    # Resolve the bound material of every geometry/subset prim in one batch.
    materials = UsdShade.MaterialBindingAPI.ComputeBoundMaterials(bound_prims)[0]
    for bound_prim, material in zip(bound_prims, materials):
        geom_path = str(bound_prim.GetPath())
        material_prim = material.GetPrim()
        if material_prim.IsValid():
            # TODO: material_id is also reserved for group_by_material option.
            material_id = self.get_prim_id(material_prim)
            if material_id not in self._material_properties:
                material_dict, uv_name = parse_material_preview_surface(material)
                self._material_properties[material_id] = material_dict, uv_name
                # An empty property dict means the preview surface could not be
                # parsed; schedule the material for baking if requested.
                if self._need_bake and not material_dict:
                    self._bake_material_paths[material_id] = str(material_prim.GetPath())
            self._prim_material_bindings[geom_path] = material_id
        else:
            # No material bound: fall back to the prim's displayColor, if any.
            if bound_prim.IsA(UsdGeom.Gprim):
                gprim = UsdGeom.Gprim(bound_prim)
                display_colors = np.asarray(gprim.GetDisplayColorPrimvar().Get() or [], dtype=np.float32)
                if display_colors.size > 0:
                    material_id = self.get_prim_id(bound_prim)
                    color_texture = gs.textures.ColorTexture(color=tuple(display_colors[0]))
                    self._material_properties[material_id] = {"color_texture": color_texture}, "st"
                    self._prim_material_bindings[geom_path] = material_id
    self._material_parsed = True
    if not self._bake_material_paths:
        return
    # Pick a GPU device for baking when Genesis itself runs on CPU.
    device = gs.utils.get_device(gs.cuda)[0] if gs.device.type == "cpu" else gs.device
    self.replace_asset_symlinks()
    os.makedirs(self._bake_folder, exist_ok=True)
    # Note that it is necessary to call 'bake_usd_material' as a subprocess to ensure proper isolation of omniverse
    # kit, otherwise the global conversion registry of some Python bindings will be conflicting with each other,
    # ultimately leading to segfault...
    commands = [
        sys.executable,
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "usd_bake.py"),
        "--input_file",
        self._stage_file,
        "--output_dir",
        self._bake_folder,
        "--usd_material_paths",
        *self._bake_material_paths.values(),
        "--device",
        str(device.index if device.index is not None else 0),
        "--log_level",
        logging.getLevelName(gs.logger.level).lower(),
    ]
    gs.logger.debug(f"Execute: {' '.join(commands)}")
    env = dict(os.environ)
    env["OMNI_KIT_ALLOW_ROOT"] = "1"
    try:
        result = subprocess.run(commands, capture_output=True, check=True, text=True, env=env)
        if result.stdout:
            gs.logger.debug(result.stdout)
        if result.stderr:
            gs.logger.warning(result.stderr)
    except (subprocess.CalledProcessError, OSError) as e:
        gs.logger.warning(
            f"Baking process failed: {e} A few possible reasons:"
            "\n\t1. The first launch may require accepting the Omniverse EULA. "
            "Set `OMNI_KIT_ACCEPT_EULA=yes` to accept it automatically."
            "\n\t2. The first launch may install additional dependencies, which can cause a timeout."
            "\n\t3. If you have multiple Python environments (especially with different Python versions), "
            "Omniverse Kit extensions may conflict across environments. Try to remove the shared omniverse "
            "extension folder (e.g. `~/.local/share/ov/data/ext` in Linux) and try again."
        )
    if os.path.exists(self._bake_stage_file):
        gs.logger.warning(f"USD materials baked to file {self._bake_stage_file}")
        # Switch to the baked stage and re-parse the previously unparseable materials.
        self._stage = Usd.Stage.Open(self._bake_stage_file)
        for bake_material_id, bake_material_path in self._bake_material_paths.items():
            bake_material_usd = UsdShade.Material(self._stage.GetPrimAtPath(bake_material_path))
            bake_material_dict, uv_name = parse_material_preview_surface(bake_material_usd)
            self._material_properties[bake_material_id] = bake_material_dict, uv_name
    # Remove intermediate baked-texture folders (texture data has been loaded into memory above).
    for baked_texture_obj in Path(self._bake_folder).glob("baked_textures*"):
        shutil.rmtree(baked_texture_obj)
def replace_asset_symlinks(self):
    """
    Replace asset path symlinks with actual file copies when file extensions differ.

    Some USD assets use symlinks that point to files with different extensions
    (e.g., .png symlink pointing to .exr). This method finds such symlinks and
    replaces them with actual file copies to ensure compatibility.
    """
    # Collect every resolved asset path referenced by any attribute on the stage.
    asset_paths = set()
    for prim in self._stage.TraverseAll():
        for attr in prim.GetAttributes():
            value = attr.Get()
            if isinstance(value, Sdf.AssetPath):
                asset_paths.add(value.resolvedPath)
            elif isinstance(value, list):
                for v in value:
                    if isinstance(v, Sdf.AssetPath):
                        asset_paths.add(v.resolvedPath)
    for asset_path in map(Path, asset_paths):
        if not asset_path.is_symlink():
            continue
        real_path = asset_path.resolve()
        if asset_path.suffix.lower() == real_path.suffix.lower():
            continue
        # Bug fix: verify the target is a regular file BEFORE unlinking. The
        # previous order unlinked first, so a dangling (or directory) target
        # destroyed the symlink without providing any replacement.
        if not real_path.is_file():
            continue
        asset_path.unlink()
        gs.logger.warning(f"Replacing symlink {asset_path} with real file {real_path}.")
        shutil.copy2(real_path, asset_path)
def find_joints_in_range(prim_range: Usd.PrimRange) -> list[Usd.Prim]:
    """
    Find all joints in a prim range.

    Parameters
    ----------
    prim_range : Usd.PrimRange
        A prim range to search.

    Returns
    -------
    list[Usd.Prim]
        List of joint prims found in the range.
    """
    # A prim qualifies as a joint when it is typed as (a subclass of) UsdPhysics.Joint.
    return [prim for prim in prim_range if prim.IsA(UsdPhysics.Joint)]
def find_rigid_bodies_in_range(prim_range: Usd.PrimRange) -> set[str]:
    """
    Find all rigid bodies in a prim range.

    When a rigid body is found, its children are pruned from the search since they
    are part of that rigid body and shouldn't be counted separately.

    Parameters
    ----------
    prim_range : Usd.PrimRange
        A prim range to search. Must support PruneChildren().

    Returns
    -------
    set[str]
        Set of rigid body prim paths (as strings).
    """
    found: set[str] = set()
    walker = iter(prim_range)
    for node in walker:
        is_rigid = node.HasAPI(UsdPhysics.RigidBodyAPI) or node.HasAPI(UsdPhysics.CollisionAPI)
        if not is_rigid:
            continue
        found.add(str(node.GetPath()))
        # Everything below this prim belongs to the same rigid body.
        walker.PruneChildren()
    return found
def extract_links_referenced_by_joints(
    stage: Usd.Stage, joints: list[Usd.Prim], check_rigid_body: bool = True
) -> set[str]:
    """
    Extract links referenced by joints.

    Parameters
    ----------
    stage : Usd.Stage
        The USD stage.
    joints : list[Usd.Prim]
        List of joint prims to analyze.
    check_rigid_body : bool, optional
        If True, only include links that are rigid bodies (have RigidBodyAPI or CollisionAPI).
        If False, include all links referenced by joints. Default is True.

    Returns
    -------
    set[str]
        Set of link prim paths (as strings) referenced by the joints.
    """
    links_referenced: set[str] = set()
    for joint_prim in joints:
        joint = UsdPhysics.Joint(joint_prim)
        # body0 and body1 are handled identically; iterate over both
        # relationships instead of duplicating the logic verbatim.
        for body_rel in (joint.GetBody0Rel(), joint.GetBody1Rel()):
            targets = body_rel.GetTargets()
            if not targets:
                continue
            # Joints reference at most one prim per body slot; use the first target.
            body_path = str(targets[0])
            if check_rigid_body:
                body_prim = stage.GetPrimAtPath(body_path)
                if not (
                    body_prim.IsValid()
                    and (body_prim.HasAPI(UsdPhysics.RigidBodyAPI) or body_prim.HasAPI(UsdPhysics.CollisionAPI))
                ):
                    continue
            links_referenced.add(body_path)
    return links_referenced
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/utils/usd/usd_context.py",
"license": "Apache License 2.0",
"lines": 387,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/utils/usd/usd_geometry.py | import re
from typing import Dict, List
import numpy as np
import trimesh
from pxr import Usd, UsdGeom, UsdPhysics
import genesis as gs
from genesis.utils import geom as gu
from .usd_context import UsdContext
from .usd_utils import AXES_T, usd_attr_array_to_numpy, usd_primvar_array_to_numpy
def geom_exception(geom_type, geom_id, stage_file, reason_msg):
    # Raise a Genesis exception with a uniform message identifying the offending
    # geometry (type + id) and the USD file it came from.
    gs.raise_exception(f"{reason_msg} for {geom_type} {geom_id} in usd file {stage_file}.")
def get_triangle_ids(tri_starts, tri_counts):
    """Expand per-face (start, count) pairs into a flat array of triangle indices.

    For face ``i`` the output contains the consecutive run
    ``tri_starts[i], tri_starts[i] + 1, ..., tri_starts[i] + tri_counts[i] - 1``,
    with all runs concatenated in face order.
    """
    # First output slot belonging to each face's run.
    run_heads = np.cumsum(tri_counts, dtype=np.int32) - tri_counts
    # Position of every output slot within its own run.
    local_offsets = np.arange(tri_counts.sum(), dtype=np.int32) - np.repeat(run_heads, tri_counts)
    return np.repeat(tri_starts, tri_counts) + local_offsets
def parse_prim_geoms(
    context: UsdContext,
    prim: Usd.Prim,
    link_prim: Usd.Prim,
    links_g_infos: List[List[Dict]],
    link_path_to_idx: Dict[str, int],
    morph: gs.morphs.USD,
    surface: gs.surfaces.Surface,
    match_visual=False,
    match_collision=False,
):
    """
    Recursively parse a prim subtree, collecting visual/collision geometry info per link.

    For every geometric prim (mesh or USD primitive shape) under ``prim``, builds
    Genesis meshes with materials, UVs and transforms resolved, and appends geometry
    info dicts to ``links_g_infos`` for the owning link (looked up via
    ``link_path_to_idx``). ``match_visual`` / ``match_collision`` are inherited by
    children once a prim name matches the corresponding morph patterns.

    Parameters
    ----------
    context : UsdContext
        Parsing context (materials, transforms, prim ids).
    prim : Usd.Prim
        Root of the subtree to parse.
    link_prim : Usd.Prim
        Closest ancestor link prim (None until one is encountered).
    links_g_infos : List[List[Dict]]
        Per-link output lists of geometry info dicts (mutated in place).
    link_path_to_idx : Dict[str, int]
        Maps link prim paths to indices into ``links_g_infos``.
    morph : gs.morphs.USD
        Morph options (scale, mesh prim name patterns, source file).
    surface : gs.surfaces.Surface
        Base surface to copy and specialize per geometry.
    match_visual, match_collision : bool
        Whether an ancestor already matched the visual/collision name patterns.
    """
    if not prim.IsActive():
        return
    # Entering a link prim: geometry below belongs to it.
    if str(prim.GetPath()) in link_path_to_idx:
        link_prim = prim
    if not match_visual:
        for pattern in morph.visual_mesh_prim_patterns:
            if re.match(pattern, prim.GetName()):
                match_visual = True
                break
    if not match_collision:
        for pattern in morph.collision_mesh_prim_patterns:
            if re.match(pattern, prim.GetName()):
                match_collision = True
                break
    if link_prim is not None and prim.IsA(UsdGeom.Gprim):
        # parse materials
        geom_surface, geom_uvname, _surface_id, bake_success = context.apply_surface(prim, surface)
        gprim = UsdGeom.Gprim(prim)
        uvs = {geom_uvname: None}
        # parse transform: pose Q relative to the link, scale S folded into a 4x4 ST.
        geom_Q, geom_S = context.compute_gs_transform(prim, link_prim)
        geom_S *= morph.scale
        geom_ST = np.eye(4, dtype=geom_S.dtype)
        geom_ST[:3, :3] = geom_S
        geom_Q[:3, 3] *= morph.scale
        geom_id = context.get_prim_id(prim)
        # parse geometry
        meshes = []
        if prim.IsA(UsdGeom.Mesh):
            mesh_prim = UsdGeom.Mesh(prim)
            # parse vertices
            points = usd_attr_array_to_numpy(mesh_prim.GetPointsAttr(), np.float32)
            if points.size == 0:
                geom_exception("Mesh", geom_id, morph.file, "No vertices")
            # parse faces
            faces = usd_attr_array_to_numpy(mesh_prim.GetFaceVertexIndicesAttr(), np.int32)
            face_vertex_counts = usd_attr_array_to_numpy(mesh_prim.GetFaceVertexCountsAttr(), np.int32)
            points_faces_varying = False
            # parse normals
            normals = usd_attr_array_to_numpy(mesh_prim.GetNormalsAttr(), np.float32, True)
            if normals is not None and normals.shape[0] != points.shape[0]:
                if normals.shape[0] == faces.shape[0]:  # face varying meshes, adjacent faces do not share vertices
                    points_faces_varying = True
                else:
                    gs.logger.warning(
                        f"Normals size mismatch for Mesh {geom_id} in {morph.file}: "
                        f"expected {points.shape[0]} (vertex) or {faces.shape[0]} (faceVarying), "
                        f"got {normals.shape[0]}. Discarding normals for this mesh."
                    )
                    normals = None
            # parse geom subsets: each face-based subset becomes its own Genesis mesh
            # with its own material; leftover faces fall back to the mesh material.
            subset_infos = []
            face_used_mask = np.full(len(face_vertex_counts), False, dtype=np.bool_)
            subsets = UsdGeom.Subset.GetAllGeomSubsets(mesh_prim)
            for subset in subsets:
                subset_prim = subset.GetPrim()
                elem_type = str(subset.GetElementTypeAttr().Get() or "face")
                if str(elem_type) == "face":
                    subset_face_ids_attr = subset.GetIndicesAttr()
                    subset_face_ids = usd_attr_array_to_numpy(subset_face_ids_attr, np.int32)
                    if subset_face_ids.size == 0:
                        continue
                    face_used_mask[subset_face_ids] = True
                    subset_surface, subset_uvname, _, subset_bake_success = context.apply_surface(subset_prim, surface)
                    subset_geom_id = context.get_prim_id(subset_prim)
                    subset_infos.append(
                        (subset_face_ids, subset_surface, subset_uvname, subset_geom_id, subset_bake_success)
                    )
                    uvs[subset_uvname] = None
                else:
                    gs.logger.warning(f"Unsupported geom subset element type: {elem_type} for {geom_id}")
            subset_unused = ~face_used_mask
            if subset_unused.any():
                subset_infos.append((subset_unused, geom_surface, geom_uvname, geom_id, bake_success))
            # parse UVs
            for uvname in uvs.keys():
                uv = usd_primvar_array_to_numpy(UsdGeom.PrimvarsAPI(prim).GetPrimvar(uvname), np.float32, True)
                if uv is not None:
                    uv[:, 1] = 1.0 - uv[:, 1]  # Flip V coordinate
                    if uv.shape[0] != points.shape[0]:
                        if uv.shape[0] == faces.shape[0]:
                            points_faces_varying = True
                        elif uv.shape[0] == 1:
                            uv = None
                        else:
                            gs.logger.warning(
                                f"UV size mismatch for Mesh {geom_id} in {morph.file}: "
                                f"expected {points.shape[0]} (vertex) or {faces.shape[0]} (faceVarying), "
                                f"got {uv.shape[0]}. Discarding UV data for this mesh."
                            )
                            uv = None
                uvs[uvname] = uv
            # process faces
            if face_vertex_counts.size == 0:
                triangles = np.empty((0, 3), dtype=np.int32)
                face_triangle_starts = np.empty(0, dtype=np.int32)
            else:
                # rearrange points and faces: de-share vertices for faceVarying data
                if points_faces_varying:
                    if normals is not None and normals.shape[0] == points.shape[0]:
                        normals = normals[faces]
                    for uvname in uvs.keys():
                        uv = uvs[uvname]
                        if uv is not None and uv.shape[0] == points.shape[0]:
                            uvs[uvname] = uv[faces]
                    points = points[faces]
                    faces = np.arange(faces.shape[0], dtype=np.int32)
                # triangulate faces (fan triangulation for n-gons)
                # TODO: discard degenerated faces
                if np.max(face_vertex_counts) > 3:
                    triangles, face_triangle_starts = [], []
                    bi, ti = 0, 0  # bi: index into faces; ti: running triangle count
                    for face_vertex_count in face_vertex_counts:
                        face_triangle_starts.append(ti)
                        if face_vertex_count == 3:
                            triangles.append([faces[bi + 0], faces[bi + 1], faces[bi + 2]])
                        elif face_vertex_count > 3:
                            for i in range(1, face_vertex_count - 1):
                                triangles.append([faces[bi + 0], faces[bi + i], faces[bi + i + 1]])
                        bi += face_vertex_count
                        ti += face_vertex_count - 2
                    triangles = np.asarray(triangles, dtype=np.int32)
                    face_triangle_starts = np.asarray(face_triangle_starts, dtype=np.int32)
                else:
                    # All faces already triangles: one triangle per face.
                    triangles = faces.reshape(-1, 3)
                    face_triangle_starts = np.arange(len(face_vertex_counts), dtype=np.int32)
            # process mesh: build one Genesis mesh per subset
            for subset_face_ids, subset_surface, subset_uvname, subset_geom_id, subset_bake_success in subset_infos:
                tri_starts = face_triangle_starts[subset_face_ids]
                tri_counts = face_vertex_counts[subset_face_ids] - 2
                tri_ids = get_triangle_ids(tri_starts, tri_counts)
                subset_triangles = triangles[tri_ids]
                subset_uv = uvs[subset_uvname]
                processed_mesh = trimesh.Trimesh(
                    vertices=points,
                    faces=subset_triangles,
                    vertex_normals=normals,
                    visual=trimesh.visual.TextureVisuals(uv=subset_uv) if subset_uv is not None else None,
                    process=True,
                )
                # TODO: use a more efficient custom function to remove unreferenced vertices
                processed_mesh.remove_unreferenced_vertices()
                processed_mesh.apply_transform(geom_ST)
                subset_points = processed_mesh.vertices
                subset_triangles = processed_mesh.faces
                subset_normals = processed_mesh.vertex_normals
                if subset_uv is not None:
                    subset_uv = processed_mesh.visual.uv
                mesh = gs.Mesh.from_attrs(
                    verts=subset_points,
                    faces=subset_triangles,
                    normals=subset_normals,
                    surface=subset_surface,
                    uvs=subset_uv,
                )
                mesh.metadata.update(
                    {
                        "mesh_path": context.stage_file,  # unbaked file or cache
                        "name": subset_geom_id,
                        "bake_success": bool(subset_bake_success),
                    }
                )
                meshes.append(mesh)
            geom_data = None
            gs_type = gs.GEOM_TYPE.MESH
        else:  # primitive geometries
            geom_S_diag = np.diag(geom_S)
            if prim.IsA(UsdGeom.Plane):
                plane_prim = UsdGeom.Plane(prim)
                width = plane_prim.GetWidthAttr().Get()
                length = plane_prim.GetLengthAttr().Get()
                axis_T = AXES_T[plane_prim.GetAxisAttr().Get() or "Z"]
                w = float(width) * 0.5
                l = float(length) * 0.5
                # Two-triangle quad centered at the origin, facing +Z before re-axising.
                tmesh = trimesh.Trimesh(
                    vertices=np.array([[-w, -l, 0.0], [w, -l, 0.0], [w, l, 0.0], [-w, l, 0.0]], dtype=np.float32),
                    faces=np.array([[0, 1, 2], [0, 2, 3]], dtype=np.int32),
                    face_normals=np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32),
                )
                tmesh.apply_transform(axis_T)
                geom_data = np.array([0.0, 0.0, 1.0])
                gs_type = gs.GEOM_TYPE.PLANE
            elif prim.IsA(UsdGeom.Sphere):
                sphere_prim = UsdGeom.Sphere(prim)
                radius = sphere_prim.GetRadiusAttr().Get()
                tmesh = trimesh.creation.icosphere(radius=radius, subdivisions=2)
                geom_data = np.array([radius]) * geom_S_diag
                gs_type = gs.GEOM_TYPE.SPHERE
            elif prim.IsA(UsdGeom.Capsule):
                capsule_prim = UsdGeom.Capsule(prim)
                radius = capsule_prim.GetRadiusAttr().Get()
                height = capsule_prim.GetHeightAttr().Get()
                axis_T = AXES_T[capsule_prim.GetAxisAttr().Get() or "Z"]
                # TODO: create different trimesh for visual and collision
                tmesh = trimesh.creation.capsule(radius=radius, height=height, count=(8, 12))
                tmesh.apply_transform(axis_T)
                geom_data = np.array([radius, height, 1.0]) * geom_S_diag  # TODO: use the correct direction
                gs_type = gs.GEOM_TYPE.CAPSULE
            elif prim.IsA(UsdGeom.Cube):
                cube_prim = UsdGeom.Cube(prim)
                size = cube_prim.GetSizeAttr().Get()
                extents = np.array([size, size, size], dtype=np.float32)
                tmesh = trimesh.creation.box(extents=extents)
                geom_data = extents * geom_S_diag
                geom_surface.smooth = False
                gs_type = gs.GEOM_TYPE.BOX
            elif prim.IsA(UsdGeom.Cylinder):
                cylinder_prim = UsdGeom.Cylinder(prim)
                radius = cylinder_prim.GetRadiusAttr().Get()
                height = cylinder_prim.GetHeightAttr().Get()
                axis_T = AXES_T[cylinder_prim.GetAxisAttr().Get() or "Z"]
                tmesh = trimesh.creation.cylinder(radius=radius, height=height, count=(8, 12))
                tmesh.apply_transform(axis_T)
                geom_data = np.array([radius, height, 1.0]) * geom_S_diag  # TODO: use the correct direction
                geom_surface.smooth = False
                gs_type = gs.GEOM_TYPE.CYLINDER
            else:
                gs.raise_exception(f"Unsupported geometry type: {prim.GetTypeName()}")
            tmesh.apply_transform(geom_ST)
            metadata = {
                "name": geom_id,
                "bake_success": bool(bake_success),
            }
            meshes.append(gs.Mesh.from_trimesh(tmesh, surface=geom_surface, metadata=metadata))
        geom_pos = geom_Q[:3, 3]
        geom_quat = gu.R_to_quat(geom_Q[:3, :3])
        # When neither pattern matched anywhere, a geom counts as both visual and collision.
        is_guide = str(gprim.GetPurposeAttr().Get() or "default") == "guide"
        is_visible = str(gprim.ComputeVisibility()) != "invisible"
        is_visual = (is_visible and not is_guide) and (match_visual or not (match_collision or match_visual))
        is_collision = match_collision or not (match_collision or match_visual)
        g_infos = links_g_infos[link_path_to_idx[str(link_prim.GetPath())]]
        if is_visual:
            for mesh in meshes:
                g_infos.append(
                    dict(
                        vmesh=mesh,
                        pos=geom_pos,
                        quat=geom_quat,
                        contype=0,
                        conaffinity=0,
                        type=gs_type,
                        data=geom_data,
                    )
                )
        if is_collision:
            # TODO: use "physics:material:binding" (UsdPhysicsMaterialAPI) to extract frictions
            for mesh in meshes:
                g_infos.append(
                    dict(
                        mesh=mesh,
                        pos=geom_pos,
                        quat=geom_quat,
                        contype=1,
                        conaffinity=1,
                        type=gs_type,
                        data=geom_data,
                        friction=gu.default_friction(),
                        sol_params=gu.default_solver_params(),
                    )
                )
    # Recurse into children (instance proxies included); each child call prunes
    # its own subtree so every prim is visited exactly once.
    predicate = Usd.TraverseInstanceProxies()
    prim_range = Usd.PrimRange(prim, predicate)
    iterator = iter(prim_range)
    # skip the first prim (current prim)
    next(iterator)
    for child in iterator:
        parse_prim_geoms(
            context, child, link_prim, links_g_infos, link_path_to_idx, morph, surface, match_visual, match_collision
        )
        iterator.PruneChildren()
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/utils/usd/usd_geometry.py",
"license": "Apache License 2.0",
"lines": 297,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/utils/usd/usd_material.py | import numpy as np
from PIL import Image
from pxr import Usd, UsdShade
import genesis as gs
from genesis.utils import mesh as mu
# Map a USD 'sourceColorSpace' token to the Genesis texture encoding name.
# "auto" / "" map to None, leaving the encoding decision to the caller.
CS_ENCODE = {
    "raw": "linear",
    "sRGB": "srgb",
    "auto": None,
    "": None,
}
def get_input_attribute_value(shader: UsdShade.Shader, input_name, input_type=None):
    """Resolve a shader input to its connected source or its literal value.

    Returns a 2-tuple:
      * ``(source prim, source output name)`` when the input is connected and
        connections are allowed (``input_type != "value"``);
      * ``(literal value, None)`` when literal values are allowed
        (``input_type != "attribute"``);
      * ``(None, None)`` otherwise.
    """
    port = shader.GetInput(input_name)
    connections_allowed = input_type != "value"
    if connections_allowed and port.GetPrim().IsValid() and port.HasConnectedSource():
        source_connectable, source_output_name = port.GetConnectedSource()[:2]
        return source_connectable.GetPrim(), source_output_name
    if input_type != "attribute":
        return port.Get(), None
    return None, None
def parse_component(shader: UsdShade.Shader, component_name: str, component_encode: str):
    """Parse one material component (e.g. "diffuseColor") into a Genesis texture.

    Returns ``(texture, uv_name)`` where ``uv_name`` is None for constant-valued
    components and the primvar name for textured ones.
    """
    value, value_output = get_input_attribute_value(shader, component_name)
    if value_output is None:
        # Constant value: no image, no UV set.
        factor, image, uvname = value, None, None
    else:
        # Connected texture shader: recurse into the source network.
        image, encode_override, uvname = parse_preview_surface(value, value_output)
        if encode_override is not None:
            component_encode = encode_override
        factor = None
    texture = mu.create_texture(image, factor, component_encode)
    return texture, uvname
def get_shader(prim: Usd.Prim, output_name: str) -> UsdShade.Shader:
    """Return the shader behind a prim, resolving node graphs to their output source."""
    if prim.IsA(UsdShade.Shader):
        return UsdShade.Shader(prim)
    if prim.IsA(UsdShade.NodeGraph):
        # Follow the node graph's named output back to the shader that feeds it.
        graph = UsdShade.NodeGraph(prim)
        return graph.ComputeOutputSource(output_name)[0]
    gs.raise_exception(f"Invalid shader type: {prim.GetTypeName()} at {prim.GetPath()}.")
def parse_preview_surface(prim: Usd.Prim, output_name):
    """
    Recursively parse a UsdPreviewSurface shader network node.

    The return shape depends on the shader id of the resolved node:
      * "UsdPreviewSurface" -> ``(material property dict, uv primvar name)``
      * "UsdUVTexture"      -> ``(image array or None, encoding or None, uv primvar name)``
      * "UsdPrimvarReader*" -> the primvar name (a single value)
      * anything else       -> None (implicit)
    Callers must know which node kind they dispatched into.
    """
    shader = get_shader(prim, output_name)
    shader_id = shader.GetShaderId()
    if shader_id == "UsdPreviewSurface":
        # The first textured component encountered determines the UV set name.
        uvname = None
        # parse color
        color_texture, color_uvname = parse_component(shader, "diffuseColor", "srgb")
        if color_uvname is not None:
            uvname = color_uvname
        # parse opacity
        opacity_texture, opacity_uvname = parse_component(shader, "opacity", "linear")
        if opacity_uvname is not None and uvname is None:
            uvname = opacity_uvname
        if opacity_texture is not None:
            alpha_cutoff = get_input_attribute_value(shader, "opacityThreshold", "value")[0]
            opacity_texture.apply_cutoff(alpha_cutoff)
        # parse emissive (dropped entirely when uniformly black)
        emissive_texture, emissive_uvname = parse_component(shader, "emissiveColor", "srgb")
        if emissive_texture is not None and emissive_texture.is_black():
            emissive_texture = None
        if emissive_uvname is not None and uvname is None:
            uvname = emissive_uvname
        # parse metallic (only meaningful for the metallic workflow, not specular)
        use_specular = get_input_attribute_value(shader, "useSpecularWorkflow", "value")[0]
        if not use_specular:
            metallic_texture, metallic_uvname = parse_component(shader, "metallic", "linear")
            if metallic_uvname is not None and uvname is None:
                uvname = metallic_uvname
        else:
            metallic_texture = None
        # parse roughness
        roughness_texture, roughness_uvname = parse_component(shader, "roughness", "linear")
        if roughness_uvname is not None and uvname is None:
            uvname = roughness_uvname
        # parse normal
        normal_texture, normal_uvname = parse_component(shader, "normal", "linear")
        if normal_uvname is not None and uvname is None:
            uvname = normal_uvname
        # parse ior
        ior = get_input_attribute_value(shader, "ior", "value")[0]
        if uvname is None:
            uvname = "st"
        return {
            "color_texture": color_texture,
            "opacity_texture": opacity_texture,
            "roughness_texture": roughness_texture,
            "metallic_texture": metallic_texture,
            "emissive_texture": emissive_texture,
            "normal_texture": normal_texture,
            "ior": ior,
        }, uvname
    elif shader_id == "UsdUVTexture":
        texture = get_input_attribute_value(shader, "file", "value")[0]
        if texture is not None:
            texture_image = np.asarray(Image.open(texture.resolvedPath))
            if texture_image.ndim == 3:
                # Select the channel(s) requested by the connected output name.
                if output_name == "r":
                    texture_image = texture_image[:, :, 0]
                elif output_name == "g":
                    texture_image = texture_image[:, :, 1]
                elif output_name == "b":
                    texture_image = texture_image[:, :, 2]
                elif output_name == "a":
                    texture_image = texture_image[:, :, 3]
                elif output_name == "rgb":
                    texture_image = texture_image[:, :, :3]
                else:
                    gs.raise_exception(f"Invalid output channel for UsdUVTexture: {output_name}.")
        else:
            texture_image = None
        texture_encode = get_input_attribute_value(shader, "sourceColorSpace", "value")[0] or "sRGB"
        texture_encode = CS_ENCODE[texture_encode]
        # The "st" input is fed by a primvar reader; recursing yields the primvar name.
        texture_uvs, texture_uvs_output = get_input_attribute_value(shader, "st", "attribute")
        texture_uvs_name = parse_preview_surface(texture_uvs, texture_uvs_output)
        return texture_image, texture_encode, texture_uvs_name
    elif shader_id.startswith("UsdPrimvarReader"):
        primvar_name = get_input_attribute_value(shader, "varname", "value")[0]
        return primvar_name
def parse_material_preview_surface(material: UsdShade.Material) -> tuple[dict, str]:
    """Find the preview surface for a material.

    Scans the material's surface outputs for a connected ``UsdPreviewSurface``
    shader and parses it. Returns ``(property dict, uv name)``; the dict is
    empty when no preview surface was found (shaders seen along the way are
    logged as baking candidates).
    """
    material_dict, uv_name = {}, "st"
    rejected_shaders = []
    for surface_output in material.GetSurfaceOutputs():
        if not surface_output.HasConnectedSource():
            continue
        connectable, connected_output_name, _ = surface_output.GetConnectedSource()
        source_prim = connectable.GetPrim()
        shader = get_shader(source_prim, "surface")
        shader_impl = shader.GetImplementationSource()
        shader_id = shader.GetShaderId()
        if shader_impl == "id" and shader_id == "UsdPreviewSurface":
            material_dict, uv_name = parse_preview_surface(source_prim, connected_output_name)
            break
        rejected_shaders.append((shader.GetPath(), shader_id, shader_impl))
    if not material_dict:
        candidates_str = "\n".join(
            f"\tShader at {shader_path} with implement {shader_impl} and ID {shader_id}."
            for shader_path, shader_id, shader_impl in rejected_shaders
        )
        gs.logger.debug(f"Material require baking:\n{candidates_str}")
    return material_dict, uv_name
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/utils/usd/usd_material.py",
"license": "Apache License 2.0",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Genesis-Embodied-AI/Genesis:genesis/utils/usd/usd_rigid_entity.py | from typing import Dict, List, Tuple
import numpy as np
from pxr import Usd, UsdPhysics
import genesis as gs
from genesis.utils import geom as gu
from genesis.utils import urdf as urdf_utils
from .usd_context import (
UsdContext,
extract_links_referenced_by_joints,
find_joints_in_range,
find_rigid_bodies_in_range,
)
from .usd_geometry import parse_prim_geoms
from .usd_utils import (
AXES_VECTOR,
get_attr_value_by_candidates,
usd_center_of_mass_to_numpy,
usd_inertia_to_numpy,
usd_mass_to_float,
usd_pos_to_numpy,
usd_principal_axes_to_numpy,
usd_quat_to_numpy,
)
# Candidate USD DriveAPI instance names per Genesis joint type: each entry is a
# tuple of name-tuples, one per driven channel of that joint type. FIXED joints
# have no drivable channels; FREE joints expose all six.
DRIVE_NAMES = {
    gs.JOINT_TYPE.REVOLUTE: (("angular",),),
    gs.JOINT_TYPE.PRISMATIC: (("linear",),),
    gs.JOINT_TYPE.SPHERICAL: (
        ("rotX",),
        ("rotY",),
        ("rotZ",),
    ),
    gs.JOINT_TYPE.FIXED: (),
    gs.JOINT_TYPE.FREE: (
        ("transX",),
        ("transY",),
        ("transZ",),
        ("rotX",),
        ("rotY",),
        ("rotZ",),
    ),
}
def _parse_joint_axis_pos(
    context: UsdContext, joint: UsdPhysics.Joint, child_link: Usd.Prim, is_body1: bool
) -> Tuple[str, np.ndarray, np.ndarray]:
    """Compute a joint's anchor position (and axis, for 1-DoF joints) in the child link frame.

    Returns ``(axis name, axis vector, anchor position)``; the axis entries are
    None for joint types other than revolute/prismatic. Raises via Genesis when
    a revolute/prismatic axis degenerates to zero length.
    """
    anchor_attr = joint.GetLocalPos1Attr() if is_body1 else joint.GetLocalPos0Attr()
    anchor = usd_pos_to_numpy(anchor_attr.Get()) if anchor_attr.HasValue() else gu.zero_pos()
    # Lift the local anchor into world space, then express it in the Genesis link frame.
    T_world = context.compute_transform(child_link)
    anchor = gu.transform_by_T(anchor, T_world)
    Q, S = context.compute_gs_transform(child_link)
    Q_inv = np.linalg.inv(Q)
    anchor = Q_inv[:3, :3] @ (anchor - Q[:3, 3])
    if not isinstance(joint, (UsdPhysics.PrismaticJoint, UsdPhysics.RevoluteJoint)):
        return None, None, anchor
    # 1-DoF joints additionally carry an axis, rotated through the same frames.
    local_rot = usd_quat_to_numpy((joint.GetLocalRot1Attr() if is_body1 else joint.GetLocalRot0Attr()).Get())
    axis_name = joint.GetAxisAttr().Get() or "X"
    axis = gu.transform_by_quat(AXES_VECTOR[axis_name], local_rot)
    axis = gu.transform_by_R(axis, T_world[:3, :3])
    axis = Q_inv[:3, :3] @ axis
    if np.linalg.norm(axis) < gs.EPS:
        gs.raise_exception(f"Joint axis is zero for joint {joint.GetPath()}.")
    axis /= np.linalg.norm(axis)
    return axis_name, axis, anchor
def _parse_link(
context: UsdContext,
link: Usd.Prim,
joints: List[Tuple[Usd.Prim, int, bool]],
links: List[Usd.Prim],
morph: gs.morphs.USD,
):
l_info = {}
l_info["name"] = str(link.GetPath())
l_info["invweight"] = np.full((2,), fill_value=-1.0)
# parse link fixed state
link_fixed = False
if link.HasAPI(UsdPhysics.RigidBodyAPI):
rigid_body_api = UsdPhysics.RigidBodyAPI(link)
if rigid_body_api.GetKinematicEnabledAttr().Get():
link_fixed = True
elif rigid_body_api.GetRigidBodyEnabledAttr().Get() is False:
link_fixed = True
elif link.HasAPI(UsdPhysics.CollisionAPI):
link_fixed = True
if morph.fixed:
link_fixed = any(parent_idx == -1 for _, parent_idx, _ in joints)
# parse link mass properties
if link.HasAPI(UsdPhysics.MassAPI):
mass_api = UsdPhysics.MassAPI(link)
com_attr = mass_api.GetCenterOfMassAttr()
l_info["inertial_pos"] = usd_center_of_mass_to_numpy(com_attr.Get())
principal_axes_attr = mass_api.GetPrincipalAxesAttr()
l_info["inertial_quat"] = usd_principal_axes_to_numpy(principal_axes_attr.Get())
inertia_attr = mass_api.GetDiagonalInertiaAttr()
l_info["inertial_i"] = usd_inertia_to_numpy(inertia_attr.Get())
mass_attr = mass_api.GetMassAttr()
l_info["inertial_mass"] = usd_mass_to_float(mass_attr.Get())
# set link transform for pure rigid bodies (no joints)
if not joints:
Q, S = context.compute_gs_transform(link, None)
l_info["parent_idx"] = -1
l_info["pos"] = Q[:3, 3]
l_info["quat"] = gu.R_to_quat(Q[:3, :3])
j_infos = []
for joint_prim, parent_idx, is_body1 in joints:
if "parent_idx" not in l_info:
parent_link = None if parent_idx == -1 else links[parent_idx]
Q, S = context.compute_gs_transform(link, parent_link)
l_info["parent_idx"] = parent_idx
l_info["pos"] = Q[:3, 3]
l_info["quat"] = gu.R_to_quat(Q[:3, :3])
elif l_info["parent_idx"] != parent_idx:
gs.raise_exception(f"Link {link.GetPath()} has multiple parents: {l_info['parent_idx']} and {parent_idx}.")
if joint_prim is not None:
joint_type = gs.JOINT_TYPE.FIXED
n_dofs, n_qs = 0, 0
if not link_fixed:
if joint_prim.IsA(UsdPhysics.RevoluteJoint):
joint_type = gs.JOINT_TYPE.REVOLUTE
joint = UsdPhysics.RevoluteJoint(joint_prim)
n_dofs, n_qs = 1, 1
elif joint_prim.IsA(UsdPhysics.PrismaticJoint):
joint_type = gs.JOINT_TYPE.PRISMATIC
joint = UsdPhysics.PrismaticJoint(joint_prim)
n_dofs, n_qs = 1, 1
elif joint_prim.IsA(UsdPhysics.SphericalJoint):
joint_type = gs.JOINT_TYPE.SPHERICAL
joint = UsdPhysics.SphericalJoint(joint_prim)
n_dofs, n_qs = 3, 4
elif joint_prim.GetTypeName() == "PhysicsJoint":
joint_type = gs.JOINT_TYPE.FREE
joint = UsdPhysics.Joint(joint_prim)
n_dofs, n_qs = 6, 7
elif not joint_prim.IsA(UsdPhysics.FixedJoint):
gs.logger.warning(
f"Unsupported USD joint type: {joint_prim.GetTypeName()} for {joint_prim.GetPath()}. "
"Parsed as fixed joint."
)
if joint_type == gs.JOINT_TYPE.FIXED:
joint = UsdPhysics.Joint(joint_prim)
joint_axis_str, joint_axis, joint_pos = _parse_joint_axis_pos(context, joint, link, is_body1)
joint_name = str(joint_prim.GetPath())
else:
if link_fixed:
joint_type = gs.JOINT_TYPE.FIXED
n_dofs, n_qs = 0, 0
else:
joint_type = gs.JOINT_TYPE.FREE
n_dofs, n_qs = 6, 7
joint = None
joint_axis_str, joint_axis, joint_pos = None, None, gu.zero_pos()
joint_name = f"{l_info['name']}_joint"
j_info = {
"name": joint_name,
"sol_params": gu.default_solver_params(),
"n_qs": n_qs,
"n_dofs": n_dofs,
"type": joint_type,
"pos": joint_pos,
"dofs_invweight": np.full(n_dofs, -1.0, dtype=gs.np_float),
}
if joint_type in (gs.JOINT_TYPE.REVOLUTE, gs.JOINT_TYPE.PRISMATIC):
# TODO: use attribute "state:<INSTANCE_NAME>:physics:positiion" to parse init_qpos (but it is IsaacSim specific)
j_info["init_qpos"] = np.zeros(n_qs, dtype=gs.np_float)
if joint_type == gs.JOINT_TYPE.REVOLUTE:
j_info["dofs_motion_ang"] = joint_axis[None]
j_info["dofs_motion_vel"] = np.zeros((1, 3), dtype=gs.np_float)
j_info["dofs_stiffness"] = np.array(
[
get_attr_value_by_candidates(
joint_prim,
candidates=morph.revolute_joint_stiffness_attr_candidates,
attr_name="dofs_stiffness",
default_value=0.0,
)
],
dtype=gs.np_float,
)
j_info["dofs_damping"] = np.array(
[
get_attr_value_by_candidates(
joint_prim,
candidates=morph.revolute_joint_damping_attr_candidates,
attr_name="dofs_damping",
default_value=0.0,
)
],
dtype=gs.np_float,
)
# NOTE: No idea how to scale the angle limits under non-uniform scaling now.
lower_limit_attr = joint.GetLowerLimitAttr()
upper_limit_attr = joint.GetUpperLimitAttr()
lower_limit = np.deg2rad(lower_limit_attr.Get()) if lower_limit_attr.HasValue() else -np.inf
upper_limit = np.deg2rad(upper_limit_attr.Get()) if upper_limit_attr.HasValue() else np.inf
j_info["dofs_limit"] = np.asarray([[lower_limit, upper_limit]], dtype=gs.np_float)
else: # joint_type == gs.JOINT_TYPE.PRISMATIC
j_info["dofs_motion_ang"] = np.zeros((1, 3), dtype=gs.np_float)
j_info["dofs_motion_vel"] = joint_axis[None]
j_info["dofs_stiffness"] = np.array(
[
get_attr_value_by_candidates(
joint_prim,
candidates=morph.prismatic_joint_stiffness_attr_candidates,
attr_name="dofs_stiffness",
default_value=0.0,
)
],
dtype=gs.np_float,
)
j_info["dofs_damping"] = np.array(
[
get_attr_value_by_candidates(
joint_prim,
candidates=morph.prismatic_joint_damping_attr_candidates,
attr_name="dofs_damping",
default_value=0.0,
)
],
dtype=gs.np_float,
)
lower_limit_attr = joint.GetLowerLimitAttr()
upper_limit_attr = joint.GetUpperLimitAttr()
lower_limit = lower_limit_attr.Get() if lower_limit_attr.HasValue() else -np.inf
upper_limit = upper_limit_attr.Get() if upper_limit_attr.HasValue() else np.inf
j_info["dofs_limit"] = np.asarray([[lower_limit, upper_limit]], dtype=gs.np_float) * morph.scale
j_info["init_qpos"] *= morph.scale
else:
j_info["dofs_stiffness"] = np.zeros(n_dofs, dtype=gs.np_float)
j_info["dofs_damping"] = np.zeros(n_dofs, dtype=gs.np_float)
if joint_type == gs.JOINT_TYPE.SPHERICAL:
j_info["dofs_motion_ang"] = np.eye(3)
j_info["dofs_motion_vel"] = np.zeros((3, 3))
j_info["dofs_limit"] = np.tile([-np.inf, np.inf], (3, 1))
j_info["init_qpos"] = gu.identity_quat()
elif joint_type == gs.JOINT_TYPE.FIXED:
j_info["dofs_motion_ang"] = np.zeros((0, 3))
j_info["dofs_motion_vel"] = np.zeros((0, 3))
j_info["dofs_limit"] = np.zeros((0, 2))
j_info["init_qpos"] = np.zeros(0)
else: # joint_type == gs.JOINT_TYPE.FREE
j_info["dofs_motion_ang"] = np.eye(6, 3, -3)
j_info["dofs_motion_vel"] = np.eye(6, 3)
j_info["dofs_limit"] = np.tile([-np.inf, np.inf], (6, 1))
j_info["init_qpos"] = np.concatenate([l_info["pos"] * morph.scale, l_info["quat"]])
# Only parse joint dynamics and drive API for non-fixed and non-free joints
if joint_type not in (gs.JOINT_TYPE.FIXED, gs.JOINT_TYPE.FREE):
j_info["dofs_frictionloss"] = np.full(
(n_dofs,),
get_attr_value_by_candidates(
joint_prim,
candidates=morph.joint_friction_attr_candidates,
attr_name="dofs_frictionloss",
default_value=0.0,
),
dtype=gs.np_float,
)
j_info["dofs_armature"] = np.full(
(n_dofs,),
get_attr_value_by_candidates(
joint_prim,
candidates=morph.joint_armature_attr_candidates,
attr_name="dofs_armature",
default_value=0.0,
),
dtype=gs.np_float,
)
# parse drive API
# References: https://openusd.org/release/api/class_usd_physics_drive_a_p_i.html
# Note: dofs_stiffness and dofs_damping are NOT set here - they are passive joint properties
# that come from joint dynamics, not from DriveAPI (which is an active control system)
dofs_kp, dofs_kv, dofs_force_range = [], [], []
for drive_components in DRIVE_NAMES[joint_type]:
dof_kp, dof_kv, max_force = 0.0, 0.0, np.inf
if joint_axis_str:
drive_components = drive_components + (joint_axis_str,)
for drive_component in drive_components:
if joint_prim.HasAPI(UsdPhysics.DriveAPI, drive_component):
drive = UsdPhysics.DriveAPI.Get(joint_prim, drive_component)
# TODO: use drive.GetTypeAttr().Get() to parse force or velocity.
# Note: Defaults are 0 (stiffness/damping) and inf (maxForce), which are valid.
# Using 'or' is safe here since fallback values match the defaults.
dof_kp = drive.GetStiffnessAttr().Get() or dof_kp
dof_kv = drive.GetDampingAttr().Get() or dof_kv
max_force = drive.GetMaxForceAttr().Get() or max_force
break
dofs_kp.append(dof_kp)
dofs_kv.append(dof_kv)
dofs_force_range.append([-max_force, max_force])
# TODO: Implement target solving in rigid solver. (GetTargetPositionAttr())
j_info["dofs_kp"] = np.asarray(dofs_kp, dtype=gs.np_float)
j_info["dofs_kv"] = np.asarray(dofs_kv, dtype=gs.np_float)
j_info["dofs_force_range"] = np.asarray(dofs_force_range, dtype=gs.np_float)
else:
j_info["dofs_frictionloss"] = np.zeros(n_dofs, dtype=gs.np_float)
j_info["dofs_armature"] = np.zeros(n_dofs, dtype=gs.np_float)
j_info["dofs_kp"] = np.zeros(n_dofs, dtype=gs.np_float)
j_info["dofs_kv"] = np.zeros(n_dofs, dtype=gs.np_float)
j_info["dofs_force_range"] = np.tile([-np.inf, np.inf], (n_dofs, 1))
j_infos.append(j_info)
if abs(1.0 - morph.scale) > gs.EPS:
l_info["pos"] *= morph.scale
if l_info.get("inertial_pos") is not None:
l_info["inertial_pos"] *= morph.scale
if l_info.get("inertial_mass") is not None:
l_info["inertial_mass"] *= morph.scale**3
if l_info.get("inertial_i") is not None:
l_info["inertial_i"] *= morph.scale**5
l_info["invweight"][:] = -1.0
for j_info in j_infos:
j_info["pos"] *= morph.scale
# TODO: parse actuator in USD articulation, now all joints are considered to have actuators
j_info["dofs_kp"] *= morph.scale**3
j_info["dofs_kv"] *= morph.scale**3
j_info["dofs_invweight"][:] = -1.0
return l_info, j_infos
# Rigidbody requirements: https://docs.omniverse.nvidia.com/kit/docs/asset-requirements/latest/capabilities/physics_bodies/physics_rigid_bodies/capability-physics_rigid_bodies.html
# Joint requirements: https://docs.omniverse.nvidia.com/kit/docs/asset-requirements/latest/capabilities/physics_bodies/physics_joints/capability-physics_joints.html
def _parse_articulation_structure(stage: Usd.Stage, entity_prim: Usd.Prim, joint_prims: List[str] | None = None):
    """Build the link/joint graph of a USD entity.

    Parameters
    ----------
    stage : Usd.Stage
        The USD stage containing the entity.
    entity_prim : Usd.Prim
        Root prim of the entity. Used as the single link in the pure rigid body case.
    joint_prims : List[str] | None
        Paths of the joint prims belonging to this entity (computed by
        `parse_usd_rigid_entity` from the stage, as joints that reference links in
        the entity subtree). None means no joints (pure rigid body case).

    Returns
    -------
    links : List[Usd.Prim]
        One prim per link.
    link_joints : List[List[Tuple]]
        For each link, a list of (joint prim, parent link index, is_body1) tuples.
        In the articulation case, links with no joints get a (None, -1, False)
        placeholder entry.
    link_path_to_idx : Dict
        Mapping from link prim path (str) to its index in `links`; the key None
        maps to -1 (world / no parent).

    Raises
    ------
    gs.GenesisException
        If a provided joint prim path is invalid, does not refer to a joint, or a
        link path referenced by a joint cannot be found in the stage.
    """
    link_path_joints = {}
    # Resolve and validate the joint prims handed in by the caller. None means a
    # pure rigid body (no joints at all).
    if joint_prims is not None:
        joint_prim_objs = [stage.GetPrimAtPath(joint_path) for joint_path in joint_prims]
        for prim in joint_prim_objs:
            if not prim.IsValid():
                gs.raise_exception(f"Invalid joint prim path: {prim}")
            if not prim.IsA(UsdPhysics.Joint):
                gs.raise_exception(f"Prim {prim.GetPath()} is not a joint.")
    else:
        # No joints provided - this is a pure rigid body case
        joint_prim_objs = []

    # Only paths that are actually rigid bodies (have RigidBodyAPI or CollisionAPI)
    # become links.
    def is_rigid_body(path: str) -> bool:
        """Check if a prim path is a rigid body."""
        prim = stage.GetPrimAtPath(path)
        if not prim.IsValid():
            return False
        return prim.HasAPI(UsdPhysics.RigidBodyAPI) or prim.HasAPI(UsdPhysics.CollisionAPI)

    # Process joints to build the link/joint structure. By USD convention body0 is
    # the parent side of the joint and body1 the child side.
    for prim in joint_prim_objs:
        joint = UsdPhysics.Joint(prim)
        body0_targets = joint.GetBody0Rel().GetTargets()  # parent
        body1_targets = joint.GetBody1Rel().GetTargets()  # child
        body0_target_path = str(body0_targets[0]) if body0_targets else None
        body1_target_path = str(body1_targets[0]) if body1_targets else None
        # Only process if at least one body is a rigid body
        body0_is_rigid = body0_target_path and is_rigid_body(body0_target_path)
        body1_is_rigid = body1_target_path and is_rigid_body(body1_target_path)
        if body1_is_rigid:
            # body1 (child) is a rigid body - attach the joint to it, recording
            # body0 as its parent only when body0 is itself a rigid body.
            parent_path = body0_target_path if body0_is_rigid else None
            link_path_joints.setdefault(body1_target_path, []).append((prim, parent_path, True))
            if body0_is_rigid:
                # Register the parent as a link too (it may collect its own joints).
                link_path_joints.setdefault(body0_target_path, [])
        elif body0_is_rigid:
            # Only body0 is a rigid body - treat it as a parentless link.
            link_path_joints.setdefault(body0_target_path, []).append((prim, None, False))
        # If neither is a rigid body, skip this joint (it doesn't connect rigid bodies)

    links, link_joints = [], []
    if link_path_joints:
        link_path_to_idx = {link_path: idx for idx, link_path in enumerate(link_path_joints.keys())}
        link_path_to_idx[None] = -1
        for link_path, joints in link_path_joints.items():
            # Hoisted prim lookup: the path was previously resolved twice (once for
            # the validity check, once for the append).
            link_prim = stage.GetPrimAtPath(link_path)
            if not link_prim:
                gs.raise_exception(f"Link {link_path} not found in stage.")
            links.append(link_prim)
            link_joints.append(
                [(joint, link_path_to_idx[parent_path], is_body1) for joint, parent_path, is_body1 in joints]
            )
    else:
        # Pure rigid body case - the entity prim itself is the only link, no joints,
        # no placeholder needed.
        links = [entity_prim]
        link_joints = [[]]
        link_path_to_idx = {None: -1, str(entity_prim.GetPath()): 0}
    # Only add placeholder joints for articulation links that have no joints
    # (pure rigid bodies should not have any joints)
    if link_path_joints:
        for joints in link_joints:
            if not joints:
                joints.append((None, -1, False))
    return links, link_joints, link_path_to_idx
def _parse_geoms(
    context: UsdContext,
    entity_prim: Usd.Prim,
    link_path_to_idx: Dict[str, int],
    morph: gs.morphs.USD,
    surface: gs.surfaces.Surface,
) -> List[List[Dict]]:
    """Collect per-link geometry info dicts for the entity subtree.

    Returns one (possibly empty) list of geometry info dicts per link index.
    """
    # One empty bucket per link; parse_prim_geoms fills them in place.
    per_link_geoms: List[List[Dict]] = [[] for _ in link_path_to_idx]
    parse_prim_geoms(context, entity_prim, None, per_link_geoms, link_path_to_idx, morph, surface)
    return per_link_geoms
def _parse_links(
    context: UsdContext,
    links: List[Usd.Prim],
    link_joints: List[List[Tuple[Usd.Prim, int, bool]]],
    morph: gs.morphs.USD,
) -> Tuple[List[Dict], List[List[Dict]]]:
    """Parse every link (and the joints attached to it) into info dictionaries.

    Returns
    -------
    l_infos : List[Dict]
        One link info dict per link, in the same order as `links`.
    links_j_infos : List[List[Dict]]
        For each link, the list of joint info dicts attached to it.
    """
    # Parse each (link, joints) pair once, then unzip the result pairs.
    parsed = [_parse_link(context, link, joints, links, morph) for link, joints in zip(links, link_joints)]
    l_infos = [l_info for l_info, _ in parsed]
    links_j_infos = [j_infos for _, j_infos in parsed]
    return l_infos, links_j_infos
def _compute_joint_prim_paths(stage: Usd.Stage, entity_prim: Usd.Prim) -> List[str] | None:
    """
    Compute joint prim paths for an entity. Joints are those that reference (body0 or body1)
    any rigid body in the entity subtree, so that entities whose prim_path is a link (e.g.
    common ancestor of a single link) still get joints that are siblings of that link.
    This determines whether the entity is:
    - A pure rigid body (no joints referencing links in subtree)
    - A pure articulation (has joints, all rigid bodies in subtree are referenced by them)
    - Mixed case (error: has both joints and unreferenced rigid bodies in subtree)

    Returns
    -------
    List[str] | None
        The joint prim paths for a pure articulation, or None for a pure rigid body.

    Raises
    ------
    gs.GenesisException
        If mixed entity is detected (both joints and unreferenced rigid bodies in subtree).
    """
    # Find all rigid bodies in the entity_prim subtree
    rigid_bodies_in_subtree = find_rigid_bodies_in_range(Usd.PrimRange(entity_prim))
    # Find all joints in the stage (filter to those referencing links in subtree below)
    all_joints = find_joints_in_range(Usd.PrimRange(stage.GetPseudoRoot()))
    # Joints belonging to this entity: those that reference any link in the entity subtree
    joints_for_entity = []
    for joint_prim in all_joints:
        joint = UsdPhysics.Joint(joint_prim)
        body0_targets = joint.GetBody0Rel().GetTargets()
        body1_targets = joint.GetBody1Rel().GetTargets()
        body0_path = str(body0_targets[0]) if body0_targets else None
        body1_path = str(body1_targets[0]) if body1_targets else None
        if (body0_path and body0_path in rigid_bodies_in_subtree) or (
            body1_path and body1_path in rigid_bodies_in_subtree
        ):
            joints_for_entity.append(joint_prim)
    links_referenced_by_joints = extract_links_referenced_by_joints(stage, joints_for_entity, check_rigid_body=False)
    # Determine entity type (has joints vs pure rigid body, and mixed-case check).
    # The set difference is computed once here; it was previously recomputed inside
    # the error branch.
    has_joints = len(joints_for_entity) > 0
    unreferenced = rigid_bodies_in_subtree - links_referenced_by_joints
    # Check for mixed case error, because scene.add_entity(...) only return 1 entity.
    if has_joints and unreferenced:
        gs.raise_exception(
            f"Mixed entity detected at {entity_prim.GetPath()}: "
            f"has {len(joints_for_entity)} joints but also has {len(unreferenced)} rigid bodies "
            f"not referenced by joints: {list(unreferenced)[:5]}. "
            "Use scene.add_stage() to handle mixed entities, or ensure all rigid bodies are connected by joints."
        )
    # Pure articulation case (has joints, all rigid bodies are referenced)
    if has_joints:
        return [str(joint.GetPath()) for joint in joints_for_entity]
    # Pure rigid body case (no joints)
    return None
def parse_usd_rigid_entity(morph: gs.morphs.USD, surface: gs.surfaces.Surface):
    """
    Unified parser for USD rigid entities (both articulations and rigid bodies).

    Treats rigid bodies as articulation roots with no child links, automatically
    detecting whether the prim is an articulation (has joints) or a rigid body
    (no joints) and processing it accordingly.

    Parameters
    ----------
    morph : gs.morphs.USD
        USD morph configuration.
    surface : gs.surfaces.Surface
        Surface configuration.

    Returns
    -------
    l_infos : list
        List of link info dictionaries.
    links_j_infos : list
        List of lists of joint info dictionaries.
    links_g_infos : list
        List of lists of geometry info dictionaries.
    eqs_info : list
        List of equality constraint info dictionaries (always empty for USD).
    """
    context: UsdContext = morph.usd_ctx
    context.find_all_materials()
    stage: Usd.Stage = context.stage

    # Resolve the entity prim: the user-specified path when given, otherwise the
    # stage's default prim.
    if morph.prim_path is None:
        gs.logger.debug("USD morph has no prim path. Fallback to its default prim path.")
        entity_prim = stage.GetDefaultPrim()
    else:
        entity_prim = stage.GetPrimAtPath(morph.prim_path)
    if not entity_prim.IsValid():
        err_msg = (
            f"Invalid default prim path {entity_prim} in USD file {morph.file}. Please specify 'morph.prim_path'."
            if morph.prim_path is None
            else f"Invalid user-specified prim path {entity_prim} in USD file {morph.file}."
        )
        gs.raise_exception(err_msg)

    # Deduce joint prim paths for this entity, then parse structure, geometry and
    # link/joint info, ordering links the same way the URDF pipeline does.
    joint_prim_paths = _compute_joint_prim_paths(stage, entity_prim)
    links, link_joints, link_path_to_idx = _parse_articulation_structure(stage, entity_prim, joint_prim_paths)
    links_g_infos = _parse_geoms(context, entity_prim, link_path_to_idx, morph, surface)
    l_infos, links_j_infos = _parse_links(context, links, link_joints, morph)
    l_infos, links_j_infos, links_g_infos, _ = urdf_utils._order_links(l_infos, links_j_infos, links_g_infos)
    eqs_info = []  # USD doesn't support equality constraints
    return l_infos, links_j_infos, links_g_infos, eqs_info
| {
"repo_id": "Genesis-Embodied-AI/Genesis",
"file_path": "genesis/utils/usd/usd_rigid_entity.py",
"license": "Apache License 2.0",
"lines": 501,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.