sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
reflex-dev/reflex:reflex/utils/token_manager.py | """Token manager for handling client token to session ID mappings."""
from __future__ import annotations
import asyncio
import dataclasses
import pickle
import uuid
from abc import ABC, abstractmethod
from collections.abc import AsyncIterator, Callable, Coroutine
from types import MappingProxyType
from typing import TYPE_CHECKING, ClassVar
from reflex.istate.manager.redis import StateManagerRedis
from reflex.state import BaseState, StateUpdate
from reflex.utils import console, prerequisites
from reflex.utils.tasks import ensure_task
if TYPE_CHECKING:
from redis.asyncio import Redis
def _get_new_token() -> str:
"""Generate a new unique token.
Returns:
A new UUID4 token string.
"""
return str(uuid.uuid4())
@dataclasses.dataclass(frozen=True, kw_only=True)
class SocketRecord:
    """Record for a connected socket client."""

    # ID of the worker process that owns this socket connection.
    instance_id: str
    # Socket.IO session ID of the connection.
    sid: str
@dataclasses.dataclass(frozen=True, kw_only=True)
class LostAndFoundRecord:
    """Record for a StateUpdate for a token with its socket on another instance."""

    # Client token the update is destined for.
    token: str
    # State update payload to deliver to that token's socket.
    update: StateUpdate
class TokenManager(ABC):
    """Abstract base class for managing client token to session ID mappings."""

    def __init__(self):
        """Set up the per-process socket bookkeeping structures."""
        # Each process gets its own instance_id to tell its sockets apart
        # from sockets owned by other worker processes.
        self.instance_id: str = _get_new_token()
        # Forward lookup: client token -> SocketRecord (instance + sid).
        self.token_to_socket: dict[str, SocketRecord] = {}
        # Reverse lookup: Socket.IO session ID -> client token.
        self.sid_to_token: dict[str, str] = {}

    @property
    def token_to_sid(self) -> MappingProxyType[str, str]:
        """Read-only compatibility view over the token_to_socket mapping.

        Returns:
            The token to session ID mapping.
        """
        mapping = {token: record.sid for token, record in self.token_to_socket.items()}
        return MappingProxyType(mapping)

    async def enumerate_tokens(self) -> AsyncIterator[str]:
        """Iterate over all tokens in the system.

        Yields:
            All client tokens known to the TokenManager.
        """
        for known_token in self.token_to_socket.keys():  # noqa: SIM118
            yield known_token

    @abstractmethod
    async def link_token_to_sid(self, token: str, sid: str) -> str | None:
        """Link a token to a session ID.

        Args:
            token: The client token.
            sid: The Socket.IO session ID.

        Returns:
            New token if duplicate detected and new token generated, None otherwise.
        """

    @abstractmethod
    async def disconnect_token(self, token: str, sid: str) -> None:
        """Clean up token mapping when client disconnects.

        Args:
            token: The client token.
            sid: The Socket.IO session ID.
        """

    @classmethod
    def create(cls) -> TokenManager:
        """Factory method to create appropriate TokenManager implementation.

        Returns:
            RedisTokenManager if Redis is available, LocalTokenManager otherwise.
        """
        # Fall back to the in-process implementation whenever Redis is
        # not configured or the client cannot be constructed.
        if not prerequisites.check_redis_used():
            return LocalTokenManager()
        redis_client = prerequisites.get_redis()
        if redis_client is None:
            return LocalTokenManager()
        return RedisTokenManager(redis_client)

    async def disconnect_all(self):
        """Disconnect all tracked tokens when the server is going down."""
        # Collect (token, sid) pairs from both mappings before mutating
        # anything, since disconnect_token modifies the dicts.
        pairs: set[tuple[str, str]] = set()
        for token, record in self.token_to_socket.items():
            pairs.add((token, record.sid))
        for sid, token in self.sid_to_token.items():
            pairs.add((token, sid))
        for token, sid in pairs:
            await self.disconnect_token(token, sid)
class LocalTokenManager(TokenManager):
    """Token manager backed by plain in-process dictionaries (single worker)."""

    def __init__(self):
        """Initialize the local token manager."""
        super().__init__()

    async def link_token_to_sid(self, token: str, sid: str) -> str | None:
        """Link a token to a session ID.

        Args:
            token: The client token.
            sid: The Socket.IO session ID.

        Returns:
            New token if duplicate detected and new token generated, None otherwise.
        """
        existing = self.token_to_socket.get(token)
        if existing is not None and existing.sid != sid:
            # Same token arriving on a different socket means a duplicate
            # tab: keep the original mapping intact and hand the new socket
            # a freshly generated token instead.
            replacement = _get_new_token()
            self.token_to_socket[replacement] = SocketRecord(
                instance_id=self.instance_id, sid=sid
            )
            self.sid_to_token[sid] = replacement
            return replacement
        # Normal case (new connection or reconnection with the same sid).
        self.token_to_socket[token] = SocketRecord(
            instance_id=self.instance_id, sid=sid
        )
        self.sid_to_token[sid] = token
        return None

    async def disconnect_token(self, token: str, sid: str) -> None:
        """Clean up token mapping when client disconnects.

        Args:
            token: The client token.
            sid: The Socket.IO session ID.
        """
        # Drop both directions of the mapping; unknown keys are ignored.
        if token in self.token_to_socket:
            del self.token_to_socket[token]
        self.sid_to_token.pop(sid, None)
class RedisTokenManager(LocalTokenManager):
    """Token manager using Redis for distributed multi-worker support.

    Inherits local dict logic from LocalTokenManager and adds Redis layer
    for cross-worker duplicate detection.
    """

    # Prefix for the Redis keys holding pickled SocketRecord values.
    _token_socket_record_prefix: ClassVar[str] = "token_manager_socket_record_"

    def __init__(self, redis: Redis):
        """Initialize the Redis token manager.

        Args:
            redis: The Redis client instance.
        """
        # Initialize parent's local dicts
        super().__init__()
        self.redis = redis
        # Get token expiration from config (default 1 hour)
        # Imported lazily here to avoid a module-level import cycle.
        from reflex.config import get_config

        config = get_config()
        self.token_expiration = config.redis_token_expiration
        # Pub/sub tasks for handling sockets owned by other instances.
        self._socket_record_task: asyncio.Task | None = None
        self._lost_and_found_task: asyncio.Task | None = None

    def _get_redis_key(self, token: str) -> str:
        """Get Redis key for token mapping.

        Args:
            token: The client token.

        Returns:
            Redis key following Reflex conventions: token_manager_socket_record_{token}
        """
        return f"{self._token_socket_record_prefix}{token}"

    async def enumerate_tokens(self) -> AsyncIterator[str]:
        """Iterate over all tokens in the system.

        Yields:
            All client tokens known to the RedisTokenManager.
        """
        cursor = 0
        # SCAN returns a (next_cursor, keys) tuple, which is always truthy,
        # so the loop actually terminates via the `if not cursor` check below
        # (Redis signals the end of a full scan with cursor == 0).
        while scan_result := await self.redis.scan(
            cursor=cursor, match=self._get_redis_key("*")
        ):
            cursor = int(scan_result[0])
            for key in scan_result[1]:
                yield key.decode().replace(self._token_socket_record_prefix, "")
            if not cursor:
                break

    async def _handle_socket_record_del(
        self, token: str, expired: bool = False
    ) -> None:
        """Handle deletion of a socket record from Redis.

        Args:
            token: The client token whose record was deleted.
            expired: Whether the deletion was due to expiration.
        """
        # Only react when the record is known locally AND owned by this
        # process; deletions of other instances' records are ignored.
        if (
            socket_record := self.token_to_socket.pop(token, None)
        ) is not None and socket_record.instance_id == self.instance_id:
            self.sid_to_token.pop(socket_record.sid, None)
            if expired:
                # Keep the record alive as long as this process is alive and not deleted.
                await self.link_token_to_sid(token, socket_record.sid)

    async def _subscribe_socket_record_updates(self) -> None:
        """Subscribe to Redis keyspace notifications for socket record updates."""
        # Keyspace notifications are off by default in Redis; reuse the
        # state manager helper to turn them on for this server.
        await StateManagerRedis(
            state=BaseState, redis=self.redis
        )._enable_keyspace_notifications()
        redis_db = self.redis.get_connection_kwargs().get("db", 0)
        async with self.redis.pubsub() as pubsub:
            await pubsub.psubscribe(
                f"__keyspace@{redis_db}__:{self._get_redis_key('*')}"
            )
            async for message in pubsub.listen():
                if message["type"] == "pmessage":
                    # Channel is b"__keyspace@<db>__:<key>"; split off the key part.
                    key = message["channel"].split(b":", 1)[1].decode()
                    token = key.replace(self._token_socket_record_prefix, "")
                    if token not in self.token_to_socket:
                        # We don't know about this token, skip
                        continue
                    event = message["data"].decode()
                    if event in ("del", "expired", "evicted"):
                        await self._handle_socket_record_del(
                            token,
                            expired=(event == "expired"),
                        )
                    elif event == "set":
                        # Another instance rewrote the record; refresh our copy.
                        await self._get_token_owner(token, refresh=True)

    def _ensure_socket_record_task(self) -> None:
        """Ensure the socket record updates subscriber task is running."""
        ensure_task(
            owner=self,
            task_attribute="_socket_record_task",
            coro_function=self._subscribe_socket_record_updates,
            suppress_exceptions=[Exception],
        )

    async def link_token_to_sid(self, token: str, sid: str) -> str | None:
        """Link a token to a session ID with Redis-based duplicate detection.

        Args:
            token: The client token.
            sid: The Socket.IO session ID.

        Returns:
            New token if duplicate detected and new token generated, None otherwise.
        """
        # Fast local check first (handles reconnections)
        if (
            socket_record := self.token_to_socket.get(token)
        ) is not None and sid == socket_record.sid:
            return None  # Same token, same SID = reconnection, no Redis check needed
        # Make sure the update subscriber is running
        self._ensure_socket_record_task()
        # Check Redis for cross-worker duplicates
        redis_key = self._get_redis_key(token)
        try:
            token_exists_in_redis = await self.redis.exists(redis_key)
        except Exception as e:
            # Redis unavailable: degrade to local-only duplicate detection.
            console.error(f"Redis error checking token existence: {e}")
            return await super().link_token_to_sid(token, sid)
        new_token = None
        if token_exists_in_redis:
            # Duplicate exists somewhere - generate new token
            token = new_token = _get_new_token()
            redis_key = self._get_redis_key(new_token)
        # Store in local dicts
        socket_record = self.token_to_socket[token] = SocketRecord(
            instance_id=self.instance_id, sid=sid
        )
        self.sid_to_token[sid] = token
        # Store in Redis if possible
        try:
            await self.redis.set(
                redis_key,
                pickle.dumps(socket_record),
                ex=self.token_expiration,
            )
        except Exception as e:
            # Best-effort: the local mapping above still works for this worker.
            console.error(f"Redis error storing token: {e}")
        # Return the new token if one was generated
        return new_token

    async def disconnect_token(self, token: str, sid: str) -> None:
        """Clean up token mapping when client disconnects.

        Args:
            token: The client token.
            sid: The Socket.IO session ID.
        """
        # Only clean up if we own it locally (fast ownership check)
        if (
            (socket_record := self.token_to_socket.get(token)) is not None
            and socket_record.sid == sid
            and socket_record.instance_id == self.instance_id
        ):
            # Clean up Redis
            redis_key = self._get_redis_key(token)
            try:
                await self.redis.delete(redis_key)
            except Exception as e:
                console.error(f"Redis error deleting token: {e}")
        # Clean up local dicts (always do this)
        await super().disconnect_token(token, sid)

    @staticmethod
    def _get_lost_and_found_key(instance_id: str) -> str:
        """Get the Redis key for lost and found deltas for an instance.

        Args:
            instance_id: The instance ID.

        Returns:
            The Redis key for lost and found deltas.
        """
        return f"token_manager_lost_and_found_{instance_id}"

    async def _subscribe_lost_and_found_updates(
        self,
        emit_update: Callable[[StateUpdate, str], Coroutine[None, None, None]],
    ) -> None:
        """Subscribe to Redis channel notifications for lost and found deltas.

        Args:
            emit_update: The function to emit state updates.
        """
        async with self.redis.pubsub() as pubsub:
            await pubsub.psubscribe(
                f"channel:{self._get_lost_and_found_key(self.instance_id)}"
            )
            async for message in pubsub.listen():
                if message["type"] == "pmessage":
                    # NOTE(review): pickle.loads on pub/sub payloads assumes the
                    # Redis instance is trusted (records are produced by peer
                    # workers in emit_lost_and_found) — confirm deployment model.
                    record = pickle.loads(message["data"])
                    await emit_update(record.update, record.token)

    def ensure_lost_and_found_task(
        self,
        emit_update: Callable[[StateUpdate, str], Coroutine[None, None, None]],
    ) -> None:
        """Ensure the lost and found subscriber task is running.

        Args:
            emit_update: The function to emit state updates.
        """
        ensure_task(
            owner=self,
            task_attribute="_lost_and_found_task",
            coro_function=self._subscribe_lost_and_found_updates,
            suppress_exceptions=[Exception],
            emit_update=emit_update,
        )

    async def _get_token_owner(self, token: str, refresh: bool = False) -> str | None:
        """Get the instance ID of the owner of a token.

        Args:
            token: The client token.
            refresh: Whether to fetch the latest record from Redis.

        Returns:
            The instance ID of the owner, or None if not found.
        """
        # Serve from the local cache unless the caller forces a refresh.
        if (
            not refresh
            and (socket_record := self.token_to_socket.get(token)) is not None
        ):
            return socket_record.instance_id
        redis_key = self._get_redis_key(token)
        try:
            record_pkl = await self.redis.get(redis_key)
            if record_pkl:
                socket_record = pickle.loads(record_pkl)
                # Cache the fetched record locally for future lookups.
                self.token_to_socket[token] = socket_record
                self.sid_to_token[socket_record.sid] = token
                return socket_record.instance_id
            console.warn(f"Redis token owner not found for token {token}")
        except Exception as e:
            console.error(f"Redis error getting token owner: {e}")
        return None

    async def emit_lost_and_found(
        self,
        token: str,
        update: StateUpdate,
    ) -> bool:
        """Emit a lost and found delta to Redis.

        Args:
            token: The client token.
            update: The state update.

        Returns:
            True if the delta was published, False otherwise.
        """
        # See where this update belongs
        owner_instance_id = await self._get_token_owner(token)
        if owner_instance_id is None:
            # No owner anywhere: the update is dropped by the caller.
            return False
        record = LostAndFoundRecord(token=token, update=update)
        try:
            await self.redis.publish(
                f"channel:{self._get_lost_and_found_key(owner_instance_id)}",
                pickle.dumps(record),
            )
        except Exception as e:
            console.error(f"Redis error publishing lost and found delta: {e}")
        else:
            return True
        return False
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/utils/token_manager.py",
"license": "Apache License 2.0",
"lines": 380,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:tests/units/utils/test_token_manager.py | """Unit tests for TokenManager implementations."""
import asyncio
import pickle
import time
from collections.abc import Callable, Generator
from contextlib import asynccontextmanager
from unittest.mock import AsyncMock, Mock, patch
import pytest
from reflex import config
from reflex.app import EventNamespace
from reflex.istate.data import RouterData
from reflex.state import StateUpdate
from reflex.utils.token_manager import (
LocalTokenManager,
RedisTokenManager,
SocketRecord,
TokenManager,
)
class TestTokenManager:
    """Tests for the TokenManager factory."""

    # Note: @patch decorators apply bottom-up, so the mock for the innermost
    # decorator (get_redis) is passed as the first mock parameter.
    @patch("reflex.utils.token_manager.prerequisites.check_redis_used")
    @patch("reflex.utils.token_manager.prerequisites.get_redis")
    def test_create_local_when_no_redis(self, mock_get_redis, mock_check_redis_used):
        """Test factory creates LocalTokenManager when Redis is not available.

        Args:
            mock_get_redis: Mock for prerequisites.get_redis.
            mock_check_redis_used: Mock for prerequisites.check_redis_used.
        """
        mock_check_redis_used.return_value = False
        manager = TokenManager.create()
        assert isinstance(manager, LocalTokenManager)
        # When Redis is not in use, the factory must not even try to get a client.
        mock_get_redis.assert_not_called()

    @patch("reflex.utils.token_manager.prerequisites.check_redis_used")
    @patch("reflex.utils.token_manager.prerequisites.get_redis")
    def test_create_local_when_redis_client_none(
        self, mock_get_redis, mock_check_redis_used
    ):
        """Test factory creates LocalTokenManager when Redis client is None.

        Args:
            mock_get_redis: Mock for prerequisites.get_redis.
            mock_check_redis_used: Mock for prerequisites.check_redis_used.
        """
        mock_check_redis_used.return_value = True
        mock_get_redis.return_value = None
        manager = TokenManager.create()
        assert isinstance(manager, LocalTokenManager)

    @patch("reflex.utils.token_manager.prerequisites.check_redis_used")
    @patch("reflex.utils.token_manager.prerequisites.get_redis")
    def test_create_redis_when_redis_available(
        self, mock_get_redis, mock_check_redis_used
    ):
        """Test factory creates RedisTokenManager when Redis is available.

        Args:
            mock_get_redis: Mock for prerequisites.get_redis.
            mock_check_redis_used: Mock for prerequisites.check_redis_used.
        """
        mock_check_redis_used.return_value = True
        mock_redis_client = Mock()
        # RedisTokenManager.__init__ reads connection kwargs; stub them out.
        mock_redis_client.get_connection_kwargs.return_value = {"db": 0}
        mock_get_redis.return_value = mock_redis_client
        manager = TokenManager.create()
        assert isinstance(manager, RedisTokenManager)
        assert manager.redis is mock_redis_client
class TestLocalTokenManager:
    """Tests for LocalTokenManager."""

    @pytest.fixture
    def manager(self):
        """Create a LocalTokenManager instance.

        Returns:
            A LocalTokenManager instance for testing.
        """
        return LocalTokenManager()

    @pytest.mark.parametrize(
        ("token", "sid"),
        [
            ("token1", "sid1"),
            ("test-token", "test-sid"),
            ("12345", "67890"),
        ],
    )
    async def test_link_token_to_sid_normal_case(self, manager, token, sid):
        """Test normal token linking returns None.

        Args:
            manager: LocalTokenManager fixture instance.
            token: Token string to test.
            sid: Session ID string to test.
        """
        result = await manager.link_token_to_sid(token, sid)
        # None means no duplicate was detected.
        assert result is None
        assert manager.token_to_sid[token] == sid
        assert manager.sid_to_token[sid] == token

    async def test_link_token_to_sid_same_token_same_sid(self, manager):
        """Test linking same token to same SID (reconnection).

        Args:
            manager: LocalTokenManager fixture instance.
        """
        token, sid = "token1", "sid1"
        await manager.link_token_to_sid(token, sid)
        # Re-linking the identical pair must be treated as a reconnection.
        result = await manager.link_token_to_sid(token, sid)
        assert result is None
        assert manager.token_to_sid[token] == sid
        assert manager.sid_to_token[sid] == token

    async def test_link_token_to_sid_duplicate_token_different_sid(self, manager):
        """Test duplicate token detection generates new token.

        Args:
            manager: LocalTokenManager fixture instance.
        """
        token = "duplicate_token"
        sid1, sid2 = "sid1", "sid2"
        # First connection
        result1 = await manager.link_token_to_sid(token, sid1)
        assert result1 is None
        # Duplicate token with different SID
        result2 = await manager.link_token_to_sid(token, sid2)
        assert result2 is not None
        assert result2 != token
        # Check mappings
        assert manager.token_to_sid[result2] == sid2
        assert manager.sid_to_token[sid2] == result2
        # The original connection keeps its mapping untouched.
        assert manager.token_to_sid[token] == sid1

    @pytest.mark.parametrize(
        ("token", "sid"),
        [
            ("token1", "sid1"),
            ("test-token", "test-sid"),
        ],
    )
    async def test_disconnect_token(self, manager, token, sid):
        """Test token disconnection cleans up mappings.

        Args:
            manager: LocalTokenManager fixture instance.
            token: Token string to test.
            sid: Session ID string to test.
        """
        await manager.link_token_to_sid(token, sid)
        await manager.disconnect_token(token, sid)
        assert token not in manager.token_to_sid
        assert sid not in manager.sid_to_token

    async def test_disconnect_nonexistent_token(self, manager):
        """Test disconnecting nonexistent token doesn't raise error.

        Args:
            manager: LocalTokenManager fixture instance.
        """
        await manager.disconnect_token("nonexistent", "nonexistent")
        assert len(manager.token_to_sid) == 0
        assert len(manager.sid_to_token) == 0

    async def test_enumerate_tokens(self, manager):
        """Test enumerate_tokens yields all linked tokens.

        Args:
            manager: LocalTokenManager fixture instance.
        """
        tokens_sids = [("token1", "sid1"), ("token2", "sid2"), ("token3", "sid3")]
        for token, sid in tokens_sids:
            await manager.link_token_to_sid(token, sid)
        found_tokens = set()
        async for token in manager.enumerate_tokens():
            found_tokens.add(token)
        expected_tokens = {token for token, _ in tokens_sids}
        assert found_tokens == expected_tokens
        # Disconnect a token and ensure it's removed.
        await manager.disconnect_token("token2", "sid2")
        expected_tokens.remove("token2")
        found_tokens = set()
        async for token in manager.enumerate_tokens():
            found_tokens.add(token)
        assert found_tokens == expected_tokens
        # Disconnect all tokens, none should remain
        await manager.disconnect_all()
        found_tokens = set()
        async for token in manager.enumerate_tokens():
            found_tokens.add(token)
        assert not found_tokens
class TestRedisTokenManager:
    """Tests for RedisTokenManager."""

    @pytest.fixture
    def mock_redis(self):
        """Create a mock Redis client.

        Returns:
            AsyncMock configured as Redis client for testing.
        """
        redis = AsyncMock()
        redis.exists = AsyncMock()
        redis.set = AsyncMock()
        redis.delete = AsyncMock()
        # Non-async call
        redis.get_connection_kwargs = Mock(return_value={"db": 0})

        # Mock out pubsub
        async def listen():
            # Never yields a message; the dead `if False: yield` only marks
            # this function as an async generator so it can be iterated.
            await asyncio.sleep(1)
            if False:
                yield
            return

        @asynccontextmanager
        async def pubsub():  # noqa: RUF029
            pubsub_mock = AsyncMock()
            pubsub_mock.listen = listen
            yield pubsub_mock

        redis.pubsub = pubsub
        return redis

    @pytest.fixture
    def manager(self, mock_redis):
        """Create a RedisTokenManager instance with mocked config.

        Args:
            mock_redis: Mock Redis client fixture.

        Returns:
            RedisTokenManager instance for testing.
        """
        # __init__ reads redis_token_expiration from the config, so patch
        # get_config to avoid depending on the real environment.
        with patch("reflex.config.get_config") as mock_get_config:
            mock_config = Mock()
            mock_config.redis_token_expiration = 3600
            mock_get_config.return_value = mock_config
            return RedisTokenManager(mock_redis)

    def test_get_redis_key(self, manager):
        """Test Redis key generation follows expected pattern.

        Args:
            manager: RedisTokenManager fixture instance.
        """
        token = "test_token_123"
        expected_key = f"token_manager_socket_record_{token}"
        assert manager._get_redis_key(token) == expected_key

    async def test_link_token_to_sid_normal_case(self, manager, mock_redis):
        """Test normal token linking stores in both Redis and local dicts.

        Args:
            manager: RedisTokenManager fixture instance.
            mock_redis: Mock Redis client fixture.
        """
        token, sid = "token1", "sid1"
        mock_redis.exists.return_value = False
        result = await manager.link_token_to_sid(token, sid)
        assert result is None
        mock_redis.exists.assert_called_once_with(
            f"token_manager_socket_record_{token}"
        )
        # Record is pickled into Redis with the configured expiration.
        mock_redis.set.assert_called_once_with(
            f"token_manager_socket_record_{token}",
            pickle.dumps(SocketRecord(instance_id=manager.instance_id, sid=sid)),
            ex=3600,
        )
        assert manager.token_to_socket[token].sid == sid
        assert manager.sid_to_token[sid] == token

    async def test_link_token_to_sid_reconnection_skips_redis(
        self, manager, mock_redis
    ):
        """Test reconnection with same token/SID skips Redis check.

        Args:
            manager: RedisTokenManager fixture instance.
            mock_redis: Mock Redis client fixture.
        """
        token, sid = "token1", "sid1"
        manager.token_to_socket[token] = SocketRecord(
            instance_id=manager.instance_id, sid=sid
        )
        result = await manager.link_token_to_sid(token, sid)
        assert result is None
        # Fast local path must not touch Redis at all.
        mock_redis.exists.assert_not_called()
        mock_redis.set.assert_not_called()

    async def test_link_token_to_sid_duplicate_detected(self, manager, mock_redis):
        """Test duplicate token detection generates new token.

        Args:
            manager: RedisTokenManager fixture instance.
            mock_redis: Mock Redis client fixture.
        """
        token, sid = "token1", "sid1"
        mock_redis.exists.return_value = True
        result = await manager.link_token_to_sid(token, sid)
        assert result is not None
        assert result != token
        assert len(result) == 36  # UUID4 length
        mock_redis.exists.assert_called_once_with(
            f"token_manager_socket_record_{token}"
        )
        # The replacement token (not the duplicate one) is written to Redis.
        mock_redis.set.assert_called_once_with(
            f"token_manager_socket_record_{result}",
            pickle.dumps(SocketRecord(instance_id=manager.instance_id, sid=sid)),
            ex=3600,
        )
        assert manager.token_to_sid[result] == sid
        assert manager.sid_to_token[sid] == result

    async def test_link_token_to_sid_redis_error_fallback(self, manager, mock_redis):
        """Test Redis error falls back to local manager behavior.

        Args:
            manager: RedisTokenManager fixture instance.
            mock_redis: Mock Redis client fixture.
        """
        token, sid = "token1", "sid1"
        mock_redis.exists.side_effect = Exception("Redis connection error")
        # Patch the parent implementation to verify delegation on failure.
        with patch.object(
            LocalTokenManager, "link_token_to_sid", new_callable=AsyncMock
        ) as mock_super:
            mock_super.return_value = None
            result = await manager.link_token_to_sid(token, sid)
            assert result is None
            mock_super.assert_called_once_with(token, sid)

    async def test_link_token_to_sid_redis_set_error_continues(
        self, manager, mock_redis
    ):
        """Test Redis set error doesn't prevent local storage.

        Args:
            manager: RedisTokenManager fixture instance.
            mock_redis: Mock Redis client fixture.
        """
        token, sid = "token1", "sid1"
        mock_redis.exists.return_value = False
        mock_redis.set.side_effect = Exception("Redis set error")
        result = await manager.link_token_to_sid(token, sid)
        assert result is None
        # Local dicts are still populated despite the Redis write failure.
        assert manager.token_to_sid[token] == sid
        assert manager.sid_to_token[sid] == token

    async def test_disconnect_token_owned_locally(self, manager, mock_redis):
        """Test disconnect cleans up both Redis and local mappings when owned locally.

        Args:
            manager: RedisTokenManager fixture instance.
            mock_redis: Mock Redis client fixture.
        """
        token, sid = "token1", "sid1"
        manager.token_to_socket[token] = SocketRecord(
            instance_id=manager.instance_id, sid=sid
        )
        manager.sid_to_token[sid] = token
        await manager.disconnect_token(token, sid)
        mock_redis.delete.assert_called_once_with(
            f"token_manager_socket_record_{token}"
        )
        assert token not in manager.token_to_sid
        assert sid not in manager.sid_to_token

    async def test_disconnect_token_not_owned_locally(self, manager, mock_redis):
        """Test disconnect doesn't clean up when not owned locally.

        Args:
            manager: RedisTokenManager fixture instance.
            mock_redis: Mock Redis client fixture.
        """
        token, sid = "token1", "sid1"
        # No local record exists, so the Redis key must not be deleted.
        await manager.disconnect_token(token, sid)
        mock_redis.delete.assert_not_called()

    async def test_disconnect_token_redis_error(self, manager, mock_redis):
        """Test disconnect continues with local cleanup even if Redis fails.

        Args:
            manager: RedisTokenManager fixture instance.
            mock_redis: Mock Redis client fixture.
        """
        token, sid = "token1", "sid1"
        manager.token_to_socket[token] = SocketRecord(
            instance_id=manager.instance_id, sid=sid
        )
        manager.sid_to_token[sid] = token
        mock_redis.delete.side_effect = Exception("Redis delete error")
        await manager.disconnect_token(token, sid)
        assert token not in manager.token_to_sid
        assert sid not in manager.sid_to_token

    @pytest.mark.parametrize(
        "redis_error",
        [
            Exception("Connection timeout"),
            Exception("Redis server down"),
            Exception("Network error"),
        ],
    )
    async def test_various_redis_errors_handled_gracefully(
        self, manager, mock_redis, redis_error
    ):
        """Test various Redis errors are handled gracefully.

        Args:
            manager: RedisTokenManager fixture instance.
            mock_redis: Mock Redis client fixture.
            redis_error: Exception to test error handling.
        """
        token, sid = "token1", "sid1"
        mock_redis.exists.side_effect = redis_error
        with patch.object(
            LocalTokenManager, "link_token_to_sid", new_callable=AsyncMock
        ) as mock_super:
            mock_super.return_value = None
            result = await manager.link_token_to_sid(token, sid)
            assert result is None
            mock_super.assert_called_once()

    def test_inheritance_from_local_manager(self, manager):
        """Test RedisTokenManager inherits from LocalTokenManager.

        Args:
            manager: RedisTokenManager fixture instance.
        """
        assert isinstance(manager, LocalTokenManager)
        assert hasattr(manager, "token_to_sid")
        assert hasattr(manager, "sid_to_token")
@pytest.fixture
def redis_url():
    """Return the configured Redis URL, skipping the test when it is absent."""
    if (url := config.get_config().redis_url) is None:
        pytest.skip("Redis URL not configured")
    return url
def query_string_for(token: str) -> dict[str, str]:
    """Build a WSGI-style environ fragment carrying the given token.

    Args:
        token: The token to embed in the query string.

    Returns:
        A dict with a ``QUERY_STRING`` entry of the form ``token=<token>``.
    """
    query = "token=" + token
    return {"QUERY_STRING": query}
@pytest.fixture
def event_namespace_factory() -> Generator[Callable[[], EventNamespace], None, None]:
    """Yields the EventNamespace factory function."""
    namespace = config.get_config().get_event_namespace()
    # Track every created namespace so teardown can disconnect them all.
    created_objs = []

    def new_event_namespace() -> EventNamespace:
        state = Mock()
        state.router_data = {}
        mock_app = Mock()
        # modify_state must return an async context manager yielding the state.
        mock_app.state_manager.modify_state = Mock(
            return_value=AsyncMock(__aenter__=AsyncMock(return_value=state))
        )
        event_namespace = EventNamespace(namespace=namespace, app=mock_app)
        event_namespace.emit = AsyncMock()
        created_objs.append(event_namespace)
        return event_namespace

    yield new_event_namespace
    # Teardown: each disconnect_all runs in its own fresh event loop.
    for obj in created_objs:
        asyncio.run(obj._token_manager.disconnect_all())
@pytest.mark.usefixtures("redis_url")
@pytest.mark.asyncio
async def test_redis_token_manager_enumerate_tokens(
    event_namespace_factory: Callable[[], EventNamespace],
):
    """Integration test for RedisTokenManager enumerate_tokens interface.

    Should support enumerating tokens across separate instances of the
    RedisTokenManager.

    Args:
        event_namespace_factory: Factory fixture for EventNamespace instances.
    """
    event_namespace1 = event_namespace_factory()
    event_namespace2 = event_namespace_factory()
    await event_namespace1.on_connect(sid="sid1", environ=query_string_for("token1"))
    await event_namespace2.on_connect(sid="sid2", environ=query_string_for("token2"))
    # Either manager should see tokens connected via the other instance.
    found_tokens = set()
    async for token in event_namespace1._token_manager.enumerate_tokens():
        found_tokens.add(token)
    assert "token1" in found_tokens
    assert "token2" in found_tokens
    assert len(found_tokens) == 2
    # Disconnecting one instance's tokens must not affect the other's.
    await event_namespace1._token_manager.disconnect_all()
    found_tokens = set()
    async for token in event_namespace1._token_manager.enumerate_tokens():
        found_tokens.add(token)
    assert "token2" in found_tokens
    assert len(found_tokens) == 1
    await event_namespace2._token_manager.disconnect_all()
    found_tokens = set()
    async for token in event_namespace1._token_manager.enumerate_tokens():
        found_tokens.add(token)
    assert not found_tokens
@pytest.mark.usefixtures("redis_url")
@pytest.mark.asyncio
async def test_redis_token_manager_get_token_owner(
    event_namespace_factory: Callable[[], EventNamespace],
):
    """Integration test for RedisTokenManager get_token_owner interface.

    Should support retrieving the owner of a token across separate instances of the
    RedisTokenManager.

    Args:
        event_namespace_factory: Factory fixture for EventNamespace instances.
    """
    event_namespace1 = event_namespace_factory()
    event_namespace2 = event_namespace_factory()
    await event_namespace1.on_connect(sid="sid1", environ=query_string_for("token1"))
    await event_namespace2.on_connect(sid="sid2", environ=query_string_for("token2"))
    assert isinstance((manager1 := event_namespace1._token_manager), RedisTokenManager)
    assert isinstance((manager2 := event_namespace2._token_manager), RedisTokenManager)
    # Ownership resolves identically from either instance (via Redis lookup
    # for tokens the instance does not hold locally).
    assert await manager1._get_token_owner("token1") == manager1.instance_id
    assert await manager1._get_token_owner("token2") == manager2.instance_id
    assert await manager2._get_token_owner("token1") == manager1.instance_id
    assert await manager2._get_token_owner("token2") == manager2.instance_id
async def _wait_for_call_count_positive(mock: Mock, timeout: float = 5.0):
"""Wait until the mock's call count is positive.
Args:
mock: The mock to wait on.
timeout: The maximum time to wait in seconds.
"""
deadline = time.monotonic() + timeout
while mock.call_count == 0 and time.monotonic() < deadline: # noqa: ASYNC110
await asyncio.sleep(0.1)
@pytest.mark.usefixtures("redis_url")
@pytest.mark.asyncio
async def test_redis_token_manager_lost_and_found(
    event_namespace_factory: Callable[[], EventNamespace],
):
    """Updates emitted for lost and found tokens should be routed correctly via redis.

    Args:
        event_namespace_factory: Factory fixture for EventNamespace instances.
    """
    event_namespace1 = event_namespace_factory()
    emit1_mock: Mock = event_namespace1.emit  # pyright: ignore[reportAssignmentType]
    event_namespace2 = event_namespace_factory()
    emit2_mock: Mock = event_namespace2.emit  # pyright: ignore[reportAssignmentType]
    await event_namespace1.on_connect(sid="sid1", environ=query_string_for("token1"))
    await event_namespace2.on_connect(sid="sid2", environ=query_string_for("token2"))
    # Update for token1 emitted from instance 2 must be routed to instance 1.
    await event_namespace2.emit_update(StateUpdate(), token="token1")
    await _wait_for_call_count_positive(emit1_mock)
    emit2_mock.assert_not_called()
    emit1_mock.assert_called_once()
    emit1_mock.reset_mock()
    # Update for a locally-owned token is emitted directly.
    await event_namespace2.emit_update(StateUpdate(), token="token2")
    await _wait_for_call_count_positive(emit2_mock)
    emit1_mock.assert_not_called()
    emit2_mock.assert_called_once()
    emit2_mock.reset_mock()
    if task := event_namespace1.on_disconnect(sid="sid1"):
        await task
    await event_namespace2.emit_update(StateUpdate(), token="token1")
    # Update should be dropped on the floor.
    await asyncio.sleep(2)
    emit1_mock.assert_not_called()
    emit2_mock.assert_not_called()
    # Reconnect token1 to instance 2; updates should now land there.
    await event_namespace2.on_connect(sid="sid1", environ=query_string_for("token1"))
    await event_namespace2.emit_update(StateUpdate(), token="token1")
    await _wait_for_call_count_positive(emit2_mock)
    emit1_mock.assert_not_called()
    emit2_mock.assert_called_once()
    emit2_mock.reset_mock()
    if task := event_namespace2.on_disconnect(sid="sid1"):
        await task
    # Move token1 back to instance 1 and verify routing follows.
    await event_namespace1.on_connect(sid="sid1", environ=query_string_for("token1"))
    await event_namespace2.emit_update(StateUpdate(), token="token1")
    await _wait_for_call_count_positive(emit1_mock)
    emit2_mock.assert_not_called()
    emit1_mock.assert_called_once()
    emit1_mock.reset_mock()
@pytest.mark.usefixtures("redis_url")
@pytest.mark.asyncio
async def test_redis_token_manager_lost_and_found_router_data(
    event_namespace_factory: Callable[[], EventNamespace],
):
    """Updates emitted for lost and found tokens should serialize properly.

    Args:
        event_namespace_factory: Factory fixture for EventNamespace instances.
    """
    event_namespace1 = event_namespace_factory()
    emit1_mock: Mock = event_namespace1.emit  # pyright: ignore[reportAssignmentType]
    event_namespace2 = event_namespace_factory()
    emit2_mock: Mock = event_namespace2.emit  # pyright: ignore[reportAssignmentType]
    await event_namespace1.on_connect(sid="sid1", environ=query_string_for("token1"))
    await event_namespace2.on_connect(sid="sid2", environ=query_string_for("token2"))
    router = RouterData.from_router_data(
        {"headers": {"x-test": "value"}},
    )
    # RouterData inside the delta must survive the pickle round-trip
    # through the Redis lost-and-found channel.
    await event_namespace2.emit_update(
        StateUpdate(delta={"state": {"router": router}}), token="token1"
    )
    await _wait_for_call_count_positive(emit1_mock)
    emit2_mock.assert_not_called()
    emit1_mock.assert_called_once()
    assert isinstance(emit1_mock.call_args[0][1], StateUpdate)
    assert isinstance(emit1_mock.call_args[0][1].delta["state"]["router"], RouterData)
    assert emit1_mock.call_args[0][1].delta["state"]["router"] == router
    emit1_mock.reset_mock()
| {
"repo_id": "reflex-dev/reflex",
"file_path": "tests/units/utils/test_token_manager.py",
"license": "Apache License 2.0",
"lines": 553,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
reflex-dev/reflex:tests/units/vars/test_dep_tracking.py | """Tests for dependency tracking functionality."""
from __future__ import annotations
import sys
import pytest
import reflex as rx
import tests.units.states.upload as tus_upload
from reflex.state import State
from reflex.utils.exceptions import VarValueError
from reflex.vars.dep_tracking import (
DependencyTracker,
UntrackedLocalVarError,
get_cell_value,
)
class DependencyTestState(State):
    """Test state for dependency tracking tests."""

    # Typed fields exercised by the tracker tests below.
    count: rx.Field[int] = rx.field(default=0)
    name: rx.Field[str] = rx.field(default="test")
    items: rx.Field[list[str]] = rx.field(default_factory=list)
    # Nested list used by the nested-comprehension test.
    board: rx.Field[list[list[int]]] = rx.field(default_factory=list)


class AnotherTestState(State):
    """Another test state for cross-state dependencies."""

    value: rx.Field[int] = rx.field(default=42)
    text: rx.Field[str] = rx.field(default="hello")
# NOTE: the inner helper functions in these tests are the *inputs* to
# DependencyTracker's bytecode analysis — do not restyle their bodies, as
# that changes the bytecode (and hence the behavior) under test.
def test_simple_attribute_access():
    """Test tracking simple attribute access on self."""

    def simple_func(self: DependencyTestState):
        return self.count

    tracker = DependencyTracker(simple_func, DependencyTestState)
    expected_deps = {DependencyTestState.get_full_name(): {"count"}}
    assert tracker.dependencies == expected_deps


def test_multiple_attribute_access():
    """Test tracking multiple attribute access on self."""

    def multi_attr_func(self: DependencyTestState):
        return self.count + len(self.name) + len(self.items)

    tracker = DependencyTracker(multi_attr_func, DependencyTestState)
    expected_deps = {DependencyTestState.get_full_name(): {"count", "name", "items"}}
    assert tracker.dependencies == expected_deps


def test_method_call_dependencies():
    """Test tracking dependencies from method calls."""

    class StateWithMethod(State):
        value: int = 0

        def helper_method(self):
            return self.value * 2

        def func_with_method_call(self):
            return self.helper_method()

    tracker = DependencyTracker(StateWithMethod.func_with_method_call, StateWithMethod)
    # Should track dependencies from both the method call and the method itself
    expected_deps = {StateWithMethod.get_full_name(): {"value"}}
    assert tracker.dependencies == expected_deps


def test_nested_function_dependencies():
    """Test tracking dependencies in nested functions."""

    def func_with_nested(self: DependencyTestState):
        def inner():
            return self.count

        return inner()

    tracker = DependencyTracker(func_with_nested, DependencyTestState)
    # Accesses made via the closure are attributed to the outer function.
    expected_deps = {DependencyTestState.get_full_name(): {"count"}}
    assert tracker.dependencies == expected_deps


def test_list_comprehension_dependencies():
    """Test tracking dependencies in list comprehensions."""

    def func_with_comprehension(self: DependencyTestState):
        return [x for x in self.items if len(x) > self.count]

    tracker = DependencyTracker(func_with_comprehension, DependencyTestState)
    expected_deps = {DependencyTestState.get_full_name(): {"items", "count"}}
    assert tracker.dependencies == expected_deps


def test_list_comprehension_dependencies_2():
    """Test tracking dependencies in nested list comprehensions."""

    def func_with_comprehension(self: DependencyTestState):
        return [[self.board[r][c] for r in range(3)] for c in range(5)]

    tracker = DependencyTracker(func_with_comprehension, DependencyTestState)
    expected_deps = {DependencyTestState.get_full_name(): {"board"}}
    assert tracker.dependencies == expected_deps
def test_invalid_attribute_access():
    """Test that accessing invalid attributes raises VarValueError."""

    def invalid_func(self: DependencyTestState):
        return self.parent_state

    # Reaching through parent_state would allow arbitrary state access,
    # which the tracker explicitly forbids.
    with pytest.raises(
        VarValueError, match="cannot access arbitrary state via `parent_state`"
    ):
        DependencyTracker(invalid_func, DependencyTestState)


def test_get_state_functionality():
    """Test tracking dependencies when using get_state."""

    async def func_with_get_state(self: DependencyTestState):
        other_state = await self.get_state(AnotherTestState)
        return other_state.value

    tracker = DependencyTracker(func_with_get_state, DependencyTestState)
    # The dependency is attributed to the retrieved state, not `self`.
    expected_deps = {AnotherTestState.get_full_name(): {"value"}}
    assert tracker.dependencies == expected_deps


def test_get_state_functionality_direct():
    """Test tracking dependencies when using get_state without assigning to interim local variable."""

    async def func_with_get_state_direct(self: DependencyTestState):
        return (await self.get_state(AnotherTestState)).value

    tracker = DependencyTracker(func_with_get_state_direct, DependencyTestState)
    expected_deps = {AnotherTestState.get_full_name(): {"value"}}
    assert tracker.dependencies == expected_deps


def test_get_state_with_local_var_error():
    """Test that get_state with local variables raises appropriate error."""

    async def invalid_get_state_func(self: DependencyTestState):
        state_cls = AnotherTestState
        return (await self.get_state(state_cls)).value

    # A state class smuggled through an untracked local cannot be resolved
    # statically, so the tracker must refuse rather than guess.
    with pytest.raises(
        UntrackedLocalVarError, match="'state_cls' is not tracked in the current scope"
    ):
        DependencyTracker(invalid_get_state_func, DependencyTestState)
# These tests cover every import flavor the tracker must resolve when the
# state class passed to get_state comes from a function-local import.
def test_get_state_with_import_from():
    """Test that get_state with function-local `from ... import ...` finds correct dependency."""

    async def get_state_import_from(self: DependencyTestState):
        from tests.units.states.mutation import MutableTestState

        return (await self.get_state(MutableTestState)).hashmap

    from tests.units.states.mutation import MutableTestState

    tracker = DependencyTracker(get_state_import_from, DependencyTestState)
    expected_deps = {MutableTestState.get_full_name(): {"hashmap"}}
    assert tracker.dependencies == expected_deps


def test_get_state_with_import_from_multiple():
    """Test that get_state with function-local `from ... import ...` finds correct dependency."""

    async def get_state_import_from(self: DependencyTestState):
        from tests.units.states.upload import ChildFileUploadState, SubUploadState

        return (await self.get_state(SubUploadState)).img, (
            await self.get_state(ChildFileUploadState)
        ).img_list

    tracker = DependencyTracker(get_state_import_from, DependencyTestState)
    # Each imported state contributes its own accessed attribute.
    expected_deps = {
        tus_upload.SubUploadState.get_full_name(): {"img"},
        tus_upload.ChildFileUploadState.get_full_name(): {"img_list"},
    }
    assert tracker.dependencies == expected_deps


def test_get_state_with_import_from_as():
    """Test that get_state with function-local `from ... import ... as ...` finds correct dependency."""

    async def get_state_import_from_as(self: DependencyTestState):
        from tests.units.states.mutation import MutableTestState as mts

        return (await self.get_state(mts)).hashmap

    from tests.units.states.mutation import MutableTestState

    tracker = DependencyTracker(get_state_import_from_as, DependencyTestState)
    expected_deps = {MutableTestState.get_full_name(): {"hashmap"}}
    assert tracker.dependencies == expected_deps


def test_get_state_with_import():
    """Test that get_state with function-local `import ...` finds correct dependency."""

    async def get_state_import(self: DependencyTestState):
        import tests.units.states.mutation

        return (
            await self.get_state(tests.units.states.mutation.MutableTestState)
        ).hashmap

    from tests.units.states.mutation import MutableTestState

    tracker = DependencyTracker(get_state_import, DependencyTestState)
    expected_deps = {MutableTestState.get_full_name(): {"hashmap"}}
    assert tracker.dependencies == expected_deps


def test_get_state_with_import_as():
    """Test that get_state with function-local `import ... as ...` finds correct dependency."""

    async def get_state_import_as(self: DependencyTestState):
        import tests.units.states.mutation as mutation

        return (await self.get_state(mutation.MutableTestState)).hashmap

    from tests.units.states.mutation import MutableTestState

    tracker = DependencyTracker(get_state_import_as, DependencyTestState)
    expected_deps = {MutableTestState.get_full_name(): {"hashmap"}}
    assert tracker.dependencies == expected_deps


def test_get_state_with_import_from_method():
    """Test that get_state with function-local `from ... import ...` finds correct dependency through a method call."""

    async def get_state_import_from(self: DependencyTestState):
        from tests.units.states.mutation import MutableTestState

        return (await self.get_state(MutableTestState))._get_array()

    from tests.units.states.mutation import MutableTestState

    tracker = DependencyTracker(get_state_import_from, DependencyTestState)
    # The method body is analyzed too, so its `array` access is recorded.
    expected_deps = {MutableTestState.get_full_name(): {"array"}}
    assert tracker.dependencies == expected_deps


def test_get_state_access_imported_global_module():
    """Test that get_state resolves a state class reached through a module imported at module scope."""

    async def get_state_imported_global(self: DependencyTestState):
        return (await self.get_state(tus_upload.SubUploadState)).img

    tracker = DependencyTracker(get_state_imported_global, DependencyTestState)
    expected_deps = {tus_upload.SubUploadState.get_full_name(): {"img"}}
    assert tracker.dependencies == expected_deps
def test_nested_function():
    """Test tracking dependencies in nested functions."""

    def func_with_nested(self: DependencyTestState):
        async def inner():  # noqa: RUF029
            if self.board:
                pass
            return self.count

    # Even though `inner` is never called, its closure accesses are tracked.
    tracker = DependencyTracker(func_with_nested, DependencyTestState)
    expected_deps = {DependencyTestState.get_full_name(): {"board", "count"}}
    assert tracker.dependencies == expected_deps


@pytest.mark.skipif(
    sys.version_info < (3, 11), reason="Requires Python 3.11+ for positions"
)
def test_get_var_value_functionality():
    """Test tracking dependencies when using get_var_value."""

    async def func_with_get_var_value(self: DependencyTestState):
        return await self.get_var_value(DependencyTestState.count)

    tracker = DependencyTracker(func_with_get_var_value, DependencyTestState)
    expected_deps = {DependencyTestState.get_full_name(): {"count"}}
    assert tracker.dependencies == expected_deps


@pytest.mark.skipif(
    sys.version_info < (3, 11), reason="Requires Python 3.11+ for positions"
)
def test_get_var_value_multiple_lines_functionality():
    """Test tracking dependencies when using get_var_value spread out on multiple lines."""

    async def func_with_get_var_value(self: DependencyTestState):
        # The argument deliberately spans lines (with a comment in the middle)
        # to exercise position-based source extraction.
        return await self.get_var_value(
            DependencyTestState.
            # annoying comment
            count
        )

    tracker = DependencyTracker(func_with_get_var_value, DependencyTestState)
    expected_deps = {DependencyTestState.get_full_name(): {"count"}}
    assert tracker.dependencies == expected_deps


@pytest.mark.skipif(
    sys.version_info < (3, 11), reason="Requires Python 3.11+ for positions"
)
def test_get_var_value_with_import_from():
    """Test that get_var_value with function-local `from ... import ...` finds correct dependency."""

    async def get_state_import_from(self: DependencyTestState):
        from tests.units.states.mutation import MutableTestState

        return await self.get_var_value(MutableTestState.hashmap)  # pyright: ignore[reportArgumentType]

    from tests.units.states.mutation import MutableTestState

    tracker = DependencyTracker(get_state_import_from, DependencyTestState)
    expected_deps = {MutableTestState.get_full_name(): {"hashmap"}}
    assert tracker.dependencies == expected_deps
def test_merge_deps():
    """Test merging dependencies from multiple trackers."""

    def func1(self: DependencyTestState):
        return self.count

    def func2(self: DependencyTestState):
        return self.name

    tracker1 = DependencyTracker(func1, DependencyTestState)
    tracker2 = DependencyTracker(func2, DependencyTestState)
    # _merge_deps mutates tracker1 in place, unioning per-state attr sets.
    tracker1._merge_deps(tracker2)
    expected_deps = {DependencyTestState.get_full_name(): {"count", "name"}}
    assert tracker1.dependencies == expected_deps


def test_get_globals_with_function():
    """Test _get_globals method with a function."""

    def test_func(self: DependencyTestState):
        return self.count

    tracker = DependencyTracker(test_func, DependencyTestState)
    globals_dict = tracker._get_globals()
    # A real function exposes its module globals (this test module's names).
    assert isinstance(globals_dict, dict)
    assert "DependencyTestState" in globals_dict
    assert "State" in globals_dict


def test_get_globals_with_code_object():
    """Test _get_globals method with a code object."""

    def test_func(self: DependencyTestState):
        return self.count

    code_obj = test_func.__code__
    tracker = DependencyTracker(code_obj, DependencyTestState)
    # A bare code object carries no __globals__, so the result is empty.
    globals_dict = tracker._get_globals()
    assert not globals_dict


def test_get_closure_with_function():
    """Test _get_closure method with a function that has closure."""
    outer_var = "test"

    def func_with_closure(self: DependencyTestState):
        return self.count + len(outer_var)

    tracker = DependencyTracker(func_with_closure, DependencyTestState)
    closure_dict = tracker._get_closure()
    # Free variables are exposed by name with their captured cell values.
    assert isinstance(closure_dict, dict)
    assert "outer_var" in closure_dict
    assert closure_dict["outer_var"] == "test"


def test_get_closure_with_code_object():
    """Test _get_closure method with a code object."""

    def test_func(self: DependencyTestState):
        return self.count

    code_obj = test_func.__code__
    tracker = DependencyTracker(code_obj, DependencyTestState)
    # A bare code object has no __closure__, so the result is empty.
    closure_dict = tracker._get_closure()
    assert not closure_dict
def test_property_dependencies():
    """Test tracking dependencies through property access."""

    class StateWithProperty(State):
        _value: int = 0

        def computed_value(self) -> int:
            return self._value * 2

        def func_with_property(self):
            return self.computed_value

    tracker = DependencyTracker(StateWithProperty.func_with_property, StateWithProperty)
    # Should track dependencies from the property getter
    expected_deps = {StateWithProperty.get_full_name(): {"_value"}}
    assert tracker.dependencies == expected_deps


def test_no_dependencies():
    """Test functions with no state dependencies."""

    def func_no_deps(self: DependencyTestState):
        return 42

    tracker = DependencyTracker(func_no_deps, DependencyTestState)
    assert not tracker.dependencies


def test_complex_expression_dependencies():
    """Test tracking dependencies in complex expressions."""

    def complex_func(self: DependencyTestState):
        return (self.count * 2 + len(self.name)) if self.items else 0

    tracker = DependencyTracker(complex_func, DependencyTestState)
    # All branches are analyzed statically, so every accessed attr appears.
    expected_deps = {DependencyTestState.get_full_name(): {"count", "name", "items"}}
    assert tracker.dependencies == expected_deps


def test_equality_expression_dependencies():
    """Test tracking dependencies in equality expressions.

    With the state attribute on the right hand side, python generates
    LOAD_FAST_LOAD_FAST family instructions.
    """

    def equality_func(self: DependencyTestState):
        my_val = 2
        return my_val == self.count

    tracker = DependencyTracker(equality_func, DependencyTestState)
    expected_deps = {DependencyTestState.get_full_name(): {"count"}}
    assert tracker.dependencies == expected_deps


def test_equality_expression_dependencies_lhs():
    """Test tracking dependencies in equality expressions (state on left hand side)."""

    def equality_func(self: DependencyTestState):
        my_val = 2
        return self.count == my_val

    tracker = DependencyTracker(equality_func, DependencyTestState)
    expected_deps = {DependencyTestState.get_full_name(): {"count"}}
    assert tracker.dependencies == expected_deps


def test_equality_expression_dependencies_get_state():
    """Test tracking dependencies in equality expressions with retrieved state."""

    async def equality_func_get_state(self: DependencyTestState):
        another_state = await self.get_state(AnotherTestState)
        my_val = 2
        return my_val == another_state.value

    tracker = DependencyTracker(equality_func_get_state, DependencyTestState)
    expected_deps = {AnotherTestState.get_full_name(): {"value"}}
    assert tracker.dependencies == expected_deps
def test_get_cell_value_with_valid_cell():
    """Test get_cell_value with a valid cell containing a value."""
    # Create a closure to get a cell object
    value = "test_value"

    def outer():
        def inner():
            return value

        return inner

    inner_func = outer()
    assert inner_func.__closure__ is not None
    cell = inner_func.__closure__[0]
    result = get_cell_value(cell)
    assert result == "test_value"


def test_cross_state_dependencies_complex():
    """Test complex cross-state dependency scenarios."""

    class StateA(State):
        value_a: int = 1

    class StateB(State):
        value_b: int = 2

    async def complex_cross_state_func(self: DependencyTestState):
        state_a = await self.get_state(StateA)
        state_b = await self.get_state(StateB)
        return state_a.value_a + state_b.value_b

    tracker = DependencyTracker(complex_cross_state_func, DependencyTestState)
    # Each retrieved state is tracked independently under its own name.
    expected_deps = {
        StateA.get_full_name(): {"value_a"},
        StateB.get_full_name(): {"value_b"},
    }
    assert tracker.dependencies == expected_deps


def test_dependencies_with_computed_var():
    """Test that computed vars are handled correctly in dependency tracking."""

    class StateWithComputedVar(State):
        base_value: int = 0

        @rx.var
        def computed_value(self) -> int:
            return self.base_value * 2

    def func_using_computed_var(self: StateWithComputedVar):
        return self.computed_value

    tracker = DependencyTracker(func_using_computed_var, StateWithComputedVar)
    # Should track the computed var, not its dependencies
    expected_deps = {StateWithComputedVar.get_full_name(): {"computed_value"}}
    assert tracker.dependencies == expected_deps
| {
"repo_id": "reflex-dev/reflex",
"file_path": "tests/units/vars/test_dep_tracking.py",
"license": "Apache License 2.0",
"lines": 358,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
reflex-dev/reflex:tests/units/vars/test_dep_tracking_integration.py | """Integration tests for dependency tracking with computed vars."""
from __future__ import annotations
import reflex as rx
from reflex.state import State
class IntegrationTestState(State):
    """State for integration testing with dependency tracker.

    The computed var bodies below are inputs to bytecode-level dependency
    analysis; the tests assert on exactly the attributes they touch.
    """

    count: int = 0
    name: str = "test"
    items: list[str] = []

    @rx.var
    def computed_count(self) -> int:
        """A computed var that depends on count.

        Returns:
            The double of the count.
        """
        return self.count * 2

    @rx.var
    def computed_name_length(self) -> int:
        """A computed var that depends on name.

        Returns:
            The length of the name.
        """
        return len(self.name)

    @rx.var
    def complex_computed(self) -> str:
        """A computed var with complex dependencies.

        Returns:
            A string combining name, count, and items length.
        """
        return f"{self.name}_{self.count}_{len(self.items)}"

    def helper_method(self) -> int:
        """A helper method that accesses state.

        Returns:
            The sum of count and the length of name.
        """
        return self.count + len(self.name)
class OtherIntegrationState(State):
    """Another state for cross-state dependency testing."""

    value: int = 42

    @rx.var
    def doubled_value(self) -> int:
        """A computed var that depends on value.

        Returns:
            The double of the value.
        """
        return self.value * 2
def test_computed_var_dependencies():
    """Test that computed vars automatically track dependencies correctly."""
    # Test the _deps method which uses DependencyTracker internally
    computed_count = IntegrationTestState.computed_vars["computed_count"]
    deps = computed_count._deps(objclass=IntegrationTestState)
    expected_deps = {IntegrationTestState.get_full_name(): {"count"}}
    assert deps == expected_deps


def test_complex_computed_var_dependencies():
    """Test complex computed var with multiple dependencies."""
    complex_computed = IntegrationTestState.computed_vars["complex_computed"]
    deps = complex_computed._deps(objclass=IntegrationTestState)
    # The f-string body touches name, count, and items.
    expected_deps = {IntegrationTestState.get_full_name(): {"name", "count", "items"}}
    assert deps == expected_deps


def test_multiple_computed_vars():
    """Test that different computed vars track their own dependencies."""
    computed_count = IntegrationTestState.computed_vars["computed_count"]
    computed_name_length = IntegrationTestState.computed_vars["computed_name_length"]
    count_deps = computed_count._deps(objclass=IntegrationTestState)
    name_deps = computed_name_length._deps(objclass=IntegrationTestState)
    # Dependency sets are per-var; they must not leak into one another.
    assert count_deps == {IntegrationTestState.get_full_name(): {"count"}}
    assert name_deps == {IntegrationTestState.get_full_name(): {"name"}}


def test_method_dependencies_integration():
    """Test tracking dependencies through method calls in computed vars."""

    class StateWithMethodDeps(State):
        value: int = 0

        def helper_method(self):
            return self.value

        @rx.var
        def computed_with_method(self) -> int:
            return self.helper_method() * 2

    computed = StateWithMethodDeps.computed_vars["computed_with_method"]
    deps = computed._deps(objclass=StateWithMethodDeps)
    # Dependencies are traced through the called method into `value`.
    expected_deps = {StateWithMethodDeps.get_full_name(): {"value"}}
    assert deps == expected_deps
def test_cross_state_dependencies():
    """Test dependencies across different state classes."""

    class StateWithCrossDeps(State):
        @rx.var
        async def cross_state_computed(self) -> int:
            other = await self.get_state(OtherIntegrationState)
            return other.value + 10

    computed = StateWithCrossDeps.computed_vars["cross_state_computed"]
    deps = computed._deps(objclass=StateWithCrossDeps)
    # The dependency lands on the retrieved state, not the declaring one.
    expected_deps = {OtherIntegrationState.get_full_name(): {"value"}}
    assert deps == expected_deps


def test_nested_function_in_computed_var():
    """Test that nested functions within computed vars track dependencies."""

    class StateWithNested(State):
        items: list[str] = []
        multiplier: int = 2

        @rx.var
        def nested_computed(self) -> int:
            def inner():
                return len(self.items) * self.multiplier

            return inner()

    computed = StateWithNested.computed_vars["nested_computed"]
    deps = computed._deps(objclass=StateWithNested)
    expected_deps = {StateWithNested.get_full_name(): {"items", "multiplier"}}
    assert deps == expected_deps


def test_list_comprehension_in_computed_var():
    """Test that list comprehensions in computed vars track dependencies."""

    class StateWithComprehension(State):
        items: list[str] = []
        min_length: int = 3

        @rx.var
        def filtered_items(self) -> list[str]:
            return [item for item in self.items if len(item) >= self.min_length]

    computed = StateWithComprehension.computed_vars["filtered_items"]
    deps = computed._deps(objclass=StateWithComprehension)
    expected_deps = {StateWithComprehension.get_full_name(): {"items", "min_length"}}
    assert deps == expected_deps


def test_property_access_in_computed_var():
    """Test that property access in computed vars tracks dependencies."""

    class StateWithProperty(State):
        _internal_value: int = 0

        @property
        def value_property(self):
            return self._internal_value * 2

        @rx.var
        def computed_with_property(self) -> int:
            return self.value_property + 1

    computed = StateWithProperty.computed_vars["computed_with_property"]
    deps = computed._deps(objclass=StateWithProperty)
    # Tracking follows the property getter down to the backing attribute.
    expected_deps = {StateWithProperty.get_full_name(): {"_internal_value"}}
    assert deps == expected_deps
def test_no_dependencies_computed_var():
    """Test computed vars with no state dependencies."""

    class StateWithNoDeps(State):
        @rx.var
        def constant_computed(self) -> int:
            return 42

    computed = StateWithNoDeps.computed_vars["constant_computed"]
    deps = computed._deps(objclass=StateWithNoDeps)
    # Should have no dependencies
    assert deps == {}


def test_conditional_dependencies():
    """Test computed vars with conditional dependencies."""

    class StateWithConditional(State):
        flag: bool = True
        value_a: int = 10
        value_b: int = 20

        @rx.var
        def conditional_computed(self) -> int:
            return self.value_a if self.flag else self.value_b

    computed = StateWithConditional.computed_vars["conditional_computed"]
    deps = computed._deps(objclass=StateWithConditional)
    # Should track all potentially accessed attributes
    expected_deps = {
        StateWithConditional.get_full_name(): {"flag", "value_a", "value_b"}
    }
    assert deps == expected_deps


def test_error_handling_in_dependency_tracking():
    """Test that dependency tracking handles errors gracefully."""

    class StateWithError(State):
        value: int = 0

        @rx.var
        def computed_with_error(self) -> int:
            # This should still track 'value' even if there are other issues
            return self.value

    computed = StateWithError.computed_vars["computed_with_error"]
    deps = computed._deps(objclass=StateWithError)
    expected_deps = {StateWithError.get_full_name(): {"value"}}
    assert deps == expected_deps
| {
"repo_id": "reflex-dev/reflex",
"file_path": "tests/units/vars/test_dep_tracking_integration.py",
"license": "Apache License 2.0",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
reflex-dev/reflex:reflex/components/core/window_events.py | """Window event listener component for Reflex."""
from __future__ import annotations
from typing import Any, cast
import reflex as rx
from reflex.components.base.fragment import Fragment
from reflex.components.component import StatefulComponent, field
from reflex.constants.compiler import Hooks
from reflex.event import key_event, no_args_event_spec
from reflex.vars.base import Var, VarData
from reflex.vars.object import ObjectVar
def _on_resize_spec() -> tuple[Var[int], Var[int]]:
    """Args spec for the on_resize event trigger.

    Returns:
        A tuple containing window width and height variables.
    """
    window_width = Var("window.innerWidth")
    window_height = Var("window.innerHeight")
    return (window_width, window_height)
def _on_scroll_spec() -> tuple[Var[float], Var[float]]:
    """Args spec for the on_scroll event trigger.

    Returns:
        A tuple containing window scroll X and Y position variables.
    """
    scroll_x = Var("window.scrollX")
    scroll_y = Var("window.scrollY")
    return (scroll_x, scroll_y)
def _on_visibility_change_spec() -> tuple[Var[bool]]:
    """Args spec for the on_visibility_change event trigger.

    Returns:
        A tuple containing the document hidden state variable.
    """
    hidden = Var("document.hidden")
    return (hidden,)
def _on_storage_spec(e: ObjectVar) -> tuple[Var[str], Var[str], Var[str], Var[str]]:
    """Args spec for the on_storage event trigger.

    Args:
        e: The storage event.

    Returns:
        A tuple containing key, old value, new value, and URL variables.
    """
    storage_key = e.key.to(str)
    previous_value = e.oldValue.to(str)
    current_value = e.newValue.to(str)
    origin_url = e.url.to(str)
    return (storage_key, previous_value, current_value, origin_url)
class WindowEventListener(Fragment):
    """A component that listens for window events.

    Renders as a plain Fragment; the event handlers are wired up via
    useEffect hooks added in add_hooks rather than as element props.
    """

    # Event handlers
    on_resize: rx.EventHandler[_on_resize_spec]
    on_scroll: rx.EventHandler[_on_scroll_spec]
    on_focus: rx.EventHandler[no_args_event_spec]
    on_blur: rx.EventHandler[no_args_event_spec]
    on_visibility_change: rx.EventHandler[_on_visibility_change_spec]
    on_before_unload: rx.EventHandler[no_args_event_spec]
    on_key_down: rx.EventHandler[key_event]
    on_popstate: rx.EventHandler[no_args_event_spec]
    on_storage: rx.EventHandler[_on_storage_spec]

    # Pre-compiled trigger hooks; not emitted as a JavaScript prop.
    hooks: list[str] = field(default_factory=list, is_javascript_property=False)

    @classmethod
    def create(cls, **props) -> WindowEventListener:
        """Create a WindowEventListener component.

        Args:
            **props: The props to set on the component.

        Returns:
            The created component.
        """
        real_component = cast("WindowEventListener", super().create(**props))
        # Reuse StatefulComponent's trigger rewriting to produce hook code.
        hooks = StatefulComponent._fix_event_triggers(real_component)
        real_component.hooks = hooks
        return real_component

    def _exclude_props(self) -> list[str]:
        """Exclude event handler props from being passed to Fragment.

        Returns:
            List of prop names to exclude from the Fragment.
        """
        # Triggers are attached to `window` via hooks, not rendered as props.
        return [*super()._exclude_props(), *self.event_triggers.keys()]

    def add_hooks(self) -> list[str | Var[Any]]:
        """Add hooks to register window event listeners.

        Returns:
            The hooks to add to the component.
        """
        hooks: list[str | Var[Any]] = [*self.hooks]
        for prop_name, event_trigger in self.event_triggers.items():
            # Get JS event name: remove on_ prefix and underscores
            event_name = prop_name.removeprefix("on_").replace("_", "")
            # Each listener is registered on mount and removed on unmount;
            # the window guard keeps this safe during SSR.
            hook_expr = f"""
            useEffect(() => {{
                if (typeof window === 'undefined') return;
                const fn = {Var.create(event_trigger)};
                window.addEventListener('{event_name}', fn);
                return () => window.removeEventListener('{event_name}', fn);
            }}, []);
            """
            hooks.append(
                Var(
                    hook_expr,
                    _var_data=VarData(position=Hooks.HookPosition.POST_TRIGGER),
                )
            )
        return hooks
window_event_listener = WindowEventListener.create
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/components/core/window_events.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
reflex-dev/reflex:tests/units/utils/test_processes.py | """Test process utilities."""
import socket
import threading
from contextlib import closing
from unittest import mock
import pytest
from reflex.testing import DEFAULT_TIMEOUT, AppHarness
from reflex.utils.processes import is_process_on_port
def test_is_process_on_port_free_port():
    """Test is_process_on_port returns False when port is free."""
    # Ask the OS for an ephemeral port, then release it immediately.
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as probe:
        probe.bind(("", 0))
        candidate_port = probe.getsockname()[1]
    # Once the probe socket is closed, the port must register as free.
    assert not is_process_on_port(candidate_port)
def test_is_process_on_port_occupied_port():
    """Test is_process_on_port returns True when port is occupied."""
    # Hold an open, listening socket for the duration of the check.
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as listener:
        listener.bind(("", 0))
        listener.listen(1)
        bound_port = listener.getsockname()[1]
        # While the listener is alive, the port must register as occupied.
        assert is_process_on_port(bound_port)
def test_is_process_on_port_ipv6():
    """Test is_process_on_port works with IPv6."""
    try:
        # Any OSError during socket setup means IPv6 is unusable here.
        v6_listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        v6_listener.bind(("", 0))
        v6_listener.listen(1)
        bound_port = v6_listener.getsockname()[1]
        try:
            # The IPv6-only listener must still be detected as occupying the port.
            assert is_process_on_port(bound_port)
        finally:
            v6_listener.close()
    except OSError:
        # IPv6 might not be available on some systems
        pytest.skip("IPv6 not available on this system")
def test_is_process_on_port_both_protocols():
    """Test is_process_on_port detects occupation on either IPv4 or IPv6."""
    # An IPv4-only listener must be enough for the port to count as taken.
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as v4_listener:
        v4_listener.bind(("", 0))
        v4_listener.listen(1)
        assert is_process_on_port(v4_listener.getsockname()[1])
@pytest.mark.parametrize("port", [0, 1, 80, 443, 8000, 3000, 65535])
def test_is_process_on_port_various_ports(port):
    """Test is_process_on_port with various port numbers.

    Args:
        port: The port number to test.
    """
    # Smoke test only: the outcome depends on the host, but the call must
    # never crash and must always yield a boolean.
    outcome = is_process_on_port(port)
    assert isinstance(outcome, bool)
def test_is_process_on_port_mock_socket_error():
    """Test is_process_on_port handles socket errors gracefully."""
    with mock.patch("socket.socket") as patched_socket_cls:
        fake_socket = mock.MagicMock()
        patched_socket_cls.return_value = fake_socket
        fake_socket.__enter__.return_value = fake_socket
        fake_socket.bind.side_effect = OSError("Mock socket error")
        # A bind failure is interpreted as "port occupied".
        assert is_process_on_port(8080) is True
def test_is_process_on_port_permission_error():
    """Test is_process_on_port handles permission errors."""
    with mock.patch("socket.socket") as patched_socket:
        fake_sock = mock.MagicMock()
        patched_socket.return_value = fake_sock
        fake_sock.__enter__.return_value = fake_sock
        fake_sock.bind.side_effect = PermissionError("Permission denied")
        # Being unable to bind — even for privilege reasons — means the port
        # is effectively occupied from the caller's point of view.
        assert is_process_on_port(80) is True
def test_is_process_on_port_concurrent_access():
    """Test is_process_on_port works correctly with concurrent access."""
    # `shared` carries the bound port number from the worker thread back to
    # the main thread; the two events sequence the open/close handshake.
    shared = None
    is_open = threading.Event()
    do_close = threading.Event()

    def create_server_and_test():
        # Worker: bind an ephemeral port, publish it, then keep the socket
        # open until the main thread has finished its "occupied" check.
        nonlocal do_close, is_open, shared
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.bind(("", 0))
        server.listen(1)
        port = server.getsockname()[1]
        shared = port
        is_open.set()
        do_close.wait(timeout=DEFAULT_TIMEOUT)
        server.close()

    thread = threading.Thread(target=create_server_and_test)
    thread.start()
    # Wait until the worker has actually bound the port before checking.
    is_open.wait(timeout=DEFAULT_TIMEOUT)
    try:
        assert shared is not None
        # Port should be occupied while server is running (both bound-only and listening)
        assert AppHarness._poll_for(
            lambda: shared is not None and is_process_on_port(shared)
        )
    finally:
        do_close.set()
        thread.join(timeout=DEFAULT_TIMEOUT)
    # Give it a moment for the socket to be fully released
    assert AppHarness._poll_for(
        lambda: shared is not None and not is_process_on_port(shared)
    )
| {
"repo_id": "reflex-dev/reflex",
"file_path": "tests/units/utils/test_processes.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
reflex-dev/reflex:reflex/components/react_router/dom.py | """Components for client side navigation within React Router applications."""
from __future__ import annotations
from typing import ClassVar, Literal, TypedDict
from reflex.components.el.elements.inline import A
from reflex.vars.base import Var
LiteralLinkDiscover = Literal["none", "render"]
class To(TypedDict):
    """Structured object for navigating via the `to` prop."""

    # A URL pathname, beginning with a /.
    pathname: str
    # A URL search string, beginning with a ?.
    search: str
    # A URL fragment identifier, beginning with a #.
    hash: str
class ReactRouterLink(A):
    """Links are accessible elements used primarily for navigation. This component is styled to resemble a hyperlink and semantically renders an <a>."""

    library = "react-router"

    tag = "Link"

    alias = "ReactRouterLink"

    # The page to link to.
    to: Var[str | To]

    # Replaces the current entry in the history stack instead of pushing a new one onto it.
    replace: Var[bool]

    # Will use document navigation instead of client side routing when the link is clicked: the browser will handle the transition normally (as if it were an <a href>).
    reload_document: Var[bool]

    # Prevents the scroll position from being reset to the top of the window when the link is clicked and the app is using ScrollRestoration. This only prevents new locations resetting scroll to the top, scroll position will be restored for back/forward button navigation.
    prevent_scroll_reset: Var[bool]

    # Defines the link discovery behavior
    discover: Var[LiteralLinkDiscover]

    # Enables a View Transition for this navigation.
    view_transition: Var[bool]

    @classmethod
    def create(cls, *children, **props):
        """Create a ReactRouterLink component for client-side navigation.

        Args:
            *children: The children of the component.
            **props: The props of the component.

        Returns:
            The ReactRouterLink component.
        """
        # React Router special behavior is triggered on the `to` prop, not href.
        # An explicit `to` always wins; `href` is only promoted when `to` is absent.
        if "to" not in props and "href" in props:
            props["to"] = props.pop("href")
        return super().create(*children, **props)

    # Nesting anchors inside anchors is invalid HTML; reject it at build time.
    _invalid_children: ClassVar[list[str]] = ["A", "ReactRouterLink"]
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/components/react_router/dom.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
reflex-dev/reflex:tests/units/test_environment.py | """Tests for the environment module."""
import enum
import os
import tempfile
from pathlib import Path
from typing import Annotated
from unittest.mock import patch
import pytest
from reflex import constants
from reflex.environment import (
EnvironmentVariables,
EnvVar,
ExecutorType,
ExistingPath,
PerformanceMode,
SequenceOptions,
_load_dotenv_from_files,
_paths_from_env_files,
_paths_from_environment,
env_var,
environment,
get_default_value_for_field,
get_type_hints_environment,
interpret_boolean_env,
interpret_enum_env,
interpret_env_var_value,
interpret_existing_path_env,
interpret_int_env,
interpret_path_env,
interpret_plugin_class_env,
interpret_plugin_env,
)
from reflex.plugins import Plugin
from reflex.utils.exceptions import EnvironmentVarValueError
# NOTE: this class is looked up dynamically by its dotted path
# ("tests.units.test_environment.TestPlugin") in the interpret_plugin_env tests.
class TestPlugin(Plugin):
    """Test plugin for testing purposes."""
class _TestEnum(enum.Enum):
    """Test enum for testing purposes."""

    # Values double as the string forms parsed by interpret_enum_env.
    VALUE1 = "value1"
    VALUE2 = "value2"
class TestInterpretFunctions:
    """Test the interpret functions."""

    def test_interpret_boolean_env_true_values(self):
        """Test boolean interpretation with true values."""
        true_values = ["true", "1", "yes", "y", "TRUE", "True", "YES", "Y"]
        for value in true_values:
            assert interpret_boolean_env(value, "TEST_FIELD") is True

    def test_interpret_boolean_env_false_values(self):
        """Test boolean interpretation with false values."""
        false_values = ["false", "0", "no", "n", "FALSE", "False", "NO", "N"]
        for value in false_values:
            assert interpret_boolean_env(value, "TEST_FIELD") is False

    def test_interpret_boolean_env_invalid_value(self):
        """Test boolean interpretation with invalid values."""
        with pytest.raises(EnvironmentVarValueError, match="Invalid boolean value"):
            interpret_boolean_env("invalid", "TEST_FIELD")

    def test_interpret_int_env_valid(self):
        """Test integer interpretation with valid values."""
        assert interpret_int_env("42", "TEST_FIELD") == 42
        assert interpret_int_env("-10", "TEST_FIELD") == -10
        assert interpret_int_env("0", "TEST_FIELD") == 0

    def test_interpret_int_env_invalid(self):
        """Test integer interpretation with invalid values."""
        with pytest.raises(EnvironmentVarValueError, match="Invalid integer value"):
            interpret_int_env("not_a_number", "TEST_FIELD")

    def test_interpret_path_env(self):
        """Test path interpretation."""
        result = interpret_path_env("/some/path", "TEST_FIELD")
        assert isinstance(result, Path)
        assert str(result) == str(Path("/some/path"))

    def test_interpret_existing_path_env_valid(self):
        """Test existing path interpretation with valid path."""
        with tempfile.TemporaryDirectory() as temp_dir:
            result = interpret_existing_path_env(temp_dir, "TEST_FIELD")
            assert isinstance(result, Path)
            assert result.exists()

    def test_interpret_existing_path_env_invalid(self):
        """Test existing path interpretation with non-existent path."""
        with pytest.raises(EnvironmentVarValueError, match="Path does not exist"):
            interpret_existing_path_env("/non/existent/path", "TEST_FIELD")

    def test_interpret_plugin_env_valid(self):
        """Test plugin interpretation with valid plugin."""
        result = interpret_plugin_env(
            "tests.units.test_environment.TestPlugin", "TEST_FIELD"
        )
        assert isinstance(result, TestPlugin)

    def test_interpret_plugin_env_invalid_format(self):
        """Test plugin interpretation with invalid format."""
        with pytest.raises(EnvironmentVarValueError, match="Invalid plugin value"):
            interpret_plugin_env("invalid_format", "TEST_FIELD")

    def test_interpret_plugin_env_import_error(self):
        """Test plugin interpretation with import error."""
        with pytest.raises(EnvironmentVarValueError, match="Failed to import module"):
            interpret_plugin_env("non.existent.module.Plugin", "TEST_FIELD")

    def test_interpret_plugin_env_missing_class(self):
        """Test plugin interpretation with missing class."""
        with pytest.raises(EnvironmentVarValueError, match="Invalid plugin class"):
            interpret_plugin_env(
                "tests.units.test_environment.NonExistentPlugin", "TEST_FIELD"
            )

    def test_interpret_plugin_env_invalid_class(self):
        """Test plugin interpretation with an existing class that is not a Plugin."""
        # BUGFIX: previously pointed at "TestEnum", which does not exist in this
        # module (the enum is named `_TestEnum`), so this test exercised the
        # missing-class path instead of the non-Plugin-class path it claims to test.
        with pytest.raises(EnvironmentVarValueError, match="Invalid plugin class"):
            interpret_plugin_env("tests.units.test_environment._TestEnum", "TEST_FIELD")

    def test_interpret_plugin_class_env_valid(self):
        """Test plugin class interpretation returns the class, not an instance."""
        result = interpret_plugin_class_env(
            "tests.units.test_environment.TestPlugin", "TEST_FIELD"
        )
        assert result is TestPlugin

    def test_interpret_plugin_class_env_invalid_format(self):
        """Test plugin class interpretation with invalid format."""
        with pytest.raises(EnvironmentVarValueError, match="Invalid plugin value"):
            interpret_plugin_class_env("invalid_format", "TEST_FIELD")

    def test_interpret_plugin_class_env_import_error(self):
        """Test plugin class interpretation with import error."""
        with pytest.raises(EnvironmentVarValueError, match="Failed to import module"):
            interpret_plugin_class_env("non.existent.module.Plugin", "TEST_FIELD")

    def test_interpret_plugin_class_env_invalid_class(self):
        """Test plugin class interpretation with an existing non-Plugin class."""
        # BUGFIX: same as test_interpret_plugin_env_invalid_class — use the real
        # `_TestEnum` name so an *existing* non-Plugin class is exercised.
        with pytest.raises(EnvironmentVarValueError, match="Invalid plugin class"):
            interpret_plugin_class_env(
                "tests.units.test_environment._TestEnum", "TEST_FIELD"
            )

    def test_interpret_enum_env_valid(self):
        """Test enum interpretation with valid values."""
        result = interpret_enum_env("value1", _TestEnum, "TEST_FIELD")
        assert result == _TestEnum.VALUE1

    def test_interpret_enum_env_invalid(self):
        """Test enum interpretation with invalid values."""
        with pytest.raises(EnvironmentVarValueError, match="Invalid enum value"):
            interpret_enum_env("invalid_value", _TestEnum, "TEST_FIELD")
class TestInterpretEnvVarValue:
    """Test the interpret_env_var_value function."""

    def test_interpret_string(self):
        """Test string interpretation."""
        # Surrounding whitespace is stripped for plain strings.
        result = interpret_env_var_value(" test ", str, "TEST_FIELD")
        assert result == "test"

    def test_interpret_boolean(self):
        """Test boolean interpretation."""
        result = interpret_env_var_value("true", bool, "TEST_FIELD")
        assert result is True

    def test_interpret_int(self):
        """Test integer interpretation."""
        result = interpret_env_var_value("42", int, "TEST_FIELD")
        assert result == 42

    def test_interpret_path(self):
        """Test path interpretation."""
        result = interpret_env_var_value("/test/path", Path, "TEST_FIELD")
        assert isinstance(result, Path)

    def test_interpret_existing_path(self):
        """Test existing path interpretation."""
        with tempfile.TemporaryDirectory() as temp_dir:
            result = interpret_env_var_value(temp_dir, ExistingPath, "TEST_FIELD")
            assert isinstance(result, Path)

    def test_interpret_plugin(self):
        """Test plugin interpretation."""
        # The value is a dotted import path resolved to a Plugin instance.
        result = interpret_env_var_value(
            "tests.units.test_environment.TestPlugin", Plugin, "TEST_FIELD"
        )
        assert isinstance(result, TestPlugin)

    def test_interpret_plugin_class(self):
        """Test type[Plugin] interpretation returns the class."""
        result = interpret_env_var_value(
            "tests.units.test_environment.TestPlugin",
            type[Plugin],
            "TEST_FIELD",
        )
        assert result is TestPlugin

    def test_interpret_list(self):
        """Test list interpretation."""
        # Sequence values use ":" as the default delimiter.
        result = interpret_env_var_value("1:2:3", list[int], "TEST_FIELD")
        assert result == [1, 2, 3]

    def test_interpret_annotated_sequence(self):
        """Test annotated sequence interpretation."""
        # SequenceOptions overrides the delimiter and enables element stripping.
        annotated_type = Annotated[
            list[str], SequenceOptions(delimiter=",", strip=True)
        ]
        result = interpret_env_var_value("a, b, c ", annotated_type, "TEST_FIELD")
        assert result == ["a", "b", "c"]

    def test_interpret_enum(self):
        """Test enum interpretation."""
        result = interpret_env_var_value("value1", _TestEnum, "TEST_FIELD")
        assert result == _TestEnum.VALUE1

    def test_interpret_union_tries_each_type(self):
        """Test that union types try each type in order."""
        # str matches first
        assert interpret_env_var_value("hello", int | str, "TEST_FIELD") == "hello"
        # int matches first
        assert interpret_env_var_value("42", int | str, "TEST_FIELD") == 42
        # bool matches before str
        assert interpret_env_var_value("true", bool | str, "TEST_FIELD") is True

    def test_interpret_union_no_match(self):
        """Test that union types raise an error if no type matches."""
        with pytest.raises(EnvironmentVarValueError, match="Could not interpret"):
            interpret_env_var_value("not_a_number", int | bool, "TEST_FIELD")

    def test_interpret_unsupported_type(self):
        """Test unsupported type raises an error."""
        with pytest.raises(ValueError, match="Invalid type for environment variable"):
            interpret_env_var_value("test", dict, "TEST_FIELD")

    def test_interpret_optional_type(self):
        """Test optional type interpretation."""
        # This should work by extracting the inner type
        result = interpret_env_var_value("42", int | None, "TEST_FIELD")
        assert result == 42
class TestEnvVar:
    """Test the EnvVar class."""

    def test_init(self):
        """Test EnvVar initialization."""
        env_var_instance = EnvVar("TEST_VAR", "default", str)
        assert env_var_instance.name == "TEST_VAR"
        assert env_var_instance.default == "default"
        assert env_var_instance.type_ is str

    def test_interpret(self):
        """Test EnvVar interpret method."""
        env_var_instance = EnvVar("TEST_VAR", 0, int)
        result = env_var_instance.interpret("42")
        assert result == 42

    def test_getenv_set(self, monkeypatch):
        """Test getenv when environment variable is set.

        Args:
            monkeypatch: pytest monkeypatch fixture.
        """
        monkeypatch.setenv("TEST_VAR", "test_value")
        env_var_instance = EnvVar("TEST_VAR", "default", str)
        result = env_var_instance.getenv()
        assert result == "test_value"

    def test_getenv_not_set(self):
        """Test getenv when environment variable is not set."""
        env_var_instance = EnvVar("NONEXISTENT_VAR", "default", str)
        result = env_var_instance.getenv()
        assert result is None

    def test_getenv_empty_string(self, monkeypatch):
        """Test getenv with empty string value.

        Args:
            monkeypatch: pytest monkeypatch fixture.
        """
        # Empty values are treated the same as unset.
        monkeypatch.setenv("TEST_VAR", "")
        env_var_instance = EnvVar("TEST_VAR", "default", str)
        result = env_var_instance.getenv()
        assert result is None

    def test_getenv_whitespace_only(self, monkeypatch):
        """Test getenv with whitespace-only value.

        Args:
            monkeypatch: pytest monkeypatch fixture.
        """
        # Whitespace-only values are also treated as unset.
        monkeypatch.setenv("TEST_VAR", "   ")
        env_var_instance = EnvVar("TEST_VAR", "default", str)
        result = env_var_instance.getenv()
        assert result is None

    def test_is_set_true(self, monkeypatch):
        """Test is_set when variable is set.

        Args:
            monkeypatch: pytest monkeypatch fixture.
        """
        monkeypatch.setenv("TEST_VAR", "value")
        env_var_instance = EnvVar("TEST_VAR", "default", str)
        assert env_var_instance.is_set() is True

    def test_is_set_false(self):
        """Test is_set when variable is not set."""
        env_var_instance = EnvVar("NONEXISTENT_VAR", "default", str)
        assert env_var_instance.is_set() is False

    def test_is_set_empty_string(self, monkeypatch):
        """Test is_set with empty string.

        Args:
            monkeypatch: pytest monkeypatch fixture.
        """
        monkeypatch.setenv("TEST_VAR", "")
        env_var_instance = EnvVar("TEST_VAR", "default", str)
        assert env_var_instance.is_set() is False

    def test_get_with_env_value(self, monkeypatch):
        """Test get method when environment variable is set.

        Args:
            monkeypatch: pytest monkeypatch fixture.
        """
        monkeypatch.setenv("TEST_VAR", "env_value")
        env_var_instance = EnvVar("TEST_VAR", "default", str)
        result = env_var_instance.get()
        assert result == "env_value"

    def test_get_with_default(self):
        """Test get method when environment variable is not set."""
        env_var_instance = EnvVar("NONEXISTENT_VAR", "default", str)
        result = env_var_instance.get()
        assert result == "default"

    def test_set_string_value(self):
        """Test setting a string value."""
        env_var_instance = EnvVar("TEST_VAR", "default", str)
        env_var_instance.set("new_value")  # type: ignore[arg-type]
        assert os.environ.get("TEST_VAR") == "new_value"
        # Clean up
        del os.environ["TEST_VAR"]

    def test_set_none_value(self, monkeypatch):
        """Test setting None value removes the environment variable.

        Args:
            monkeypatch: pytest monkeypatch fixture.
        """
        monkeypatch.setenv("TEST_VAR", "value")
        env_var_instance = EnvVar("TEST_VAR", "default", str)
        env_var_instance.set(None)
        assert "TEST_VAR" not in os.environ

    def test_set_enum_value(self):
        """Test setting an enum value."""
        # Enums are serialized by their value, not their name.
        env_var_instance = EnvVar("TEST_VAR", _TestEnum.VALUE1, _TestEnum)
        env_var_instance.set(_TestEnum.VALUE2)  # type: ignore[arg-type]
        assert os.environ.get("TEST_VAR") == "value2"
        # Clean up
        del os.environ["TEST_VAR"]

    def test_set_list_value(self):
        """Test setting a list value."""
        # Lists are serialized with ":" as the delimiter.
        env_var_instance = EnvVar("TEST_VAR", [], list[int])
        env_var_instance.set([1, 2, 3])  # type: ignore[arg-type]
        assert os.environ.get("TEST_VAR") == "1:2:3"
        # Clean up
        del os.environ["TEST_VAR"]
class TestEnvVarDescriptor:
    """Test the env_var descriptor."""

    def test_descriptor_get_normal(self):
        """Test getting EnvVar from descriptor."""

        class TestEnv:
            TEST_VAR: EnvVar[str] = env_var("default")

        descriptor_value = TestEnv.TEST_VAR
        assert isinstance(descriptor_value, EnvVar)
        # The attribute name becomes the environment variable name verbatim.
        assert descriptor_value.name == "TEST_VAR"
        assert descriptor_value.default == "default"

    def test_descriptor_get_internal(self):
        """Test getting internal EnvVar from descriptor."""

        class TestEnv:
            INTERNAL_VAR: EnvVar[str] = env_var("default", internal=True)

        descriptor_value = TestEnv.INTERNAL_VAR
        assert isinstance(descriptor_value, EnvVar)
        # Internal vars get a double-underscore prefix on the variable name.
        assert descriptor_value.name == "__INTERNAL_VAR"
        assert descriptor_value.default == "default"
class TestExecutorType:
    """Test the ExecutorType enum and related functionality."""

    def test_executor_type_values(self):
        """Test ExecutorType enum values."""
        assert ExecutorType.THREAD.value == "thread"
        assert ExecutorType.PROCESS.value == "process"
        assert ExecutorType.MAIN_THREAD.value == "main_thread"

    def test_get_executor_main_thread_mode(self):
        """Test executor selection in main thread mode."""
        # Force MAIN_THREAD mode and clear any process/thread count overrides
        # so the selection depends only on the executor type.
        with (
            patch.object(
                environment.REFLEX_COMPILE_EXECUTOR,
                "get",
                return_value=ExecutorType.MAIN_THREAD,
            ),
            patch.object(
                environment.REFLEX_COMPILE_PROCESSES, "get", return_value=None
            ),
            patch.object(environment.REFLEX_COMPILE_THREADS, "get", return_value=None),
        ):
            executor = ExecutorType.get_executor_from_environment()
            # Test the main thread executor functionality
            with executor:
                future = executor.submit(lambda x: x * 2, 5)
                assert future.result() == 10

    def test_get_executor_returns_executor(self):
        """Test that get_executor_from_environment returns an executor."""
        # Test with default values - should return some kind of executor
        executor = ExecutorType.get_executor_from_environment()
        assert executor is not None
        # Test that we can use it as a context manager
        with executor:
            future = executor.submit(lambda: "test")
            assert future.result() == "test"
class TestUtilityFunctions:
    """Test utility functions."""

    def test_get_type_hints_environment(self):
        """Test get_type_hints_environment function."""

        class TestClass:
            var1: str
            var2: int

        hints = get_type_hints_environment(TestClass)
        assert "var1" in hints
        assert "var2" in hints
        assert hints["var1"] is str
        assert hints["var2"] is int

    def test_paths_from_env_files(self):
        """Test _paths_from_env_files function."""
        # Entries are separated by the platform path separator.
        env_files = "/path/one" + os.pathsep + "/path/two" + os.pathsep + "/path/three"
        result = _paths_from_env_files(env_files)
        # Should be reversed order
        expected = [Path("/path/three"), Path("/path/two"), Path("/path/one")]
        assert result == expected

    def test_paths_from_env_files_with_spaces(self):
        """Test _paths_from_env_files with spaces."""
        # Whitespace around each entry is stripped before conversion.
        env_files = (
            " /path/one " + os.pathsep + " /path/two " + os.pathsep + " /path/three "
        )
        result = _paths_from_env_files(env_files)
        expected = [Path("/path/three"), Path("/path/two"), Path("/path/one")]
        assert result == expected

    def test_paths_from_env_files_empty(self):
        """Test _paths_from_env_files with empty string."""
        result = _paths_from_env_files("")
        assert result == []

    def test_paths_from_environment_set(self, monkeypatch):
        """Test _paths_from_environment when REFLEX_ENV_FILE is set.

        Args:
            monkeypatch: pytest monkeypatch fixture.
        """
        monkeypatch.setenv("REFLEX_ENV_FILE", "/path/one" + os.pathsep + "/path/two")
        result = _paths_from_environment()
        expected = [Path("/path/two"), Path("/path/one")]
        assert result == expected

    def test_paths_from_environment_not_set(self):
        """Test _paths_from_environment when REFLEX_ENV_FILE is not set."""
        # Ensure the env var is not set
        if "REFLEX_ENV_FILE" in os.environ:
            del os.environ["REFLEX_ENV_FILE"]
        result = _paths_from_environment()
        assert result == []

    @patch("reflex.environment.load_dotenv")
    def test_load_dotenv_from_files_with_dotenv(self, mock_load_dotenv):
        """Test _load_dotenv_from_files when dotenv is available.

        Args:
            mock_load_dotenv: Mock for the load_dotenv function.
        """
        with tempfile.TemporaryDirectory() as temp_dir:
            file1 = Path(temp_dir) / "file1.env"
            file2 = Path(temp_dir) / "file2.env"
            file1.touch()
            file2.touch()
            _load_dotenv_from_files([file1, file2])
            assert mock_load_dotenv.call_count == 2
            # Later files override earlier ones.
            mock_load_dotenv.assert_any_call(file1, override=True)
            mock_load_dotenv.assert_any_call(file2, override=True)

    @patch("reflex.environment.load_dotenv", None)
    @patch("reflex.utils.console")
    def test_load_dotenv_from_files_without_dotenv(self, mock_console):
        """Test _load_dotenv_from_files when dotenv is not available.

        Args:
            mock_console: Mock for the console object.
        """
        with tempfile.TemporaryDirectory() as temp_dir:
            file1 = Path(temp_dir) / "file1.env"
            file1.touch()
            _load_dotenv_from_files([file1])
            mock_console.error.assert_called_once()

    def test_load_dotenv_from_files_empty_list(self):
        """Test _load_dotenv_from_files with empty file list."""
        # Should not raise any errors
        _load_dotenv_from_files([])

    @patch("reflex.environment.load_dotenv")
    def test_load_dotenv_from_files_nonexistent_file(self, mock_load_dotenv):
        """Test _load_dotenv_from_files with non-existent file.

        Args:
            mock_load_dotenv: Mock for the load_dotenv function.
        """
        nonexistent_file = Path("/non/existent/file.env")
        _load_dotenv_from_files([nonexistent_file])
        # Should not call load_dotenv for non-existent files
        mock_load_dotenv.assert_not_called()
class TestEnvironmentVariables:
    """Test the EnvironmentVariables class and its instances."""

    def test_environment_instance_exists(self):
        """Test that the environment instance exists and is properly typed."""
        assert isinstance(environment, EnvironmentVariables)

    def test_environment_variables_have_correct_types(self):
        """Test that environment variables have the correct types."""
        # Test a few key environment variables
        assert isinstance(environment.REFLEX_USE_NPM, EnvVar)
        assert isinstance(environment.REFLEX_USE_GRANIAN, EnvVar)
        assert isinstance(environment.REFLEX_WEB_WORKDIR, EnvVar)
        assert isinstance(environment.REFLEX_FRONTEND_PORT, EnvVar)
        assert isinstance(environment.REFLEX_BACKEND_PORT, EnvVar)

    def test_environment_variables_defaults(self):
        """Test that environment variables have the expected defaults."""
        assert environment.REFLEX_USE_NPM.get() is False
        assert environment.REFLEX_USE_GRANIAN.get() is False
        assert environment.REFLEX_USE_SYSTEM_BUN.get() is False
        assert environment.REFLEX_WEB_WORKDIR.get() == Path(constants.Dirs.WEB)
        assert environment.REFLEX_STATES_WORKDIR.get() == Path(constants.Dirs.STATES)

    def test_internal_environment_variables(self):
        """Test internal environment variables have correct names."""
        # Internal vars carry a double-underscore prefix.
        assert environment.REFLEX_COMPILE_CONTEXT.name == "__REFLEX_COMPILE_CONTEXT"
        assert environment.REFLEX_SKIP_COMPILE.name == "__REFLEX_SKIP_COMPILE"

    def test_performance_mode_enum(self):
        """Test PerformanceMode enum."""
        assert PerformanceMode.WARN.value == "warn"
        assert PerformanceMode.RAISE.value == "raise"
        assert PerformanceMode.OFF.value == "off"
        # Test that the default is WARN
        assert environment.REFLEX_PERF_MODE.get() == PerformanceMode.WARN
class TestGetDefaultValueForField:
    """Test the get_default_value_for_field function."""

    def test_get_default_value_for_field_with_default(self):
        """Test field with default value."""
        import dataclasses

        @dataclasses.dataclass
        class TestClass:
            field: str = "default_value"

        fld = dataclasses.fields(TestClass)[0]
        assert get_default_value_for_field(fld) == "default_value"

    def test_get_default_value_for_field_with_default_factory(self):
        """Test field with default factory."""
        import dataclasses

        @dataclasses.dataclass
        class TestClass:
            field: list = dataclasses.field(default_factory=list)

        # The factory is invoked to produce the default.
        fld = dataclasses.fields(TestClass)[0]
        assert get_default_value_for_field(fld) == []

    def test_get_default_value_for_field_without_default(self):
        """Test field without default value or factory."""
        import dataclasses

        @dataclasses.dataclass
        class TestClass:
            field: str

        fld = dataclasses.fields(TestClass)[0]
        with pytest.raises(ValueError, match="Missing value for environment variable"):
            get_default_value_for_field(fld)
@pytest.fixture(autouse=True)
def cleanup_env_vars():
    """Clean up test environment variables after each test.

    Yields:
        None: Fixture yields control back to the test.
    """
    test_vars = [
        "TEST_VAR",
        "NONEXISTENT_VAR",
        "BLUBB",
        "__INTERNAL",
        "BOOLEAN",
        "LIST",
        "__INTERNAL_VAR",
    ]
    yield
    # Remove anything the test left behind so state cannot leak between tests.
    # (BUGFIX: dropped a stray debug `print(var)` that polluted test output.)
    for var in test_vars:
        os.environ.pop(var, None)
| {
"repo_id": "reflex-dev/reflex",
"file_path": "tests/units/test_environment.py",
"license": "Apache License 2.0",
"lines": 534,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
reflex-dev/reflex:tests/units/plugins/test_sitemap.py | """Unit tests for the sitemap plugin."""
import datetime
from unittest.mock import MagicMock, patch
import reflex as rx
from reflex.app import UnevaluatedPage
from reflex.plugins.sitemap import SitemapLink, generate_links_for_sitemap, generate_xml
def test_generate_xml_empty_links():
    """Test generate_xml with an empty list of links."""
    xml_output = generate_xml([])
    # An empty link list yields a self-closed <urlset> element.
    expected = """<?xml version='1.0' encoding='utf-8'?>
<urlset xmlns="https://www.sitemaps.org/schemas/sitemap/0.9" />"""
    assert xml_output == expected
def test_generate_xml_single_link_loc_only():
    """Test generate_xml with a single link having only loc."""
    links: list[SitemapLink] = [{"loc": "https://example.com"}]
    xml_output = generate_xml(links)
    # Only the mandatory <loc> child is emitted when no optional fields are set.
    expected = """<?xml version='1.0' encoding='utf-8'?>
<urlset xmlns="https://www.sitemaps.org/schemas/sitemap/0.9">
  <url>
    <loc>https://example.com</loc>
  </url>
</urlset>"""
    assert xml_output == expected
def test_generate_xml_multiple_links_all_fields():
    """Test generate_xml with multiple links having all fields."""
    now = datetime.datetime(2023, 6, 13, 12, 0, 0)
    links: list[SitemapLink] = [
        {
            "loc": "https://example.com/page1",
            "lastmod": now,
            "changefreq": "daily",
            "priority": 0.8,
        },
        {
            "loc": "https://example.com/page2",
            "lastmod": datetime.datetime(2023, 1, 1, 0, 0, 0),
            "changefreq": "weekly",
            "priority": 0.5,
        },
    ]
    xml_output = generate_xml(links)
    # Children are emitted in fixed order: loc, changefreq, lastmod, priority;
    # lastmod is serialized in ISO-8601 format.
    expected = """<?xml version='1.0' encoding='utf-8'?>
<urlset xmlns="https://www.sitemaps.org/schemas/sitemap/0.9">
  <url>
    <loc>https://example.com/page1</loc>
    <changefreq>daily</changefreq>
    <lastmod>2023-06-13T12:00:00</lastmod>
    <priority>0.8</priority>
  </url>
  <url>
    <loc>https://example.com/page2</loc>
    <changefreq>weekly</changefreq>
    <lastmod>2023-01-01T00:00:00</lastmod>
    <priority>0.5</priority>
  </url>
</urlset>"""
    assert xml_output == expected
@patch("reflex.config.get_config")
@patch("reflex.utils.console.warn")
def test_generate_links_for_sitemap_static_routes(
    mock_warn: MagicMock, mock_get_config: MagicMock
):
    """Test generate_links_for_sitemap with static routes.

    Args:
        mock_warn: Mock for the console.warn function.
        mock_get_config: Mock for the get_config function.
    """
    mock_get_config.return_value.deploy_url = "https://example.com"

    def mock_component():
        return rx.text("Test")

    pages = [
        UnevaluatedPage(
            component=mock_component,
            route="index",
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            context={},
        ),
        UnevaluatedPage(
            component=mock_component,
            route="about",
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            context={},
        ),
        UnevaluatedPage(
            component=mock_component,
            route="contact",
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            context={"sitemap": {"priority": 0.7, "changefreq": "monthly"}},
        ),
    ]
    links = generate_links_for_sitemap(pages)
    assert len(links) == 3
    # "index" maps to the site root; static routes are prefixed with deploy_url.
    assert {"loc": "https://example.com/"} in links
    assert {"loc": "https://example.com/about"} in links
    # Per-page sitemap context is merged into the emitted link entry.
    assert {
        "loc": "https://example.com/contact",
        "priority": 0.7,
        "changefreq": "monthly",
    } in links
    mock_warn.assert_not_called()
@patch("reflex.config.get_config")
@patch("reflex.utils.console.warn")
def test_generate_links_for_sitemap_dynamic_routes(
    mock_warn: MagicMock, mock_get_config: MagicMock
):
    """Test generate_links_for_sitemap with dynamic routes.

    Args:
        mock_warn: Mock for the console.warn function.
        mock_get_config: Mock for the get_config function.
    """
    mock_get_config.return_value.deploy_url = "https://sub.example.org"
    now = datetime.datetime(2023, 6, 13, 12, 0, 0)

    def mock_component():
        return rx.text("Test")

    pages = [
        UnevaluatedPage(
            component=mock_component,
            route="blog/[id]",
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            context={
                "sitemap": {
                    "loc": "/custom-blog-path",
                    "lastmod": now,
                    "priority": 0.9,
                }
            },
        ),
        UnevaluatedPage(
            component=mock_component,
            route="products/[name]",
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            context={},
        ),  # No sitemap config
        UnevaluatedPage(
            component=mock_component,
            route="user/[user_id]/profile",
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            context={"sitemap": {"changefreq": "yearly"}},
        ),  # Has sitemap config but no loc
    ]
    links = generate_links_for_sitemap(pages)
    # Only the page with an explicit `loc` survives; dynamic routes cannot be
    # enumerated automatically.
    assert len(links) == 1
    expected_link = {
        "loc": "https://sub.example.org/custom-blog-path",
        "lastmod": now,
        "priority": 0.9,
    }
    assert expected_link in links
    # A dynamic route with sitemap config but no `loc` triggers exactly one
    # warning; one without any sitemap config is skipped silently.
    assert mock_warn.call_count == 1
    mock_warn.assert_any_call(
        "Dynamic route 'user/[user_id]/profile' does not have a 'loc' in sitemap configuration. Skipping."
    )
@patch("reflex.config.get_config")
@patch("reflex.utils.console.warn")
def test_generate_links_for_sitemap_404_route(
    mock_warn: MagicMock, mock_get_config: MagicMock
):
    """Test generate_links_for_sitemap with the 404 route.

    Args:
        mock_warn: Mock for the console.warn function.
        mock_get_config: Mock for the get_config function.
    """
    mock_get_config.return_value.deploy_url = None  # No deploy URL

    def mock_component():
        return rx.text("404")

    pages = [
        UnevaluatedPage(
            component=mock_component,
            route="404",
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            context={"sitemap": {"loc": "/custom-404", "priority": 0.1}},
        ),
        UnevaluatedPage(
            component=mock_component,
            route="404",
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            context={"sitemap": {"priority": 0.2}},
        ),  # Has sitemap config but no loc
    ]
    links = generate_links_for_sitemap(pages)
    # The 404 route is only listed when it provides an explicit `loc`.
    assert len(links) == 1
    assert {"loc": "/custom-404", "priority": 0.1} in links
    mock_warn.assert_called_once_with(
        "Route 404 '404' does not have a 'loc' in sitemap configuration. Skipping."
    )
@patch("reflex.config.get_config")
def test_generate_links_for_sitemap_opt_out(mock_get_config: MagicMock):
    """Test generate_links_for_sitemap with sitemap set to None.

    Args:
        mock_get_config: Mock for the get_config function.
    """
    mock_get_config.return_value.deploy_url = None  # No deploy URL

    def mock_component():
        return rx.text("Unlisted")

    pages = [
        UnevaluatedPage(
            component=mock_component,
            route="unlisted",
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            # Explicit None opts this page out of the sitemap entirely.
            context={"sitemap": None},
        ),
        UnevaluatedPage(
            component=mock_component,
            route="listed",
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            context={},
        ),
    ]
    links = generate_links_for_sitemap(pages)
    assert len(links) == 1
    # Without a deploy_url the route path is emitted as-is.
    assert {"loc": "/listed"} in links
@patch("reflex.config.get_config")
def test_generate_links_for_sitemap_loc_override(mock_get_config: MagicMock):
    """Test generate_links_for_sitemap with loc override in sitemap config.

    Args:
        mock_get_config: Mock for the get_config function.
    """
    mock_get_config.return_value.deploy_url = "http://localhost:3000"

    def mock_component():
        return rx.text("Test")

    pages = [
        UnevaluatedPage(
            component=mock_component,
            route="features",
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            # An absolute `loc` is used verbatim, ignoring deploy_url.
            context={"sitemap": {"loc": "https://override.com/features_page"}},
        ),
        UnevaluatedPage(
            component=mock_component,
            route="pricing",
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            # A relative `loc` is joined onto the configured deploy_url.
            context={"sitemap": {"loc": "/custom_pricing"}},
        ),
    ]
    links = generate_links_for_sitemap(pages)
    assert len(links) == 2
    assert {"loc": "https://override.com/features_page"} in links
    assert {"loc": "http://localhost:3000/custom_pricing"} in links
@patch("reflex.config.get_config")
def test_generate_links_for_sitemap_priority_clamping(mock_get_config: MagicMock):
    """Test that priority is clamped between 0.0 and 1.0.

    Args:
        mock_get_config: Mock for the get_config function.
    """
    mock_get_config.return_value.deploy_url = "https://example.com"

    def build_page(route: str, priority: float):
        return UnevaluatedPage(
            component=lambda: rx.text("Test"),
            route=route,
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            context={"sitemap": {"priority": priority}},
        )

    links = generate_links_for_sitemap([
        build_page("high_prio", 1.5),
        build_page("low_prio", -0.5),
        build_page("valid_prio", 0.5),
    ])
    # Out-of-range priorities clamp to the [0.0, 1.0] bounds.
    for expected_link in (
        {"loc": "https://example.com/high_prio", "priority": 1.0},
        {"loc": "https://example.com/low_prio", "priority": 0.0},
        {"loc": "https://example.com/valid_prio", "priority": 0.5},
    ):
        assert expected_link in links
@patch("reflex.config.get_config")
def test_generate_links_for_sitemap_no_deploy_url(mock_get_config: MagicMock):
    """Test generate_links_for_sitemap when deploy_url is not set.

    Args:
        mock_get_config: Mock for the get_config function.
    """
    mock_get_config.return_value.deploy_url = None

    def build_page(route: str, context: dict):
        return UnevaluatedPage(
            component=lambda: rx.text("Test"),
            route=route,
            title=None,
            description=None,
            image="favicon.ico",
            on_load=None,
            meta=[],
            context=context,
        )

    links = generate_links_for_sitemap([
        build_page("home", {"sitemap": {"loc": "/home"}}),
        build_page("about", {}),  # No loc, should use route.
        build_page("index", {}),  # Special case: index maps to "/".
    ])
    assert len(links) == 3
    for expected_link in ({"loc": "/home"}, {"loc": "/about"}, {"loc": "/"}):
        assert expected_link in links
@patch("reflex.config.get_config")
def test_generate_links_for_sitemap_deploy_url_trailing_slash(
    mock_get_config: MagicMock,
):
    """Test generate_links_for_sitemap with deploy_url having a trailing slash.

    Args:
        mock_get_config: Mock for the get_config function.
    """
    mock_get_config.return_value.deploy_url = "https://example.com/"
    page = UnevaluatedPage(
        component=lambda: rx.text("Test"),
        route="testpage",
        title=None,
        description=None,
        image="favicon.ico",
        on_load=None,
        meta=[],
        context={},
    )
    links = generate_links_for_sitemap([page])
    # The trailing slash must not produce a double slash in the final loc.
    assert links == [{"loc": "https://example.com/testpage"}]
@patch("reflex.config.get_config")
def test_generate_links_for_sitemap_loc_leading_slash(mock_get_config: MagicMock):
    """Test generate_links_for_sitemap with loc having a leading slash.

    Args:
        mock_get_config: Mock for the get_config function.
    """
    mock_get_config.return_value.deploy_url = "https://example.com"
    page = UnevaluatedPage(
        component=lambda: rx.text("Test"),
        route="another",
        title=None,
        description=None,
        image="favicon.ico",
        on_load=None,
        meta=[],
        context={"sitemap": {"loc": "/another"}},
    )
    links = generate_links_for_sitemap([page])
    # A leading slash is merged with the deploy URL without doubling.
    assert links == [{"loc": "https://example.com/another"}]
@patch("reflex.config.get_config")
def test_generate_links_for_sitemap_loc_full_url(mock_get_config: MagicMock):
    """Test generate_links_for_sitemap with loc being a full URL.

    Args:
        mock_get_config: Mock for the get_config function.
    """
    mock_get_config.return_value.deploy_url = "https://example.com"
    page = UnevaluatedPage(
        component=lambda: rx.text("Test"),
        route="external",
        title=None,
        description=None,
        image="favicon.ico",
        on_load=None,
        meta=[],
        context={"sitemap": {"loc": "http://othersite.com/page"}},
    )
    links = generate_links_for_sitemap([page])
    # An absolute loc is never prefixed with the deploy URL.
    assert links == [{"loc": "http://othersite.com/page"}]
| {
"repo_id": "reflex-dev/reflex",
"file_path": "tests/units/plugins/test_sitemap.py",
"license": "Apache License 2.0",
"lines": 455,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
reflex-dev/reflex:reflex/components/core/helmet.py | """Helmet component module."""
from reflex.components.component import Component
class Helmet(Component):
    """A helmet component.

    Thin wrapper around the `Helmet` component from the react-helmet
    package, used to manage document-head tags.
    """

    # npm package (with pinned version) that provides the component.
    library = "react-helmet@6.1.0"
    # Name of the React component imported from the library.
    tag = "Helmet"


# Convenience alias so callers can write `helmet(...)` instead of
# `Helmet.create(...)`.
helmet = Helmet.create
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/components/core/helmet.py",
"license": "Apache License 2.0",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
reflex-dev/reflex:reflex/plugins/sitemap.py | """Sitemap plugin for Reflex."""
import datetime
from collections.abc import Sequence
from pathlib import Path
from types import SimpleNamespace
from typing import TYPE_CHECKING, Literal, TypedDict
from xml.etree.ElementTree import Element, SubElement, indent, tostring
from typing_extensions import NotRequired
from reflex import constants
from .base import Plugin as PluginBase
if TYPE_CHECKING:
from reflex.app import UnevaluatedPage
# Type aliases mirroring the fields of a <url> entry in a sitemap.
Location = str
LastModified = datetime.datetime
ChangeFrequency = Literal[
    "always", "hourly", "daily", "weekly", "monthly", "yearly", "never"
]
Priority = float


class SitemapLink(TypedDict):
    """A link in the sitemap."""

    # Fully resolved URL; always present in a finished link.
    loc: Location
    lastmod: NotRequired[LastModified]
    changefreq: NotRequired[ChangeFrequency]
    priority: NotRequired[Priority]


class SitemapLinkConfiguration(TypedDict):
    """Configuration for a sitemap link.

    Same fields as SitemapLink, but 'loc' is optional — it may be derived
    from the page route when absent.
    """

    loc: NotRequired[Location]
    lastmod: NotRequired[LastModified]
    changefreq: NotRequired[ChangeFrequency]
    priority: NotRequired[Priority]


class Constants(SimpleNamespace):
    """Sitemap constants."""

    # Output location of the generated sitemap inside the public directory.
    FILE_PATH: Path = Path(constants.Dirs.PUBLIC) / "sitemap.xml"
def configuration_with_loc(
    *, config: SitemapLinkConfiguration, deploy_url: str | None, loc: Location
) -> SitemapLink:
    """Build a sitemap link from a configuration and a resolved location.

    Args:
        config: The configuration dictionary.
        deploy_url: The deployment URL, if any.
        loc: The location to set.

    Returns:
        A SitemapLink dictionary with the 'loc' field set.
    """
    is_absolute = loc.startswith("http://") or loc.startswith("https://")
    if deploy_url and not is_absolute:
        # Join with exactly one slash between base and path.
        loc = deploy_url.rstrip("/") + "/" + loc.lstrip("/")
    result: SitemapLink = {"loc": loc}
    lastmod = config.get("lastmod")
    if lastmod is not None:
        result["lastmod"] = lastmod
    changefreq = config.get("changefreq")
    if changefreq is not None:
        result["changefreq"] = changefreq
    priority = config.get("priority")
    if priority is not None:
        # Clamp into the valid [0.0, 1.0] range.
        result["priority"] = min(1.0, max(0.0, priority))
    return result
def generate_xml(links: Sequence[SitemapLink]) -> str:
    """Generate an XML sitemap from a list of links.

    Args:
        links: A sequence of SitemapLink dictionaries.

    Returns:
        A pretty-printed XML string representing the sitemap.
    """
    # The sitemap protocol mandates the literal http:// namespace URI;
    # an https:// namespace is a different URI and makes validators and
    # search engines reject the sitemap.
    urlset = Element("urlset", xmlns="http://www.sitemaps.org/schemas/sitemap/0.9")
    for link in links:
        url = SubElement(urlset, "url")
        loc_element = SubElement(url, "loc")
        loc_element.text = link["loc"]
        if (changefreq := link.get("changefreq")) is not None:
            changefreq_element = SubElement(url, "changefreq")
            changefreq_element.text = changefreq
        if (lastmod := link.get("lastmod")) is not None:
            lastmod_element = SubElement(url, "lastmod")
            if isinstance(lastmod, datetime.datetime):
                # Serialize datetimes to ISO-8601 text.
                lastmod = lastmod.isoformat()
            lastmod_element.text = lastmod
        if (priority := link.get("priority")) is not None:
            priority_element = SubElement(url, "priority")
            priority_element.text = str(priority)
    indent(urlset, " ")
    return tostring(urlset, encoding="utf-8", xml_declaration=True).decode("utf-8")
def is_route_dynamic(route: str) -> bool:
    """Check if a route is dynamic.

    Args:
        route: The route to check.

    Returns:
        True if the route is dynamic, False otherwise.
    """
    # Dynamic segments are written as [param] somewhere in the route, so
    # the presence of both brackets marks the route as dynamic.
    return ("[" in route) and ("]" in route)
def generate_links_for_sitemap(
    unevaluated_pages: Sequence["UnevaluatedPage"],
) -> list[SitemapLink]:
    """Generate sitemap links from unevaluated pages.

    Args:
        unevaluated_pages: Sequence of unevaluated pages.

    Returns:
        A list of SitemapLink dictionaries.
    """
    from reflex.config import get_config
    from reflex.utils import console

    base_url = get_config().deploy_url
    collected: list[SitemapLink] = []
    for page in unevaluated_pages:
        config: SitemapLinkConfiguration | None = page.context.get("sitemap", {})
        if config is None:
            # `"sitemap": None` is an explicit opt-out for this page.
            continue
        explicit_loc = config.get("loc")
        if is_route_dynamic(page.route) or page.route == "404":
            # Dynamic and 404 routes have no usable URL of their own; they
            # are only listed when a config with an explicit loc exists.
            if not config:
                continue
            if explicit_loc is None:
                prefix = (
                    "Dynamic route" if is_route_dynamic(page.route) else "Route 404"
                )
                console.warn(
                    prefix
                    + f" '{page.route}' does not have a 'loc' in sitemap configuration. Skipping."
                )
                continue
            target = explicit_loc
        elif explicit_loc is not None:
            target = explicit_loc
        else:
            # Derive the loc from the route; "index" maps to the site root.
            target = "/" if page.route == "index" else page.route
            if not target.startswith("/"):
                target = "/" + target
        collected.append(
            configuration_with_loc(config=config, deploy_url=base_url, loc=target)
        )
    return collected
def sitemap_task(unevaluated_pages: Sequence["UnevaluatedPage"]) -> tuple[str, str]:
    """Task to generate the sitemap XML file.

    Args:
        unevaluated_pages: Sequence of unevaluated pages.

    Returns:
        A tuple containing the file path and the generated XML content.
    """
    destination = str(Constants.FILE_PATH)
    xml_content = generate_xml(generate_links_for_sitemap(unevaluated_pages))
    return destination, xml_content
class SitemapPlugin(PluginBase):
    """Sitemap plugin for Reflex."""

    def pre_compile(self, **context):
        """Generate the sitemap XML file before compilation.

        Args:
            context: The context for the plugin.
        """
        pages = context.get("unevaluated_pages", [])
        # Defer file generation to the app's save-task machinery.
        add_save_task = context["add_save_task"]
        add_save_task(sitemap_task, pages)


# Export name expected by the plugin loader.
Plugin = SitemapPlugin
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/plugins/sitemap.py",
"license": "Apache License 2.0",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:reflex/plugins/shared_tailwind.py | """Tailwind CSS configuration types for Reflex plugins."""
import dataclasses
from collections.abc import Mapping
from copy import deepcopy
from typing import Any, Literal, TypedDict
from typing_extensions import NotRequired, Unpack
from .base import Plugin as PluginBase
# Functional TypedDict syntax is required below because "from" and "import"
# are Python keywords and cannot be class-body attribute names.

# Destructured import needed by a plugin, e.g. `import { name } from "pkg"`.
TailwindPluginImport = TypedDict(
    "TailwindPluginImport",
    {
        "name": str,
        "from": str,
    },
)
# Plugin invoked with a call expression (optionally with JSON-serializable args).
TailwindPluginWithCallConfig = TypedDict(
    "TailwindPluginWithCallConfig",
    {
        "name": str,
        "import": NotRequired[TailwindPluginImport],
        "call": str,
        "args": NotRequired[dict[str, Any]],
    },
)
# Plugin referenced by package name only.
TailwindPluginWithoutCallConfig = TypedDict(
    "TailwindPluginWithoutCallConfig",
    {
        "name": str,
        "import": NotRequired[TailwindPluginImport],
    },
)
# A plugin entry may be a bare package-name string or one of the dict forms.
TailwindPluginConfig = (
    TailwindPluginWithCallConfig | TailwindPluginWithoutCallConfig | str
)
def remove_version_from_plugin(plugin: TailwindPluginConfig) -> TailwindPluginConfig:
    """Remove the version from a plugin name.

    Args:
        plugin: The plugin to remove the version from.

    Returns:
        The plugin without the version.
    """
    from reflex.utils.format import format_library_name

    # Bare string form: the whole entry is the (possibly versioned) name.
    if isinstance(plugin, str):
        return format_library_name(plugin)
    # Dict form: strip versions from both the import source and the name.
    import_spec = plugin.get("import")
    if import_spec:
        import_spec["from"] = format_library_name(import_spec["from"])
    plugin["name"] = format_library_name(plugin["name"])
    return plugin
class TailwindConfig(TypedDict):
    """Tailwind CSS configuration options.

    All keys are optional and are passed through into the generated
    JavaScript Tailwind configuration file.

    See: https://tailwindcss.com/docs/configuration
    """

    # Glob patterns of template files Tailwind scans for class names.
    content: NotRequired[list[str]]
    # Tailwind "important" option (selector string or boolean flag).
    important: NotRequired[str | bool]
    # Prefix prepended to generated utility class names.
    prefix: NotRequired[str]
    # Separator between variant and utility names (default ":" in Tailwind).
    separator: NotRequired[str]
    # Package names of Tailwind presets to import and apply.
    presets: NotRequired[list[str]]
    darkMode: NotRequired[Literal["media", "class", "selector"]]
    # Theme customization passed through verbatim.
    theme: NotRequired[dict[str, Any]]
    corePlugins: NotRequired[list[str] | dict[str, bool]]
    plugins: NotRequired[list[TailwindPluginConfig]]
def tailwind_config_js_template(
    *, default_content: list[str], **kwargs: Unpack[TailwindConfig]
):
    """Generate a Tailwind CSS configuration file in JavaScript format.

    Args:
        default_content: The default content to use if none is provided.
        **kwargs: The template variables.

    Returns:
        The Tailwind config template.
    """
    import json

    # Extract parameters
    plugins = kwargs.get("plugins", [])
    presets = kwargs.get("presets", [])
    content = kwargs.get("content")
    theme = kwargs.get("theme")
    dark_mode = kwargs.get("darkMode")
    core_plugins = kwargs.get("corePlugins")
    important = kwargs.get("important")
    prefix = kwargs.get("prefix")
    separator = kwargs.get("separator")
    # Extract destructured imports from plugin dicts only
    imports = [
        plugin["import"]
        for plugin in plugins
        if isinstance(plugin, Mapping) and "import" in plugin
    ]
    # Generate import statements for destructured imports
    import_lines = "\n".join([
        f"import {{ {imp['name']} }} from {json.dumps(imp['from'])};" for imp in imports
    ])
    # Generate plugin imports (call-style plugins are invoked via their
    # destructured import instead of a default import)
    plugin_imports = []
    for i, plugin in enumerate(plugins, 1):
        if isinstance(plugin, Mapping) and "call" not in plugin:
            plugin_imports.append(
                f"import plugin{i} from {json.dumps(plugin['name'])};"
            )
        elif not isinstance(plugin, Mapping):
            plugin_imports.append(f"import plugin{i} from {json.dumps(plugin)};")
    plugin_imports_lines = "\n".join(plugin_imports)
    presets_imports_lines = "\n".join([
        f"import preset{i} from {json.dumps(preset)};"
        for i, preset in enumerate(presets, 1)
    ])
    # Generate plugin array
    plugin_list = []
    for i, plugin in enumerate(plugins, 1):
        if isinstance(plugin, Mapping) and "call" in plugin:
            args_part = ""
            if "args" in plugin:
                args_part = json.dumps(plugin["args"])
            plugin_list.append(f"{plugin['call']}({args_part})")
        else:
            plugin_list.append(f"plugin{i}")
    plugin_use_str = ",".join(plugin_list)
    # BUGFIX: the emitted key was previously "importants", which Tailwind
    # ignores — the "important" option was silently dropped.
    return rf"""
{import_lines}
{plugin_imports_lines}
{presets_imports_lines}
export default {{
content: {json.dumps(content or default_content)},
theme: {json.dumps(theme or {})},
{f"darkMode: {json.dumps(dark_mode)}," if dark_mode is not None else ""}
{f"corePlugins: {json.dumps(core_plugins)}," if core_plugins is not None else ""}
{f"important: {json.dumps(important)}," if important is not None else ""}
{f"prefix: {json.dumps(prefix)}," if prefix is not None else ""}
{f"separator: {json.dumps(separator)}," if separator is not None else ""}
{f"presets: [{', '.join(f'preset{i}' for i in range(1, len(presets) + 1))}]," if presets else ""}
plugins: [{plugin_use_str}]
}};
"""
@dataclasses.dataclass
class TailwindPlugin(PluginBase):
    """Plugin for Tailwind CSS."""

    config: TailwindConfig = dataclasses.field(
        default_factory=lambda: TailwindConfig(
            plugins=[
                "@tailwindcss/typography@0.5.19",
            ],
        )
    )

    def get_frontend_development_dependencies(self, **context) -> list[str]:
        """Get the packages required by the plugin.

        Args:
            **context: The context for the plugin.

        Returns:
            A list of packages required by the plugin.
        """
        active_config = self.get_config()
        # Plugins contribute their package name; presets are appended as-is.
        dependencies = [
            entry if isinstance(entry, str) else entry.get("name")
            for entry in active_config.get("plugins", [])
        ]
        dependencies.extend(active_config.get("presets", []))
        return dependencies

    def get_config(self) -> TailwindConfig:
        """Get the Tailwind CSS configuration.

        Returns:
            The Tailwind CSS configuration.
        """
        from reflex.config import get_config

        legacy_config = getattr(get_config(), "tailwind", None)
        if legacy_config is None or legacy_config == self.config:
            return self.config
        # A differing config on rx.Config takes precedence, but is deprecated.
        from reflex.utils import console

        console.warn(
            "It seems you have provided a tailwind configuration in your call to `rx.Config`."
            f" You should provide the configuration as an argument to `rx.plugins.{self.__class__.__name__}()` instead."
        )
        return legacy_config

    def get_unversioned_config(self) -> TailwindConfig:
        """Get the Tailwind CSS configuration without version-specific adjustments.

        Returns:
            The Tailwind CSS configuration without version-specific adjustments.
        """
        from reflex.utils.format import format_library_name

        unversioned = deepcopy(self.get_config())
        presets = unversioned.get("presets")
        if presets:
            # Somehow, having an empty list of presets breaks Tailwind.
            # So we only set the presets if there are any.
            unversioned["presets"] = [format_library_name(p) for p in presets]
        unversioned["plugins"] = [
            remove_version_from_plugin(entry)
            for entry in unversioned.get("plugins", [])
        ]
        return unversioned
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/plugins/shared_tailwind.py",
"license": "Apache License 2.0",
"lines": 184,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:reflex/components/field.py | """Shared field infrastructure for components and props."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import _MISSING_TYPE, MISSING
from typing import Annotated, Any, Generic, TypeVar, get_origin
from reflex.utils import types
from reflex.utils.compat import annotations_from_namespace
FIELD_TYPE = TypeVar("FIELD_TYPE")
class BaseField(Generic[FIELD_TYPE]):
"""Base field class used by internal metadata classes."""
def __init__(
self,
default: FIELD_TYPE | _MISSING_TYPE = MISSING,
default_factory: Callable[[], FIELD_TYPE] | None = None,
annotated_type: type[Any] | _MISSING_TYPE = MISSING,
) -> None:
"""Initialize the field.
Args:
default: The default value for the field.
default_factory: The default factory for the field.
annotated_type: The annotated type for the field.
"""
self.default = default
self.default_factory = default_factory
self.outer_type_ = self.annotated_type = annotated_type
# Process type annotation
type_origin = get_origin(annotated_type) or annotated_type
if type_origin is Annotated:
type_origin = annotated_type.__origin__ # pyright: ignore [reportAttributeAccessIssue]
# For Annotated types, use the actual type inside the annotation
self.type_ = annotated_type
else:
# For other types (including Union), preserve the original type
self.type_ = annotated_type
self.type_origin = type_origin
def default_value(self) -> FIELD_TYPE:
"""Get the default value for the field.
Returns:
The default value for the field.
Raises:
ValueError: If no default value or factory is provided.
"""
if self.default is not MISSING:
return self.default
if self.default_factory is not None:
return self.default_factory()
msg = "No default value or factory provided."
raise ValueError(msg)
class FieldBasedMeta(type):
    """Shared metaclass for field-based classes like components and props.

    Provides common field inheritance and processing logic for both
    PropsBaseMeta and BaseComponentMeta.
    """

    def __new__(
        cls, name: str, bases: tuple[type, ...], namespace: dict[str, Any]
    ) -> type:
        """Create a new field-based class.

        Args:
            name: The name of the class.
            bases: The base classes.
            namespace: The class namespace.

        Returns:
            The new class.
        """
        # Collect inherited fields from base classes
        inherited_fields = cls._collect_inherited_fields(bases)
        # Get annotations from the namespace
        annotations = cls._resolve_annotations(namespace, name)
        # Process field overrides (fields with values but no annotations)
        own_fields = cls._process_field_overrides(
            namespace, annotations, inherited_fields
        )
        # Process annotated fields
        own_fields.update(
            cls._process_annotated_fields(namespace, annotations, inherited_fields)
        )
        # Finalize fields and store on class
        cls._finalize_fields(namespace, inherited_fields, own_fields)
        return super().__new__(cls, name, bases, namespace)

    @classmethod
    def _collect_inherited_fields(cls, bases: tuple[type, ...]) -> dict[str, Any]:
        """Merge field mappings from all base classes.

        Bases are scanned right-to-left, so with dict.update semantics the
        leftmost base wins on name collisions; `_own_fields` are merged after
        `_inherited_fields` so a base's own declarations override what that
        base itself inherited.

        Args:
            bases: The base classes of the class being created.

        Returns:
            A mapping of field name to inherited field object.
        """
        inherited_fields: dict[str, Any] = {}
        # Collect inherited fields from base classes
        for base in bases[::-1]:
            if hasattr(base, "_inherited_fields"):
                inherited_fields.update(base._inherited_fields)
        for base in bases[::-1]:
            if hasattr(base, "_own_fields"):
                inherited_fields.update(base._own_fields)
        return inherited_fields

    @classmethod
    def _resolve_annotations(
        cls, namespace: dict[str, Any], name: str
    ) -> dict[str, Any]:
        """Resolve the namespace's annotations against its defining module.

        Args:
            namespace: The class namespace being created.
            name: The class name (unused here; available for subclass hooks).

        Returns:
            The annotations with forward/string references resolved.
        """
        return types.resolve_annotations(
            annotations_from_namespace(namespace),
            namespace["__module__"],
        )

    @classmethod
    def _process_field_overrides(
        cls,
        namespace: dict[str, Any],
        annotations: dict[str, Any],
        inherited_fields: dict[str, Any],
    ) -> dict[str, Any]:
        """Create fields for values that override an inherited field.

        A namespace entry with a value but no new annotation re-declares an
        inherited field with a new default while keeping the inherited type.

        Args:
            namespace: The class namespace being created.
            annotations: The resolved annotations for this namespace.
            inherited_fields: Fields inherited from base classes.

        Returns:
            A mapping of overridden field names to new field objects.
        """
        own_fields: dict[str, Any] = {}
        for key, value in namespace.items():
            if key not in annotations and key in inherited_fields:
                inherited_field = inherited_fields[key]
                new_field = cls._create_field(
                    annotated_type=inherited_field.annotated_type,
                    default=value,
                    default_factory=None,
                )
                own_fields[key] = new_field
        return own_fields

    @classmethod
    def _process_annotated_fields(
        cls,
        namespace: dict[str, Any],
        annotations: dict[str, Any],
        inherited_fields: dict[str, Any],
    ) -> dict[str, Any]:
        """Build field objects for newly annotated namespace entries.

        Raises:
            NotImplementedError: Always; subclasses must implement this hook.
        """
        raise NotImplementedError

    @classmethod
    def _create_field(
        cls,
        annotated_type: Any,
        default: Any = MISSING,
        default_factory: Callable[[], Any] | None = None,
    ) -> Any:
        """Construct a single field object.

        Raises:
            NotImplementedError: Always; subclasses must implement this hook.
        """
        raise NotImplementedError

    @classmethod
    def _finalize_fields(
        cls,
        namespace: dict[str, Any],
        inherited_fields: dict[str, Any],
        own_fields: dict[str, Any],
    ) -> None:
        """Merge inherited and own fields and record them on the namespace.

        Args:
            namespace: The class namespace being created (mutated in place).
            inherited_fields: Fields inherited from base classes.
            own_fields: Fields declared directly on this class.
        """
        # Combine all fields; own fields shadow inherited ones.
        all_fields = inherited_fields | own_fields
        # Set field names for compatibility
        for field_name, field in all_fields.items():
            field._name = field_name
        # Store field mappings on the class
        namespace["_own_fields"] = own_fields
        namespace["_inherited_fields"] = inherited_fields
        namespace["_fields"] = all_fields
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/components/field.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:reflex/environment.py | """Environment variable management."""
from __future__ import annotations
import concurrent.futures
import dataclasses
import enum
import importlib
import multiprocessing
import os
import platform
from collections.abc import Callable, Sequence
from functools import lru_cache
from pathlib import Path
from typing import (
TYPE_CHECKING,
Annotated,
Any,
Generic,
Literal,
TypeVar,
get_args,
get_origin,
get_type_hints,
)
from reflex import constants
from reflex.constants.base import LogLevel
from reflex.plugins import Plugin
from reflex.utils.exceptions import EnvironmentVarValueError
from reflex.utils.types import GenericType, is_union, value_inside_optional
def get_default_value_for_field(field: dataclasses.Field) -> Any:
    """Get the default value for a field.

    Args:
        field: The field.

    Returns:
        The default value.

    Raises:
        ValueError: If no default value is found.
    """
    # Prefer a plain default; fall back to calling the factory.
    if field.default is not dataclasses.MISSING:
        return field.default
    if field.default_factory is not dataclasses.MISSING:
        return field.default_factory()
    raise ValueError(
        f"Missing value for environment variable {field.name} and no default value found"
    )
# TODO: Change all interpret_.* signatures to value: str, field: dataclasses.Field once we migrate rx.Config to dataclasses
def interpret_boolean_env(value: str, field_name: str) -> bool:
    """Interpret a boolean environment variable value.

    Args:
        value: The environment variable value.
        field_name: The field name.

    Returns:
        The interpreted value.

    Raises:
        EnvironmentVarValueError: If the value is invalid.
    """
    normalized = value.lower()
    # Accept common truthy/falsy spellings, case-insensitively.
    if normalized in {"true", "1", "yes", "y"}:
        return True
    if normalized in {"false", "0", "no", "n"}:
        return False
    msg = f"Invalid boolean value: {value!r} for {field_name}"
    raise EnvironmentVarValueError(msg)
def interpret_int_env(value: str, field_name: str) -> int:
    """Interpret an integer environment variable value.

    Args:
        value: The environment variable value.
        field_name: The field name.

    Returns:
        The interpreted value.

    Raises:
        EnvironmentVarValueError: If the value is invalid.
    """
    try:
        return int(value)
    except ValueError as ve:
        raise EnvironmentVarValueError(
            f"Invalid integer value: {value!r} for {field_name}"
        ) from ve
def interpret_float_env(value: str, field_name: str) -> float:
    """Interpret a float environment variable value.

    Args:
        value: The environment variable value.
        field_name: The field name.

    Returns:
        The interpreted value.

    Raises:
        EnvironmentVarValueError: If the value is invalid.
    """
    try:
        return float(value)
    except ValueError as ve:
        raise EnvironmentVarValueError(
            f"Invalid float value: {value!r} for {field_name}"
        ) from ve
def interpret_existing_path_env(value: str, field_name: str) -> ExistingPath:
    """Interpret a path environment variable value as an existing path.

    Args:
        value: The environment variable value.
        field_name: The field name.

    Returns:
        The interpreted value.

    Raises:
        EnvironmentVarValueError: If the path does not exist.
    """
    candidate = Path(value)
    if candidate.exists():
        return candidate
    msg = f"Path does not exist: {candidate!r} for {field_name}"
    raise EnvironmentVarValueError(msg)
def interpret_path_env(value: str, field_name: str) -> Path:
    """Interpret a path environment variable value.

    The path is not checked for existence; see interpret_existing_path_env
    for the validating variant.

    Args:
        value: The environment variable value.
        field_name: The field name (unused; kept for interpreter signature parity).

    Returns:
        The interpreted value.
    """
    return Path(value)
def interpret_plugin_class_env(value: str, field_name: str) -> type[Plugin]:
    """Interpret an environment variable value as a Plugin subclass.

    Resolves a fully qualified import path to the Plugin subclass it refers to.

    Args:
        value: The environment variable value (e.g. "reflex.plugins.sitemap.SitemapPlugin").
        field_name: The field name.

    Returns:
        The Plugin subclass.

    Raises:
        EnvironmentVarValueError: If the value is invalid.
    """
    if "." not in value:
        msg = f"Invalid plugin value: {value!r} for {field_name}. Plugin name must be in the format 'package.module.PluginName'."
        raise EnvironmentVarValueError(msg)
    # Split "package.module.ClassName" into module path and class name.
    module_path, _, class_name = value.rpartition(".")
    try:
        module = importlib.import_module(module_path)
    except ImportError as e:
        msg = f"Failed to import module {module_path!r} for {field_name}: {e}"
        raise EnvironmentVarValueError(msg) from e
    try:
        candidate = getattr(module, class_name, None)
    except Exception as e:
        msg = f"Failed to get plugin class {class_name!r} from module {module_path!r} for {field_name}: {e}"
        raise EnvironmentVarValueError(msg) from e
    if not (isinstance(candidate, type) and issubclass(candidate, Plugin)):
        msg = f"Invalid plugin class: {class_name!r} for {field_name}. Must be a subclass of Plugin."
        raise EnvironmentVarValueError(msg)
    return candidate
def interpret_plugin_env(value: str, field_name: str) -> Plugin:
    """Interpret a plugin environment variable value.

    Resolves a fully qualified import path and returns an instance of the Plugin.

    Args:
        value: The environment variable value (e.g. "reflex.plugins.sitemap.SitemapPlugin").
        field_name: The field name.

    Returns:
        An instance of the Plugin subclass.

    Raises:
        EnvironmentVarValueError: If the value is invalid.
    """
    cls = interpret_plugin_class_env(value, field_name)
    try:
        # Plugins are instantiated with no arguments.
        return cls()
    except Exception as e:
        msg = f"Failed to instantiate plugin {cls.__name__!r} for {field_name}: {e}"
        raise EnvironmentVarValueError(msg) from e
def interpret_enum_env(value: str, field_type: GenericType, field_name: str) -> Any:
    """Interpret an enum environment variable value.

    Args:
        value: The environment variable value.
        field_type: The field type.
        field_name: The field name.

    Returns:
        The interpreted value.

    Raises:
        EnvironmentVarValueError: If the value is invalid.
    """
    try:
        # Enum lookup by value, e.g. MyEnum("some-value").
        return field_type(value)
    except ValueError as ve:
        raise EnvironmentVarValueError(
            f"Invalid enum value: {value!r} for {field_name}"
        ) from ve
@dataclasses.dataclass(frozen=True, kw_only=True, slots=True)
class SequenceOptions:
    """Options for interpreting Sequence environment variables."""

    # Character used to split the raw value into items.
    delimiter: str = ":"
    # Whether to strip whitespace from each item after splitting.
    strip: bool = False


# Shared default used when a field is not Annotated with explicit options.
DEFAULT_SEQUENCE_OPTIONS = SequenceOptions()
def interpret_env_var_value(
    value: str, field_type: GenericType, field_name: str
) -> Any:
    """Interpret an environment variable value based on the field type.

    Args:
        value: The environment variable value.
        field_type: The field type.
        field_name: The field name.

    Returns:
        The interpreted value.

    Raises:
        ValueError: If the value is invalid.
        EnvironmentVarValueError: If the value is invalid for the specific type.
    """
    # Unwrap Optional[X] down to X before dispatching.
    field_type = value_inside_optional(field_type)
    if is_union(field_type):
        # Try each union member in declaration order; first success wins.
        errors = []
        for arg in (union_types := get_args(field_type)):
            try:
                return interpret_env_var_value(value, arg, field_name)
            except (ValueError, EnvironmentVarValueError) as e:  # noqa: PERF203
                errors.append(e)
        msg = f"Could not interpret {value!r} for {field_name} as any of {union_types}: {errors}"
        raise EnvironmentVarValueError(msg)
    value = value.strip()
    # Exact-type dispatch for the supported scalar types.
    if field_type is bool:
        return interpret_boolean_env(value, field_name)
    if field_type is str:
        return value
    if field_type is LogLevel:
        loglevel = LogLevel.from_string(value)
        if loglevel is None:
            msg = f"Invalid log level value: {value} for {field_name}"
            raise EnvironmentVarValueError(msg)
        return loglevel
    if field_type is int:
        return interpret_int_env(value, field_name)
    if field_type is float:
        return interpret_float_env(value, field_name)
    if field_type is Path:
        return interpret_path_env(value, field_name)
    if field_type is ExistingPath:
        return interpret_existing_path_env(value, field_name)
    if field_type is Plugin:
        return interpret_plugin_env(value, field_name)
    if get_origin(field_type) is type:
        # type[Plugin] fields resolve to the plugin class itself.
        type_args = get_args(field_type)
        if (
            type_args
            and isinstance(type_args[0], type)
            and issubclass(type_args[0], Plugin)
        ):
            return interpret_plugin_class_env(value, field_name)
    if get_origin(field_type) is Literal:
        # Match against each allowed literal, coercing bools/ints as needed.
        literal_values = get_args(field_type)
        for literal_value in literal_values:
            if isinstance(literal_value, str) and literal_value == value:
                return literal_value
            if isinstance(literal_value, bool):
                try:
                    interpreted_bool = interpret_boolean_env(value, field_name)
                    if interpreted_bool == literal_value:
                        return interpreted_bool
                except EnvironmentVarValueError:
                    continue
            if isinstance(literal_value, int):
                try:
                    interpreted_int = interpret_int_env(value, field_name)
                    if interpreted_int == literal_value:
                        return interpreted_int
                except EnvironmentVarValueError:
                    continue
        msg = f"Invalid literal value: {value!r} for {field_name}, expected one of {literal_values}"
        raise EnvironmentVarValueError(msg)
    # If the field is Annotated with SequenceOptions, extract the options
    sequence_options = DEFAULT_SEQUENCE_OPTIONS
    if get_origin(field_type) is Annotated:
        annotated_args = get_args(field_type)
        field_type = annotated_args[0]
        for arg in annotated_args[1:]:
            if isinstance(arg, SequenceOptions):
                sequence_options = arg
                break
    if get_origin(field_type) in (list, Sequence):
        # Split on the configured delimiter and interpret each element
        # recursively against the sequence's element type.
        items = value.split(sequence_options.delimiter)
        if sequence_options.strip:
            items = [item.strip() for item in items]
        return [
            interpret_env_var_value(
                v,
                get_args(field_type)[0],
                f"{field_name}[{i}]",
            )
            for i, v in enumerate(items)
        ]
    if isinstance(field_type, type) and issubclass(field_type, enum.Enum):
        return interpret_enum_env(value, field_type, field_name)
    msg = f"Invalid type for environment variable {field_name}: {field_type}. This is probably an issue in Reflex."
    raise ValueError(msg)
T = TypeVar("T")


class EnvVar(Generic[T]):
    """A single named environment variable with a default value and a target type."""

    name: str
    default: Any
    type_: T

    def __init__(self, name: str, default: Any, type_: T) -> None:
        """Initialize the environment variable.

        Args:
            name: The environment variable name.
            default: The default value.
            type_: The type of the value.
        """
        self.name = name
        self.default = default
        self.type_ = type_

    def interpret(self, value: str) -> T:
        """Interpret the environment variable value.

        Args:
            value: The environment variable value.

        Returns:
            The interpreted value.
        """
        return interpret_env_var_value(value, self.type_, self.name)

    def getenv(self) -> T | None:
        """Get the interpreted environment variable value.

        Returns:
            The interpreted value, or None when the variable is unset or blank.
        """
        raw = os.getenv(self.name)
        # Empty or whitespace-only values are treated the same as "not set".
        if raw is None or not raw.strip():
            return None
        return self.interpret(raw)

    def is_set(self) -> bool:
        """Check if the environment variable is set.

        Returns:
            True if the environment variable is set to a non-blank value.
        """
        raw = os.getenv(self.name) or ""
        return raw.strip() != ""

    def get(self) -> T:
        """Get the interpreted environment variable value or the default value if not set.

        Returns:
            The interpreted value.
        """
        interpreted = self.getenv()
        return self.default if interpreted is None else interpreted

    def set(self, value: T | None) -> None:
        """Set the environment variable. None unsets the variable.

        Args:
            value: The value to set.
        """
        if value is None:
            os.environ.pop(self.name, None)
            return
        # Enums are stored by their underlying value.
        if isinstance(value, enum.Enum):
            value = value.value
        # Lists are serialized with ":" as the separator.
        if isinstance(value, list):
            serialized = ":".join(str(v) for v in value)
        else:
            serialized = str(value)
        os.environ[self.name] = serialized
@lru_cache
def get_type_hints_environment(cls: type) -> dict[str, Any]:
    """Get the type hints for the environment variables.

    Cached because the hints for a class never change at runtime.

    Args:
        cls: The class.

    Returns:
        The type hints.
    """
    hints = get_type_hints(cls)
    return hints
class env_var:  # noqa: N801 # pyright: ignore [reportRedeclaration]
    """Descriptor that materializes an EnvVar from a class-level annotation."""

    name: str
    default: Any
    internal: bool = False

    def __init__(self, default: Any, internal: bool = False) -> None:
        """Initialize the descriptor.

        Args:
            default: The default value.
            internal: Whether the environment variable is reflex internal.
        """
        self.internal = internal
        self.default = default

    def __set_name__(self, owner: Any, name: str):
        """Record the attribute name this descriptor is bound to.

        Args:
            owner: The owner class.
            name: The name of the descriptor.
        """
        self.name = name

    def __get__(
        self, instance: EnvironmentVariables, owner: type[EnvironmentVariables]
    ):
        """Get the EnvVar instance.

        Args:
            instance: The instance.
            owner: The owner class.

        Returns:
            The EnvVar instance.
        """
        # The annotation is EnvVar[X]; pull X out as the interpretation type.
        annotation = get_type_hints_environment(owner)[self.name]
        var_type = get_args(annotation)[0]
        # Internal variables are looked up under a dunder-prefixed name.
        env_name = f"__{self.name}" if self.internal else self.name
        return EnvVar(name=env_name, default=self.default, type_=var_type)
if TYPE_CHECKING:
    # Static-typing shim: during type checking, `env_var(...)` is seen as a plain
    # function returning an EnvVar, so attribute access on EnvironmentVariables
    # resolves to EnvVar[...] instead of the descriptor class above.
    def env_var(default: Any, internal: bool = False) -> EnvVar:
        """Typing helper for the env_var descriptor.

        Args:
            default: The default value.
            internal: Whether the environment variable is reflex internal.

        Returns:
            The EnvVar instance.
        """
        return default
class PathExistsFlag:
    """Flag to indicate that a path must exist."""
# Annotated Path marker: ExistingPath values are dispatched to
# interpret_existing_path_env (which presumably validates existence — see the
# field_type dispatch in interpret_env_var_value above).
ExistingPath = Annotated[Path, PathExistsFlag]
class PerformanceMode(enum.Enum):
    """Performance mode for the app."""
    # Consumed via environment.REFLEX_PERF_MODE; member values are the accepted
    # env-var strings ("warn" is the declared default).
    WARN = "warn"
    RAISE = "raise"
    OFF = "off"
class ExecutorType(enum.Enum):
    """Executor for compiling the frontend."""
    THREAD = "thread"
    PROCESS = "process"
    MAIN_THREAD = "main_thread"
    @classmethod
    def get_executor_from_environment(cls):
        """Get the executor based on the environment variables.

        REFLEX_COMPILE_EXECUTOR takes precedence; when it is unset, the executor
        is inferred from REFLEX_COMPILE_PROCESSES / REFLEX_COMPILE_THREADS,
        falling back to the main thread.

        Returns:
            The executor.
        """
        from reflex.utils import console
        executor_type = environment.REFLEX_COMPILE_EXECUTOR.get()
        reflex_compile_processes = environment.REFLEX_COMPILE_PROCESSES.get()
        reflex_compile_threads = environment.REFLEX_COMPILE_THREADS.get()
        # By default, use the main thread. Unless the user has specified a different executor.
        # Using a process pool is much faster, but not supported on all platforms. It's gated behind a flag.
        if executor_type is None:
            # No explicit executor: infer one from the worker-count variables.
            if (
                platform.system() not in ("Linux", "Darwin")
                and reflex_compile_processes is not None
            ):
                console.warn("Multiprocessing is only supported on Linux and MacOS.")
            if (
                platform.system() in ("Linux", "Darwin")
                and reflex_compile_processes is not None
            ):
                # Non-positive counts are rejected; None lets the pool pick its default size.
                if reflex_compile_processes == 0:
                    console.warn(
                        "Number of processes must be greater than 0. If you want to use the default number of processes, set REFLEX_COMPILE_EXECUTOR to 'process'. Defaulting to None."
                    )
                    reflex_compile_processes = None
                elif reflex_compile_processes < 0:
                    console.warn(
                        "Number of processes must be greater than 0. Defaulting to None."
                    )
                    reflex_compile_processes = None
                executor_type = ExecutorType.PROCESS
            elif reflex_compile_threads is not None:
                # Same validation as above, for the thread pool.
                if reflex_compile_threads == 0:
                    console.warn(
                        "Number of threads must be greater than 0. If you want to use the default number of threads, set REFLEX_COMPILE_EXECUTOR to 'thread'. Defaulting to None."
                    )
                    reflex_compile_threads = None
                elif reflex_compile_threads < 0:
                    console.warn(
                        "Number of threads must be greater than 0. Defaulting to None."
                    )
                    reflex_compile_threads = None
                executor_type = ExecutorType.THREAD
            else:
                executor_type = ExecutorType.MAIN_THREAD
        match executor_type:
            case ExecutorType.PROCESS:
                # NOTE(review): "fork" start method — unavailable on Windows; presumably
                # chosen so workers inherit already-imported module state. Confirm.
                executor = concurrent.futures.ProcessPoolExecutor(
                    max_workers=reflex_compile_processes,
                    mp_context=multiprocessing.get_context("fork"),
                )
            case ExecutorType.THREAD:
                executor = concurrent.futures.ThreadPoolExecutor(
                    max_workers=reflex_compile_threads
                )
            case ExecutorType.MAIN_THREAD:
                FUTURE_RESULT_TYPE = TypeVar("FUTURE_RESULT_TYPE")
                class MainThreadExecutor:
                    """Executor-like shim that runs submitted callables synchronously."""
                    def __enter__(self):
                        return self
                    def __exit__(self, *args):
                        pass
                    def submit(
                        self, fn: Callable[..., FUTURE_RESULT_TYPE], *args, **kwargs
                    ) -> concurrent.futures.Future[FUTURE_RESULT_TYPE]:
                        # fn runs immediately on the calling thread; an exception
                        # raised by fn propagates here instead of being stored on
                        # the returned future.
                        future_job = concurrent.futures.Future()
                        future_job.set_result(fn(*args, **kwargs))
                        return future_job
                executor = MainThreadExecutor()
        return executor
class EnvironmentVariables:
    """Environment variables class to instantiate environment variables."""
    # Indicate the current command that was invoked in the reflex CLI.
    REFLEX_COMPILE_CONTEXT: EnvVar[constants.CompileContext] = env_var(
        constants.CompileContext.UNDEFINED, internal=True
    )
    # Whether to use npm over bun to install and run the frontend.
    REFLEX_USE_NPM: EnvVar[bool] = env_var(False)
    # The npm registry to use.
    NPM_CONFIG_REGISTRY: EnvVar[str | None] = env_var(None)
    # Whether to use Granian for the backend. By default, the backend uses Uvicorn if available.
    REFLEX_USE_GRANIAN: EnvVar[bool] = env_var(False)
    # Whether to use the system installed bun. If set to false, bun will be bundled with the app.
    REFLEX_USE_SYSTEM_BUN: EnvVar[bool] = env_var(False)
    # The working directory for the frontend directory.
    REFLEX_WEB_WORKDIR: EnvVar[Path] = env_var(Path(constants.Dirs.WEB))
    # The working directory for the states directory.
    REFLEX_STATES_WORKDIR: EnvVar[Path] = env_var(Path(constants.Dirs.STATES))
    # Path to the alembic config file
    ALEMBIC_CONFIG: EnvVar[ExistingPath] = env_var(Path(constants.ALEMBIC_CONFIG))
    # Include schemas in alembic migrations.
    ALEMBIC_INCLUDE_SCHEMAS: EnvVar[bool] = env_var(False)
    # Disable SSL verification for HTTPX requests.
    SSL_NO_VERIFY: EnvVar[bool] = env_var(False)
    # The directory to store uploaded files.
    REFLEX_UPLOADED_FILES_DIR: EnvVar[Path] = env_var(
        Path(constants.Dirs.UPLOADED_FILES)
    )
    # Which executor to use for compiling the frontend. When unset, it is inferred
    # by ExecutorType.get_executor_from_environment from the two variables below.
    REFLEX_COMPILE_EXECUTOR: EnvVar[ExecutorType | None] = env_var(None)
    # Whether to use separate processes to compile the frontend and how many. If not set, defaults to thread executor.
    REFLEX_COMPILE_PROCESSES: EnvVar[int | None] = env_var(None)
    # Whether to use separate threads to compile the frontend and how many. Defaults to `min(32, os.cpu_count() + 4)`.
    REFLEX_COMPILE_THREADS: EnvVar[int | None] = env_var(None)
    # The directory to store reflex dependencies.
    REFLEX_DIR: EnvVar[Path] = env_var(constants.Reflex.DIR)
    # Whether to print the SQL queries if the log level is INFO or lower.
    SQLALCHEMY_ECHO: EnvVar[bool] = env_var(False)
    # Whether to check db connections before using them.
    SQLALCHEMY_POOL_PRE_PING: EnvVar[bool] = env_var(True)
    # The size of the database connection pool.
    SQLALCHEMY_POOL_SIZE: EnvVar[int] = env_var(5)
    # The maximum overflow size of the database connection pool.
    SQLALCHEMY_MAX_OVERFLOW: EnvVar[int] = env_var(10)
    # Recycle connections after this many seconds.
    SQLALCHEMY_POOL_RECYCLE: EnvVar[int] = env_var(-1)
    # The timeout for acquiring a connection from the pool.
    SQLALCHEMY_POOL_TIMEOUT: EnvVar[int] = env_var(30)
    # Whether to ignore the redis config error. Some redis servers only allow out-of-band configuration.
    REFLEX_IGNORE_REDIS_CONFIG_ERROR: EnvVar[bool] = env_var(False)
    # Whether to skip purging the web directory in dev mode.
    REFLEX_PERSIST_WEB_DIR: EnvVar[bool] = env_var(False)
    # This env var stores the execution mode of the app
    REFLEX_ENV_MODE: EnvVar[constants.Env] = env_var(constants.Env.DEV)
    # Whether to run the backend only. Exclusive with REFLEX_FRONTEND_ONLY.
    REFLEX_BACKEND_ONLY: EnvVar[bool] = env_var(False)
    # Whether to run the frontend only. Exclusive with REFLEX_BACKEND_ONLY.
    REFLEX_FRONTEND_ONLY: EnvVar[bool] = env_var(False)
    # The port to run the frontend on.
    REFLEX_FRONTEND_PORT: EnvVar[int | None] = env_var(None)
    # The port to run the backend on.
    REFLEX_BACKEND_PORT: EnvVar[int | None] = env_var(None)
    # If this env var is set to "yes", App.compile will be a no-op
    REFLEX_SKIP_COMPILE: EnvVar[bool] = env_var(False, internal=True)
    # Whether to run app harness tests in headless mode.
    APP_HARNESS_HEADLESS: EnvVar[bool] = env_var(False)
    # Which app harness driver to use.
    APP_HARNESS_DRIVER: EnvVar[str] = env_var("Chrome")
    # Arguments to pass to the app harness driver.
    APP_HARNESS_DRIVER_ARGS: EnvVar[str] = env_var("")
    # Whether to check for outdated package versions.
    REFLEX_CHECK_LATEST_VERSION: EnvVar[bool] = env_var(True)
    # In which performance mode to run the app.
    REFLEX_PERF_MODE: EnvVar[PerformanceMode] = env_var(PerformanceMode.WARN)
    # The maximum size of the reflex state in kilobytes.
    REFLEX_STATE_SIZE_LIMIT: EnvVar[int] = env_var(1000)
    # Whether to use the turbopack bundler.
    REFLEX_USE_TURBOPACK: EnvVar[bool] = env_var(False)
    # Additional paths to include in the hot reload. Separated by a colon.
    REFLEX_HOT_RELOAD_INCLUDE_PATHS: EnvVar[list[Path]] = env_var([])
    # Paths to exclude from the hot reload. Takes precedence over include paths. Separated by a colon.
    REFLEX_HOT_RELOAD_EXCLUDE_PATHS: EnvVar[list[Path]] = env_var([])
    # Enables different behavior for when the backend would do a cold start if it was inactive.
    REFLEX_DOES_BACKEND_COLD_START: EnvVar[bool] = env_var(False)
    # The timeout for the backend to do a cold start in seconds.
    REFLEX_BACKEND_COLD_START_TIMEOUT: EnvVar[int] = env_var(10)
    # Used by flexgen to enumerate the pages.
    REFLEX_ADD_ALL_ROUTES_ENDPOINT: EnvVar[bool] = env_var(False)
    # The address to bind the HTTP client to. You can set this to "::" to enable IPv6.
    REFLEX_HTTP_CLIENT_BIND_ADDRESS: EnvVar[str | None] = env_var(None)
    # Maximum size of the message in the websocket server in bytes.
    REFLEX_SOCKET_MAX_HTTP_BUFFER_SIZE: EnvVar[int] = env_var(
        constants.POLLING_MAX_HTTP_BUFFER_SIZE
    )
    # The interval to send a ping to the websocket server in seconds.
    REFLEX_SOCKET_INTERVAL: EnvVar[int] = env_var(constants.Ping.INTERVAL)
    # The timeout to wait for a pong from the websocket server in seconds.
    REFLEX_SOCKET_TIMEOUT: EnvVar[int] = env_var(constants.Ping.TIMEOUT)
    # Whether to run Granian in a spawn process. This enables Reflex to pick up on environment variable changes between hot reloads.
    REFLEX_STRICT_HOT_RELOAD: EnvVar[bool] = env_var(False)
    # The path to the reflex log file. If not set, the log file will be stored in the reflex user directory.
    REFLEX_LOG_FILE: EnvVar[Path | None] = env_var(None)
    # Enable full logging of debug messages to reflex user directory.
    REFLEX_ENABLE_FULL_LOGGING: EnvVar[bool] = env_var(False)
    # Whether to enable hot module replacement
    VITE_HMR: EnvVar[bool] = env_var(True)
    # Whether to force a full reload on changes.
    VITE_FORCE_FULL_RELOAD: EnvVar[bool] = env_var(False)
    # Whether to enable Rolldown's experimental HMR.
    VITE_EXPERIMENTAL_HMR: EnvVar[bool] = env_var(False)
    # Whether to generate sourcemaps for the frontend.
    VITE_SOURCEMAP: EnvVar[Literal[False, True, "inline", "hidden"]] = env_var(False)  # noqa: RUF038
    # Whether to enable SSR for the frontend.
    REFLEX_SSR: EnvVar[bool] = env_var(True)
    # Whether to mount the compiled frontend app in the backend server in production.
    REFLEX_MOUNT_FRONTEND_COMPILED_APP: EnvVar[bool] = env_var(False, internal=True)
    # How long to delay writing updated states to disk. (Higher values mean less writes, but more chance of lost data.)
    REFLEX_STATE_MANAGER_DISK_DEBOUNCE_SECONDS: EnvVar[float] = env_var(2.0)
    # How long to wait between automatic reload on frontend error to avoid reload loops.
    REFLEX_AUTO_RELOAD_COOLDOWN_TIME_MS: EnvVar[int] = env_var(10_000)
    # Whether to enable debug logging for the redis state manager.
    REFLEX_STATE_MANAGER_REDIS_DEBUG: EnvVar[bool] = env_var(False)
    # Whether to opportunistically hold the redis lock to allow fast in-memory access while uncontended.
    REFLEX_OPLOCK_ENABLED: EnvVar[bool] = env_var(False)
    # How long to opportunistically hold the redis lock in milliseconds (must be less than the token expiration).
    REFLEX_OPLOCK_HOLD_TIME_MS: EnvVar[int] = env_var(0)
# Singleton accessor used throughout the codebase (environment.X.get()/set()).
environment = EnvironmentVariables()
# python-dotenv is optional: when it is not installed, load_dotenv stays None and
# _load_dotenv_from_files reports an error instead of silently doing nothing.
try:
    from dotenv import load_dotenv
except ImportError:
    load_dotenv = None
def _paths_from_env_files(env_files: str) -> list[Path]:
    """Convert a string of paths separated by os.pathsep into a list of Path objects.

    Args:
        env_files: The string of paths.

    Returns:
        A list of Path objects, in reverse order of appearance so that earlier
        entries are loaded last (and therefore win with override semantics).
    """
    paths: list[Path] = []
    for raw_element in env_files.split(os.pathsep):
        stripped = raw_element.strip()
        if stripped:
            paths.append(Path(stripped))
    # load env files in reverse order
    paths.reverse()
    return paths
def _load_dotenv_from_files(files: list[Path]):
    """Load environment variables from a list of files.

    Args:
        files: A list of Path objects representing the environment variable files.
    """
    from reflex.utils import console

    if not files:
        return
    if load_dotenv is None:
        # python-dotenv is an optional dependency; surface a helpful hint.
        console.error(
            """The `python-dotenv` package is required to load environment variables from a file. Run `pip install "python-dotenv>=1.1.0"`."""
        )
        return
    # Missing files are skipped silently; existing ones override the process env.
    for env_file in (f for f in files if f.exists()):
        load_dotenv(env_file, override=True)
def _paths_from_environment() -> list[Path]:
    """Get the paths from the REFLEX_ENV_FILE environment variable.

    Returns:
        A list of Path objects (empty when the variable is unset or blank).
    """
    raw_value = os.environ.get("REFLEX_ENV_FILE")
    if not raw_value:
        return []
    return _paths_from_env_files(raw_value)
def _load_dotenv_from_env():
    """Load environment variables from paths specified in REFLEX_ENV_FILE."""
    _load_dotenv_from_files(_paths_from_environment())
# Load the env files at import time if they are set in the ENV_FILE environment variable.
# NOTE: this runs as a module-import side effect, so REFLEX_ENV_FILE must be set
# before this module is first imported.
_load_dotenv_from_env()
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/environment.py",
"license": "Apache License 2.0",
"lines": 660,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
reflex-dev/reflex:reflex/plugins/base.py | """Base class for all plugins."""
from collections.abc import Callable, Sequence
from pathlib import Path
from typing import TYPE_CHECKING, ParamSpec, Protocol, TypedDict
from typing_extensions import Unpack
if TYPE_CHECKING:
from reflex.app import App, UnevaluatedPage
class CommonContext(TypedDict):
    """Common context for all plugins."""
    # Intentionally empty for now; keys shared by every hook context would go here.
# ParamSpec used to type the task callables accepted by AddTaskProtocol below.
P = ParamSpec("P")
class AddTaskProtocol(Protocol):
    """Protocol for adding a task to the pre-compile context."""
    # The task argument is positional-only; its return value may be a single
    # (str, str) tuple, a list of such tuples, or None — presumably
    # (path, contents) pairs to be written out; confirm against the compiler.
    def __call__(
        self,
        task: Callable[P, list[tuple[str, str]] | tuple[str, str] | None],
        /,
        *args: P.args,
        **kwargs: P.kwargs,
    ) -> None:
        """Add a task to the pre-compile context.

        Args:
            task: The task to add.
            args: The arguments to pass to the task
            kwargs: The keyword arguments to pass to the task
        """
class PreCompileContext(CommonContext):
    """Context for pre-compile hooks."""
    # Register a task whose return value(s) are saved (see AddTaskProtocol).
    add_save_task: AddTaskProtocol
    # Register a transformation for an existing file: (path, content -> new content).
    add_modify_task: Callable[[str, Callable[[str], str]], None]
    # The app's pages, prior to evaluation/compilation.
    unevaluated_pages: Sequence["UnevaluatedPage"]
class PostCompileContext(CommonContext):
    """Context for post-compile hooks."""
    # The fully compiled App instance.
    app: "App"
class Plugin:
    """Base class for all plugins.

    Every hook is a no-op by default; subclasses override only what they need.
    """

    def get_frontend_development_dependencies(
        self, **context: Unpack[CommonContext]
    ) -> list[str] | set[str] | tuple[str, ...]:
        """Get the NPM packages required by the plugin for development.

        Args:
            context: The context for the plugin.

        Returns:
            A list of packages required by the plugin for development.
        """
        # No development-only dependencies by default.
        return []

    def get_frontend_dependencies(
        self, **context: Unpack[CommonContext]
    ) -> list[str] | set[str] | tuple[str, ...]:
        """Get the NPM packages required by the plugin.

        Args:
            context: The context for the plugin.

        Returns:
            A list of packages required by the plugin.
        """
        # No runtime dependencies by default.
        return []

    def get_static_assets(
        self, **context: Unpack[CommonContext]
    ) -> Sequence[tuple[Path, str | bytes]]:
        """Get the static assets required by the plugin.

        Args:
            context: The context for the plugin.

        Returns:
            A list of static assets required by the plugin.
        """
        # No static assets by default.
        return []

    def get_stylesheet_paths(self, **context: Unpack[CommonContext]) -> Sequence[str]:
        """Get the paths to the stylesheets required by the plugin relative to the styles directory.

        Args:
            context: The context for the plugin.

        Returns:
            A list of paths to the stylesheets required by the plugin.
        """
        # No stylesheets by default.
        return []

    def pre_compile(self, **context: Unpack[PreCompileContext]) -> None:
        """Called before the compilation of the plugin.

        Args:
            context: The context for the plugin.
        """

    def post_compile(self, **context: Unpack[PostCompileContext]) -> None:
        """Called after the compilation of the plugin.

        Args:
            context: The context for the plugin.
        """

    def __repr__(self):
        """Return a string representation of the plugin.

        Returns:
            A string representation of the plugin.
        """
        return type(self).__name__ + "()"
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/plugins/base.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
reflex-dev/reflex:reflex/plugins/tailwind_v3.py | """Base class for all plugins."""
import dataclasses
from pathlib import Path
from types import SimpleNamespace
from reflex.constants.base import Dirs
from reflex.constants.compiler import Ext, PageNames
from reflex.plugins.shared_tailwind import (
TailwindConfig,
TailwindPlugin,
tailwind_config_js_template,
)
class Constants(SimpleNamespace):
    """Tailwind constants."""
    # The Tailwindcss version
    VERSION = "tailwindcss@3.4.17"
    # The Tailwind config.
    CONFIG = "tailwind.config.js"
    # Default Tailwind content paths
    CONTENT = [f"./{Dirs.PAGES}/**/*.{{js,ts,jsx,tsx}}", "./utils/**/*.{js,ts,jsx,tsx}"]
    # Relative tailwind style path to root stylesheet in Dirs.STYLES.
    ROOT_STYLE_PATH = "./tailwind.css"
    # Content of the style content. ({radix_url} is substituted by compile_root_style.)
    ROOT_STYLE_CONTENT = """
@import "tailwindcss/base";
@import url('{radix_url}');
@tailwind components;
@tailwind utilities;
"""
    # The default tailwind css.
    TAILWIND_CSS = "@import url('./tailwind.css');"
def compile_config(config: TailwindConfig):
    """Compile the Tailwind config.

    Args:
        config: The Tailwind config.

    Returns:
        The compiled Tailwind config as a (filename, contents) pair.
    """
    compiled_js = tailwind_config_js_template(
        **config,
        default_content=Constants.CONTENT,
    )
    return Constants.CONFIG, compiled_js
def compile_root_style():
    """Compile the Tailwind root style.

    Returns:
        The compiled Tailwind root style as a (path, contents) pair.
    """
    from reflex.compiler.compiler import RADIX_THEMES_STYLESHEET

    style_path = Path(Dirs.STYLES) / Constants.ROOT_STYLE_PATH
    style_content = Constants.ROOT_STYLE_CONTENT.format(
        radix_url=RADIX_THEMES_STYLESHEET,
    )
    return str(style_path), style_content
def _index_of_element_that_has(haystack: list[str], needle: str) -> int | None:
    """Return the index of the first line containing *needle*, or None if absent."""
    for position, line in enumerate(haystack):
        if needle in line:
            return position
    return None
def add_tailwind_to_postcss_config(postcss_file_content: str) -> str:
    """Add tailwind to the postcss config.

    Args:
        postcss_file_content: The content of the postcss config file.

    Returns:
        The modified postcss config file content.
    """
    from reflex.constants import Dirs

    postcss_file_lines = postcss_file_content.splitlines()
    # Already configured; leave the file untouched.
    if _index_of_element_that_has(postcss_file_lines, "tailwindcss") is not None:
        return postcss_file_content
    line_with_postcss_plugins = _index_of_element_that_has(
        postcss_file_lines, "plugins"
    )
    # BUGFIX: compare against None explicitly — index 0 is a valid match but is
    # falsy, so `if not line_with_postcss_plugins` would wrongly bail out when
    # the "plugins" key sits on the first line.
    if line_with_postcss_plugins is None:
        print(  # noqa: T201
            f"Could not find line with 'plugins' in {Dirs.POSTCSS_JS}. "
            "Please make sure the file exists and is valid."
        )
        return postcss_file_content
    postcss_import_line = _index_of_element_that_has(
        postcss_file_lines, '"postcss-import"'
    )
    # Insert after postcss-import when present (again, 0 is a valid index, so an
    # `or` fallback would misfire); otherwise directly after the plugins line.
    insert_after = (
        postcss_import_line
        if postcss_import_line is not None
        else line_with_postcss_plugins
    )
    postcss_file_lines.insert(insert_after + 1, "tailwindcss: {},")
    return "\n".join(postcss_file_lines)
def add_tailwind_to_css_file(css_file_content: str) -> str:
    """Add tailwind to the css file.

    Args:
        css_file_content: The content of the css file.

    Returns:
        The modified css file content.
    """
    from reflex.compiler.compiler import RADIX_THEMES_STYLESHEET

    tailwind_import = Constants.TAILWIND_CSS.splitlines()[0]
    if tailwind_import in css_file_content:
        # Already references the tailwind root stylesheet; nothing to do.
        return css_file_content
    if RADIX_THEMES_STYLESHEET not in css_file_content:
        print(  # noqa: T201
            f"Could not find line with '{RADIX_THEMES_STYLESHEET}' in {Dirs.STYLES}. "
            "Please make sure the file exists and is valid."
        )
        return css_file_content
    # Swap the bare radix import for the tailwind root stylesheet import.
    radix_import = f"@import url('{RADIX_THEMES_STYLESHEET}');"
    return css_file_content.replace(radix_import, Constants.TAILWIND_CSS)
@dataclasses.dataclass
class TailwindV3Plugin(TailwindPlugin):
    """Plugin for Tailwind CSS v3."""

    def get_frontend_development_dependencies(self, **context) -> list[str]:
        """Get the packages required by the plugin.

        Args:
            **context: The context for the plugin.

        Returns:
            A list of packages required by the plugin.
        """
        deps = list(super().get_frontend_development_dependencies(**context))
        deps.append(Constants.VERSION)
        return deps

    def pre_compile(self, **context):
        """Pre-compile the plugin.

        Args:
            context: The context for the plugin.
        """
        add_save = context["add_save_task"]
        add_modify = context["add_modify_task"]
        # Emit the tailwind config and the root stylesheet.
        add_save(compile_config, self.get_unversioned_config())
        add_save(compile_root_style)
        # Wire tailwind into the existing postcss config and root CSS.
        add_modify(Dirs.POSTCSS_JS, add_tailwind_to_postcss_config)
        root_stylesheet = str(Path(Dirs.STYLES) / (PageNames.STYLESHEET_ROOT + Ext.CSS))
        add_modify(root_stylesheet, add_tailwind_to_css_file)
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/plugins/tailwind_v3.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
reflex-dev/reflex:reflex/plugins/tailwind_v4.py | """Base class for all plugins."""
import dataclasses
from pathlib import Path
from types import SimpleNamespace
from reflex.constants.base import Dirs
from reflex.constants.compiler import Ext, PageNames
from reflex.plugins.shared_tailwind import (
TailwindConfig,
TailwindPlugin,
tailwind_config_js_template,
)
class Constants(SimpleNamespace):
    """Tailwind constants."""
    # The Tailwindcss version
    VERSION = "tailwindcss@4.1.18"
    # The Tailwind config.
    CONFIG = "tailwind.config.js"
    # Default Tailwind content paths
    CONTENT = [f"./{Dirs.PAGES}/**/*.{{js,ts,jsx,tsx}}", "./utils/**/*.{js,ts,jsx,tsx}"]
    # Relative tailwind style path to root stylesheet in Dirs.STYLES.
    ROOT_STYLE_PATH = "./tailwind.css"
    # Content of the style content. ({radix_url} is substituted by compile_root_style;
    # v4 uses CSS cascade layers instead of the v3 @tailwind directives.)
    ROOT_STYLE_CONTENT = """@layer theme, base, components, utilities;
@import "tailwindcss/theme.css" layer(theme);
@import "tailwindcss/preflight.css" layer(base);
@import "{radix_url}" layer(components);
@import "tailwindcss/utilities.css" layer(utilities);
@config "../tailwind.config.js";
"""
    # The default tailwind css.
    TAILWIND_CSS = "@import url('./tailwind.css');"
def compile_config(config: TailwindConfig):
    """Compile the Tailwind config.

    Args:
        config: The Tailwind config.

    Returns:
        The compiled Tailwind config.
    """
    rendered = tailwind_config_js_template(**config, default_content=Constants.CONTENT)
    return Constants.CONFIG, rendered
def compile_root_style():
    """Compile the Tailwind root style.

    Returns:
        The compiled Tailwind root style.
    """
    from reflex.compiler.compiler import RADIX_THEMES_STYLESHEET

    output_path = str(Path(Dirs.STYLES) / Constants.ROOT_STYLE_PATH)
    content = Constants.ROOT_STYLE_CONTENT.format(radix_url=RADIX_THEMES_STYLESHEET)
    return output_path, content
def _index_of_element_that_has(haystack: list[str], needle: str) -> int | None:
    """Return the index of the first line that contains *needle*, else None."""
    matching_indices = (idx for idx, line in enumerate(haystack) if needle in line)
    return next(matching_indices, None)
def add_tailwind_to_postcss_config(postcss_file_content: str) -> str:
    """Add tailwind to the postcss config.

    Args:
        postcss_file_content: The content of the postcss config file.

    Returns:
        The modified postcss config file content.
    """
    from reflex.constants import Dirs

    postcss_file_lines = postcss_file_content.splitlines()
    line_with_postcss_plugins = _index_of_element_that_has(
        postcss_file_lines, "plugins"
    )
    # BUGFIX: compare against None — index 0 is a valid (but falsy) match, so a
    # bare truthiness test would wrongly report "plugins" as missing on line 0.
    if line_with_postcss_plugins is None:
        print(  # noqa: T201
            f"Could not find line with 'plugins' in {Dirs.POSTCSS_JS}. "
            "Please make sure the file exists and is valid."
        )
        return postcss_file_content
    # Tailwind v4 ships its own postcss plugin; the v3-era plugins must go.
    # NOTE(review): removals assume the plugin lines come after the "plugins"
    # line — the insertion index is not recomputed after the pops.
    plugins_to_remove = ['"postcss-import"', "tailwindcss", "autoprefixer"]
    plugins_to_add = ['"@tailwindcss/postcss"']
    for plugin in plugins_to_remove:
        plugin_index = _index_of_element_that_has(postcss_file_lines, plugin)
        if plugin_index is not None:
            postcss_file_lines.pop(plugin_index)
    for plugin in plugins_to_add[::-1]:
        # BUGFIX: `is None`, not truthiness — a match at index 0 would otherwise
        # be treated as "absent" and the plugin would be inserted a second time.
        if _index_of_element_that_has(postcss_file_lines, plugin) is None:
            postcss_file_lines.insert(
                line_with_postcss_plugins + 1, f"    {plugin}: {{}},"
            )
    return "\n".join(postcss_file_lines)
def add_tailwind_to_css_file(css_file_content: str) -> str:
    """Add tailwind to the css file.

    Args:
        css_file_content: The content of the css file.

    Returns:
        The modified css file content.
    """
    from reflex.compiler.compiler import RADIX_THEMES_STYLESHEET

    already_added = Constants.TAILWIND_CSS.splitlines()[0] in css_file_content
    if already_added:
        return css_file_content
    if RADIX_THEMES_STYLESHEET not in css_file_content:
        print(  # noqa: T201
            f"Could not find line with '{RADIX_THEMES_STYLESHEET}' in {Dirs.STYLES}. "
            "Please make sure the file exists and is valid."
        )
        return css_file_content
    # Replace the bare radix import with the tailwind root stylesheet import.
    old_import = f"@import url('{RADIX_THEMES_STYLESHEET}');"
    return css_file_content.replace(old_import, Constants.TAILWIND_CSS)
@dataclasses.dataclass
class TailwindV4Plugin(TailwindPlugin):
    """Plugin for Tailwind CSS v4."""

    def get_frontend_development_dependencies(self, **context) -> list[str]:
        """Get the packages required by the plugin.

        Args:
            **context: The context for the plugin.

        Returns:
            A list of packages required by the plugin.
        """
        base_deps = super().get_frontend_development_dependencies(**context)
        return [*base_deps, Constants.VERSION, "@tailwindcss/postcss@4.1.18"]

    def pre_compile(self, **context):
        """Pre-compile the plugin.

        Args:
            context: The context for the plugin.
        """
        # Emit the tailwind config and root stylesheet, then patch the
        # existing postcss config and root CSS to reference them.
        context["add_save_task"](compile_config, self.get_unversioned_config())
        context["add_save_task"](compile_root_style)
        context["add_modify_task"](Dirs.POSTCSS_JS, add_tailwind_to_postcss_config)
        root_css = str(Path(Dirs.STYLES) / (PageNames.STYLESHEET_ROOT + Ext.CSS))
        context["add_modify_task"](root_css, add_tailwind_to_css_file)
| {
"repo_id": "reflex-dev/reflex",
"file_path": "reflex/plugins/tailwind_v4.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
resemble-ai/chatterbox:example_tts_turbo.py | import torchaudio as ta
import torch
from chatterbox.tts_turbo import ChatterboxTurboTTS
# Load the Turbo model
model = ChatterboxTurboTTS.from_pretrained(device="cuda")
# Generate with Paralinguistic Tags
text = "Oh, that's hilarious! [chuckle] Um anyway, we do have a new model in store. It's the SkyNet T-800 series and it's got basically everything. Including AI integration with ChatGPT and all that jazz. Would you like me to get some prices for you?"
# Generate audio (requires a reference clip for voice cloning)
# wav = model.generate(text, audio_prompt_path="your_10s_ref_clip.wav")
wav = model.generate(text)
ta.save("test-turbo.wav", wav, model.sr)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "example_tts_turbo.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:gradio_tts_turbo_app.py | import random
import numpy as np
import torch
import gradio as gr
from chatterbox.tts_turbo import ChatterboxTurboTTS
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
EVENT_TAGS = [
"[clear throat]", "[sigh]", "[shush]", "[cough]", "[groan]",
"[sniff]", "[gasp]", "[chuckle]", "[laugh]"
]
# --- REFINED CSS ---
# 1. tag-container: Forces the row to wrap items instead of scrolling. Removes borders/backgrounds.
# 2. tag-btn: Sets the specific look (indigo theme) and stops them from stretching.
CUSTOM_CSS = """
.tag-container {
display: flex !important;
flex-wrap: wrap !important; /* This fixes the one-per-line issue */
gap: 8px !important;
margin-top: 5px !important;
margin-bottom: 10px !important;
border: none !important;
background: transparent !important;
}
.tag-btn {
min-width: fit-content !important;
width: auto !important;
height: 32px !important;
font-size: 13px !important;
background: #eef2ff !important;
border: 1px solid #c7d2fe !important;
color: #3730a3 !important;
border-radius: 6px !important;
padding: 0 10px !important;
margin: 0 !important;
box-shadow: none !important;
}
.tag-btn:hover {
background: #c7d2fe !important;
transform: translateY(-1px);
}
"""
INSERT_TAG_JS = """
(tag_val, current_text) => {
const textarea = document.querySelector('#main_textbox textarea');
if (!textarea) return current_text + " " + tag_val;
const start = textarea.selectionStart;
const end = textarea.selectionEnd;
let prefix = " ";
let suffix = " ";
if (start === 0) prefix = "";
else if (current_text[start - 1] === ' ') prefix = "";
if (end < current_text.length && current_text[end] === ' ') suffix = "";
return current_text.slice(0, start) + prefix + tag_val + suffix + current_text.slice(end);
}
"""
def set_seed(seed: int):
    """Seed all RNGs in play (random, numpy, torch CPU and CUDA) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def load_model():
    """Instantiate ChatterboxTurboTTS on the detected device and return it."""
    print(f"Loading Chatterbox-Turbo on {DEVICE}...")
    return ChatterboxTurboTTS.from_pretrained(DEVICE)
def generate(
    model,
    text,
    audio_prompt_path,
    temperature,
    seed_num,
    min_p,
    top_p,
    top_k,
    repetition_penalty,
    norm_loudness
):
    """Synthesize *text* and return a (sample_rate, waveform) pair for gr.Audio.

    Args:
        model: The ChatterboxTurboTTS instance from session state (or None).
        text: The text to synthesize.
        audio_prompt_path: Path to the reference audio clip.
        temperature: Sampling temperature.
        seed_num: RNG seed; 0 means "do not seed".
        min_p: Min-p sampling threshold.
        top_p: Top-p sampling threshold.
        top_k: Top-k sampling cutoff.
        repetition_penalty: Penalty applied to repeated tokens.
        norm_loudness: Whether to normalize output loudness.
    """
    # Reload the model if the Gradio session state was lost.
    if model is None:
        model = ChatterboxTurboTTS.from_pretrained(DEVICE)

    # Any non-zero seed makes generation reproducible.
    if seed_num != 0:
        set_seed(int(seed_num))

    waveform = model.generate(
        text,
        audio_prompt_path=audio_prompt_path,
        temperature=temperature,
        min_p=min_p,
        top_p=top_p,
        top_k=int(top_k),
        repetition_penalty=repetition_penalty,
        norm_loudness=norm_loudness,
    )
    return (model.sr, waveform.squeeze(0).numpy())
with gr.Blocks(title="Chatterbox Turbo", css=CUSTOM_CSS) as demo:
    gr.Markdown("# ⚡ Chatterbox Turbo")
    # Model is loaded once per session via demo.load and kept in Gradio state.
    model_state = gr.State(None)
    with gr.Row():
        with gr.Column():
            text = gr.Textbox(
                value="Oh, that's hilarious! [chuckle] Um anyway, we do have a new model in store. It's the SkyNet T-800 series and it's got basically everything. Including AI integration with ChatGPT and um all that jazz. Would you like me to get some prices for you?",
                label="Text to synthesize (max chars 300)",
                max_lines=5,
                elem_id="main_textbox"
            )
            # --- Event Tags ---
            # Switched back to Row, but applied specific CSS to force wrapping
            with gr.Row(elem_classes=["tag-container"]):
                for tag in EVENT_TAGS:
                    # elem_classes targets the button specifically
                    btn = gr.Button(tag, elem_classes=["tag-btn"])
                    # fn=None + js=... runs entirely client-side: the tag is
                    # inserted at the cursor without a server round-trip.
                    btn.click(
                        fn=None,
                        inputs=[btn, text],
                        outputs=text,
                        js=INSERT_TAG_JS
                    )
            ref_wav = gr.Audio(
                sources=["upload", "microphone"],
                type="filepath",
                label="Reference Audio File",
                value="https://storage.googleapis.com/chatterbox-demo-samples/prompts/female_random_podcast.wav"
            )
            run_btn = gr.Button("Generate ⚡", variant="primary")
        with gr.Column():
            audio_output = gr.Audio(label="Output Audio")
            with gr.Accordion("Advanced Options", open=False):
                seed_num = gr.Number(value=0, label="Random seed (0 for random)")
                temp = gr.Slider(0.05, 2.0, step=.05, label="Temperature", value=0.8)
                top_p = gr.Slider(0.00, 1.00, step=0.01, label="Top P", value=0.95)
                top_k = gr.Slider(0, 1000, step=10, label="Top K", value=1000)
                repetition_penalty = gr.Slider(1.00, 2.00, step=0.05, label="Repetition Penalty", value=1.2)
                min_p = gr.Slider(0.00, 1.00, step=0.01, label="Min P (Set to 0 to disable)", value=0.00)
                norm_loudness = gr.Checkbox(value=True, label="Normalize Loudness (-27 LUFS)")
    demo.load(fn=load_model, inputs=[], outputs=model_state)
    # Input order must match the parameter order of generate().
    run_btn.click(
        fn=generate,
        inputs=[
            model_state,
            text,
            ref_wav,
            temp,
            seed_num,
            min_p,
            top_p,
            top_k,
            repetition_penalty,
            norm_loudness,
        ],
        outputs=audio_output,
    )
if __name__ == "__main__":
    # Serialize generation: one request at a time, up to 50 queued.
    demo.queue(
        max_size=50,
        default_concurrency_limit=1,
    ).launch(share=True)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "gradio_tts_turbo_app.py",
"license": "MIT License",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/utils/intmeanflow.py | import torch
import torch.nn as nn
def get_intmeanflow_time_mixer(dims):
    """Return a bias-free linear layer that mixes (t, r) time embeddings.

    Diagonal init as described in section 3.3 of
    https://arxiv.org/pdf/2510.07979: the layer maps the concatenation
    [e_t, e_r] (size ``2 * dims``) down to ``dims``, initialized as
    [I | 0] so that at initialization the output equals e_t exactly.

    Args:
        dims: Dimensionality of a single time embedding.

    Returns:
        ``nn.Linear(dims * 2, dims, bias=False)`` with diagonal init.
    """
    layer = nn.Linear(dims * 2, dims, bias=False)
    with torch.no_grad():
        # Identity on the first half (e_t), zeros on the second half (e_r).
        target_weight = torch.zeros(dims, 2 * dims)
        target_weight[:, 0:dims] = torch.eye(dims)
        layer.weight.data = target_weight
    return layer
if __name__ == '__main__':
    # Smoke test: with diagonal init the mixer must pass e_t through untouched.
    dims = 6
    mixer = get_intmeanflow_time_mixer(dims)
    print(f"Layer weight (AFTER init):\n{mixer.weight.data}\n")
    e_t = torch.arange(0.0, 6.0)
    e_r = torch.arange(6.0, 12.0)
    e_concat = torch.cat([e_t, e_r]).unsqueeze(0)  # Shape (1, 12)
    output = mixer(e_concat)
    print(f"Test Input e_t: \n{e_t}")
    print(f"Test Input e_r: \n{e_r}")
    print(f"Test Input concat: \n{e_concat}")
    print(f"Forward Pass Output: \n{output.squeeze(0)}")
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/utils/intmeanflow.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:src/chatterbox/tts_turbo.py | import os
import math
from dataclasses import dataclass
from pathlib import Path
import librosa
import torch
import perth
import pyloudnorm as ln
from safetensors.torch import load_file
from huggingface_hub import snapshot_download
from transformers import AutoTokenizer
from .models.t3 import T3
from .models.s3tokenizer import S3_SR
from .models.s3gen import S3GEN_SR, S3Gen
from .models.tokenizers import EnTokenizer
from .models.voice_encoder import VoiceEncoder
from .models.t3.modules.cond_enc import T3Cond
from .models.t3.modules.t3_config import T3Config
from .models.s3gen.const import S3GEN_SIL
import logging
logger = logging.getLogger(__name__)
REPO_ID = "ResembleAI/chatterbox-turbo"
def punc_norm(text: str) -> str:
    """Normalize punctuation quirks (LLM-style characters, spacing) before synthesis."""
    if not text:
        return "You need to add some text for me to talk."

    # Ensure the utterance starts with a capital letter.
    if text[0].islower():
        text = text[0].upper() + text[1:]

    # Collapse runs of whitespace into single spaces.
    text = " ".join(text.split())

    # Map unusual / LLM-favored punctuation onto characters common in the dataset.
    for needle, replacement in (
        ("…", ", "),
        (":", ","),
        ("—", "-"),
        ("–", "-"),
        (" ,", ","),
        ("“", "\""),
        ("”", "\""),
        ("‘", "'"),
        ("’", "'"),
    ):
        text = text.replace(needle, replacement)

    # Guarantee the text ends in some terminating punctuation.
    text = text.rstrip(" ")
    if not text.endswith((".", "!", "?", "-", ",")):
        text += "."
    return text
@dataclass
class Conditionals:
    """Bundles the conditioning inputs for both synthesis stages.

    - T3 conditionals (``t3``): speaker_emb, clap_emb,
      cond_prompt_speech_tokens, cond_prompt_speech_emb, emotion_adv
    - S3Gen conditionals (``gen``): prompt_token, prompt_token_len,
      prompt_feat, prompt_feat_len, embedding
    """
    t3: T3Cond
    gen: dict

    def to(self, device):
        """Move the T3 cond and every tensor value in ``gen`` to ``device``; returns self."""
        self.t3 = self.t3.to(device=device)
        # Mutate the dict in place so existing references stay valid.
        for key in self.gen:
            value = self.gen[key]
            if torch.is_tensor(value):
                self.gen[key] = value.to(device=device)
        return self

    def save(self, fpath: Path):
        """Serialize both condition sets to ``fpath`` with torch.save."""
        torch.save(dict(t3=self.t3.__dict__, gen=self.gen), fpath)

    @classmethod
    def load(cls, fpath, map_location="cpu"):
        """Deserialize a Conditionals previously written by :meth:`save`."""
        if isinstance(map_location, str):
            map_location = torch.device(map_location)
        payload = torch.load(fpath, map_location=map_location, weights_only=True)
        return cls(T3Cond(**payload['t3']), payload['gen'])
class ChatterboxTurboTTS:
    """End-to-end Chatterbox "Turbo" text-to-speech pipeline.

    Combines the T3 text-to-speech-token model, the S3Gen token-to-waveform
    generator (meanflow variant), a voice encoder for speaker embeddings, a
    Hugging Face text tokenizer, and a Perth audio watermarker.
    """
    # Max reference-audio length fed to the speech tokenizer (15 s at 16 kHz).
    ENC_COND_LEN = 15 * S3_SR
    # Max reference-audio length used by the S3Gen decoder (10 s at 24 kHz).
    DEC_COND_LEN = 10 * S3GEN_SR
    def __init__(
        self,
        t3: T3,
        s3gen: S3Gen,
        ve: VoiceEncoder,
        tokenizer: EnTokenizer,
        device: str,
        conds: Conditionals = None,
    ):
        """Store components; prefer `from_local` / `from_pretrained` over calling directly."""
        self.sr = S3GEN_SR  # sample rate of synthesized audio
        self.t3 = t3
        self.s3gen = s3gen
        self.ve = ve
        self.tokenizer = tokenizer
        self.device = device
        self.conds = conds  # may be None until prepare_conditionals() runs
        self.watermarker = perth.PerthImplicitWatermarker()
    @classmethod
    def from_local(cls, ckpt_dir, device) -> 'ChatterboxTurboTTS':
        """Build the pipeline from checkpoint files located in `ckpt_dir`."""
        ckpt_dir = Path(ckpt_dir)
        # Always load to CPU first for non-CUDA devices to handle CUDA-saved models
        if device in ["cpu", "mps"]:
            map_location = torch.device('cpu')
        else:
            map_location = None
        ve = VoiceEncoder()
        ve.load_state_dict(
            load_file(ckpt_dir / "ve.safetensors")
        )
        ve.to(device).eval()
        # Turbo specific hp
        hp = T3Config(text_tokens_dict_size=50276)
        hp.llama_config_name = "GPT2_medium"
        hp.speech_tokens_dict_size = 6563
        hp.input_pos_emb = None
        hp.speech_cond_prompt_len = 375
        hp.use_perceiver_resampler = False
        hp.emotion_adv = False
        t3 = T3(hp)
        t3_state = load_file(ckpt_dir / "t3_turbo_v1.safetensors")
        # NOTE(review): safetensors load_file returns a flat tensor dict, so this
        # branch looks like a legacy-checkpoint unwrap — confirm it is still needed.
        if "model" in t3_state.keys():
            t3_state = t3_state["model"][0]
        t3.load_state_dict(t3_state)
        # NOTE(review): the transformer's token-embedding table is deleted here,
        # presumably unused by the Turbo inference path — confirm before reuse.
        del t3.tfmr.wte
        t3.to(device).eval()
        s3gen = S3Gen(meanflow=True)
        weights = load_file(ckpt_dir / "s3gen_meanflow.safetensors")
        s3gen.load_state_dict(
            weights, strict=True
        )
        s3gen.to(device).eval()
        tokenizer = AutoTokenizer.from_pretrained(ckpt_dir)
        # GPT2-style tokenizers ship without a pad token; reuse EOS for padding.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        # Vocabulary size must match text_tokens_dict_size configured above.
        if len(tokenizer) != 50276:
            print(f"WARNING: Tokenizer len {len(tokenizer)} != 50276")
        conds = None
        # Optional built-in default voice shipped with the checkpoint.
        builtin_voice = ckpt_dir / "conds.pt"
        if builtin_voice.exists():
            conds = Conditionals.load(builtin_voice, map_location=map_location).to(device)
        return cls(t3, s3gen, ve, tokenizer, device, conds=conds)
    @classmethod
    def from_pretrained(cls, device) -> 'ChatterboxTurboTTS':
        """Download the published checkpoint from the Hugging Face Hub and build the pipeline."""
        # Check if MPS is available on macOS
        if device == "mps" and not torch.backends.mps.is_available():
            if not torch.backends.mps.is_built():
                print("MPS not available because the current PyTorch install was not built with MPS enabled.")
            else:
                print("MPS not available because the current MacOS version is not 12.3+ and/or you do not have an MPS-enabled device on this machine.")
            device = "cpu"
        local_path = snapshot_download(
            repo_id=REPO_ID,
            # `True` falls back to the locally cached huggingface-cli login token.
            token=os.getenv("HF_TOKEN") or True,
            # Optional: Filter to download only what you need
            allow_patterns=["*.safetensors", "*.json", "*.txt", "*.pt", "*.model"]
        )
        return cls.from_local(local_path, device)
    def norm_loudness(self, wav, sr, target_lufs=-27):
        """Scale `wav` toward `target_lufs` integrated loudness; best-effort (never raises)."""
        try:
            meter = ln.Meter(sr)
            loudness = meter.integrated_loudness(wav)
            gain_db = target_lufs - loudness
            gain_linear = 10.0 ** (gain_db / 20.0)
            # Guard against inf/NaN gain for silent or degenerate input.
            if math.isfinite(gain_linear) and gain_linear > 0.0:
                wav = wav * gain_linear
        except Exception as e:
            print(f"Warning: Error in norm_loudness, skipping: {e}")
        return wav
    def prepare_conditionals(self, wav_fpath, exaggeration=0.5, norm_loudness=True):
        """Compute and cache voice conditioning (T3 + S3Gen) from a reference wav file."""
        ## Load and norm reference wav
        s3gen_ref_wav, _sr = librosa.load(wav_fpath, sr=S3GEN_SR)
        assert len(s3gen_ref_wav) / _sr > 5.0, "Audio prompt must be longer than 5 seconds!"
        if norm_loudness:
            s3gen_ref_wav = self.norm_loudness(s3gen_ref_wav, _sr)
        # 16 kHz copy for the tokenizer / voice encoder; 24 kHz copy for S3Gen.
        ref_16k_wav = librosa.resample(s3gen_ref_wav, orig_sr=S3GEN_SR, target_sr=S3_SR)
        s3gen_ref_wav = s3gen_ref_wav[:self.DEC_COND_LEN]
        s3gen_ref_dict = self.s3gen.embed_ref(s3gen_ref_wav, S3GEN_SR, device=self.device)
        # Speech cond prompt tokens
        # NOTE(review): if speech_cond_prompt_len were falsy, t3_cond_prompt_tokens
        # would be unbound below — Turbo always sets it to 375, so unreached here.
        if plen := self.t3.hp.speech_cond_prompt_len:
            s3_tokzr = self.s3gen.tokenizer
            t3_cond_prompt_tokens, _ = s3_tokzr.forward([ref_16k_wav[:self.ENC_COND_LEN]], max_len=plen)
            t3_cond_prompt_tokens = torch.atleast_2d(t3_cond_prompt_tokens).to(self.device)
        # Voice-encoder speaker embedding
        ve_embed = torch.from_numpy(self.ve.embeds_from_wavs([ref_16k_wav], sample_rate=S3_SR))
        ve_embed = ve_embed.mean(axis=0, keepdim=True).to(self.device)
        t3_cond = T3Cond(
            speaker_emb=ve_embed,
            cond_prompt_speech_tokens=t3_cond_prompt_tokens,
            emotion_adv=exaggeration * torch.ones(1, 1, 1),
        ).to(device=self.device)
        self.conds = Conditionals(t3_cond, s3gen_ref_dict)
    def generate(
        self,
        text,
        repetition_penalty=1.2,
        min_p=0.00,
        top_p=0.95,
        audio_prompt_path=None,
        exaggeration=0.0,
        cfg_weight=0.0,
        temperature=0.8,
        top_k=1000,
        norm_loudness=True,
    ):
        """Synthesize `text` to a watermarked waveform tensor of shape (1, num_samples)."""
        if audio_prompt_path:
            self.prepare_conditionals(audio_prompt_path, exaggeration=exaggeration, norm_loudness=norm_loudness)
        else:
            assert self.conds is not None, "Please `prepare_conditionals` first or specify `audio_prompt_path`"
        # These knobs exist only for interface parity with the base model.
        if cfg_weight > 0.0 or exaggeration > 0.0 or min_p > 0.0:
            logger.warning("CFG, min_p and exaggeration are not supported by Turbo version and will be ignored.")
        # Norm and tokenize text
        text = punc_norm(text)
        text_tokens = self.tokenizer(text, return_tensors="pt", padding=True, truncation=True)
        text_tokens = text_tokens.input_ids.to(self.device)
        speech_tokens = self.t3.inference_turbo(
            t3_cond=self.conds.t3,
            text_tokens=text_tokens,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            repetition_penalty=repetition_penalty,
        )
        # Remove OOV tokens and add silence to end
        # (valid speech-token ids are < 6561; ids above are specials — see
        # speech_tokens_dict_size=6563 in from_local).
        speech_tokens = speech_tokens[speech_tokens < 6561]
        speech_tokens = speech_tokens.to(self.device)
        silence = torch.tensor([S3GEN_SIL, S3GEN_SIL, S3GEN_SIL]).long().to(self.device)
        speech_tokens = torch.cat([speech_tokens, silence])
        wav, _ = self.s3gen.inference(
            speech_tokens=speech_tokens,
            ref_dict=self.conds.gen,
            n_cfm_timesteps=2,
        )
        wav = wav.squeeze(0).detach().cpu().numpy()
        # Embed an inaudible provenance watermark before returning.
        watermarked_wav = self.watermarker.apply_watermark(wav, sample_rate=self.sr)
        return torch.from_numpy(watermarked_wav).unsqueeze(0)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/tts_turbo.py",
"license": "MIT License",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
resemble-ai/chatterbox:src/chatterbox/mtl_tts.py | from dataclasses import dataclass
from pathlib import Path
import os
import librosa
import torch
import perth
import torch.nn.functional as F
from safetensors.torch import load_file as load_safetensors
from huggingface_hub import snapshot_download
from .models.t3 import T3
from .models.t3.modules.t3_config import T3Config
from .models.s3tokenizer import S3_SR, drop_invalid_tokens
from .models.s3gen import S3GEN_SR, S3Gen
from .models.tokenizers import MTLTokenizer
from .models.voice_encoder import VoiceEncoder
from .models.t3.modules.cond_enc import T3Cond
REPO_ID = "ResembleAI/chatterbox"
# Supported languages for the multilingual model
SUPPORTED_LANGUAGES = {
"ar": "Arabic",
"da": "Danish",
"de": "German",
"el": "Greek",
"en": "English",
"es": "Spanish",
"fi": "Finnish",
"fr": "French",
"he": "Hebrew",
"hi": "Hindi",
"it": "Italian",
"ja": "Japanese",
"ko": "Korean",
"ms": "Malay",
"nl": "Dutch",
"no": "Norwegian",
"pl": "Polish",
"pt": "Portuguese",
"ru": "Russian",
"sv": "Swedish",
"sw": "Swahili",
"tr": "Turkish",
"zh": "Chinese",
}
def punc_norm(text: str) -> str:
    """Normalize punctuation quirks (LLM-style characters, spacing) before synthesis."""
    if not text:
        return "You need to add some text for me to talk."

    # Ensure the utterance starts with a capital letter.
    if text[0].islower():
        text = text[0].upper() + text[1:]

    # Collapse runs of whitespace into single spaces.
    text = " ".join(text.split())

    # Map unusual / LLM-favored punctuation onto characters common in the dataset.
    for needle, replacement in (
        ("...", ", "),
        ("…", ", "),
        (":", ","),
        (" - ", ", "),
        (";", ", "),
        ("—", "-"),
        ("–", "-"),
        (" ,", ","),
        ("“", "\""),
        ("”", "\""),
        ("‘", "'"),
        ("’", "'"),
    ):
        text = text.replace(needle, replacement)

    # Guarantee the text ends in terminating punctuation (Latin or CJK).
    text = text.rstrip(" ")
    if not text.endswith((".", "!", "?", "-", ",", "、", ",", "。", "?", "!")):
        text += "."
    return text
@dataclass
class Conditionals:
    """Bundles the conditioning inputs for both synthesis stages.

    - T3 conditionals (``t3``): speaker_emb, clap_emb,
      cond_prompt_speech_tokens, cond_prompt_speech_emb, emotion_adv
    - S3Gen conditionals (``gen``): prompt_token, prompt_token_len,
      prompt_feat, prompt_feat_len, embedding
    """
    t3: T3Cond
    gen: dict

    def to(self, device):
        """Move the T3 cond and every tensor value in ``gen`` to ``device``; returns self."""
        self.t3 = self.t3.to(device=device)
        # Mutate the dict in place so existing references stay valid.
        for key in self.gen:
            value = self.gen[key]
            if torch.is_tensor(value):
                self.gen[key] = value.to(device=device)
        return self

    def save(self, fpath: Path):
        """Serialize both condition sets to ``fpath`` with torch.save."""
        torch.save(dict(t3=self.t3.__dict__, gen=self.gen), fpath)

    @classmethod
    def load(cls, fpath, map_location="cpu"):
        """Deserialize a Conditionals previously written by :meth:`save`."""
        payload = torch.load(fpath, map_location=map_location, weights_only=True)
        return cls(T3Cond(**payload['t3']), payload['gen'])
class ChatterboxMultilingualTTS:
    """Multilingual Chatterbox text-to-speech pipeline (23 languages).

    Combines the T3 text-to-speech-token model, the S3Gen token-to-waveform
    generator, a voice encoder for speaker embeddings, a multilingual
    grapheme tokenizer, and a Perth audio watermarker.
    """
    # Max reference-audio length fed to the speech tokenizer (6 s at 16 kHz).
    ENC_COND_LEN = 6 * S3_SR
    # Max reference-audio length used by the S3Gen decoder (10 s at 24 kHz).
    DEC_COND_LEN = 10 * S3GEN_SR
    def __init__(
        self,
        t3: T3,
        s3gen: S3Gen,
        ve: VoiceEncoder,
        tokenizer: MTLTokenizer,
        device: str,
        conds: Conditionals = None,
    ):
        """Store components; prefer `from_local` / `from_pretrained` over calling directly."""
        self.sr = S3GEN_SR  # sample rate of synthesized audio
        self.t3 = t3
        self.s3gen = s3gen
        self.ve = ve
        self.tokenizer = tokenizer
        self.device = device
        self.conds = conds  # may be None until prepare_conditionals() runs
        self.watermarker = perth.PerthImplicitWatermarker()
    @classmethod
    def get_supported_languages(cls):
        """Return dictionary of supported language codes and names."""
        return SUPPORTED_LANGUAGES.copy()
    @classmethod
    def from_local(cls, ckpt_dir, device) -> 'ChatterboxMultilingualTTS':
        """Build the pipeline from checkpoint files located in `ckpt_dir`."""
        ckpt_dir = Path(ckpt_dir)
        ve = VoiceEncoder()
        ve.load_state_dict(
            torch.load(ckpt_dir / "ve.pt", weights_only=True)
        )
        ve.to(device).eval()
        t3 = T3(T3Config.multilingual())
        t3_state = load_safetensors(ckpt_dir / "t3_mtl23ls_v2.safetensors")
        # NOTE(review): safetensors returns a flat tensor dict, so this branch
        # looks like a legacy-checkpoint unwrap — confirm it is still needed.
        if "model" in t3_state.keys():
            t3_state = t3_state["model"][0]
        t3.load_state_dict(t3_state)
        t3.to(device).eval()
        s3gen = S3Gen()
        s3gen.load_state_dict(
            torch.load(ckpt_dir / "s3gen.pt", weights_only=True)
        )
        s3gen.to(device).eval()
        tokenizer = MTLTokenizer(
            str(ckpt_dir / "grapheme_mtl_merged_expanded_v1.json")
        )
        conds = None
        # Optional built-in default voice shipped with the checkpoint.
        if (builtin_voice := ckpt_dir / "conds.pt").exists():
            conds = Conditionals.load(builtin_voice).to(device)
        return cls(t3, s3gen, ve, tokenizer, device, conds=conds)
    @classmethod
    def from_pretrained(cls, device: torch.device) -> 'ChatterboxMultilingualTTS':
        """Download the published checkpoint from the Hugging Face Hub and build the pipeline."""
        ckpt_dir = Path(
            snapshot_download(
                repo_id=REPO_ID,
                repo_type="model",
                revision="main",
                allow_patterns=["ve.pt", "t3_mtl23ls_v2.safetensors", "s3gen.pt", "grapheme_mtl_merged_expanded_v1.json", "conds.pt", "Cangjie5_TC.json"],
                token=os.getenv("HF_TOKEN"),
            )
        )
        return cls.from_local(ckpt_dir, device)
    def prepare_conditionals(self, wav_fpath, exaggeration=0.5):
        """Compute and cache voice conditioning (T3 + S3Gen) from a reference wav file."""
        ## Load reference wav
        s3gen_ref_wav, _sr = librosa.load(wav_fpath, sr=S3GEN_SR)
        # 16 kHz copy for the tokenizer / voice encoder; 24 kHz copy for S3Gen.
        ref_16k_wav = librosa.resample(s3gen_ref_wav, orig_sr=S3GEN_SR, target_sr=S3_SR)
        s3gen_ref_wav = s3gen_ref_wav[:self.DEC_COND_LEN]
        s3gen_ref_dict = self.s3gen.embed_ref(s3gen_ref_wav, S3GEN_SR, device=self.device)
        # Speech cond prompt tokens
        t3_cond_prompt_tokens = None
        if plen := self.t3.hp.speech_cond_prompt_len:
            s3_tokzr = self.s3gen.tokenizer
            t3_cond_prompt_tokens, _ = s3_tokzr.forward([ref_16k_wav[:self.ENC_COND_LEN]], max_len=plen)
            t3_cond_prompt_tokens = torch.atleast_2d(t3_cond_prompt_tokens).to(self.device)
        # Voice-encoder speaker embedding
        ve_embed = torch.from_numpy(self.ve.embeds_from_wavs([ref_16k_wav], sample_rate=S3_SR))
        ve_embed = ve_embed.mean(axis=0, keepdim=True).to(self.device)
        t3_cond = T3Cond(
            speaker_emb=ve_embed,
            cond_prompt_speech_tokens=t3_cond_prompt_tokens,
            emotion_adv=exaggeration * torch.ones(1, 1, 1),
        ).to(device=self.device)
        self.conds = Conditionals(t3_cond, s3gen_ref_dict)
    def generate(
        self,
        text,
        language_id,
        audio_prompt_path=None,
        exaggeration=0.5,
        cfg_weight=0.5,
        temperature=0.8,
        repetition_penalty=2.0,
        min_p=0.05,
        top_p=1.0,
    ):
        """Synthesize `text` in `language_id` to a watermarked waveform tensor (1, num_samples).

        Raises:
            ValueError: if `language_id` is not one of SUPPORTED_LANGUAGES.
        """
        # Validate language_id
        if language_id and language_id.lower() not in SUPPORTED_LANGUAGES:
            supported_langs = ", ".join(SUPPORTED_LANGUAGES.keys())
            raise ValueError(
                f"Unsupported language_id '{language_id}'. "
                f"Supported languages: {supported_langs}"
            )
        if audio_prompt_path:
            self.prepare_conditionals(audio_prompt_path, exaggeration=exaggeration)
        else:
            assert self.conds is not None, "Please `prepare_conditionals` first or specify `audio_prompt_path`"
        # Update exaggeration if needed
        # (rebuild only the T3 cond so cached prompt tokens/embeddings are reused).
        if float(exaggeration) != float(self.conds.t3.emotion_adv[0, 0, 0].item()):
            _cond: T3Cond = self.conds.t3
            self.conds.t3 = T3Cond(
                speaker_emb=_cond.speaker_emb,
                cond_prompt_speech_tokens=_cond.cond_prompt_speech_tokens,
                emotion_adv=exaggeration * torch.ones(1, 1, 1),
            ).to(device=self.device)
        # Norm and tokenize text
        text = punc_norm(text)
        text_tokens = self.tokenizer.text_to_tokens(text, language_id=language_id.lower() if language_id else None).to(self.device)
        text_tokens = torch.cat([text_tokens, text_tokens], dim=0)  # Need two seqs for CFG
        # Wrap the token sequence in start/stop-of-text markers.
        sot = self.t3.hp.start_text_token
        eot = self.t3.hp.stop_text_token
        text_tokens = F.pad(text_tokens, (1, 0), value=sot)
        text_tokens = F.pad(text_tokens, (0, 1), value=eot)
        with torch.inference_mode():
            speech_tokens = self.t3.inference(
                t3_cond=self.conds.t3,
                text_tokens=text_tokens,
                max_new_tokens=1000,  # TODO: use the value in config
                temperature=temperature,
                cfg_weight=cfg_weight,
                repetition_penalty=repetition_penalty,
                min_p=min_p,
                top_p=top_p,
            )
            # Extract only the conditional batch.
            speech_tokens = speech_tokens[0]
            # TODO: output becomes 1D
            speech_tokens = drop_invalid_tokens(speech_tokens)
            speech_tokens = speech_tokens.to(self.device)
            wav, _ = self.s3gen.inference(
                speech_tokens=speech_tokens,
                ref_dict=self.conds.gen,
            )
            wav = wav.squeeze(0).detach().cpu().numpy()
            # Embed an inaudible provenance watermark before returning.
            watermarked_wav = self.watermarker.apply_watermark(wav, sample_rate=self.sr)
        return torch.from_numpy(watermarked_wav).unsqueeze(0)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/mtl_tts.py",
"license": "MIT License",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/configs.py | from ..utils import AttrDict
# Conditional-flow-matching hyperparameters consumed by the S3Gen decoder.
# NOTE(review): field semantics are inferred from the key names — confirm
# against the CFM implementation before changing any value.
CFM_PARAMS = AttrDict({
    "sigma_min": 1e-06,
    "solver": "euler",
    "t_scheduler": "cosine",
    "training_cfg_rate": 0.2,
    "inference_cfg_rate": 0.7,
    "reg_loss_type": "l1"
})
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/configs.py",
"license": "MIT License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:example_for_mac.py | import torch
import torchaudio as ta
from chatterbox.tts import ChatterboxTTS
# Detect device (Mac with M1/M2/M3/M4)
device = "mps" if torch.backends.mps.is_available() else "cpu"
map_location = torch.device(device)
torch_load_original = torch.load
def patched_torch_load(*args, **kwargs):
    """torch.load wrapper that injects a default map_location.

    The published checkpoints were saved on CUDA; without a map_location
    they would fail to load on an MPS/CPU-only Mac.
    """
    if 'map_location' not in kwargs:
        kwargs['map_location'] = map_location
    return torch_load_original(*args, **kwargs)
# Monkey-patch globally so every torch.load inside the library is redirected.
torch.load = patched_torch_load
model = ChatterboxTTS.from_pretrained(device=device)
text = "Today is the day. I want to move like a titan at dawn, sweat like a god forging lightning. No more excuses. From now on, my mornings will be temples of discipline. I am going to work out like the gods… every damn day."
# If you want to synthesize with a different voice, specify the audio prompt
AUDIO_PROMPT_PATH = "YOUR_FILE.wav"  # replace with a real reference wav before running
wav = model.generate(
    text,
    audio_prompt_path=AUDIO_PROMPT_PATH,
    exaggeration=2.0,
    cfg_weight=0.5
)
ta.save("test-2.wav", wav, model.sr)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "example_for_mac.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:example_tts.py | import torchaudio as ta
import torch
from chatterbox.tts import ChatterboxTTS
from chatterbox.mtl_tts import ChatterboxMultilingualTTS
# Automatically detect the best available device
if torch.cuda.is_available():
    device = "cuda"
elif torch.backends.mps.is_available():
    device = "mps"
else:
    device = "cpu"
print(f"Using device: {device}")
model = ChatterboxTTS.from_pretrained(device=device)
text = "Ezreal and Jinx teamed up with Ahri, Yasuo, and Teemo to take down the enemy's Nexus in an epic late-game pentakill."
# Synthesize with the built-in default voice and save to disk.
wav = model.generate(text)
ta.save("test-1.wav", wav, model.sr)
# Multilingual model: pass an ISO language code via language_id.
multilingual_model = ChatterboxMultilingualTTS.from_pretrained(device=device)
text = "Bonjour, comment ça va? Ceci est le modèle de synthèse vocale multilingue Chatterbox, il prend en charge 23 langues."
wav = multilingual_model.generate(text, language_id="fr")
ta.save("test-2.wav", wav, multilingual_model.sr)
# If you want to synthesize with a different voice, specify the audio prompt
AUDIO_PROMPT_PATH = "YOUR_FILE.wav"  # replace with a real reference wav before running
wav = model.generate(text, audio_prompt_path=AUDIO_PROMPT_PATH)
ta.save("test-3.wav", wav, model.sr)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "example_tts.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:example_vc.py | import torch
import torchaudio as ta
from chatterbox.vc import ChatterboxVC
# Automatically detect the best available device
if torch.cuda.is_available():
    device = "cuda"
elif torch.backends.mps.is_available():
    device = "mps"
else:
    device = "cpu"
print(f"Using device: {device}")
# Placeholder paths: replace with real files before running.
AUDIO_PATH = "YOUR_FILE.wav"
TARGET_VOICE_PATH = "YOUR_FILE.wav"
model = ChatterboxVC.from_pretrained(device)
# Convert the source audio so it sounds like the target voice.
wav = model.generate(
    audio=AUDIO_PATH,
    target_voice_path=TARGET_VOICE_PATH,
)
ta.save("testvc.wav", wav, model.sr)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "example_vc.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:gradio_tts_app.py | import random
import numpy as np
import torch
import gradio as gr
from chatterbox.tts import ChatterboxTTS
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def set_seed(seed: int):
    """Seed torch (CPU and all CUDA devices), random, and numpy for reproducible sampling."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # CUDA seeding is a no-op on CPU-only machines.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
def load_model():
    """Load the pretrained TTS model onto DEVICE (run once per Gradio session via demo.load)."""
    model = ChatterboxTTS.from_pretrained(DEVICE)
    return model
def generate(model, text, audio_prompt_path, exaggeration, temperature, seed_num, cfgw, min_p, top_p, repetition_penalty):
    """Synthesize speech for the Gradio UI.

    Args mirror the UI inputs; `model` comes from session state and may be
    None (e.g. right after a reload), in which case it is loaded lazily.
    Returns a (sample_rate, waveform ndarray) tuple for the gr.Audio output.
    """
    if model is None:
        model = ChatterboxTTS.from_pretrained(DEVICE)
    if seed_num != 0:
        # Non-zero seed -> deterministic sampling; 0 keeps it random.
        set_seed(int(seed_num))
    wav = model.generate(
        text,
        audio_prompt_path=audio_prompt_path,
        exaggeration=exaggeration,
        temperature=temperature,
        cfg_weight=cfgw,
        min_p=min_p,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
    )
    return (model.sr, wav.squeeze(0).numpy())
# Gradio UI: left column holds the inputs, right column the synthesized audio.
with gr.Blocks() as demo:
    model_state = gr.State(None)  # Loaded once per session/user
    with gr.Row():
        with gr.Column():
            text = gr.Textbox(
                value="Now let's make my mum's favourite. So three mars bars into the pan. Then we add the tuna and just stir for a bit, just let the chocolate and fish infuse. A sprinkle of olive oil and some tomato ketchup. Now smell that. Oh boy this is going to be incredible.",
                label="Text to synthesize (max chars 300)",
                max_lines=5
            )
            ref_wav = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Reference Audio File", value=None)
            exaggeration = gr.Slider(0.25, 2, step=.05, label="Exaggeration (Neutral = 0.5, extreme values can be unstable)", value=.5)
            cfg_weight = gr.Slider(0.0, 1, step=.05, label="CFG/Pace", value=0.5)
            with gr.Accordion("More options", open=False):
                seed_num = gr.Number(value=0, label="Random seed (0 for random)")
                temp = gr.Slider(0.05, 5, step=.05, label="temperature", value=.8)
                min_p = gr.Slider(0.00, 1.00, step=0.01, label="min_p || Newer Sampler. Recommend 0.02 > 0.1. Handles Higher Temperatures better. 0.00 Disables", value=0.05)
                top_p = gr.Slider(0.00, 1.00, step=0.01, label="top_p || Original Sampler. 1.0 Disables(recommended). Original 0.8", value=1.00)
                repetition_penalty = gr.Slider(1.00, 2.00, step=0.1, label="repetition_penalty", value=1.2)
            run_btn = gr.Button("Generate", variant="primary")
        with gr.Column():
            audio_output = gr.Audio(label="Output Audio")
    # Load the model on page load and stash it in the per-session state.
    demo.load(fn=load_model, inputs=[], outputs=model_state)
    run_btn.click(
        fn=generate,
        inputs=[
            model_state,
            text,
            ref_wav,
            exaggeration,
            temp,
            seed_num,
            cfg_weight,
            min_p,
            top_p,
            repetition_penalty,
        ],
        outputs=audio_output,
    )
if __name__ == "__main__":
    # Queue caps waiting requests and runs one generation at a time.
    demo.queue(
        max_size=50,
        default_concurrency_limit=1,
    ).launch(share=True)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "gradio_tts_app.py",
"license": "MIT License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:gradio_vc_app.py | import torch
import gradio as gr
from chatterbox.vc import ChatterboxVC
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# Loaded once at import time and shared by all Gradio requests.
model = ChatterboxVC.from_pretrained(DEVICE)
def generate(audio, target_voice_path):
    """Run voice conversion; returns (sample_rate, waveform ndarray) for Gradio."""
    wav = model.generate(
        audio, target_voice_path=target_voice_path,
    )
    return model.sr, wav.squeeze(0).numpy()
demo = gr.Interface(
    generate,
    [
        gr.Audio(sources=["upload", "microphone"], type="filepath", label="Input audio file"),
        gr.Audio(sources=["upload", "microphone"], type="filepath", label="Target voice audio file (if none, the default voice is used)", value=None),
    ],
    "audio",  # single audio output component
)
if __name__ == "__main__":
    demo.launch()
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "gradio_vc_app.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/decoder.py | # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import pack, rearrange, repeat
from .utils.mask import add_optional_chunk_mask
from .matcha.decoder import SinusoidalPosEmb, Block1D, ResnetBlock1D, Downsample1D, \
TimestepEmbedding, Upsample1D
from .matcha.transformer import BasicTransformerBlock
from .utils.intmeanflow import get_intmeanflow_time_mixer
def mask_to_bias(mask: torch.Tensor, dtype: torch.dtype) -> torch.Tensor:
    """Convert a boolean attention mask into an additive bias.

    True (keep) positions map to 0.0; False (masked) positions map to a
    large negative value (-1e10) so they vanish after softmax.
    """
    assert mask.dtype == torch.bool
    assert dtype in [torch.float32, torch.bfloat16, torch.float16]
    # NOTE(Mddct): a hard-coded -1e10 is used instead of torch.finfo(dtype).min
    # because torch.finfo has jit issues.
    float_mask = mask.to(dtype)
    bias = (1.0 - float_mask) * -1.0e+10
    return bias
class Transpose(torch.nn.Module):
    """Module wrapper around torch.transpose, usable inside nn.Sequential."""

    def __init__(self, dim0: int, dim1: int):
        super().__init__()
        self.dim0 = dim0
        self.dim1 = dim1

    def forward(self, x: torch.Tensor):
        # Swap the two configured dimensions; no copy is made.
        return torch.transpose(x, self.dim0, self.dim1)
class CausalBlock1D(Block1D):
    """Block1D variant whose convolution is causal (no future-frame leakage)."""
    def __init__(self, dim: int, dim_out: int):
        super(CausalBlock1D, self).__init__(dim, dim_out)
        # Replace the parent's block with a causal conv -> LayerNorm -> Mish stack.
        # LayerNorm expects channels-last, hence the Transpose pair around it.
        self.block = torch.nn.Sequential(
            CausalConv1d(dim, dim_out, 3),
            Transpose(1, 2),
            nn.LayerNorm(dim_out),
            Transpose(1, 2),
            nn.Mish(),
        )
    def forward(self, x: torch.Tensor, mask: torch.Tensor):
        # Mask both input and output so padded frames stay zeroed.
        output = self.block(x * mask)
        return output * mask
class CausalResnetBlock1D(ResnetBlock1D):
    """ResnetBlock1D with both sub-blocks swapped for causal equivalents."""
    def __init__(self, dim: int, dim_out: int, time_emb_dim: int, groups: int = 8):
        super(CausalResnetBlock1D, self).__init__(dim, dim_out, time_emb_dim, groups)
        # The parent builds non-causal blocks; override them with causal versions.
        self.block1 = CausalBlock1D(dim, dim_out)
        self.block2 = CausalBlock1D(dim_out, dim_out)
class CausalConv1d(torch.nn.Conv1d):
    """Conv1d that pads only on the left, so output frame t depends solely on
    input frames <= t (no future leakage) and the output length equals the
    input length.

    Fix vs. previous version: the left padding now accounts for `dilation`
    (``dilation * (kernel_size - 1)`` instead of ``kernel_size - 1``), so the
    layer stays causal AND length-preserving for dilated convolutions.
    Behavior is unchanged for the dilation=1 case used throughout this file.
    """
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        dilation: int = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = 'zeros',
        device=None,
        dtype=None
    ) -> None:
        # padding=0 here: padding is applied manually (left-only) in forward().
        super(CausalConv1d, self).__init__(in_channels, out_channels,
                                           kernel_size, stride,
                                           padding=0, dilation=dilation,
                                           groups=groups, bias=bias,
                                           padding_mode=padding_mode,
                                           device=device, dtype=dtype)
        # Striding would break the one-to-one frame alignment assumed by callers.
        assert stride == 1
        # Left padding equal to the conv's receptive-field lookback keeps
        # output length == input length.
        self.causal_padding = (dilation * (kernel_size - 1), 0)
    def forward(self, x: torch.Tensor):
        # Zero-pad on the left only, then run the standard convolution.
        x = F.pad(x, self.causal_padding)
        x = super(CausalConv1d, self).forward(x)
        return x
class ConditionalDecoder(nn.Module):
def __init__(
self,
in_channels=320,
out_channels=80,
causal=True,
channels=[256],
dropout=0.0,
attention_head_dim=64,
n_blocks=4,
num_mid_blocks=12,
num_heads=8,
act_fn="gelu",
meanflow=False,
):
"""
This decoder requires an input with the same shape of the target. So, if your text content
is shorter or longer than the outputs, please re-sampling it before feeding to the decoder.
"""
super().__init__()
channels = tuple(channels)
self.meanflow = meanflow
self.in_channels = in_channels
self.out_channels = out_channels
self.causal = causal
self.time_embeddings = SinusoidalPosEmb(in_channels)
time_embed_dim = channels[0] * 4
self.time_mlp = TimestepEmbedding(
in_channels=in_channels,
time_embed_dim=time_embed_dim,
act_fn="silu",
)
self.down_blocks = nn.ModuleList([])
self.mid_blocks = nn.ModuleList([])
self.up_blocks = nn.ModuleList([])
# NOTE jrm: `static_chunk_size` is missing?
self.static_chunk_size = 0
output_channel = in_channels
for i in range(len(channels)): # pylint: disable=consider-using-enumerate
input_channel = output_channel
output_channel = channels[i]
is_last = i == len(channels) - 1
resnet = CausalResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) if self.causal else \
ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
transformer_blocks = nn.ModuleList(
[
BasicTransformerBlock(
dim=output_channel,
num_attention_heads=num_heads,
attention_head_dim=attention_head_dim,
dropout=dropout,
activation_fn=act_fn,
)
for _ in range(n_blocks)
]
)
downsample = (
Downsample1D(output_channel) if not is_last else
CausalConv1d(output_channel, output_channel, 3) if self.causal else nn.Conv1d(output_channel, output_channel, 3, padding=1)
)
self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample]))
for _ in range(num_mid_blocks):
input_channel = channels[-1]
out_channels = channels[-1]
resnet = CausalResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) if self.causal else \
ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim)
transformer_blocks = nn.ModuleList(
[
BasicTransformerBlock(
dim=output_channel,
num_attention_heads=num_heads,
attention_head_dim=attention_head_dim,
dropout=dropout,
activation_fn=act_fn,
)
for _ in range(n_blocks)
]
)
self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks]))
channels = channels[::-1] + (channels[0],)
for i in range(len(channels) - 1):
input_channel = channels[i] * 2
output_channel = channels[i + 1]
is_last = i == len(channels) - 2
resnet = CausalResnetBlock1D(
dim=input_channel,
dim_out=output_channel,
time_emb_dim=time_embed_dim,
) if self.causal else ResnetBlock1D(
dim=input_channel,
dim_out=output_channel,
time_emb_dim=time_embed_dim,
)
transformer_blocks = nn.ModuleList(
[
BasicTransformerBlock(
dim=output_channel,
num_attention_heads=num_heads,
attention_head_dim=attention_head_dim,
dropout=dropout,
activation_fn=act_fn,
)
for _ in range(n_blocks)
]
)
upsample = (
Upsample1D(output_channel, use_conv_transpose=True)
if not is_last
else CausalConv1d(output_channel, output_channel, 3) if self.causal else nn.Conv1d(output_channel, output_channel, 3, padding=1)
)
self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample]))
self.final_block = CausalBlock1D(channels[-1], channels[-1]) if self.causal else Block1D(channels[-1], channels[-1])
self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1)
self.initialize_weights()
self.time_embed_mixer = None
if self.meanflow:
self.time_embed_mixer = get_intmeanflow_time_mixer(time_embed_dim)
@property
def dtype(self):
return self.final_proj.weight.dtype
def initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv1d):
nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.GroupNorm):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
    def forward(self, x, mask, mu, t, spks=None, cond=None, r=None):
        """Run the conditional 1-D U-Net estimator.

        Args:
            x: noisy mel input, (B, 80, T).
            mask: frame validity mask — presumably (B, 1, T); TODO confirm.
            mu: encoder output, stacked with ``x`` along the channel axis.
            t: diffusion timestep, shape (batch_size,).
            spks: optional speaker embedding (B, C), repeated over time before stacking.
            cond: optional conditioning mels, stacked along the channel axis.
            r: end timestep, used only when ``self.meanflow`` is set.

        Returns:
            Estimator output of shape (B, out_channels, T), zeroed outside ``mask``.
        """
        t = self.time_embeddings(t).to(t.dtype)
        t = self.time_mlp(t)
        if self.meanflow:
            # meanflow mode: mix embeddings of start time t and end time r into one embedding
            r = self.time_embeddings(r).to(t.dtype)
            r = self.time_mlp(r)
            concat_embed = torch.cat([t, r], dim=1)
            t = self.time_embed_mixer(concat_embed)
        # stack all conditioning signals along the channel dimension
        x = pack([x, mu], "b * t")[0]
        if spks is not None:
            spks = repeat(spks, "b c -> b c t", t=x.shape[-1])
            x = pack([x, spks], "b * t")[0]
        if cond is not None:
            x = pack([x, cond], "b * t")[0]
        hiddens = []
        masks = [mask]
        for resnet, transformer_blocks, downsample in self.down_blocks:
            mask_down = masks[-1]
            x = resnet(x, mask_down, t)
            x = rearrange(x, "b c t -> b t c").contiguous()
            # attn_mask = torch.matmul(mask_down.transpose(1, 2).contiguous(), mask_down)
            attn_mask = add_optional_chunk_mask(x, mask_down.bool(), False, False, 0, self.static_chunk_size, -1)
            attn_mask = mask_to_bias(attn_mask == 1, x.dtype)
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=attn_mask,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t").contiguous()
            hiddens.append(x)  # Save hidden states for skip connections
            x = downsample(x * mask_down)
            # downsample halves the time axis, so the mask is strided by 2 to match
            masks.append(mask_down[:, :, ::2])
        masks = masks[:-1]
        mask_mid = masks[-1]
        for resnet, transformer_blocks in self.mid_blocks:
            x = resnet(x, mask_mid, t)
            x = rearrange(x, "b c t -> b t c").contiguous()
            # attn_mask = torch.matmul(mask_mid.transpose(1, 2).contiguous(), mask_mid)
            attn_mask = add_optional_chunk_mask(x, mask_mid.bool(), False, False, 0, self.static_chunk_size, -1)
            attn_mask = mask_to_bias(attn_mask == 1, x.dtype)
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=attn_mask,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t").contiguous()
        for resnet, transformer_blocks, upsample in self.up_blocks:
            mask_up = masks.pop()
            skip = hiddens.pop()
            # crop x to the skip's length (upsampling may overshoot by a frame), then concat channels
            x = pack([x[:, :, :skip.shape[-1]], skip], "b * t")[0]
            x = resnet(x, mask_up, t)
            x = rearrange(x, "b c t -> b t c").contiguous()
            # attn_mask = torch.matmul(mask_up.transpose(1, 2).contiguous(), mask_up)
            attn_mask = add_optional_chunk_mask(x, mask_up.bool(), False, False, 0, self.static_chunk_size, -1)
            attn_mask = mask_to_bias(attn_mask == 1, x.dtype)
            for transformer_block in transformer_blocks:
                x = transformer_block(
                    hidden_states=x,
                    attention_mask=attn_mask,
                    timestep=t,
                )
            x = rearrange(x, "b t c -> b c t").contiguous()
            x = upsample(x * mask_up)
        x = self.final_block(x, mask_up)
        output = self.final_proj(x * mask_up)
        return output * mask
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/decoder.py",
"license": "MIT License",
"lines": 298,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/f0_predictor.py | # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
from torch.nn.utils.parametrizations import weight_norm
class ConvRNNF0Predictor(nn.Module):
def __init__(self,
num_class: int = 1,
in_channels: int = 80,
cond_channels: int = 512
):
super().__init__()
self.num_class = num_class
self.condnet = nn.Sequential(
weight_norm(
nn.Conv1d(in_channels, cond_channels, kernel_size=3, padding=1)
),
nn.ELU(),
weight_norm(
nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
),
nn.ELU(),
weight_norm(
nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
),
nn.ELU(),
weight_norm(
nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
),
nn.ELU(),
weight_norm(
nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1)
),
nn.ELU(),
)
self.classifier = nn.Linear(in_features=cond_channels, out_features=self.num_class)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.condnet(x)
x = x.transpose(1, 2)
return torch.abs(self.classifier(x).squeeze(-1))
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/f0_predictor.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/flow.py | # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
from typing import Dict, Optional
logger = logging.getLogger(__name__)
import torch
import torch.nn as nn
from torch.nn import functional as F
from .utils.mask import make_pad_mask
from .configs import CFM_PARAMS
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
def _repeat_batch_dim(tnsr, B, ndim):
"repeat batch dimension if it's equal to 1"
if tnsr is not None:
# add missing batch dim if needed
while tnsr.ndim < ndim:
tnsr = tnsr[None]
# repeat batch dim as needed
if B > 1 and tnsr.size(0) == 1:
tnsr = tnsr.repeat(B, *([1] * (ndim - 1)))
assert tnsr.ndim == ndim, f"Expected {ndim=}, got {tnsr.ndim=}"
return tnsr
class CausalMaskedDiffWithXvec(torch.nn.Module):
    """Speech-token -> mel-spectrogram flow-matching model with x-vector conditioning.

    Speech tokens are embedded, upsampled by the causal ``encoder`` (by a factor
    of ``token_mel_ratio``), projected to mel dimensionality, and decoded into a
    mel spectrogram by the conditional flow-matching ``decoder``.
    """

    def __init__(self,
                 input_size: int = 512,
                 output_size: int = 80,
                 spk_embed_dim: int = 192,
                 output_type: str = "mel",
                 vocab_size: int = 6561,
                 input_frame_rate: int = 25,
                 only_mask_loss: bool = True,
                 token_mel_ratio: int = 2,
                 pre_lookahead_len: int = 3,
                 encoder: torch.nn.Module = None,
                 decoder: torch.nn.Module = None,
                 decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1,
                                       'cfm_params': DictConfig(
                                           {'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine',
                                            'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7,
                                            'reg_loss_type': 'l1'}),
                                       'decoder_params': {'channels': [256, 256], 'dropout': 0.0,
                                                          'attention_head_dim': 64,
                                                          'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8,
                                                          'act_fn': 'gelu'}},
                 mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050,
                                        'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}):
        # NOTE(review): the dict/DictConfig defaults are mutable default arguments
        # shared across instances; they appear to be read-only here — confirm.
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.decoder_conf = decoder_conf
        self.mel_feat_conf = mel_feat_conf
        self.vocab_size = vocab_size
        self.output_type = output_type
        self.input_frame_rate = input_frame_rate
        logging.info(f"input frame rate={self.input_frame_rate}")
        self.input_embedding = nn.Embedding(vocab_size, input_size)
        # projects the 192-dim x-vector down to the mel (output) dimensionality
        self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size)
        self.encoder = encoder
        self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size)
        self.decoder = decoder
        self.only_mask_loss = only_mask_loss
        self.token_mel_ratio = token_mel_ratio
        self.pre_lookahead_len = pre_lookahead_len

    # NOTE: copied in from cosyvoice repo
    def compute_loss(
            self,
            batch: dict,
            device: torch.device,
    ) -> Dict[str, Optional[torch.Tensor]]:
        """Compute the flow-matching training loss for one batch.

        Expects ``batch`` keys: speech_token, speech_token_len, speech_feat
        (B, 80, T), speech_feat_len, embedding.
        """
        token = batch['speech_token'].to(device)
        token_len = batch['speech_token_len'].to(device)
        feat = batch['speech_feat'].to(device)  # (B, 80, T)
        feat_len = batch['speech_feat_len'].to(device)
        embedding = batch['embedding'].to(device)

        # NOTE unified training, static_chunk_size > 0 or = 0
        # streaming = True if random.random() < 0.5 else False

        # xvec projection
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)

        # concat text and prompt_text
        mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(device)  # (B, T, 1)
        token = self.input_embedding(torch.clamp(token, min=0)) * mask  # (B, T, emb)

        # text encode
        h, h_lengths = self.encoder(token, token_len)  # (B, T, C) -> (B, 2T, C)
        h = self.encoder_proj(h)

        # get conditions: with probability 0.5, reveal a random prefix (up to 30%)
        # of the target mels as conditioning
        conds = torch.zeros(feat.shape, device=token.device)
        for i, j in enumerate(feat_len):
            if random.random() < 0.5:
                continue
            index = random.randint(0, int(0.3 * j))
            conds[i, :, :index] = feat[i, :, :index]

        mask = (~make_pad_mask(h_lengths.sum(dim=-1).squeeze(dim=1))).to(h)
        loss, _ = self.decoder.compute_loss(
            feat.contiguous(),
            mask.unsqueeze(1),
            h.transpose(1, 2).contiguous(),
            embedding,
            cond=conds,
            # streaming=streaming,
        )
        return {'loss': loss}

    @torch.inference_mode()
    def inference(self,
                  token,
                  token_len,
                  prompt_token,
                  prompt_token_len,
                  prompt_feat,
                  prompt_feat_len,
                  embedding,
                  finalize,
                  n_timesteps=10,
                  noised_mels=None,
                  meanflow=False):
        """Generate mel frames for ``token`` conditioned on a prompt and x-vector.

        Returns ``(feat, None)`` where ``feat`` is (B, 80, n_new_mel_frames);
        the prompt portion of the decoded mels is stripped before returning.
        """
        # token: (B, n_toks)
        # token_len: (B,)
        B = token.size(0)

        # xvec projection
        embedding = torch.atleast_2d(embedding)
        embedding = F.normalize(embedding, dim=1)
        embedding = self.spk_embed_affine_layer(embedding)  # (1 or B, emb_dim)

        # adjust shapes (batching logic): broadcast singleton prompt/embedding
        # batch dims up to B so one prompt can serve a whole batch
        prompt_token = _repeat_batch_dim(prompt_token, B, ndim=2)  # (B, n_prompt)
        prompt_token_len = _repeat_batch_dim(prompt_token_len, B, ndim=1)  # (B,)
        prompt_feat = _repeat_batch_dim(prompt_feat, B, ndim=3)  # (B, n_feat, feat_dim=80)
        prompt_feat_len = _repeat_batch_dim(prompt_feat_len, B, ndim=1)  # (B,) or None
        embedding = _repeat_batch_dim(embedding, B, ndim=2)  # (B, emb_dim)

        # concat text and prompt_text
        token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len
        mask = (~make_pad_mask(token_len)).unsqueeze(-1).to(embedding)
        if (token >= self.vocab_size).any():
            logger.error(f"{token.max()}>{self.vocab_size}\n out-of-range special tokens found in flow, fix inputs!")
        token = self.input_embedding(token.long()) * mask

        # text encode
        h, h_masks = self.encoder(token, token_len)
        if finalize is False:
            # streaming chunk: drop the look-ahead frames that will be re-decoded later
            h = h[:, :-self.pre_lookahead_len * self.token_mel_ratio]
        h_lengths = h_masks.sum(dim=-1).squeeze(dim=-1)
        mel_len1, mel_len2 = prompt_feat.shape[1], h.shape[1] - prompt_feat.shape[1]
        h = self.encoder_proj(h)

        # # get conditions
        conds = torch.zeros([B, mel_len1 + mel_len2, self.output_size], device=token.device).to(h.dtype)
        conds[:, :mel_len1] = prompt_feat
        conds = conds.transpose(1, 2)

        mask = (~make_pad_mask(h_lengths)).unsqueeze(1).to(h)
        if mask.shape[0] != B:
            mask = mask.repeat(B, 1, 1)
        feat, _ = self.decoder(
            mu=h.transpose(1, 2).contiguous(),
            mask=mask,
            spks=embedding,
            cond=conds,
            n_timesteps=n_timesteps,
            noised_mels=noised_mels,
            meanflow=meanflow,
        )
        # drop the prompt frames; only newly generated mels are returned
        feat = feat[:, :, mel_len1:]
        assert feat.shape[2] == mel_len2
        return feat, None  # NOTE jrm: why are they returning None here?
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/flow.py",
"license": "MIT License",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/flow_matching.py | # Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import torch
import torch.nn.functional as F
from .matcha.flow_matching import BASECFM
from .configs import CFM_PARAMS
from tqdm import tqdm
def cast_all(*args, dtype):
    """Cast every floating-point tensor in ``args`` to ``dtype``.

    Integer/bool tensors and tensors already of ``dtype`` are returned
    unchanged (same objects). Returns a list in the original order.
    """
    result = []
    for tensor in args:
        if tensor.dtype.is_floating_point and tensor.dtype != dtype:
            tensor = tensor.to(dtype)
        result.append(tensor)
    return result
class ConditionalCFM(BASECFM):
    """Conditional flow-matching decoder with classifier-free guidance (CFG).

    Wraps an ``estimator`` network that predicts dx/dt and integrates it with a
    fixed-step Euler solver from noise to a mel-spectrogram.
    """

    def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None):
        super().__init__(
            n_feats=in_channels,
            cfm_params=cfm_params,
            n_spks=n_spks,
            spk_emb_dim=spk_emb_dim,
        )
        self.t_scheduler = cfm_params.t_scheduler
        self.training_cfg_rate = cfm_params.training_cfg_rate
        self.inference_cfg_rate = cfm_params.inference_cfg_rate
        in_channels = in_channels + (spk_emb_dim if n_spks > 0 else 0)
        # Just change the architecture of the estimator here
        self.estimator = estimator

    @torch.inference_mode()
    def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None, prompt_len=0, flow_cache=torch.zeros(1, 80, 0, 2)):
        """Forward diffusion
        Args:
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            n_timesteps (int): number of diffusion steps
            temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
            spks (torch.Tensor, optional): speaker ids. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond: Not used but kept for future purposes
        Returns:
            sample: generated mel-spectrogram
                shape: (batch_size, n_feats, mel_timesteps)
        """
        # NOTE: deliberately disabled; everything below the raise is unreachable
        # reference code kept from the upstream cosyvoice implementation.
        raise NotImplementedError("unused, needs updating for meanflow model")
        z = torch.randn_like(mu).to(mu.device).to(mu.dtype) * temperature
        cache_size = flow_cache.shape[2]
        # fix prompt and overlap part mu and z
        if cache_size != 0:
            z[:, :, :cache_size] = flow_cache[:, :, :, 0]
            mu[:, :, :cache_size] = flow_cache[:, :, :, 1]
        z_cache = torch.concat([z[:, :, :prompt_len], z[:, :, -34:]], dim=2)
        mu_cache = torch.concat([mu[:, :, :prompt_len], mu[:, :, -34:]], dim=2)
        flow_cache = torch.stack([z_cache, mu_cache], dim=-1)

        t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
        if self.t_scheduler == 'cosine':
            t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
        return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), flow_cache

    def solve_euler(self, x, t_span, mu, mask, spks, cond, meanflow=False):
        """
        Fixed euler solver for ODEs.
        Args:
            x (torch.Tensor): random noise
            t_span (torch.Tensor): n_timesteps interpolated
                shape: (n_timesteps + 1,)
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            spks (torch.Tensor, optional): speaker ids. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond: Not used but kept for future purposes
            meanflow: meanflow mode
        """
        in_dtype = x.dtype
        # run the estimator in its own dtype; cast back to the caller's at the end
        x, t_span, mu, mask, spks, cond = cast_all(x, t_span, mu, mask, spks, cond, dtype=self.estimator.dtype)

        # Duplicated batch dims are for CFG: rows [:B] carry the conditional
        # inputs, rows [B:] the unconditional (zeroed mu/spks/cond) inputs.
        # Do not use concat, it may cause memory format changed and trt infer with wrong results!
        B, T = mu.size(0), x.size(2)
        x_in = torch.zeros([2 * B, 80, T], device=x.device, dtype=x.dtype)
        mask_in = torch.zeros([2 * B, 1, T], device=x.device, dtype=x.dtype)
        mu_in = torch.zeros([2 * B, 80, T], device=x.device, dtype=x.dtype)
        t_in = torch.zeros([2 * B ], device=x.device, dtype=x.dtype)
        spks_in = torch.zeros([2 * B, 80 ], device=x.device, dtype=x.dtype)
        cond_in = torch.zeros([2 * B, 80, T], device=x.device, dtype=x.dtype)
        r_in = torch.zeros([2 * B ], device=x.device, dtype=x.dtype)  # (only used for meanflow)

        for t, r in zip(t_span[:-1], t_span[1:]):
            t = t.unsqueeze(dim=0)
            r = r.unsqueeze(dim=0)

            # Shapes:
            #   x_in     ( 2B, 80, T )
            #   mask_in  ( 2B,  1, T )
            #   mu_in    ( 2B, 80, T )
            #   t_in     ( 2B, )
            #   spks_in  ( 2B, 80, )
            #   cond_in  ( 2B, 80, T )
            #   r_in     ( 2B, )
            #   x        (  B, 80, T )
            #   mask     (  B,  1, T )
            #   mu       (  B, 80, T )
            #   t        (  B, )
            #   spks     (  B, 80, )
            #   cond     (  B, 80, T )
            #   r        (  B, )
            x_in[:B] = x_in[B:] = x
            mask_in[:B] = mask_in[B:] = mask
            mu_in[:B] = mu
            t_in[:B] = t_in[B:] = t
            spks_in[:B] = spks
            cond_in[:B] = cond
            r_in[:B] = r_in[B:] = r  # (only used for meanflow)

            dxdt = self.estimator.forward(
                x=x_in, mask=mask_in, mu=mu_in, t=t_in, spks=spks_in, cond=cond_in,
                r=r_in if meanflow else None,
            )
            dxdt, cfg_dxdt = torch.split(dxdt, [B, B], dim=0)
            # classifier-free guidance: extrapolate away from the unconditional velocity
            dxdt = ((1.0 + self.inference_cfg_rate) * dxdt - self.inference_cfg_rate * cfg_dxdt)
            dt = r - t
            x = x + dt * dxdt

        return x.to(in_dtype)

    def compute_loss(self, x1, mask, mu, spks=None, cond=None):
        """Computes diffusion loss

        Args:
            x1 (torch.Tensor): Target
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): target mask
                shape: (batch_size, 1, mel_timesteps)
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            spks (torch.Tensor, optional): speaker embedding. Defaults to None.
                shape: (batch_size, spk_emb_dim)

        Returns:
            loss: conditional flow matching loss
            y: conditional flow
                shape: (batch_size, n_feats, mel_timesteps)
        """
        b, _, t = mu.shape

        # random timestep
        t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype)
        if self.t_scheduler == 'cosine':
            t = 1 - torch.cos(t * 0.5 * torch.pi)
        # sample noise p(x_0)
        z = torch.randn_like(x1)

        # linear interpolation between noise and target defines the flow
        y = (1 - (1 - self.sigma_min) * t) * z + t * x1
        u = x1 - (1 - self.sigma_min) * z

        # during training, we randomly drop condition to trade off mode coverage and sample fidelity
        if self.training_cfg_rate > 0:
            cfg_mask = torch.rand(b, device=x1.device) > self.training_cfg_rate
            mu = mu * cfg_mask.view(-1, 1, 1)
            spks = spks * cfg_mask.view(-1, 1)
            cond = cond * cfg_mask.view(-1, 1, 1)

        pred = self.estimator(y, mask, mu, t.squeeze(), spks, cond)
        loss = F.mse_loss(pred * mask, u * mask, reduction="sum") / (torch.sum(mask) * u.shape[1])
        return loss, y
class CausalConditionalCFM(ConditionalCFM):
    """ConditionalCFM variant used at inference; supports meanflow (distilled) estimators."""

    def __init__(self, in_channels=240, cfm_params=CFM_PARAMS, n_spks=1, spk_emb_dim=80, estimator=None):
        super().__init__(in_channels, cfm_params, n_spks, spk_emb_dim, estimator)
        # TODO: BAD BAD IDEA - IT'LL MESS UP DISTILLATION - SETTING TO NONE
        self.rand_noise = None

    @torch.inference_mode()
    def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None, noised_mels=None, meanflow=False):
        """Forward diffusion
        Args:
            mu (torch.Tensor): output of encoder
                shape: (batch_size, n_feats, mel_timesteps)
            mask (torch.Tensor): output_mask
                shape: (batch_size, 1, mel_timesteps)
            n_timesteps (int): number of diffusion steps
            temperature (float, optional): temperature for scaling noise. Defaults to 1.0.
                NOTE(review): ``temperature`` is accepted but never applied here — confirm intended.
            spks (torch.Tensor, optional): speaker ids. Defaults to None.
                shape: (batch_size, spk_emb_dim)
            cond: Not used but kept for future purposes
            noised_mels: gt mels noised a time t
        Returns:
            sample: generated mel-spectrogram
                shape: (batch_size, n_feats, mel_timesteps)
        """
        B = mu.size(0)
        z = torch.randn_like(mu)
        if noised_mels is not None:
            # splice the provided noised ground-truth mels over the non-prompt region
            prompt_len = mu.size(2) - noised_mels.size(2)
            z[..., prompt_len:] = noised_mels
        # time steps for reverse diffusion
        t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device, dtype=mu.dtype)
        if (not meanflow) and (self.t_scheduler == 'cosine'):
            t_span = 1 - torch.cos(t_span * 0.5 * torch.pi)
        # NOTE: right now, the only meanflow models are also distilled models, which don't need CFG
        # because they were distilled with CFG outputs. We would need to add another hparam and
        # change the conditional logic here if we want to use CFG inference with a meanflow model.
        if meanflow:
            return self.basic_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond), None
        return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond, meanflow=meanflow), None

    def basic_euler(self, x, t_span, mu, mask, spks, cond):
        """Plain fixed-step Euler integration without CFG (meanflow/distilled path)."""
        in_dtype = x.dtype
        x, t_span, mu, mask, spks, cond = cast_all(x, t_span, mu, mask, spks, cond, dtype=self.estimator.dtype)
        print("S3 Token -> Mel Inference...")
        for t, r in tqdm(zip(t_span[..., :-1], t_span[..., 1:]), total=t_span.shape[-1] - 1):
            t, r = t[None], r[None]
            dxdt = self.estimator.forward(x, mask=mask, mu=mu, t=t, spks=spks, cond=cond, r=r)
            dt = r - t
            x = x + dt * dxdt
        return x.to(in_dtype)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/flow_matching.py",
"license": "MIT License",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/hifigan.py | # jrm: adapted from CosyVoice/cosyvoice/hifigan/generator.py
# most modules should be reusable, but I found their SineGen changed a git.
# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HIFI-GAN"""
from typing import Dict, Optional, List
import numpy as np
from scipy.signal import get_window
import torch
import torch.nn.functional as F
from torch.nn import Conv1d
from torch.nn import ConvTranspose1d
from torch.nn.utils import remove_weight_norm
from torch.nn.utils.parametrizations import weight_norm
from torch.distributions.uniform import Uniform
from torch import nn, sin, pow
from torch.nn import Parameter
class Snake(nn.Module):
    '''Sine-based periodic activation: Snake(x) = x + (1/alpha) * sin^2(alpha * x).

    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha: per-channel trainable frequency parameter. With
          ``alpha_logscale`` it is stored in log space (initialized to zeros)
          and exponentiated in the forward pass; otherwise it is initialized
          to ones scaled by ``alpha``.
    References:
        - Liu Ziyin, Tilman Hartwig, Masahito Ueda:
          https://arxiv.org/abs/2006.08195
    '''
    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        '''
        in_features: number of channels of the input.
        alpha: initial scale; higher values = higher frequency.
        '''
        super().__init__()
        self.in_features = in_features
        self.alpha_logscale = alpha_logscale
        base = torch.zeros(in_features) if alpha_logscale else torch.ones(in_features)
        self.alpha = Parameter(base * alpha)
        self.alpha.requires_grad = alpha_trainable
        # epsilon guarding the 1/alpha division
        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        '''Apply the activation elementwise; alpha broadcasts as (1, C, 1).'''
        alpha = self.alpha[None, :, None]
        if self.alpha_logscale:
            alpha = torch.exp(alpha)
        return x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2)
def get_padding(kernel_size, dilation=1):
    """Padding that keeps a (dilated) convolution length-preserving ("same")."""
    return dilation * (kernel_size - 1) // 2
def init_weights(m, mean=0.0, std=0.01):
    """Initialize conv-like module weights from N(mean, std); no-op for other modules."""
    if "Conv" in m.__class__.__name__:
        m.weight.data.normal_(mean, std)
"""hifigan based generator implementation.
This code is modified from https://github.com/jik876/hifi-gan
,https://github.com/kan-bayashi/ParallelWaveGAN and
https://github.com/NVIDIA/BigVGAN
"""
class ResBlock(torch.nn.Module):
    """Residual block used in HiFiGAN/BigVGAN.

    Each of the ``len(dilations)`` stages applies Snake -> dilated conv ->
    Snake -> plain conv, with a residual connection around the stage. All
    convs are weight-normalized and length-preserving.
    """

    def __init__(
        self,
        channels: int = 512,
        kernel_size: int = 3,
        dilations: List[int] = [1, 3, 5],
    ):
        super().__init__()
        self.convs1 = nn.ModuleList()
        self.convs2 = nn.ModuleList()
        # one (dilated conv, undilated conv) pair per dilation value
        for dilation in dilations:
            self.convs1.append(
                weight_norm(
                    Conv1d(
                        channels, channels, kernel_size, 1,
                        dilation=dilation,
                        padding=get_padding(kernel_size, dilation),
                    )
                )
            )
            self.convs2.append(
                weight_norm(
                    Conv1d(
                        channels, channels, kernel_size, 1,
                        dilation=1,
                        padding=get_padding(kernel_size, 1),
                    )
                )
            )
        self.convs1.apply(init_weights)
        self.convs2.apply(init_weights)
        self.activations1 = nn.ModuleList(
            [Snake(channels, alpha_logscale=False) for _ in self.convs1]
        )
        self.activations2 = nn.ModuleList(
            [Snake(channels, alpha_logscale=False) for _ in self.convs2]
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for act1, conv1, act2, conv2 in zip(
                self.activations1, self.convs1, self.activations2, self.convs2):
            h = conv1(act1(x))
            h = conv2(act2(h))
            x = x + h  # residual connection
        return x

    def remove_weight_norm(self):
        """Strip the weight_norm reparametrization from every conv (for export)."""
        for conv1, conv2 in zip(self.convs1, self.convs2):
            remove_weight_norm(conv1)
            remove_weight_norm(conv2)
class SineGen(torch.nn.Module):
    """Harmonic sine-wave source generator for NSF-style vocoders.

    Args:
        samp_rate: sampling rate in Hz.
        harmonic_num: number of harmonic overtones above the fundamental (default 0).
        sine_amp: amplitude of the sine waveform (default 0.1).
        noise_std: std of additive Gaussian noise in voiced regions (default 0.003).
        voiced_threshold: F0 threshold for voiced/unvoiced classification (default 0).
    """

    def __init__(self, samp_rate, harmonic_num=0,
                 sine_amp=0.1, noise_std=0.003,
                 voiced_threshold=0):
        super().__init__()
        self.sine_amp = sine_amp
        self.noise_std = noise_std
        self.harmonic_num = harmonic_num
        self.sampling_rate = samp_rate
        self.voiced_threshold = voiced_threshold

    def _f02uv(self, f0):
        """Voiced/unvoiced flag: 1.0 where f0 exceeds the threshold, else 0.0."""
        return (f0 > self.voiced_threshold).type(torch.float32)

    @torch.no_grad()
    def forward(self, f0):
        """
        :param f0: [B, 1, sample_len], Hz
        :return: (sine_waves [B, H+1, L], uv [B, 1, L], noise [B, H+1, L])
        """
        n_harmonics = self.harmonic_num + 1
        batch, length = f0.size(0), f0.size(-1)
        # per-harmonic normalized frequency: k * f0 / fs
        freq = torch.zeros((batch, n_harmonics, length)).to(f0.device)
        for k in range(1, n_harmonics + 1):
            freq[:, k - 1: k, :] = f0 * k / self.sampling_rate
        # integrate instantaneous frequency to phase; mod 1 keeps precision bounded
        theta = 2 * np.pi * (torch.cumsum(freq, dim=-1) % 1)
        # random initial phase per harmonic; the fundamental keeps phase 0
        init_phase = Uniform(low=-np.pi, high=np.pi).sample(
            sample_shape=(batch, n_harmonics, 1)).to(freq.device)
        init_phase[:, 0, :] = 0
        sine_waves = self.sine_amp * torch.sin(theta + init_phase)
        uv = self._f02uv(f0)
        # unvoiced noise amplitude ~ sine_amp/3 (comparable to the sine),
        # voiced noise amplitude = noise_std
        noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
        noise = noise_amp * torch.randn_like(sine_waves)
        # zero the sine in unvoiced regions, then add noise everywhere
        sine_waves = sine_waves * uv + noise
        return sine_waves, uv, noise
class SourceModuleHnNSF(torch.nn.Module):
    """ SourceModule for hn-nsf
    SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0)
    sampling_rate: sampling_rate in Hz
    harmonic_num: number of harmonics above F0 (default: 0)
    sine_amp: amplitude of sine source signal (default: 0.1)
    add_noise_std: std of additive Gaussian noise (default: 0.003)
        note that amplitude of noise in unvoiced is decided
        by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
        (the parameter is spelled ``voiced_threshod`` in the signature;
        kept for checkpoint/caller compatibility)

    Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
    F0_sampled (batchsize, length, 1)
    Sine_source (batchsize, length, 1)
    noise_source (batchsize, length 1)
    uv (batchsize, length, 1)
    """

    def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1,
                 add_noise_std=0.003, voiced_threshod=0):
        # NOTE(review): ``upsample_scale`` is accepted but never used — confirm intended.
        super(SourceModuleHnNSF, self).__init__()

        self.sine_amp = sine_amp
        self.noise_std = add_noise_std

        # to produce sine waveforms
        self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
                                 sine_amp, add_noise_std, voiced_threshod)

        # to merge source harmonics into a single excitation
        self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
        self.l_tanh = torch.nn.Tanh()

    def forward(self, x):
        """
        Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
        F0_sampled (batchsize, length, 1)
        Sine_source (batchsize, length, 1)
        noise_source (batchsize, length 1)
        """
        # source for harmonic branch; sine generation itself is not trained
        with torch.no_grad():
            sine_wavs, uv, _ = self.l_sin_gen(x.transpose(1, 2))
            sine_wavs = sine_wavs.transpose(1, 2)
            uv = uv.transpose(1, 2)
        # trainable merge of harmonics into one excitation channel
        sine_merge = self.l_tanh(self.l_linear(sine_wavs))

        # source for noise branch, in the same shape as uv
        noise = torch.randn_like(uv) * self.sine_amp / 3
        return sine_merge, noise, uv
class HiFTGenerator(nn.Module):
"""
HiFTNet Generator: Neural Source Filter + ISTFTNet
https://arxiv.org/abs/2309.09493
"""
    def __init__(
            self,
            in_channels: int = 80,
            base_channels: int = 512,
            nb_harmonics: int = 8,
            sampling_rate: int = 22050,
            nsf_alpha: float = 0.1,
            nsf_sigma: float = 0.003,
            nsf_voiced_threshold: float = 10,
            upsample_rates: List[int] = [8, 8],
            upsample_kernel_sizes: List[int] = [16, 16],
            istft_params: Dict[str, int] = {"n_fft": 16, "hop_len": 4},
            resblock_kernel_sizes: List[int] = [3, 7, 11],
            resblock_dilation_sizes: List[List[int]] = [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
            source_resblock_kernel_sizes: List[int] = [7, 11],
            source_resblock_dilation_sizes: List[List[int]] = [[1, 3, 5], [1, 3, 5]],
            lrelu_slope: float = 0.1,
            audio_limit: float = 0.99,
            f0_predictor: torch.nn.Module = None,
    ):
        """Build the HiFTNet generator (NSF source module + ISTFT-based decoder).

        ``in_channels`` mel channels are upsampled through ``upsample_rates``
        stages, fused with a downsampled STFT of the NSF source signal, and
        finally converted to a waveform via inverse STFT.
        """
        super(HiFTGenerator, self).__init__()

        self.out_channels = 1
        self.nb_harmonics = nb_harmonics
        self.sampling_rate = sampling_rate
        self.istft_params = istft_params
        self.lrelu_slope = lrelu_slope
        self.audio_limit = audio_limit

        self.num_kernels = len(resblock_kernel_sizes)
        self.num_upsamples = len(upsample_rates)
        # harmonic-plus-noise source generator driven by F0
        self.m_source = SourceModuleHnNSF(
            sampling_rate=sampling_rate,
            upsample_scale=np.prod(upsample_rates) * istft_params["hop_len"],
            harmonic_num=nb_harmonics,
            sine_amp=nsf_alpha,
            add_noise_std=nsf_sigma,
            voiced_threshod=nsf_voiced_threshold)
        # upsamples frame-rate F0 to sample rate for the source module
        self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates) * istft_params["hop_len"])

        self.conv_pre = weight_norm(
            Conv1d(in_channels, base_channels, 7, 1, padding=3)
        )

        # Up
        self.ups = nn.ModuleList()
        for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)):
            self.ups.append(
                weight_norm(
                    ConvTranspose1d(
                        base_channels // (2**i),
                        base_channels // (2**(i + 1)),
                        k,
                        u,
                        padding=(k - u) // 2,
                    )
                )
            )

        # Down
        # downsample the source STFT to match each upsampling stage's resolution
        self.source_downs = nn.ModuleList()
        self.source_resblocks = nn.ModuleList()
        downsample_rates = [1] + upsample_rates[::-1][:-1]
        downsample_cum_rates = np.cumprod(downsample_rates)
        for i, (u, k, d) in enumerate(zip(downsample_cum_rates[::-1], source_resblock_kernel_sizes, source_resblock_dilation_sizes)):
            if u == 1:
                self.source_downs.append(
                    Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), 1, 1)
                )
            else:
                self.source_downs.append(
                    Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), u * 2, u, padding=(u // 2))
                )

            self.source_resblocks.append(
                ResBlock(base_channels // (2 ** (i + 1)), k, d)
            )

        # parallel multi-kernel resblocks per upsampling stage (averaged in decode)
        self.resblocks = nn.ModuleList()
        for i in range(len(self.ups)):
            ch = base_channels // (2**(i + 1))
            for _, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)):
                self.resblocks.append(ResBlock(ch, k, d))

        # projects to n_fft+2 channels: n_fft/2+1 magnitudes + n_fft/2+1 phases
        self.conv_post = weight_norm(Conv1d(ch, istft_params["n_fft"] + 2, 7, 1, padding=3))
        self.ups.apply(init_weights)
        self.conv_post.apply(init_weights)
        self.reflection_pad = nn.ReflectionPad1d((1, 0))
        self.stft_window = torch.from_numpy(get_window("hann", istft_params["n_fft"], fftbins=True).astype(np.float32))
        self.f0_predictor = f0_predictor
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
self.m_source.remove_weight_norm()
for l in self.source_downs:
remove_weight_norm(l)
for l in self.source_resblocks:
l.remove_weight_norm()
def _stft(self, x):
spec = torch.stft(
x,
self.istft_params["n_fft"], self.istft_params["hop_len"], self.istft_params["n_fft"], window=self.stft_window.to(x.device),
return_complex=True)
spec = torch.view_as_real(spec) # [B, F, TT, 2]
return spec[..., 0], spec[..., 1]
def _istft(self, magnitude, phase):
magnitude = torch.clip(magnitude, max=1e2)
real = magnitude * torch.cos(phase)
img = magnitude * torch.sin(phase)
inverse_transform = torch.istft(torch.complex(real, img), self.istft_params["n_fft"], self.istft_params["hop_len"],
self.istft_params["n_fft"], window=self.stft_window.to(magnitude.device))
return inverse_transform
def decode(self, x: torch.Tensor, s: torch.Tensor = torch.zeros(1, 1, 0)) -> torch.Tensor:
s_stft_real, s_stft_imag = self._stft(s.squeeze(1))
s_stft = torch.cat([s_stft_real, s_stft_imag], dim=1)
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, self.lrelu_slope)
x = self.ups[i](x)
if i == self.num_upsamples - 1:
x = self.reflection_pad(x)
# fusion
si = self.source_downs[i](s_stft)
si = self.source_resblocks[i](si)
x = x + si
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i * self.num_kernels + j](x)
else:
xs += self.resblocks[i * self.num_kernels + j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
magnitude = torch.exp(x[:, :self.istft_params["n_fft"] // 2 + 1, :])
phase = torch.sin(x[:, self.istft_params["n_fft"] // 2 + 1:, :]) # actually, sin is redundancy
x = self._istft(magnitude, phase)
x = torch.clamp(x, -self.audio_limit, self.audio_limit)
return x
    def forward(
        self,
        batch: dict,
        device: torch.device,
    ) -> "tuple[torch.Tensor, torch.Tensor]":
        """Training forward pass: mel -> f0 -> NSF source -> waveform.

        Args:
            batch: must contain key 'speech_feat'; it is transposed on dims
                (1, 2) before use, so presumably (B, T, C) -> (B, C, T) —
                TODO confirm against the dataloader.
            device: target device for the input features.

        Returns:
            (generated_speech, f0) tuple.
            NOTE(review): the original annotation claimed a
            ``Dict[str, Optional[torch.Tensor]]`` return, but the code
            returns a tuple; annotation corrected accordingly.
        """
        speech_feat = batch['speech_feat'].transpose(1, 2).to(device)
        # mel->f0
        f0 = self.f0_predictor(speech_feat)
        # f0->source (upsample f0 to sample rate, then harmonic source model)
        s = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t
        s, _, _ = self.m_source(s)
        s = s.transpose(1, 2)
        # mel+source->speech
        generated_speech = self.decode(x=speech_feat, s=s)
        return generated_speech, f0
@torch.inference_mode()
def inference(self, speech_feat: torch.Tensor, cache_source: torch.Tensor = torch.zeros(1, 1, 0)) -> torch.Tensor:
# mel->f0
f0 = self.f0_predictor(speech_feat)
# f0->source
s = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t
s, _, _ = self.m_source(s)
s = s.transpose(1, 2)
# use cache_source to avoid glitch
if cache_source.shape[2] != 0:
s[:, :, :cache_source.shape[2]] = cache_source
generated_speech = self.decode(x=speech_feat, s=s)
return generated_speech, s
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/hifigan.py",
"license": "MIT License",
"lines": 410,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/matcha/flow_matching.py | from abc import ABC
import torch
import torch.nn.functional as F
from .decoder import Decoder
class BASECFM(torch.nn.Module, ABC):
    """Abstract base class for conditional flow matching (CFM) decoders.

    Subclasses must set ``self.estimator`` to the network predicting the
    flow's vector field before calling :meth:`forward` or
    :meth:`compute_loss`.
    """
    def __init__(
        self,
        n_feats,
        cfm_params,
        n_spks=1,
        spk_emb_dim=128,
    ):
        """Store CFM hyper-parameters.

        Args:
            n_feats: number of mel channels.
            cfm_params: config object with ``solver`` and optionally
                ``sigma_min``.
            n_spks: number of speakers (multi-speaker mode if > 1).
            spk_emb_dim: speaker embedding dimensionality.
        """
        super().__init__()
        self.n_feats = n_feats
        self.n_spks = n_spks
        self.spk_emb_dim = spk_emb_dim
        self.solver = cfm_params.solver
        # Minimum noise level of the flow; fall back to a small constant when
        # the config does not define it.
        if hasattr(cfm_params, "sigma_min"):
            self.sigma_min = cfm_params.sigma_min
        else:
            self.sigma_min = 1e-4
        # Set by subclasses: the vector-field estimator network.
        self.estimator = None
    @torch.inference_mode()
    def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None):
        """Sample a mel-spectrogram by integrating the learned flow.

        Args:
            mu (torch.Tensor): output of encoder,
                shape (batch_size, n_feats, mel_timesteps).
            mask (torch.Tensor): output mask,
                shape (batch_size, 1, mel_timesteps).
            n_timesteps (int): number of diffusion/ODE steps.
            temperature (float, optional): scaling of the initial noise.
                Defaults to 1.0.
            spks (torch.Tensor, optional): speaker embeddings,
                shape (batch_size, spk_emb_dim). Defaults to None.
            cond: not used but kept for future purposes.

        Returns:
            sample: generated mel-spectrogram,
                shape (batch_size, n_feats, mel_timesteps).
        """
        z = torch.randn_like(mu) * temperature
        # Uniform time grid on [0, 1] with n_timesteps steps.
        t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device)
        return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond)
    def solve_euler(self, x, t_span, mu, mask, spks, cond):
        """Fixed-step explicit Euler solver for the flow ODE.

        Args:
            x (torch.Tensor): random noise (initial state).
            t_span (torch.Tensor): interpolated timesteps,
                shape (n_timesteps + 1,).
            mu (torch.Tensor): output of encoder,
                shape (batch_size, n_feats, mel_timesteps).
            mask (torch.Tensor): output mask,
                shape (batch_size, 1, mel_timesteps).
            spks (torch.Tensor, optional): speaker embeddings,
                shape (batch_size, spk_emb_dim).
            cond: not used but kept for future purposes.

        Returns:
            The state at the final timestep (the generated sample).
        """
        t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0]
        # I am storing this because I can later plot it by putting a debugger here and saving it to a file
        # Or in future might add like a return_all_steps flag
        sol = []
        for step in range(1, len(t_span)):
            # One Euler step along the estimated vector field.
            dphi_dt = self.estimator(x, mask, mu, t, spks, cond)
            x = x + dt * dphi_dt
            t = t + dt
            sol.append(x)
            # Recompute dt from the grid so non-uniform t_span would also work.
            if step < len(t_span) - 1:
                dt = t_span[step + 1] - t
        return sol[-1]
    def compute_loss(self, x1, mask, mu, spks=None, cond=None):
        """Compute the conditional flow matching training loss.

        Args:
            x1 (torch.Tensor): target mel,
                shape (batch_size, n_feats, mel_timesteps).
            mask (torch.Tensor): target mask,
                shape (batch_size, 1, mel_timesteps).
            mu (torch.Tensor): output of encoder,
                shape (batch_size, n_feats, mel_timesteps).
            spks (torch.Tensor, optional): speaker embedding,
                shape (batch_size, spk_emb_dim). Defaults to None.
            cond: not used but kept for future purposes.

        Returns:
            loss: conditional flow matching loss (scalar).
            y: the interpolated conditional flow sample,
                shape (batch_size, n_feats, mel_timesteps).
        """
        b, _, t = mu.shape
        # random timestep — NOTE: `t` is re-bound here from the time dimension
        # above to the sampled flow timestep tensor.
        t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype)
        # sample noise p(x_0)
        z = torch.randn_like(x1)
        # Linear interpolation between noise and target (rectified-flow path).
        y = (1 - (1 - self.sigma_min) * t) * z + t * x1
        u = x1 - (1 - self.sigma_min) * z
        # NOTE(review): the estimator is called WITHOUT `cond` here, unlike in
        # solve_euler — presumably intentional for this training setup; verify
        # against the estimator's signature.
        loss = F.mse_loss(self.estimator(y, mask, mu, t.squeeze(), spks), u, reduction="sum") / (
            torch.sum(mask) * u.shape[1]
        )
        return loss, y
class CFM(BASECFM):
    """Conditional flow matching decoder using a U-Net style `Decoder` as the
    vector-field estimator."""

    def __init__(self, in_channels, out_channel, cfm_params, decoder_params, n_spks=1, spk_emb_dim=64):
        super().__init__(
            n_feats=in_channels,
            cfm_params=cfm_params,
            n_spks=n_spks,
            spk_emb_dim=spk_emb_dim,
        )
        # Speaker embedding is concatenated onto the input only in the
        # multi-speaker case.
        estimator_in = in_channels + (spk_emb_dim if n_spks > 1 else 0)
        # Just change the architecture of the estimator here
        self.estimator = Decoder(in_channels=estimator_in, out_channels=out_channel, **decoder_params)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/matcha/flow_matching.py",
"license": "MIT License",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/matcha/text_encoder.py | """ from https://github.com/jaywalnut310/glow-tts """
import math
import torch
import torch.nn as nn
from einops import rearrange
def sequence_mask(length, max_length=None):
    """Build a boolean mask of shape (B, max_length).

    Entry [b, i] is True iff i < length[b]. When `max_length` is omitted,
    the longest sequence in the batch is used.
    """
    if max_length is None:
        max_length = length.max()
    positions = torch.arange(max_length, dtype=length.dtype, device=length.device)
    return positions[None, :] < length[:, None]
class LayerNorm(nn.Module):
    """Layer normalization over the channel dimension (dim 1) with a
    learnable per-channel scale (`gamma`) and shift (`beta`)."""

    def __init__(self, channels, eps=1e-4):
        super().__init__()
        self.channels = channels
        self.eps = eps
        self.gamma = torch.nn.Parameter(torch.ones(channels))
        self.beta = torch.nn.Parameter(torch.zeros(channels))

    def forward(self, x):
        # Normalize across channels for every (batch, time, ...) position.
        mean = x.mean(dim=1, keepdim=True)
        var = ((x - mean) ** 2).mean(dim=1, keepdim=True)
        normed = (x - mean) * torch.rsqrt(var + self.eps)
        # Reshape affine params so they broadcast over all non-channel dims.
        broadcast_shape = [1, -1] + [1] * (len(x.shape) - 2)
        return normed * self.gamma.view(*broadcast_shape) + self.beta.view(*broadcast_shape)
class ConvReluNorm(nn.Module):
    """Stack of Conv1d -> LayerNorm -> ReLU -> Dropout layers with a
    zero-initialized residual output projection.

    Because `proj` starts at zero, the module is an identity (times the mask)
    at initialization.
    """

    def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.n_layers = n_layers
        self.p_dropout = p_dropout
        pad = kernel_size // 2
        self.conv_layers = torch.nn.ModuleList()
        self.norm_layers = torch.nn.ModuleList()
        # First conv maps in->hidden; the remaining n_layers-1 are hidden->hidden.
        self.conv_layers.append(torch.nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=pad))
        self.norm_layers.append(LayerNorm(hidden_channels))
        self.relu_drop = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Dropout(p_dropout))
        for _ in range(n_layers - 1):
            self.conv_layers.append(
                torch.nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=pad)
            )
            self.norm_layers.append(LayerNorm(hidden_channels))
        # Residual projection, zero-initialized.
        self.proj = torch.nn.Conv1d(hidden_channels, out_channels, 1)
        self.proj.weight.data.zero_()
        self.proj.bias.data.zero_()

    def forward(self, x, x_mask):
        residual = x
        for conv, norm in zip(self.conv_layers, self.norm_layers):
            x = self.relu_drop(norm(conv(x * x_mask)))
        return (residual + self.proj(x)) * x_mask
class DurationPredictor(nn.Module):
    """Predicts (log-)durations from encoder features: two masked
    Conv1d -> ReLU -> LayerNorm -> Dropout blocks followed by a 1x1 projection."""

    def __init__(self, in_channels, filter_channels, kernel_size, p_dropout):
        super().__init__()
        self.in_channels = in_channels
        self.filter_channels = filter_channels
        self.p_dropout = p_dropout
        pad = kernel_size // 2
        self.drop = torch.nn.Dropout(p_dropout)
        self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=pad)
        self.norm_1 = LayerNorm(filter_channels)
        self.conv_2 = torch.nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=pad)
        self.norm_2 = LayerNorm(filter_channels)
        self.proj = torch.nn.Conv1d(filter_channels, 1, 1)

    def forward(self, x, x_mask):
        for conv, norm in ((self.conv_1, self.norm_1), (self.conv_2, self.norm_2)):
            x = self.drop(norm(torch.relu(conv(x * x_mask))))
        return self.proj(x * x_mask) * x_mask
class RotaryPositionalEmbeddings(nn.Module):
    """
    ## RoPE module
    Rotary encoding transforms pairs of features by rotating in the 2D plane.
    That is, it organizes the $d$ features as $\frac{d}{2}$ pairs.
    Each pair can be considered a coordinate in a 2D plane, and the encoding will rotate it
    by an angle depending on the position of the token.
    """
    def __init__(self, d: int, base: int = 10_000):
        r"""
        * `d` is the number of features $d$
        * `base` is the constant used for calculating $\Theta$
        """
        super().__init__()
        self.base = base
        # Callers may pass a fractional d (e.g. k_channels * 0.5 in
        # MultiHeadAttention); truncate to an integer feature count.
        self.d = int(d)
        # Lazily-built cos/sin caches, sized to the longest sequence seen.
        self.cos_cached = None
        self.sin_cached = None
    def _build_cache(self, x: torch.Tensor):
        r"""
        Cache $\cos$ and $\sin$ values for positions up to `x.shape[0]`.
        """
        # Return if cache is already built and long enough for this sequence
        if self.cos_cached is not None and x.shape[0] <= self.cos_cached.shape[0]:
            return
        # Get sequence length
        seq_len = x.shape[0]
        # $\Theta = {\theta_i = 10000^{-\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
        theta = 1.0 / (self.base ** (torch.arange(0, self.d, 2).float() / self.d)).to(x.device)
        # Create position indexes `[0, 1, ..., seq_len - 1]`
        seq_idx = torch.arange(seq_len, device=x.device).float().to(x.device)
        # Calculate the product of position index and $\theta_i$
        idx_theta = torch.einsum("n,d->nd", seq_idx, theta)
        # Concatenate so that for row $m$ we have
        # $[m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}, m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}]$
        idx_theta2 = torch.cat([idx_theta, idx_theta], dim=1)
        # Cache them, shaped [seq_len, 1, 1, d] to broadcast over batch/heads.
        self.cos_cached = idx_theta2.cos()[:, None, None, :]
        self.sin_cached = idx_theta2.sin()[:, None, None, :]
    def _neg_half(self, x: torch.Tensor):
        # $\frac{d}{2}$
        d_2 = self.d // 2
        # Calculate $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
        return torch.cat([-x[:, :, :, d_2:], x[:, :, :, :d_2]], dim=-1)
    def forward(self, x: torch.Tensor):
        """
        * `x` is the Tensor at the head of a key or a query with shape `[batch, n_heads, seq_len, d]`
          (internally transposed to `[seq_len, batch, n_heads, d]`).
        """
        # Cache $\cos$ and $\sin$ values
        x = rearrange(x, "b h t d -> t b h d")
        self._build_cache(x)
        # Split the features, we can choose to apply rotary embeddings only to a partial set of features.
        x_rope, x_pass = x[..., : self.d], x[..., self.d :]
        # Calculate
        # $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$
        neg_half_x = self._neg_half(x_rope)
        # Apply the rotation; caches are sliced to the current sequence length.
        x_rope = (x_rope * self.cos_cached[: x.shape[0]]) + (neg_half_x * self.sin_cached[: x.shape[0]])
        return rearrange(torch.cat((x_rope, x_pass), dim=-1), "t b h d -> b h t d")
class MultiHeadAttention(nn.Module):
    """Multi-head attention with rotary position embeddings (RoPE).

    RoPE is applied to half of each head's channels (``k_channels * 0.5`` is
    passed to the rotary modules); the remaining channels pass through
    unrotated.
    """
    def __init__(
        self,
        channels,
        out_channels,
        n_heads,
        heads_share=True,
        p_dropout=0.0,
        proximal_bias=False,
        proximal_init=False,
    ):
        """Build q/k/v/output 1x1-conv projections.

        Args:
            channels: input channels; must be divisible by n_heads.
            out_channels: output channels of the final projection.
            n_heads: number of attention heads.
            heads_share: stored but not otherwise used in this class.
            p_dropout: dropout on the attention probabilities.
            proximal_bias: add a distance-penalizing bias (self-attn only).
            proximal_init: initialize key projection as a copy of the query
                projection.
        """
        super().__init__()
        assert channels % n_heads == 0
        self.channels = channels
        self.out_channels = out_channels
        self.n_heads = n_heads
        self.heads_share = heads_share
        self.proximal_bias = proximal_bias
        self.p_dropout = p_dropout
        # Last computed attention map, kept for inspection/debugging.
        self.attn = None
        self.k_channels = channels // n_heads
        self.conv_q = torch.nn.Conv1d(channels, channels, 1)
        self.conv_k = torch.nn.Conv1d(channels, channels, 1)
        self.conv_v = torch.nn.Conv1d(channels, channels, 1)
        # from https://nn.labml.ai/transformers/rope/index.html
        self.query_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)
        self.key_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5)
        self.conv_o = torch.nn.Conv1d(channels, out_channels, 1)
        self.drop = torch.nn.Dropout(p_dropout)
        # Xavier init for q/k/v only; conv_o keeps PyTorch's default init.
        torch.nn.init.xavier_uniform_(self.conv_q.weight)
        torch.nn.init.xavier_uniform_(self.conv_k.weight)
        if proximal_init:
            self.conv_k.weight.data.copy_(self.conv_q.weight.data)
            self.conv_k.bias.data.copy_(self.conv_q.bias.data)
        torch.nn.init.xavier_uniform_(self.conv_v.weight)
    def forward(self, x, c, attn_mask=None):
        """Attend from `x` (queries) over `c` (keys/values); self-attention
        when ``x is c``. Shapes: (B, channels, T)."""
        q = self.conv_q(x)
        k = self.conv_k(c)
        v = self.conv_v(c)
        x, self.attn = self.attention(q, k, v, mask=attn_mask)
        x = self.conv_o(x)
        return x
    def attention(self, query, key, value, mask=None):
        """Scaled dot-product attention with RoPE applied to q and k.

        Returns (output, attention_probabilities).
        """
        # reshape [b, d, t] -> [b, n_h, t, d_k]
        b, d, t_s, t_t = (*key.size(), query.size(2))
        query = rearrange(query, "b (h c) t-> b h t c", h=self.n_heads)
        key = rearrange(key, "b (h c) t-> b h t c", h=self.n_heads)
        value = rearrange(value, "b (h c) t-> b h t c", h=self.n_heads)
        query = self.query_rotary_pe(query)
        key = self.key_rotary_pe(key)
        scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels)
        if self.proximal_bias:
            assert t_s == t_t, "Proximal bias is only available for self-attention."
            scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
        if mask is not None:
            # -1e4 (not -inf) keeps the fill finite, e.g. for fp16 — the
            # masked positions still get near-zero probability after softmax.
            scores = scores.masked_fill(mask == 0, -1e4)
        p_attn = torch.nn.functional.softmax(scores, dim=-1)
        p_attn = self.drop(p_attn)
        output = torch.matmul(p_attn, value)
        # [b, n_h, t_t, d_k] -> [b, d, t_t]
        output = output.transpose(2, 3).contiguous().view(b, d, t_t)
        return output, p_attn
    @staticmethod
    def _attention_bias_proximal(length):
        # Bias that penalizes attending far from the current position:
        # -log(1 + |i - j|), shaped [1, 1, length, length].
        r = torch.arange(length, dtype=torch.float32)
        diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
        return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
class FFN(nn.Module):
    """Masked position-wise feed-forward block:
    Conv1d -> ReLU -> Dropout -> Conv1d, with the padding mask re-applied
    around every convolution."""

    def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.filter_channels = filter_channels
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        pad = kernel_size // 2
        self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=pad)
        self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size, padding=pad)
        self.drop = torch.nn.Dropout(p_dropout)

    def forward(self, x, x_mask):
        hidden = self.drop(torch.relu(self.conv_1(x * x_mask)))
        return self.conv_2(hidden * x_mask) * x_mask
class Encoder(nn.Module):
    """Pre-LN-free transformer encoder: per layer, masked self-attention and a
    masked FFN, each followed by a residual add and LayerNorm."""

    def __init__(
        self,
        hidden_channels,
        filter_channels,
        n_heads,
        n_layers,
        kernel_size=1,
        p_dropout=0.0,
        **kwargs,
    ):
        super().__init__()
        self.hidden_channels = hidden_channels
        self.filter_channels = filter_channels
        self.n_heads = n_heads
        self.n_layers = n_layers
        self.kernel_size = kernel_size
        self.p_dropout = p_dropout
        self.drop = torch.nn.Dropout(p_dropout)
        self.attn_layers = torch.nn.ModuleList()
        self.norm_layers_1 = torch.nn.ModuleList()
        self.ffn_layers = torch.nn.ModuleList()
        self.norm_layers_2 = torch.nn.ModuleList()
        for _ in range(self.n_layers):
            self.attn_layers.append(
                MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)
            )
            self.norm_layers_1.append(LayerNorm(hidden_channels))
            self.ffn_layers.append(
                FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)
            )
            self.norm_layers_2.append(LayerNorm(hidden_channels))

    def forward(self, x, x_mask):
        # Pairwise attention mask from the padding mask.
        attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
        blocks = zip(self.attn_layers, self.norm_layers_1, self.ffn_layers, self.norm_layers_2)
        for attn, norm_attn, ffn, norm_ffn in blocks:
            x = x * x_mask
            x = norm_attn(x + self.drop(attn(x, x, attn_mask)))
            x = norm_ffn(x + self.drop(ffn(x, x_mask)))
        return x * x_mask
class TextEncoder(nn.Module):
    """Transformer-based text encoder producing per-token mel means (`mu`)
    and log-durations, with an optional convolutional prenet."""
    def __init__(
        self,
        encoder_type,
        encoder_params,
        duration_predictor_params,
        n_vocab,
        n_spks=1,
        spk_emb_dim=128,
    ):
        """Build embedding, optional prenet, encoder, and output heads.

        Args:
            encoder_type: stored but not used for dispatch in this class.
            encoder_params: namespace with n_feats, n_channels, prenet,
                filter_channels, n_heads, n_layers, kernel_size, p_dropout.
            duration_predictor_params: namespace with filter_channels_dp,
                kernel_size, p_dropout.
            n_vocab: vocabulary size of the token embedding.
            n_spks: number of speakers; speaker embeddings are concatenated
                onto the encoder input only when > 1.
            spk_emb_dim: speaker embedding dimensionality.
        """
        super().__init__()
        self.encoder_type = encoder_type
        self.n_vocab = n_vocab
        self.n_feats = encoder_params.n_feats
        self.n_channels = encoder_params.n_channels
        self.spk_emb_dim = spk_emb_dim
        self.n_spks = n_spks
        # Embedding initialized with std n_channels^-0.5, matching the
        # sqrt(n_channels) scaling applied in forward().
        self.emb = torch.nn.Embedding(n_vocab, self.n_channels)
        torch.nn.init.normal_(self.emb.weight, 0.0, self.n_channels**-0.5)
        if encoder_params.prenet:
            self.prenet = ConvReluNorm(
                self.n_channels,
                self.n_channels,
                self.n_channels,
                kernel_size=5,
                n_layers=3,
                p_dropout=0.5,
            )
        else:
            # Identity stand-in with the same (x, x_mask) call signature.
            self.prenet = lambda x, x_mask: x
        self.encoder = Encoder(
            encoder_params.n_channels + (spk_emb_dim if n_spks > 1 else 0),
            encoder_params.filter_channels,
            encoder_params.n_heads,
            encoder_params.n_layers,
            encoder_params.kernel_size,
            encoder_params.p_dropout,
        )
        # Output heads: mel-mean projection and duration predictor.
        self.proj_m = torch.nn.Conv1d(self.n_channels + (spk_emb_dim if n_spks > 1 else 0), self.n_feats, 1)
        self.proj_w = DurationPredictor(
            self.n_channels + (spk_emb_dim if n_spks > 1 else 0),
            duration_predictor_params.filter_channels_dp,
            duration_predictor_params.kernel_size,
            duration_predictor_params.p_dropout,
        )
    def forward(self, x, x_lengths, spks=None):
        """Run forward pass to the transformer based encoder and duration predictor
        Args:
            x (torch.Tensor): text input
                shape: (batch_size, max_text_length)
            x_lengths (torch.Tensor): text input lengths
                shape: (batch_size,)
            spks (torch.Tensor, optional): speaker ids. Defaults to None.
                shape: (batch_size,)
        Returns:
            mu (torch.Tensor): average output of the encoder
                shape: (batch_size, n_feats, max_text_length)
            logw (torch.Tensor): log duration predicted by the duration predictor
                shape: (batch_size, 1, max_text_length)
            x_mask (torch.Tensor): mask for the text input
                shape: (batch_size, 1, max_text_length)
        """
        x = self.emb(x) * math.sqrt(self.n_channels)
        x = torch.transpose(x, 1, -1)
        x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
        x = self.prenet(x, x_mask)
        if self.n_spks > 1:
            # Broadcast the speaker embedding over time and concatenate.
            x = torch.cat([x, spks.unsqueeze(-1).repeat(1, 1, x.shape[-1])], dim=1)
        x = self.encoder(x, x_mask)
        mu = self.proj_m(x) * x_mask
        # Detach so the duration loss does not backprop into the encoder.
        x_dp = torch.detach(x)
        logw = self.proj_w(x_dp, x_mask)
        return mu, logw, x_mask
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/matcha/text_encoder.py",
"license": "MIT License",
"lines": 339,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/matcha/transformer.py | from typing import Any, Dict, Optional
import torch
import torch.nn as nn
from diffusers.models.attention import (
GEGLU,
GELU,
AdaLayerNorm,
AdaLayerNormZero,
ApproximateGELU,
)
from diffusers.models.attention_processor import Attention
from diffusers.models.lora import LoRACompatibleLinear
from diffusers.utils.torch_utils import maybe_allow_in_graph
class SnakeBeta(nn.Module):
    """
    A modified Snake function which uses separate parameters for the magnitude
    of the periodic components, preceded by a linear projection.

        SnakeBeta(x) := x + 1/beta * sin^2(x * alpha)

    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha - trainable parameter that controls frequency
        - beta - trainable parameter that controls magnitude
    References:
        - This activation function is a modified version based on this paper by
          Liu Ziyin, Tilman Hartwig, Masahito Ueda:
          https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = snakebeta(256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    """

    def __init__(self, in_features, out_features, alpha=1.0, alpha_trainable=True, alpha_logscale=True):
        """
        Initialization.

        INPUT:
            - in_features: input size of the projection
            - out_features: output size of the projection / activation width
            - alpha: initial frequency scale (1.0 by default; higher values
              give higher frequency)
            - alpha_trainable: whether alpha/beta are trained
            - alpha_logscale: store alpha/beta in log scale (initialized to
              zeros) instead of linear scale (initialized to ones)
        """
        super().__init__()
        self.in_features = out_features if isinstance(out_features, list) else [out_features]
        self.proj = LoRACompatibleLinear(in_features, out_features)
        # initialize alpha / beta: zeros in log scale, ones in linear scale
        self.alpha_logscale = alpha_logscale
        init_fn = torch.zeros if self.alpha_logscale else torch.ones
        self.alpha = nn.Parameter(init_fn(self.in_features) * alpha)
        self.beta = nn.Parameter(init_fn(self.in_features) * alpha)
        self.alpha.requires_grad = alpha_trainable
        self.beta.requires_grad = alpha_trainable
        self.no_div_by_zero = 0.000000001

    def forward(self, x):
        """
        Forward pass: project, then apply SnakeBeta elementwise.
        """
        x = self.proj(x)
        if self.alpha_logscale:
            alpha = torch.exp(self.alpha)
            beta = torch.exp(self.beta)
        else:
            alpha = self.alpha
            beta = self.beta
        x = x + (1.0 / (beta + self.no_div_by_zero)) * torch.pow(torch.sin(x * alpha), 2)
        return x
class FeedForward(nn.Module):
    r"""
    A feed-forward layer.
    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
            One of "gelu", "gelu-approximate", "geglu", "geglu-approximate", "snakebeta".
        final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
    Raises:
        ValueError: if `activation_fn` is not one of the supported values.
    """
    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        # Fix: the original used `if` / `if` / `elif`, and an unrecognized
        # activation_fn fell through leaving `act_fn` unbound, crashing later
        # with an opaque NameError. Use a single elif chain and fail fast.
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        elif activation_fn == "snakebeta":
            act_fn = SnakeBeta(dim, inner_dim)
        else:
            raise ValueError(f"Unsupported activation_fn: {activation_fn!r}")
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(LoRACompatibleLinear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))
    def forward(self, hidden_states):
        """Apply the activation / dropout / projection pipeline in order."""
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block.
    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        only_cross_attention (`bool`, *optional*):
            Whether to use only cross-attention layers. In this case two cross attention layers are used.
        double_self_attention (`bool`, *optional*):
            Whether to use two self-attention layers. In this case no cross attention layers are used.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (:
            obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
        attention_bias (:
            obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
    """
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        # AdaLayerNorm variants require the number of diffusion-step
        # embeddings to be known up front.
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
                # scale_qk=False, # uncomment this to not to use flash attention
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
        # let chunk size default to None (no feed-forward chunking)
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        """Enable chunked feed-forward: split the FF input into `chunk_size`
        pieces along dimension `dim` to trade compute for memory."""
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        """Run self-attention, optional cross-attention, and feed-forward,
        each with a pre-normalization and a residual connection.

        `timestep`/`class_labels` are only consumed by the AdaLayerNorm
        variants; `encoder_*` arguments feed the cross-attention block.
        """
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            # AdaLayerNormZero also emits the gate/shift/scale modulation used
            # further down.
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=encoder_attention_mask if self.only_cross_attention else attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/matcha/transformer.py",
"license": "MIT License",
"lines": 276,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/s3gen.py | # Modified from CosyVoice https://github.com/FunAudioLLM/CosyVoice
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import numpy as np
import torch
import torchaudio as ta
from functools import lru_cache
from typing import Optional
from ..s3tokenizer import S3_SR, SPEECH_VOCAB_SIZE, S3Tokenizer
from .const import S3GEN_SR
from .flow import CausalMaskedDiffWithXvec
from .xvector import CAMPPlus
from .utils.mel import mel_spectrogram
from .f0_predictor import ConvRNNF0Predictor
from .hifigan import HiFTGenerator
from .transformer.upsample_encoder import UpsampleConformerEncoder
from .flow_matching import CausalConditionalCFM
from .decoder import ConditionalDecoder
from .configs import CFM_PARAMS
def drop_invalid_tokens(x):
    """Drop token ids that fall outside the speech vocabulary.

    Accepts a 1-D tensor or a (1, T) batch; returns a flat tensor of the
    surviving tokens.
    """
    assert len(x.shape) <= 2 and x.shape[0] == 1, "only batch size of one allowed for now"
    valid = x < SPEECH_VOCAB_SIZE
    return x[valid]
# TODO: global resampler cache
@lru_cache(100)
def get_resampler(src_sr, dst_sr, device):
    """Return a cached torchaudio Resample(src_sr -> dst_sr) module on `device`."""
    resampler = ta.transforms.Resample(src_sr, dst_sr)
    return resampler.to(device)
class S3Token2Mel(torch.nn.Module):
    """
    S3Gen's CFM decoder maps S3 speech tokens to mel-spectrograms.
    TODO: make these modules configurable?

    Components: an S3 tokenizer (16 kHz input), a CAMPPlus speaker encoder,
    and an upsampling conformer encoder + causal conditional CFM flow that
    predicts 80-bin mel frames.
    """
    def __init__(self, meanflow=False):
        # meanflow: selects the mean-flow variant of the CFM estimator; the
        # flag is also forwarded to flow.inference at call time.
        super().__init__()
        self.tokenizer = S3Tokenizer("speech_tokenizer_v2_25hz")
        self.mel_extractor = mel_spectrogram # TODO: make it a torch module?
        self.speaker_encoder = CAMPPlus(
            # NOTE: This doesn't affect inference. It turns off activation checkpointing
            # (a training optimization), which causes a crazy DDP error with accelerate
            memory_efficient=False,
        )
        self.meanflow = meanflow
        # Token encoder: conformer with 512-d input/output features.
        encoder = UpsampleConformerEncoder(
            output_size=512,
            attention_heads=8,
            linear_units=2048,
            num_blocks=6,
            dropout_rate=0.1,
            positional_dropout_rate=0.1,
            attention_dropout_rate=0.1,
            normalize_before=True,
            input_layer='linear',
            pos_enc_layer_type='rel_pos_espnet',
            selfattention_layer_type='rel_selfattn',
            input_size=512,
            use_cnn_module=False,
            macaron_style=False,
        )
        # CFM vector-field estimator: 320 input channels -> 80 mel bins.
        estimator = ConditionalDecoder(
            in_channels=320,
            out_channels=80,
            causal=True,
            channels=[256],
            dropout=0.0,
            attention_head_dim=64,
            n_blocks=4,
            num_mid_blocks=12,
            num_heads=8,
            act_fn='gelu',
            meanflow=self.meanflow,
        )
        cfm_params = CFM_PARAMS
        decoder = CausalConditionalCFM(
            spk_emb_dim=80,
            cfm_params=cfm_params,
            estimator=estimator,
        )
        self.flow = CausalMaskedDiffWithXvec(
            encoder=encoder,
            decoder=decoder
        )
        # NOTE(review): appears unused here — resampling goes through the
        # module-level lru-cached get_resampler; confirm before removing.
        self.resamplers = {}
    @property
    def device(self):
        # Device of the model, taken from the tokenizer's first parameter.
        params = self.tokenizer.parameters()
        return next(params).device
    @property
    def dtype(self):
        # Dtype of the flow's first parameter; used to cast inputs below.
        params = self.flow.parameters()
        return next(params).dtype
    def embed_ref(
        self,
        ref_wav: torch.Tensor,
        ref_sr: int,
        device="auto",
        ref_fade_out=True,
    ):
        """Build the reference-conditioning dict from a reference waveform.

        Args:
            ref_wav: reference waveform, shape (L,) or (B=1, L); numpy arrays accepted.
            ref_sr: sample rate of ``ref_wav``.
            device: target device, or "auto" to use the model's device.
            ref_fade_out: NOTE(review) — not read anywhere in this method; confirm
                whether any caller relies on it before removing.

        Returns:
            dict(prompt_token, prompt_token_len, prompt_feat, prompt_feat_len,
            embedding) as consumed by ``self.flow.inference``.
        """
        device = self.device if device == "auto" else device
        if isinstance(ref_wav, np.ndarray):
            ref_wav = torch.from_numpy(ref_wav).float()
        if ref_wav.device != device:
            ref_wav = ref_wav.to(device)
        if len(ref_wav.shape) == 1:
            ref_wav = ref_wav.unsqueeze(0) # (B, L)
        if ref_wav.size(1) > 10 * ref_sr:
            print("WARNING: s3gen received ref longer than 10s")
        # 24 kHz copy feeds the mel prompt extractor.
        ref_wav_24 = ref_wav
        if ref_sr != S3GEN_SR:
            ref_wav_24 = get_resampler(ref_sr, S3GEN_SR, device)(ref_wav)
        ref_wav_24 = ref_wav_24.to(device=device, dtype=self.dtype)
        ref_mels_24 = self.mel_extractor(ref_wav_24).transpose(1, 2).to(dtype=self.dtype)
        ref_mels_24_len = None
        # Resample to 16kHz
        ref_wav_16 = ref_wav
        if ref_sr != S3_SR:
            ref_wav_16 = get_resampler(ref_sr, S3_SR, device)(ref_wav)
        # Speaker embedding
        ref_x_vector = self.speaker_encoder.inference(ref_wav_16.to(dtype=self.dtype))
        # Tokenize 16khz reference
        ref_speech_tokens, ref_speech_token_lens = self.tokenizer(ref_wav_16.float())
        # Make sure mel_len = 2 * stoken_len (happens when the input is not padded to multiple of 40ms)
        if ref_mels_24.shape[1] != 2 * ref_speech_tokens.shape[1]:
            logging.warning(
                "Reference mel length is not equal to 2 * reference token length.\n"
            )
            # Truncate the token sequence so the 2:1 mel-to-token ratio holds.
            ref_speech_tokens = ref_speech_tokens[:, :ref_mels_24.shape[1] // 2]
            ref_speech_token_lens[0] = ref_speech_tokens.shape[1]
        return dict(
            prompt_token=ref_speech_tokens.to(device),
            prompt_token_len=ref_speech_token_lens,
            prompt_feat=ref_mels_24,
            prompt_feat_len=ref_mels_24_len,
            embedding=ref_x_vector,
        )
    def forward(
        self,
        speech_tokens: torch.LongTensor,
        # locally-computed ref embedding (mutex with ref_dict)
        ref_wav: Optional[torch.Tensor],
        ref_sr: Optional[int],
        # pre-computed ref embedding (prod API)
        ref_dict: Optional[dict] = None,
        n_cfm_timesteps = None,
        finalize: bool = False,
        speech_token_lens=None,
        noised_mels=None,
    ):
        """
        Generate mel-spectrograms from S3 speech tokens and a reference waveform,
        which the speaker timbre is inferred from.
        NOTE:
        - The speaker encoder accepts 16 kHz waveform.
        - S3TokenizerV2 accepts 16 kHz waveform.
        - The mel-spectrogram for the reference assumes 24 kHz input signal.
        - This function is designed for batch_size=1 only.
        Args
        ----
        - `speech_tokens`: S3 speech tokens [B=1, T]
        - `ref_wav`: reference waveform (`torch.Tensor` with shape=[B=1, T])
        - `ref_sr`: reference sample rate
        - `finalize`: whether streaming is finished or not. Note that if False, the last 3 tokens will be ignored.
        """
        # Exactly one conditioning source must be supplied.
        assert (ref_wav is None) ^ (ref_dict is None), f"Must provide exactly one of ref_wav or ref_dict (got {ref_wav} and {ref_dict})"
        if ref_dict is None:
            ref_dict = self.embed_ref(ref_wav, ref_sr)
        else:
            # type/device casting (all values will be numpy if it's from a prod API call)
            for rk in list(ref_dict):
                if isinstance(ref_dict[rk], np.ndarray):
                    ref_dict[rk] = torch.from_numpy(ref_dict[rk])
                if torch.is_tensor(ref_dict[rk]):
                    ref_dict[rk] = ref_dict[rk].to(device=self.device, dtype=self.dtype)
        speech_tokens = torch.atleast_2d(speech_tokens)
        # backcompat
        if speech_token_lens is None:
            speech_token_lens = torch.LongTensor([st.size(-1) for st in speech_tokens]).to(self.device)
        output_mels, _ = self.flow.inference(
            token=speech_tokens,
            token_len=speech_token_lens,
            finalize=finalize,
            noised_mels=noised_mels,
            n_timesteps=n_cfm_timesteps,
            meanflow=self.meanflow,
            **ref_dict,
        )
        return output_mels
class S3Token2Wav(S3Token2Mel):
    """
    The decoder of S3Gen is a concat of token-to-mel (CFM) and a mel-to-waveform (HiFiGAN) modules.
    TODO: make these modules configurable?
    """
    # Checkpoint keys that are recreated at init time and may be absent.
    ignore_state_dict_missing = ("tokenizer._mel_filters", "tokenizer.window")
    def __init__(self, meanflow=False):
        # meanflow is forwarded to the token-to-mel stage (see S3Token2Mel).
        super().__init__(meanflow)
        f0_predictor = ConvRNNF0Predictor()
        self.mel2wav = HiFTGenerator(
            sampling_rate=S3GEN_SR,
            upsample_rates=[8, 5, 3],
            upsample_kernel_sizes=[16, 11, 7],
            source_resblock_kernel_sizes=[7, 7, 11],
            source_resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
            f0_predictor=f0_predictor,
        )
        # silence out a few ms and fade audio in to reduce artifacts
        n_trim = S3GEN_SR // 50  # 20ms = half of a frame
        trim_fade = torch.zeros(2 * n_trim)
        # First n_trim samples stay zero; the next n_trim follow a raised-cosine
        # ramp from 0 up to 1.
        trim_fade[n_trim:] = (torch.cos(torch.linspace(torch.pi, 0, n_trim)) + 1) / 2
        self.register_buffer("trim_fade", trim_fade, persistent=False) # (buffers get automatic device casting)
        # NOTE(review): set but not read in this class — presumably consumed
        # elsewhere; confirm before removing.
        self.estimator_dtype = "fp32"
    def forward(
        self,
        speech_tokens,
        # locally-computed ref embedding (mutex with ref_dict)
        ref_wav: Optional[torch.Tensor],
        ref_sr: Optional[int],
        # pre-computed ref embedding (prod API)
        ref_dict: Optional[dict] = None,
        finalize: bool = False,
        speech_token_lens=None,
        skip_vocoder=False,
        n_cfm_timesteps=None,
        noised_mels=None,
    ):
        """
        Generate waveforms from S3 speech tokens and a reference waveform, which the speaker timbre is inferred from.
        NOTE: used for sync synthesis only. Please use `S3GenStreamer` for streaming synthesis.

        When ``skip_vocoder`` is True, the intermediate mel-spectrograms are
        returned instead of waveforms.
        """
        output_mels = super().forward(
            speech_tokens, speech_token_lens=speech_token_lens, ref_wav=ref_wav,
            ref_sr=ref_sr, ref_dict=ref_dict, finalize=finalize,
            n_cfm_timesteps=n_cfm_timesteps, noised_mels=noised_mels,
        )
        if skip_vocoder:
            return output_mels
        # TODO jrm: ignoring the speed control (mel interpolation) and the HiFTGAN caching mechanisms for now.
        hift_cache_source = torch.zeros(1, 1, 0).to(self.device)
        output_wavs, *_ = self.mel2wav.inference(speech_feat=output_mels, cache_source=hift_cache_source)
        if not self.training:
            # NOTE: ad-hoc method to reduce "spillover" from the reference clip.
            output_wavs[:, :len(self.trim_fade)] *= self.trim_fade
        return output_wavs
    @torch.inference_mode()
    def flow_inference(
        self,
        speech_tokens,
        # locally-computed ref embedding (mutex with ref_dict)
        ref_wav: Optional[torch.Tensor] = None,
        ref_sr: Optional[int] = None,
        # pre-computed ref embedding (prod API)
        ref_dict: Optional[dict] = None,
        n_cfm_timesteps = None,
        finalize: bool = False,
        speech_token_lens=None,
    ):
        """Token-to-mel stage only. Defaults to 2 CFM steps for meanflow, else 10."""
        n_cfm_timesteps = n_cfm_timesteps or (2 if self.meanflow else 10)
        noise = None
        if self.meanflow:
            # Pre-sample the initial noise: 80 mel bins, 2 frames per token.
            noise = torch.randn(1, 80, speech_tokens.size(-1) * 2, dtype=self.dtype, device=self.device)
        output_mels = super().forward(
            speech_tokens, speech_token_lens=speech_token_lens, ref_wav=ref_wav, ref_sr=ref_sr, ref_dict=ref_dict,
            n_cfm_timesteps=n_cfm_timesteps, finalize=finalize, noised_mels=noise,
        )
        return output_mels
    @torch.inference_mode()
    def hift_inference(self, speech_feat, cache_source: torch.Tensor = None):
        """Mel-to-waveform stage (HiFTGenerator); creates an empty cache if none given."""
        if cache_source is None:
            cache_source = torch.zeros(1, 1, 0).to(device=self.device, dtype=self.dtype)
        return self.mel2wav.inference(speech_feat=speech_feat, cache_source=cache_source)
    @torch.inference_mode()
    def inference(
        self,
        speech_tokens,
        # locally-computed ref embedding (mutex with ref_dict)
        ref_wav: Optional[torch.Tensor] = None,
        ref_sr: Optional[int] = None,
        # pre-computed ref embedding (prod API)
        ref_dict: Optional[dict] = None,
        # left as a kwarg because this can change input/output size ratio
        drop_invalid_tokens=True,
        n_cfm_timesteps=None,
        speech_token_lens=None,
    ):
        """Full non-streaming synthesis: tokens -> mels -> (waveform, sources).

        NOTE(review): ``drop_invalid_tokens`` is currently a no-op — the
        filtering below is commented out; confirm intended behavior.
        """
        # hallucination prevention, drop special tokens
        # if drop_invalid_tokens:
        #     speech_tokens, speech_token_lens = drop_invalid(speech_tokens, pad=S3_QUIET_PAD)
        output_mels = self.flow_inference(
            speech_tokens,
            speech_token_lens=speech_token_lens,
            ref_wav=ref_wav,
            ref_sr=ref_sr,
            ref_dict=ref_dict,
            n_cfm_timesteps=n_cfm_timesteps,
            finalize=True,
        )
        output_mels = output_mels.to(dtype=self.dtype) # FIXME (fp16 mode) is this still needed?
        output_wavs, output_sources = self.hift_inference(output_mels, None)
        # NOTE: ad-hoc method to reduce "spillover" from the reference clip.
        output_wavs[:, :len(self.trim_fade)] *= self.trim_fade
        return output_wavs, output_sources
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/s3gen.py",
"license": "MIT License",
"lines": 308,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/transformer/activation.py | # Copyright (c) 2020 Johns Hopkins University (Shinji Watanabe)
# 2020 Northwestern Polytechnical University (Pengcheng Guo)
# 2020 Mobvoi Inc (Binbin Zhang)
# 2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Swish() activation function for Conformer."""
import torch
from torch import nn, sin, pow
from torch.nn import Parameter
class Swish(torch.nn.Module):
    """Swish activation: ``swish(x) = x * sigmoid(x)`` (a.k.a. SiLU)."""
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the Swish nonlinearity elementwise."""
        gate = torch.sigmoid(x)
        return x * gate
# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license.
# LICENSE is in incl_licenses directory.
class Snake(nn.Module):
    '''
    Sine-based periodic activation: ``snake(x) = x + (1/a) * sin^2(a * x)``.

    Shape:
        - Input: (B, C, T)
        - Output: (B, C, T), same shape as the input
    Parameters:
        - alpha - trainable per-channel frequency parameter
    References:
        - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
        https://arxiv.org/abs/2006.08195
    Examples:
        >>> a1 = snake(256)
        >>> x = torch.randn(256)
        >>> x = a1(x)
    '''
    def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False):
        '''
        Create a Snake activation with one ``alpha`` per channel.

        INPUT:
            - in_features: number of channels C of the input
            - alpha: initial frequency value; higher values = higher-frequency.
              alpha is trained along with the rest of the model by default.
            - alpha_trainable: whether alpha receives gradients
            - alpha_logscale: store alpha in log-space (init 0 => exp(0) == 1)
        '''
        super().__init__()
        self.in_features = in_features
        self.alpha_logscale = alpha_logscale
        if alpha_logscale:
            # Log-space: zeros give an effective alpha of exp(0) == 1.
            init_value = torch.zeros(in_features) * alpha
        else:
            # Linear space: ones scaled by the requested initial value.
            init_value = torch.ones(in_features) * alpha
        self.alpha = Parameter(init_value)
        self.alpha.requires_grad = alpha_trainable
        # Tiny epsilon guarding the 1/alpha division below.
        self.no_div_by_zero = 0.000000001
    def forward(self, x):
        '''
        Apply the Snake function elementwise.
        alpha is reshaped to [1, C, 1] so it broadcasts over batch and time.
        '''
        freq = self.alpha.unsqueeze(0).unsqueeze(-1)
        if self.alpha_logscale:
            freq = torch.exp(freq)
        return x + (1.0 / (freq + self.no_div_by_zero)) * pow(sin(x * freq), 2)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/transformer/activation.py",
"license": "MIT License",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/transformer/attention.py | # Copyright (c) 2019 Shigeki Karita
# 2020 Mobvoi Inc (Binbin Zhang)
# 2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
# 2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Multi-Head Attention layer definition."""
import math
from typing import Tuple
import torch
from torch import nn
class MultiHeadedAttention(nn.Module):
    """Multi-Head Attention layer.
    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
        key_bias (bool): Whether the key projection has a bias term.
    """
    def __init__(self,
                 n_head: int,
                 n_feat: int,
                 dropout_rate: float,
                 key_bias: bool = True):
        """Construct an MultiHeadedAttention object."""
        super().__init__()
        assert n_feat % n_head == 0
        # We assume d_v always equals d_k
        self.d_k = n_feat // n_head
        self.h = n_head
        self.linear_q = nn.Linear(n_feat, n_feat)
        self.linear_k = nn.Linear(n_feat, n_feat, bias=key_bias)
        self.linear_v = nn.Linear(n_feat, n_feat)
        self.linear_out = nn.Linear(n_feat, n_feat)
        self.dropout = nn.Dropout(p=dropout_rate)
    def forward_qkv(
        self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Transform query, key and value.
        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
        Returns:
            torch.Tensor: Transformed query tensor, size
                (#batch, n_head, time1, d_k).
            torch.Tensor: Transformed key tensor, size
                (#batch, n_head, time2, d_k).
            torch.Tensor: Transformed value tensor, size
                (#batch, n_head, time2, d_k).
        """
        n_batch = query.size(0)
        # Project then split the feature dim into (heads, d_k).
        q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)
        k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)
        v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)
        q = q.transpose(1, 2)  # (batch, head, time1, d_k)
        k = k.transpose(1, 2)  # (batch, head, time2, d_k)
        v = v.transpose(1, 2)  # (batch, head, time2, d_k)
        return q, k, v
    def forward_attention(
        self,
        value: torch.Tensor,
        scores: torch.Tensor,
        mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool)
    ) -> torch.Tensor:
        """Compute attention context vector.
        Args:
            value (torch.Tensor): Transformed value, size
                (#batch, n_head, time2, d_k).
            scores (torch.Tensor): Attention score, size
                (#batch, n_head, time1, time2).
            mask (torch.Tensor): Mask, size (#batch, 1, time2) or
                (#batch, time1, time2), (0, 0, 0) means fake mask.
        Returns:
            torch.Tensor: Transformed value (#batch, time1, d_model)
                weighted by the attention score (#batch, time1, time2).
        """
        n_batch = value.size(0)
        # NOTE(xcsong): When will `if mask.size(2) > 0` be True?
        #   1. onnx(16/4) [WHY? Because we feed real cache & real mask for the
        #           1st chunk to ease the onnx export.]
        #   2. pytorch training
        if mask.size(2) > 0:  # time2 > 0
            # Zeros in the mask mark padded/disallowed positions.
            mask = mask.unsqueeze(1).eq(0)  # (batch, 1, *, time2)
            # For last chunk, time2 might be larger than scores.size(-1)
            mask = mask[:, :, :, :scores.size(-1)]  # (batch, 1, *, time2)
            scores = scores.masked_fill(mask, -float('inf'))
            # Re-zero masked positions after softmax (a fully-masked row would
            # otherwise yield NaNs from softmax over all -inf).
            attn = torch.softmax(scores, dim=-1).masked_fill(
                mask, 0.0)  # (batch, head, time1, time2)
        # NOTE(xcsong): When will `if mask.size(2) > 0` be False?
        #   1. onnx(16/-1, -1/-1, 16/0)
        #   2. jit (16/-1, -1/-1, 16/0, 16/4)
        else:
            attn = torch.softmax(scores, dim=-1)  # (batch, head, time1, time2)
        p_attn = self.dropout(attn)
        x = torch.matmul(p_attn, value)  # (batch, head, time1, d_k)
        # Merge heads back into the feature dimension.
        x = (x.transpose(1, 2).contiguous().view(n_batch, -1,
                                                 self.h * self.d_k)
             )  # (batch, time1, d_model)
        return self.linear_out(x)  # (batch, time1, d_model)
    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        pos_emb: torch.Tensor = torch.empty(0),
        cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute scaled dot product attention.
        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2).
                1.When applying cross attention between decoder and encoder,
                the batch padding mask for input is in (#batch, 1, T) shape.
                2.When applying self attention of encoder,
                the mask is in (#batch, T, T) shape.
                3.When applying self attention of decoder,
                the mask is in (#batch, L, L) shape.
                4.If the different position in decoder see different block
                of the encoder, such as Mocha, the passed in mask could be
                in (#batch, L, T) shape. But there is no such case in current
                CosyVoice.
            pos_emb (torch.Tensor): Unused in this base class; kept for
                signature compatibility with RelPositionMultiHeadedAttention.
            cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`
        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
            torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`
        """
        q, k, v = self.forward_qkv(query, key, value)
        # NOTE(xcsong):
        #   when export onnx model, for 1st chunk, we feed
        #       cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
        #       or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
        #       In all modes, `if cache.size(0) > 0` will alwayse be `True`
        #       and we will always do splitting and
        #       concatnation(this will simplify onnx export). Note that
        #       it's OK to concat & split zero-shaped tensors(see code below).
        #   when export jit  model, for 1st chunk, we always feed
        #       cache(0, 0, 0, 0) since jit supports dynamic if-branch.
        # >>> a = torch.ones((1, 2, 0, 4))
        # >>> b = torch.ones((1, 2, 3, 4))
        # >>> c = torch.cat((a, b), dim=2)
        # >>> torch.equal(b, c)        # True
        # >>> d = torch.split(a, 2, dim=-1)
        # >>> torch.equal(d[0], d[1])  # True
        if cache.size(0) > 0:
            # Cache holds (key, value) concatenated along the last dim.
            key_cache, value_cache = torch.split(cache,
                                                 cache.size(-1) // 2,
                                                 dim=-1)
            k = torch.cat([key_cache, k], dim=2)
            v = torch.cat([value_cache, v], dim=2)
        # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's
        #   non-trivial to calculate `next_cache_start` here.
        new_cache = torch.cat((k, v), dim=-1)
        # Standard scaled dot-product score.
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)
        return self.forward_attention(v, scores, mask), new_cache
class RelPositionMultiHeadedAttention(MultiHeadedAttention):
    """Multi-Head Attention layer with relative position encoding.
    Paper: https://arxiv.org/abs/1901.02860
    Args:
        n_head (int): The number of heads.
        n_feat (int): The number of features.
        dropout_rate (float): Dropout rate.
        key_bias (bool): Whether the key projection has a bias term.
    """
    def __init__(self,
                 n_head: int,
                 n_feat: int,
                 dropout_rate: float,
                 key_bias: bool = True):
        """Construct an RelPositionMultiHeadedAttention object."""
        super().__init__(n_head, n_feat, dropout_rate, key_bias)
        # linear transformation for positional encoding
        self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)
        # these two learnable bias are used in matrix c and matrix d
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))
        self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))
        torch.nn.init.xavier_uniform_(self.pos_bias_u)
        torch.nn.init.xavier_uniform_(self.pos_bias_v)
    def rel_shift(self, x: torch.Tensor) -> torch.Tensor:
        """Compute relative positional encoding.
        Shifts the relative-position score matrix so each query row is aligned
        with its own relative offsets (pad-reshape-slice trick).
        Args:
            x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).
            time1 means the length of query vector.
        Returns:
            torch.Tensor: Output tensor.
        """
        zero_pad = torch.zeros((x.size()[0], x.size()[1], x.size()[2], 1),
                               device=x.device,
                               dtype=x.dtype)
        x_padded = torch.cat([zero_pad, x], dim=-1)
        x_padded = x_padded.view(x.size()[0],
                                 x.size()[1],
                                 x.size(3) + 1, x.size(2))
        x = x_padded[:, :, 1:].view_as(x)[
            :, :, :, : x.size(-1) // 2 + 1
        ]  # only keep the positions from 0 to time2
        return x
    def forward(
        self,
        query: torch.Tensor,
        key: torch.Tensor,
        value: torch.Tensor,
        mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        pos_emb: torch.Tensor = torch.empty(0),
        cache: torch.Tensor = torch.zeros((0, 0, 0, 0))
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute 'Scaled Dot Product Attention' with rel. positional encoding.
        Args:
            query (torch.Tensor): Query tensor (#batch, time1, size).
            key (torch.Tensor): Key tensor (#batch, time2, size).
            value (torch.Tensor): Value tensor (#batch, time2, size).
            mask (torch.Tensor): Mask tensor (#batch, 1, time2) or
                (#batch, time1, time2), (0, 0, 0) means fake mask.
            pos_emb (torch.Tensor): Positional embedding tensor
                (#batch, time2, size).
            cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`
        Returns:
            torch.Tensor: Output tensor (#batch, time1, d_model).
            torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)
                where `cache_t == chunk_size * num_decoding_left_chunks`
                and `head * d_k == size`
        """
        q, k, v = self.forward_qkv(query, key, value)
        # Undo forward_qkv's head transpose; q gets re-transposed after the
        # position biases are added below.
        q = q.transpose(1, 2)  # (batch, time1, head, d_k)
        # NOTE(xcsong):
        #   when export onnx model, for 1st chunk, we feed
        #       cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)
        #       or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).
        #       In all modes, `if cache.size(0) > 0` will alwayse be `True`
        #       and we will always do splitting and
        #       concatnation(this will simplify onnx export). Note that
        #       it's OK to concat & split zero-shaped tensors(see code below).
        #   when export jit  model, for 1st chunk, we always feed
        #       cache(0, 0, 0, 0) since jit supports dynamic if-branch.
        # >>> a = torch.ones((1, 2, 0, 4))
        # >>> b = torch.ones((1, 2, 3, 4))
        # >>> c = torch.cat((a, b), dim=2)
        # >>> torch.equal(b, c)        # True
        # >>> d = torch.split(a, 2, dim=-1)
        # >>> torch.equal(d[0], d[1])  # True
        if cache.size(0) > 0:
            # Cache holds (key, value) concatenated along the last dim.
            key_cache, value_cache = torch.split(cache,
                                                 cache.size(-1) // 2,
                                                 dim=-1)
            k = torch.cat([key_cache, k], dim=2)
            v = torch.cat([value_cache, v], dim=2)
        # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's
        #   non-trivial to calculate `next_cache_start` here.
        new_cache = torch.cat((k, v), dim=-1)
        n_batch_pos = pos_emb.size(0)
        p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)
        p = p.transpose(1, 2)  # (batch, head, time1, d_k)
        # (batch, head, time1, d_k)
        q_with_bias_u = (q + self.pos_bias_u.to(q.device)).transpose(1, 2)
        # (batch, head, time1, d_k)
        q_with_bias_v = (q + self.pos_bias_v.to(q.device)).transpose(1, 2)
        # compute attention score
        # first compute matrix a and matrix c
        # as described in https://arxiv.org/abs/1901.02860 Section 3.3
        # (batch, head, time1, time2)
        matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))
        # compute matrix b and matrix d
        # (batch, head, time1, time2)
        matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))
        # NOTE(Xiang Lyu): Keep rel_shift since espnet rel_pos_emb is used
        if matrix_ac.shape != matrix_bd.shape:
            matrix_bd = self.rel_shift(matrix_bd)
        scores = (matrix_ac + matrix_bd) / math.sqrt(
            self.d_k)  # (batch, head, time1, time2)
        return self.forward_attention(v, scores, mask), new_cache
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/transformer/attention.py",
"license": "MIT License",
"lines": 289,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/transformer/convolution.py | # Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
# 2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
"""ConvolutionModule definition."""
from typing import Tuple
import torch
from torch import nn
class ConvolutionModule(nn.Module):
    """ConvolutionModule in Conformer model.

    Structure: pointwise conv (2x channels) -> GLU -> depthwise conv ->
    norm + activation -> pointwise conv, with optional causal left-padding
    and a streaming cache.
    """
    def __init__(self,
                 channels: int,
                 kernel_size: int = 15,
                 activation: nn.Module = nn.ReLU(),
                 norm: str = "batch_norm",
                 causal: bool = False,
                 bias: bool = True):
        """Construct an ConvolutionModule object.
        Args:
            channels (int): The number of channels of conv layers.
            kernel_size (int): Kernel size of conv layers.
            activation (nn.Module): Activation applied after normalization.
            norm (str): Either 'batch_norm' or 'layer_norm'.
            causal (int): Whether use causal convolution or not
            bias (bool): Whether the conv layers have bias terms.
        """
        super().__init__()
        self.pointwise_conv1 = nn.Conv1d(
            channels,
            2 * channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=bias,
        )
        # self.lorder is used to distinguish if it's a causal convolution,
        # if self.lorder > 0: it's a causal convolution, the input will be
        #    padded with self.lorder frames on the left in forward.
        # else: it's a symmetrical convolution
        if causal:
            padding = 0
            self.lorder = kernel_size - 1
        else:
            # kernel_size should be an odd number for none causal convolution
            assert (kernel_size - 1) % 2 == 0
            padding = (kernel_size - 1) // 2
            self.lorder = 0
        self.depthwise_conv = nn.Conv1d(
            channels,
            channels,
            kernel_size,
            stride=1,
            padding=padding,
            groups=channels,
            bias=bias,
        )
        assert norm in ['batch_norm', 'layer_norm']
        if norm == "batch_norm":
            self.use_layer_norm = False
            self.norm = nn.BatchNorm1d(channels)
        else:
            self.use_layer_norm = True
            self.norm = nn.LayerNorm(channels)
        self.pointwise_conv2 = nn.Conv1d(
            channels,
            channels,
            kernel_size=1,
            stride=1,
            padding=0,
            bias=bias,
        )
        self.activation = activation
    def forward(
        self,
        x: torch.Tensor,
        mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        cache: torch.Tensor = torch.zeros((0, 0, 0)),
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Compute convolution module.
        Args:
            x (torch.Tensor): Input tensor (#batch, time, channels).
            mask_pad (torch.Tensor): used for batch padding (#batch, 1, time),
                (0, 0, 0) means fake mask.
            cache (torch.Tensor): left context cache, it is only
                used in causal convolution (#batch, channels, cache_t),
                (0, 0, 0) meas fake cache.
        Returns:
            torch.Tensor: Output tensor (#batch, time, channels).
            torch.Tensor: New left-context cache (#batch, channels, lorder),
                or a (0, 0, 0) fake cache for non-causal convolution.
        """
        # exchange the temporal dimension and the feature dimension
        x = x.transpose(1, 2)  # (#batch, channels, time)
        # mask batch padding
        if mask_pad.size(2) > 0:  # time > 0
            x.masked_fill_(~mask_pad, 0.0)
        if self.lorder > 0:
            if cache.size(2) == 0:  # cache_t == 0
                # First chunk: left-pad with zeros instead of a cache.
                x = nn.functional.pad(x, (self.lorder, 0), 'constant', 0.0)
            else:
                assert cache.size(0) == x.size(0)  # equal batch
                assert cache.size(1) == x.size(1)  # equal channel
                x = torch.cat((cache, x), dim=2)
            assert (x.size(2) > self.lorder)
            # Keep the last lorder frames as left context for the next chunk.
            new_cache = x[:, :, -self.lorder:]
        else:
            # It's better we just return None if no cache is required,
            # However, for JIT export, here we just fake one tensor instead of
            # None.
            new_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
        # GLU mechanism
        x = self.pointwise_conv1(x)  # (batch, 2*channel, dim)
        x = nn.functional.glu(x, dim=1)  # (batch, channel, dim)
        # 1D Depthwise Conv
        x = self.depthwise_conv(x)
        if self.use_layer_norm:
            # LayerNorm normalizes the channel dim, which must be last.
            x = x.transpose(1, 2)
        x = self.activation(self.norm(x))
        if self.use_layer_norm:
            x = x.transpose(1, 2)
        x = self.pointwise_conv2(x)
        # mask batch padding
        if mask_pad.size(2) > 0:  # time > 0
            x.masked_fill_(~mask_pad, 0.0)
        return x.transpose(1, 2), new_cache
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/transformer/convolution.py",
"license": "MIT License",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/transformer/embedding.py | # Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu)
# 2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
"""Positonal Encoding Module."""
import math
from typing import Tuple, Union
import torch
import torch.nn.functional as F
import numpy as np
class PositionalEncoding(torch.nn.Module):
    """Positional encoding.
    :param int d_model: embedding dim
    :param float dropout_rate: dropout rate
    :param int max_len: maximum input length
    PE(pos, 2i)   = sin(pos/(10000^(2i/dmodel)))
    PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))
    """
    def __init__(self,
                 d_model: int,
                 dropout_rate: float,
                 max_len: int = 5000,
                 reverse: bool = False):
        """Construct an PositionalEncoding object.

        NOTE(review): ``reverse`` is accepted (passed by RelPositionalEncoding)
        but never read in this class — confirm before removing.
        """
        super().__init__()
        self.d_model = d_model
        # Inputs are scaled by sqrt(d_model) before the encoding is added.
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.max_len = max_len
        # Precompute the full sinusoidal table once: (1, max_len, d_model).
        self.pe = torch.zeros(self.max_len, self.d_model)
        position = torch.arange(0, self.max_len,
                                dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32) *
            -(math.log(10000.0) / self.d_model))
        self.pe[:, 0::2] = torch.sin(position * div_term)
        self.pe[:, 1::2] = torch.cos(position * div_term)
        self.pe = self.pe.unsqueeze(0)
    def forward(self,
                x: torch.Tensor,
                offset: Union[int, torch.Tensor] = 0) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """Add positional encoding.
        Args:
            x (torch.Tensor): Input. Its shape is (batch, time, ...)
            offset (int, torch.tensor): position offset
        Returns:
            torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)
            torch.Tensor: for compatibility to RelPositionalEncoding
        """
        # Lazily move the table to the input's device (pe is a plain attribute,
        # not a buffer, so it does not follow .to() automatically).
        self.pe = self.pe.to(x.device)
        pos_emb = self.position_encoding(offset, x.size(1), False)
        x = x * self.xscale + pos_emb
        return self.dropout(x), self.dropout(pos_emb)
    def position_encoding(self,
                          offset: Union[int, torch.Tensor],
                          size: int,
                          apply_dropout: bool = True) -> torch.Tensor:
        """ For getting encoding in a streaming fashion
        Attention!!!!!
        we apply dropout only once at the whole utterance level in a none
        streaming way, but will call this function several times with
        increasing input size in a streaming scenario, so the dropout will
        be applied several times.
        Args:
            offset (int or torch.tensor): start offset
            size (int): required size of position encoding
        Returns:
            torch.Tensor: Corresponding encoding
        """
        # How to subscript a Union type:
        #   https://github.com/pytorch/pytorch/issues/69434
        if isinstance(offset, int):
            assert offset + size <= self.max_len
            pos_emb = self.pe[:, offset:offset + size]
        elif isinstance(offset, torch.Tensor) and offset.dim() == 0:  # scalar
            assert offset + size <= self.max_len
            pos_emb = self.pe[:, offset:offset + size]
        else:  # for batched streaming decoding on GPU
            # Per-sample offsets: gather rows from the table via embedding.
            assert torch.max(offset) + size <= self.max_len
            index = offset.unsqueeze(1) + \
                torch.arange(0, size).to(offset.device)  # B X T
            flag = index > 0
            # remove negative offset
            index = index * flag
            pos_emb = F.embedding(index, self.pe[0])  # B X T X d_model
        if apply_dropout:
            pos_emb = self.dropout(pos_emb)
        return pos_emb
class RelPositionalEncoding(PositionalEncoding):
    """Relative positional encoding module.

    See : Appendix B in https://arxiv.org/abs/1901.02860

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
    """

    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
        """Initialize class."""
        super().__init__(d_model, dropout_rate, max_len, reverse=True)

    def forward(self,
                x: torch.Tensor,
                offset: Union[int, torch.Tensor] = 0) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """Scale the input and return it alongside its positional embedding.

        Unlike the absolute variant, the embedding is NOT added to the
        input; it is returned separately for relative attention.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
            torch.Tensor: Positional embedding tensor (1, time, `*`).
        """
        self.pe = self.pe.to(x.device)
        scaled = x * self.xscale
        emb = self.position_encoding(offset, x.size(1), False)
        return self.dropout(scaled), self.dropout(emb)
class WhisperPositionalEncoding(PositionalEncoding):
    """Sinusoidal position encoding as used in openai-whisper's encoder."""

    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 1500):
        super().__init__(d_model, dropout_rate, max_len)
        # Whisper does not rescale the input.
        self.xscale = 1.0
        # Log-spaced inverse timescales, the whisper construction of the
        # sinusoid table: [sin | cos] concatenated along the feature axis.
        log_increment = np.log(10000) / (d_model // 2 - 1)
        inv_timescales = torch.exp(-log_increment *
                                   torch.arange(d_model // 2))
        scaled_time = torch.arange(max_len)[:, np.newaxis] * \
            inv_timescales[np.newaxis, :]
        table = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)],
                          dim=1)
        # Replace the parent's plain attribute with a registered buffer so
        # the table follows .to()/.cuda() and appears in state_dict.
        delattr(self, "pe")
        self.register_buffer("pe", table.unsqueeze(0))
class LearnablePositionalEncoding(PositionalEncoding):
    """Learnable position encoding as used in openai-whisper's decoder."""

    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 448):
        super().__init__(d_model, dropout_rate, max_len)
        # NOTE(xcsong): overwrite self.pe & self.xscale — the table becomes
        # a trainable parameter and the input is no longer rescaled.
        self.pe = torch.nn.Parameter(torch.empty(1, max_len, d_model))
        self.xscale = 1.0
class NoPositionalEncoding(torch.nn.Module):
    """Interface-compatible stand-in that adds no positional information."""

    def __init__(self, d_model: int, dropout_rate: float):
        super().__init__()
        self.d_model = d_model
        self.dropout = torch.nn.Dropout(p=dropout_rate)

    def forward(self,
                x: torch.Tensor,
                offset: Union[int, torch.Tensor] = 0) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """Return the (dropout-regularized) input and an all-zero embedding.

        Exists only for interface compatibility with the other encodings.
        """
        zero_emb = torch.zeros(1, x.size(1), self.d_model).to(x.device)
        return self.dropout(x), zero_emb

    def position_encoding(self, offset: Union[int, torch.Tensor],
                          size: int) -> torch.Tensor:
        """Return an all-zero encoding of the requested size."""
        return torch.zeros(1, size, self.d_model)
class EspnetRelPositionalEncoding(torch.nn.Module):
    """Relative positional encoding module (new implementation).

    Details can be found in https://github.com/espnet/espnet/pull/2816.
    See : Appendix B in https://arxiv.org/abs/1901.02860

    Args:
        d_model (int): Embedding dimension.
        dropout_rate (float): Dropout rate.
        max_len (int): Maximum input length.
    """

    def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):
        """Construct an PositionalEncoding object."""
        super(EspnetRelPositionalEncoding, self).__init__()
        self.d_model = d_model
        self.xscale = math.sqrt(self.d_model)
        self.dropout = torch.nn.Dropout(p=dropout_rate)
        self.pe = None
        self.extend_pe(torch.tensor(0.0).expand(1, max_len))

    def extend_pe(self, x: torch.Tensor):
        """Grow (or re-home) the cached table so it covers ``x``'s length."""
        seq_len = x.size(1)
        if self.pe is not None:
            # The cache holds both positive and negative relative positions,
            # so its length is 2 * input_len - 1.
            if self.pe.size(1) >= seq_len * 2 - 1:
                if self.pe.dtype != x.dtype or self.pe.device != x.device:
                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
                return
        # With query position i and key position j, positive relative
        # positions are used when i > j and negative ones when i < j.
        position = torch.arange(0, seq_len, dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(
            torch.arange(0, self.d_model, 2, dtype=torch.float32)
            * -(math.log(10000.0) / self.d_model)
        )
        pe_positive = torch.zeros(seq_len, self.d_model)
        pe_negative = torch.zeros(seq_len, self.d_model)
        pe_positive[:, 0::2] = torch.sin(position * div_term)
        pe_positive[:, 1::2] = torch.cos(position * div_term)
        pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
        pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
        # Reverse the positive half and drop the duplicated zero position
        # from the negative half before concatenating; this layout supports
        # the shifting trick of https://arxiv.org/abs/1901.02860
        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
        pe_negative = pe_negative[1:].unsqueeze(0)
        combined = torch.cat([pe_positive, pe_negative], dim=1)
        self.pe = combined.to(device=x.device, dtype=x.dtype)

    def forward(self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0) \
            -> Tuple[torch.Tensor, torch.Tensor]:
        """Scale the input and return it with its relative embedding.

        Args:
            x (torch.Tensor): Input tensor (batch, time, `*`).

        Returns:
            torch.Tensor: Encoded tensor (batch, time, `*`).
            torch.Tensor: Relative positional embedding.
        """
        self.extend_pe(x)
        scaled = x * self.xscale
        pos_emb = self.position_encoding(size=x.size(1), offset=offset)
        return self.dropout(scaled), self.dropout(pos_emb)

    def position_encoding(self,
                          offset: Union[int, torch.Tensor],
                          size: int) -> torch.Tensor:
        """Slice the window of relative encodings centered on position 0.

        Attention!!!!!
        In non-streaming use dropout is applied once over the whole
        utterance, but this method may run several times with increasing
        ``size`` when streaming, so dropout can be applied more than once.

        Args:
            offset (int or torch.tensor): start offset. NOTE(review): not
                used for slicing here — kept for interface compatibility.
            size (int): required size of position encoding.

        Returns:
            torch.Tensor: Corresponding encoding.
        """
        center = self.pe.size(1) // 2
        return self.pe[:, center - size + 1:center + size]
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/transformer/embedding.py",
"license": "MIT License",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/transformer/encoder_layer.py | # Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
# 2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
"""Encoder self-attention layer definition."""
from typing import Optional, Tuple
import torch
from torch import nn
class TransformerEncoderLayer(nn.Module):
    """Single Transformer encoder block: self-attention + feed-forward.

    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
            instance can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward`, instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool):
            True: use layer_norm before each sub-block (pre-norm).
            False: use layer_norm after each sub-block (post-norm).
    """

    def __init__(
        self,
        size: int,
        self_attn: torch.nn.Module,
        feed_forward: torch.nn.Module,
        dropout_rate: float,
        normalize_before: bool = True,
    ):
        """Construct an EncoderLayer object."""
        super().__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.norm1 = nn.LayerNorm(size, eps=1e-12)
        self.norm2 = nn.LayerNorm(size, eps=1e-12)
        self.dropout = nn.Dropout(dropout_rate)
        self.size = size
        self.normalize_before = normalize_before

    def forward(
        self,
        x: torch.Tensor,
        mask: torch.Tensor,
        pos_emb: torch.Tensor,
        mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
        cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute encoded features.

        Args:
            x (torch.Tensor): (#batch, time, size)
            mask (torch.Tensor): Mask tensor for the input (#batch, time,time),
                (0, 0, 0) means fake mask.
            pos_emb (torch.Tensor): just for interface compatibility
                to ConformerEncoderLayer
            mask_pad (torch.Tensor): not used in transformer layer,
                just for unified api with conformer.
            att_cache (torch.Tensor): Cache tensor of the KEY & VALUE
                (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.
            cnn_cache (torch.Tensor): Convolution cache in conformer layer
                (#batch=1, size, cache_t2), not used here, it's for interface
                compatibility to ConformerEncoderLayer.

        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time, time).
            torch.Tensor: att_cache tensor,
                (#batch=1, head, cache_t1 + time, d_k * 2).
            torch.Tensor: cnn_cache tensor (#batch=1, size, cache_t2).
        """
        # --- self-attention sub-block (residual + pre/post norm) ---
        shortcut = x
        if self.normalize_before:
            x = self.norm1(x)
        attn_out, new_att_cache = self.self_attn(
            x, x, x, mask, pos_emb=pos_emb, cache=att_cache)
        x = shortcut + self.dropout(attn_out)
        if not self.normalize_before:
            x = self.norm1(x)
        # --- feed-forward sub-block (residual + pre/post norm) ---
        shortcut = x
        if self.normalize_before:
            x = self.norm2(x)
        x = shortcut + self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm2(x)
        # Transformer layers have no convolution cache; return an empty one
        # for interface compatibility with ConformerEncoderLayer.
        fake_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
        return x, mask, new_att_cache, fake_cnn_cache
class ConformerEncoderLayer(nn.Module):
    """Conformer encoder layer module.

    Sub-block order: (optional macaron FFN) -> MHSA -> (optional conv) ->
    FFN -> (final norm when a conv module is present), each sub-block with
    a residual connection and pre- or post-LayerNorm.

    Args:
        size (int): Input dimension.
        self_attn (torch.nn.Module): Self-attention module instance.
            `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`
            instance can be used as the argument.
        feed_forward (torch.nn.Module): Feed-forward module instance.
            `PositionwiseFeedForward` instance can be used as the argument.
        feed_forward_macaron (torch.nn.Module): Additional feed-forward module
            instance.
            `PositionwiseFeedForward` instance can be used as the argument.
        conv_module (torch.nn.Module): Convolution module instance.
            `ConvlutionModule` instance can be used as the argument.
        dropout_rate (float): Dropout rate.
        normalize_before (bool):
            True: use layer_norm before each sub-block.
            False: use layer_norm after each sub-block.
    """

    def __init__(
        self,
        size: int,
        self_attn: torch.nn.Module,
        feed_forward: Optional[nn.Module] = None,
        feed_forward_macaron: Optional[nn.Module] = None,
        conv_module: Optional[nn.Module] = None,
        dropout_rate: float = 0.1,
        normalize_before: bool = True,
    ):
        """Construct an EncoderLayer object."""
        super().__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.feed_forward_macaron = feed_forward_macaron
        self.conv_module = conv_module
        self.norm_ff = nn.LayerNorm(size, eps=1e-12)  # for the FNN module
        self.norm_mha = nn.LayerNorm(size, eps=1e-12)  # for the MHA module
        if feed_forward_macaron is not None:
            self.norm_ff_macaron = nn.LayerNorm(size, eps=1e-12)
            # Macaron style halves the contribution of each of the two FFNs.
            self.ff_scale = 0.5
        else:
            self.ff_scale = 1.0
        if self.conv_module is not None:
            self.norm_conv = nn.LayerNorm(size, eps=1e-12)  # for the CNN module
            self.norm_final = nn.LayerNorm(
                size, eps=1e-12)  # for the final output of the block
        self.dropout = nn.Dropout(dropout_rate)
        self.size = size
        self.normalize_before = normalize_before

    def forward(
        self,
        x: torch.Tensor,
        mask: torch.Tensor,
        pos_emb: torch.Tensor,
        mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
        att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
        cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """Compute encoded features.

        Args:
            x (torch.Tensor): (#batch, time, size)
            mask (torch.Tensor): Mask tensor for the input (#batch, time,time),
                (0, 0, 0) means fake mask.
            pos_emb (torch.Tensor): positional encoding, must not be None
                for ConformerEncoderLayer.
            mask_pad (torch.Tensor): batch padding mask used for conv module.
                (#batch, 1,time), (0, 0, 0) means fake mask.
            att_cache (torch.Tensor): Cache tensor of the KEY & VALUE
                (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.
            cnn_cache (torch.Tensor): Convolution cache in conformer layer
                (#batch=1, size, cache_t2)

        Returns:
            torch.Tensor: Output tensor (#batch, time, size).
            torch.Tensor: Mask tensor (#batch, time, time).
            torch.Tensor: att_cache tensor,
                (#batch=1, head, cache_t1 + time, d_k * 2).
            torch.Tensor: cnn_cache tensor (#batch, size, cache_t2).
        """
        # whether to use macaron style (half-weighted FFN before attention)
        if self.feed_forward_macaron is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_ff_macaron(x)
            x = residual + self.ff_scale * self.dropout(
                self.feed_forward_macaron(x))
            if not self.normalize_before:
                x = self.norm_ff_macaron(x)
        # multi-headed self-attention module
        residual = x
        if self.normalize_before:
            x = self.norm_mha(x)
        x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb,
                                              att_cache)
        x = residual + self.dropout(x_att)
        if not self.normalize_before:
            x = self.norm_mha(x)
        # convolution module
        # Fake new cnn cache here, and then change it in conv_module
        new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)
        if self.conv_module is not None:
            residual = x
            if self.normalize_before:
                x = self.norm_conv(x)
            x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache)
            x = residual + self.dropout(x)
            if not self.normalize_before:
                x = self.norm_conv(x)
        # feed forward module (scaled by 0.5 in macaron style)
        residual = x
        if self.normalize_before:
            x = self.norm_ff(x)
        x = residual + self.ff_scale * self.dropout(self.feed_forward(x))
        if not self.normalize_before:
            x = self.norm_ff(x)
        # Final normalization is only registered (and applied) when a conv
        # module is present.
        if self.conv_module is not None:
            x = self.norm_final(x)
        return x, mask, new_att_cache, new_cnn_cache
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/transformer/encoder_layer.py",
"license": "MIT License",
"lines": 212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/transformer/positionwise_feed_forward.py | # Copyright (c) 2019 Shigeki Karita
# 2020 Mobvoi Inc (Binbin Zhang)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Positionwise feed forward layer definition."""
import torch
class PositionwiseFeedForward(torch.nn.Module):
    """Position-wise feed forward layer.

    Applied independently at every position of the sequence; the output
    dimension equals the input dimension.

    Args:
        idim (int): Input dimenstion.
        hidden_units (int): The number of hidden units.
        dropout_rate (float): Dropout rate.
        activation (torch.nn.Module): Activation function
    """

    def __init__(
        self,
        idim: int,
        hidden_units: int,
        dropout_rate: float,
        activation: torch.nn.Module = torch.nn.ReLU(),
    ):
        """Construct a PositionwiseFeedForward object."""
        super(PositionwiseFeedForward, self).__init__()
        self.w_1 = torch.nn.Linear(idim, hidden_units)
        self.activation = activation
        self.dropout = torch.nn.Dropout(dropout_rate)
        self.w_2 = torch.nn.Linear(hidden_units, idim)

    def forward(self, xs: torch.Tensor) -> torch.Tensor:
        """Project up, activate, regularize, and project back down.

        Args:
            xs: input tensor (B, L, D)

        Returns:
            output tensor, (B, L, D)
        """
        hidden = self.activation(self.w_1(xs))
        return self.w_2(self.dropout(hidden))
class MoEFFNLayer(torch.nn.Module):
    """Mixture-of-experts position-wise feed forward layer.

    See also figure 1 in https://arxiv.org/pdf/2305.15663.pdf
    The output dim is same with the input dim.
    Modified from https://github.com/Lightning-AI/lit-gpt/pull/823
                  https://github.com/mistralai/mistral-src/blob/b46d6/moe_one_file_ref.py#L203-L219

    Args:
        n_expert: number of expert.
        n_expert_per_token: The actual number of experts used for each frame
        idim (int): Input dimenstion.
        hidden_units (int): The number of hidden units.
        dropout_rate (float): Dropout rate.
        activation (torch.nn.Module): Activation function
    """

    def __init__(
        self,
        n_expert: int,
        n_expert_per_token: int,
        idim: int,
        hidden_units: int,
        dropout_rate: float,
        activation: torch.nn.Module = torch.nn.ReLU(),
    ):
        super(MoEFFNLayer, self).__init__()
        self.gate = torch.nn.Linear(idim, n_expert, bias=False)
        self.experts = torch.nn.ModuleList(
            PositionwiseFeedForward(idim, hidden_units, dropout_rate,
                                    activation) for _ in range(n_expert))
        self.n_expert_per_token = n_expert_per_token

    def forward(self, xs: torch.Tensor) -> torch.Tensor:
        """Route each frame to its top-k experts and mix their outputs.

        Args:
            xs: input tensor (B, L, D)

        Returns:
            output tensor, (B, L, D)
        """
        B, L, D = xs.size()  # batch, sequence length, embedding dim (idim)
        flat = xs.view(-1, D)  # (B*L, D)
        # Router scores for every expert, keep only the top-k per frame.
        router_scores = self.gate(flat)  # (B*L, n_expert)
        top_logits, top_indices = torch.topk(
            router_scores, self.n_expert_per_token)
        # Softmax over the selected experts only; computed in float32 and
        # cast back to the input dtype.
        weights = torch.nn.functional.softmax(
            top_logits, dim=1,
            dtype=torch.float).to(dtype=xs.dtype)  # (B*L, n_expert_per_token)
        mixed = torch.zeros_like(flat)  # (B*L, D)
        for expert_id, expert in enumerate(self.experts):
            frame_idx, slot_idx = torch.where(top_indices == expert_id)
            mixed[frame_idx] += weights[frame_idx, slot_idx, None] * expert(
                flat[frame_idx])
        return mixed.view(B, L, D)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/transformer/positionwise_feed_forward.py",
"license": "MIT License",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/transformer/subsampling.py | # Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
# 2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
"""Subsampling layer definition."""
from typing import Tuple, Union
import torch
class BaseSubsampling(torch.nn.Module):
    """Common base for subsampling front-ends.

    Subclasses are expected to set ``self.pos_enc`` and override
    ``subsampling_rate`` / ``right_context`` to describe their behaviour.
    """

    def __init__(self):
        super().__init__()
        # Defaults describe "no subsampling"; subclasses override as needed.
        self.right_context = 0
        self.subsampling_rate = 1

    def position_encoding(self, offset: Union[int, torch.Tensor],
                          size: int) -> torch.Tensor:
        """Delegate positional-encoding lookup to the wrapped encoder."""
        return self.pos_enc.position_encoding(offset, size)
class EmbedinigNoSubsampling(BaseSubsampling):
    """Embedding input without subsampling.

    NOTE(review): the class-name misspelling ("Embedinig") is kept for
    backward compatibility with existing callers.
    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        super().__init__()
        self.embed = torch.nn.Embedding(idim, odim)
        self.pos_enc = pos_enc_class

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Embed input tokens and attach positional encoding.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: embedded tensor (#batch, time', odim),
                where time' = time .
            torch.Tensor: positional embedding.
            torch.Tensor: unchanged input mask (#batch, 1, time').
        """
        embedded = self.embed(x)
        encoded, pos_emb = self.pos_enc(embedded, offset)
        return encoded, pos_emb, x_mask
class LinearNoSubsampling(BaseSubsampling):
    """Linear transform of the input without subsampling.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct an linear object."""
        super().__init__()
        self.out = torch.nn.Sequential(
            torch.nn.Linear(idim, odim),
            torch.nn.LayerNorm(odim, eps=1e-5),
            torch.nn.Dropout(dropout_rate),
        )
        self.pos_enc = pos_enc_class
        self.right_context = 0
        self.subsampling_rate = 1

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Project the input and attach positional encoding.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: linear input tensor (#batch, time', odim),
                where time' = time .
            torch.Tensor: positional embedding.
            torch.Tensor: unchanged input mask (#batch, 1, time').
        """
        projected = self.out(x)
        encoded, pos_emb = self.pos_enc(projected, offset)
        return encoded, pos_emb, x_mask
class Conv1dSubsampling2(BaseSubsampling):
    """Convolutional 1D subsampling (to 1/2 length).

    It is designed for Whisper, ref:
    https://github.com/openai/whisper/blob/main/whisper/model.py

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct an Conv1dSubsampling2 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv1d(idim, odim, kernel_size=3, padding=1),
            torch.nn.GELU(),
            torch.nn.Conv1d(odim, odim, kernel_size=3, stride=2, padding=1),
            torch.nn.GELU(),
        )
        self.pos_enc = pos_enc_class
        # The right context for every conv layer is computed by:
        # (kernel_size - 1) * frame_rate_of_this_layer
        self.subsampling_rate = 2
        # 4 = (3 - 1) * 1 + (3 - 1) * 1
        self.right_context = 4

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x by a factor of 2 along time.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 2.
            torch.Tensor: positional encoding
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 2.
        """
        num_frames = x.size(1)
        feats = x.transpose(1, 2)  # (b, f, t)
        feats = self.conv(feats)
        feats = feats.transpose(1, 2)  # (b, t, f)
        encoded, pos_emb = self.pos_enc(feats, offset)
        # Subsample the mask in lockstep with the stride-2 conv output.
        return encoded, pos_emb, x_mask[:, :, (num_frames + 1) % 2::2]
class Conv2dSubsampling4(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/4 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct an Conv2dSubsampling4 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
        )
        self.out = torch.nn.Sequential(
            torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim))
        self.pos_enc = pos_enc_class
        # The right context for every conv layer is computed by:
        # (kernel_size - 1) * frame_rate_of_this_layer
        self.subsampling_rate = 4
        # 6 = (3 - 1) * 1 + (3 - 1) * 2
        self.right_context = 6

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x by a factor of 4 along time.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 4.
            torch.Tensor: positional encoding
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 4.
        """
        feats = x.unsqueeze(1)  # (b, c=1, t, f)
        feats = self.conv(feats)
        b, c, t, f = feats.size()
        projected = self.out(
            feats.transpose(1, 2).contiguous().view(b, t, c * f))
        encoded, pos_emb = self.pos_enc(projected, offset)
        # Two stride-2 convs -> subsample the mask twice.
        return encoded, pos_emb, x_mask[:, :, 2::2][:, :, 2::2]
class Conv2dSubsampling6(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/6 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
        pos_enc (torch.nn.Module): Custom position encoding layer.
    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct an Conv2dSubsampling6 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 5, 3),
            torch.nn.ReLU(),
        )
        self.linear = torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3),
                                      odim)
        self.pos_enc = pos_enc_class
        # 10 = (3 - 1) * 1 + (5 - 1) * 2
        self.subsampling_rate = 6
        self.right_context = 10

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x by a factor of 6 along time.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 6.
            torch.Tensor: positional encoding
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 6.
        """
        feats = x.unsqueeze(1)  # (b, c, t, f)
        feats = self.conv(feats)
        b, c, t, f = feats.size()
        projected = self.linear(
            feats.transpose(1, 2).contiguous().view(b, t, c * f))
        encoded, pos_emb = self.pos_enc(projected, offset)
        # Stride-2 conv then stride-3 conv -> matching mask subsampling.
        return encoded, pos_emb, x_mask[:, :, 2::2][:, :, 4::3]
class Conv2dSubsampling8(BaseSubsampling):
    """Convolutional 2D subsampling (to 1/8 length).

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct an Conv2dSubsampling8 object."""
        super().__init__()
        self.conv = torch.nn.Sequential(
            torch.nn.Conv2d(1, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(odim, odim, 3, 2),
            torch.nn.ReLU(),
        )
        self.linear = torch.nn.Linear(
            odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim)
        self.pos_enc = pos_enc_class
        self.subsampling_rate = 8
        # 14 = (3 - 1) * 1 + (3 - 1) * 2 + (3 - 1) * 4
        self.right_context = 14

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Subsample x by a factor of 8 along time.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: Subsampled tensor (#batch, time', odim),
                where time' = time // 8.
            torch.Tensor: positional encoding
            torch.Tensor: Subsampled mask (#batch, 1, time'),
                where time' = time // 8.
        """
        feats = x.unsqueeze(1)  # (b, c, t, f)
        feats = self.conv(feats)
        b, c, t, f = feats.size()
        projected = self.linear(
            feats.transpose(1, 2).contiguous().view(b, t, c * f))
        encoded, pos_emb = self.pos_enc(projected, offset)
        # Three stride-2 convs -> subsample the mask three times.
        return encoded, pos_emb, x_mask[:, :, 2::2][:, :, 2::2][:, :, 2::2]
class LegacyLinearNoSubsampling(BaseSubsampling):
    """Linear transform of the input without subsampling (legacy variant).

    Differs from LinearNoSubsampling by a trailing ReLU in the projection.

    Args:
        idim (int): Input dimension.
        odim (int): Output dimension.
        dropout_rate (float): Dropout rate.
    """

    def __init__(self, idim: int, odim: int, dropout_rate: float,
                 pos_enc_class: torch.nn.Module):
        """Construct an linear object."""
        super().__init__()
        self.out = torch.nn.Sequential(
            torch.nn.Linear(idim, odim),
            torch.nn.LayerNorm(odim, eps=1e-5),
            torch.nn.Dropout(dropout_rate),
            torch.nn.ReLU(),
        )
        self.pos_enc = pos_enc_class
        self.right_context = 0
        self.subsampling_rate = 1

    def forward(
        self,
        x: torch.Tensor,
        x_mask: torch.Tensor,
        offset: Union[int, torch.Tensor] = 0
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """Project the input and attach positional encoding.

        Args:
            x (torch.Tensor): Input tensor (#batch, time, idim).
            x_mask (torch.Tensor): Input mask (#batch, 1, time).

        Returns:
            torch.Tensor: linear input tensor (#batch, time', odim),
                where time' = time .
            torch.Tensor: positional embedding.
            torch.Tensor: unchanged input mask (#batch, 1, time').
        """
        projected = self.out(x)
        encoded, pos_emb = self.pos_enc(projected, offset)
        return encoded, pos_emb, x_mask
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/transformer/subsampling.py",
"license": "MIT License",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/transformer/upsample_encoder.py | # Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
# 2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
# 2024 Alibaba Inc (Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from ESPnet(https://github.com/espnet/espnet)
"""Encoder definition."""
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
from .convolution import ConvolutionModule
from .encoder_layer import ConformerEncoderLayer
from .positionwise_feed_forward import PositionwiseFeedForward
from ..utils.class_utils import (
COSYVOICE_EMB_CLASSES,
COSYVOICE_SUBSAMPLE_CLASSES,
COSYVOICE_ATTENTION_CLASSES,
COSYVOICE_ACTIVATION_CLASSES,
)
from ..utils.mask import make_pad_mask
from ..utils.mask import add_optional_chunk_mask
class Upsample1D(nn.Module):
    """A 1D upsampling layer: nearest-neighbour repeat followed by a conv.

    Parameters:
        channels (`int`):
            number of channels in the inputs.
        out_channels (`int`):
            number of output channels.
        stride (`int`, default `2`):
            upsampling factor.
    """

    def __init__(self, channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels
        self.stride = stride
        # In this mode, first repeat interpolate, then conv with stride=1.
        self.conv = nn.Conv1d(self.channels, self.out_channels,
                              stride * 2 + 1, stride=1, padding=0)

    def forward(self, inputs: torch.Tensor, input_lengths: torch.Tensor):
        upsampled = F.interpolate(inputs, scale_factor=float(self.stride),
                                  mode="nearest")
        # Left-only zero padding so the conv output keeps length
        # stride * T and does not look ahead.
        padded = F.pad(upsampled, (self.stride * 2, 0), value=0.0)
        return self.conv(padded), input_lengths * self.stride
class PreLookaheadLayer(nn.Module):
    """Residual conv block that peeks ``pre_lookahead_len`` frames ahead.

    The first conv consumes right-padded future context; the second conv is
    left-padded so it adds no further lookahead; a residual connection
    preserves the input.
    """

    def __init__(self, channels: int, pre_lookahead_len: int = 1):
        super().__init__()
        self.channels = channels
        self.pre_lookahead_len = pre_lookahead_len
        self.conv1 = nn.Conv1d(
            channels, channels,
            kernel_size=pre_lookahead_len + 1,
            stride=1, padding=0,
        )
        self.conv2 = nn.Conv1d(
            channels, channels,
            kernel_size=3, stride=1, padding=0,
        )

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """
        inputs: (batch_size, seq_len, channels)
        """
        hidden = inputs.transpose(1, 2).contiguous()
        # Right-pad so conv1 can look ahead pre_lookahead_len frames.
        hidden = F.pad(hidden, (0, self.pre_lookahead_len),
                       mode='constant', value=0.0)
        hidden = F.leaky_relu(self.conv1(hidden))
        # Left-pad so conv2 introduces no additional lookahead.
        hidden = F.pad(hidden, (2, 0), mode='constant', value=0.0)
        hidden = self.conv2(hidden)
        hidden = hidden.transpose(1, 2).contiguous()
        # Residual connection.
        return hidden + inputs
class UpsampleConformerEncoder(torch.nn.Module):
    """Conformer encoder that upsamples its input 2x in time.

    Pipeline (see `forward`): input embedding -> pre-lookahead layer ->
    `num_blocks` conformer layers -> 2x nearest-neighbor upsampling -> second
    embedding -> 4 more conformer layers -> optional final LayerNorm.
    """

    def __init__(
        self,
        input_size: int = 512,
        output_size: int = 512,
        attention_heads: int = 8,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.1,
        input_layer: str = "linear",
        pos_enc_layer_type: str = "rel_pos_espnet",
        normalize_before: bool = True,
        static_chunk_size: int = 0,
        use_dynamic_chunk: bool = False,
        global_cmvn: torch.nn.Module = None,
        use_dynamic_left_chunk: bool = False,
        positionwise_conv_kernel_size: int = 1,
        macaron_style: bool = False,
        selfattention_layer_type: str = "rel_selfattn",
        activation_type: str = "swish",
        use_cnn_module: bool = False,
        cnn_module_kernel: int = 15,
        causal: bool = False,
        cnn_module_norm: str = "batch_norm",
        key_bias: bool = True,
        gradient_checkpointing: bool = False,
    ):
        """
        Args:
            input_size (int): input dim
            output_size (int): dimension of attention
            attention_heads (int): the number of heads of multi head attention
            linear_units (int): the hidden units number of position-wise feed
                forward
            num_blocks (int): the number of decoder blocks
            dropout_rate (float): dropout rate
            attention_dropout_rate (float): dropout rate in attention
            positional_dropout_rate (float): dropout rate after adding
                positional encoding
            input_layer (str): input layer type.
                optional [linear, conv2d, conv2d6, conv2d8]
            pos_enc_layer_type (str): Encoder positional encoding layer type.
                optional [abs_pos, scaled_abs_pos, rel_pos, no_pos]
            normalize_before (bool):
                True: use layer_norm before each sub-block of a layer.
                False: use layer_norm after each sub-block of a layer.
            static_chunk_size (int): chunk size for static chunk training and
                decoding
            use_dynamic_chunk (bool): whether use dynamic chunk size for
                training or not, You can only use fixed chunk(chunk_size > 0)
                or dynamic chunk size(use_dynamic_chunk = True)
            global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module
            use_dynamic_left_chunk (bool): whether use dynamic left chunk in
                dynamic chunk training
            positionwise_conv_kernel_size (int): currently unused; kept for
                config compatibility
            key_bias: whether use bias in attention.linear_k, False for whisper models.
            gradient_checkpointing: rerunning a forward-pass segment for each
                checkpointed segment during backward.
        """
        super().__init__()
        self._output_size = output_size
        self.global_cmvn = global_cmvn
        # First-stage input embedding (projection + positional encoding).
        self.embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
            input_size,
            output_size,
            dropout_rate,
            COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
                                                      positional_dropout_rate),
        )
        self.normalize_before = normalize_before
        self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)
        self.static_chunk_size = static_chunk_size
        self.use_dynamic_chunk = use_dynamic_chunk
        self.use_dynamic_left_chunk = use_dynamic_left_chunk
        self.gradient_checkpointing = gradient_checkpointing
        activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]()
        # self-attention module definition
        encoder_selfattn_layer_args = (
            attention_heads,
            output_size,
            attention_dropout_rate,
            key_bias,
        )
        # feed-forward module definition
        positionwise_layer_args = (
            output_size,
            linear_units,
            dropout_rate,
            activation,
        )
        # convolution module definition
        convolution_layer_args = (output_size, cnn_module_kernel, activation,
                                  cnn_module_norm, causal)
        # FIX: channels were hard-coded to 512; use output_size so non-default
        # widths work. Behavior is unchanged for the default output_size=512.
        self.pre_lookahead_layer = PreLookaheadLayer(channels=output_size, pre_lookahead_len=3)
        self.encoders = torch.nn.ModuleList([
            ConformerEncoderLayer(
                output_size,
                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
                    *encoder_selfattn_layer_args),
                PositionwiseFeedForward(*positionwise_layer_args),
                PositionwiseFeedForward(
                    *positionwise_layer_args) if macaron_style else None,
                ConvolutionModule(
                    *convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
            ) for _ in range(num_blocks)
        ])
        # 2x temporal upsampling between the two conformer stacks (same FIX).
        self.up_layer = Upsample1D(channels=output_size, out_channels=output_size, stride=2)
        # Second-stage embedding applied after upsampling.
        self.up_embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer](
            input_size,
            output_size,
            dropout_rate,
            COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size,
                                                      positional_dropout_rate),
        )
        self.up_encoders = torch.nn.ModuleList([
            ConformerEncoderLayer(
                output_size,
                COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](
                    *encoder_selfattn_layer_args),
                PositionwiseFeedForward(*positionwise_layer_args),
                PositionwiseFeedForward(
                    *positionwise_layer_args) if macaron_style else None,
                ConvolutionModule(
                    *convolution_layer_args) if use_cnn_module else None,
                dropout_rate,
                normalize_before,
            ) for _ in range(4)  # post-upsample stack depth is fixed at 4
        ])

    def output_size(self) -> int:
        """Return the encoder output feature dimension."""
        return self._output_size

    def forward(
        self,
        xs: torch.Tensor,
        xs_lens: torch.Tensor,
        decoding_chunk_size: int = 0,
        num_decoding_left_chunks: int = -1,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Embed positions in tensor.
        Args:
            xs: padded input tensor (B, T, D)
            xs_lens: input length (B)
            decoding_chunk_size: decoding chunk size for dynamic chunk
                0: default for training, use random dynamic chunk.
                <0: for decoding, use full chunk.
                >0: for decoding, use fixed chunk size as set.
            num_decoding_left_chunks: number of left chunks, this is for decoding,
                the chunk size is decoding_chunk_size.
                >=0: use num_decoding_left_chunks
                <0: use all left chunks
        Returns:
            encoder output tensor xs, and subsampled masks
            xs: padded output tensor (B, T' ~= T/subsample_rate, D)
            masks: torch.Tensor batch padding mask after subsample
                (B, 1, T' ~= T/subsample_rate)
        NOTE(xcsong):
            We pass the `__call__` method of the modules instead of `forward` to the
            checkpointing API because `__call__` attaches all the hooks of the module.
            https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2
        """
        T = xs.size(1)
        masks = ~make_pad_mask(xs_lens, T).unsqueeze(1)  # (B, 1, T)
        if self.global_cmvn is not None:
            xs = self.global_cmvn(xs)
        xs, pos_emb, masks = self.embed(xs, masks)
        mask_pad = masks  # (B, 1, T/subsample_rate)
        chunk_masks = add_optional_chunk_mask(xs, masks,
                                              self.use_dynamic_chunk,
                                              self.use_dynamic_left_chunk,
                                              decoding_chunk_size,
                                              self.static_chunk_size,
                                              num_decoding_left_chunks)
        # lookahead + conformer encoder
        xs = self.pre_lookahead_layer(xs)
        xs = self.forward_layers(xs, chunk_masks, pos_emb, mask_pad)
        # upsample + conformer encoder
        xs = xs.transpose(1, 2).contiguous()
        xs, xs_lens = self.up_layer(xs, xs_lens)
        xs = xs.transpose(1, 2).contiguous()
        # Recompute padding masks and positional encodings for the new length.
        T = xs.size(1)
        masks = ~make_pad_mask(xs_lens, T).unsqueeze(1)  # (B, 1, T)
        xs, pos_emb, masks = self.up_embed(xs, masks)
        mask_pad = masks  # (B, 1, T/subsample_rate)
        # Scale the static chunk size by the upsampling factor so chunk
        # boundaries stay aligned with the pre-upsample stage.
        chunk_masks = add_optional_chunk_mask(xs, masks,
                                              self.use_dynamic_chunk,
                                              self.use_dynamic_left_chunk,
                                              decoding_chunk_size,
                                              self.static_chunk_size * self.up_layer.stride,
                                              num_decoding_left_chunks)
        xs = self.forward_up_layers(xs, chunk_masks, pos_emb, mask_pad)
        if self.normalize_before:
            xs = self.after_norm(xs)
        # Here we assume the mask is not changed in encoder layers, so just
        # return the masks before encoder layers, and the masks will be used
        # for cross attention with decoder later
        return xs, masks

    def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
                       pos_emb: torch.Tensor,
                       mask_pad: torch.Tensor) -> torch.Tensor:
        """Run the first-stage conformer stack."""
        for layer in self.encoders:
            xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
        return xs

    def forward_up_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor,
                          pos_emb: torch.Tensor,
                          mask_pad: torch.Tensor) -> torch.Tensor:
        """Run the post-upsampling conformer stack."""
        for layer in self.up_encoders:
            xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad)
        return xs
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/transformer/upsample_encoder.py",
"license": "MIT License",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/utils/class_utils.py | # Copyright [2023-11-28] <sxc19@mails.tsinghua.edu.cn, Xingchen Song>
# 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..transformer.activation import Swish
from ..transformer.subsampling import (
LinearNoSubsampling,
EmbedinigNoSubsampling,
Conv1dSubsampling2,
Conv2dSubsampling4,
Conv2dSubsampling6,
Conv2dSubsampling8,
)
from ..transformer.embedding import (
PositionalEncoding,
RelPositionalEncoding,
WhisperPositionalEncoding,
LearnablePositionalEncoding,
NoPositionalEncoding)
from ..transformer.attention import (MultiHeadedAttention,
RelPositionMultiHeadedAttention)
from ..transformer.embedding import EspnetRelPositionalEncoding
from ..transformer.subsampling import LegacyLinearNoSubsampling
# Activation functions selectable via config strings; "swish" uses the native
# torch.nn.SiLU when available and falls back to the local Swish otherwise.
COSYVOICE_ACTIVATION_CLASSES = {
    "hardtanh": torch.nn.Hardtanh,
    "tanh": torch.nn.Tanh,
    "relu": torch.nn.ReLU,
    "selu": torch.nn.SELU,
    "swish": getattr(torch.nn, "SiLU", Swish),
    "gelu": torch.nn.GELU,
}

# Input/subsampling front-ends, keyed by the `input_layer` config string.
COSYVOICE_SUBSAMPLE_CLASSES = {
    "linear": LinearNoSubsampling,
    "linear_legacy": LegacyLinearNoSubsampling,
    "embed": EmbedinigNoSubsampling,
    "conv1d2": Conv1dSubsampling2,
    "conv2d": Conv2dSubsampling4,
    "conv2d6": Conv2dSubsampling6,
    "conv2d8": Conv2dSubsampling8,
    'paraformer_dummy': torch.nn.Identity
}

# Positional-encoding layers, keyed by the `pos_enc_layer_type` config string.
COSYVOICE_EMB_CLASSES = {
    "embed": PositionalEncoding,
    "abs_pos": PositionalEncoding,
    "rel_pos": RelPositionalEncoding,
    "rel_pos_espnet": EspnetRelPositionalEncoding,
    "no_pos": NoPositionalEncoding,
    "abs_pos_whisper": WhisperPositionalEncoding,
    "embed_learnable_pe": LearnablePositionalEncoding,
}

# Self-attention variants, keyed by the `selfattention_layer_type` config string.
COSYVOICE_ATTENTION_CLASSES = {
    "selfattn": MultiHeadedAttention,
    "rel_selfattn": RelPositionMultiHeadedAttention,
}
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/utils/class_utils.py",
"license": "MIT License",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/utils/mask.py | # Copyright (c) 2019 Shigeki Karita
# 2020 Mobvoi Inc (Binbin Zhang)
# 2024 Alibaba Inc (authors: Xiang Lyu)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
'''
def subsequent_mask(
size: int,
device: torch.device = torch.device("cpu"),
) -> torch.Tensor:
"""Create mask for subsequent steps (size, size).
This mask is used only in decoder which works in an auto-regressive mode.
This means the current step could only do attention with its left steps.
In encoder, fully attention is used when streaming is not necessary and
the sequence is not long. In this case, no attention mask is needed.
When streaming is need, chunk-based attention is used in encoder. See
subsequent_chunk_mask for the chunk-based attention mask.
Args:
size (int): size of mask
str device (str): "cpu" or "cuda" or torch.Tensor.device
dtype (torch.device): result dtype
Returns:
torch.Tensor: mask
Examples:
>>> subsequent_mask(3)
[[1, 0, 0],
[1, 1, 0],
[1, 1, 1]]
"""
ret = torch.ones(size, size, device=device, dtype=torch.bool)
return torch.tril(ret)
'''
def subsequent_chunk_mask(
        size: int,
        chunk_size: int,
        num_left_chunks: int = -1,
        device: torch.device = torch.device("cpu"),
) -> torch.Tensor:
    """Create mask for subsequent steps (size, size) with chunk size,
       this is for streaming encoder
    Args:
        size (int): size of mask
        chunk_size (int): size of chunk
        num_left_chunks (int): number of left chunks
            <0: use full chunk
            >=0: use num_left_chunks
        device (torch.device): "cpu" or "cuda" or torch.Tensor.device
    Returns:
        torch.Tensor: bool mask
    Examples:
        >>> subsequent_chunk_mask(4, 2)
        [[1, 1, 0, 0],
         [1, 1, 0, 0],
         [1, 1, 1, 1],
         [1, 1, 1, 1]]
    """
    # NOTE this modified implementation meets onnx export requirements, but it
    # doesn't support num_left_chunks; that is acceptable once inference caching
    # is in place, and the parameter is kept only for interface compatibility.
    positions = torch.arange(size, device=device)
    # For each query row, the (exclusive) end of the chunk it belongs to is the
    # last position it may attend to.
    chunk_ends = (torch.div(positions, chunk_size, rounding_mode='trunc') + 1) * chunk_size
    # mask[i, j] = True iff key position j falls before the end of row i's chunk.
    return positions.unsqueeze(0) < chunk_ends.unsqueeze(1)
def add_optional_chunk_mask(xs: torch.Tensor,
                            masks: torch.Tensor,
                            use_dynamic_chunk: bool,
                            use_dynamic_left_chunk: bool,
                            decoding_chunk_size: int,
                            static_chunk_size: int,
                            num_decoding_left_chunks: int,
                            enable_full_context: bool = True):
    """ Apply optional mask for encoder.
    Args:
        xs (torch.Tensor): padded input, (B, L, D), L for max length
        masks (torch.Tensor): mask for xs, (B, 1, L)
        use_dynamic_chunk (bool): whether to use dynamic chunk or not
        use_dynamic_left_chunk (bool): whether to use dynamic left chunk for
            training.
        decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's
            0: default for training, use random dynamic chunk.
            <0: for decoding, use full chunk.
            >0: for decoding, use fixed chunk size as set.
        static_chunk_size (int): chunk size for static chunk training/decoding
            if it's greater than 0, if use_dynamic_chunk is true,
            this parameter will be ignored
        num_decoding_left_chunks: number of left chunks, this is for decoding,
            the chunk size is decoding_chunk_size.
            >=0: use num_decoding_left_chunks
            <0: use all left chunks
        enable_full_context (bool):
            True: chunk size is either [1, 25] or full context(max_len)
            False: chunk size ~ U[1, 25]
    Returns:
        torch.Tensor: chunk mask of the input xs.
    """
    # Whether to use chunk mask or not
    if use_dynamic_chunk:
        max_len = xs.size(1)
        if decoding_chunk_size < 0:
            # decoding with full context
            chunk_size = max_len
            num_left_chunks = -1
        elif decoding_chunk_size > 0:
            # decoding with a fixed chunk size
            chunk_size = decoding_chunk_size
            num_left_chunks = num_decoding_left_chunks
        else:
            # training: chunk size is either [1, 25] or full context(max_len).
            # Since we use 4 times subsampling and allow up to 1s(100 frames)
            # delay, the maximum frame is 100 / 4 = 25.
            chunk_size = torch.randint(1, max_len, (1, )).item()
            num_left_chunks = -1
            if chunk_size > max_len // 2 and enable_full_context:
                chunk_size = max_len
            else:
                chunk_size = chunk_size % 25 + 1
                if use_dynamic_left_chunk:
                    max_left_chunks = (max_len - 1) // chunk_size
                    num_left_chunks = torch.randint(0, max_left_chunks,
                                                    (1, )).item()
        chunk_masks = subsequent_chunk_mask(xs.size(1), chunk_size,
                                            num_left_chunks,
                                            xs.device)  # (L, L)
        chunk_masks = chunk_masks.unsqueeze(0)  # (1, L, L)
        chunk_masks = masks & chunk_masks  # (B, L, L)
    elif static_chunk_size > 0:
        num_left_chunks = num_decoding_left_chunks
        chunk_masks = subsequent_chunk_mask(xs.size(1), static_chunk_size,
                                            num_left_chunks,
                                            xs.device)  # (L, L)
        chunk_masks = chunk_masks.unsqueeze(0)  # (1, L, L)
        chunk_masks = masks & chunk_masks  # (B, L, L)
    else:
        chunk_masks = masks
    assert chunk_masks.dtype == torch.bool
    if (chunk_masks.sum(dim=-1) == 0).sum().item() != 0:
        # BUGFIX: `logging` was referenced here without being imported anywhere
        # in this module, so this warning path raised NameError. Import locally
        # to keep the fix self-contained.
        import logging
        logging.warning('get chunk_masks all false at some timestep, force set to true, make sure they are masked in future computation!')
        chunk_masks[chunk_masks.sum(dim=-1) == 0] = True
    return chunk_masks
def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    """Make mask tensor containing indices of padded part.
    See description of make_non_pad_mask.
    Args:
        lengths (torch.Tensor): Batch of lengths (B,).
        max_len (int): fixed mask width; when 0, use lengths.max().
    Returns:
        torch.Tensor: Bool mask (B, max_len), True at padded positions.
    Examples:
        >>> lengths = [5, 3, 2]
        >>> make_pad_mask(lengths)
        masks = [[0, 0, 0, 0 ,0],
                 [0, 0, 0, 1, 1],
                 [0, 0, 1, 1, 1]]
    """
    lengths = lengths.long()
    width = max_len if max_len > 0 else int(lengths.max().item())
    positions = torch.arange(0,
                             width,
                             dtype=torch.int64,
                             device=lengths.device)
    # Broadcast (1, width) against (B, 1): a position is padding iff it lies
    # at or beyond the sequence's length.
    return positions.unsqueeze(0) >= lengths.unsqueeze(-1)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/utils/mask.py",
"license": "MIT License",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/s3gen/utils/mel.py | """mel-spectrogram extraction in Matcha-TTS"""
import logging
from librosa.filters import mel as librosa_mel_fn
import torch
import numpy as np
logger = logging.getLogger(__name__)
# NOTE: they declared these global vars
mel_basis = {}
hann_window = {}
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
    """Log dynamic-range compression: log(clamp(x, min=clip_val) * C)."""
    clamped = torch.clamp(x, min=clip_val)
    return torch.log(clamped * C)
def spectral_normalize_torch(magnitudes):
    """Normalize a magnitude spectrogram via log dynamic-range compression."""
    return dynamic_range_compression_torch(magnitudes)
"""
feat_extractor: !name:matcha.utils.audio.mel_spectrogram
n_fft: 1920
num_mels: 80
sampling_rate: 24000
hop_size: 480
win_size: 1920
fmin: 0
fmax: 8000
center: False
"""
def mel_spectrogram(y, n_fft=1920, num_mels=80, sampling_rate=24000, hop_size=480, win_size=1920,
                    fmin=0, fmax=8000, center=False):
    """Copied from https://github.com/shivammehta25/Matcha-TTS/blob/main/matcha/utils/audio.py
    Set default values according to Cosyvoice's config.

    Args:
        y: waveform as np.ndarray or torch.Tensor, shape (T,) or (B, T),
            expected within [-1.0, 1.0] (out-of-range values are only warned).
        n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax:
            STFT / mel filterbank settings (defaults follow CosyVoice).
        center: passed through to torch.stft.

    Returns:
        torch.Tensor: log-compressed mel spectrogram, shape (B, num_mels, T').
    """
    if isinstance(y, np.ndarray):
        y = torch.tensor(y).float()
    if len(y.shape) == 1:
        # Promote mono (T,) to a batch of one: (1, T).
        y = y[None, ]
    # Debug: Check for audio clipping (values outside [-1.0, 1.0] range)
    min_val = torch.min(y)
    max_val = torch.max(y)
    if min_val < -1.0 or max_val > 1.0:
        logger.warning(f"Audio values outside normalized range: min={min_val.item():.4f}, max={max_val.item():.4f}")
    global mel_basis, hann_window  # pylint: disable=global-statement,global-variable-not-assigned
    # Cache the mel filterbank per (fmax, device) and the window per device.
    # NOTE(review): the cache key ignores n_fft/num_mels/win_size — this assumes
    # those stay constant within a process; confirm before varying them per call.
    if f"{str(fmax)}_{str(y.device)}" not in mel_basis:
        mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax)
        mel_basis[str(fmax) + "_" + str(y.device)] = torch.from_numpy(mel).float().to(y.device)
        hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
    # Reflect-pad both sides by (n_fft - hop_size) / 2 before the uncentered STFT.
    y = torch.nn.functional.pad(
        y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect"
    )
    y = y.squeeze(1)
    spec = torch.view_as_real(
        torch.stft(
            y,
            n_fft,
            hop_length=hop_size,
            win_length=win_size,
            window=hann_window[str(y.device)],
            center=center,
            pad_mode="reflect",
            normalized=False,
            onesided=True,
            return_complex=True,
        )
    )
    # Magnitude with a small epsilon for numerical stability, then mel + log.
    spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))
    spec = torch.matmul(mel_basis[str(fmax) + "_" + str(y.device)], spec)
    spec = spectral_normalize_torch(spec)
    return spec
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3gen/utils/mel.py",
"license": "MIT License",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:src/chatterbox/models/s3tokenizer/s3tokenizer.py | from typing import List, Tuple
import numpy as np
import librosa
import torch
import torch.nn.functional as F
from s3tokenizer.utils import padding
from s3tokenizer.model_v2 import (
S3TokenizerV2,
ModelConfig,
)
# Sampling rate of the inputs to S3TokenizerV2
S3_SR = 16_000
S3_HOP = 160  # mel-spectrogram hop in samples -> 100 frames/sec
S3_TOKEN_HOP = 640  # samples per speech token -> 25 tokens/sec
S3_TOKEN_RATE = 25  # speech tokens per second (S3_SR / S3_TOKEN_HOP)
SPEECH_VOCAB_SIZE = 6561  # size of the speech-token vocabulary
class S3Tokenizer(S3TokenizerV2):
    """
    s3tokenizer.S3TokenizerV2 with the following changes:
    - a more integrated `forward`
    - compute `log_mel_spectrogram` using `_mel_filters` and `window` in `register_buffers`
    """
    # These buffers are rebuilt in __init__, so checkpoint loaders may
    # legitimately omit them from the state dict.
    ignore_state_dict_missing = ("_mel_filters", "window")

    def __init__(
        self,
        name: str="speech_tokenizer_v2_25hz",
        config: ModelConfig = ModelConfig()
    ):
        # NOTE(review): `ModelConfig()` as a default argument is evaluated once
        # at import time and shared across calls — assumed safe because it is
        # only read here; confirm ModelConfig is effectively immutable.
        super().__init__(name)
        self.n_fft = 400
        _mel_filters = librosa.filters.mel(
            sr=S3_SR,
            n_fft=self.n_fft,
            n_mels=config.n_mels
        )
        # Registered as buffers (not parameters): they follow .to(device)
        # moves but receive no gradients.
        self.register_buffer(
            "_mel_filters",
            torch.FloatTensor(_mel_filters),
        )
        self.register_buffer(
            "window",
            torch.hann_window(self.n_fft),
        )

    def pad(self, wavs, sr) -> List[torch.Tensor]:
        """
        Given a list of wavs with the same `sample_rate`, pad them so that the length is multiple of 40ms (S3 runs at 25 token/sec).
        """
        processed_wavs = []
        for wav in wavs:
            if isinstance(wav, np.ndarray):
                wav = torch.from_numpy(wav)
            if wav.dim() == 1:
                # Promote (T,) to a batch of one: (1, T).
                wav = wav.unsqueeze(0)
            # Round the duration up to a whole number of speech tokens, then
            # zero-pad the waveform to exactly that many samples.
            n_tokens = (wav.shape[1] / sr) * S3_TOKEN_RATE
            n_tokens = np.ceil(n_tokens)
            intended_wav_len = n_tokens * (sr / S3_TOKEN_RATE)
            intended_wav_len = int(intended_wav_len)
            wav = torch.nn.functional.pad(
                wav,
                (0, intended_wav_len - wav.shape[-1]),
                mode="constant",
                value=0
            )
            processed_wavs.append(wav)
        return processed_wavs

    def _prepare_audio(self, wavs):
        """Prepare a list of audios for s3tokenizer processing.

        Converts numpy arrays to tensors and promotes 1-D waveforms to (1, T).
        """
        processed_wavs = []
        for wav in wavs:
            if isinstance(wav, np.ndarray):
                wav = torch.from_numpy(wav)
            if wav.dim() == 1:
                wav = wav.unsqueeze(0)
            processed_wavs.append(wav)
        return processed_wavs

    @torch.no_grad()
    def forward(
        self,
        wavs: torch.Tensor,
        accelerator: 'Accelerator'=None,
        max_len: int=None,
    ) -> Tuple[torch.Tensor, torch.LongTensor]:
        """Tokenize a batch of waveforms into discrete speech tokens.

        NOTE: mel-spec has a hop size of 160 points (100 frame/sec).
        FIXME: this class inherits `nn.Module` but doesn't accept `torch.Tensor` and handles a list of wavs one by one, which is unexpected.

        Args
        ----
        - `wavs`: 16 kHz speech audio
        - `max_len` max length to truncate the output sequence to (25 token/sec).
        NOTE: please pad the waveform if longer sequence is needed.

        Returns
        -------
        Tuple of (speech_tokens, speech_token_lens), both detached int64 tensors.
        """
        processed_wavs = self._prepare_audio(wavs)
        mels, mel_lens = [], []
        for wav in processed_wavs:
            wav = wav.to(self.device)
            mel = self.log_mel_spectrogram(wav)  # [B=1, F, T]
            if max_len is not None:
                mel = mel[..., :max_len * 4]  # num_mel_frames = 4 * num_tokens
            mels.append(mel.squeeze(0))
        mels, mel_lens = padding(mels)
        # When running under HF Accelerate, unwrap the model to reach `quantize`.
        if accelerator is None:
            tokenizer = self
        else:
            tokenizer = accelerator.unwrap_model(self)
        speech_tokens, speech_token_lens = tokenizer.quantize(mels, mel_lens.to(self.device))
        return (
            speech_tokens.long().detach(),
            speech_token_lens.long().detach(),
        )

    def log_mel_spectrogram(
        self,
        audio: torch.Tensor,
        padding: int = 0,
    ):
        """
        Compute the log-Mel spectrogram of

        NOTE(review): this `padding` parameter shadows the imported
        `s3tokenizer.utils.padding` helper inside the method body.

        Parameters
        ----------
        audio: torch.Tensor, shape = (*)
            The path to audio or either a NumPy array or Tensor containing the
            audio waveform in 16 kHz
        padding: int
            Number of zero samples to pad to the right

        Returns
        -------
        torch.Tensor, shape = (n_mels, n_frames)
            A Tensor that contains the Mel spectrogram
        """
        if not torch.is_tensor(audio):
            audio = torch.from_numpy(audio)
        audio = audio.to(self.device)
        if padding > 0:
            audio = F.pad(audio, (0, padding))
        stft = torch.stft(
            audio, self.n_fft, S3_HOP,
            window=self.window.to(self.device),
            return_complex=True
        )
        # Drop the last STFT frame before the power spectrum.
        # NOTE(review): assumed to match the Whisper-style log-mel — confirm.
        magnitudes = stft[..., :-1].abs()**2
        mel_spec = self._mel_filters.to(self.device) @ magnitudes
        # Log-compress, clamp to within 8 orders of magnitude of the peak,
        # then rescale to roughly [-1, 1].
        log_spec = torch.clamp(mel_spec, min=1e-10).log10()
        log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/s3tokenizer/s3tokenizer.py",
"license": "MIT License",
"lines": 144,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
resemble-ai/chatterbox:src/chatterbox/models/t3/inference/alignment_stream_analyzer.py | # Copyright (c) 2025 Resemble AI
# Author: John Meade, Jeremy Hsu
# MIT License
import logging
import torch
from dataclasses import dataclass
from types import MethodType
logger = logging.getLogger(__name__)
# (layer_idx, head_idx) pairs of backbone attention heads whose weights are
# hooked for alignment tracking (see AlignmentStreamAnalyzer).
LLAMA_ALIGNED_HEADS = [(12, 15), (13, 11), (9, 2)]
@dataclass
class AlignmentAnalysisResult:
    """Per-frame alignment diagnostics produced while streaming TTS inference."""
    # was this frame detected as being part of a noisy beginning chunk with potential hallucinations?
    false_start: bool
    # was this frame detected as being part of a long tail with potential hallucinations?
    long_tail: bool
    # was this frame detected as repeating existing text content?
    repetition: bool
    # was the alignment position of this frame too far from the previous frame?
    discontinuity: bool
    # has inference reached the end of the text tokens? eg, this remains false if inference stops early
    complete: bool
    # approximate position in the text token sequence. Can be used for generating online timestamps.
    position: int
class AlignmentStreamAnalyzer:
    """Online monitor of text-to-speech alignment during streaming inference.

    Some transformer TTS models implicitly solve text-speech alignment in one
    or more of their self-attention activation maps. This class hooks the
    attention heads listed in `LLAMA_ALIGNED_HEADS`, accumulates an approximate
    alignment matrix frame by frame, and applies heuristics to detect false
    starts, long hallucinated tails, and repetitions — editing the logits to
    force or suppress EOS when generation goes off the rails.
    """

    def __init__(self, tfmr, queue, text_tokens_slice, alignment_layer_idx=9, eos_idx=0):
        """
        A hook is injected into the aligned attention heads, and heuristics are
        used to determine alignment position, repetition, etc. while streaming.

        NOTE: currently requires no queues; `queue` and `alignment_layer_idx`
        are unused and kept only for interface compatibility.
        """
        # self.queue = queue
        self.text_tokens_slice = (i, j) = text_tokens_slice
        self.eos_idx = eos_idx
        # Running alignment matrix: one row per generated frame, one column
        # per text token in the slice.
        self.alignment = torch.zeros(0, j-i)
        # self.alignment_bin = torch.zeros(0, j-i)
        self.curr_frame_pos = 0
        self.text_position = 0
        self.started = False
        self.started_at = None
        self.complete = False
        self.completed_at = None
        # Track generated tokens for repetition detection
        self.generated_tokens = []
        # Using `output_attentions=True` is incompatible with optimized attention kernels, so
        # using it for all layers slows things down too much. We can apply it to just one layer
        # by intercepting the kwargs and adding a forward hook (credit: jrm)
        self.last_aligned_attns = []
        # FIX: loop variable renamed so it no longer shadows the `i` bound from
        # `text_tokens_slice` above.
        for buffer_idx, (layer_idx, head_idx) in enumerate(LLAMA_ALIGNED_HEADS):
            self.last_aligned_attns += [None]
            self._add_attention_spy(tfmr, buffer_idx, layer_idx, head_idx)

    def _add_attention_spy(self, tfmr, buffer_idx, layer_idx, head_idx):
        """
        Adds a forward hook to a specific attention layer to collect outputs.
        """
        def attention_forward_hook(module, input, output):
            """
            See `LlamaAttention.forward`; the output is a 3-tuple: `attn_output, attn_weights, past_key_value`.
            NOTE:
            - When `output_attentions=True`, `LlamaSdpaAttention.forward` calls `LlamaAttention.forward`.
            - `attn_output` has shape [B, H, T0, T0] for the 0th entry, and [B, H, 1, T0+i] for the rest i-th.
            """
            if isinstance(output, tuple) and len(output) > 1 and output[1] is not None:
                step_attention = output[1].cpu()  # (B, n_heads, T0, Ti)
                self.last_aligned_attns[buffer_idx] = step_attention[0, head_idx]  # (T0, Ti)

        target_layer = tfmr.layers[layer_idx].self_attn
        # Register hook and store the handle
        target_layer.register_forward_hook(attention_forward_hook)
        # Enable attention outputs so the hook actually receives weights.
        # NOTE(review): the original value is saved but never restored.
        if hasattr(tfmr, 'config') and hasattr(tfmr.config, 'output_attentions'):
            self.original_output_attentions = tfmr.config.output_attentions
            tfmr.config.output_attentions = True

    def step(self, logits, next_token=None):
        """
        Analyzes one generation step and potentially modifies `logits` to force
        an EOS (on hallucination/repetition) or suppress it (on early repeats).
        """
        # extract approximate alignment matrix chunk (1 frame at a time after the first chunk)
        aligned_attn = torch.stack(self.last_aligned_attns).mean(dim=0)  # (N, N)
        i, j = self.text_tokens_slice
        if self.curr_frame_pos == 0:
            # first chunk has conditioning info, text tokens, and BOS token
            A_chunk = aligned_attn[j:, i:j].clone().cpu()  # (T, S)
        else:
            # subsequent chunks have 1 frame due to KV-caching
            A_chunk = aligned_attn[:, i:j].clone().cpu()  # (1, S)
        # TODO: monotonic masking; could have issue b/c spaces are often skipped.
        A_chunk[:, self.curr_frame_pos + 1:] = 0
        self.alignment = torch.cat((self.alignment, A_chunk), dim=0)
        A = self.alignment
        T, S = A.shape
        # update position
        cur_text_posn = A_chunk[-1].argmax()
        discontinuity = not(-4 < cur_text_posn - self.text_position < 7)  # NOTE: very lenient!
        if not discontinuity:
            self.text_position = cur_text_posn
        # Hallucinations at the start of speech show up as activations at the bottom of the attention maps!
        # To mitigate this, we just wait until there are no activations far off-diagonal in the last 2 tokens,
        # and there are some strong activations in the first few tokens.
        false_start = (not self.started) and (A[-2:, -2:].max() > 0.1 or A[:, :4].max() < 0.5)
        self.started = not false_start
        if self.started and self.started_at is None:
            self.started_at = T
        # Is generation likely complete?
        self.complete = self.complete or self.text_position >= S - 3
        if self.complete and self.completed_at is None:
            self.completed_at = T
        # NOTE: EOS rarely assigned activations, and second-last token is often punctuation, so use last 3 tokens.
        # NOTE: due to the false-start behaviour, we need to make sure we skip activations for the first few tokens.
        # (a previously computed `last_text_token_duration` was unused and has been removed)
        # Activations for the final token that last too long are likely hallucinations.
        long_tail = self.complete and (A[self.completed_at:, -3:].sum(dim=0).max() >= 5)  # 200ms
        # If there are activations in previous tokens after generation has completed, assume this is a repetition error.
        alignment_repetition = self.complete and (A[self.completed_at:, :-5].max(dim=1).values.sum() > 5)
        # Track generated tokens for repetition detection
        if next_token is not None:
            # Convert tensor to scalar if needed
            if isinstance(next_token, torch.Tensor):
                token_id = next_token.item() if next_token.numel() == 1 else next_token.view(-1)[0].item()
            else:
                token_id = next_token
            self.generated_tokens.append(token_id)
            # Keep only last 8 tokens to prevent memory issues
            if len(self.generated_tokens) > 8:
                self.generated_tokens = self.generated_tokens[-8:]
        # Check for excessive token repetition (same token twice in a row,
        # once at least 3 tokens have been generated)
        token_repetition = (
            # self.complete and
            len(self.generated_tokens) >= 3 and
            len(set(self.generated_tokens[-2:])) == 1
        )
        if token_repetition:
            repeated_token = self.generated_tokens[-1]
            logger.warning(f"🚨 Detected 2x repetition of token {repeated_token}")
            # Suppress EoS to prevent early termination
            if cur_text_posn < S - 3 and S > 5:  # Only suppress if text is longer than 5 tokens
                logits[..., self.eos_idx] = -2**15
        # If a bad ending is detected, force emit EOS by modifying logits
        # NOTE: this means logits may be inconsistent with latents!
        if long_tail or alignment_repetition or token_repetition:
            logger.warning(f"forcing EOS token, {long_tail=}, {alignment_repetition=}, {token_repetition=}")
            # (±2**15 is safe for all dtypes >= 16bit)
            logits = -(2**15) * torch.ones_like(logits)
            logits[..., self.eos_idx] = 2**15
        self.curr_frame_pos += 1
        return logits
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/t3/inference/alignment_stream_analyzer.py",
"license": "MIT License",
"lines": 145,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/t3/inference/t3_hf_backend.py | from typing import Optional
import torch
from torch import nn as nn
from transformers import LlamaConfig, LlamaModel, LlamaPreTrainedModel, GenerationMixin
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
class T3HuggingfaceBackend(LlamaPreTrainedModel, GenerationMixin):
"""
Override some HuggingFace interface methods so we can use the standard `generate` method with our
custom embedding / logit layers.
NOTE: need to extend "*PreTrainedModel" to avoid re-initializing weights!
"""
def __init__(
self,
config: LlamaConfig,
llama: LlamaModel,
*,
speech_enc,
speech_head,
latents_queue=None,
logits_queue=None,
alignment_stream_analyzer: 'AlignmentStreamAnalyzer'=None,
):
super().__init__(config)
self.model = llama
self.speech_enc = speech_enc
self.speech_head = speech_head
self._added_cond = False
self.alignment_stream_analyzer = alignment_stream_analyzer
@torch.inference_mode()
def prepare_inputs_for_generation(
self, input_ids: torch.Tensor, decoder_cond: torch.Tensor, use_cache: bool, past_key_values=None,
# This argument was introduced in some recent version of transformers (>=4.29.1)
cache_position=None
):
"""
This is a method used by huggingface's generate() method.
Overridden here to apply our custom speech token embedding layer.
:param input_ids: (B, S) int64 tensors of input tokens.
:param decoder_cond: (B, T, C) float32 tensor of conditioning (prefixed to <input_embeds>)
"""
# Make use of the kv cache: only the last input ID is new, we trim away all the ones before
if not use_cache:
past_key_values = None
if past_key_values is not None:
input_ids = input_ids[:, -1:]
# custom speech token embedding layer
inputs_embeds = self.speech_enc(input_ids)
# prefix decoder conditioning if applicable
if not self._added_cond:
assert past_key_values is not None # should be first step
if decoder_cond.size(0) != inputs_embeds.size(0):
decoder_cond = decoder_cond.expand(inputs_embeds.size(0), -1, -1)
inputs_embeds = torch.cat([decoder_cond, inputs_embeds], dim=1)
self._added_cond = True
return {
"inputs_embeds": inputs_embeds,
"past_key_values": past_key_values,
"use_cache": use_cache,
}
@torch.inference_mode()
def forward(
self,
inputs_embeds: torch.Tensor,
past_key_values: Optional[torch.Tensor]=None,
use_cache=True,
output_attentions=False,
output_hidden_states=True,
return_dict=True,
):
"""
This is a method used by huggingface's generate() method.
Overridden here to apply our custom layer norm and speech logit projection layers.
:param inputs_embeds: (B, S, C) float32 tensor of conditioning inputs. If past key values are given,
S should be 1.
"""
is_large_input = inputs_embeds.size(1) != 1
has_cache = past_key_values is not None and len(past_key_values) > 0
assert not (is_large_input and has_cache)
assert return_dict
assert output_hidden_states
tfmr_out = self.model(
inputs_embeds=inputs_embeds,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
hidden_states = tfmr_out.hidden_states[-1] # (B, seq, dim)
logits = self.speech_head(hidden_states)
# assert inputs_embeds.size(0) == 1 # (disabled for CFG)
# NOTE: hallucination handler may modify logits to force emit an EOS token
# logits = self.alignment_stream_analyzer.step(logits)
return CausalLMOutputWithCrossAttentions(
logits=logits,
past_key_values=tfmr_out.past_key_values,
hidden_states=tfmr_out.hidden_states,
attentions=tfmr_out.attentions,
)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/t3/inference/t3_hf_backend.py",
"license": "MIT License",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:src/chatterbox/models/t3/llama_configs.py | LLAMA_520M_CONFIG_DICT = dict(
# Arbitrary small number that won't cause problems when loading.
# These param are unused due to custom input layers.
vocab_size=8,
# default params needed for loading most pretrained 1B weights
max_position_embeddings=131072,
hidden_size=1024,
intermediate_size=4096,
num_hidden_layers=30,
num_attention_heads=16,
attn_implementation="sdpa",
head_dim=64,
tie_word_embeddings=False,
hidden_act="silu",
attention_bias=False,
attention_dropout=0.0,
initializer_range=0.02,
mlp_bias=False,
model_type="llama",
num_key_value_heads=16,
pretraining_tp=1,
rms_norm_eps=1e-05,
rope_scaling=dict(
factor=8.0,
high_freq_factor=4.0,
low_freq_factor=1.0,
original_max_position_embeddings=8192,
rope_type="llama3"
),
rope_theta=500000.0,
torch_dtype="bfloat16",
use_cache=True,
)
GPT2_MEDIUM_CONFIG = {
"activation_function": "gelu_new",
"architectures": [
"GPT2LMHeadModel"
],
"attn_pdrop": 0.1,
"bos_token_id": 50256,
"embd_pdrop": 0.1,
"eos_token_id": 50256,
"initializer_range": 0.02,
"layer_norm_epsilon": 1e-05,
"model_type": "gpt2",
"n_ctx": 8196,
"n_embd": 1024,
"hidden_size": 1024,
"n_head": 16,
"n_layer": 24,
"n_positions": 8196,
"n_special": 0,
"predict_special_tokens": True,
"resid_pdrop": 0.1,
"summary_activation": None,
"summary_first_dropout": 0.1,
"summary_proj_to_labels": True,
"summary_type": "cls_index",
"summary_use_proj": True,
"task_specific_params": {
"text-generation": {
"do_sample": True,
"max_length": 50
}
},
"vocab_size": 50276,
}
LLAMA_CONFIGS = {
"Llama_520M": LLAMA_520M_CONFIG_DICT,
"GPT2_medium": GPT2_MEDIUM_CONFIG,
}
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/t3/llama_configs.py",
"license": "MIT License",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:src/chatterbox/models/t3/modules/cond_enc.py | from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn, Tensor
from .perceiver import Perceiver
from .t3_config import T3Config
@dataclass
class T3Cond:
"""
Dataclass container for most / all conditioning info.
TODO: serialization methods aren't used, keeping them around for convenience
"""
speaker_emb: Tensor
clap_emb: Optional[Tensor] = None
cond_prompt_speech_tokens: Optional[Tensor] = None
cond_prompt_speech_emb: Optional[Tensor] = None
emotion_adv: Optional[Tensor] = 0.5
def to(self, *, device=None, dtype=None):
"Cast to a device and dtype. Dtype casting is ignored for long/int tensors."
for k, v in self.__dict__.items():
if torch.is_tensor(v):
is_fp = type(v.view(-1)[0].item()) is not int
setattr(self, k, v.to(device=device, dtype=dtype if is_fp else None))
return self
def save(self, fpath):
torch.save(self.__dict__, fpath)
@staticmethod
def load(fpath, map_location="cpu"):
kwargs = torch.load(fpath, map_location=map_location, weights_only=True)
return T3Cond(**kwargs)
class T3CondEnc(nn.Module):
"""
Handle all non-text conditioning, like speaker embeddings / prompts, CLAP, emotion, etc.
"""
def __init__(self, hp: T3Config):
super().__init__()
self.hp = hp
if hp.encoder_type == "voice_encoder":
self.spkr_enc = nn.Linear(hp.speaker_embed_size, hp.n_channels)
else:
raise NotImplementedError(str(hp.encoder_type))
# emotion adv
self.emotion_adv_fc = None
if hp.emotion_adv:
self.emotion_adv_fc = nn.Linear(1, hp.n_channels, bias=False)
# perceiver resampler
self.perceiver = None
if hp.use_perceiver_resampler:
self.perceiver = Perceiver()
def forward(self, cond: T3Cond):
# Validate
assert (cond.cond_prompt_speech_tokens is None) == (cond.cond_prompt_speech_emb is None), \
"no embeddings for cond_prompt_speech_tokens"
# Speaker embedding projection
cond_spkr = self.spkr_enc(cond.speaker_emb.view(-1, self.hp.speaker_embed_size))[:, None] # (B, 1, dim)
empty = torch.zeros_like(cond_spkr[:, :0]) # (B, 0, dim)
# TODO CLAP
assert cond.clap_emb is None, "clap_embed not implemented"
cond_clap = empty # (B, 0, dim)
# Cond prompt
cond_prompt_speech_emb = cond.cond_prompt_speech_emb
if cond_prompt_speech_emb is None:
cond_prompt_speech_emb = empty # (B, 0, dim)
elif self.hp.use_perceiver_resampler:
cond_prompt_speech_emb = self.perceiver(cond_prompt_speech_emb)
# Emotion Adv: must provide a value if this model uses emotion conditioning
cond_emotion_adv = empty # (B, 0, dim)
if self.hp.emotion_adv:
assert cond.emotion_adv is not None
cond_emotion_adv = self.emotion_adv_fc(cond.emotion_adv.view(-1, 1, 1))
# Concat and return
cond_embeds = torch.cat((
cond_spkr,
cond_clap,
cond_prompt_speech_emb,
cond_emotion_adv,
), dim=1)
return cond_embeds
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/t3/modules/cond_enc.py",
"license": "MIT License",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
resemble-ai/chatterbox:src/chatterbox/models/t3/modules/learned_pos_emb.py | from typing import Union
import torch
from torch import nn, Tensor
class LearnedPositionEmbeddings(nn.Module):
def __init__(self, seq_len, model_dim, init=.02):
super().__init__()
self.emb = nn.Embedding(seq_len, model_dim)
# Initializing this way is standard for GPT-2
self.emb.weight.data.normal_(mean=0.0, std=init)
def forward(self, x):
"""
Returns positional embeddings for index 0 up to the length of x
"""
sl = x.shape[1]
return self.emb(torch.arange(0, sl, device=x.device))
def get_fixed_embedding(self, idx: 'Union[int, Tensor]'):
"""
Args:
idx: scalar int or an integer tensor of shape (T,) or (B, T)
Returns:
positional embeddings for given indices, shape (B, T, dim), ie (1, 1, dim) for int input
"""
device = self.emb.weight.device
idx = idx.to(device) if torch.is_tensor(idx) else torch.tensor(idx, device=device)
idx = torch.atleast_2d(idx)
assert idx.ndim == 2
return self.emb(idx) # (B, T, dim)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/t3/modules/learned_pos_emb.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:src/chatterbox/models/t3/modules/perceiver.py | # Copyright (c) 2025 Resemble AI
# Author: Manmay Nakhashi
# MIT License
import math
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
class RelativePositionBias(nn.Module):
def __init__(self, scale, causal=False, num_buckets=32, max_distance=128, heads=8):
super().__init__()
self.scale = scale
self.causal = causal
self.num_buckets = num_buckets
self.max_distance = max_distance
self.relative_attention_bias = nn.Embedding(num_buckets, heads)
@staticmethod
def _relative_position_bucket(relative_position, causal=True, num_buckets=32, max_distance=128):
ret = 0
n = -relative_position
if not causal:
num_buckets //= 2
ret += (n < 0).long() * num_buckets
n = torch.abs(n)
else:
n = torch.max(n, torch.zeros_like(n))
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).long()
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
def forward(self, qk_dots):
i, j, device = *qk_dots.shape[-2:], qk_dots.device
q_pos = torch.arange(i, dtype=torch.long, device=device)
k_pos = torch.arange(j, dtype=torch.long, device=device)
rel_pos = k_pos[None, :] - q_pos[:, None]
rp_bucket = self._relative_position_bucket(rel_pos, causal=self.causal, num_buckets=self.num_buckets,
max_distance=self.max_distance)
values = self.relative_attention_bias(rp_bucket)
bias = rearrange(values, 'i j h -> () h i j')
return qk_dots + (bias * self.scale)
class AttentionQKV(nn.Module):
def __init__(self, n_heads, head_dim, dropout_rate=0.1, scale=None, flash=False):
super().__init__()
self.n_heads = n_heads
self.head_dim = head_dim
self.scale = scale if scale is not None else head_dim ** -0.5
self.flash = flash
self.dropout_rate = dropout_rate
self.dropout = nn.Dropout(dropout_rate)
self.flash_config = self.setup_flash_config() if flash else None
def setup_flash_config(self):
# Setup flash attention configuration
flash_config = {
'enable_flash': True,
'enable_math': True,
'enable_mem_efficient': True
}
return flash_config
def forward(self, q, k, v, mask=None):
q, k, v = [self.split_heads(tensor) for tensor in [q, k, v]]
if self.flash:
out = self.flash_attention(q, k, v, mask=mask)
else:
out = self.scaled_dot_product_attention(q, k, v, mask=mask)
return self.combine_heads(out)
def scaled_dot_product_attention(self, q, k, v, mask=None):
sim = torch.einsum("bhlt,bhls->bhts", q, k) * self.scale
if mask is not None:
sim = sim.masked_fill(mask == 0, float('-inf'))
attn = torch.softmax(sim, dim=-1)
attn = self.dropout(attn)
return torch.einsum("bhts,bhls->bhlt", attn, v)
def flash_attention(self, q, k, v, mask=None):
config = self.flash_config if self.flash_config else {}
with torch.backends.cuda.sdp_kernel(**config):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask=mask,
dropout_p=self.dropout_rate if self.training else 0.
)
return out
def split_heads(self, x):
bs, length, _ = x.shape
x = x.view(bs, length, self.n_heads, self.head_dim)
return x.permute(0, 2, 1, 3)
def combine_heads(self, x):
bs, _, length, _ = x.shape
x = x.permute(0, 2, 1, 3).contiguous()
return x.view(bs, length, -1)
class AttentionBlock2(nn.Module):
"""
An attention block that allows spatial positions to attend to each other,
using AttentionQKV and separate linear transformations for Q, K, and V.
"""
def __init__(
self,
channels,
num_heads=1,
num_head_channels=-1,
relative_pos_embeddings=False,
flash_attention=True,
dropout_rate=0.2,
scale=None
):
super().__init__()
self.channels = channels
if num_head_channels == -1:
self.num_heads = num_heads
else:
assert (
channels % num_head_channels == 0
), f"channels {channels} is not divisible by num_head_channels {num_head_channels}"
self.num_heads = channels // num_head_channels
self.norm = nn.LayerNorm(channels)
# Separate linear layers for Q, K, and V
self.to_q = nn.Linear(channels, channels)
self.to_k = nn.Linear(channels, channels)
self.to_v = nn.Linear(channels, channels)
self.attention = AttentionQKV(self.num_heads, channels // self.num_heads, dropout_rate=dropout_rate, flash=flash_attention, scale=scale)
self.proj_out = nn.Linear(channels, channels)
if relative_pos_embeddings:
self.relative_pos_embeddings = RelativePositionBias(scale=(channels // self.num_heads) ** .5, causal=False, heads=num_heads, num_buckets=32, max_distance=64)
else:
self.relative_pos_embeddings = None
def forward(self, x1, x2, mask=None):
b1, c1, *spatial1 = x1.shape
b2, c2, *spatial2 = x2.shape
x1_norm = self.norm(x1)
x2_norm = self.norm(x2)
q = self.to_q(x1_norm)
k = self.to_k(x2_norm)
v = self.to_v(x2_norm)
h = self.attention(q, k, v, mask=mask)
h = self.proj_out(h)
return (x1 + h).reshape(b1, c1, *spatial1)
class Perceiver(nn.Module):
"""Inspired by https://arxiv.org/abs/2103.03206"""
def __init__(self, pre_attention_query_token=32, pre_attention_query_size=1024, embedding_dim=1024, num_attn_heads=4):
"""
Initialize the perceiver module.
:param pre_attention_query_token: Number of query tokens for pre-attention
:param pre_attention_query_size: Size of each query token
:param embedding_dim: Dimension of the embedding space
:param num_attn_heads: Number of attention heads
"""
super().__init__()
# Initialize the pre-attention query parameter
self.pre_attention_query = torch.nn.Parameter(
torch.empty(1, pre_attention_query_token, pre_attention_query_size)
)
# Calculate the variance for uniform initialization
query_variance = math.sqrt(3.0) * math.sqrt(2.0 / (pre_attention_query_token + pre_attention_query_token))
# Initialize the pre-attention query with uniform distribution
self.pre_attention_query.data.uniform_(-query_variance, query_variance)
# Initialize the attention block
self.attn = AttentionBlock2(embedding_dim, num_attn_heads)
def forward(self, h):
"""
Forward pass of the perceiver module.
:param h: Input tensor
:return: Output after applying attention mechanisms
"""
# Expand the pre-attention query to match the batch size of the input
query_ = self.pre_attention_query.expand(h.shape[0], -1, -1)
# Apply the first attention mechanism (cross-attention)
pre_att = self.attn(query_, h)
# Apply the second attention mechanism (self-attention)
attn = self.attn(pre_att, pre_att)
return attn
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/t3/modules/perceiver.py",
"license": "MIT License",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/t3/modules/t3_config.py | from ..llama_configs import LLAMA_CONFIGS
class T3Config:
def __init__(self, text_tokens_dict_size=704):
self.start_text_token = 255
self.stop_text_token = 0
self.text_tokens_dict_size = text_tokens_dict_size
self.max_text_tokens = 2048
self.start_speech_token = 6561
self.stop_speech_token = 6562
self.speech_tokens_dict_size = 8194
self.max_speech_tokens = 4096
self.llama_config_name = "Llama_520M"
self.input_pos_emb = "learned"
self.speech_cond_prompt_len = 150
self.encoder_type = "voice_encoder"
self.speaker_embed_size = 256
self.use_perceiver_resampler = True
self.emotion_adv = True
@property
def n_channels(self):
return LLAMA_CONFIGS[self.llama_config_name]["hidden_size"]
@property
def is_multilingual(self):
return self.text_tokens_dict_size == 2454
@classmethod
def english_only(cls):
"""Create configuration for English-only TTS model."""
return cls(text_tokens_dict_size=704)
@classmethod
def multilingual(cls):
"""Create configuration for multilingual TTS model."""
return cls(text_tokens_dict_size=2454)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/t3/modules/t3_config.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:src/chatterbox/models/t3/t3.py | # Copyright (c) 2025 Resemble AI
# MIT License
import logging
from typing import Union, Optional, List
logger = logging.getLogger(__name__)
from tqdm import tqdm
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from transformers import LlamaModel, LlamaConfig, GPT2Config, GPT2Model
from transformers.generation.logits_process import (
LogitsProcessorList,
RepetitionPenaltyLogitsProcessor,
TemperatureLogitsWarper,
TopKLogitsWarper,
TopPLogitsWarper,
MinPLogitsWarper,
)
from .modules.learned_pos_emb import LearnedPositionEmbeddings
from .modules.cond_enc import T3CondEnc, T3Cond
from .modules.t3_config import T3Config
from .llama_configs import LLAMA_CONFIGS
from .inference.t3_hf_backend import T3HuggingfaceBackend
from .inference.alignment_stream_analyzer import AlignmentStreamAnalyzer
from ..utils import AttrDict
logger = logging.getLogger(__name__)
def _ensure_BOT_EOT(text_tokens: Tensor, hp):
B = text_tokens.size(0)
assert (text_tokens == hp.start_text_token).int().sum() >= B, "missing start_text_token"
assert (text_tokens == hp.stop_text_token).int().sum() >= B, "missing stop_text_token"
class T3(nn.Module):
"""
Token-To-Token (T3) TTS model using huggingface transformer models as backbones,
* tokenization, including start / stop tokens are always added externally to this class
* conditioning data like CLAP, emotion, etc are all in a separate file for more modularity
* careful! this class assumes relative positional encoding -- with absolute PE, we would at
least want to reset the position to 0 when speech tokens begin, and optionally use a
different PE embedding space for speech.
"""
def __init__(self, hp=None):
if hp is None:
hp = T3Config.english_only()
super().__init__()
self.hp = hp
config_dict = LLAMA_CONFIGS[hp.llama_config_name]
self.is_gpt = config_dict.get("model_type") == "gpt2"
if self.is_gpt:
self.cfg = GPT2Config(**config_dict)
self.tfmr = GPT2Model(self.cfg)
else:
self.cfg = LlamaConfig(**config_dict)
self.tfmr = LlamaModel(self.cfg)
self.dim = self.cfg.hidden_size
self.deepspeed_patch_applied = False
# conditioning / embedding
self.cond_enc = T3CondEnc(hp)
self.text_emb = nn.Embedding(hp.text_tokens_dict_size, self.dim)
self.speech_emb = nn.Embedding(hp.speech_tokens_dict_size, self.dim)
# custom position embedding
self.text_pos_emb = None
self.speech_pos_emb = None
if hp.input_pos_emb == "learned":
max_text_seq_len = hp.max_text_tokens + 2
self.text_pos_emb = LearnedPositionEmbeddings(max_text_seq_len, self.dim)
max_mel_seq_len = hp.max_speech_tokens + 2 + 2
self.speech_pos_emb = LearnedPositionEmbeddings(max_mel_seq_len, self.dim)
# logit projection
self.text_head = nn.Linear(self.cfg.hidden_size, hp.text_tokens_dict_size, bias=False)
self.speech_head = nn.Linear(self.cfg.hidden_size, hp.speech_tokens_dict_size, bias=self.is_gpt)
self.compiled = False
@property
def device(self):
return self.speech_head.weight.device
def prepare_conditioning(self, t3_cond: T3Cond):
"""
Token cond data needs to be embedded, so that needs to be here instead of in `T3CondEnc`.
"""
if t3_cond.cond_prompt_speech_tokens is not None and t3_cond.cond_prompt_speech_emb is None:
t3_cond.cond_prompt_speech_emb = self.speech_emb(t3_cond.cond_prompt_speech_tokens)
if not self.is_gpt:
t3_cond.cond_prompt_speech_emb += self.speech_pos_emb(t3_cond.cond_prompt_speech_tokens)
return self.cond_enc(t3_cond) # (B, len_cond, dim)
def prepare_input_embeds(
self,
*,
t3_cond: T3Cond,
text_tokens: torch.LongTensor,
speech_tokens: torch.LongTensor,
cfg_weight: float = 0.0,
):
# prepare input embeddings (skip backbone tranformer embeddings)
cond_emb = self.prepare_conditioning(t3_cond) # (B, len_cond, dim)
text_emb = self.text_emb(text_tokens) # (B, len_text, dim)
if cfg_weight > 0.0 and not self.is_gpt:
text_emb[1].zero_() # CFG uncond
speech_emb = self.speech_emb(speech_tokens) # (B, len_speech, dim)
if self.hp.input_pos_emb == "learned":
text_emb = text_emb + self.text_pos_emb(text_tokens)
speech_emb = speech_emb + self.speech_pos_emb(speech_tokens)
len_cond = cond_emb.size(1)
if cond_emb.size(0) != text_emb.size(0):
cond_emb = cond_emb.expand(text_emb.size(0), -1, -1)
# concat
embeds = torch.stack([
torch.cat((ce, te, se))
for ce, te, se in zip(cond_emb, text_emb, speech_emb)
]) # (B, length, dim)
return embeds, len_cond
def forward(
self,
*,
t3_cond: T3Cond,
text_tokens: torch.LongTensor,
text_token_lens: torch.LongTensor,
speech_tokens: torch.LongTensor,
speech_token_lens: torch.LongTensor,
training=False,
):
_ensure_BOT_EOT(text_tokens, self.hp)
# prepare custom input embeds
embeds, len_cond = self.prepare_input_embeds(
t3_cond=t3_cond,
text_tokens=text_tokens,
speech_tokens=speech_tokens,
)
# backbone tranformer forward
tfmr_out = self.tfmr.forward(
input_ids=None,
# position_ids=position_ids, # TODO? ROPE should be fine?
inputs_embeds=embeds,
output_hidden_states=True,
return_dict=True,
use_cache=(not training),
)
hidden_states = tfmr_out.hidden_states[-1] # final tfmr layer output, (B, seq, dim)
# post-processing: splice out text and speech parts of hidden states
len_text = text_tokens.size(1)
len_speech = speech_tokens.size(1)
B, _, dim = hidden_states.shape
device, dtype = hidden_states.device, hidden_states.dtype
text_latents = torch.zeros(B, len_text, dim, dtype=dtype, device=device)
speech_latents = torch.zeros(B, len_speech, dim, dtype=dtype, device=device)
ttl, stl = text_token_lens, speech_token_lens
for i in range(B):
text_end = len_cond + ttl[i].item()
speech_start = len_cond + text_tokens.size(1)
speech_end = speech_start + stl[i].item()
text_latents[i, :ttl[i]] = hidden_states[i, len_cond:text_end]
speech_latents[i, :stl[i]] = hidden_states[i, speech_start:speech_end]
# logit projection
text_logits = self.text_head(text_latents)
speech_logits = self.speech_head(speech_latents)
return AttrDict(
text_logits=text_logits,
text_latents=text_latents,
speech_logits=speech_logits,
speech_latents=speech_latents,
hidden_states=hidden_states,
)
def loss(
self,
*,
t3_cond: T3Cond,
text_tokens: torch.LongTensor,
text_token_lens: torch.LongTensor,
speech_tokens: torch.LongTensor,
speech_token_lens: torch.LongTensor,
):
"training method"
len_text = text_tokens.size(1)
len_speech = speech_tokens.size(1)
assert len_text == text_token_lens.max()
assert len_speech == speech_token_lens.max()
out = self.forward(
t3_cond=t3_cond,
text_tokens=text_tokens,
text_token_lens=text_token_lens,
speech_tokens=speech_tokens,
speech_token_lens=speech_token_lens,
training=True,
) # (B, seq, vocab_size)
# Calc CCE losses
IGNORE_ID = -100
device = out.text_logits.device
mask_text = torch.arange(len_text, device=device)[None] >= text_token_lens[:, None] # (B, len_text)
mask_speech = torch.arange(len_speech, device=device)[None] >= speech_token_lens[:, None] # (B, len_speech)
masked_text = text_tokens.masked_fill(mask_text, IGNORE_ID)
masked_speech = speech_tokens.masked_fill(mask_speech, IGNORE_ID)
loss_text = F.cross_entropy(out.text_logits, masked_text, ignore_index=IGNORE_ID)
loss_speech = F.cross_entropy(out.speech_logits, masked_speech, ignore_index=IGNORE_ID)
return loss_text, loss_speech
@torch.inference_mode()
def inference(
self,
*,
t3_cond: T3Cond,
text_tokens: Tensor,
initial_speech_tokens: Optional[Tensor]=None,
# misc conditioning
prepend_prompt_speech_tokens: Optional[Tensor]=None,
# HF generate args
num_return_sequences=1,
max_new_tokens=None,
stop_on_eos=True,
do_sample=True,
temperature=0.8,
top_p=0.95,
min_p=0.05,
length_penalty=1.0,
repetition_penalty=1.2,
cfg_weight=0.5,
):
"""
Args:
text_tokens: a 1D (unbatched) or 2D (batched) tensor.
"""
# Validate / sanitize inputs
assert prepend_prompt_speech_tokens is None, "not implemented"
_ensure_BOT_EOT(text_tokens, self.hp)
text_tokens = torch.atleast_2d(text_tokens).to(dtype=torch.long, device=self.device)
# Default initial speech to a single start-of-speech token
if initial_speech_tokens is None:
initial_speech_tokens = self.hp.start_speech_token * torch.ones_like(text_tokens[:, :1])
# Prepare custom input embeds
embeds, len_cond = self.prepare_input_embeds(
t3_cond=t3_cond,
text_tokens=text_tokens,
speech_tokens=initial_speech_tokens,
cfg_weight=cfg_weight,
)
# In order to use the standard HF generate method, we need to extend some methods to inject our custom logic
# Note the llama-specific logic. Other tfmr types can be added later.
self.compiled = False
# TODO? synchronize the expensive compile function
# with self.compile_lock:
if not self.compiled:
# Default to None for English models, only create for multilingual
alignment_stream_analyzer = None
if self.hp.is_multilingual:
alignment_stream_analyzer = AlignmentStreamAnalyzer(
self.tfmr,
None,
text_tokens_slice=(len_cond, len_cond + text_tokens.size(-1)),
alignment_layer_idx=9, # TODO: hparam or something?
eos_idx=self.hp.stop_speech_token,
)
assert alignment_stream_analyzer.eos_idx == self.hp.stop_speech_token
patched_model = T3HuggingfaceBackend(
config=self.cfg,
llama=self.tfmr,
speech_enc=self.speech_emb,
speech_head=self.speech_head,
alignment_stream_analyzer=alignment_stream_analyzer,
)
self.patched_model = patched_model
self.compiled = True
# # Run normal generate method, which calls our custom extended methods
# return self.patched_model.generate(
# inputs=initial_speech_tokens,
# decoder_cond=embeds,
# bos_token_id=self.hp.start_speech_token,
# eos_token_id=(self.hp.stop_speech_token if stop_on_eos else -1),
# pad_token_id=self.hp.stop_speech_token,
# max_new_tokens=max_new_tokens or self.hp.max_speech_tokens,
# num_return_sequences=num_return_sequences,
# temperature=temperature,
# min_p=min_p,
# length_penalty=length_penalty,
# repetition_penalty=repetition_penalty,
# do_sample=do_sample,
# # cache_implementation=None if not self.compiled else "static",
# )
device = embeds.device
bos_token = torch.tensor([[self.hp.start_speech_token]], dtype=torch.long, device=device)
bos_embed = self.speech_emb(bos_token) # shape: (B, 1, embed_dim)
bos_embed = bos_embed + self.speech_pos_emb.get_fixed_embedding(0)
# batch_size=2 for CFG
bos_embed = torch.cat([bos_embed, bos_embed])
# Combine condition and BOS token for the initial input
inputs_embeds = torch.cat([embeds, bos_embed], dim=1)
# Track generated token ids; start with the BOS token.
generated_ids = bos_token.clone()
predicted = [] # To store the predicted tokens
# Instantiate the logits processors.
top_p_warper = TopPLogitsWarper(top_p=top_p)
min_p_warper = MinPLogitsWarper(min_p=min_p)
top_p_warper = TopPLogitsWarper(top_p=top_p)
repetition_penalty_processor = RepetitionPenaltyLogitsProcessor(penalty=float(repetition_penalty))
# ---- Initial Forward Pass (no kv_cache yet) ----
output = self.patched_model(
inputs_embeds=inputs_embeds,
past_key_values=None,
use_cache=True,
output_attentions=True,
output_hidden_states=True,
return_dict=True,
)
# Initialize kv_cache with the full context.
past = output.past_key_values
# ---- Generation Loop using kv_cache ----
for i in tqdm(range(max_new_tokens), desc="Sampling", dynamic_ncols=True):
logits_step = output.logits[:, -1, :]
# CFG combine → (1, V)
cond = logits_step[0:1, :]
uncond = logits_step[1:2, :]
cfg = torch.as_tensor(cfg_weight, device=cond.device, dtype=cond.dtype)
logits = cond + cfg * (cond - uncond)
# Apply alignment stream analyzer integrity checks
if self.patched_model.alignment_stream_analyzer is not None:
if logits.dim() == 1: # guard in case something upstream squeezed
logits = logits.unsqueeze(0) # (1, V)
# Pass the last generated token for repetition tracking
last_token = generated_ids[0, -1].item() if len(generated_ids[0]) > 0 else None
logits = self.patched_model.alignment_stream_analyzer.step(logits, next_token=last_token) # (1, V)
# Apply repetition penalty
ids_for_proc = generated_ids[:1, ...] # batch = 1
logits = repetition_penalty_processor(ids_for_proc, logits) # expects (B,V)
# Apply temperature scaling.
if temperature != 1.0:
logits = logits / temperature
# Apply min_p and top_p filtering
logits = min_p_warper(ids_for_proc, logits)
logits = top_p_warper(ids_for_proc, logits)
# Convert logits to probabilities and sample the next token.
probs = torch.softmax(logits, dim=-1)
next_token = torch.multinomial(probs, num_samples=1) # shape: (B, 1)
predicted.append(next_token)
generated_ids = torch.cat([generated_ids, next_token], dim=1)
# Check for EOS token.
if next_token.view(-1) == self.hp.stop_speech_token:
logger.info(f"✅ EOS token detected! Stopping generation at step {i+1}")
break
# Get embedding for the new token.
next_token_embed = self.speech_emb(next_token)
next_token_embed = next_token_embed + self.speech_pos_emb.get_fixed_embedding(i + 1)
# For CFG
next_token_embed = torch.cat([next_token_embed, next_token_embed])
# Forward pass with only the new token and the cached past.
output = self.patched_model(
inputs_embeds=next_token_embed,
past_key_values=past,
output_attentions=True,
output_hidden_states=True,
return_dict=True,
)
# Update the kv_cache.
past = output.past_key_values
# Concatenate all predicted tokens along the sequence dimension.
predicted_tokens = torch.cat(predicted, dim=1) # shape: (B, num_tokens)
return predicted_tokens
@torch.inference_mode()
def inference_turbo(self, t3_cond, text_tokens, temperature=0.8, top_k=1000, top_p=0.95, repetition_penalty=1.2,
max_gen_len=1000):
logits_processors = LogitsProcessorList()
if temperature > 0 and temperature != 1.0:
logits_processors.append(TemperatureLogitsWarper(temperature))
if top_k > 0:
logits_processors.append(TopKLogitsWarper(top_k))
if top_p < 1.0:
logits_processors.append(TopPLogitsWarper(top_p))
if repetition_penalty != 1.0:
logits_processors.append(RepetitionPenaltyLogitsProcessor(repetition_penalty))
speech_start_token = self.hp.start_speech_token * torch.ones_like(text_tokens[:, :1])
embeds, _ = self.prepare_input_embeds(
t3_cond=t3_cond,
text_tokens=text_tokens,
speech_tokens=speech_start_token,
cfg_weight=0.0,
)
generated_speech_tokens = []
llm_outputs = self.tfmr(
inputs_embeds=embeds,
use_cache=True
)
hidden_states = llm_outputs[0]
past_key_values = llm_outputs.past_key_values
speech_hidden = hidden_states[:, -1:]
speech_logits = self.speech_head(speech_hidden)
processed_logits = logits_processors(speech_start_token, speech_logits[:, -1, :])
probs = F.softmax(processed_logits, dim=-1)
next_speech_token = torch.multinomial(probs, num_samples=1)
generated_speech_tokens.append(next_speech_token)
current_speech_token = next_speech_token
for _ in tqdm(range(max_gen_len)):
current_speech_embed = self.speech_emb(current_speech_token)
llm_outputs = self.tfmr(
inputs_embeds=current_speech_embed,
past_key_values=past_key_values,
use_cache=True
)
hidden_states = llm_outputs[0]
past_key_values = llm_outputs.past_key_values
speech_logits = self.speech_head(hidden_states)
input_ids = torch.cat(generated_speech_tokens, dim=1)
processed_logits = logits_processors(input_ids, speech_logits[:, -1, :])
if torch.all(processed_logits == -float("inf")):
print("Warning: All logits are -inf")
break
probs = F.softmax(processed_logits, dim=-1)
next_speech_token = torch.multinomial(probs, num_samples=1)
generated_speech_tokens.append(next_speech_token)
current_speech_token = next_speech_token
if torch.all(next_speech_token == self.hp.stop_speech_token):
break
all_tokens = torch.cat(generated_speech_tokens, dim=1)
# Remove EOS token if present
if all_tokens.size(1) > 0 and all_tokens[0, -1] == self.hp.stop_speech_token:
all_tokens = all_tokens[:, :-1]
return all_tokens
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/t3/t3.py",
"license": "MIT License",
"lines": 409,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/models/tokenizers/tokenizer.py | import logging
import json
import torch
from pathlib import Path
from unicodedata import category, normalize
from tokenizers import Tokenizer
from huggingface_hub import hf_hub_download
# Special tokens
SOT = "[START]"  # start-of-text marker; must exist in every vocab (asserted below)
EOT = "[STOP]"  # end-of-text marker; must exist in every vocab (asserted below)
UNK = "[UNK]"  # unknown-token fallback, stripped out again on decode
SPACE = "[SPACE]"  # explicit space marker; literal spaces are rewritten to this before encoding
SPECIAL_TOKENS = [SOT, EOT, UNK, SPACE, "[PAD]", "[SEP]", "[CLS]", "[MASK]"]
logger = logging.getLogger(__name__)
class EnTokenizer:
    """English text tokenizer backed by a HuggingFace `tokenizers` vocab file."""

    def __init__(self, vocab_file_path):
        self.tokenizer: Tokenizer = Tokenizer.from_file(vocab_file_path)
        self.check_vocabset_sot_eot()

    def check_vocabset_sot_eot(self):
        # Downstream generation relies on explicit start/stop text tokens.
        vocab = self.tokenizer.get_vocab()
        assert SOT in vocab
        assert EOT in vocab

    def text_to_tokens(self, text: str):
        """Encode `text` and return the token ids as a (1, T) int tensor."""
        return torch.IntTensor(self.encode(text)).unsqueeze(0)

    def encode(self, txt: str):
        """
        clean_text > (append `lang_id`) > replace SPACE > encode text using Tokenizer
        """
        return self.tokenizer.encode(txt.replace(' ', SPACE)).ids

    def decode(self, seq):
        """Invert `encode`: drop tokenizer spacing, restore spaces, strip special tokens."""
        if isinstance(seq, torch.Tensor):
            seq = seq.cpu().numpy()
        decoded: str = self.tokenizer.decode(seq, skip_special_tokens=False)
        return (
            decoded.replace(' ', '')
            .replace(SPACE, ' ')
            .replace(EOT, '')
            .replace(UNK, '')
        )
# Model repository
REPO_ID = "ResembleAI/chatterbox"
# Global instances for optional dependencies; each is constructed lazily on
# first use by the corresponding normalization helper below and cached here.
_kakasi = None  # pykakasi converter (Japanese kanji -> hiragana)
_dicta = None  # dicta_onnx diacritizer (Hebrew)
_russian_stresser = None  # russian_text_stresser instance (Russian)
def is_kanji(c: str) -> bool:
    """Return True if *c* lies in the CJK Unified Ideographs range (U+4E00..U+9FFF)."""
    return 0x4E00 <= ord(c) <= 0x9FFF
def is_katakana(c: str) -> bool:
    """Return True if *c* lies in the katakana range U+30A1..U+30FA."""
    return 0x30A1 <= ord(c) <= 0x30FA
def hiragana_normalize(text: str) -> str:
    """Japanese text normalization: converts kanji to hiragana; katakana remains the same.

    The pykakasi converter is imported and constructed lazily on first use and
    cached in the module-level `_kakasi`. If pykakasi is unavailable, the input
    text is returned unchanged.
    """
    global _kakasi
    try:
        if _kakasi is None:
            import pykakasi
            _kakasi = pykakasi.kakasi()
        out = []
        for r in _kakasi.convert(text):
            inp = r['orig']
            hira = r["hira"]
            # Any kanji in the phrase: use the hiragana reading.
            if any(is_kanji(c) for c in inp):
                # Particle-like readings get a protective leading space
                # (safety check guards against an empty reading).
                if hira and hira[0] in ["は", "へ"]:
                    hira = " " + hira
                out.append(hira)
            # All katakana (non-empty): keep the original form.
            elif inp and all(is_katakana(c) for c in inp):
                out.append(r['orig'])
            else:
                out.append(inp)
        normalized_text = "".join(out)
        # Decompose Japanese characters for tokenizer compatibility
        import unicodedata
        return unicodedata.normalize('NFKD', normalized_text)
    except ImportError:
        logger.warning("pykakasi not available - Japanese text processing skipped")
        return text
def add_hebrew_diacritics(text: str) -> str:
    """Hebrew text normalization: adds diacritics to Hebrew text.

    The dicta_onnx model is imported and constructed lazily on first use and
    cached in the module-level `_dicta`. If the optional dependency is missing
    or diacritization fails for any reason, the input is returned unchanged.
    """
    global _dicta
    try:
        if _dicta is None:
            from dicta_onnx import Dicta
            _dicta = Dicta()
        return _dicta.add_diacritics(text)
    except ImportError:
        # Optional dependency not installed: degrade to a no-op.
        logger.warning("dicta_onnx not available - Hebrew text processing skipped")
        return text
    except Exception as e:
        # Best-effort normalization: never let a model failure break encoding.
        logger.warning(f"Hebrew diacritization failed: {e}")
        return text
def korean_normalize(text: str) -> str:
    """Korean text normalization: decompose syllables into Jamo for tokenization."""
    def _split_syllable(ch):
        """Map one precomposed Hangul syllable to its Jamo; pass other chars through."""
        if not ('\uac00' <= ch <= '\ud7af'):
            return ch
        # Standard decomposition: offset = (lead * 21 + vowel) * 28 + tail
        offset = ord(ch) - 0xAC00
        lead_vowel, tail = divmod(offset, 28)
        lead, vowel = divmod(lead_vowel, 21)
        parts = chr(0x1100 + lead) + chr(0x1161 + vowel)
        if tail:
            # Tail consonant is optional (tail == 0 means an open syllable).
            parts += chr(0x11A7 + tail)
        return parts
    # Decompose every syllable, then trim surrounding whitespace.
    return ''.join(_split_syllable(ch) for ch in text).strip()
class ChineseCangjieConverter:
    """Converts Chinese characters to Cangjie codes for tokenization."""

    def __init__(self, model_dir=None):
        # glyph -> cangjie code, and code -> list of glyphs sharing that code
        self.word2cj = {}
        self.cj2word = {}
        self.segmenter = None
        self._load_cangjie_mapping(model_dir)
        self._init_segmenter()

    def _load_cangjie_mapping(self, model_dir=None):
        """Load Cangjie mapping from HuggingFace model repository.

        Failure is non-fatal: conversion then passes glyphs through unchanged.
        """
        try:
            cangjie_file = hf_hub_download(
                repo_id=REPO_ID,
                filename="Cangjie5_TC.json",
                cache_dir=model_dir
            )
            with open(cangjie_file, "r", encoding="utf-8") as fp:
                data = json.load(fp)
            for entry in data:
                # Each entry is "<glyph>\t<code>[\t...]"; extra fields are ignored.
                word, code = entry.split("\t")[:2]
                self.word2cj[word] = code
                self.cj2word.setdefault(code, []).append(word)
        except Exception as e:
            logger.warning(f"Could not load Cangjie mapping: {e}")

    def _init_segmenter(self):
        """Initialize pkuseg segmenter (optional dependency)."""
        try:
            from spacy_pkuseg import pkuseg
            self.segmenter = pkuseg()
        except ImportError:
            logger.warning("pkuseg not available - Chinese segmentation will be skipped")
            self.segmenter = None

    def _cangjie_encode(self, glyph: str):
        """Encode a single Chinese glyph to its Cangjie code.

        Returns:
            The code, suffixed with a disambiguation index when several glyphs
            share it, or None for unmapped glyphs (e.g. Japanese hiragana).
        """
        code = self.word2cj.get(glyph)
        if code is None:  # e.g. Japanese hiragana
            return None
        index = self.cj2word[code].index(glyph)
        # Index 0 is implicit; only homograph collisions get a numeric suffix.
        # (The previous code wrapped the suffix in a redundant second str().)
        return code + (str(index) if index > 0 else "")

    def __call__(self, text):
        """Convert Chinese characters in text to Cangjie tokens."""
        if self.segmenter is not None:
            # Insert spaces at word boundaries before per-glyph conversion.
            full_text = " ".join(self.segmenter.cut(text))
        else:
            full_text = text
        output = []
        for t in full_text:
            # Only "other letter" category chars are candidates for Cangjie codes.
            cangjie = self._cangjie_encode(t) if category(t) == "Lo" else None
            if cangjie is None:
                output.append(t)
            else:
                # One [cj_x] token per code letter, closed by the [cj_.] terminator.
                output.append("".join(f"[cj_{c}]" for c in cangjie) + "[cj_.]")
        return "".join(output)
def add_russian_stress(text: str) -> str:
    """Russian text normalization: adds stress marks to Russian text.

    The stresser is imported and constructed lazily on first use and cached in
    the module-level `_russian_stresser`. If the optional dependency is missing
    or stressing fails for any reason, the input is returned unchanged.
    """
    global _russian_stresser
    try:
        if _russian_stresser is None:
            from russian_text_stresser.text_stresser import RussianTextStresser
            _russian_stresser = RussianTextStresser()
        return _russian_stresser.stress_text(text)
    except ImportError:
        # Optional dependency not installed: degrade to a no-op.
        logger.warning("russian_text_stresser not available - Russian stress labeling skipped")
        return text
    except Exception as e:
        # Best-effort normalization: never let a model failure break encoding.
        logger.warning(f"Russian stress labeling failed: {e}")
        return text
class MTLTokenizer:
    """Multilingual tokenizer: per-language normalization + BPE encoding."""

    def __init__(self, vocab_file_path):
        self.tokenizer: Tokenizer = Tokenizer.from_file(vocab_file_path)
        model_dir = Path(vocab_file_path).parent
        self.cangjie_converter = ChineseCangjieConverter(model_dir)
        self.check_vocabset_sot_eot()

    def check_vocabset_sot_eot(self):
        # Downstream generation relies on explicit start/stop text tokens.
        vocab = self.tokenizer.get_vocab()
        assert SOT in vocab
        assert EOT in vocab

    def preprocess_text(self, raw_text: str, language_id: str = None, lowercase: bool = True, nfkd_normalize: bool = True):
        """
        Text preprocessor that handles lowercase conversion and NFKD normalization.
        """
        processed = raw_text.lower() if lowercase else raw_text
        if nfkd_normalize:
            processed = normalize("NFKD", processed)
        return processed

    def text_to_tokens(self, text: str, language_id: str = None, lowercase: bool = True, nfkd_normalize: bool = True):
        """Encode `text` and return the token ids as a (1, T) int tensor."""
        ids = self.encode(text, language_id=language_id, lowercase=lowercase, nfkd_normalize=nfkd_normalize)
        return torch.IntTensor(ids).unsqueeze(0)

    def encode(self, txt: str, language_id: str = None, lowercase: bool = True, nfkd_normalize: bool = True):
        """Preprocess, apply language-specific normalization, and tokenize."""
        txt = self.preprocess_text(txt, language_id=language_id, lowercase=lowercase, nfkd_normalize=nfkd_normalize)
        # Language-specific text processing, dispatched on the language id.
        normalizers = {
            'zh': self.cangjie_converter,
            'ja': hiragana_normalize,
            'he': add_hebrew_diacritics,
            'ko': korean_normalize,
            'ru': add_russian_stress,
        }
        handler = normalizers.get(language_id)
        if handler is not None:
            txt = handler(txt)
        # Prepend language token
        if language_id:
            txt = f"[{language_id.lower()}]{txt}"
        return self.tokenizer.encode(txt.replace(' ', SPACE)).ids

    def decode(self, seq):
        """Invert `encode`: restore spaces and strip special tokens."""
        if isinstance(seq, torch.Tensor):
            seq = seq.cpu().numpy()
        decoded = self.tokenizer.decode(seq, skip_special_tokens=False)
        return decoded.replace(' ', '').replace(SPACE, ' ').replace(EOT, '').replace(UNK, '')
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/tokenizers/tokenizer.py",
"license": "MIT License",
"lines": 243,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
resemble-ai/chatterbox:src/chatterbox/models/voice_encoder/config.py | class VoiceEncConfig:
num_mels = 40
sample_rate = 16000
speaker_embed_size = 256
ve_hidden_size = 256
flatten_lstm_params = False
n_fft = 400
hop_size = 160
win_size = 400
fmax = 8000
fmin = 0
preemphasis = 0.
mel_power = 2.0
mel_type = "amp"
normalized_mels = False
ve_partial_frames = 160
ve_final_relu = True
stft_magnitude_min = 1e-4
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/voice_encoder/config.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:src/chatterbox/models/voice_encoder/melspec.py | from functools import lru_cache
from scipy import signal
import numpy as np
import librosa
@lru_cache()
def mel_basis(hp):
    """Return the memoized (n_mels, n_freq) mel filterbank matrix for `hp`."""
    # Mel filters are only defined up to the Nyquist frequency.
    assert hp.fmax <= hp.sample_rate // 2
    filterbank_args = dict(
        sr=hp.sample_rate,
        n_fft=hp.n_fft,
        n_mels=hp.num_mels,
        fmin=hp.fmin,
        fmax=hp.fmax,
    )
    return librosa.filters.mel(**filterbank_args)  # -> (nmel, nfreq)
def preemphasis(wav, hp):
    """Apply a first-order pre-emphasis (high-pass) filter and clip to [-1, 1]."""
    coef = hp.preemphasis
    assert coef != 0
    # y[n] = x[n] - coef * x[n-1]
    filtered = signal.lfilter([1, -coef], [1], wav)
    return np.clip(filtered, -1, 1)
def melspectrogram(wav, hp, pad=True):
    """Compute a (num_mels, T) mel spectrogram from a mono waveform in [-1, 1]."""
    # Optional pre-emphasis pass.
    if hp.preemphasis > 0:
        wav = preemphasis(wav, hp)
    # Input must already be (near-)normalized amplitude.
    assert np.abs(wav).max() - 1 < 1e-07
    # Magnitude spectrogram, optionally raised to a power (2.0 -> power spectrum).
    magnitudes = np.abs(_stft(wav, hp, pad=pad))
    if hp.mel_power != 1.0:
        magnitudes **= hp.mel_power
    # Project onto the mel filterbank; optionally convert magnitudes to decibels.
    mel = mel_basis(hp) @ magnitudes
    if hp.mel_type == "db":
        mel = _amp_to_db(mel, hp)
    # Optionally rescale db mels into roughly [0, 1].
    if hp.normalized_mels:
        mel = _normalize(mel, hp).astype(np.float32)
    assert not pad or mel.shape[1] == 1 + len(wav) // hp.hop_size  # Sanity check
    return mel  # (M, T)
def _stft(y, hp, pad=True):
    """Short-time Fourier transform of `y` with the hyperparameters in `hp`."""
    # NOTE: after librosa 0.8 the default pad mode became constant; "reflect" is
    # pinned here for historical consistency and streaming-version consistency.
    return librosa.stft(
        y,
        n_fft=hp.n_fft,
        win_length=hp.win_size,
        hop_length=hp.hop_size,
        center=pad,
        pad_mode="reflect",
    )
def _amp_to_db(x, hp):
return 20 * np.log10(np.maximum(hp.stft_magnitude_min, x))
def _db_to_amp(x):
return np.power(10.0, x * 0.05)
def _normalize(s, hp, headroom_db=15):
min_level_db = 20 * np.log10(hp.stft_magnitude_min)
s = (s - min_level_db) / (-min_level_db + headroom_db)
return s
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/voice_encoder/melspec.py",
"license": "MIT License",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
resemble-ai/chatterbox:src/chatterbox/models/voice_encoder/voice_encoder.py | # Adapted from https://github.com/CorentinJ/Real-Time-Voice-Cloning
# MIT License
from typing import List, Union, Optional
import numpy as np
from numpy.lib.stride_tricks import as_strided
import librosa
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from .config import VoiceEncConfig
from .melspec import melspectrogram
def pack(arrays, seq_len: int=None, pad_value=0):
    """
    Given a list of length B of array-like objects of shapes (Ti, ...), packs them into
    a single (B, T, ...) tensor by right-padding each element with `pad_value`.

    :param arrays: array-like objects with matching shapes except along the first axis.
    :param seq_len: the packed length T; must be >= max(Ti). Defaults to max(Ti).
    :param pad_value: the value to pad the arrays with.
    :return: a (B, T, ...) tensor
    """
    longest = max(len(array) for array in arrays)
    if seq_len is None:
        seq_len = longest
    else:
        assert seq_len >= longest

    # Plain Python lists become numpy arrays first.
    if isinstance(arrays[0], list):
        arrays = [np.array(array) for array in arrays]

    # Keep existing tensors (and their device); wrap everything else on CPU default.
    if isinstance(arrays[0], torch.Tensor):
        tensors, device = arrays, arrays[0].device
    else:
        tensors, device = [torch.as_tensor(array) for array in arrays], None

    # Allocate the padded output, then copy each sequence into its row.
    out_shape = (len(tensors), seq_len, *tensors[0].shape[1:])
    packed_tensor = torch.full(out_shape, pad_value, dtype=tensors[0].dtype, device=device)
    for row, tensor in zip(packed_tensor, tensors):
        row[:tensor.size(0)] = tensor

    return packed_tensor
def get_num_wins(
    n_frames: int,
    step: int,
    min_coverage: float,
    hp: VoiceEncConfig,
):
    """Number of partial windows covering `n_frames`, plus the covered frame count."""
    assert n_frames > 0
    win_size = hp.ve_partial_frames
    n_wins, remainder = divmod(max(n_frames - win_size + step, 0), step)
    # Always emit at least one window; keep a trailing partial window only when
    # it covers a sufficient fraction of a full window.
    tail_coverage = (remainder + (win_size - step)) / win_size
    if n_wins == 0 or tail_coverage >= min_coverage:
        n_wins += 1
    target_n = win_size + step * (n_wins - 1)
    return n_wins, target_n
def get_frame_step(
    overlap: float,
    rate: float,
    hp: VoiceEncConfig,
):
    """Frame hop between two consecutive partial utterances."""
    assert 0 <= overlap < 1
    if rate is None:
        # Hop derived from the requested fractional window overlap.
        raw_step = hp.ve_partial_frames * (1 - overlap)
    else:
        # Hop derived from `rate` — presumably partials per second; TODO confirm.
        raw_step = (hp.sample_rate / rate) / hp.ve_partial_frames
    frame_step = int(np.round(raw_step))
    assert 0 < frame_step <= hp.ve_partial_frames
    return frame_step
def stride_as_partials(
    mel: np.ndarray,
    hp: VoiceEncConfig,
    overlap=0.5,
    rate: float=None,
    min_coverage=0.8,
):
    """
    Takes unscaled mels in (T, M) format and lays them out as overlapping fixed-size
    windows ("partials") of shape (n_partials, ve_partial_frames, num_mels), as a
    zero-copy strided view over a trimmed/zero-padded float32 copy of the input.

    :param mel: mel spectrogram, shape (T, num_mels)
    :param overlap: fraction of window overlap used to derive the hop (when rate is None)
    :param rate: alternative hop specification, forwarded to get_frame_step()
    :param min_coverage: minimum fraction of a full window the trailing partial must
        cover for it to be kept
    :return: a read-mostly view of shape (N, ve_partial_frames, num_mels)
    """
    assert 0 < min_coverage <= 1
    frame_step = get_frame_step(overlap, rate, hp)
    # Compute how many partials can fit in the mel
    n_partials, target_len = get_num_wins(len(mel), frame_step, min_coverage, hp)
    # Trim or pad the mel spectrogram to match the number of partials
    if target_len > len(mel):
        mel = np.concatenate((mel, np.full((target_len - len(mel), hp.num_mels), 0)))
    elif target_len < len(mel):
        mel = mel[:target_len]
    # Ensure the numpy array data is float32 and contiguous in memory
    # (as_strided below assumes C-contiguous row strides).
    mel = mel.astype(np.float32, order="C")
    # Re-arrange the array in memory to be of shape (N, P, M) with partials overlapping
    # eachother, where N is the number of partials, P is the number of frames of each
    # partial and M the number of channels of the mel spectrograms.
    shape = (n_partials, hp.ve_partial_frames, hp.num_mels)
    strides = (mel.strides[0] * frame_step, mel.strides[0], mel.strides[1])
    partials = as_strided(mel, shape, strides)
    return partials
class VoiceEncoder(nn.Module):
    """3-layer LSTM speaker encoder producing L2-normalized utterance/speaker embeddings."""

    def __init__(self, hp=None):
        """
        :param hp: voice-encoder hyperparameters; a fresh VoiceEncConfig() when None.
        """
        super().__init__()
        # Fix: the previous signature used `hp=VoiceEncConfig()`, a mutable default
        # shared by every VoiceEncoder constructed without an explicit config.
        self.hp = hp if hp is not None else VoiceEncConfig()

        # Network definition
        self.lstm = nn.LSTM(self.hp.num_mels, self.hp.ve_hidden_size, num_layers=3, batch_first=True)
        if self.hp.flatten_lstm_params:
            self.lstm.flatten_parameters()
        self.proj = nn.Linear(self.hp.ve_hidden_size, self.hp.speaker_embed_size)

        # Cosine similarity scaling (fixed initial parameter values)
        self.similarity_weight = nn.Parameter(torch.tensor([10.]), requires_grad=True)
        self.similarity_bias = nn.Parameter(torch.tensor([-5.]), requires_grad=True)

    @property
    def device(self):
        # Device of the module's parameters (assumes all on one device).
        return next(self.parameters()).device

    def forward(self, mels: torch.FloatTensor):
        """
        Computes the embeddings of a batch of partial utterances.

        :param mels: a batch of unscaled mel spectrograms of same duration as a float32 tensor
            of shape (B, T, M) where T is hp.ve_partial_frames
        :return: the embeddings as a float32 tensor of shape (B, E) where E is
            hp.speaker_embed_size. Embeddings are L2-normed and thus lay in the range [-1, 1].
        """
        if self.hp.normalized_mels and (mels.min() < 0 or mels.max() > 1):
            raise Exception(f"Mels outside [0, 1]. Min={mels.min()}, Max={mels.max()}")

        # Pass the input through the LSTM layers; keep only the last layer's final state.
        _, (hidden, _) = self.lstm(mels)

        # Project the final hidden state
        raw_embeds = self.proj(hidden[-1])
        if self.hp.ve_final_relu:
            raw_embeds = F.relu(raw_embeds)

        # L2 normalize the embeddings.
        return raw_embeds / torch.linalg.norm(raw_embeds, dim=1, keepdim=True)

    def inference(self, mels: torch.Tensor, mel_lens, overlap=0.5, rate: float=None, min_coverage=0.8, batch_size=None):
        """
        Computes the embeddings of a batch of full utterances with gradients.

        :param mels: (B, T, M) unscaled mels
        :param mel_lens: individual mel lengths (list or tensor)
        :param batch_size: chunk size for forwarding partials; None forwards all at once
        :return: (B, E) embeddings on CPU
        """
        mel_lens = mel_lens.tolist() if torch.is_tensor(mel_lens) else mel_lens

        # Compute where to split the utterances into partials
        frame_step = get_frame_step(overlap, rate, self.hp)
        n_partials, target_lens = zip(*(get_num_wins(l, frame_step, min_coverage, self.hp) for l in mel_lens))

        # Possibly pad the mels to reach the target lengths
        len_diff = max(target_lens) - mels.size(1)
        if len_diff > 0:
            pad = torch.full((mels.size(0), len_diff, self.hp.num_mels), 0, dtype=torch.float32)
            mels = torch.cat((mels, pad.to(mels.device)), dim=1)

        # Group all partials together so that we can batch them easily
        partials = [
            mel[i * frame_step: i * frame_step + self.hp.ve_partial_frames]
            for mel, n_partial in zip(mels, n_partials) for i in range(n_partial)
        ]
        assert all(partials[0].shape == partial.shape for partial in partials)
        partials = torch.stack(partials)

        # Forward the partials in chunks to bound memory use.
        n_chunks = int(np.ceil(len(partials) / (batch_size or len(partials))))
        partial_embeds = torch.cat([self(batch) for batch in partials.chunk(n_chunks)], dim=0).cpu()

        # Reduce the partial embeds into full embeds and L2-normalize them
        slices = np.concatenate(([0], np.cumsum(n_partials)))
        raw_embeds = [torch.mean(partial_embeds[start:end], dim=0) for start, end in zip(slices[:-1], slices[1:])]
        raw_embeds = torch.stack(raw_embeds)
        embeds = raw_embeds / torch.linalg.norm(raw_embeds, dim=1, keepdim=True)

        return embeds

    @staticmethod
    def utt_to_spk_embed(utt_embeds: np.ndarray):
        """
        Takes an array of L2-normalized utterance embeddings, computes the mean embedding and L2-normalize it to get a
        speaker embedding.
        """
        assert utt_embeds.ndim == 2
        utt_embeds = np.mean(utt_embeds, axis=0)
        return utt_embeds / np.linalg.norm(utt_embeds, 2)

    @staticmethod
    def voice_similarity(embeds_x: np.ndarray, embeds_y: np.ndarray):
        """
        Cosine similarity for L2-normalized utterance embeddings or speaker embeddings
        """
        embeds_x = embeds_x if embeds_x.ndim == 1 else VoiceEncoder.utt_to_spk_embed(embeds_x)
        embeds_y = embeds_y if embeds_y.ndim == 1 else VoiceEncoder.utt_to_spk_embed(embeds_y)
        return embeds_x @ embeds_y

    def embeds_from_mels(
        self, mels: Union[Tensor, List[np.ndarray]], mel_lens=None, as_spk=False, batch_size=32, **kwargs
    ):
        """
        Convenience function for deriving utterance or speaker embeddings from mel spectrograms.

        :param mels: unscaled mels strictly within [0, 1] as either a (B, T, M) tensor or a list of (Ti, M) arrays.
        :param mel_lens: if passing mels as a tensor, individual mel lengths
        :param as_spk: whether to return utterance embeddings or a single speaker embedding
        :param kwargs: args for inference()
        :returns: embeds as a (B, E) float32 numpy array if <as_spk> is False, else as a (E,) array
        """
        # Load mels in memory and pack them
        if isinstance(mels, List):
            mels = [np.asarray(mel) for mel in mels]
            assert all(m.shape[1] == mels[0].shape[1] for m in mels), "Mels aren't in (B, T, M) format"
            mel_lens = [mel.shape[0] for mel in mels]
            mels = pack(mels)

        # Embed them
        with torch.inference_mode():
            utt_embeds = self.inference(mels.to(self.device), mel_lens, batch_size=batch_size, **kwargs).numpy()

        return self.utt_to_spk_embed(utt_embeds) if as_spk else utt_embeds

    def embeds_from_wavs(
        self,
        wavs: List[np.ndarray],
        sample_rate,
        as_spk=False,
        batch_size=32,
        trim_top_db: Optional[float]=20,
        **kwargs
    ):
        """
        Wrapper around embeds_from_mels

        :param trim_top_db: this argument was only added for the sake of compatibility with metavoice's implementation
        """
        if sample_rate != self.hp.sample_rate:
            # Resample everything to the encoder's expected sample rate.
            wavs = [
                librosa.resample(wav, orig_sr=sample_rate, target_sr=self.hp.sample_rate, res_type="kaiser_fast")
                for wav in wavs
            ]

        if trim_top_db:
            # Trim leading/trailing silence below the given threshold.
            wavs = [librosa.effects.trim(wav, top_db=trim_top_db)[0] for wav in wavs]

        if "rate" not in kwargs:
            kwargs["rate"] = 1.3  # Resemble's default value.

        mels = [melspectrogram(w, self.hp).T for w in wavs]
        return self.embeds_from_mels(mels, as_spk=as_spk, batch_size=batch_size, **kwargs)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/models/voice_encoder/voice_encoder.py",
"license": "MIT License",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | license |
resemble-ai/chatterbox:src/chatterbox/tts.py | from dataclasses import dataclass
from pathlib import Path
import librosa
import torch
import perth
import torch.nn.functional as F
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
from .models.t3 import T3
from .models.s3tokenizer import S3_SR, drop_invalid_tokens
from .models.s3gen import S3GEN_SR, S3Gen
from .models.tokenizers import EnTokenizer
from .models.voice_encoder import VoiceEncoder
from .models.t3.modules.cond_enc import T3Cond
REPO_ID = "ResembleAI/chatterbox"
def punc_norm(text: str) -> str:
    """
    Quick cleanup func for punctuation from LLMs or
    containing chars not seen often in the dataset
    """
    if not text:
        return "You need to add some text for me to talk."

    # Capitalise first letter
    if text[0].islower():
        text = text[0].upper() + text[1:]

    # Collapse runs of whitespace into single spaces
    text = " ".join(text.split())

    # Replace uncommon/llm punc
    replacements = (
        ("...", ", "),
        ("…", ", "),
        (":", ","),
        (" - ", ", "),
        (";", ", "),
        ("—", "-"),
        ("–", "-"),
        (" ,", ","),
        ("“", "\""),
        ("”", "\""),
        ("‘", "'"),
        ("’", "'"),
    )
    for needle, replacement in replacements:
        text = text.replace(needle, replacement)

    # Add full stop if no ending punc
    text = text.rstrip(" ")
    if not text.endswith((".", "!", "?", "-", ",")):
        text += "."

    return text
@dataclass
class Conditionals:
    """
    Conditionals for T3 and S3Gen
    - T3 conditionals:
        - speaker_emb
        - clap_emb
        - cond_prompt_speech_tokens
        - cond_prompt_speech_emb
        - emotion_adv
    - S3Gen conditionals:
        - prompt_token
        - prompt_token_len
        - prompt_feat
        - prompt_feat_len
        - embedding
    """
    t3: T3Cond
    gen: dict

    def to(self, device):
        """Move the T3 cond and every tensor in `gen` to `device`; returns self."""
        self.t3 = self.t3.to(device=device)
        for key, value in self.gen.items():
            # Non-tensor entries (lengths, metadata) are left untouched.
            if torch.is_tensor(value):
                self.gen[key] = value.to(device=device)
        return self

    def save(self, fpath: Path):
        """Serialize both conditional groups into a single torch checkpoint."""
        torch.save({"t3": self.t3.__dict__, "gen": self.gen}, fpath)

    @classmethod
    def load(cls, fpath, map_location="cpu"):
        """Inverse of `save`; `map_location` may be a device string or torch.device."""
        if isinstance(map_location, str):
            map_location = torch.device(map_location)
        kwargs = torch.load(fpath, map_location=map_location, weights_only=True)
        return cls(T3Cond(**kwargs['t3']), kwargs['gen'])
class ChatterboxTTS:
    """End-to-end TTS pipeline: text -> T3 speech tokens -> S3Gen waveform, watermarked."""

    # Reference-audio conditioning lengths, in samples at each model's rate.
    ENC_COND_LEN = 6 * S3_SR
    DEC_COND_LEN = 10 * S3GEN_SR

    def __init__(
        self,
        t3: T3,
        s3gen: S3Gen,
        ve: VoiceEncoder,
        tokenizer: EnTokenizer,
        device: str,
        conds: Conditionals = None,
    ):
        """Wire together pre-built submodels; `conds` is an optional built-in voice."""
        self.sr = S3GEN_SR  # sample rate of synthesized audio
        self.t3 = t3
        self.s3gen = s3gen
        self.ve = ve
        self.tokenizer = tokenizer
        self.device = device
        self.conds = conds
        # Every generated waveform is watermarked before being returned.
        self.watermarker = perth.PerthImplicitWatermarker()

    @classmethod
    def from_local(cls, ckpt_dir, device) -> 'ChatterboxTTS':
        """Load all submodels from checkpoint files in `ckpt_dir` and move to `device`."""
        ckpt_dir = Path(ckpt_dir)

        # Always load to CPU first for non-CUDA devices to handle CUDA-saved models
        if device in ["cpu", "mps"]:
            map_location = torch.device('cpu')
        else:
            map_location = None

        ve = VoiceEncoder()
        ve.load_state_dict(
            load_file(ckpt_dir / "ve.safetensors")
        )
        ve.to(device).eval()

        t3 = T3()
        t3_state = load_file(ckpt_dir / "t3_cfg.safetensors")
        # Some checkpoints nest the state dict under a "model" key.
        if "model" in t3_state.keys():
            t3_state = t3_state["model"][0]
        t3.load_state_dict(t3_state)
        t3.to(device).eval()

        s3gen = S3Gen()
        s3gen.load_state_dict(
            load_file(ckpt_dir / "s3gen.safetensors"), strict=False
        )
        s3gen.to(device).eval()

        tokenizer = EnTokenizer(
            str(ckpt_dir / "tokenizer.json")
        )

        conds = None
        # Optional built-in voice conditionals shipped alongside the checkpoint.
        if (builtin_voice := ckpt_dir / "conds.pt").exists():
            conds = Conditionals.load(builtin_voice, map_location=map_location).to(device)

        return cls(t3, s3gen, ve, tokenizer, device, conds=conds)

    @classmethod
    def from_pretrained(cls, device) -> 'ChatterboxTTS':
        """Download checkpoint files from the HF hub, then delegate to `from_local`."""
        # Check if MPS is available on macOS
        if device == "mps" and not torch.backends.mps.is_available():
            if not torch.backends.mps.is_built():
                print("MPS not available because the current PyTorch install was not built with MPS enabled.")
            else:
                print("MPS not available because the current MacOS version is not 12.3+ and/or you do not have an MPS-enabled device on this machine.")
            device = "cpu"

        # All files land in the same HF cache directory; the last path's parent is reused.
        for fpath in ["ve.safetensors", "t3_cfg.safetensors", "s3gen.safetensors", "tokenizer.json", "conds.pt"]:
            local_path = hf_hub_download(repo_id=REPO_ID, filename=fpath)

        return cls.from_local(Path(local_path).parent, device)

    def prepare_conditionals(self, wav_fpath, exaggeration=0.5):
        """Build and cache T3/S3Gen conditioning from a reference wav.

        :param wav_fpath: path to the reference audio file
        :param exaggeration: emotion-exaggeration scalar stored in the T3 cond
        """
        ## Load reference wav
        s3gen_ref_wav, _sr = librosa.load(wav_fpath, sr=S3GEN_SR)

        # 16 kHz copy for the tokenizer and the voice encoder.
        ref_16k_wav = librosa.resample(s3gen_ref_wav, orig_sr=S3GEN_SR, target_sr=S3_SR)

        s3gen_ref_wav = s3gen_ref_wav[:self.DEC_COND_LEN]
        s3gen_ref_dict = self.s3gen.embed_ref(s3gen_ref_wav, S3GEN_SR, device=self.device)

        # Speech cond prompt tokens
        if plen := self.t3.hp.speech_cond_prompt_len:
            s3_tokzr = self.s3gen.tokenizer
            t3_cond_prompt_tokens, _ = s3_tokzr.forward([ref_16k_wav[:self.ENC_COND_LEN]], max_len=plen)
            t3_cond_prompt_tokens = torch.atleast_2d(t3_cond_prompt_tokens).to(self.device)

        # Voice-encoder speaker embedding
        ve_embed = torch.from_numpy(self.ve.embeds_from_wavs([ref_16k_wav], sample_rate=S3_SR))
        ve_embed = ve_embed.mean(axis=0, keepdim=True).to(self.device)

        t3_cond = T3Cond(
            speaker_emb=ve_embed,
            cond_prompt_speech_tokens=t3_cond_prompt_tokens,
            emotion_adv=exaggeration * torch.ones(1, 1, 1),
        ).to(device=self.device)
        self.conds = Conditionals(t3_cond, s3gen_ref_dict)

    def generate(
        self,
        text,
        repetition_penalty=1.2,
        min_p=0.05,
        top_p=1.0,
        audio_prompt_path=None,
        exaggeration=0.5,
        cfg_weight=0.5,
        temperature=0.8,
    ):
        """Synthesize `text` and return a (1, N) float waveform tensor at `self.sr`.

        When `audio_prompt_path` is given, conditionals are (re)built from it;
        otherwise `prepare_conditionals` must have been called (or a built-in
        voice loaded). Sampling knobs are forwarded to `T3.inference`.
        """
        if audio_prompt_path:
            self.prepare_conditionals(audio_prompt_path, exaggeration=exaggeration)
        else:
            assert self.conds is not None, "Please `prepare_conditionals` first or specify `audio_prompt_path`"

        # Update exaggeration if needed
        if exaggeration != self.conds.t3.emotion_adv[0, 0, 0]:
            _cond: T3Cond = self.conds.t3
            self.conds.t3 = T3Cond(
                speaker_emb=_cond.speaker_emb,
                cond_prompt_speech_tokens=_cond.cond_prompt_speech_tokens,
                emotion_adv=exaggeration * torch.ones(1, 1, 1),
            ).to(device=self.device)

        # Norm and tokenize text
        text = punc_norm(text)
        text_tokens = self.tokenizer.text_to_tokens(text).to(self.device)

        if cfg_weight > 0.0:
            text_tokens = torch.cat([text_tokens, text_tokens], dim=0)  # Need two seqs for CFG

        sot = self.t3.hp.start_text_token
        eot = self.t3.hp.stop_text_token
        text_tokens = F.pad(text_tokens, (1, 0), value=sot)
        text_tokens = F.pad(text_tokens, (0, 1), value=eot)

        with torch.inference_mode():
            speech_tokens = self.t3.inference(
                t3_cond=self.conds.t3,
                text_tokens=text_tokens,
                max_new_tokens=1000,  # TODO: use the value in config
                temperature=temperature,
                cfg_weight=cfg_weight,
                repetition_penalty=repetition_penalty,
                min_p=min_p,
                top_p=top_p,
            )
            # Extract only the conditional batch.
            speech_tokens = speech_tokens[0]

            # TODO: output becomes 1D
            speech_tokens = drop_invalid_tokens(speech_tokens)
            # NOTE(review): 6561 looks like the S3 codebook size (3^8); confirm.
            speech_tokens = speech_tokens[speech_tokens < 6561]
            speech_tokens = speech_tokens.to(self.device)

            wav, _ = self.s3gen.inference(
                speech_tokens=speech_tokens,
                ref_dict=self.conds.gen,
            )
            wav = wav.squeeze(0).detach().cpu().numpy()
            watermarked_wav = self.watermarker.apply_watermark(wav, sample_rate=self.sr)
        return torch.from_numpy(watermarked_wav).unsqueeze(0)
| {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/tts.py",
"license": "MIT License",
"lines": 226,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
resemble-ai/chatterbox:src/chatterbox/vc.py | from pathlib import Path
import librosa
import torch
import perth
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
from .models.s3tokenizer import S3_SR
from .models.s3gen import S3GEN_SR, S3Gen
REPO_ID = "ResembleAI/chatterbox"
class ChatterboxVC:
ENC_COND_LEN = 6 * S3_SR
DEC_COND_LEN = 10 * S3GEN_SR
def __init__(
self,
s3gen: S3Gen,
device: str,
ref_dict: dict=None,
):
self.sr = S3GEN_SR
self.s3gen = s3gen
self.device = device
self.watermarker = perth.PerthImplicitWatermarker()
if ref_dict is None:
self.ref_dict = None
else:
self.ref_dict = {
k: v.to(device) if torch.is_tensor(v) else v
for k, v in ref_dict.items()
}
@classmethod
def from_local(cls, ckpt_dir, device) -> 'ChatterboxVC':
ckpt_dir = Path(ckpt_dir)
# Always load to CPU first for non-CUDA devices to handle CUDA-saved models
if device in ["cpu", "mps"]:
map_location = torch.device('cpu')
else:
map_location = None
ref_dict = None
if (builtin_voice := ckpt_dir / "conds.pt").exists():
states = torch.load(builtin_voice, map_location=map_location)
ref_dict = states['gen']
s3gen = S3Gen()
s3gen.load_state_dict(
load_file(ckpt_dir / "s3gen.safetensors"), strict=False
)
s3gen.to(device).eval()
return cls(s3gen, device, ref_dict=ref_dict)
@classmethod
def from_pretrained(cls, device) -> 'ChatterboxVC':
# Check if MPS is available on macOS
if device == "mps" and not torch.backends.mps.is_available():
if not torch.backends.mps.is_built():
print("MPS not available because the current PyTorch install was not built with MPS enabled.")
else:
print("MPS not available because the current MacOS version is not 12.3+ and/or you do not have an MPS-enabled device on this machine.")
device = "cpu"
for fpath in ["s3gen.safetensors", "conds.pt"]:
local_path = hf_hub_download(repo_id=REPO_ID, filename=fpath)
return cls.from_local(Path(local_path).parent, device)
def set_target_voice(self, wav_fpath):
## Load reference wav
s3gen_ref_wav, _sr = librosa.load(wav_fpath, sr=S3GEN_SR)
s3gen_ref_wav = s3gen_ref_wav[:self.DEC_COND_LEN]
self.ref_dict = self.s3gen.embed_ref(s3gen_ref_wav, S3GEN_SR, device=self.device)
def generate(
self,
audio,
target_voice_path=None,
):
if target_voice_path:
self.set_target_voice(target_voice_path)
else:
assert self.ref_dict is not None, "Please `prepare_conditionals` first or specify `target_voice_path`"
with torch.inference_mode():
audio_16, _ = librosa.load(audio, sr=S3_SR)
audio_16 = torch.from_numpy(audio_16).float().to(self.device)[None, ]
s3_tokens, _ = self.s3gen.tokenizer(audio_16)
wav, _ = self.s3gen.inference(
speech_tokens=s3_tokens,
ref_dict=self.ref_dict,
)
wav = wav.squeeze(0).detach().cpu().numpy()
watermarked_wav = self.watermarker.apply_watermark(wav, sample_rate=self.sr)
return torch.from_numpy(watermarked_wav).unsqueeze(0) | {
"repo_id": "resemble-ai/chatterbox",
"file_path": "src/chatterbox/vc.py",
"license": "MIT License",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
roboflow/supervision:src/supervision/utils/logger.py | from __future__ import annotations
import logging
import os
import sys
def _get_logger(name: str = "supervision", level: int | None = None) -> logging.Logger:
    """Create (or fetch) a logger that splits output between stdout and stderr.

    DEBUG and INFO records are routed to stdout, while WARNING and above go
    to stderr. Handler setup happens only once per logger name: when the
    logger already carries handlers, only its level is refreshed. The
    effective level comes from ``level`` when given, otherwise from the
    ``LOG_LEVEL`` environment variable (falling back to ``INFO``).

    Args:
        name: The name of the logger. Defaults to `"supervision"`.
        level: The logging level to set. If `None`, uses the `LOG_LEVEL`
            environment variable, defaulting to `INFO` if not set.

    Returns:
        A configured `logging.Logger` instance.

    Example:
        ```python
        from supervision.utils.logger import _get_logger

        logger = _get_logger(__name__)
        logger.info("Processing started")
        logger.warning("File not found, using default")
        ```
    """
    effective_level = level
    if effective_level is None:
        env_level_name = os.getenv("LOG_LEVEL", "INFO").upper()
        # Unknown names silently fall back to INFO.
        effective_level = getattr(logging, env_level_name, logging.INFO)

    logger = logging.getLogger(name)
    logger.setLevel(effective_level)

    if logger.handlers:
        # Already configured by an earlier call - never stack handlers.
        return logger

    formatter = logging.Formatter(
        "[%(asctime)s] [%(levelname)s] %(name)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )

    out_handler = logging.StreamHandler(sys.stdout)
    out_handler.setLevel(logging.DEBUG)
    # Keep WARNING+ off stdout; those records are routed to stderr below.
    out_handler.addFilter(lambda record: record.levelno <= logging.INFO)
    out_handler.setFormatter(formatter)

    err_handler = logging.StreamHandler(sys.stderr)
    err_handler.setLevel(logging.WARNING)
    err_handler.setFormatter(formatter)

    logger.addHandler(out_handler)
    logger.addHandler(err_handler)
    # Prevent double emission through the root logger.
    logger.propagate = False
    return logger
| {
"repo_id": "roboflow/supervision",
"file_path": "src/supervision/utils/logger.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
roboflow/supervision:tests/utils/test_logger.py | from __future__ import annotations
import logging
import sys
from unittest.mock import patch
from supervision.utils.logger import _get_logger
class TestGetLogger:
    """Behavioral tests for `_get_logger` (naming, levels, handlers, idempotence)."""

    def test_default_name(self) -> None:
        """Logger is created with default name."""
        logger = _get_logger()
        assert logger.name == "supervision"

    def test_custom_name(self) -> None:
        """Logger is created with provided name."""
        logger = _get_logger("supervision.test_module")
        assert logger.name == "supervision.test_module"

    def test_default_level_is_info(self) -> None:
        """Logger defaults to INFO level when LOG_LEVEL env var is not set."""
        with patch.dict("os.environ", {}, clear=True):
            # Use a unique name to avoid cached logger state from other tests
            logger = _get_logger("supervision.test_default_level")
            assert logger.level == logging.INFO

    def test_explicit_level(self) -> None:
        """Logger uses the explicitly provided level."""
        logger = _get_logger("supervision.test_explicit_level", level=logging.DEBUG)
        assert logger.level == logging.DEBUG

    def test_log_level_env_var(self) -> None:
        """Logger respects the LOG_LEVEL environment variable."""
        with patch.dict("os.environ", {"LOG_LEVEL": "DEBUG"}):
            logger = _get_logger("supervision.test_env_level")
            assert logger.level == logging.DEBUG

    def test_log_level_env_var_warning(self) -> None:
        """Logger respects the LOG_LEVEL=WARNING environment variable."""
        with patch.dict("os.environ", {"LOG_LEVEL": "WARNING"}):
            logger = _get_logger("supervision.test_env_warning")
            assert logger.level == logging.WARNING

    def test_two_handlers_configured(self) -> None:
        """Logger has exactly two handlers: one for stdout, one for stderr."""
        logger = _get_logger("supervision.test_handlers")
        assert len(logger.handlers) == 2

    def test_stdout_handler_present(self) -> None:
        """Logger has a StreamHandler pointing to stdout."""
        logger = _get_logger("supervision.test_stdout")
        stdout_handlers = [
            h
            for h in logger.handlers
            if isinstance(h, logging.StreamHandler) and h.stream is sys.stdout
        ]
        assert len(stdout_handlers) == 1

    def test_stderr_handler_present(self) -> None:
        """Logger has a StreamHandler pointing to stderr for warnings."""
        logger = _get_logger("supervision.test_stderr")
        stderr_handlers = [
            h
            for h in logger.handlers
            if isinstance(h, logging.StreamHandler) and h.stream is sys.stderr
        ]
        assert len(stderr_handlers) == 1

    def test_no_propagation(self) -> None:
        """Logger does not propagate to the root logger."""
        logger = _get_logger("supervision.test_propagation")
        assert not logger.propagate

    def test_idempotent_no_duplicate_handlers(self) -> None:
        """Calling _get_logger twice with the same name does not add extra handlers."""
        name = "supervision.test_idempotent"
        logger1 = _get_logger(name)
        handler_count = len(logger1.handlers)
        # Second call must return the same (cached) logger untouched.
        logger2 = _get_logger(name)
        assert len(logger2.handlers) == handler_count
        assert logger1 is logger2
| {
"repo_id": "roboflow/supervision",
"file_path": "tests/utils/test_logger.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
roboflow/supervision:tests/metrics/test_mean_average_recall.py | import numpy as np
import pytest
from supervision.detection.core import Detections
from supervision.metrics import MeanAverageRecall, MetricTarget
@pytest.fixture
def complex_scenario_targets():
"""
Ground truth for complex multi-image scenario.
15 images with varying object counts and classes.
Total: class_0=17, class_1=19 objects.
"""
return [
# img 0 (2 GT: c0, c1)
np.array(
[
[100, 120, 260, 400, 1.0, 0],
[500, 200, 760, 640, 1.0, 1],
],
dtype=np.float32,
),
# img 1 (3 GT: c0, c0, c1)
np.array(
[
[50, 60, 180, 300, 1.0, 0],
[210, 70, 340, 310, 1.0, 0],
[400, 90, 620, 360, 1.0, 1],
],
dtype=np.float32,
),
# img 2 (1 GT: c1)
np.array(
[
[320, 200, 540, 520, 1.0, 1],
],
dtype=np.float32,
),
# img 3 (4 GT: c0, c1, c0, c1)
np.array(
[
[100, 100, 240, 340, 1.0, 0],
[260, 110, 410, 350, 1.0, 1],
[430, 120, 580, 360, 1.0, 0],
[600, 130, 760, 370, 1.0, 1],
],
dtype=np.float32,
),
# img 4 (2 GT: c0, c0)
np.array(
[
[120, 400, 260, 700, 1.0, 0],
[300, 420, 480, 720, 1.0, 0],
],
dtype=np.float32,
),
# img 5 (3 GT: c1, c1, c1)
np.array(
[
[50, 50, 200, 260, 1.0, 1],
[230, 60, 380, 270, 1.0, 1],
[410, 70, 560, 280, 1.0, 1],
],
dtype=np.float32,
),
# img 6 (1 GT: c0)
np.array(
[
[600, 60, 780, 300, 1.0, 0],
],
dtype=np.float32,
),
# img 7 (5 GT: c0, c1, c1, c0, c1)
np.array(
[
[60, 360, 180, 600, 1.0, 0],
[200, 350, 340, 590, 1.0, 1],
[360, 340, 500, 580, 1.0, 1],
[520, 330, 660, 570, 1.0, 0],
[680, 320, 820, 560, 1.0, 1],
],
dtype=np.float32,
),
# img 8 (2 GT: c1, c1)
np.array(
[
[100, 100, 220, 300, 1.0, 1],
[260, 110, 380, 310, 1.0, 1],
],
dtype=np.float32,
),
# img 9 (1 GT: c0)
np.array(
[
[420, 400, 600, 700, 1.0, 0],
],
dtype=np.float32,
),
# img 10 (4 GT: c0, c1, c1, c0)
np.array(
[
[50, 500, 180, 760, 1.0, 0],
[200, 500, 350, 760, 1.0, 1],
[370, 500, 520, 760, 1.0, 1],
[540, 500, 690, 760, 1.0, 0],
],
dtype=np.float32,
),
# img 11 (2 GT: c1, c0)
np.array(
[
[150, 150, 300, 420, 1.0, 1],
[330, 160, 480, 430, 1.0, 0],
],
dtype=np.float32,
),
# img 12 (3 GT: c0, c1, c1)
np.array(
[
[600, 200, 760, 460, 1.0, 0],
[100, 220, 240, 480, 1.0, 1],
[260, 230, 400, 490, 1.0, 1],
],
dtype=np.float32,
),
# img 13 (1 GT: c0)
np.array(
[
[50, 50, 190, 250, 1.0, 0],
],
dtype=np.float32,
),
# img 14 (2 GT: c1, c0)
np.array(
[
[420, 80, 560, 300, 1.0, 1],
[580, 90, 730, 310, 1.0, 0],
],
dtype=np.float32,
),
]
@pytest.fixture
def complex_scenario_predictions():
"""
Predictions for complex multi-image scenario.
15 images with varying detection quality:
- True positives, false positives, false negatives
- Class mismatches and IoU variations
- Different confidence levels
"""
return [
# img 0: 2 TP + 1 class mismatch FP
np.array(
[
[102, 118, 258, 398, 0.94, 0], # TP (c0)
[500, 200, 760, 640, 0.90, 1], # TP (c1)
[100, 120, 260, 400, 0.55, 1], # FP (class mismatch)
],
dtype=np.float32,
),
# img 1: TPs for two c0, miss c1 (FN) + background FP
np.array(
[
[50, 60, 180, 300, 0.91, 0], # TP (c0)
[210, 70, 340, 310, 0.88, 0], # TP (c0)
[600, 400, 720, 560, 0.42, 1], # FP (no GT nearby)
],
dtype=np.float32,
),
# img 2: Low-IoU (miss) + random FP
np.array(
[
[300, 180, 500, 430, 0.83, 1], # Low IoU (shifted, suppose < threshold)
[50, 50, 140, 140, 0.30, 0], # FP
],
dtype=np.float32,
),
# img 3: Only match two (others FN) + one mismatch
np.array(
[
[100, 100, 240, 340, 0.90, 0], # TP (c0)
[260, 110, 410, 350, 0.87, 1], # TP (c1)
[430, 120, 580, 360, 0.70, 1], # FP (class mismatch; GT is c0)
],
dtype=np.float32,
),
# img 4: No predictions (2 FN)
np.array([], dtype=np.float32).reshape(0, 6),
# img 5: All three matched + class mismatch
np.array(
[
[50, 50, 200, 260, 0.95, 1], # TP (c1)
[230, 60, 380, 270, 0.92, 1], # TP (c1)
[410, 70, 560, 280, 0.90, 1], # TP (c1)
[50, 50, 200, 260, 0.40, 0], # FP (class mismatch)
],
dtype=np.float32,
),
# img 6: Wrong class over GT (0 recall)
np.array(
[
[600, 60, 780, 300, 0.89, 1], # FP (class mismatch)
],
dtype=np.float32,
),
# img 7: 3 TP, 1 miss (only 3/5 recalled)
np.array(
[
[60, 360, 180, 600, 0.93, 0], # TP (c0)
[200, 350, 340, 590, 0.90, 1], # TP (c1)
[360, 340, 500, 580, 0.88, 1], # TP (c1)
[520, 330, 660, 570, 0.50, 1], # FP (class mismatch; GT is c0)
],
dtype=np.float32,
),
# img 8: 2 TP
np.array(
[
[100, 100, 220, 300, 0.96, 1], # TP
[262, 112, 378, 308, 0.89, 1], # TP
],
dtype=np.float32,
),
# img 9: 1 TP + 1 FP
np.array(
[
[418, 398, 602, 702, 0.86, 0], # TP
[100, 100, 140, 160, 0.33, 1], # FP
],
dtype=np.float32,
),
# img 10: Perfect (all 4 TP)
np.array(
[
[50, 500, 180, 760, 0.94, 0], # TP
[200, 500, 350, 760, 0.93, 1], # TP
[370, 500, 520, 760, 0.92, 1], # TP
[540, 500, 690, 760, 0.91, 0], # TP
],
dtype=np.float32,
),
# img 11: 1 TP, 1 low IoU (FN remains) + FP
np.array(
[
[150, 150, 300, 420, 0.90, 1], # TP (c1)
[
332,
162,
478,
428,
0.58,
0,
], # TP? (slight shift) treat as TP if IoU high enough; assume OK
[148, 148, 298, 415, 0.52, 0], # FP (class mismatch over c1)
],
dtype=np.float32,
),
# img 12: 2 TP + 1 miss (one c1 missed)
np.array(
[
[600, 200, 760, 460, 0.92, 0], # TP
[100, 220, 240, 480, 0.90, 1], # TP
[260, 230, 400, 490, 0.40, 0], # FP (class mismatch; GT is c1)
],
dtype=np.float32,
),
# img 13: No predictions (1 FN)
np.array([], dtype=np.float32).reshape(0, 6),
# img 14: Class swapped (0 recall) + one correct + one FP
np.array(
[
[420, 80, 560, 300, 0.88, 0], # FP (class mismatch; GT is c1)
[580, 90, 730, 310, 0.86, 1], # FP (class mismatch; GT is c0)
],
dtype=np.float32,
),
]
@pytest.fixture
def two_class_two_image_detections():
    """
    Scenario: 2 images with 2 classes with varying confidence levels.

    Tests that `mAR @ K` limits per image (not per class) by creating a case where
    the highest confidence detection differs between images.

    Returns:
        tuple: `(predictions, targets)`
        - Image 1: `class_0` (conf=0.9) > `class_1` (conf=0.8)
        - Image 2: `class_1` (conf=0.95) > `class_0` (conf=0.7)
    """
    # Both images share identical GT boxes: one object of each class.
    targets = [
        Detections(
            xyxy=np.array([[10, 10, 50, 50], [60, 60, 100, 100]], dtype=np.float32),
            class_id=np.array([0, 1], dtype=np.int32),
        ),
        Detections(
            xyxy=np.array([[10, 10, 50, 50], [60, 60, 100, 100]], dtype=np.float32),
            class_id=np.array([0, 1], dtype=np.int32),
        ),
    ]
    # Predictions match GT exactly; only the confidence ordering differs per image.
    predictions = [
        Detections(
            xyxy=np.array([[10, 10, 50, 50], [60, 60, 100, 100]], dtype=np.float32),
            confidence=np.array([0.9, 0.8], dtype=np.float32),
            class_id=np.array([0, 1], dtype=np.int32),
        ),
        Detections(
            xyxy=np.array([[10, 10, 50, 50], [60, 60, 100, 100]], dtype=np.float32),
            confidence=np.array([0.7, 0.95], dtype=np.float32),
            class_id=np.array([0, 1], dtype=np.int32),
        ),
    ]
    return predictions, targets
@pytest.fixture
def three_class_single_image_detections():
    """
    Scenario: 1 image with 3 classes - explicit bug reproduction.

    Demonstrates the N x K vs K issue: with 3 classes, the bug would allow
    3 detections for `mAR @ 1` (one per class) instead of just 1.

    Returns:
        tuple: `(predictions, targets)`
        - Single image with 3 perfect detections
        - Confidences: `[0.9, 0.8, 0.7]` for classes `[0, 1, 2]`
    """
    targets = [
        Detections(
            xyxy=np.array(
                [[10, 10, 50, 50], [60, 60, 100, 100], [110, 110, 150, 150]],
                dtype=np.float32,
            ),
            class_id=np.array([0, 1, 2], dtype=np.int32),
        )
    ]
    # Predictions are pixel-identical to GT; only confidences rank them.
    predictions = [
        Detections(
            xyxy=np.array(
                [[10, 10, 50, 50], [60, 60, 100, 100], [110, 110, 150, 150]],
                dtype=np.float32,
            ),
            confidence=np.array([0.9, 0.8, 0.7], dtype=np.float32),
            class_id=np.array([0, 1, 2], dtype=np.int32),
        )
    ]
    return predictions, targets
def test_single_perfect_detection() -> None:
    """Test that a single perfect detection yields 1.0 recall."""
    detections = Detections(
        xyxy=np.array([[10, 10, 50, 50]], dtype=np.float32),
        confidence=np.array([0.9], dtype=np.float32),
        class_id=np.array([0], dtype=np.int32),
    )
    metric = MeanAverageRecall(metric_target=MetricTarget.BOXES)
    # Predictions identical to targets -> the single GT is always recalled.
    metric.update([detections], [detections])
    result = metric.compute()

    # For a single GT, if it's recalled, the score is 1.0 across all K
    expected = np.array([1.0, 1.0, 1.0])
    np.testing.assert_almost_equal(result.recall_scores, expected, decimal=6)
def test_complex_integration_scenario(
    complex_scenario_predictions, complex_scenario_targets
) -> None:
    """Test integration scenario with multiple images and varying performance."""

    def mock_detections_list(boxes_list):
        # Convert raw [x1, y1, x2, y2, conf, class_id] arrays into Detections.
        return [
            Detections(
                xyxy=boxes[:, :4],
                confidence=boxes[:, 4],
                class_id=boxes[:, 5].astype(int),
            )
            for boxes in boxes_list
        ]

    predictions_list = mock_detections_list(complex_scenario_predictions)
    targets_list = mock_detections_list(complex_scenario_targets)

    metric = MeanAverageRecall(metric_target=MetricTarget.BOXES)
    metric.update(predictions_list, targets_list)
    result = metric.compute()

    # Expected mAR at K = 1, 10, 100
    expected_result = np.array([0.2874613, 0.63622291, 0.63622291])
    np.testing.assert_almost_equal(result.recall_scores, expected_result, decimal=6)
def test_mar_at_k_limits_per_image_not_per_class(two_class_two_image_detections) -> None:
    """
    Test that `mAR @ K` limits detections per image, not per class.

    BUG SCENARIO (what was wrong):
        The previous implementation would limit detections per CLASS per image,
        meaning `mAR@1` would take the top-1 prediction for EACH class in each image.
        With 2 classes and `mAR@1`, this incorrectly allowed 2 detections per image.

    This test uses a scenario where the bug would produce different results:
    - 2 images, each with 2 GT objects (one of each class)
    - Predictions perfectly match GT with varying confidences
    - Image 1: `class_0` (conf=0.9) > `class_1` (conf=0.8)
    - Image 2: `class_1` (conf=0.95) > `class_0` (conf=0.7)

    BUGGY BEHAVIOR (if bug were present):
    - `mAR@1` would take top-1 per class → both detections per image count
    - Recall for `class_0`: 2/2 = 1.0
    - Recall for `class_1`: 2/2 = 1.0
    - `mAR@1` would incorrectly = 1.0 (same as `mAR@10`)

    CORRECT BEHAVIOR (with fix):
    - `mAR@1` takes top-1 per image → only highest confidence per image counts
    - Image 1: only `class_0` counts (conf=0.9)
    - Image 2: only `class_1` counts (conf=0.95)
    - Recall for `class_0`: 1/2 = 0.5
    - Recall for `class_1`: 1/2 = 0.5
    - `mAR@1` = 0.5 (correctly < `mAR@10` = 1.0)
    """
    predictions, targets = two_class_two_image_detections

    metric = MeanAverageRecall(metric_target=MetricTarget.BOXES)
    metric.update(predictions, targets)
    result = metric.compute()

    # Expected results with correct behavior
    expected_mar_at_1 = 0.5  # Only top detection per image
    expected_mar_at_10 = 1.0  # All detections count
    expected_mar_at_100 = 1.0
    # Note: Bug would produce mAR @ 1 = 1.0

    # Test correct behavior (this would fail with the bug)
    np.testing.assert_almost_equal(result.mAR_at_1, expected_mar_at_1, decimal=6)
    np.testing.assert_almost_equal(result.mAR_at_10, expected_mar_at_10, decimal=6)
    np.testing.assert_almost_equal(result.mAR_at_100, expected_mar_at_100, decimal=6)

    # Critical assertion: mAR @ 1 must be less than mAR @ 10
    # With the bug, both would equal 1.0
    assert result.mAR_at_1 < result.mAR_at_10, (
        f"Bug detected: mAR @ 1 ({result.mAR_at_1}) should be < mAR @ 10 "
        f"({result.mAR_at_10}) when images have multiple objects. "
        "If they're equal, K is being applied per-class instead of per-image."
    )
def test_three_class_single_image_scenario(three_class_single_image_detections) -> None:
    """
    Test with 3 classes on single image - explicit N x K bug reproduction.

    THE BUG:
        mAR @ K was limiting detections per class per image, not per image globally.
        This meant with N classes, up to N x K detections could count per image
        instead of just K detections.

    REPRODUCTION SCENARIO:
        Image with 3 GT objects: `[class_0, class_1, class_2]`
        Model predicts all 3 correctly with confidences: `[0.9, 0.8, 0.7]`

    With mAR @ 1 (max 1 detection per image):
        BUGGY: Would take top-1 per class → all 3 detections count
            → Recall per class: `[1/1, 1/1, 1/1]` → mAR @ 1 = 1.0
        CORRECT: Takes top-1 globally → only `class_0` (conf=0.9) counts
            → Recall per class: `[1/1, 0/1, 0/1]` → mAR @ 1 = 0.33

    This test would PASS with the bug (incorrectly) if mAR @ 1 ≈ 1.0
    and PASS with the fix (correctly) if mAR @ 1 ≈ 0.33
    """
    predictions, targets = three_class_single_image_detections

    metric = MeanAverageRecall(metric_target=MetricTarget.BOXES)
    metric.update(predictions, targets)
    result = metric.compute()

    # Expected results with correct behavior
    expected_mar_at_1 = 1.0 / 3.0  # Only highest confidence (class_0) counts
    expected_mar_at_10 = 1.0  # All detections count
    # Note: Bug would produce mAR @ 1 = 1.0 (all 3 counted, one per class)

    # Test correct behavior
    np.testing.assert_almost_equal(result.mAR_at_1, expected_mar_at_1, decimal=6)
    np.testing.assert_almost_equal(result.mAR_at_10, expected_mar_at_10, decimal=6)

    # Sanity check: if this fails, the bug is present
    # Bug would produce mAR @ 1 ≈ 1.0, correct is ≈ 0.333
    assert result.mAR_at_1 < 0.5, (
        f"Bug detected: mAR @ 1 = {result.mAR_at_1:.4f}, expected ≈ 0.333. "
        "The bug would produce mAR @ 1 ≈ 1.0 by counting all detections."
    )
def test_dataset_split_integration(yolo_dataset_two_classes) -> None:
    """
    Test mAR with a roboflow-format dataset loaded from disk.

    Uses a synthetic YOLO-format dataset loaded via DetectionDataset.from_yolo()
    to validate that the mAR metric works correctly with dataset splits - an
    important real-world use case.

    Scenarios tested:
    - Multiple images with varying object counts
    - Two classes with different distributions
    - Predictions with different confidence levels
    - mAR @ K correctly limits per image (not per class)
    """
    from supervision import DetectionDataset

    dataset_info = yolo_dataset_two_classes
    np.random.seed(42)  # Match fixture seed for offset generation

    # Load dataset from YOLO format
    dataset = DetectionDataset.from_yolo(
        images_directory_path=dataset_info["images_dir"],
        annotations_directory_path=dataset_info["labels_dir"],
        data_yaml_path=dataset_info["data_yaml_path"],
    )

    assert len(dataset) == dataset_info["num_images"]
    assert dataset.classes == ["class_0", "class_1"]

    # Create predictions and targets from loaded dataset
    predictions_list = []
    targets_list = []
    for idx, (img_path, img, gt_detections) in enumerate(dataset):
        targets_list.append(gt_detections)
        # Create predictions based on GT with small offsets
        if len(gt_detections) > 0:
            pred_xyxy = gt_detections.xyxy.copy().astype(np.float32)
            # Add small random offset (±3 pixels)
            offset = np.random.randint(-3, 4, pred_xyxy.shape).astype(np.float32)
            pred_xyxy = np.clip(pred_xyxy + offset, 0, 640)
            # Generate decreasing confidence scores
            num_preds = len(pred_xyxy)
            confidences = np.linspace(0.95, 0.65, num_preds, dtype=np.float32)
            predictions_list.append(
                Detections(
                    xyxy=pred_xyxy,
                    confidence=confidences,
                    class_id=gt_detections.class_id.copy(),
                )
            )
        else:
            # Images without GT still need a (empty) prediction entry.
            predictions_list.append(Detections.empty())

    # Calculate mAR
    metric = MeanAverageRecall(metric_target=MetricTarget.BOXES)
    metric.update(predictions_list, targets_list)
    result = metric.compute()

    # Expected behavior validation
    expected_min_mar_at_100 = 0.8  # High recall with small offsets

    # Verify expected behavior
    assert 0.0 <= result.mAR_at_1 <= 1.0
    assert 0.0 <= result.mAR_at_10 <= 1.0
    assert 0.0 <= result.mAR_at_100 <= 1.0

    # mAR should increase with more detections considered
    assert result.mAR_at_1 <= result.mAR_at_10
    assert result.mAR_at_10 <= result.mAR_at_100

    # With good predictions (small offsets), expect high recall
    assert result.mAR_at_100 > expected_min_mar_at_100

    # mAR@1 should be significantly lower than mAR@10 for multi-object images
    # This validates that K limits detections per image (not per class)
    assert result.mAR_at_1 < result.mAR_at_10
| {
"repo_id": "roboflow/supervision",
"file_path": "tests/metrics/test_mean_average_recall.py",
"license": "MIT License",
"lines": 518,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
roboflow/supervision:tests/detection/test_from_sam.py | from __future__ import annotations
import numpy as np
import pytest
from supervision.detection.core import Detections
SERVERLESS_SAM3_DICT = {
"prompt_results": [
{
"prompt_index": 0,
"echo": {
"prompt_index": 0,
"type": "text",
"text": "person",
"num_boxes": 0,
},
"predictions": [
{
"masks": [[[295, 675], [294, 676]], [[496, 617], [495, 618]]],
"confidence": 0.94921875,
"format": "polygon",
}
],
},
{
"prompt_index": 1,
"echo": {"prompt_index": 1, "type": "text", "text": "dog", "num_boxes": 0},
"predictions": [
{
"masks": [[[316, 561], [316, 562]], [[345, 251], [344, 252]]],
"confidence": 0.89453125,
"format": "polygon",
}
],
},
],
"time": 0.14756996370851994,
}
HOSTED_SAM3_DICT = {
"prompt_results": [
{
"prompt_index": 0,
"echo": {
"prompt_index": 0,
"type": "text",
"text": "bottle",
"num_boxes": 0,
},
"predictions": [
{
"masks": [[[1364, 200], [1365, 201]]],
"confidence": 0.8984375,
"format": "polygon",
},
{
"masks": [[[1140, 171], [1139, 170]]],
"confidence": 0.94140625,
"format": "polygon",
},
],
}
],
"time": 0.7277156260097399,
}
SERVERLESS_SAM3_PVS_DICT = {
"predictions": [
{
"masks": [
[[713, 1276], [713, 1279], [714, 1279], [714, 1277]],
[[711, 1273]],
[[671, 1231], [671, 1234]],
[[523, 1222], [522, 1223]],
],
"confidence": 0.0025782063603401184,
"format": "polygon",
}
],
"time": 0.07825545498053543,
}
@pytest.mark.parametrize(
    ("sam_result", "expected_xyxy", "expected_mask_shape"),
    [
        # Single SAM segment with a full 10x10 mask.
        (
            [
                {
                    "segmentation": np.ones((10, 10), dtype=bool),
                    "bbox": [0, 0, 10, 10],
                    "area": 100,
                }
            ],
            np.array([[0, 0, 10, 10]], dtype=np.float32),
            (1, 10, 10),
        ),
        # Empty SAM output -> empty detections with no mask payload.
        ([], np.empty((0, 4), dtype=np.float32), None),
    ],
)
def test_from_sam(
    sam_result: list[dict],
    expected_xyxy: np.ndarray,
    expected_mask_shape: tuple[int, ...] | None,
) -> None:
    """`Detections.from_sam` converts SAM results into boxes and stacked masks."""
    detections = Detections.from_sam(sam_result=sam_result)

    assert np.array_equal(detections.xyxy, expected_xyxy)
    if expected_mask_shape is not None:
        assert detections.mask.shape == expected_mask_shape
    else:
        assert detections.mask is None
@pytest.mark.parametrize(
    (
        "sam3_result",
        "resolution_wh",
        "expected_xyxy",
        "expected_confidence",
        "expected_class_id",
    ),
    [
        # Minimal single-prompt result: one square polygon mask.
        (
            {
                "prompt_results": [
                    {
                        "predictions": [
                            {
                                "format": "polygon",
                                "masks": [[[0, 0], [10, 0], [10, 10], [0, 10]]],
                                "confidence": 0.9,
                            }
                        ],
                        "prompt_index": 0,
                    }
                ]
            },
            (100, 100),
            np.array([[0, 0, 10, 10]], dtype=np.float32),
            np.array([0.9], dtype=np.float32),
            np.array([0], dtype=int),
        ),
        # No prompt results -> empty detections.
        (
            {"prompt_results": []},
            (100, 100),
            np.empty((0, 4), dtype=np.float32),
            np.empty((0,), dtype=np.float32),
            np.empty((0,), dtype=int),
        ),
        # Serverless payload: two text prompts map to class ids 0 and 1.
        (
            SERVERLESS_SAM3_DICT,
            (1000, 1000),
            np.array(
                [[294.0, 617.0, 496.0, 676.0], [316.0, 251.0, 345.0, 562.0]],
                dtype=np.float32,
            ),
            np.array([0.94921875, 0.89453125], dtype=np.float32),
            np.array([0, 1], dtype=int),
        ),
        # Hosted payload: one prompt, two predictions share class id 0.
        (
            HOSTED_SAM3_DICT,
            (2000, 2000),
            np.array(
                [[1364.0, 200.0, 1365.0, 201.0], [1139.0, 170.0, 1140.0, 171.0]],
                dtype=np.float32,
            ),
            np.array([0.898438, 0.941406], dtype=np.float32),
            np.array([0, 0], dtype=int),
        ),
        # PVS-style payload: predictions at the top level, multi-polygon mask.
        (
            SERVERLESS_SAM3_PVS_DICT,
            (2000, 2000),
            np.array([[522.0, 1222.0, 714.0, 1279.0]], dtype=np.float32),
            np.array([0.00257821], dtype=np.float32),
            np.array([0], dtype=int),
        ),
    ],
)
def test_from_sam3(
    sam3_result: dict,
    resolution_wh: tuple[int, int],
    expected_xyxy: np.ndarray,
    expected_confidence: np.ndarray,
    expected_class_id: np.ndarray,
) -> None:
    """`Detections.from_sam3` parses SAM3 polygon payloads into boxes/conf/class."""
    detections = Detections.from_sam3(
        sam3_result=sam3_result, resolution_wh=resolution_wh
    )
    np.testing.assert_allclose(detections.xyxy, expected_xyxy, atol=1e-5)
    np.testing.assert_allclose(detections.confidence, expected_confidence, atol=1e-5)
    np.testing.assert_array_equal(detections.class_id, expected_class_id)
def test_from_sam3_invalid_resolution() -> None:
    """Non-positive resolution dimensions are rejected with a ValueError."""
    sam3_result = {"prompt_results": []}
    with pytest.raises(
        ValueError, match=r"Both dimensions in resolution must be positive\."
    ):
        Detections.from_sam3(sam3_result=sam3_result, resolution_wh=(-100, 100))
| {
"repo_id": "roboflow/supervision",
"file_path": "tests/detection/test_from_sam.py",
"license": "MIT License",
"lines": 189,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
roboflow/supervision:examples/time_in_zone/rfdetr_file_example.py | from __future__ import annotations
from enum import Enum
import cv2
import numpy as np
from rfdetr import RFDETRBase, RFDETRLarge, RFDETRMedium, RFDETRNano, RFDETRSmall
from utils.general import find_in_list, load_zones_config
from utils.timers import FPSBasedTimer
import supervision as sv
# Palette used to distinguish zones (one color per zone index).
COLORS = sv.ColorPalette.from_hex(["#E6194B", "#3CB44B", "#FFE119", "#3C76D1"])
# Shared annotators; per-zone colors are selected via `custom_color_lookup`.
COLOR_ANNOTATOR = sv.ColorAnnotator(color=COLORS)
LABEL_ANNOTATOR = sv.LabelAnnotator(
    color=COLORS, text_color=sv.Color.from_hex("#000000")
)
class ModelSize(Enum):
    """Supported RF-DETR model sizes."""

    NANO = "nano"
    SMALL = "small"
    MEDIUM = "medium"
    BASE = "base"
    LARGE = "large"

    @classmethod
    def list(cls):
        """Return the string values of all members, in declaration order."""
        return [member.value for member in cls]

    @classmethod
    def from_value(cls, value: ModelSize | str) -> ModelSize:
        """Coerce a member, or its case-insensitive string value, to a member.

        Raises:
            ValueError: If `value` is an unknown string or an unsupported type.
        """
        if isinstance(value, cls):
            return value
        if not isinstance(value, str):
            raise ValueError(
                f"Invalid value type: {type(value)}. Must be an instance of "
                f"{cls.__name__} or str."
            )
        value = value.lower()
        try:
            return cls(value)
        except ValueError:
            raise ValueError(f"Invalid value: {value}. Must be one of {cls.list()}")
def load_model(checkpoint: ModelSize | str, device: str, resolution: int):
    """Instantiate the RF-DETR model variant matching `checkpoint`.

    Args:
        checkpoint: Model size, as a `ModelSize` member or its string value.
        device: Computation device passed to the model (e.g. 'cpu', 'cuda').
        resolution: Input resolution forwarded to the model constructor.

    Returns:
        An instantiated RF-DETR model of the requested size.

    Raises:
        ValueError: If `checkpoint` does not map to a known model size.
    """
    checkpoint = ModelSize.from_value(checkpoint)
    if checkpoint == ModelSize.NANO:
        return RFDETRNano(device=device, resolution=resolution)
    if checkpoint == ModelSize.SMALL:
        return RFDETRSmall(device=device, resolution=resolution)
    if checkpoint == ModelSize.MEDIUM:
        return RFDETRMedium(device=device, resolution=resolution)
    if checkpoint == ModelSize.BASE:
        return RFDETRBase(device=device, resolution=resolution)
    if checkpoint == ModelSize.LARGE:
        return RFDETRLarge(device=device, resolution=resolution)
    # Defensive: unreachable if every ModelSize member is handled above.
    raise ValueError(
        f"Invalid checkpoint: {checkpoint}. Must be one of: {ModelSize.list()}."
    )
def adjust_resolution(checkpoint: ModelSize | str, resolution: int) -> int:
    """Snap `resolution` to the nearest multiple of the model's required divisor.

    NANO/SMALL/MEDIUM require multiples of 32; BASE/LARGE require multiples
    of 56. Ties between the two nearest multiples round upward.

    Args:
        checkpoint: Model size, as a `ModelSize` member or its string value.
        resolution: Requested input resolution.

    Returns:
        The closest valid resolution for the chosen model size.

    Raises:
        ValueError: If `checkpoint` does not map to a known model size.
    """
    checkpoint = ModelSize.from_value(checkpoint)
    if checkpoint in {ModelSize.BASE, ModelSize.LARGE}:
        divisor = 56
    elif checkpoint in {ModelSize.NANO, ModelSize.SMALL, ModelSize.MEDIUM}:
        divisor = 32
    else:
        raise ValueError(
            f"Unknown checkpoint: {checkpoint}. Must be one of: {ModelSize.list()}."
        )

    remainder = resolution % divisor
    if not remainder:
        # Already a valid multiple.
        return resolution

    floor_multiple = resolution - remainder
    ceil_multiple = floor_multiple + divisor
    if resolution - floor_multiple < ceil_multiple - resolution:
        return floor_multiple
    return ceil_multiple
def main(
    source_video_path: str,
    zone_configuration_path: str,
    resolution: int,
    model_size: str = "small",
    device: str = "cpu",
    confidence_threshold: float = 0.3,
    iou_threshold: float = 0.7,
    classes: list[int] | None = None,
) -> None:
    """
    Calculating detections dwell time in zones, using video file.

    Args:
        source_video_path: Path to the source video file
        zone_configuration_path: Path to the zone configuration JSON file
        resolution: Input resolution for the model
        model_size: RF-DETR model size ('nano', 'small', 'medium', 'base' or 'large')
        device: Computation device ('cpu', 'mps' or 'cuda')
        confidence_threshold: Confidence level for detections (0 to 1)
        iou_threshold: IOU threshold for non-max suppression
        classes: List of class IDs to track. If None or empty, all classes are
            tracked
    """
    # FIX: the default was a shared mutable `[]` (mutable-default-argument
    # anti-pattern). A `None` sentinel normalized to a fresh empty list keeps
    # the original "empty means track everything" behavior for find_in_list.
    if classes is None:
        classes = []

    resolution = adjust_resolution(checkpoint=model_size, resolution=resolution)
    model = load_model(checkpoint=model_size, device=device, resolution=resolution)
    tracker = sv.ByteTrack(minimum_matching_threshold=0.5)

    video_info = sv.VideoInfo.from_video_path(video_path=source_video_path)
    frames_generator = sv.get_video_frames_generator(source_video_path)

    polygons = load_zones_config(file_path=zone_configuration_path)
    zones = [
        sv.PolygonZone(
            polygon=polygon,
            triggering_anchors=(sv.Position.CENTER,),
        )
        for polygon in polygons
    ]
    # One timer per zone; dwell time is derived from the video's FPS.
    timers = [FPSBasedTimer(video_info.fps) for _ in zones]

    for frame in frames_generator:
        detections = model.predict(frame, threshold=confidence_threshold)
        detections = detections[find_in_list(detections.class_id, classes)]
        detections = detections.with_nms(threshold=iou_threshold)
        detections = tracker.update_with_detections(detections)

        annotated_frame = frame.copy()

        for idx, zone in enumerate(zones):
            annotated_frame = sv.draw_polygon(
                scene=annotated_frame, polygon=zone.polygon, color=COLORS.by_idx(idx)
            )
            detections_in_zone = detections[zone.trigger(detections)]
            time_in_zone = timers[idx].tick(detections_in_zone)
            # Color each zone's detections with that zone's palette index.
            custom_color_lookup = np.full(detections_in_zone.class_id.shape, idx)

            annotated_frame = COLOR_ANNOTATOR.annotate(
                scene=annotated_frame,
                detections=detections_in_zone,
                custom_color_lookup=custom_color_lookup,
            )
            # Label format: "#<tracker_id> MM:SS" dwell time.
            labels = [
                f"#{tracker_id} {int(time // 60):02d}:{int(time % 60):02d}"
                for tracker_id, time in zip(detections_in_zone.tracker_id, time_in_zone)
            ]
            annotated_frame = LABEL_ANNOTATOR.annotate(
                scene=annotated_frame,
                detections=detections_in_zone,
                labels=labels,
                custom_color_lookup=custom_color_lookup,
            )
        cv2.imshow("Processed Video", annotated_frame)
        # Press 'q' to stop processing early.
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # CLI entry point: expose `main`'s signature as command-line options.
    from jsonargparse import auto_cli, set_parsing_settings

    set_parsing_settings(parse_optionals_as_positionals=True)
    auto_cli(main, as_positional=False)
| {
"repo_id": "roboflow/supervision",
"file_path": "examples/time_in_zone/rfdetr_file_example.py",
"license": "MIT License",
"lines": 142,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
roboflow/supervision:examples/time_in_zone/rfdetr_naive_stream_example.py | from __future__ import annotations
from enum import Enum
import cv2
import numpy as np
from rfdetr import RFDETRBase, RFDETRLarge, RFDETRMedium, RFDETRNano, RFDETRSmall
from utils.general import find_in_list, get_stream_frames_generator, load_zones_config
from utils.timers import ClockBasedTimer
import supervision as sv
# Palette used to distinguish zones (one color per zone index).
COLORS = sv.ColorPalette.from_hex(["#E6194B", "#3CB44B", "#FFE119", "#3C76D1"])
# Shared annotators; per-zone colors are selected via `custom_color_lookup`.
COLOR_ANNOTATOR = sv.ColorAnnotator(color=COLORS)
LABEL_ANNOTATOR = sv.LabelAnnotator(
    color=COLORS, text_color=sv.Color.from_hex("#000000")
)
class ModelSize(Enum):
    """Supported RF-DETR model sizes."""

    NANO = "nano"
    SMALL = "small"
    MEDIUM = "medium"
    BASE = "base"
    LARGE = "large"

    @classmethod
    def list(cls):
        # String values of all members, in declaration order.
        return list(map(lambda c: c.value, cls))

    @classmethod
    def from_value(cls, value: ModelSize | str) -> ModelSize:
        """Coerce a member, or its case-insensitive string value, to a member.

        Raises:
            ValueError: If `value` is an unknown string or an unsupported type.
        """
        if isinstance(value, cls):
            return value
        if isinstance(value, str):
            value = value.lower()
            try:
                return cls(value)
            except ValueError:
                raise ValueError(f"Invalid value: {value}. Must be one of {cls.list()}")
        raise ValueError(
            f"Invalid value type: {type(value)}. Must be an instance of "
            f"{cls.__name__} or str."
        )
def load_model(checkpoint: ModelSize | str, device: str, resolution: int):
    """Instantiate the RF-DETR model variant matching `checkpoint`.

    Args:
        checkpoint: Model size, as a `ModelSize` member or its string value.
        device: Computation device passed to the model (e.g. 'cpu', 'cuda').
        resolution: Input resolution forwarded to the model constructor.

    Returns:
        An instantiated RF-DETR model of the requested size.

    Raises:
        ValueError: If `checkpoint` does not map to a known model size.
    """
    checkpoint = ModelSize.from_value(checkpoint)
    if checkpoint == ModelSize.NANO:
        return RFDETRNano(device=device, resolution=resolution)
    if checkpoint == ModelSize.SMALL:
        return RFDETRSmall(device=device, resolution=resolution)
    if checkpoint == ModelSize.MEDIUM:
        return RFDETRMedium(device=device, resolution=resolution)
    if checkpoint == ModelSize.BASE:
        return RFDETRBase(device=device, resolution=resolution)
    if checkpoint == ModelSize.LARGE:
        return RFDETRLarge(device=device, resolution=resolution)
    # Defensive: unreachable if every ModelSize member is handled above.
    raise ValueError(
        f"Invalid checkpoint: {checkpoint}. Must be one of: {ModelSize.list()}."
    )
def adjust_resolution(checkpoint: ModelSize | str, resolution: int) -> int:
    """Snap `resolution` to the nearest multiple of the model's required divisor.

    NANO/SMALL/MEDIUM require multiples of 32; BASE/LARGE require multiples
    of 56. Ties between the two nearest multiples round upward.

    Args:
        checkpoint: Model size, as a `ModelSize` member or its string value.
        resolution: Requested input resolution.

    Returns:
        The closest valid resolution for the chosen model size.

    Raises:
        ValueError: If `checkpoint` does not map to a known model size.
    """
    checkpoint = ModelSize.from_value(checkpoint)
    if checkpoint in {ModelSize.NANO, ModelSize.SMALL, ModelSize.MEDIUM}:
        divisor = 32
    elif checkpoint in {ModelSize.BASE, ModelSize.LARGE}:
        divisor = 56
    else:
        raise ValueError(
            f"Unknown checkpoint: {checkpoint}. Must be one of: {ModelSize.list()}."
        )
    remainder = resolution % divisor
    if remainder == 0:
        # Already a valid multiple.
        return resolution
    lower = resolution - remainder
    upper = lower + divisor
    if resolution - lower < upper - resolution:
        return lower
    else:
        return upper
def main(
    rtsp_url: str,
    zone_configuration_path: str,
    resolution: int,
    model_size: str = "small",
    device: str = "cpu",
    confidence_threshold: float = 0.3,
    iou_threshold: float = 0.7,
    classes: list[int] | None = None,
) -> None:
    """
    Calculating detections dwell time in zones, using RTSP stream.
    Args:
        rtsp_url: Complete RTSP URL for the video stream
        zone_configuration_path: Path to the zone configuration JSON file
        resolution: Input resolution for the model
        model_size: RF-DETR model size ('nano', 'small', 'medium', 'base' or 'large')
        device: Computation device ('cpu', 'mps' or 'cuda')
        confidence_threshold: Confidence level for detections (0 to 1)
        iou_threshold: IOU threshold for non-max suppression
        classes: List of class IDs to track. If None or empty, all classes are tracked
    """
    # Fix: a mutable default argument (`classes=[]`) is shared across calls;
    # use None as the sentinel and normalize to an empty list here (B006).
    if classes is None:
        classes = []
    resolution = adjust_resolution(checkpoint=model_size, resolution=resolution)
    model = load_model(checkpoint=model_size, device=device, resolution=resolution)
    tracker = sv.ByteTrack(minimum_matching_threshold=0.5)
    frames_generator = get_stream_frames_generator(rtsp_url=rtsp_url)
    fps_monitor = sv.FPSMonitor()
    polygons = load_zones_config(file_path=zone_configuration_path)
    zones = [
        sv.PolygonZone(
            polygon=polygon,
            triggering_anchors=(sv.Position.CENTER,),
        )
        for polygon in polygons
    ]
    # One wall-clock timer per zone, matched to zones by index.
    timers = [ClockBasedTimer() for _ in zones]
    for frame in frames_generator:
        fps_monitor.tick()
        fps = fps_monitor.fps
        detections = model.predict(frame, threshold=confidence_threshold)
        detections = detections[find_in_list(detections.class_id, classes)]
        detections = detections.with_nms(threshold=iou_threshold)
        detections = tracker.update_with_detections(detections)
        annotated_frame = frame.copy()
        # Overlay the measured FPS in the top-left corner.
        annotated_frame = sv.draw_text(
            scene=annotated_frame,
            text=f"{fps:.1f}",
            text_anchor=sv.Point(40, 30),
            background_color=sv.Color.from_hex("#A351FB"),
            text_color=sv.Color.from_hex("#000000"),
        )
        for idx, zone in enumerate(zones):
            annotated_frame = sv.draw_polygon(
                scene=annotated_frame, polygon=zone.polygon, color=COLORS.by_idx(idx)
            )
            detections_in_zone = detections[zone.trigger(detections)]
            time_in_zone = timers[idx].tick(detections_in_zone)
            # Color detections by zone index rather than by class id.
            custom_color_lookup = np.full(detections_in_zone.class_id.shape, idx)
            annotated_frame = COLOR_ANNOTATOR.annotate(
                scene=annotated_frame,
                detections=detections_in_zone,
                custom_color_lookup=custom_color_lookup,
            )
            # Label format: tracker id plus MM:SS dwell time.
            labels = [
                f"#{tracker_id} {int(t // 60):02d}:{int(t % 60):02d}"
                for tracker_id, t in zip(detections_in_zone.tracker_id, time_in_zone)
            ]
            annotated_frame = LABEL_ANNOTATOR.annotate(
                scene=annotated_frame,
                detections=detections_in_zone,
                labels=labels,
                custom_color_lookup=custom_color_lookup,
            )
        cv2.imshow("Processed Video", annotated_frame)
        # 'q' quits the display loop.
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cv2.destroyAllWindows()
# CLI entry point: jsonargparse exposes every `main` parameter as an option.
if __name__ == "__main__":
    from jsonargparse import auto_cli, set_parsing_settings

    set_parsing_settings(parse_optionals_as_positionals=True)
    auto_cli(main, as_positional=False)
| {
"repo_id": "roboflow/supervision",
"file_path": "examples/time_in_zone/rfdetr_naive_stream_example.py",
"license": "MIT License",
"lines": 151,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
roboflow/supervision:examples/time_in_zone/rfdetr_stream_example.py | from __future__ import annotations
from enum import Enum
import cv2
import numpy as np
from inference import InferencePipeline
from inference.core.interfaces.camera.entities import VideoFrame
from rfdetr import RFDETRBase, RFDETRLarge, RFDETRMedium, RFDETRNano, RFDETRSmall
from utils.general import find_in_list, load_zones_config
from utils.timers import ClockBasedTimer
import supervision as sv
class ModelSize(Enum):
    """Enumeration of available RF-DETR checkpoint sizes."""

    NANO = "nano"
    SMALL = "small"
    MEDIUM = "medium"
    BASE = "base"
    LARGE = "large"

    @classmethod
    def list(cls):
        """Return every valid size as its lowercase string value."""
        return [member.value for member in cls]

    @classmethod
    def from_value(cls, value: ModelSize | str) -> ModelSize:
        """Normalize an enum member or case-insensitive string to a member."""
        if isinstance(value, cls):
            return value
        if not isinstance(value, str):
            # Guard clause: anything that is neither str nor ModelSize.
            raise ValueError(
                f"Invalid value type '{type(value)}'. Expected str or ModelSize."
            )
        value = value.lower()
        try:
            return cls(value)
        except ValueError as exc:
            raise ValueError(
                f"Invalid model size '{value}'. Must be one of {cls.list()}."
            ) from exc
def load_model(checkpoint: ModelSize | str, device: str, resolution: int):
    """Build and return the RF-DETR model corresponding to *checkpoint*."""
    size = ModelSize.from_value(checkpoint)
    constructors = {
        ModelSize.NANO: RFDETRNano,
        ModelSize.SMALL: RFDETRSmall,
        ModelSize.MEDIUM: RFDETRMedium,
        ModelSize.BASE: RFDETRBase,
        ModelSize.LARGE: RFDETRLarge,
    }
    factory = constructors.get(size)
    if factory is None:
        # Unreachable in practice: from_value only returns known members.
        raise RuntimeError("Unhandled checkpoint type.")
    return factory(device=device, resolution=resolution)
def adjust_resolution(checkpoint: ModelSize | str, resolution: int) -> int:
    """Round *resolution* to the closest multiple the checkpoint accepts.

    nano/small/medium require multiples of 32; base/large require
    multiples of 56. Exact ties round up to the larger multiple.
    """
    size = ModelSize.from_value(checkpoint)
    small_family = {ModelSize.NANO, ModelSize.SMALL, ModelSize.MEDIUM}
    divisor = 32 if size in small_family else 56
    remainder = resolution % divisor
    if remainder == 0:
        return resolution
    below = resolution - remainder
    above = below + divisor
    if resolution - below < above - resolution:
        return below
    return above
# Shared palette and annotators used to color detections per zone index.
COLORS = sv.ColorPalette.from_hex(["#E6194B", "#3CB44B", "#FFE119", "#3C76D1"])
COLOR_ANNOTATOR = sv.ColorAnnotator(color=COLORS)
LABEL_ANNOTATOR = sv.LabelAnnotator(
    color=COLORS, text_color=sv.Color.from_hex("#000000")
)
class CustomSink:
    """Annotates frames with per-zone dwell times and displays them.

    Holds one ByteTrack tracker and one wall-clock timer per configured
    zone so each tracked object accumulates time while inside a zone.
    """

    def __init__(self, zone_configuration_path: str, classes: list[int]):
        # Class IDs to keep; an empty list appears to mean "keep all
        # classes" — presumably handled by find_in_list, TODO confirm.
        self.classes = classes
        self.tracker = sv.ByteTrack(minimum_matching_threshold=0.8)
        self.fps_monitor = sv.FPSMonitor()
        self.polygons = load_zones_config(file_path=zone_configuration_path)
        # One independent timer per zone, matched to zones by index.
        self.timers = [ClockBasedTimer() for _ in self.polygons]
        self.zones = [
            sv.PolygonZone(
                polygon=polygon,
                triggering_anchors=(sv.Position.CENTER,),
            )
            for polygon in self.polygons
        ]

    def on_prediction(self, detections: sv.Detections, frame: VideoFrame) -> None:
        """Filter, track and annotate one frame, then show it in a window."""
        self.fps_monitor.tick()
        fps = self.fps_monitor.fps
        detections = detections[find_in_list(detections.class_id, self.classes)]
        detections = self.tracker.update_with_detections(detections)
        annotated_frame = frame.image.copy()
        # Overlay the measured FPS in the top-left corner.
        annotated_frame = sv.draw_text(
            scene=annotated_frame,
            text=f"{fps:.1f}",
            text_anchor=sv.Point(40, 30),
            background_color=sv.Color.from_hex("#A351FB"),
            text_color=sv.Color.from_hex("#000000"),
        )
        for idx, zone in enumerate(self.zones):
            annotated_frame = sv.draw_polygon(
                scene=annotated_frame,
                polygon=zone.polygon,
                color=COLORS.by_idx(idx),
            )
            detections_in_zone = detections[zone.trigger(detections)]
            time_in_zone = self.timers[idx].tick(detections_in_zone)
            # Color detections by zone index rather than by class id.
            custom_color_lookup = np.full(detections_in_zone.class_id.shape, idx)
            annotated_frame = COLOR_ANNOTATOR.annotate(
                scene=annotated_frame,
                detections=detections_in_zone,
                custom_color_lookup=custom_color_lookup,
            )
            # Label format: tracker id plus MM:SS dwell time.
            labels = [
                f"#{tracker_id} {int(t // 60):02d}:{int(t % 60):02d}"
                for tracker_id, t in zip(detections_in_zone.tracker_id, time_in_zone)
            ]
            annotated_frame = LABEL_ANNOTATOR.annotate(
                scene=annotated_frame,
                detections=detections_in_zone,
                labels=labels,
                custom_color_lookup=custom_color_lookup,
            )
        cv2.imshow("Processed Video", annotated_frame)
        cv2.waitKey(1)
def main(
    rtsp_url: str,
    zone_configuration_path: str,
    resolution: int,
    model_size: str = "small",
    device: str = "cpu",
    confidence_threshold: float = 0.3,
    iou_threshold: float = 0.7,
    classes: list[int] | None = None,
) -> None:
    """
    Calculating detections dwell time in zones using an RTSP stream.
    Args:
        rtsp_url: Complete RTSP URL for the video stream
        zone_configuration_path: Path to the zone configuration JSON file
        resolution: Input resolution for the model
        model_size: RF-DETR model size ('nano', 'small', 'medium', 'base' or 'large')
        device: Computation device ('cpu', 'mps' or 'cuda')
        confidence_threshold: Confidence level for detections (0 to 1)
        iou_threshold: IOU threshold for non-max suppression
        classes: List of class IDs to track. If None or empty, all classes are tracked
    """
    # Fix: a mutable default argument (`classes=[]`) is shared across calls;
    # use None as the sentinel and normalize to an empty list here (B006).
    if classes is None:
        classes = []
    resolution = adjust_resolution(checkpoint=model_size, resolution=resolution)
    model = load_model(checkpoint=model_size, device=device, resolution=resolution)

    def inference_callback(frames: list[VideoFrame]) -> list[sv.Detections]:
        # Detect on the first (only) frame of the batch, then apply NMS.
        dets = model.predict(frames[0].image, threshold=confidence_threshold)
        return [dets.with_nms(threshold=iou_threshold)]

    sink = CustomSink(zone_configuration_path=zone_configuration_path, classes=classes)
    pipeline = InferencePipeline.init_with_custom_logic(
        video_reference=rtsp_url,
        on_video_frame=inference_callback,
        on_prediction=sink.on_prediction,
    )
    pipeline.start()
    try:
        pipeline.join()
    except KeyboardInterrupt:
        # Ctrl+C: shut the pipeline threads down cleanly.
        pipeline.terminate()
# CLI entry point: jsonargparse exposes every `main` parameter as an option.
if __name__ == "__main__":
    from jsonargparse import auto_cli, set_parsing_settings

    set_parsing_settings(parse_optionals_as_positionals=True)
    auto_cli(main, as_positional=False)
| {
"repo_id": "roboflow/supervision",
"file_path": "examples/time_in_zone/rfdetr_stream_example.py",
"license": "MIT License",
"lines": 158,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
roboflow/supervision:.github/scripts/augment_links.py | #!/usr/bin/env python3
"""
Script to augment relative links in markdown files to GitHub URLs.
"""
import argparse
import os
import re
from re import Match
def get_repo_root() -> str:
    """Return the absolute path of the repository root.

    This script lives in ``<repo>/.github/scripts``, so the root is two
    directory levels above this file.
    """
    here = os.path.abspath(__file__)
    return os.path.dirname(os.path.dirname(os.path.dirname(here)))
def augment_links_in_file(file_path: str, branch: str = "main") -> None:
    """
    Augment relative links in a markdown file to GitHub URLs.

    Only ``.md`` files are processed; links whose target does not exist on
    disk (external URLs, anchors, broken paths) are left untouched.

    Args:
        file_path: Path to the markdown file.
        branch: Branch name, default "main".
    """
    repo_root = get_repo_root()
    if not file_path.endswith(".md"):
        return
    # Fix: read/write explicitly as UTF-8 so behavior does not depend on
    # the platform's locale encoding (repo markdown is UTF-8).
    with open(file_path, encoding="utf-8") as f:
        content = f.read()

    def replace_link(match: Match[str]) -> str:
        full_match = match.group(0)
        text = match.group(2)
        url = match.group(3)
        if not url.startswith("http"):
            # Resolve relative to an absolute path
            abs_path = os.path.normpath(os.path.join(os.path.dirname(file_path), url))
            if os.path.exists(abs_path):
                # Use 'tree' for directories and 'blob' for files
                ref = "tree" if os.path.isdir(abs_path) else "blob"
                rel_to_root = os.path.relpath(abs_path, repo_root)
                new_url = f"https://github.com/roboflow/supervision/{ref}/{branch}/{rel_to_root}"
                # A leading '!' marks a markdown image; keep the image syntax.
                if full_match.startswith("!"):
                    return f""
                else:
                    return f"[{text}]({new_url})"
        return full_match

    # Matches both links and images: optional '!', [text], (url).
    new_content = re.sub(r"(!?)\[([^\]]+)\]\(([^)]+)\)", replace_link, content)
    with open(file_path, "w", encoding="utf-8") as f:
        f.write(new_content)
def main() -> None:
    """Parse CLI arguments and augment links in each given markdown file."""
    parser = argparse.ArgumentParser(
        description="Augment relative links to GitHub URLs."
    )
    parser.add_argument("--branch", default="main", help="Branch name")
    parser.add_argument("files", nargs="+", help="Files to process")
    args = parser.parse_args()
    for path in args.files:
        augment_links_in_file(path, args.branch)
# Script entry point; run as `python augment_links.py [--branch B] FILES...`.
if __name__ == "__main__":
    main()
| {
"repo_id": "roboflow/supervision",
"file_path": ".github/scripts/augment_links.py",
"license": "MIT License",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-opensearch/llama_index/storage/chat_store/opensearch/base.py | """OpenSearch chat store."""
from typing import Any, Dict, List, Optional
from llama_index.core.bridge.pydantic import Field, PrivateAttr
from llama_index.core.llms import ChatMessage
from llama_index.core.storage.chat_store.base import BaseChatStore
from opensearchpy import AsyncOpenSearch, OpenSearch
# Index used when the caller does not supply one.
DEFAULT_INDEX_NAME = "llama_index_chat_store"
# OpenSearch index mapping for chat messages
CHAT_STORE_MAPPING: Dict[str, Any] = {
    "mappings": {
        "properties": {
            "session_id": {"type": "keyword"},
            "index": {"type": "integer"},
            # Serialized ChatMessage JSON; not indexed and no doc_values,
            # since it is only read back verbatim, never searched or sorted.
            "message": {"type": "keyword", "index": False, "doc_values": False},
        }
    },
}
def _message_to_str(message: ChatMessage) -> str:
    """Serialize a ChatMessage to a JSON string for storage.

    Inverse of :func:`_str_to_message`.
    """
    return message.model_dump_json()
def _str_to_message(s: str) -> ChatMessage:
    """Deserialize a JSON string from storage to a ChatMessage.

    Inverse of :func:`_message_to_str`.
    """
    return ChatMessage.model_validate_json(s)
class OpensearchChatStore(BaseChatStore):
    """
    OpenSearch chat store.
    Stores chat messages as individual documents in an OpenSearch index,
    keyed by session_id with an integer index for ordering.
    Args:
        opensearch_url: OpenSearch endpoint URL.
        index: Name of the OpenSearch index to store messages in.
        os_client: Optional pre-configured OpenSearch client.
        os_async_client: Optional pre-configured async OpenSearch client.
        **kwargs: Additional arguments passed to the OpenSearch client.
    """

    opensearch_url: str = Field(
        default="https://localhost:9200",
        description="OpenSearch URL.",
    )
    index: str = Field(
        default=DEFAULT_INDEX_NAME,
        description="OpenSearch index name for chat messages.",
    )
    # Underlying sync/async clients, hidden from the pydantic schema.
    _os_client: Any = PrivateAttr()
    _os_async_client: Any = PrivateAttr()

    def __init__(
        self,
        opensearch_url: str = "https://localhost:9200",
        index: str = DEFAULT_INDEX_NAME,
        os_client: Optional[Any] = None,
        os_async_client: Optional[Any] = None,
        **kwargs: Any,
    ) -> None:
        """Initialize OpensearchChatStore."""
        super().__init__(opensearch_url=opensearch_url, index=index)
        self._os_client = os_client or OpenSearch(opensearch_url, **kwargs)
        self._os_async_client = os_async_client or AsyncOpenSearch(
            opensearch_url, **kwargs
        )
        # Index creation happens eagerly via the sync client only.
        self._ensure_index_exists()

    @classmethod
    def class_name(cls) -> str:
        """Get class name."""
        return "OpensearchChatStore"

    def _ensure_index_exists(self) -> None:
        """Create the index if it does not already exist."""
        if not self._os_client.indices.exists(index=self.index):
            self._os_client.indices.create(index=self.index, body=CHAT_STORE_MAPPING)

    async def _aensure_index_exists(self) -> None:
        """Async: create the index if it does not already exist.

        NOTE(review): this method is never called within this class — only
        the sync `_ensure_index_exists` runs (from `__init__`). Confirm
        whether async-only usage is expected to create the index elsewhere.
        """
        exists = await self._os_async_client.indices.exists(index=self.index)
        if not exists:
            await self._os_async_client.indices.create(
                index=self.index, body=CHAT_STORE_MAPPING
            )

    # ---- helpers ----
    def _search(self, query: Dict[str, Any], size: int = 10000) -> List[Dict]:
        """Run a search and return the list of hits.

        size defaults to 10000, the OpenSearch result-window default cap.
        """
        resp = self._os_client.search(index=self.index, body=query, size=size)
        return resp["hits"]["hits"]

    async def _asearch(self, query: Dict[str, Any], size: int = 10000) -> List[Dict]:
        """Async: run a search and return the list of hits."""
        resp = await self._os_async_client.search(
            index=self.index, body=query, size=size
        )
        return resp["hits"]["hits"]

    def _delete_by_query(self, query: Dict[str, Any]) -> None:
        """Delete documents matching a query.

        refresh=True makes the deletion visible to searches that follow
        immediately (at the cost of write throughput).
        """
        self._os_client.delete_by_query(
            index=self.index,
            body=query,
            refresh=True,
        )

    async def _adelete_by_query(self, query: Dict[str, Any]) -> None:
        """Async: delete documents matching a query."""
        await self._os_async_client.delete_by_query(
            index=self.index,
            body=query,
            refresh=True,
        )

    def _session_query(self, key: str) -> Dict[str, Any]:
        """Build a query to match all documents for a session."""
        return {"query": {"term": {"session_id": key}}}

    def _session_sorted_query(self, key: str, order: str = "asc") -> Dict[str, Any]:
        """Build a query for a session, sorted by index."""
        return {
            "query": {"term": {"session_id": key}},
            "sort": [{"index": {"order": order}}],
        }

    def _find_by_index_query(self, key: str, idx: int) -> Dict[str, Any]:
        """Build a query to match a single document by session + index."""
        return {
            "query": {
                "bool": {
                    "must": [
                        {"term": {"session_id": key}},
                        {"term": {"index": idx}},
                    ]
                }
            }
        }

    def _shift_query(self, key: str, from_idx: int) -> Dict[str, Any]:
        """Build a query to find documents at or after a given index (desc)."""
        return {
            "query": {
                "bool": {
                    "must": [
                        {"term": {"session_id": key}},
                        {"range": {"index": {"gte": from_idx}}},
                    ]
                }
            },
            "sort": [{"index": {"order": "desc"}}],
        }

    def _get_next_index(self, key: str) -> int:
        """Get the next available index for a session."""
        # Only the highest-index document is needed, hence size=1 desc.
        hits = self._search(self._session_sorted_query(key, order="desc"), size=1)
        if not hits:
            return 0
        return int(hits[0]["_source"]["index"]) + 1

    async def _aget_next_index(self, key: str) -> int:
        """Async: get the next available index for a session."""
        hits = await self._asearch(
            self._session_sorted_query(key, order="desc"), size=1
        )
        if not hits:
            return 0
        return int(hits[0]["_source"]["index"]) + 1

    def _index_doc(self, key: str, idx: int, message: ChatMessage) -> None:
        """Index a single message document."""
        self._os_client.index(
            index=self.index,
            body={
                "session_id": key,
                "index": idx,
                "message": _message_to_str(message),
            },
            refresh=True,
        )

    async def _aindex_doc(self, key: str, idx: int, message: ChatMessage) -> None:
        """Async: index a single message document."""
        await self._os_async_client.index(
            index=self.index,
            body={
                "session_id": key,
                "index": idx,
                "message": _message_to_str(message),
            },
            refresh=True,
        )

    def _reindex_session(self, key: str) -> None:
        """Re-number all documents in a session so indices are contiguous."""
        hits = self._search(self._session_sorted_query(key))
        # Delete all existing documents for this session
        self._delete_by_query(self._session_query(key))
        # Re-insert with corrected indices
        for new_idx, hit in enumerate(hits):
            msg = _str_to_message(hit["_source"]["message"])
            self._index_doc(key, new_idx, msg)

    async def _areindex_session(self, key: str) -> None:
        """Async: re-number all documents in a session so indices are contiguous."""
        hits = await self._asearch(self._session_sorted_query(key))
        # Delete all existing documents for this session
        await self._adelete_by_query(self._session_query(key))
        # Re-insert with corrected indices
        for new_idx, hit in enumerate(hits):
            msg = _str_to_message(hit["_source"]["message"])
            await self._aindex_doc(key, new_idx, msg)

    # ---- BaseChatStore interface ----
    def set_messages(self, key: str, messages: List[ChatMessage]) -> None:
        """Set messages for a key, replacing any existing messages."""
        # Delete existing messages for this session
        self._delete_by_query(self._session_query(key))
        # Insert new messages
        for idx, message in enumerate(messages):
            self._index_doc(key, idx, message)

    async def aset_messages(self, key: str, messages: List[ChatMessage]) -> None:
        """Async: set messages for a key, replacing any existing messages."""
        await self._adelete_by_query(self._session_query(key))
        for idx, message in enumerate(messages):
            await self._aindex_doc(key, idx, message)

    def get_messages(self, key: str) -> List[ChatMessage]:
        """Get messages for a key, ordered by index."""
        hits = self._search(self._session_sorted_query(key))
        return [_str_to_message(hit["_source"]["message"]) for hit in hits]

    async def aget_messages(self, key: str) -> List[ChatMessage]:
        """Async: get messages for a key, ordered by index."""
        hits = await self._asearch(self._session_sorted_query(key))
        return [_str_to_message(hit["_source"]["message"]) for hit in hits]

    def add_message(
        self, key: str, message: ChatMessage, idx: Optional[int] = None
    ) -> None:
        """
        Add a message for a key.
        If idx is None, appends to the end. Otherwise inserts at the given
        position and shifts subsequent messages.
        """
        if idx is None:
            idx = self._get_next_index(key)
            self._index_doc(key, idx, message)
        else:
            # Shift existing messages at >= idx up by one (reverse to avoid collisions)
            for hit in self._search(self._shift_query(key, idx)):
                self._os_client.update(
                    index=self.index,
                    id=hit["_id"],
                    body={"doc": {"index": hit["_source"]["index"] + 1}},
                    refresh=True,
                )
            self._index_doc(key, idx, message)

    async def async_add_message(
        self, key: str, message: ChatMessage, idx: Optional[int] = None
    ) -> None:
        """Async: add a message for a key."""
        if idx is None:
            idx = await self._aget_next_index(key)
            await self._aindex_doc(key, idx, message)
        else:
            for hit in await self._asearch(self._shift_query(key, idx)):
                await self._os_async_client.update(
                    index=self.index,
                    id=hit["_id"],
                    body={"doc": {"index": hit["_source"]["index"] + 1}},
                    refresh=True,
                )
            await self._aindex_doc(key, idx, message)

    def delete_messages(self, key: str) -> Optional[List[ChatMessage]]:
        """Delete all messages for a key. Returns the deleted messages."""
        messages = self.get_messages(key)
        self._delete_by_query(self._session_query(key))
        return messages if messages else None

    async def adelete_messages(self, key: str) -> Optional[List[ChatMessage]]:
        """Async: delete all messages for a key."""
        messages = await self.aget_messages(key)
        await self._adelete_by_query(self._session_query(key))
        return messages if messages else None

    def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
        """
        Delete a specific message by index for a key.
        After deletion, remaining messages are re-indexed to stay contiguous.
        """
        hits = self._search(self._find_by_index_query(key, idx), size=1)
        if not hits:
            return None
        deleted_message = _str_to_message(hits[0]["_source"]["message"])
        self._os_client.delete(index=self.index, id=hits[0]["_id"], refresh=True)
        self._reindex_session(key)
        return deleted_message

    async def adelete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
        """Async: delete a specific message by index for a key."""
        hits = await self._asearch(self._find_by_index_query(key, idx), size=1)
        if not hits:
            return None
        deleted_message = _str_to_message(hits[0]["_source"]["message"])
        await self._os_async_client.delete(
            index=self.index, id=hits[0]["_id"], refresh=True
        )
        await self._areindex_session(key)
        return deleted_message

    def delete_last_message(self, key: str) -> Optional[ChatMessage]:
        """Delete the last message for a key.

        No re-indexing is needed: removing the highest index keeps the
        remaining indices contiguous.
        """
        hits = self._search(self._session_sorted_query(key, order="desc"), size=1)
        if not hits:
            return None
        last_message = _str_to_message(hits[0]["_source"]["message"])
        self._os_client.delete(index=self.index, id=hits[0]["_id"], refresh=True)
        return last_message

    async def adelete_last_message(self, key: str) -> Optional[ChatMessage]:
        """Async: delete the last message for a key."""
        hits = await self._asearch(
            self._session_sorted_query(key, order="desc"), size=1
        )
        if not hits:
            return None
        last_message = _str_to_message(hits[0]["_source"]["message"])
        await self._os_async_client.delete(
            index=self.index, id=hits[0]["_id"], refresh=True
        )
        return last_message

    def get_keys(self) -> List[str]:
        """Get all unique session keys.

        Uses a terms aggregation; at most 10000 distinct sessions are
        returned per the aggregation size.
        """
        query = {
            "size": 0,
            "aggs": {
                "unique_sessions": {"terms": {"field": "session_id", "size": 10000}}
            },
        }
        resp = self._os_client.search(index=self.index, body=query)
        buckets = resp["aggregations"]["unique_sessions"]["buckets"]
        return [bucket["key"] for bucket in buckets]

    async def aget_keys(self) -> List[str]:
        """Async: get all unique session keys."""
        query = {
            "size": 0,
            "aggs": {
                "unique_sessions": {"terms": {"field": "session_id", "size": 10000}}
            },
        }
        resp = await self._os_async_client.search(index=self.index, body=query)
        buckets = resp["aggregations"]["unique_sessions"]["buckets"]
        return [bucket["key"] for bucket in buckets]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-opensearch/llama_index/storage/chat_store/opensearch/base.py",
"license": "MIT License",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-opensearch/tests/test_opensearch_chat_store.py | """Tests for OpensearchChatStore."""
from typing import Any, Dict
from unittest.mock import MagicMock, AsyncMock, patch
import pytest
from llama_index.core.llms import ChatMessage, MessageRole
from llama_index.storage.chat_store.opensearch.base import (
OpensearchChatStore,
_message_to_str,
_str_to_message,
)
# ---- serialisation helpers ----
def test_message_roundtrip() -> None:
    """ChatMessage survives a serialise/deserialise cycle."""
    msg = ChatMessage(role=MessageRole.USER, content="hello")
    assert _str_to_message(_message_to_str(msg)).content == "hello"


# ---- fixtures ----
def _make_hit(session_id: str, idx: int, message: ChatMessage) -> Dict[str, Any]:
    """Build a fake OpenSearch hit dict.

    Mirrors the document shape produced by OpensearchChatStore._index_doc,
    wrapped in the `_id`/`_source` envelope a real search response uses.
    """
    return {
        "_id": f"{session_id}_{idx}",
        "_source": {
            "session_id": session_id,
            "index": idx,
            "message": _message_to_str(message),
        },
    }


def _msg(content: str, role: MessageRole = MessageRole.USER) -> ChatMessage:
    """Shorthand for constructing a ChatMessage in tests."""
    return ChatMessage(role=role, content=content)
@pytest.fixture()
def mock_os_client() -> MagicMock:
    """Synchronous OpenSearch client stub; the index already 'exists'."""
    client = MagicMock()
    client.indices.exists.return_value = True
    return client


@pytest.fixture()
def mock_async_client() -> AsyncMock:
    """Async OpenSearch client stub; the index already 'exists'."""
    client = AsyncMock()
    client.indices.exists.return_value = True
    return client


@pytest.fixture()
def chat_store(
    mock_os_client: MagicMock, mock_async_client: AsyncMock
) -> OpensearchChatStore:
    """Chat store wired to the mock clients; no real connection is made."""
    with (
        patch(
            "llama_index.storage.chat_store.opensearch.base.OpenSearch",
            return_value=mock_os_client,
        ),
        patch(
            "llama_index.storage.chat_store.opensearch.base.AsyncOpenSearch",
            return_value=mock_async_client,
        ),
    ):
        return OpensearchChatStore(
            opensearch_url="https://localhost:9200",
            index="test_chat",
        )
# ---- class_name ----
def test_class_name() -> None:
    """class_name reports the registered store name."""
    assert OpensearchChatStore.class_name() == "OpensearchChatStore"


# ---- set_messages / get_messages ----
def test_set_messages(
    chat_store: OpensearchChatStore, mock_os_client: MagicMock
) -> None:
    """set_messages replaces the session: one delete, one index per message."""
    messages = [_msg("hi"), _msg("bye")]
    chat_store.set_messages("s1", messages)
    # Should delete existing and index new docs
    mock_os_client.delete_by_query.assert_called_once()
    assert mock_os_client.index.call_count == 2


def test_get_messages(
    chat_store: OpensearchChatStore, mock_os_client: MagicMock
) -> None:
    """get_messages deserializes hits in index order."""
    m1, m2 = _msg("first"), _msg("second")
    mock_os_client.search.return_value = {
        "hits": {"hits": [_make_hit("s1", 0, m1), _make_hit("s1", 1, m2)]}
    }
    result = chat_store.get_messages("s1")
    assert len(result) == 2
    assert result[0].content == "first"
    assert result[1].content == "second"


# ---- add_message ----
def test_add_message_append(
    chat_store: OpensearchChatStore, mock_os_client: MagicMock
) -> None:
    """add_message without idx appends at the end."""
    # _get_next_index does a search for highest index
    mock_os_client.search.return_value = {
        "hits": {"hits": [_make_hit("s1", 2, _msg("existing"))]}
    }
    chat_store.add_message("s1", _msg("new"))
    # Should index at position 3
    call_kwargs = mock_os_client.index.call_args
    assert call_kwargs.kwargs["body"]["index"] == 3


def test_add_message_insert(
    chat_store: OpensearchChatStore, mock_os_client: MagicMock
) -> None:
    """add_message with idx shifts existing messages."""
    existing = _make_hit("s1", 1, _msg("old"))
    mock_os_client.search.return_value = {"hits": {"hits": [existing]}}
    chat_store.add_message("s1", _msg("inserted"), idx=1)
    # Should update the existing doc's index to 2
    mock_os_client.update.assert_called_once()
    update_body = mock_os_client.update.call_args.kwargs["body"]
    assert update_body == {"doc": {"index": 2}}
    # Should index the new message at position 1
    index_body = mock_os_client.index.call_args.kwargs["body"]
    assert index_body["index"] == 1


# ---- delete_messages ----
def test_delete_messages(
    chat_store: OpensearchChatStore, mock_os_client: MagicMock
) -> None:
    """delete_messages returns the removed messages and issues the delete."""
    m1 = _msg("to-delete")
    mock_os_client.search.return_value = {"hits": {"hits": [_make_hit("s1", 0, m1)]}}
    result = chat_store.delete_messages("s1")
    assert result is not None
    assert len(result) == 1
    assert result[0].content == "to-delete"
    mock_os_client.delete_by_query.assert_called()


def test_delete_messages_empty(
    chat_store: OpensearchChatStore, mock_os_client: MagicMock
) -> None:
    """delete_messages on an empty session returns None."""
    mock_os_client.search.return_value = {"hits": {"hits": []}}
    result = chat_store.delete_messages("empty")
    assert result is None


# ---- delete_message ----
def test_delete_message(
    chat_store: OpensearchChatStore, mock_os_client: MagicMock
) -> None:
    """delete_message removes the target doc and reindexes the session."""
    m = _msg("target")
    # First search: find the message to delete
    # Second search (reindex): return remaining messages
    mock_os_client.search.side_effect = [
        {"hits": {"hits": [_make_hit("s1", 1, m)]}},
        # reindex: session_sorted_query returns remaining
        {"hits": {"hits": [_make_hit("s1", 0, _msg("kept"))]}},
    ]
    result = chat_store.delete_message("s1", 1)
    assert result is not None
    assert result.content == "target"
    mock_os_client.delete.assert_called_once()


def test_delete_message_not_found(
    chat_store: OpensearchChatStore, mock_os_client: MagicMock
) -> None:
    """delete_message returns None when no doc matches the index."""
    mock_os_client.search.return_value = {"hits": {"hits": []}}
    result = chat_store.delete_message("s1", 99)
    assert result is None


# ---- delete_last_message ----
def test_delete_last_message(
    chat_store: OpensearchChatStore, mock_os_client: MagicMock
) -> None:
    """delete_last_message removes and returns the highest-index message."""
    m = _msg("last")
    mock_os_client.search.return_value = {"hits": {"hits": [_make_hit("s1", 5, m)]}}
    result = chat_store.delete_last_message("s1")
    assert result is not None
    assert result.content == "last"
    mock_os_client.delete.assert_called_once()


def test_delete_last_message_empty(
    chat_store: OpensearchChatStore, mock_os_client: MagicMock
) -> None:
    """delete_last_message on an empty session returns None."""
    mock_os_client.search.return_value = {"hits": {"hits": []}}
    result = chat_store.delete_last_message("s1")
    assert result is None


# ---- get_keys ----
def test_get_keys(chat_store: OpensearchChatStore, mock_os_client: MagicMock) -> None:
    """get_keys flattens the terms-aggregation buckets into key strings."""
    mock_os_client.search.return_value = {
        "aggregations": {"unique_sessions": {"buckets": [{"key": "s1"}, {"key": "s2"}]}}
    }
    keys = chat_store.get_keys()
    assert keys == ["s1", "s2"]
# ---- async tests ----
@pytest.mark.asyncio
async def test_aget_messages(
    chat_store: OpensearchChatStore, mock_async_client: AsyncMock
) -> None:
    """aget_messages deserializes hits in index order via the async client."""
    m1, m2 = _msg("a"), _msg("b")
    mock_async_client.search.return_value = {
        "hits": {"hits": [_make_hit("s1", 0, m1), _make_hit("s1", 1, m2)]}
    }
    result = await chat_store.aget_messages("s1")
    assert len(result) == 2
    assert result[0].content == "a"


@pytest.mark.asyncio
async def test_aset_messages(
    chat_store: OpensearchChatStore, mock_async_client: AsyncMock
) -> None:
    """aset_messages replaces the session: one delete, one index per message."""
    messages = [_msg("x"), _msg("y")]
    await chat_store.aset_messages("s1", messages)
    mock_async_client.delete_by_query.assert_called_once()
    assert mock_async_client.index.call_count == 2


@pytest.mark.asyncio
async def test_adelete_last_message(
    chat_store: OpensearchChatStore, mock_async_client: AsyncMock
) -> None:
    """adelete_last_message removes and returns the highest-index message."""
    m = _msg("async-last")
    mock_async_client.search.return_value = {"hits": {"hits": [_make_hit("s1", 3, m)]}}
    result = await chat_store.adelete_last_message("s1")
    assert result is not None
    assert result.content == "async-last"
    mock_async_client.delete.assert_called_once()


@pytest.mark.asyncio
async def test_aget_keys(
    chat_store: OpensearchChatStore, mock_async_client: AsyncMock
) -> None:
    """aget_keys flattens the terms-aggregation buckets into key strings."""
    mock_async_client.search.return_value = {
        "aggregations": {
            "unique_sessions": {"buckets": [{"key": "a"}, {"key": "b"}, {"key": "c"}]}
        }
    }
    keys = await chat_store.aget_keys()
    assert keys == ["a", "b", "c"]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-opensearch/tests/test_opensearch_chat_store.py",
"license": "MIT License",
"lines": 209,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/indices/property_graph/test_schema_utils.py | """
Tests for property graph schema utility functions.
Validates that auto-generated Entity/Relation Pydantic models produce
JSON schemas compatible with OpenAI structured outputs and Google Gemini
when ``clean_additional_properties=True`` is passed.
"""
import json
from typing import Literal
from llama_index.core.indices.property_graph.transformations.utils import (
_clean_additional_properties,
get_entity_class,
get_relation_class,
)
def _schema_contains(schema: dict, key: str, value: object) -> bool:
    """Recursively check whether *schema* contains *key* mapped to *value*.

    Identity (`is`) comparison is intentional: callers probe for the
    singletons True/False.
    """
    if isinstance(schema, dict):
        return schema.get(key) is value or any(
            _schema_contains(child, key, value) for child in schema.values()
        )
    if isinstance(schema, list):
        return any(_schema_contains(entry, key, value) for entry in schema)
    return False
# -- _clean_additional_properties ------------------------------------------
def test_clean_additional_properties_sets_true_to_false():
    """Top-level additionalProperties: true is flipped to false in place."""
    schema = {"additionalProperties": True, "properties": {"x": {"type": "string"}}}
    _clean_additional_properties(schema)
    assert schema["additionalProperties"] is False


def test_clean_additional_properties_nested():
    """The cleanup recurses into nested object schemas."""
    schema = {
        "properties": {
            "inner": {
                "additionalProperties": True,
                "type": "object",
            }
        }
    }
    _clean_additional_properties(schema)
    assert schema["properties"]["inner"]["additionalProperties"] is False


def test_clean_additional_properties_ignores_false():
    """An existing additionalProperties: false is left untouched."""
    schema = {"additionalProperties": False}
    _clean_additional_properties(schema)
    assert schema["additionalProperties"] is False


def test_clean_additional_properties_ignores_absent():
    """No additionalProperties key is ever added where none existed."""
    schema = {"properties": {"x": {"type": "string"}}}
    _clean_additional_properties(schema)
    assert "additionalProperties" not in schema


def test_clean_additional_properties_handles_list():
    """List-valued schema members (e.g. anyOf) are traversed too."""
    schema = {"anyOf": [{"additionalProperties": True}, {"type": "null"}]}
    _clean_additional_properties(schema)
    assert schema["anyOf"][0]["additionalProperties"] is False
# -- get_entity_class (no props → no additionalProperties issue) -----------


def test_entity_class_without_props_has_no_additional_properties_true():
    """Without extra properties the schema never allows open-ended objects."""
    entities = Literal["PERSON", "LOCATION"]
    cls = get_entity_class(entities, None, strict=True)
    schema = cls.model_json_schema()
    assert not _schema_contains(schema, "additionalProperties", True)


# -- get_entity_class (default: additionalProperties preserved) ------------


def test_entity_class_with_props_default_preserves_additional_properties():
    """By default, additionalProperties: true is left as-is."""
    entities = Literal["PERSON", "LOCATION"]
    cls = get_entity_class(entities, ["age", "occupation"], strict=True)
    schema = cls.model_json_schema()
    assert _schema_contains(schema, "additionalProperties", True), (
        f"Expected additionalProperties: true in default mode:\n"
        f"{json.dumps(schema, indent=2)}"
    )


# -- get_entity_class (opt-in: fix applied) --------------------------------


def test_entity_class_with_props_clean_removes_additional_properties():
    """Opting in via ``clean_additional_properties`` strips the flag everywhere."""
    entities = Literal["PERSON", "LOCATION"]
    cls = get_entity_class(
        entities, ["age", "occupation"], strict=True, clean_additional_properties=True
    )
    schema = cls.model_json_schema()
    assert not _schema_contains(schema, "additionalProperties", True), (
        f"Schema still contains additionalProperties: true:\n"
        f"{json.dumps(schema, indent=2)}"
    )


def test_entity_class_with_props_non_strict_clean():
    """Cleaning also works when ``strict=False``."""
    cls = get_entity_class(str, ["age"], strict=False, clean_additional_properties=True)
    schema = cls.model_json_schema()
    assert not _schema_contains(schema, "additionalProperties", True)
# -- get_relation_class (no props → no issue) ------------------------------


def test_relation_class_without_props_has_no_additional_properties_true():
    """Without extra properties the schema never allows open-ended objects."""
    relations = Literal["USED_BY", "PART_OF"]
    cls = get_relation_class(relations, None, strict=True)
    schema = cls.model_json_schema()
    assert not _schema_contains(schema, "additionalProperties", True)


# -- get_relation_class (default: additionalProperties preserved) ----------


def test_relation_class_with_props_default_preserves_additional_properties():
    """By default, additionalProperties: true is left as-is."""
    relations = Literal["USED_BY", "PART_OF"]
    cls = get_relation_class(relations, ["weight", "source"], strict=True)
    schema = cls.model_json_schema()
    assert _schema_contains(schema, "additionalProperties", True), (
        f"Expected additionalProperties: true in default mode:\n"
        f"{json.dumps(schema, indent=2)}"
    )


# -- get_relation_class (opt-in: fix applied) ------------------------------


def test_relation_class_with_props_clean_removes_additional_properties():
    """Opting in via ``clean_additional_properties`` strips the flag everywhere."""
    relations = Literal["USED_BY", "PART_OF"]
    cls = get_relation_class(
        relations, ["weight", "source"], strict=True, clean_additional_properties=True
    )
    schema = cls.model_json_schema()
    assert not _schema_contains(schema, "additionalProperties", True), (
        f"Schema still contains additionalProperties: true:\n"
        f"{json.dumps(schema, indent=2)}"
    )


def test_relation_class_with_props_non_strict_clean():
    """Cleaning also works when ``strict=False``."""
    cls = get_relation_class(
        str, ["weight"], strict=False, clean_additional_properties=True
    )
    schema = cls.model_json_schema()
    assert not _schema_contains(schema, "additionalProperties", True)


# -- Models still validate correctly after the fix -------------------------


def test_entity_model_with_props_roundtrips():
    """Cleaned entity models still construct and hold their fields."""
    entities = Literal["PERSON", "LOCATION"]
    cls = get_entity_class(
        entities, ["age", "occupation"], strict=True, clean_additional_properties=True
    )
    instance = cls(type="PERSON", name="Alice", properties={"age": 30})
    assert instance.type == "PERSON"
    assert instance.name == "Alice"
    assert instance.properties == {"age": 30}


def test_relation_model_with_props_roundtrips():
    """Cleaned relation models still construct and hold their fields."""
    relations = Literal["USED_BY", "PART_OF"]
    cls = get_relation_class(
        relations, ["weight"], strict=True, clean_additional_properties=True
    )
    instance = cls(type="USED_BY", properties={"weight": 0.9})
    assert instance.type == "USED_BY"
    assert instance.properties == {"weight": 0.9}
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/indices/property_graph/test_schema_utils.py",
"license": "MIT License",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/query_engine/test_router_query_engine.py | import asyncio
from unittest.mock import AsyncMock, MagicMock
import pytest
from llama_index.core.base.base_selector import (
BaseSelector,
SelectorResult,
SingleSelection,
)
from llama_index.core.base.response.schema import Response
from llama_index.core.llms.mock import MockLLM
from llama_index.core.query_engine.router_query_engine import (
RouterQueryEngine,
ToolRetrieverRouterQueryEngine,
)
from llama_index.core.tools.types import ToolMetadata
class _AlwaysMultiSelector(BaseSelector):
    """Selector stub that always picks every available choice."""

    def _get_prompts(self):
        # This stub uses no prompts.
        return {}

    def _update_prompts(self, prompts):
        pass

    def _select(self, choices, query):
        # Return one selection per choice so the router fans out to all engines.
        return SelectorResult(
            selections=[
                SingleSelection(index=i, reason="") for i in range(len(choices))
            ]
        )

    async def _aselect(self, choices, query):
        # Async path simply delegates to the sync implementation.
        return self._select(choices, query)
def _make_query_engine_tool(name: str):
    """Build a mock query-engine tool whose ``aquery`` sleeps for 50 ms.

    The sleep makes event-loop blocking observable: if the router awaits
    the engines properly, other tasks can run during the sleep.
    """

    async def _fake_aquery(_):
        await asyncio.sleep(0.05)
        return Response(response="ok")

    engine = MagicMock()
    engine.aquery = AsyncMock(side_effect=_fake_aquery)
    tool = MagicMock()
    tool.query_engine = engine
    tool.metadata = ToolMetadata(name=name, description=name)
    return tool
class _MockSummarizer:
    """Minimal summarizer stub: always yields the same combined answer."""

    async def aget_response(self, *args, **kwargs):
        # All inputs are ignored; the tests only need a deterministic value.
        combined_text = "combined"
        return combined_text
async def _assert_not_blocked(coro) -> None:
    """Await *coro* while verifying the event loop keeps scheduling tasks.

    A tiny background sleeper is started before *coro* runs; if *coro*
    blocks the loop, the sleeper cannot finish promptly and the timing
    assertion below fails.
    """
    event_loop = asyncio.get_running_loop()
    started = event_loop.time()
    finished_at = None

    async def _sleeper():
        nonlocal finished_at
        await asyncio.sleep(0.01)
        finished_at = event_loop.time()

    watchdog = asyncio.create_task(_sleeper())
    # Yield control once so the watchdog actually starts before *coro*.
    await asyncio.sleep(0)
    await coro
    await watchdog
    assert finished_at is not None, "background task never ran"
    assert (finished_at - started) < 0.04, (
        f"background task finished {finished_at - started:.3f}s after start "
        f"(expected < 0.04s) — the event loop was likely blocked"
    )
@pytest.mark.asyncio
async def test_router_aquery_does_not_block_event_loop():
    """RouterQueryEngine.aquery must not starve other tasks on the loop."""
    tool_a = _make_query_engine_tool("a")
    tool_b = _make_query_engine_tool("b")
    router = RouterQueryEngine(
        selector=_AlwaysMultiSelector(),
        query_engine_tools=[tool_a, tool_b],
        llm=MockLLM(),
        summarizer=_MockSummarizer(),
    )
    await _assert_not_blocked(router.aquery("test query"))
    # The selector picks every tool, so each engine is hit exactly once.
    assert tool_a.query_engine.aquery.call_count == 1
    assert tool_b.query_engine.aquery.call_count == 1


@pytest.mark.asyncio
async def test_tool_retriever_router_aquery_does_not_block_event_loop():
    """ToolRetrieverRouterQueryEngine.aquery must not block the event loop."""
    tool_a = _make_query_engine_tool("a")
    tool_b = _make_query_engine_tool("b")
    retriever = MagicMock()
    retriever.retrieve = MagicMock(return_value=[tool_a, tool_b])
    router = ToolRetrieverRouterQueryEngine(
        retriever=retriever,
        llm=MockLLM(),
        summarizer=_MockSummarizer(),
    )
    await _assert_not_blocked(router.aquery("test query"))
    assert tool_a.query_engine.aquery.call_count == 1
    assert tool_b.query_engine.aquery.call_count == 1
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/query_engine/test_router_query_engine.py",
"license": "MIT License",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-screenpipe/llama_index/readers/screenpipe/base.py | """Screenpipe reader."""
import logging
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
import requests
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
logger = logging.getLogger(__name__)

# URL template for the Screenpipe search endpoint.
SEARCH_URL_TMPL = "{base_url}/search"

# Content types accepted by the Screenpipe /search endpoint.
VALID_CONTENT_TYPES = {
    "all",
    "ocr",
    "audio",
    "ui",
    "audio+ui",
    "ocr+ui",
    "audio+ocr",
}
class ScreenpipeReader(BasePydanticReader):
    """
    Screenpipe reader.

    Reads screen capture (OCR) and audio transcription data from a local
    Screenpipe instance via its REST API.

    See https://github.com/mediar-ai/screenpipe for details.

    Args:
        base_url (str): Base URL of the Screenpipe server.
            Defaults to ``http://localhost:3030``.
    """

    is_remote: bool = True
    base_url: str = "http://localhost:3030"

    @classmethod
    def class_name(cls) -> str:
        """Return the serialization class name."""
        return "ScreenpipeReader"

    @staticmethod
    def _to_utc_isoformat(dt: datetime) -> str:
        """
        Convert a datetime to a UTC ISO 8601 string.

        Screenpipe requires UTC timestamps. Naive datetimes (no tzinfo)
        are assumed to be local time and converted to UTC. Timezone-aware
        datetimes are converted to UTC before formatting.

        Fix: previously only *naive* datetimes were converted; an aware,
        non-UTC datetime was formatted as-is with a ``Z`` suffix, yielding
        a timestamp off by its UTC offset.
        """
        # astimezone() interprets a naive datetime as local time, so a
        # single unconditional call handles naive and aware inputs alike.
        dt = dt.astimezone(timezone.utc)
        return dt.strftime("%Y-%m-%dT%H:%M:%S.%fZ")

    def _search(self, params: Dict[str, Any]) -> Dict[str, Any]:
        """
        Execute a search request against the Screenpipe API.

        Raises:
            requests.HTTPError: If the server responds with an error status.
        """
        url = SEARCH_URL_TMPL.format(base_url=self.base_url.rstrip("/"))
        # NOTE(review): no request timeout is set; a hung server blocks
        # indefinitely — consider adding one in a follow-up.
        response = requests.get(url, params=params)
        response.raise_for_status()
        return response.json()

    def load_data(
        self,
        content_type: str = "all",
        query: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        app_name: Optional[str] = None,
        window_name: Optional[str] = None,
        limit: int = 20,
    ) -> List[Document]:
        """
        Load data from Screenpipe.

        Args:
            content_type: Type of content to retrieve.
                One of ``"all"``, ``"ocr"``, ``"audio"``, ``"ui"``,
                ``"audio+ui"``, ``"ocr+ui"``, ``"audio+ocr"``.
            query: Optional search query for semantic filtering.
            start_time: Filter results after this timestamp.
            end_time: Filter results before this timestamp.
            app_name: Filter by application name.
            window_name: Filter by window name.
            limit: Maximum number of results to return.

        Returns:
            List of documents.

        Raises:
            ValueError: If *content_type* is not a supported value.
        """
        if content_type not in VALID_CONTENT_TYPES:
            raise ValueError(
                f"Invalid content_type '{content_type}'. "
                f"Must be one of: {sorted(VALID_CONTENT_TYPES)}"
            )

        params: Dict[str, Any] = {
            "content_type": content_type,
            "limit": limit,
        }
        if query is not None:
            params["q"] = query
        if start_time is not None:
            params["start_time"] = self._to_utc_isoformat(start_time)
        if end_time is not None:
            params["end_time"] = self._to_utc_isoformat(end_time)
        if app_name is not None:
            params["app_name"] = app_name
        if window_name is not None:
            params["window_name"] = window_name

        # Page through results until *limit* items are collected or the
        # server reports no more data.
        all_items: List[Dict[str, Any]] = []
        offset = 0
        while True:
            params["offset"] = offset
            data = self._search(params)
            items = data.get("data", [])
            if not items:
                break
            all_items.extend(items)
            if len(all_items) >= limit:
                all_items = all_items[:limit]
                break
            pagination = data.get("pagination", {})
            total = pagination.get("total", 0)
            offset += len(items)
            if offset >= total:
                break

        documents = []
        for item in all_items:
            doc = self._item_to_document(item)
            if doc is not None:
                documents.append(doc)
        return documents

    def _item_to_document(self, item: Dict[str, Any]) -> Optional[Document]:
        """Convert a Screenpipe search result item to a Document.

        Returns ``None`` (and logs a warning) for unrecognized item types
        so that new server-side types do not break loading.
        """
        item_type = item.get("type", "")
        content = item.get("content", {})
        if item_type == "OCR":
            return self._ocr_to_document(content)
        elif item_type == "Audio":
            return self._audio_to_document(content)
        elif item_type == "UI":
            return self._ui_to_document(content)
        else:
            logger.warning("Unknown item type '%s', skipping.", item_type)
            return None

    def _ocr_to_document(self, content: Dict[str, Any]) -> Document:
        """Convert an OCR content item to a Document."""
        text = content.get("text", "")
        metadata: Dict[str, Any] = {
            "type": "ocr",
            "app_name": content.get("app_name", ""),
            "window_name": content.get("window_name", ""),
            "timestamp": content.get("timestamp", ""),
        }
        # Optional fields are only added when present and non-empty.
        if content.get("file_path"):
            metadata["file_path"] = content["file_path"]
        if content.get("browser_url"):
            metadata["browser_url"] = content["browser_url"]
        return Document(text=text, metadata=metadata)

    def _audio_to_document(self, content: Dict[str, Any]) -> Document:
        """Convert an Audio content item to a Document."""
        text = content.get("transcription", "")
        metadata: Dict[str, Any] = {
            "type": "audio",
            "device_name": content.get("device_name", ""),
            "device_type": content.get("device_type", ""),
            "timestamp": content.get("timestamp", ""),
        }
        if content.get("file_path"):
            metadata["file_path"] = content["file_path"]
        # Speaker info is optional; omit the keys entirely when absent.
        speaker = content.get("speaker")
        if speaker:
            metadata["speaker_id"] = speaker.get("id")
            metadata["speaker_name"] = speaker.get("name")
        return Document(text=text, metadata=metadata)

    def _ui_to_document(self, content: Dict[str, Any]) -> Document:
        """Convert a UI content item to a Document."""
        text = content.get("text", "")
        metadata: Dict[str, Any] = {
            "type": "ui",
            "app_name": content.get("app_name", ""),
            "window_name": content.get("window_name", ""),
            "timestamp": content.get("timestamp", ""),
        }
        if content.get("browser_url"):
            metadata["browser_url"] = content["browser_url"]
        return Document(text=text, metadata=metadata)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-screenpipe/llama_index/readers/screenpipe/base.py",
"license": "MIT License",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-screenpipe/tests/test_readers_screenpipe.py | """Tests for ScreenpipeReader."""
from datetime import datetime, timezone
from unittest.mock import patch
import pytest
import requests
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from llama_index.readers.screenpipe import ScreenpipeReader
@pytest.fixture()
def reader() -> ScreenpipeReader:
    """Reader pointed at the default local Screenpipe endpoint."""
    return ScreenpipeReader(base_url="http://localhost:3030")


@pytest.fixture()
def mock_search_response() -> dict:
    """Canned /search payload with one OCR, one Audio and one UI hit."""
    return {
        "data": [
            {
                "type": "OCR",
                "content": {
                    "frame_id": 1,
                    "text": "Hello from the screen",
                    "timestamp": "2024-01-15T10:30:00Z",
                    "file_path": "/tmp/frame.png",
                    "app_name": "Chrome",
                    "window_name": "Google",
                    "tags": [],
                    "browser_url": "https://google.com",
                },
            },
            {
                "type": "Audio",
                "content": {
                    "chunk_id": 1,
                    "transcription": "Meeting discussion about roadmap",
                    "timestamp": "2024-01-15T11:00:00Z",
                    "file_path": "/tmp/audio.wav",
                    "device_name": "MacBook Microphone",
                    "device_type": "Input",
                    "speaker": {"id": 1, "name": "John", "metadata": ""},
                },
            },
            {
                "type": "UI",
                "content": {
                    "id": 1,
                    "text": "Submit Button",
                    "timestamp": "2024-01-15T12:00:00Z",
                    "app_name": "VSCode",
                    "window_name": "main.py",
                },
            },
        ],
        "pagination": {"limit": 20, "offset": 0, "total": 3},
    }
def test_class():
    """ScreenpipeReader must derive from BasePydanticReader."""
    names_of_base_classes = [b.__name__ for b in ScreenpipeReader.__mro__]
    assert BasePydanticReader.__name__ in names_of_base_classes


def test_invalid_content_type(reader: ScreenpipeReader) -> None:
    """Unknown content types are rejected before any HTTP call is made."""
    with pytest.raises(ValueError, match="Invalid content_type"):
        reader.load_data(content_type="invalid")


@patch("llama_index.readers.screenpipe.base.requests.get")
def test_load_data(
    mock_get, reader: ScreenpipeReader, mock_search_response: dict
) -> None:
    """Each item type maps to a Document with the right text and metadata."""
    mock_get.return_value.json.return_value = mock_search_response
    mock_get.return_value.raise_for_status.return_value = None
    documents = reader.load_data(content_type="all", limit=20)
    assert len(documents) == 3
    assert all(isinstance(doc, Document) for doc in documents)
    # Verify OCR document
    assert documents[0].text == "Hello from the screen"
    assert documents[0].metadata["type"] == "ocr"
    assert documents[0].metadata["app_name"] == "Chrome"
    assert documents[0].metadata["browser_url"] == "https://google.com"
    # Verify Audio document
    assert documents[1].text == "Meeting discussion about roadmap"
    assert documents[1].metadata["type"] == "audio"
    assert documents[1].metadata["speaker_name"] == "John"
    assert documents[1].metadata["device_name"] == "MacBook Microphone"
    # Verify UI document
    assert documents[2].text == "Submit Button"
    assert documents[2].metadata["type"] == "ui"
    assert documents[2].metadata["app_name"] == "VSCode"


@patch("llama_index.readers.screenpipe.base.requests.get")
def test_load_data_passes_params(mock_get, reader: ScreenpipeReader) -> None:
    """All filter arguments are forwarded as query parameters."""
    mock_get.return_value.json.return_value = {"data": [], "pagination": {}}
    mock_get.return_value.raise_for_status.return_value = None
    reader.load_data(
        content_type="ocr",
        query="meeting",
        app_name="Zoom",
        window_name="Meeting",
        limit=10,
    )
    call_params = mock_get.call_args[1]["params"]
    assert call_params["content_type"] == "ocr"
    assert call_params["q"] == "meeting"
    assert call_params["app_name"] == "Zoom"
    assert call_params["window_name"] == "Meeting"
    assert call_params["limit"] == 10


@patch("llama_index.readers.screenpipe.base.requests.get")
def test_load_data_empty_response(mock_get, reader: ScreenpipeReader) -> None:
    """An empty search result yields an empty document list."""
    mock_get.return_value.json.return_value = {"data": [], "pagination": {}}
    mock_get.return_value.raise_for_status.return_value = None
    documents = reader.load_data()
    assert documents == []


@patch("llama_index.readers.screenpipe.base.requests.get")
def test_unknown_item_type_skipped(mock_get, reader: ScreenpipeReader) -> None:
    """Items with unrecognized types are skipped, not raised on."""
    mock_get.return_value.json.return_value = {
        "data": [{"type": "FutureType", "content": {"text": "something"}}],
        "pagination": {},
    }
    mock_get.return_value.raise_for_status.return_value = None
    documents = reader.load_data()
    assert documents == []


@patch("llama_index.readers.screenpipe.base.requests.get")
def test_naive_datetime_converted_to_utc(mock_get, reader: ScreenpipeReader) -> None:
    """Naive start_time values are serialized with a Z (UTC) suffix."""
    mock_get.return_value.json.return_value = {"data": [], "pagination": {}}
    mock_get.return_value.raise_for_status.return_value = None
    start = datetime(2024, 1, 15, 10, 0, 0)
    reader.load_data(start_time=start)
    call_params = mock_get.call_args[1]["params"]
    # Naive datetime is treated as local time and converted to UTC
    assert call_params["start_time"].endswith("Z")


@patch("llama_index.readers.screenpipe.base.requests.get")
def test_aware_datetime_serialized(mock_get, reader: ScreenpipeReader) -> None:
    """A UTC-aware datetime serializes verbatim with microsecond precision."""
    mock_get.return_value.json.return_value = {"data": [], "pagination": {}}
    mock_get.return_value.raise_for_status.return_value = None
    start = datetime(2024, 1, 15, 10, 0, 0, tzinfo=timezone.utc)
    reader.load_data(start_time=start)
    call_params = mock_get.call_args[1]["params"]
    assert call_params["start_time"] == "2024-01-15T10:00:00.000000Z"
@patch("llama_index.readers.screenpipe.base.requests.get")
def test_audio_without_speaker(mock_get, reader: ScreenpipeReader) -> None:
    """Speaker metadata keys are omitted entirely when no speaker is present."""
    mock_get.return_value.json.return_value = {
        "data": [
            {
                "type": "Audio",
                "content": {
                    "transcription": "Some audio",
                    "timestamp": "2024-01-15T11:00:00Z",
                    "device_name": "Mic",
                    "device_type": "Input",
                },
            },
        ],
        "pagination": {"limit": 20, "offset": 0, "total": 1},
    }
    mock_get.return_value.raise_for_status.return_value = None
    documents = reader.load_data()
    assert len(documents) == 1
    assert documents[0].text == "Some audio"
    assert "speaker_id" not in documents[0].metadata
    assert "speaker_name" not in documents[0].metadata


@patch("llama_index.readers.screenpipe.base.requests.get")
def test_pagination(mock_get, reader: ScreenpipeReader) -> None:
    """load_data keeps fetching pages until the reported total is reached."""
    page1 = {
        "data": [{"type": "OCR", "content": {"text": "page1", "timestamp": ""}}],
        "pagination": {"limit": 1, "offset": 0, "total": 2},
    }
    page2 = {
        "data": [{"type": "OCR", "content": {"text": "page2", "timestamp": ""}}],
        "pagination": {"limit": 1, "offset": 1, "total": 2},
    }
    mock_get.return_value.raise_for_status.return_value = None
    mock_get.return_value.json.side_effect = [page1, page2]
    documents = reader.load_data(limit=5)
    assert len(documents) == 2
    assert documents[0].text == "page1"
    assert documents[1].text == "page2"
    assert mock_get.call_count == 2


@patch("llama_index.readers.screenpipe.base.requests.get")
def test_http_error_raised(mock_get, reader: ScreenpipeReader) -> None:
    """HTTP errors from the server propagate to the caller."""
    mock_get.return_value.raise_for_status.side_effect = requests.exceptions.HTTPError(
        "500 Server Error"
    )
    with pytest.raises(requests.exceptions.HTTPError):
        reader.load_data()


@patch("llama_index.readers.screenpipe.base.requests.get")
def test_trailing_slash_in_base_url(mock_get) -> None:
    """A trailing slash in base_url must not produce a double slash in the URL."""
    mock_get.return_value.json.return_value = {"data": [], "pagination": {}}
    mock_get.return_value.raise_for_status.return_value = None
    reader = ScreenpipeReader(base_url="http://localhost:3030/")
    reader.load_data()
    called_url = mock_get.call_args[0][0]
    assert called_url == "http://localhost:3030/search"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-screenpipe/tests/test_readers_screenpipe.py",
"license": "MIT License",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/llama_index/core/rate_limiter.py | """Rate limiters for LLM and embedding API calls."""
import asyncio
import logging
import threading
import time
from abc import ABC, abstractmethod
from typing import Optional
from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr
logger = logging.getLogger(__name__)
class BaseRateLimiter(ABC):
    """
    Abstract base class for rate limiters.

    All rate limiter implementations must inherit from this class and
    implement :meth:`acquire` (synchronous) and :meth:`async_acquire`
    (asynchronous). This allows swapping in alternative strategies
    (e.g. distributed rate limiting via Redis) without changing the
    calling code in ``BaseLLM`` or ``BaseEmbedding``.
    """

    @abstractmethod
    def acquire(self, num_tokens: int = 0) -> None:
        """
        Block until one request is allowed (synchronous).

        Args:
            num_tokens: Estimated token count for this request.
                Implementations may ignore this if they only track
                request counts.
        """
        # Intentionally no body: subclasses provide the waiting strategy.

    @abstractmethod
    async def async_acquire(self, num_tokens: int = 0) -> None:
        """
        Wait until one request is allowed (asynchronous).

        This is the coroutine counterpart of :meth:`acquire` and must not
        block the event loop while waiting.

        Args:
            num_tokens: Estimated token count for this request.
                Implementations may ignore this if they only track
                request counts.
        """
class TokenBucketRateLimiter(BaseRateLimiter, BaseModel):
    """
    Token-bucket rate limiter for controlling API request throughput.

    Supports both requests-per-minute (RPM) and tokens-per-minute (TPM)
    limiting. Instances can be shared across multiple LLM and embedding
    objects that hit the same API endpoint, so a single budget is enforced
    globally.

    A token-bucket allows an initial burst up to the configured limit,
    then smoothly throttles to the sustained rate. This matches the
    behaviour of most LLM provider rate limits.

    A single request whose estimated token count exceeds
    ``tokens_per_minute`` is clamped to the bucket capacity (with a
    warning): the bucket can never hold more than its capacity, so such a
    request could otherwise never be satisfied and ``acquire`` would wait
    forever.

    Args:
        requests_per_minute: Maximum requests allowed per minute.
            ``None`` disables request-rate limiting.
        tokens_per_minute: Maximum tokens allowed per minute.
            ``None`` disables token-rate limiting.

    Examples:
        .. code-block:: python

            from llama_index.core.rate_limiter import TokenBucketRateLimiter

            # Share a single limiter across LLM and embedding instances
            groq_limiter = TokenBucketRateLimiter(requests_per_minute=30)
            llm = SomeLLM(rate_limiter=groq_limiter)
            embed = SomeEmbedding(rate_limiter=groq_limiter)
    """

    requests_per_minute: Optional[float] = Field(
        default=None,
        description="Maximum number of requests per minute.",
        gt=0,
    )
    tokens_per_minute: Optional[float] = Field(
        default=None,
        description="Maximum number of tokens per minute.",
        gt=0,
    )

    # Current/maximum bucket contents and refill rates (units per second).
    _request_tokens: float = PrivateAttr(default=0.0)
    _request_max_tokens: float = PrivateAttr(default=0.0)
    _request_refill_rate: float = PrivateAttr(default=0.0)
    _token_tokens: float = PrivateAttr(default=0.0)
    _token_max_tokens: float = PrivateAttr(default=0.0)
    _token_refill_rate: float = PrivateAttr(default=0.0)
    _last_refill_time: float = PrivateAttr(default=0.0)
    # Guards all bucket state; shared by the sync and async acquire paths.
    _lock: threading.Lock = PrivateAttr(default_factory=threading.Lock)

    def __init__(self, **kwargs: object) -> None:
        """Initialize bucket state; both buckets start full (burst allowed)."""
        super().__init__(**kwargs)
        now = time.monotonic()
        self._last_refill_time = now
        if self.requests_per_minute is not None:
            self._request_max_tokens = self.requests_per_minute
            self._request_tokens = self.requests_per_minute
            self._request_refill_rate = self.requests_per_minute / 60.0
        if self.tokens_per_minute is not None:
            self._token_max_tokens = self.tokens_per_minute
            self._token_tokens = self.tokens_per_minute
            self._token_refill_rate = self.tokens_per_minute / 60.0

    def _clamp_tokens(self, num_tokens: int) -> float:
        """
        Cap *num_tokens* at the token-bucket capacity.

        Without clamping, a request larger than ``tokens_per_minute``
        could never be satisfied (the bucket caps at its capacity) and
        ``acquire``/``async_acquire`` would loop forever.
        """
        if (
            self.tokens_per_minute is not None
            and num_tokens > self._token_max_tokens
        ):
            logger.warning(
                "Requested %s tokens exceeds tokens_per_minute=%s; "
                "clamping to bucket capacity.",
                num_tokens,
                self.tokens_per_minute,
            )
            return self._token_max_tokens
        return num_tokens

    def _refill(self) -> None:
        """
        Refill token buckets based on elapsed time.

        Must be called while holding ``_lock``.
        """
        now = time.monotonic()
        elapsed = now - self._last_refill_time
        self._last_refill_time = now
        if self.requests_per_minute is not None:
            self._request_tokens = min(
                self._request_max_tokens,
                self._request_tokens + elapsed * self._request_refill_rate,
            )
        if self.tokens_per_minute is not None:
            self._token_tokens = min(
                self._token_max_tokens,
                self._token_tokens + elapsed * self._token_refill_rate,
            )

    def _wait_time(self, num_tokens: float = 0) -> float:
        """
        Return seconds to wait before the next request is allowed.

        Must be called while holding ``_lock`` and after ``_refill()``.
        """
        wait = 0.0
        if self.requests_per_minute is not None and self._request_tokens < 1.0:
            wait = max(
                wait,
                (1.0 - self._request_tokens) / self._request_refill_rate,
            )
        if (
            self.tokens_per_minute is not None
            and num_tokens > 0
            and self._token_tokens < num_tokens
        ):
            wait = max(
                wait,
                (num_tokens - self._token_tokens) / self._token_refill_rate,
            )
        return wait

    def _consume(self, num_tokens: float = 0) -> None:
        """
        Consume one request token and *num_tokens* LLM tokens.

        Must be called while holding ``_lock``.
        """
        if self.requests_per_minute is not None:
            self._request_tokens -= 1.0
        if self.tokens_per_minute is not None and num_tokens > 0:
            self._token_tokens -= num_tokens

    def acquire(self, num_tokens: int = 0) -> None:
        """
        Block until one request is allowed (synchronous).

        Args:
            num_tokens: Estimated token count for this request. Only
                consulted when ``tokens_per_minute`` is configured; values
                above the bucket capacity are clamped.
        """
        tokens_needed = self._clamp_tokens(num_tokens)
        while True:
            with self._lock:
                self._refill()
                wait = self._wait_time(tokens_needed)
                if wait <= 0:
                    self._consume(tokens_needed)
                    return
            # Sleep outside the lock so other threads can refill/consume.
            time.sleep(wait)

    async def async_acquire(self, num_tokens: int = 0) -> None:
        """
        Wait until one request is allowed (asynchronous).

        Args:
            num_tokens: Estimated token count for this request. Only
                consulted when ``tokens_per_minute`` is configured; values
                above the bucket capacity are clamped.
        """
        tokens_needed = self._clamp_tokens(num_tokens)
        while True:
            with self._lock:
                self._refill()
                wait = self._wait_time(tokens_needed)
                if wait <= 0:
                    self._consume(tokens_needed)
                    return
            # Yield to the event loop instead of blocking the thread.
            await asyncio.sleep(wait)


# Backwards-compatible alias
RateLimiter = TokenBucketRateLimiter
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/llama_index/core/rate_limiter.py",
"license": "MIT License",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-core/tests/test_rate_limiter.py | """Tests for the rate limiter module."""
import asyncio
import time
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.llms.mock import MockLLM
from llama_index.core.rate_limiter import (
BaseRateLimiter,
RateLimiter,
TokenBucketRateLimiter,
)
# ---------------------------------------------------------------------------
# BaseRateLimiter contract tests
# ---------------------------------------------------------------------------
def test_base_rate_limiter_is_abstract() -> None:
    """BaseRateLimiter cannot be instantiated directly."""
    with pytest.raises(TypeError, match="abstract method"):
        BaseRateLimiter()


def test_token_bucket_is_subclass_of_base() -> None:
    assert issubclass(TokenBucketRateLimiter, BaseRateLimiter)


def test_rate_limiter_alias_is_token_bucket() -> None:
    # RateLimiter is a backwards-compatible alias, not a distinct class.
    assert RateLimiter is TokenBucketRateLimiter


def test_instance_is_base_rate_limiter() -> None:
    rl = TokenBucketRateLimiter(requests_per_minute=60)
    assert isinstance(rl, BaseRateLimiter)


def test_custom_rate_limiter_subclass() -> None:
    """Users can create custom rate limiters by subclassing BaseRateLimiter."""

    class FixedDelayLimiter(BaseRateLimiter):
        def acquire(self, num_tokens: int = 0) -> None:
            pass

        async def async_acquire(self, num_tokens: int = 0) -> None:
            pass

    limiter = FixedDelayLimiter()
    assert isinstance(limiter, BaseRateLimiter)
    limiter.acquire()
# ---------------------------------------------------------------------------
# Token-bucket algorithm tests
# ---------------------------------------------------------------------------
def test_creation_rpm_only() -> None:
    rl = RateLimiter(requests_per_minute=60)
    assert rl.requests_per_minute == 60
    assert rl.tokens_per_minute is None


def test_creation_tpm_only() -> None:
    rl = RateLimiter(tokens_per_minute=10000)
    assert rl.tokens_per_minute == 10000
    assert rl.requests_per_minute is None


def test_creation_both() -> None:
    rl = RateLimiter(requests_per_minute=30, tokens_per_minute=5000)
    assert rl.requests_per_minute == 30
    assert rl.tokens_per_minute == 5000


def test_validation_rejects_zero() -> None:
    # The field constraint (gt=0) must reject zero.
    with pytest.raises(Exception):
        RateLimiter(requests_per_minute=0)


def test_validation_rejects_negative() -> None:
    with pytest.raises(Exception):
        RateLimiter(tokens_per_minute=-1)


def test_burst_within_limit() -> None:
    """Requests within the bucket size should not block."""
    rl = RateLimiter(requests_per_minute=10)
    start = time.monotonic()
    for _ in range(10):
        rl.acquire()
    elapsed = time.monotonic() - start
    assert elapsed < 1.0


def test_acquire_blocks_when_exhausted() -> None:
    """After draining the bucket, acquire blocks until a token refills."""
    rl = RateLimiter(requests_per_minute=60)
    # Drain the bucket
    for _ in range(60):
        rl.acquire()
    with (
        patch("llama_index.core.rate_limiter.time.sleep") as mock_sleep,
        patch("llama_index.core.rate_limiter.time.monotonic") as mock_time,
    ):
        base = 1000.0
        # Align internal clock with mock timeline
        rl._last_refill_time = base
        rl._request_tokens = 0.0
        # First monotonic(): no elapsed time, bucket empty -> must sleep
        # Second monotonic(): 2s elapsed -> refills 2 tokens -> acquire succeeds
        mock_time.side_effect = [base, base + 2.0]
        mock_sleep.return_value = None
        rl.acquire()
        mock_sleep.assert_called_once()


def test_refill_caps_at_max() -> None:
    """Token count must not exceed the configured maximum."""
    rl = RateLimiter(requests_per_minute=10)
    # Simulate long idle period
    rl._last_refill_time = time.monotonic() - 3600
    rl._request_tokens = 0.0
    rl.acquire()
    # After refill (capped at 10) minus 1 consumed
    assert rl._request_tokens <= 9.0
    assert rl._request_tokens >= 8.5
@pytest.mark.asyncio
async def test_async_acquire_burst_within_limit() -> None:
rl = RateLimiter(requests_per_minute=10)
start = time.monotonic()
for _ in range(10):
await rl.async_acquire()
elapsed = time.monotonic() - start
assert elapsed < 1.0
@pytest.mark.asyncio
async def test_async_acquire_tpm_limiting() -> None:
"""TPM limiting should throttle based on token count."""
rl = RateLimiter(tokens_per_minute=100)
await rl.async_acquire(num_tokens=100)
with (
patch(
"llama_index.core.rate_limiter.asyncio.sleep",
new_callable=AsyncMock,
) as mock_sleep,
patch("llama_index.core.rate_limiter.time.monotonic") as mock_time,
):
base = 1000.0
# Align internal clock with mock timeline
rl._last_refill_time = base
rl._token_tokens = 0.0
# First monotonic(): no elapsed time, need 50 tokens but 0 available -> sleep
# Second monotonic(): 60s elapsed -> refills 100 tokens -> acquire succeeds
mock_time.side_effect = [base, base + 60.0]
await rl.async_acquire(num_tokens=50)
mock_sleep.assert_called_once()
@pytest.mark.asyncio
async def test_concurrent_async_rate_limiting() -> None:
    """Multiple concurrent async_acquire calls must all complete."""
    limiter = RateLimiter(requests_per_minute=600)
    completed: list = []

    async def grab(idx: int) -> None:
        await limiter.async_acquire()
        completed.append(idx)

    await asyncio.gather(*(grab(i) for i in range(20)))
    assert len(completed) == 20
# ---------------------------------------------------------------------------
# LLM integration tests
# ---------------------------------------------------------------------------
def test_llm_sync_chat_calls_acquire() -> None:
    """chat() must route through the attached rate limiter at least once."""
    limiter = MagicMock()
    llm = MockLLM()
    llm.rate_limiter = limiter
    llm.chat([ChatMessage(role="user", content="hello")])
    # chat() delegates to complete() via callbacks, so acquire may fire more than once.
    assert limiter.acquire.call_count >= 1
@pytest.mark.asyncio
async def test_llm_async_chat_calls_async_acquire() -> None:
    """achat() must await async_acquire() on the attached limiter."""
    limiter = MagicMock()
    limiter.async_acquire = AsyncMock()
    llm = MockLLM()
    llm.rate_limiter = limiter
    await llm.achat([ChatMessage(role="user", content="hello")])
    limiter.async_acquire.assert_called_once()
def test_llm_sync_complete_calls_acquire() -> None:
    """complete() must call acquire() exactly once."""
    limiter = MagicMock()
    llm = MockLLM()
    llm.rate_limiter = limiter
    llm.complete("hello")
    limiter.acquire.assert_called_once()
@pytest.mark.asyncio
async def test_llm_async_complete_calls_async_acquire() -> None:
    """acomplete() must await async_acquire() exactly once."""
    limiter = MagicMock()
    limiter.async_acquire = AsyncMock()
    llm = MockLLM()
    llm.rate_limiter = limiter
    await llm.acomplete("hello")
    limiter.async_acquire.assert_called_once()
def test_llm_without_rate_limiter_works() -> None:
    """An LLM with no limiter attached must still complete normally."""
    llm = MockLLM()
    assert llm.rate_limiter is None
    assert llm.complete("hello").text == "hello"
# ---------------------------------------------------------------------------
# Embedding integration tests
# ---------------------------------------------------------------------------
def test_embedding_single_calls_acquire() -> None:
    """A single embedding request must acquire exactly one limiter slot."""
    from llama_index.core.embeddings.mock_embed_model import MockEmbedding

    limiter = MagicMock()
    model = MockEmbedding(embed_dim=8, rate_limiter=limiter)
    model.get_text_embedding("test")
    limiter.acquire.assert_called_once()
def test_embedding_batch_calls_acquire_per_batch() -> None:
    """Batched embedding must acquire once per batch, not once per text."""
    from llama_index.core.embeddings.mock_embed_model import MockEmbedding

    limiter = MagicMock()
    model = MockEmbedding(embed_dim=8, rate_limiter=limiter, embed_batch_size=5)
    # 12 texts with batch size 5 -> batches of 5, 5, and 2.
    model.get_text_embedding_batch(["text"] * 12)
    assert limiter.acquire.call_count == 3
def test_embedding_without_rate_limiter_works() -> None:
    """An embedding model with no limiter must embed normally."""
    from llama_index.core.embeddings.mock_embed_model import MockEmbedding

    model = MockEmbedding(embed_dim=8)
    assert model.rate_limiter is None
    assert len(model.get_text_embedding("test")) == 8
def test_shared_rate_limiter_across_instances() -> None:
    """Two embedding models may share one limiter instance."""
    from llama_index.core.embeddings.mock_embed_model import MockEmbedding

    shared = RateLimiter(requests_per_minute=100)
    first = MockEmbedding(embed_dim=8, rate_limiter=shared)
    second = MockEmbedding(embed_dim=8, rate_limiter=shared)
    assert first.rate_limiter is second.rate_limiter
def test_shared_limiter_between_llm_and_embedding() -> None:
    """An LLM and an embedding model may share one limiter instance."""
    from llama_index.core.embeddings.mock_embed_model import MockEmbedding

    shared = RateLimiter(requests_per_minute=100)
    llm = MockLLM()
    llm.rate_limiter = shared
    embed_model = MockEmbedding(embed_dim=8, rate_limiter=shared)
    assert llm.rate_limiter is embed_model.rate_limiter
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/test_rate_limiter.py",
"license": "MIT License",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/storage/docstore/test_legacy_json_to_doc.py | import pytest
from llama_index.core.constants import DATA_KEY, TYPE_KEY
from llama_index.core.schema import Document, TextNode, ImageNode, IndexNode
from llama_index.core.storage.docstore.utils import legacy_json_to_doc
def _make_legacy_dict(node_cls, doc_id: str, *, extra: dict | None = None) -> dict:
    """Build a legacy-format docstore dict for the given node class."""
    payload = {
        "text": "hello",
        "extra_info": {},
        "doc_id": doc_id,
        "relationships": {},
        # Overlay any class-specific fields (e.g. image, index_id) last.
        **(extra or {}),
    }
    return {TYPE_KEY: node_cls.get_type(), DATA_KEY: payload}
@pytest.mark.parametrize(
    ("node_cls", "doc_id", "extra"),
    [
        (Document, "doc-123", None),
        (TextNode, "text-456", None),
        (ImageNode, "img-789", {"image": None}),
        (IndexNode, "idx-999", {"index_id": "index-abc"}),
    ],
)
def test_legacy_json_to_doc_preserves_doc_id(node_cls, doc_id, extra):
    """Converting a legacy dict must keep the original doc_id for every node type."""
    doc_dict = _make_legacy_dict(node_cls, doc_id, extra=extra)
    node = legacy_json_to_doc(doc_dict)
    assert node.id_ == doc_id
def test_legacy_json_to_doc_unknown_type_raises():
    """An unrecognized node type in legacy data must raise ValueError."""
    bogus = {
        TYPE_KEY: "not-a-real-node-type",
        DATA_KEY: {"text": "hello", "extra_info": {}, "doc_id": "x", "relationships": {}},
    }
    with pytest.raises(ValueError):
        legacy_json_to_doc(bogus)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/storage/docstore/test_legacy_json_to_doc.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-igpt-email/llama_index/readers/igpt_email/base.py | """iGPT Email Intelligence reader."""
import json
from typing import List, Optional
from igptai import IGPT
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
class IGPTEmailReader(BaseReader):
    """
    iGPT Email Intelligence Reader.

    Loads structured, reasoning-ready email context from the iGPT API as
    LlamaIndex Documents for indexing and retrieval.

    Args:
        api_key (str): iGPT API key. See https://docs.igpt.ai for details.
        user (str): User identifier for the connected mailbox.

    Example:
        .. code-block:: python

            from llama_index.readers.igpt_email import IGPTEmailReader
            from llama_index.core import VectorStoreIndex

            reader = IGPTEmailReader(api_key="your-key", user="user-id")
            documents = reader.load_data(
                query="project Alpha", date_from="2025-01-01"
            )
            index = VectorStoreIndex.from_documents(documents)
    """

    def __init__(self, api_key: str, user: str) -> None:
        """Initialize with parameters."""
        super().__init__()
        # One authenticated client per reader; all queries run on its behalf.
        self.client = IGPT(api_key=api_key, user=user)

    def load_data(
        self,
        query: str,
        date_from: Optional[str] = None,
        date_to: Optional[str] = None,
        max_results: int = 50,
    ) -> List[Document]:
        """
        Load email context as Documents from iGPT recall.search().

        Each result from the iGPT API is returned as a separate Document.
        Thread metadata (subject, participants, date, thread ID) is preserved
        in metadata for filtering and attribution during retrieval.

        Args:
            query (str): Search query to run against connected email data.
            date_from (str, optional): Filter results from this date (YYYY-MM-DD).
            date_to (str, optional): Filter results up to this date (YYYY-MM-DD).
            max_results (int): Maximum number of results to return. Default is 50.

        Returns:
            List[Document]: One Document per email result, ready for indexing.
                Thread metadata is stored in metadata (subject, from, to,
                date, thread_id, id).

        Raises:
            ValueError: If the API response is a dict containing an "error" key.
        """
        response = self.client.recall.search(
            query=query,
            date_from=date_from,
            date_to=date_to,
            max_results=max_results,
        )
        # A dict response carrying an "error" key is treated as an API failure.
        if isinstance(response, dict) and "error" in response:
            raise ValueError(f"iGPT API error: {response['error']}")
        if not response:
            return []
        # Accept either a bare list of results or a {"results": [...]} wrapper.
        results = (
            response if isinstance(response, list) else response.get("results", [])
        )
        documents = []
        for item in results:
            if isinstance(item, dict):
                # Prefer "content", fall back to "body", else serialize the item.
                text = item.get("content", item.get("body", json.dumps(item)))
                metadata = {
                    "source": "igpt_email",
                    "subject": item.get("subject"),
                    "from": item.get("from"),
                    "to": item.get("to"),
                    "date": item.get("date"),
                    "thread_id": item.get("thread_id"),
                    "id": item.get("id"),
                }
            else:
                # Non-dict items (e.g. raw strings) become minimal Documents.
                text = str(item)
                metadata = {"source": "igpt_email"}
            documents.append(Document(text=text, metadata=metadata))
        return documents
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-igpt-email/llama_index/readers/igpt_email/base.py",
"license": "MIT License",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-igpt-email/tests/test_readers_igpt_email.py | """Tests for IGPTEmailReader."""
import json
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.readers.base import BaseReader
from llama_index.core.schema import Document
from llama_index.readers.igpt_email import IGPTEmailReader
def test_class():
    """IGPTEmailReader must inherit from BaseReader."""
    mro_names = {base.__name__ for base in IGPTEmailReader.__mro__}
    assert BaseReader.__name__ in mro_names
@patch("llama_index.readers.igpt_email.base.IGPT")
def test_load_data_returns_documents(mock_igpt_class):
    """load_data() returns one Document per result with thread metadata intact."""
    mock_results = [
        {
            "id": "msg-1",
            "subject": "Vendor proposal",
            "content": "Please find attached the vendor proposal for review.",
            "from": "vendor@external.com",
            "to": ["procurement@company.com"],
            "date": "2025-02-05",
            "thread_id": "thread-001",
        },
        {
            "id": "msg-2",
            "subject": "Re: Vendor proposal",
            "content": "We'll review and get back to you by Friday.",
            "from": "procurement@company.com",
            "to": ["vendor@external.com"],
            "date": "2025-02-06",
            "thread_id": "thread-001",
        },
    ]
    mock_client = MagicMock()
    mock_client.recall.search.return_value = mock_results
    mock_igpt_class.return_value = mock_client
    reader = IGPTEmailReader(api_key="test-key", user="test-user")
    results = reader.load_data(
        query="vendor proposal",
        date_from="2025-02-01",
        date_to="2025-02-28",
        max_results=20,
    )
    # Constructor and search args must be forwarded verbatim to the SDK.
    mock_igpt_class.assert_called_once_with(api_key="test-key", user="test-user")
    mock_client.recall.search.assert_called_once_with(
        query="vendor proposal",
        date_from="2025-02-01",
        date_to="2025-02-28",
        max_results=20,
    )
    assert len(results) == 2
    assert all(isinstance(doc, Document) for doc in results)
    # Text comes from "content"; thread fields land in metadata unchanged.
    assert results[0].text == "Please find attached the vendor proposal for review."
    assert results[0].metadata["subject"] == "Vendor proposal"
    assert results[0].metadata["from"] == "vendor@external.com"
    assert results[0].metadata["to"] == ["procurement@company.com"]
    assert results[0].metadata["date"] == "2025-02-05"
    assert results[0].metadata["thread_id"] == "thread-001"
    assert results[0].metadata["id"] == "msg-1"
    assert results[0].metadata["source"] == "igpt_email"
    assert results[1].text == "We'll review and get back to you by Friday."
    assert results[1].metadata["id"] == "msg-2"
@patch("llama_index.readers.igpt_email.base.IGPT")
def test_load_data_with_results_wrapper(mock_igpt_class):
    """Test load_data() handles a dict response with a 'results' key."""
    mock_response = {
        "results": [
            {
                "id": "msg-3",
                "subject": "Support ticket #42",
                "content": "Customer is unable to log in.",
                "from": "support@company.com",
                "to": ["tech@company.com"],
                "date": "2025-02-12",
                "thread_id": "thread-002",
            }
        ]
    }
    mock_client = MagicMock()
    mock_client.recall.search.return_value = mock_response
    mock_igpt_class.return_value = mock_client
    reader = IGPTEmailReader(api_key="test-key", user="test-user")
    results = reader.load_data(query="support ticket")
    # The wrapper dict is unpacked: one Document per entry under "results".
    assert len(results) == 1
    assert results[0].text == "Customer is unable to log in."
    assert results[0].metadata["subject"] == "Support ticket #42"
@patch("llama_index.readers.igpt_email.base.IGPT")
def test_load_data_uses_body_fallback(mock_igpt_class):
    """Test load_data() falls back to 'body' when 'content' is absent."""
    mock_results = [
        {
            "id": "msg-4",
            "subject": "Fallback test",
            # No "content" key here — "body" must be used instead.
            "body": "This message uses the body field.",
            "from": "a@example.com",
            "to": ["b@example.com"],
            "date": "2025-01-01",
            "thread_id": "thread-003",
        }
    ]
    mock_client = MagicMock()
    mock_client.recall.search.return_value = mock_results
    mock_igpt_class.return_value = mock_client
    reader = IGPTEmailReader(api_key="test-key", user="test-user")
    results = reader.load_data(query="fallback")
    assert results[0].text == "This message uses the body field."
@patch("llama_index.readers.igpt_email.base.IGPT")
def test_load_data_default_max_results(mock_igpt_class):
    """Test load_data() passes default max_results of 50."""
    mock_client = MagicMock()
    mock_client.recall.search.return_value = []
    mock_igpt_class.return_value = mock_client
    reader = IGPTEmailReader(api_key="test-key", user="test-user")
    reader.load_data(query="weekly summary")
    # Omitted optional args must reach the SDK as None/50 defaults.
    mock_client.recall.search.assert_called_once_with(
        query="weekly summary",
        date_from=None,
        date_to=None,
        max_results=50,
    )
@patch("llama_index.readers.igpt_email.base.IGPT")
def test_load_data_empty_response(mock_igpt_class):
    """Test load_data() returns empty list when API returns nothing."""
    client = MagicMock()
    client.recall.search.return_value = []
    mock_igpt_class.return_value = client
    reader = IGPTEmailReader(api_key="test-key", user="test-user")
    assert reader.load_data(query="nothing here") == []
@patch("llama_index.readers.igpt_email.base.IGPT")
def test_load_data_none_response(mock_igpt_class):
    """Test load_data() returns empty list when API returns None."""
    client = MagicMock()
    client.recall.search.return_value = None
    mock_igpt_class.return_value = client
    reader = IGPTEmailReader(api_key="test-key", user="test-user")
    assert reader.load_data(query="nothing") == []
@patch("llama_index.readers.igpt_email.base.IGPT")
def test_load_data_non_dict_items(mock_igpt_class):
    """Test load_data() handles non-dict items in results gracefully."""
    client = MagicMock()
    client.recall.search.return_value = ["raw string result"]
    mock_igpt_class.return_value = client
    reader = IGPTEmailReader(api_key="test-key", user="test-user")
    docs = reader.load_data(query="edge case")
    assert len(docs) == 1
    # A bare string becomes the Document text with only the source marker.
    assert docs[0].text == "raw string result"
    assert docs[0].metadata["source"] == "igpt_email"
@patch("llama_index.readers.igpt_email.base.IGPT")
def test_load_data_error_response(mock_igpt_class):
    """Test load_data() raises ValueError on API error response."""
    client = MagicMock()
    client.recall.search.return_value = {"error": "auth"}
    mock_igpt_class.return_value = client
    reader = IGPTEmailReader(api_key="bad-key", user="test-user")
    with pytest.raises(ValueError, match="iGPT API error: auth"):
        reader.load_data(query="test")
@patch("llama_index.readers.igpt_email.base.IGPT")
def test_load_data_item_without_content_or_body(mock_igpt_class):
    """Test load_data() falls back to json.dumps when item has no content or body."""
    mock_results = [
        {
            "id": "msg-5",
            "subject": "Metadata only",
            "from": "a@example.com",
        }
    ]
    mock_client = MagicMock()
    mock_client.recall.search.return_value = mock_results
    mock_igpt_class.return_value = mock_client
    reader = IGPTEmailReader(api_key="test-key", user="test-user")
    results = reader.load_data(query="metadata only")
    assert len(results) == 1
    # With neither "content" nor "body", the whole item is serialized as text.
    assert results[0].text == json.dumps(mock_results[0])
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-igpt-email/tests/test_readers_igpt_email.py",
"license": "MIT License",
"lines": 174,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-igpt-email/llama_index/tools/igpt_email/base.py | """iGPT Email Intelligence tool spec."""
import json
from typing import List, Optional
from igptai import IGPT
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
class IGPTEmailToolSpec(BaseToolSpec):
    """
    iGPT Email Intelligence tool spec.

    Wraps the iGPT recall.ask() and recall.search() endpoints, giving agents
    structured, reasoning-ready context from connected email threads.

    Args:
        api_key (str): iGPT API key. See https://docs.igpt.ai for details.
        user (str): User identifier for the connected mailbox.

    Example:
        .. code-block:: python

            from llama_index.tools.igpt_email import IGPTEmailToolSpec
            from llama_index.core.agent.workflow import FunctionAgent
            from llama_index.llms.openai import OpenAI

            tool_spec = IGPTEmailToolSpec(api_key="your-key", user="user-id")
            agent = FunctionAgent(
                tools=tool_spec.to_tool_list(),
                llm=OpenAI(model="gpt-4.1"),
            )
            answer = await agent.run("What tasks were assigned to me this week?")
    """

    # Methods exposed as agent tools by BaseToolSpec.to_tool_list().
    spec_functions = ["ask", "search"]

    def __init__(self, api_key: str, user: str) -> None:
        """Initialize with parameters."""
        # One authenticated client per tool spec; both endpoints use it.
        self.client = IGPT(api_key=api_key, user=user)

    def ask(
        self,
        question: str,
        output_format: str = "json",
    ) -> List[Document]:
        """
        Ask a question about email context using iGPT's reasoning engine.

        Calls recall.ask() and returns structured context extracted from
        connected email threads, including tasks, decisions, owners, sentiment,
        deadlines, and citations.

        Args:
            question (str): The question or prompt to reason over email context.
            output_format (str): Response format — "text" or "json". Default is "json".

        Returns:
            List[Document]: A single Document containing the structured reasoning
                response. Citations are stored in metadata["citations"].

        Raises:
            ValueError: If the API response is a dict containing an "error" key.
        """
        response = self.client.recall.ask(
            input=question,
            output_format=output_format,
        )
        # A dict response carrying an "error" key is treated as an API failure.
        if isinstance(response, dict) and "error" in response:
            raise ValueError(f"iGPT API error: {response['error']}")
        if isinstance(response, dict):
            # Serialize structured responses; pull out citations if present.
            text = json.dumps(response)
            citations = response.get("citations", [])
        else:
            # Plain-text responses carry no citation metadata.
            text = str(response)
            citations = []
        return [
            Document(
                text=text,
                metadata={
                    "question": question,
                    "citations": citations,
                    "source": "igpt_email_ask",
                },
            )
        ]

    def search(
        self,
        query: str,
        date_from: Optional[str] = None,
        date_to: Optional[str] = None,
        max_results: Optional[int] = 10,
    ) -> List[Document]:
        """
        Search email context for relevant messages and threads.

        Calls recall.search() and returns matching email context as Documents,
        with thread metadata (subject, participants, date, thread ID) preserved
        in metadata for downstream filtering and retrieval.

        Args:
            query (str): Search query to run against connected email data.
            date_from (str, optional): Filter results from this date (YYYY-MM-DD).
            date_to (str, optional): Filter results up to this date (YYYY-MM-DD).
            max_results (int, optional): Maximum number of results to return. Default is 10.

        Returns:
            List[Document]: One Document per email result. Thread metadata is
                stored in metadata (subject, from, to, date, thread_id, id).

        Raises:
            ValueError: If the API response is a dict containing an "error" key.
        """
        response = self.client.recall.search(
            query=query,
            date_from=date_from,
            date_to=date_to,
            max_results=max_results,
        )
        # A dict response carrying an "error" key is treated as an API failure.
        if isinstance(response, dict) and "error" in response:
            raise ValueError(f"iGPT API error: {response['error']}")
        if not response:
            return []
        # Accept either a bare list of results or a {"results": [...]} wrapper.
        results = (
            response if isinstance(response, list) else response.get("results", [])
        )
        documents = []
        for item in results:
            if isinstance(item, dict):
                # Prefer "content", fall back to "body", else serialize the item.
                text = item.get("content", item.get("body", json.dumps(item)))
                metadata = {
                    "source": "igpt_email_search",
                    "subject": item.get("subject"),
                    "from": item.get("from"),
                    "to": item.get("to"),
                    "date": item.get("date"),
                    "thread_id": item.get("thread_id"),
                    "id": item.get("id"),
                }
            else:
                # Non-dict items (e.g. raw strings) become minimal Documents.
                text = str(item)
                metadata = {"source": "igpt_email_search"}
            documents.append(Document(text=text, metadata=metadata))
        return documents
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-igpt-email/llama_index/tools/igpt_email/base.py",
"license": "MIT License",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-igpt-email/tests/test_tools_igpt_email.py | """Tests for IGPTEmailToolSpec."""
import json
from unittest.mock import MagicMock, patch
import pytest
from llama_index.core.schema import Document
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from llama_index.core.tools import FunctionTool
from llama_index.tools.igpt_email import IGPTEmailToolSpec
def test_class():
    """IGPTEmailToolSpec must inherit from BaseToolSpec."""
    mro_names = {base.__name__ for base in IGPTEmailToolSpec.__mro__}
    assert BaseToolSpec.__name__ in mro_names
def test_spec_functions():
    """Both endpoints must be advertised as agent tools."""
    for expected in ("ask", "search"):
        assert expected in IGPTEmailToolSpec.spec_functions
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_to_tool_list(mock_igpt_class):
    """Test that to_tool_list() produces valid FunctionTool objects."""
    tool_spec = IGPTEmailToolSpec(api_key="test-key", user="test-user")
    tools = tool_spec.to_tool_list()
    # One FunctionTool per entry in spec_functions, named after the method.
    assert len(tools) == 2
    assert all(isinstance(t, FunctionTool) for t in tools)
    tool_names = {t.metadata.name for t in tools}
    assert tool_names == {"ask", "search"}
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_init(mock_igpt_class):
    """The constructor must build one IGPT client from its credentials."""
    spec = IGPTEmailToolSpec(api_key="test-key", user="test-user")
    mock_igpt_class.assert_called_once_with(api_key="test-key", user="test-user")
    assert spec.client == mock_igpt_class.return_value
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_ask_returns_document(mock_igpt_class):
    """ask() returns one Document serializing the response, citations in metadata."""
    mock_response = {
        "answer": "You have 3 action items from the project meeting.",
        "citations": [{"id": "msg-1", "subject": "Project sync"}],
        "tasks": ["Send proposal", "Schedule follow-up"],
    }
    mock_client = MagicMock()
    mock_client.recall.ask.return_value = mock_response
    mock_igpt_class.return_value = mock_client
    tool = IGPTEmailToolSpec(api_key="test-key", user="test-user")
    results = tool.ask("What are my action items from this week?")
    # The question is forwarded as "input" with the default json format.
    mock_client.recall.ask.assert_called_once_with(
        input="What are my action items from this week?",
        output_format="json",
    )
    assert len(results) == 1
    assert isinstance(results[0], Document)
    assert results[0].text == json.dumps(mock_response)
    assert results[0].metadata["question"] == "What are my action items from this week?"
    assert results[0].metadata["citations"] == mock_response["citations"]
    assert results[0].metadata["source"] == "igpt_email_ask"
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_ask_with_text_output_format(mock_igpt_class):
    """ask() forwards a non-default output_format to the SDK."""
    mock_client = MagicMock()
    mock_client.recall.ask.return_value = {"answer": "Some answer", "citations": []}
    mock_igpt_class.return_value = mock_client
    tool = IGPTEmailToolSpec(api_key="test-key", user="test-user")
    tool.ask("What deadlines are coming up?", output_format="text")
    mock_client.recall.ask.assert_called_once_with(
        input="What deadlines are coming up?",
        output_format="text",
    )
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_ask_with_string_response(mock_igpt_class):
    """Test ask() handles a plain string response gracefully."""
    client = MagicMock()
    client.recall.ask.return_value = "Plain text answer"
    mock_igpt_class.return_value = client
    tool = IGPTEmailToolSpec(api_key="test-key", user="test-user")
    docs = tool.ask("Any decisions made?")
    assert len(docs) == 1
    # String responses pass through as text with empty citation metadata.
    assert docs[0].text == "Plain text answer"
    assert docs[0].metadata["citations"] == []
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_search_returns_documents(mock_igpt_class):
    """search() returns one Document per result with thread metadata intact."""
    mock_results = [
        {
            "id": "msg-1",
            "subject": "Q1 Budget Review",
            "content": "Team, please review the attached Q1 budget.",
            "from": "alice@company.com",
            "to": ["bob@company.com"],
            "date": "2025-02-10",
            "thread_id": "thread-abc",
        },
        {
            "id": "msg-2",
            "subject": "Re: Q1 Budget Review",
            "content": "Looks good. Let's approve.",
            "from": "bob@company.com",
            "to": ["alice@company.com"],
            "date": "2025-02-11",
            "thread_id": "thread-abc",
        },
    ]
    mock_client = MagicMock()
    mock_client.recall.search.return_value = mock_results
    mock_igpt_class.return_value = mock_client
    tool = IGPTEmailToolSpec(api_key="test-key", user="test-user")
    results = tool.search("Q1 budget", date_from="2025-02-01", max_results=5)
    # Supplied args are forwarded; omitted date_to defaults to None.
    mock_client.recall.search.assert_called_once_with(
        query="Q1 budget",
        date_from="2025-02-01",
        date_to=None,
        max_results=5,
    )
    assert len(results) == 2
    assert all(isinstance(doc, Document) for doc in results)
    # Text comes from "content"; thread fields land in metadata unchanged.
    assert results[0].text == "Team, please review the attached Q1 budget."
    assert results[0].metadata["subject"] == "Q1 Budget Review"
    assert results[0].metadata["from"] == "alice@company.com"
    assert results[0].metadata["thread_id"] == "thread-abc"
    assert results[0].metadata["source"] == "igpt_email_search"
    assert results[1].text == "Looks good. Let's approve."
    assert results[1].metadata["id"] == "msg-2"
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_search_with_results_wrapper(mock_igpt_class):
    """Test search() handles a dict response with a 'results' key."""
    mock_response = {
        "results": [
            {
                "id": "msg-1",
                "subject": "Deal update",
                "content": "The deal is closed.",
                "from": "sales@company.com",
                "to": ["ceo@company.com"],
                "date": "2025-02-14",
                "thread_id": "thread-xyz",
            }
        ]
    }
    mock_client = MagicMock()
    mock_client.recall.search.return_value = mock_response
    mock_igpt_class.return_value = mock_client
    tool = IGPTEmailToolSpec(api_key="test-key", user="test-user")
    results = tool.search("deal update")
    # The wrapper dict is unpacked: one Document per entry under "results".
    assert len(results) == 1
    assert results[0].text == "The deal is closed."
    assert results[0].metadata["subject"] == "Deal update"
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_search_empty_response(mock_igpt_class):
    """Test search() returns empty list when API returns nothing."""
    client = MagicMock()
    client.recall.search.return_value = []
    mock_igpt_class.return_value = client
    tool = IGPTEmailToolSpec(api_key="test-key", user="test-user")
    assert tool.search("nonexistent topic") == []
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_search_none_response(mock_igpt_class):
    """Test search() returns empty list when API returns None."""
    client = MagicMock()
    client.recall.search.return_value = None
    mock_igpt_class.return_value = client
    tool = IGPTEmailToolSpec(api_key="test-key", user="test-user")
    assert tool.search("nothing here") == []
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_search_default_parameters(mock_igpt_class):
    """Test search() uses correct defaults when optional args are omitted."""
    mock_client = MagicMock()
    mock_client.recall.search.return_value = []
    mock_igpt_class.return_value = mock_client
    tool = IGPTEmailToolSpec(api_key="test-key", user="test-user")
    tool.search("onboarding")
    # Omitted optional args must reach the SDK as None/10 defaults.
    mock_client.recall.search.assert_called_once_with(
        query="onboarding",
        date_from=None,
        date_to=None,
        max_results=10,
    )
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_ask_error_response(mock_igpt_class):
    """Test ask() raises ValueError on API error response."""
    client = MagicMock()
    client.recall.ask.return_value = {"error": "auth"}
    mock_igpt_class.return_value = client
    tool = IGPTEmailToolSpec(api_key="bad-key", user="test-user")
    with pytest.raises(ValueError, match="iGPT API error: auth"):
        tool.ask("test question")
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_search_error_response(mock_igpt_class):
    """Test search() raises ValueError on API error response."""
    client = MagicMock()
    client.recall.search.return_value = {"error": "params"}
    mock_igpt_class.return_value = client
    tool = IGPTEmailToolSpec(api_key="test-key", user="test-user")
    with pytest.raises(ValueError, match="iGPT API error: params"):
        tool.search("bad query")
@patch("llama_index.tools.igpt_email.base.IGPT")
def test_search_item_without_content_or_body(mock_igpt_class):
    """Test search() falls back to json.dumps when item has no content or body."""
    mock_results = [
        {
            "id": "msg-1",
            "subject": "Metadata only",
            "from": "a@example.com",
        }
    ]
    mock_client = MagicMock()
    mock_client.recall.search.return_value = mock_results
    mock_igpt_class.return_value = mock_client
    tool = IGPTEmailToolSpec(api_key="test-key", user="test-user")
    results = tool.search("metadata only")
    assert len(results) == 1
    # With neither "content" nor "body", the whole item is serialized as text.
    assert results[0].text == json.dumps(mock_results[0])
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-igpt-email/tests/test_tools_igpt_email.py",
"license": "MIT License",
"lines": 205,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-modelslab/llama_index/llms/modelslab/base.py | import os
from typing import Any, Optional
from llama_index.llms.openai_like import OpenAILike
MODELSLAB_API_BASE = "https://modelslab.com/uncensored-chat/v1"
class ModelsLabLLM(OpenAILike):
    """
    ModelsLab LLM integration for LlamaIndex.

    Provides uncensored Llama 3.1 language models via ModelsLab's
    OpenAI-compatible API. Suitable for RAG pipelines, agents, and
    workflows requiring unrestricted language generation with a
    128K token context window.

    Models:
        - ``llama-3.1-8b-uncensored`` — fast, efficient (default)
        - ``llama-3.1-70b-uncensored`` — higher quality, deeper reasoning

    Examples:
        ``pip install llama-index-llms-modelslab``

        ```python
        from llama_index.llms.modelslab import ModelsLabLLM

        # Set MODELSLAB_API_KEY env var or pass api_key directly
        llm = ModelsLabLLM(
            model="llama-3.1-8b-uncensored",
            api_key="your-modelslab-api-key",
        )

        resp = llm.complete("Explain transformers in simple terms.")
        print(resp)
        ```

        Use in a RAG pipeline::

        ```python
        from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
        from llama_index.llms.modelslab import ModelsLabLLM
        from llama_index.core import Settings

        Settings.llm = ModelsLabLLM(model="llama-3.1-70b-uncensored")
        documents = SimpleDirectoryReader("data").load_data()
        index = VectorStoreIndex.from_documents(documents)
        query_engine = index.as_query_engine()
        response = query_engine.query("What is the main topic?")
        ```

    Get your API key at: https://modelslab.com
    API docs: https://docs.modelslab.com/uncensored-chat
    """

    def __init__(
        self,
        model: str = "llama-3.1-8b-uncensored",
        api_key: Optional[str] = None,
        api_base: str = MODELSLAB_API_BASE,
        is_chat_model: bool = True,
        is_function_calling_model: bool = False,
        context_window: int = 131072,
        **kwargs: Any,
    ) -> None:
        """Initialize the ModelsLab LLM.

        Args:
            model: ModelsLab model name.
            api_key: API key; falls back to the MODELSLAB_API_KEY env var.
            api_base: OpenAI-compatible endpoint base URL.
            is_chat_model: Whether to use the chat endpoint semantics.
            is_function_calling_model: Whether function calling is supported.
            context_window: Maximum context size in tokens (default 128K).
            **kwargs: Extra arguments forwarded to OpenAILike.

        Raises:
            ValueError: If no API key is provided and the env var is unset.
        """
        # Explicit api_key wins; otherwise read from the environment.
        api_key = api_key or os.environ.get("MODELSLAB_API_KEY")
        if not api_key:
            raise ValueError(
                "ModelsLab API key not found. "
                "Set the MODELSLAB_API_KEY environment variable or pass api_key directly. "
                "Get your key at https://modelslab.com"
            )
        super().__init__(
            model=model,
            api_key=api_key,
            api_base=api_base,
            is_chat_model=is_chat_model,
            is_function_calling_model=is_function_calling_model,
            context_window=context_window,
            **kwargs,
        )

    @classmethod
    def class_name(cls) -> str:
        """Get class name."""
        return "ModelsLabLLM"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-modelslab/llama_index/llms/modelslab/base.py",
"license": "MIT License",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/llms/llama-index-llms-modelslab/tests/test_modelslab_llm.py | """
Tests for llama-index-llms-modelslab.
Run with: pytest tests/ -v
"""
import os
from unittest.mock import patch
import pytest
from llama_index.llms.modelslab import ModelsLabLLM
from llama_index.llms.modelslab.base import MODELSLAB_API_BASE
# ── Fixtures ──────────────────────────────────────────────────────────────────
@pytest.fixture
def llm():
    """A ModelsLabLLM built with a dummy key for constructor tests."""
    return ModelsLabLLM(
        model="llama-3.1-8b-uncensored",
        api_key="test-key-abc",
    )
# ── Constructor ───────────────────────────────────────────────────────────────
class TestModelsLabLLMConstructor:
    """Constructor, defaults, and API-key resolution behavior of ModelsLabLLM."""

    def test_default_model(self, llm):
        assert llm.model == "llama-3.1-8b-uncensored"

    def test_custom_model(self):
        llm = ModelsLabLLM(model="llama-3.1-70b-uncensored", api_key="k")
        assert llm.model == "llama-3.1-70b-uncensored"

    def test_default_api_base(self, llm):
        assert llm.api_base == MODELSLAB_API_BASE

    def test_default_api_base_value(self):
        assert MODELSLAB_API_BASE == "https://modelslab.com/uncensored-chat/v1"

    def test_is_chat_model_true(self, llm):
        assert llm.is_chat_model is True

    def test_context_window_128k(self, llm):
        assert llm.context_window == 131072

    def test_api_key_set(self, llm):
        assert llm.api_key == "test-key-abc"

    @patch.dict(os.environ, {"MODELSLAB_API_KEY": "env-key-xyz"})
    def test_reads_api_key_from_env(self):
        # No explicit api_key: the env var must be picked up.
        llm = ModelsLabLLM()
        assert llm.api_key == "env-key-xyz"

    def test_raises_without_api_key(self):
        # Clear the environment so neither source provides a key.
        with patch.dict(os.environ, {}, clear=True):
            os.environ.pop("MODELSLAB_API_KEY", None)
            with pytest.raises(ValueError, match="MODELSLAB_API_KEY"):
                ModelsLabLLM()

    def test_custom_api_base(self):
        custom = "https://custom.modelslab.com/v1"
        llm = ModelsLabLLM(api_key="k", api_base=custom)
        assert llm.api_base == custom

    def test_class_name(self, llm):
        assert llm.class_name() == "ModelsLabLLM"

    def test_import_from_package(self):
        # The package root must re-export the same class as .base.
        from llama_index.llms.modelslab import ModelsLabLLM as ML
        from llama_index.llms.modelslab.base import ModelsLabLLM as MLBase

        assert ML is MLBase

    def test_inherits_from_openai_like(self, llm):
        from llama_index.llms.openai_like import OpenAILike

        assert isinstance(llm, OpenAILike)

    @patch.dict(os.environ, {"MODELSLAB_API_KEY": "env-key"})
    def test_explicit_key_overrides_env(self):
        # An explicit api_key argument takes precedence over the env var.
        llm = ModelsLabLLM(api_key="explicit-key")
        assert llm.api_key == "explicit-key"
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/llms/llama-index-llms-modelslab/tests/test_modelslab_llm.py",
"license": "MIT License",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-core/tests/extractors/test_extractor_resilience.py | """Tests for BaseExtractor retry and error-handling behaviour."""
from typing import Dict, List, Sequence
from unittest.mock import AsyncMock, patch
import pytest
from llama_index.core.extractors.interface import BaseExtractor
from llama_index.core.schema import BaseNode, TextNode
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
class _FailNTimesExtractor(BaseExtractor):
    """Extractor that raises on the first *fail_count* calls, then succeeds."""
    # Number of initial aextract() calls that should raise.
    fail_count: int = 0
    # Running count of aextract() invocations; inspected directly by tests.
    _calls: int = 0
    async def aextract(self, nodes: Sequence[BaseNode]) -> List[Dict]:
        """Raise until fail_count attempts have happened, then return metadata."""
        self._calls += 1
        if self._calls <= self.fail_count:
            raise RuntimeError(f"Simulated failure #{self._calls}")
        # One metadata dict per input node, marking successful extraction.
        return [{"extracted": True} for _ in nodes]
class _AlwaysFailExtractor(BaseExtractor):
    """Extractor whose aextract call never succeeds (permanent failure)."""
    async def aextract(self, nodes: Sequence[BaseNode]) -> List[Dict]:
        """Unconditionally raise to simulate an unrecoverable backend."""
        raise RuntimeError("Permanent failure")
def _make_nodes(n: int = 2) -> List[TextNode]:
    """Build *n* simple text nodes to use as extractor input."""
    nodes: List[TextNode] = []
    for index in range(n):
        nodes.append(TextNode(text=f"node-{index}"))
    return nodes
# ---------------------------------------------------------------------------
# Tests
# ---------------------------------------------------------------------------
@pytest.mark.asyncio
async def test_default_raises_on_error():
    """With max_retries=0 and raise_on_error=True, errors propagate."""
    ext = _AlwaysFailExtractor(max_retries=0, raise_on_error=True)
    # No retry budget: the first failure must surface unchanged.
    with pytest.raises(RuntimeError, match="Permanent failure"):
        await ext.aprocess_nodes(_make_nodes())
@pytest.mark.asyncio
async def test_retry_succeeds_after_transient_failure():
    """Extractor fails once, succeeds on the second attempt."""
    # fail_count=1 < max_retries=2, so the retry loop should recover.
    ext = _FailNTimesExtractor(fail_count=1, max_retries=2, retry_backoff=0.01)
    nodes = _make_nodes()
    result = await ext.aprocess_nodes(nodes)
    assert len(result) == 2
    # Successful extraction merges {"extracted": True} into each node.
    for node in result:
        assert node.metadata.get("extracted") is True
@pytest.mark.asyncio
async def test_skip_returns_empty_metadata():
    """With raise_on_error=False, failed extraction returns empty dicts."""
    ext = _AlwaysFailExtractor(max_retries=0, raise_on_error=False)
    nodes = _make_nodes(3)
    result = await ext.aprocess_nodes(nodes)
    # Nodes come back untouched instead of the call raising.
    assert len(result) == 3
    for node in result:
        # Metadata should be unchanged (empty dict merge is a no-op)
        assert "extracted" not in node.metadata
@pytest.mark.asyncio
async def test_retries_exhausted_then_raises():
    """After max_retries exhausted with raise_on_error=True, the error propagates."""
    # Every attempt fails, so the final failure must be re-raised to the caller.
    ext = _AlwaysFailExtractor(max_retries=2, raise_on_error=True, retry_backoff=0.01)
    with pytest.raises(RuntimeError, match="Permanent failure"):
        await ext.aprocess_nodes(_make_nodes())
@pytest.mark.asyncio
async def test_retries_exhausted_then_skips():
    """After max_retries exhausted with raise_on_error=False, returns empty dicts."""
    ext = _AlwaysFailExtractor(max_retries=2, raise_on_error=False, retry_backoff=0.01)
    result = await ext.aprocess_nodes(_make_nodes())
    # Same node count back; metadata untouched because extraction never succeeded.
    assert len(result) == 2
    for node in result:
        assert "extracted" not in node.metadata
@pytest.mark.asyncio
async def test_backoff_delays_applied():
    """Verify asyncio.sleep is called with exponential backoff delays."""
    ext = _AlwaysFailExtractor(
        max_retries=3,
        raise_on_error=False,
        retry_backoff=2.0,
    )
    # Patch the sleep used inside the interface module so the test stays fast.
    with patch(
        "llama_index.core.extractors.interface.asyncio.sleep", new_callable=AsyncMock
    ) as mock_sleep:
        await ext.aprocess_nodes(_make_nodes())
        # 3 retries -> 3 sleep calls: 2*2^0=2.0, 2*2^1=4.0, 2*2^2=8.0
        assert mock_sleep.call_count == 3
        delays = [call.args[0] for call in mock_sleep.call_args_list]
        assert delays == [2.0, 4.0, 8.0]
@pytest.mark.asyncio
async def test_no_retry_single_call():
    """With max_retries=0, aextract is called exactly once."""
    # fail_count=0 means the first call already succeeds; no retries expected.
    ext = _FailNTimesExtractor(fail_count=0, max_retries=0)
    await ext.aprocess_nodes(_make_nodes())
    assert ext._calls == 1
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-core/tests/extractors/test_extractor_resilience.py",
"license": "MIT License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-layoutir/llama_index/readers/layoutir/base.py | import shutil
import tempfile
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Union
from layoutir import Pipeline
from layoutir.adapters import DoclingAdapter
from layoutir.chunking import SemanticSectionChunker
from llama_index.core.readers.base import BasePydanticReader
from llama_index.core.schema import Document
from pydantic import Field
class LayoutIRReader(BasePydanticReader):
    """
    LayoutIR Reader.
    Production-grade document ingestion engine using LayoutIR's compiler-like architecture.
    Processes PDFs and documents through IR (Intermediate Representation) to preserve
    complex layouts, tables, and multi-column structures.
    Args:
        use_gpu (bool, optional): Whether to use GPU acceleration. Defaults to False.
        api_key (Optional[str], optional): API key for remote processing. Defaults to None.
        model_name (Optional[str], optional): Model name to use for processing. Defaults to None.
        chunk_strategy (str, optional): Chunking strategy to use. Options: "semantic", "fixed". Defaults to "semantic".
        max_heading_level (int, optional): Maximum heading level for semantic chunking. Defaults to 2.
    """

    use_gpu: bool = Field(
        default=False,
        description="Whether to use GPU acceleration for document processing.",
    )
    api_key: Optional[str] = Field(
        default=None,
        description="API key for remote LayoutIR processing.",
    )
    model_name: Optional[str] = Field(
        default=None,
        description="Model name to use for document processing.",
    )
    chunk_strategy: str = Field(
        default="semantic",
        description="Chunking strategy: 'semantic' for section-based, 'fixed' for fixed-size chunks.",
    )
    max_heading_level: int = Field(
        default=2,
        description="Maximum heading level for semantic chunking.",
    )
    is_remote: bool = Field(
        default=False,
        description="Whether the data is loaded from a remote API or a local file.",
    )

    @staticmethod
    def _extract_block_fields(block: Any, source_path: Path, idx: int) -> tuple:
        """Return (text, block_type, block_id, page_number) for one IR block.

        Handles the three shapes a block may take: a plain dict, an object
        with a ``content`` attribute (layoutir.schema.Block), or anything
        else (stringified as a last resort).
        """
        if isinstance(block, dict):
            text = block.get("text", block.get("content", ""))
            block_type = str(block.get("type", "unknown"))
            block_id = block.get("id", f"{source_path.stem}_block_{idx}")
            page_number = block.get("page", block.get("page_number", 0))
        elif hasattr(block, "content"):
            text = block.content or ""
            # block.type may be an enum (has .value) or a plain string.
            block_type = (
                str(block.type.value)
                if hasattr(block.type, "value")
                else str(block.type)
            )
            block_id = getattr(block, "block_id", f"{source_path.stem}_block_{idx}")
            page_number = getattr(block, "page_number", 0)
        else:
            text = str(block)
            block_type = "block"
            block_id = f"{source_path.stem}_block_{idx}"
            page_number = 0
        return text, block_type, block_id, page_number

    def lazy_load_data(
        self,
        file_path: Union[str, Path, List[str], List[Path]],
        extra_info: Optional[Dict[str, Any]] = None,
    ) -> Iterable[Document]:
        """
        Lazily load documents from given file path(s) using LayoutIR.
        Args:
            file_path (Union[str, Path, List[str], List[Path]]): Path to PDF/document file(s).
            extra_info (Optional[Dict[str, Any]], optional): Additional metadata to include. Defaults to None.
        Yields:
            Document: LlamaIndex Document objects with preserved layout structure.
        Raises:
            ImportError: If GPU is requested but PyTorch is not installed.
        """
        # Fail fast with a helpful message if GPU was requested without torch.
        if self.use_gpu:
            try:
                import torch  # noqa: F401
            except ImportError as e:
                raise ImportError(
                    "GPU acceleration requested but PyTorch is not installed. "
                    "Please install PyTorch with CUDA support:\n"
                    "pip3 install torch torchvision --index-url https://download.pytorch.org/whl/cu130"
                ) from e
        # Normalize the input to a list of paths.
        file_paths = file_path if isinstance(file_path, list) else [file_path]
        # Build the adapter, passing only the options that were configured.
        adapter_kwargs: Dict[str, Any] = {"use_gpu": self.use_gpu}
        if self.model_name:
            adapter_kwargs["model_name"] = self.model_name
        if self.api_key:
            adapter_kwargs["api_key"] = self.api_key
        adapter = DoclingAdapter(**adapter_kwargs)
        # Section-based chunking when requested; otherwise the pipeline default.
        if self.chunk_strategy == "semantic":
            chunker = SemanticSectionChunker(max_heading_level=self.max_heading_level)
        else:
            chunker = None  # Use default chunking
        pipeline = Pipeline(adapter=adapter, chunk_strategy=chunker)
        for source in file_paths:
            source_path = Path(source) if isinstance(source, str) else source
            # TemporaryDirectory guarantees cleanup even if process() raises,
            # replacing the previous manual mkdtemp()/rmtree() pair.
            with tempfile.TemporaryDirectory() as tmp_dir:
                layoutir_doc = pipeline.process(
                    input_path=source_path,
                    output_dir=Path(tmp_dir),
                )
            # Extract blocks/chunks from the IR, tolerating either attribute name.
            if hasattr(layoutir_doc, "blocks"):
                blocks = layoutir_doc.blocks
            elif hasattr(layoutir_doc, "chunks"):
                blocks = layoutir_doc.chunks
            else:
                # Fallback: treat entire document as single block
                blocks = [{"text": str(layoutir_doc), "type": "document"}]
            # Convert each block to a LlamaIndex Document.
            for idx, block in enumerate(blocks):
                text, block_type, block_id, page_number = self._extract_block_fields(
                    block, source_path, idx
                )
                # Caller-supplied metadata first, standard fields layered on top.
                metadata = extra_info.copy() if extra_info else {}
                metadata.update(
                    {
                        "file_path": str(source_path),
                        "file_name": source_path.name,
                        "block_type": block_type,
                        "block_index": idx,
                        "page_number": page_number,
                        "source": "layoutir",
                    }
                )
                yield Document(
                    doc_id=block_id,
                    text=text,
                    metadata=metadata,
                )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-layoutir/llama_index/readers/layoutir/base.py",
"license": "MIT License",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-layoutir/tests/test_readers_layoutir.py | from unittest.mock import MagicMock, patch
import pytest
from llama_index.readers.layoutir.base import LayoutIRReader
@pytest.fixture
def mock_layoutir_components(monkeypatch):
    """Mock all layoutir components to avoid external dependencies.

    Yields a dict with the pipeline/adapter/chunker mocks. The teardown
    restores any real modules that were shadowed instead of deleting them
    (the previous cleanup removed pre-existing sys.modules entries outright,
    leaking state into later tests), and runs even if the test raises.
    """
    mock_pipeline = MagicMock()
    mock_adapter = MagicMock()
    mock_chunker = MagicMock()
    mock_modules = {
        "layoutir": MagicMock(),
        "layoutir.Pipeline": mock_pipeline,
        "layoutir.adapters": MagicMock(DoclingAdapter=mock_adapter),
        "layoutir.chunking": MagicMock(SemanticSectionChunker=mock_chunker),
    }
    import sys

    # Remember any pre-existing modules so teardown can put them back.
    saved = {name: sys.modules[name] for name in mock_modules if name in sys.modules}
    for module_name, module_mock in mock_modules.items():
        sys.modules[module_name] = module_mock
    try:
        yield {
            "pipeline": mock_pipeline,
            "adapter": mock_adapter,
            "chunker": mock_chunker,
        }
    finally:
        # Remove the mocks, then restore whatever was there before.
        for module_name in mock_modules:
            sys.modules.pop(module_name, None)
        sys.modules.update(saved)
def test_layoutir_reader_initialization():
    """Test LayoutIRReader can be initialized with default parameters."""
    reader = LayoutIRReader()
    expected_defaults = {
        "use_gpu": False,
        "api_key": None,
        "model_name": None,
        "chunk_strategy": "semantic",
        "max_heading_level": 2,
        "is_remote": False,
    }
    for attr, expected in expected_defaults.items():
        assert getattr(reader, attr) == expected
def test_layoutir_reader_initialization_with_custom_params():
    """Test LayoutIRReader can be initialized with custom parameters."""
    overrides = {
        "use_gpu": True,
        "api_key": "test-key",
        "model_name": "custom-model",
        "chunk_strategy": "fixed",
        "max_heading_level": 3,
    }
    reader = LayoutIRReader(**overrides)
    # Every override must be reflected on the constructed reader.
    for attr, expected in overrides.items():
        assert getattr(reader, attr) == expected
def test_lazy_load_data_single_file(monkeypatch):
    """Test loading a single file with mocked LayoutIR pipeline."""
    fake_doc = MagicMock()
    fake_doc.blocks = [
        {
            "text": "First paragraph content",
            "type": "paragraph",
            "id": "block_0",
            "page": 1,
        },
        {
            "text": "Second paragraph content",
            "type": "paragraph",
            "id": "block_1",
            "page": 1,
        },
        {"text": "Table content", "type": "table", "id": "block_2", "page": 2},
    ]
    fake_pipeline = MagicMock()
    fake_pipeline.process.return_value = fake_doc
    # Replace all three layoutir entry points used by the reader.
    pipeline_patch = patch(
        "llama_index.readers.layoutir.base.Pipeline",
        MagicMock(return_value=fake_pipeline),
    )
    adapter_patch = patch(
        "llama_index.readers.layoutir.base.DoclingAdapter", MagicMock()
    )
    chunker_patch = patch(
        "llama_index.readers.layoutir.base.SemanticSectionChunker", MagicMock()
    )
    with pipeline_patch, adapter_patch, chunker_patch:
        reader = LayoutIRReader()
        documents = list(reader.lazy_load_data(file_path="test.pdf"))
    # One Document per IR block.
    assert len(documents) == 3
    first = documents[0]
    assert first.text == "First paragraph content"
    assert first.metadata["block_type"] == "paragraph"
    assert first.metadata["page_number"] == 1
    assert first.metadata["block_index"] == 0
    assert first.metadata["source"] == "layoutir"
    assert first.doc_id == "block_0"
    assert documents[1].text == "Second paragraph content"
    assert documents[1].metadata["block_type"] == "paragraph"
    assert documents[2].text == "Table content"
    assert documents[2].metadata["block_type"] == "table"
    assert documents[2].metadata["page_number"] == 2
def test_lazy_load_data_multiple_files(monkeypatch):
    """Test loading multiple files."""
    fake_doc = MagicMock()
    fake_doc.blocks = [
        {"text": "Content", "type": "paragraph", "id": "block_0", "page": 1}
    ]
    fake_pipeline = MagicMock()
    fake_pipeline.process.return_value = fake_doc
    pipeline_patch = patch(
        "llama_index.readers.layoutir.base.Pipeline",
        MagicMock(return_value=fake_pipeline),
    )
    adapter_patch = patch(
        "llama_index.readers.layoutir.base.DoclingAdapter", MagicMock()
    )
    chunker_patch = patch(
        "llama_index.readers.layoutir.base.SemanticSectionChunker", MagicMock()
    )
    with pipeline_patch, adapter_patch, chunker_patch:
        reader = LayoutIRReader()
        documents = list(
            reader.lazy_load_data(file_path=["file1.pdf", "file2.pdf"])
        )
    # One block per file -> two documents, and process() ran once per file.
    assert len(documents) == 2
    assert fake_pipeline.process.call_count == 2
def test_lazy_load_data_with_extra_info(monkeypatch):
    """Test that extra_info metadata is preserved."""
    fake_doc = MagicMock()
    fake_doc.blocks = [
        {"text": "Content", "type": "paragraph", "id": "block_0", "page": 1}
    ]
    fake_pipeline = MagicMock()
    fake_pipeline.process.return_value = fake_doc
    pipeline_patch = patch(
        "llama_index.readers.layoutir.base.Pipeline",
        MagicMock(return_value=fake_pipeline),
    )
    adapter_patch = patch(
        "llama_index.readers.layoutir.base.DoclingAdapter", MagicMock()
    )
    chunker_patch = patch(
        "llama_index.readers.layoutir.base.SemanticSectionChunker", MagicMock()
    )
    user_metadata = {"department": "research", "year": 2026}
    with pipeline_patch, adapter_patch, chunker_patch:
        reader = LayoutIRReader()
        documents = list(
            reader.lazy_load_data(file_path="test.pdf", extra_info=user_metadata)
        )
    # Caller-supplied metadata survives alongside the standard fields.
    assert documents[0].metadata["department"] == "research"
    assert documents[0].metadata["year"] == 2026
    assert documents[0].metadata["block_type"] == "paragraph"
    assert documents[0].metadata["source"] == "layoutir"
def test_lazy_load_data_with_gpu(monkeypatch):
    """Test that GPU flag is passed to adapter."""
    fake_doc = MagicMock()
    fake_doc.blocks = [
        {"text": "Content", "type": "paragraph", "id": "block_0", "page": 1}
    ]
    fake_pipeline = MagicMock()
    fake_pipeline.process.return_value = fake_doc
    adapter_cls = MagicMock()
    pipeline_patch = patch(
        "llama_index.readers.layoutir.base.Pipeline",
        MagicMock(return_value=fake_pipeline),
    )
    adapter_patch = patch(
        "llama_index.readers.layoutir.base.DoclingAdapter", adapter_cls
    )
    chunker_patch = patch(
        "llama_index.readers.layoutir.base.SemanticSectionChunker", MagicMock()
    )
    with pipeline_patch, adapter_patch, chunker_patch:
        reader = LayoutIRReader(use_gpu=True, api_key="test-key")
        list(reader.lazy_load_data(file_path="test.pdf"))
        # The adapter must be constructed with the GPU flag and API key.
        adapter_cls.assert_called_once()
        call_kwargs = adapter_cls.call_args[1]
        assert call_kwargs["use_gpu"] is True
        assert call_kwargs["api_key"] == "test-key"
def test_lazy_load_data_with_chunks_attribute(monkeypatch):
    """Test fallback when document has chunks instead of blocks."""
    fake_doc = MagicMock()
    # Drop .blocks so the reader falls through to .chunks.
    del fake_doc.blocks
    fake_doc.chunks = [{"text": "Chunk 1", "type": "chunk", "id": "chunk_0", "page": 1}]
    fake_pipeline = MagicMock()
    fake_pipeline.process.return_value = fake_doc
    pipeline_patch = patch(
        "llama_index.readers.layoutir.base.Pipeline",
        MagicMock(return_value=fake_pipeline),
    )
    adapter_patch = patch(
        "llama_index.readers.layoutir.base.DoclingAdapter", MagicMock()
    )
    chunker_patch = patch(
        "llama_index.readers.layoutir.base.SemanticSectionChunker", MagicMock()
    )
    with pipeline_patch, adapter_patch, chunker_patch:
        reader = LayoutIRReader()
        documents = list(reader.lazy_load_data(file_path="test.pdf"))
    assert len(documents) == 1
    assert documents[0].text == "Chunk 1"
def test_lazy_load_data_gpu_check_error():
    """Test that ImportError is raised when GPU is requested but requirements are not met."""
    reader = LayoutIRReader(use_gpu=True)
    # Make every import fail so the torch probe inside lazy_load_data raises.
    import_failure = ImportError("No module named 'torch'")
    with patch("builtins.__import__", side_effect=import_failure):
        with pytest.raises(ImportError) as exc_info:
            list(reader.lazy_load_data(file_path="test.pdf"))
    message = str(exc_info.value)
    assert "GPU acceleration requested" in message
    assert "PyTorch" in message
def test_load_data_method(monkeypatch):
    """Test that load_data returns a list instead of iterator."""
    fake_doc = MagicMock()
    fake_doc.blocks = [
        {"text": "Content", "type": "paragraph", "id": "block_0", "page": 1}
    ]
    fake_pipeline = MagicMock()
    fake_pipeline.process.return_value = fake_doc
    pipeline_patch = patch(
        "llama_index.readers.layoutir.base.Pipeline",
        MagicMock(return_value=fake_pipeline),
    )
    adapter_patch = patch(
        "llama_index.readers.layoutir.base.DoclingAdapter", MagicMock()
    )
    chunker_patch = patch(
        "llama_index.readers.layoutir.base.SemanticSectionChunker", MagicMock()
    )
    with pipeline_patch, adapter_patch, chunker_patch:
        reader = LayoutIRReader()
        documents = reader.load_data(file_path="test.pdf")
    # load_data eagerly materializes the lazy generator into a list.
    assert isinstance(documents, list)
    assert len(documents) == 1
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-layoutir/tests/test_readers_layoutir.py",
"license": "MIT License",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/readers/llama-index-readers-github/tests/test_github_repository_reader_selective.py | from __future__ import annotations
import json
import pytest
from llama_index.readers.github import GithubRepositoryReader
from llama_index.readers.github.repository.github_client import (
GithubClient,
GitContentResponseModel,
GitCommitResponseModel,
GitTreeResponseModel,
)
# Mock Data
# Contents-API payload for one file; "VGVzdCBjb250ZW50" is base64 for "Test content".
CONTENT_JSON = json.dumps(
    {
        "type": "file",
        "encoding": "base64",
        "size": 12,
        "name": "test_file.txt",
        "path": "test_file.txt",
        "content": "VGVzdCBjb250ZW50",  # "Test content"
        "sha": "test_sha",
        "url": "https://api.github.com/repos/run-llama/llama_index/contents/test_file.txt",
        "git_url": "https://api.github.com/repos/run-llama/llama_index/git/blobs/test_sha",
        "html_url": "https://github.com/run-llama/llama_index/blob/main/test_file.txt",
        "download_url": "https://raw.githubusercontent.com/run-llama/llama_index/main/test_file.txt",
        "_links": {"self": "...", "git": "...", "html": "..."},
    }
)
# Minimal commit object: only the tree SHA is consulted by the reader.
COMMIT_JSON = (
    '{"sha":"test_commit_sha","url":"...","commit":{"tree":{"sha":"test_tree_sha"}}}'
)
# Repository tree containing a single blob entry that matches BLOB_JSON below.
TREE_JSON = json.dumps(
    {
        "sha": "test_tree_sha",
        "url": "...",
        "tree": [
            {
                "path": "test_file.txt",
                "mode": "100644",
                "type": "blob",
                "sha": "test_sha",
                "size": 12,
                "url": "...",
            }
        ],
        "truncated": False,
    }
)
# Blob payload for the tree entry above; same base64 content as CONTENT_JSON.
BLOB_JSON = json.dumps(
    {
        "sha": "test_sha",
        "size": 12,
        "url": "...",
        "content": "VGVzdCBjb250ZW50",
        "encoding": "base64",
        "node_id": "test_node_id",
    }
)
@pytest.fixture
def mock_client(monkeypatch):
    """GithubClient whose network-backed methods return canned responses."""
    async def mock_get_content(self, owner, repo, path, ref=None, **kwargs):
        # Return mock content for any path
        # In a real test, we might check path and ref
        return GitContentResponseModel.from_json(CONTENT_JSON)
    async def mock_get_commit(self, *args, **kwargs):
        return GitCommitResponseModel.from_json(COMMIT_JSON)
    async def mock_get_tree(self, *args, **kwargs):
        return GitTreeResponseModel.from_json(TREE_JSON)
    async def mock_get_blob(self, *args, **kwargs):
        # GitBlobResponseModel imported locally: only this stub needs it.
        from llama_index.readers.github.repository.github_client import (
            GitBlobResponseModel,
        )
        return GitBlobResponseModel.from_json(BLOB_JSON)
    # Patch at class level so every GithubClient instance uses the stubs.
    monkeypatch.setattr(GithubClient, "get_content", mock_get_content)
    monkeypatch.setattr(GithubClient, "get_commit", mock_get_commit)
    monkeypatch.setattr(GithubClient, "get_tree", mock_get_tree)
    monkeypatch.setattr(GithubClient, "get_blob", mock_get_blob)
    return GithubClient("fake_token")
def test_load_specific_file_paths(mock_client):
    """Loading an explicit file list returns one document per path."""
    reader = GithubRepositoryReader(mock_client, "run-llama", "llama_index")
    # Test loading specific files via branch
    documents = reader.load_data(branch="main", file_paths=["test_file.txt"])
    assert len(documents) == 1
    assert documents[0].metadata["file_path"] == "test_file.txt"
    # Decoded from the base64 "VGVzdCBjb250ZW50" in the mock payload.
    assert documents[0].text == "Test content"
    assert documents[0].id_ == "test_sha"
def test_load_specific_file_paths_deduplication(mock_client):
    """file_exists_callback skips files whose blob SHA is already known."""
    reader = GithubRepositoryReader(mock_client, "run-llama", "llama_index")
    # Define check function that says "test_sha" exists
    def check_exists(sha):
        return sha == "test_sha"
    # expect empty list because check_exists returns True for the file's SHA
    documents = reader.load_data(
        branch="main", file_paths=["test_file.txt"], file_exists_callback=check_exists
    )
    assert len(documents) == 0
def test_scan_deduplication(mock_client):
    """Deduplication also applies to a full tree scan via commit SHA."""
    reader = GithubRepositoryReader(mock_client, "run-llama", "llama_index")
    # Normal scan (via commit or branch without file_paths)
    # Mock tree has 1 file with SHA "test_sha"
    # Case 1: No deduplication -> should get 1 doc
    documents = reader.load_data(commit_sha="test_commit_sha")
    assert len(documents) == 1
    # Case 2: Deduplication -> should get 0 docs
    def check_exists(sha):
        return sha == "test_sha"
    documents = reader.load_data(
        commit_sha="test_commit_sha", file_exists_callback=check_exists
    )
    assert len(documents) == 0
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/readers/llama-index-readers-github/tests/test_github_repository_reader_selective.py",
"license": "MIT License",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/program/llama-index-program-evaporate/tests/test_sandbox.py | """Tests for the sandboxed execution environment in EvaporateExtractor."""
import pytest
from llama_index.program.evaporate.extractor import (
_build_sandbox,
_validate_generated_code,
)
# ---------------------------------------------------------------------------
# _validate_generated_code
# ---------------------------------------------------------------------------
def test_validate_allows_safe_code():
    """Normal extraction function should pass validation."""
    # Plain regex-based extraction: no imports, no dunder access.
    code = (
        "def get_name_field(text: str):\n"
        '    match = re.search(r"Name: (.+)", text)\n'
        "    if match:\n"
        "        return match.group(1)\n"
        '    return ""\n'
    )
    # Should not raise
    _validate_generated_code(code)
def test_validate_allows_re_import():
    """Import re is used in the prompt template and should be allowed."""
    code = "import re\nx = re.search(r'a', 'a')\n"
    # Allowlisted module: validation must not raise.
    _validate_generated_code(code)
def test_validate_rejects_os_import():
    """Importing os must be rejected by static validation."""
    code = "import os\nos.system('echo pwned')\n"
    with pytest.raises(RuntimeError, match="imports 'os'"):
        _validate_generated_code(code)
def test_validate_rejects_subprocess_import():
    """Importing subprocess must be rejected by static validation."""
    code = "import subprocess\nsubprocess.run(['ls'])\n"
    with pytest.raises(RuntimeError, match="imports 'subprocess'"):
        _validate_generated_code(code)
def test_validate_rejects_from_import():
    """from-import of a disallowed module must also be rejected."""
    code = "from os.path import join\n"
    with pytest.raises(RuntimeError, match="imports from 'os.path'"):
        _validate_generated_code(code)
def test_validate_rejects_dunder_name():
    """Bare dunder names like __import__ are rejected (sandbox escape vector)."""
    code = "x = __import__('os')\n"
    with pytest.raises(RuntimeError, match="dunder name '__import__'"):
        _validate_generated_code(code)
def test_validate_rejects_dunder_attribute():
    """Dunder attribute access is rejected (classic MRO escape vector)."""
    code = "x = ''.__class__.__bases__\n"
    with pytest.raises(RuntimeError, match="dunder attribute"):
        _validate_generated_code(code)
# ---------------------------------------------------------------------------
# _build_sandbox
# ---------------------------------------------------------------------------
def test_sandbox_has_re_module():
    """The sandbox namespace pre-binds a working re module."""
    sandbox = _build_sandbox("hello world")
    assert sandbox["re"] is not None
    assert sandbox["re"].search(r"hello", "hello world") is not None
def test_sandbox_has_node_text():
    """The sandbox exposes the node's text under the node_text name."""
    sandbox = _build_sandbox("test text")
    assert sandbox["node_text"] == "test text"
def test_sandbox_blocks_open():
    """open() is absent from the sandbox builtins (no file access)."""
    sandbox = _build_sandbox("")
    with pytest.raises(NameError):
        exec("f = open('/etc/passwd')", sandbox)
def test_sandbox_blocks_eval():
    """eval() is absent from the sandbox builtins."""
    sandbox = _build_sandbox("")
    with pytest.raises(NameError):
        exec("eval('1+1')", sandbox)
def test_sandbox_blocks_exec_builtin():
    """The exec builtin itself is absent inside the sandbox."""
    sandbox = _build_sandbox("")
    with pytest.raises(NameError):
        exec("exec('x=1')", sandbox)
def test_sandbox_blocks_unrestricted_import():
    """Runtime import of a non-allowlisted module raises ImportError."""
    sandbox = _build_sandbox("")
    with pytest.raises(ImportError, match="not allowed in the sandbox"):
        exec("import os", sandbox)
def test_sandbox_allows_re_import_at_runtime():
    """import re succeeds inside the sandbox at execution time."""
    sandbox = _build_sandbox("")
    exec("import re\nx = re.search(r'a', 'abc')", sandbox)
    assert sandbox["x"] is not None
def test_sandbox_allows_stdlib_imports_at_runtime():
    """Stdlib modules in the allowlist should be importable at runtime."""
    sandbox = _build_sandbox("")
    exec("import datetime\nx = datetime.date(2026, 1, 1).isoformat()", sandbox)
    assert sandbox["x"] == "2026-01-01"
    # Fresh sandbox for the second module to keep the namespaces independent.
    sandbox = _build_sandbox("")
    exec("import collections\nx = collections.Counter('aab')", sandbox)
    assert sandbox["x"]["a"] == 2
def test_sandbox_exec_extraction_function():
    """End-to-end: define and call a function inside the sandbox."""
    fn_str = (
        "def get_name_field(text):\n"
        '    match = re.search(r"Name: (.+)", text)\n'
        "    if match:\n"
        "        return match.group(1).strip()\n"
        '    return ""\n'
    )
    sandbox = _build_sandbox("Name: Alice Johnson")
    # First exec defines the function; second calls it on node_text.
    exec(fn_str, sandbox)
    exec("__result__ = get_name_field(node_text)", sandbox)
    assert sandbox["__result__"] == "Alice Johnson"
def test_sandbox_basic_builtins_available():
    """Generated functions should be able to use common builtins."""
    sandbox = _build_sandbox("")
    # len/int/sorted are part of the curated builtins surface.
    exec("x = len([1, 2, 3])", sandbox)
    assert sandbox["x"] == 3
    exec("y = int('42')", sandbox)
    assert sandbox["y"] == 42
    exec("z = sorted([3, 1, 2])", sandbox)
    assert sandbox["z"] == [1, 2, 3]
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/program/llama-index-program-evaporate/tests/test_sandbox.py",
"license": "MIT License",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/agent/llama-index-agent-agentmesh/llama_index/agent/agentmesh/identity.py | """
Cryptographic identity management for AgentMesh.
Uses Ed25519 for cryptographic operations.
"""
from __future__ import annotations
import base64
import hashlib
import time
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from cryptography.hazmat.primitives.asymmetric import ed25519
from cryptography.exceptions import InvalidSignature
@dataclass
class CMVKSignature:
    """A cryptographic signature produced by a CMVK identity."""

    algorithm: str = "CMVK-Ed25519"
    public_key: str = ""
    signature: str = ""
    timestamp: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the signature into a JSON-compatible mapping."""
        payload: Dict[str, Any] = {}
        payload["algorithm"] = self.algorithm
        payload["public_key"] = self.public_key
        payload["signature"] = self.signature
        payload["timestamp"] = self.timestamp.isoformat()
        return payload

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "CMVKSignature":
        """Rebuild a signature from a mapping produced by to_dict."""
        raw_timestamp = data.get("timestamp")
        if raw_timestamp:
            parsed_timestamp = datetime.fromisoformat(raw_timestamp)
        else:
            parsed_timestamp = datetime.now(timezone.utc)
        return cls(
            algorithm=data.get("algorithm", "CMVK-Ed25519"),
            public_key=data.get("public_key", ""),
            signature=data.get("signature", ""),
            timestamp=parsed_timestamp,
        )
@dataclass
class CMVKIdentity:
    """Cryptographic identity for an agent using CMVK scheme."""
    # Decentralized identifier, formatted "did:cmvk:<32 hex chars>".
    did: str
    agent_name: str
    # Base64 of the raw Ed25519 public key bytes.
    public_key: str
    # Base64 of the raw Ed25519 private key bytes; None for public-only copies.
    private_key: Optional[str] = None
    capabilities: List[str] = field(default_factory=list)
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    @classmethod
    def generate(
        cls, agent_name: str, capabilities: Optional[List[str]] = None
    ) -> "CMVKIdentity":
        """Generate a new CMVK identity with Ed25519 key pair.

        The DID is derived from the agent name plus a nanosecond timestamp,
        not from the key material, so each call yields a distinct DID.
        """
        seed = f"{agent_name}:{time.time_ns()}"
        did_hash = hashlib.sha256(seed.encode()).hexdigest()[:32]
        did = f"did:cmvk:{did_hash}"
        private_key_obj = ed25519.Ed25519PrivateKey.generate()
        public_key_obj = private_key_obj.public_key()
        # Store raw key bytes as base64 text so the identity serializes cleanly.
        private_key_b64 = base64.b64encode(private_key_obj.private_bytes_raw()).decode(
            "ascii"
        )
        public_key_b64 = base64.b64encode(public_key_obj.public_bytes_raw()).decode(
            "ascii"
        )
        return cls(
            did=did,
            agent_name=agent_name,
            public_key=public_key_b64,
            private_key=private_key_b64,
            capabilities=capabilities or [],
        )
    def sign(self, data: str) -> CMVKSignature:
        """Sign data with this identity's private key.

        Raises:
            ValueError: If this is a public-only identity (no private key).
        """
        if not self.private_key:
            raise ValueError("Cannot sign without private key")
        private_key_bytes = base64.b64decode(self.private_key)
        private_key_obj = ed25519.Ed25519PrivateKey.from_private_bytes(
            private_key_bytes
        )
        signature_bytes = private_key_obj.sign(data.encode("utf-8"))
        signature_b64 = base64.b64encode(signature_bytes).decode("ascii")
        return CMVKSignature(public_key=self.public_key, signature=signature_b64)
    def verify_signature(self, data: str, signature: CMVKSignature) -> bool:
        """Verify a signature against this identity's public key.

        Returns False (never raises) for a key mismatch, malformed base64,
        or an invalid signature.
        """
        # The signature must carry this identity's own public key.
        if signature.public_key != self.public_key:
            return False
        try:
            public_key_bytes = base64.b64decode(self.public_key)
            public_key_obj = ed25519.Ed25519PublicKey.from_public_bytes(
                public_key_bytes
            )
            signature_bytes = base64.b64decode(signature.signature)
            # verify() raises InvalidSignature on failure.
            public_key_obj.verify(signature_bytes, data.encode("utf-8"))
            return True
        except (InvalidSignature, ValueError):
            return False
    def to_dict(self) -> Dict[str, Any]:
        """Serialize the shareable parts of the identity (private key excluded)."""
        return {
            "did": self.did,
            "agent_name": self.agent_name,
            "public_key": self.public_key,
            "capabilities": self.capabilities,
            "created_at": self.created_at.isoformat(),
        }
    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "CMVKIdentity":
        """Rebuild a (public-only) identity from a to_dict mapping."""
        created_str = data.get("created_at")
        return cls(
            did=data["did"],
            agent_name=data["agent_name"],
            public_key=data["public_key"],
            capabilities=data.get("capabilities", []),
            created_at=(
                datetime.fromisoformat(created_str)
                if created_str
                else datetime.now(timezone.utc)
            ),
        )
    def public_identity(self) -> "CMVKIdentity":
        """Return a copy without the private key."""
        # capabilities is copied so mutations on the copy don't leak back.
        return CMVKIdentity(
            did=self.did,
            agent_name=self.agent_name,
            public_key=self.public_key,
            private_key=None,
            capabilities=self.capabilities.copy(),
            created_at=self.created_at,
        )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/agent/llama-index-agent-agentmesh/llama_index/agent/agentmesh/identity.py",
"license": "MIT License",
"lines": 129,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/agent/llama-index-agent-agentmesh/llama_index/agent/agentmesh/query_engine.py | """Trust-gated query engine for LlamaIndex."""
from __future__ import annotations
from dataclasses import dataclass, field
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.schema import QueryBundle
from llama_index.agent.agentmesh.identity import CMVKIdentity
from llama_index.agent.agentmesh.trust import (
TrustHandshake,
TrustPolicy,
TrustedAgentCard,
TrustVerificationResult,
)
@dataclass
class DataAccessPolicy:
    """Policy for controlling data access in RAG pipelines.

    NOTE(review): within this module, TrustGatedQueryEngine only enforces
    ``require_identity``; the remaining fields look reserved for future
    enforcement — confirm intended use before relying on them.
    """
    allowed_collections: Optional[List[str]] = None  # NOTE(review): not enforced in this module
    denied_collections: Optional[List[str]] = None  # NOTE(review): not enforced in this module
    require_audit: bool = False  # NOTE(review): auditing is driven by TrustPolicy.audit_queries instead
    max_results_per_query: int = 100  # NOTE(review): not enforced in this module
    allowed_metadata_filters: Optional[List[str]] = None  # NOTE(review): not enforced in this module
    require_identity: bool = True  # reject queries that carry no invoker card (see TrustGatedQueryEngine)
@dataclass
class QueryAuditRecord:
    """Audit record for a query.

    Created by TrustGatedQueryEngine whenever ``policy.audit_queries`` is set,
    including for queries that were blocked by trust verification.
    """
    query_id: str  # unique within an engine instance (counter-based)
    timestamp: datetime  # UTC creation time of the record
    invoker_did: Optional[str]  # DID of the invoker card, or None when absent
    query_text: str  # query string, truncated to 500 chars by the engine
    trust_verified: bool  # whether the invoker passed trust verification
    trust_score: float  # invoker's trust score (0.0 when no verification ran)
    result_count: int  # number of results attributed to the query
    warnings: List[str] = field(default_factory=list)  # verification warnings plus block notices
class TrustGatedQueryEngine(BaseQueryEngine):
    """
    Query engine with trust-based access control.

    This wrapper adds trust verification and access control
    to any LlamaIndex query engine. Queries may be required to carry the
    invoker's TrustedAgentCard; untrusted invokers can be blocked, and all
    activity (including blocked queries) can be captured in an in-memory
    audit log.
    """

    def __init__(
        self,
        query_engine: BaseQueryEngine,
        identity: CMVKIdentity,
        policy: Optional[TrustPolicy] = None,
        data_policy: Optional[DataAccessPolicy] = None,
    ):
        """
        Initialize trust-gated query engine.

        Args:
            query_engine: The underlying query engine
            identity: This engine's identity
            policy: Trust policy for verification
            data_policy: Data access policy
        """
        super().__init__(callback_manager=query_engine.callback_manager)
        self._query_engine = query_engine
        self._identity = identity
        self._policy = policy or TrustPolicy()
        self._data_policy = data_policy or DataAccessPolicy()
        # CONSISTENCY FIX: pass the resolved policy (not the possibly-None
        # argument) so the handshake and this engine share one policy object.
        self._handshake = TrustHandshake(identity, self._policy)
        self._audit_log: List[QueryAuditRecord] = []
        self._query_counter = 0

    def _get_prompt_modules(self) -> Dict[str, Any]:
        """Expose the wrapped engine so its prompts remain reachable.

        NOTE(review): BaseQueryEngine inherits PromptMixin, whose
        ``_get_prompt_modules`` is abstract in recent llama-index-core
        releases — confirm against the pinned version.
        """
        return {"query_engine": self._query_engine}

    def _generate_query_id(self) -> str:
        """Generate unique query ID (per engine instance)."""
        self._query_counter += 1
        return f"q_{self._identity.did[-8:]}_{self._query_counter}"

    def _create_audit_record(
        self,
        query_text: str,
        invoker_card: Optional[TrustedAgentCard],
        verification: Optional[TrustVerificationResult],
        result_count: int = 0,
    ) -> QueryAuditRecord:
        """Create an audit record for a query."""
        return QueryAuditRecord(
            query_id=self._generate_query_id(),
            timestamp=datetime.now(timezone.utc),
            invoker_did=invoker_card.identity.did
            if invoker_card and invoker_card.identity
            else None,
            query_text=query_text[:500],  # Truncate for storage
            trust_verified=verification.trusted if verification else False,
            trust_score=verification.trust_score if verification else 0.0,
            result_count=result_count,
            warnings=verification.warnings if verification else [],
        )

    def verify_invoker(
        self,
        invoker_card: TrustedAgentCard,
        required_capabilities: Optional[List[str]] = None,
    ) -> TrustVerificationResult:
        """
        Verify an invoker before processing query.

        Args:
            invoker_card: The invoker's agent card
            required_capabilities: Required capabilities

        Returns:
            Verification result
        """
        return self._handshake.verify_peer(invoker_card, required_capabilities)

    def _verify_or_block(
        self,
        query_text: str,
        invoker_card: Optional[TrustedAgentCard],
    ) -> Optional[TrustVerificationResult]:
        """Apply the trust policy ahead of query execution.

        Returns the verification result, or None when verification is not
        required or no invoker card was supplied (and identity is optional).

        Raises:
            PermissionError: when the policy blocks the query. Blocked
                queries are audited when ``policy.audit_queries`` is set.
        """
        if not self._policy.require_verification:
            return None
        if invoker_card is None:
            if self._data_policy.require_identity:
                raise PermissionError(
                    "Query requires invoker identity but none provided"
                )
            return None
        verification = self.verify_invoker(invoker_card)
        if not verification.trusted and self._policy.block_unverified:
            # Log blocked query
            if self._policy.audit_queries:
                record = self._create_audit_record(
                    query_text, invoker_card, verification
                )
                record.warnings.append("Query blocked due to trust failure")
                self._audit_log.append(record)
            raise PermissionError(f"Query rejected: {verification.reason}")
        return verification

    def _record_audit(
        self,
        query_text: str,
        invoker_card: Optional[TrustedAgentCard],
        verification: Optional[TrustVerificationResult],
        response: Any,
    ) -> None:
        """Append an audit record for a completed query, if auditing is on."""
        if not self._policy.audit_queries:
            return
        # Count results (simplified: truthy response counts as one result)
        result_count = 1 if response else 0
        record = self._create_audit_record(
            query_text, invoker_card, verification, result_count
        )
        self._audit_log.append(record)

    def _query(
        self,
        query_bundle: QueryBundle,
        invoker_card: Optional[TrustedAgentCard] = None,
        **kwargs: Any,
    ) -> RESPONSE_TYPE:
        """
        Execute query with trust verification.

        Args:
            query_bundle: The query to execute
            invoker_card: Optional invoker card for verification
            **kwargs: Additional arguments

        Returns:
            Query response

        Raises:
            PermissionError: If trust verification fails
        """
        query_text = query_bundle.query_str
        verification = self._verify_or_block(query_text, invoker_card)
        # Execute the underlying query
        response = self._query_engine.query(query_bundle)
        self._record_audit(query_text, invoker_card, verification, response)
        return response

    async def _aquery(
        self,
        query_bundle: QueryBundle,
        invoker_card: Optional[TrustedAgentCard] = None,
        **kwargs: Any,
    ) -> RESPONSE_TYPE:
        """
        Async query with trust verification.

        BUG FIX: blocked async queries are now audited exactly like blocked
        sync queries (both paths share ``_verify_or_block``); previously only
        the sync path logged blocked queries.

        Args:
            query_bundle: The query to execute
            invoker_card: Optional invoker card
            **kwargs: Additional arguments

        Returns:
            Query response

        Raises:
            PermissionError: If trust verification fails
        """
        query_text = query_bundle.query_str
        verification = self._verify_or_block(query_text, invoker_card)
        response = await self._query_engine.aquery(query_bundle)
        self._record_audit(query_text, invoker_card, verification, response)
        return response

    def query(
        self,
        str_or_query_bundle: str | QueryBundle,
        invoker_card: Optional[TrustedAgentCard] = None,
        **kwargs: Any,
    ) -> RESPONSE_TYPE:
        """
        Query with trust verification.

        Args:
            str_or_query_bundle: Query string or bundle
            invoker_card: Optional invoker card for verification
            **kwargs: Additional arguments

        Returns:
            Query response
        """
        if isinstance(str_or_query_bundle, str):
            query_bundle = QueryBundle(query_str=str_or_query_bundle)
        else:
            query_bundle = str_or_query_bundle
        return self._query(query_bundle, invoker_card=invoker_card, **kwargs)

    async def aquery(
        self,
        str_or_query_bundle: str | QueryBundle,
        invoker_card: Optional[TrustedAgentCard] = None,
        **kwargs: Any,
    ) -> RESPONSE_TYPE:
        """
        Async query with trust verification.

        Args:
            str_or_query_bundle: Query string or bundle
            invoker_card: Optional invoker card
            **kwargs: Additional arguments

        Returns:
            Query response
        """
        if isinstance(str_or_query_bundle, str):
            query_bundle = QueryBundle(query_str=str_or_query_bundle)
        else:
            query_bundle = str_or_query_bundle
        return await self._aquery(query_bundle, invoker_card=invoker_card, **kwargs)

    def get_audit_log(self) -> List[QueryAuditRecord]:
        """
        Get the query audit log.

        Returns:
            A shallow copy of the list of audit records.
        """
        return self._audit_log.copy()

    def clear_audit_log(self) -> None:
        """Clear the audit log."""
        self._audit_log.clear()

    def get_audit_summary(self) -> Dict[str, Any]:
        """
        Get summary of audit activity.

        Returns:
            Audit summary dictionary (counts, verification rate, warnings).
        """
        total = len(self._audit_log)
        verified = sum(1 for r in self._audit_log if r.trust_verified)
        return {
            "total_queries": total,
            "verified_queries": verified,
            "unverified_queries": total - verified,
            # an empty log is reported as fully verified by convention
            "verification_rate": verified / total if total > 0 else 1.0,
            "total_warnings": sum(len(r.warnings) for r in self._audit_log),
        }
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/agent/llama-index-agent-agentmesh/llama_index/agent/agentmesh/query_engine.py",
"license": "MIT License",
"lines": 250,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/agent/llama-index-agent-agentmesh/llama_index/agent/agentmesh/trust.py | """Trust verification protocols for AgentMesh."""
from __future__ import annotations
import json
from dataclasses import dataclass, field
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional
from llama_index.agent.agentmesh.identity import CMVKIdentity, CMVKSignature
@dataclass
class TrustPolicy:
    """Policy configuration for trust verification.

    Consumed by TrustHandshake (score threshold, cache TTL) and by wrappers
    such as TrustGatedQueryEngine / TrustedAgentWorker (verification,
    blocking, auditing switches).
    """
    require_verification: bool = True  # when False, wrappers skip peer verification entirely
    min_trust_score: float = 0.7  # minimum peer trust_score accepted by verify_peer
    allowed_capabilities: Optional[List[str]] = None  # NOTE(review): not consulted anywhere in this module — confirm intent
    audit_queries: bool = False  # record QueryAuditRecord entries for each (and each blocked) query
    block_unverified: bool = True  # raise PermissionError instead of serving untrusted peers
    cache_ttl_seconds: int = 900  # lifetime of cached successful verification results
@dataclass
class TrustVerificationResult:
    """Result of a trust verification operation.

    Produced by TrustHandshake.verify_peer; successful results are cached
    per peer DID for the policy's cache TTL.
    """
    trusted: bool  # True only when every policy check passed
    trust_score: float  # the peer card's score (0.0 when no identity was present)
    reason: str  # human-readable outcome, e.g. "Verification successful"
    verified_capabilities: List[str] = field(default_factory=list)  # peer capabilities captured at verification time
    warnings: List[str] = field(default_factory=list)  # non-fatal notes (also used for block notices in audits)
@dataclass
class TrustedAgentCard:
    """Agent card for discovery and verification.

    The card is signed over a canonical JSON payload of its core fields;
    ``metadata`` is NOT part of the signed payload.
    """

    name: str
    description: str
    capabilities: List[str]
    identity: Optional[CMVKIdentity] = None
    trust_score: float = 1.0
    card_signature: Optional[CMVKSignature] = None
    metadata: Dict[str, Any] = field(default_factory=dict)

    def _get_signable_content(self) -> str:
        """Canonical, key-sorted JSON string that the signature covers."""
        ident = self.identity
        signable = {
            "name": self.name,
            "description": self.description,
            "capabilities": sorted(self.capabilities),
            "trust_score": self.trust_score,
            "identity_did": ident.did if ident else None,
            "identity_public_key": ident.public_key if ident else None,
        }
        return json.dumps(signable, sort_keys=True, separators=(",", ":"))

    def sign(self, identity: CMVKIdentity) -> None:
        """Attach *identity* (public part only) and sign the card with it."""
        self.identity = identity.public_identity()
        self.card_signature = identity.sign(self._get_signable_content())

    def verify_signature(self) -> bool:
        """Check the stored signature against the stored identity."""
        if self.identity is None or self.card_signature is None:
            return False
        return self.identity.verify_signature(
            self._get_signable_content(), self.card_signature
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize the card; identity/signature appear only when present."""
        payload: Dict[str, Any] = {
            "name": self.name,
            "description": self.description,
            "capabilities": self.capabilities,
            "trust_score": self.trust_score,
            "metadata": self.metadata,
        }
        if self.identity:
            payload["identity"] = self.identity.to_dict()
        if self.card_signature:
            payload["card_signature"] = self.card_signature.to_dict()
        return payload

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> "TrustedAgentCard":
        """Rebuild a card from ``to_dict`` output."""
        identity = (
            CMVKIdentity.from_dict(data["identity"]) if "identity" in data else None
        )
        card_signature = (
            CMVKSignature.from_dict(data["card_signature"])
            if "card_signature" in data
            else None
        )
        return cls(
            name=data["name"],
            description=data.get("description", ""),
            capabilities=data.get("capabilities", []),
            identity=identity,
            trust_score=data.get("trust_score", 1.0),
            card_signature=card_signature,
            metadata=data.get("metadata", {}),
        )
class TrustHandshake:
    """Handles trust verification between agents.

    Successful verifications are cached per peer DID for
    ``policy.cache_ttl_seconds`` seconds to avoid re-verifying chatty peers.
    """

    def __init__(
        self,
        my_identity: CMVKIdentity,
        policy: Optional[TrustPolicy] = None,
    ):
        """
        Args:
            my_identity: This agent's own identity (stored for callers).
            policy: Trust policy; defaults to ``TrustPolicy()``.
        """
        self.my_identity = my_identity
        self.policy = policy or TrustPolicy()
        # peer DID -> (successful verification result, time it was cached)
        self._verified_peers: Dict[str, tuple[TrustVerificationResult, datetime]] = {}
        self._cache_ttl = timedelta(seconds=self.policy.cache_ttl_seconds)

    def _get_cached_result(self, did: str) -> Optional[TrustVerificationResult]:
        """Return a still-fresh cached result for *did*, evicting stale entries."""
        if did in self._verified_peers:
            result, timestamp = self._verified_peers[did]
            if datetime.now(timezone.utc) - timestamp < self._cache_ttl:
                return result
            del self._verified_peers[did]
        return None

    def _cache_result(self, did: str, result: TrustVerificationResult) -> None:
        """Cache a successful verification result for *did*."""
        self._verified_peers[did] = (result, datetime.now(timezone.utc))

    def verify_peer(
        self,
        peer_card: TrustedAgentCard,
        required_capabilities: Optional[List[str]] = None,
        min_trust_score: Optional[float] = None,
    ) -> TrustVerificationResult:
        """Verify a peer agent's trustworthiness.

        Args:
            peer_card: The peer's (ideally signed) agent card.
            required_capabilities: Capabilities the peer must advertise.
            min_trust_score: Per-call override of ``policy.min_trust_score``.

        Returns:
            The verification result; only successful results are cached.
        """
        warnings: List[str] = []
        min_score = min_trust_score or self.policy.min_trust_score
        if peer_card.identity:
            cached = self._get_cached_result(peer_card.identity.did)
            # BUG FIX: previously a cached result was returned unconditionally,
            # so a later call with stricter requirements could accept a peer
            # that no longer qualifies. Only reuse the cache when it still
            # satisfies this call's score and capability requirements.
            if cached and cached.trust_score >= min_score:
                missing_cached = set(required_capabilities or []) - set(
                    cached.verified_capabilities
                )
                if not missing_cached:
                    return cached
        if not peer_card.identity:
            return TrustVerificationResult(
                trusted=False,
                trust_score=0.0,
                reason="No cryptographic identity provided",
            )
        if not peer_card.identity.did.startswith("did:cmvk:"):
            return TrustVerificationResult(
                trusted=False,
                trust_score=0.0,
                reason="Invalid DID format",
            )
        if not peer_card.verify_signature():
            return TrustVerificationResult(
                trusted=False,
                trust_score=0.0,
                reason="Card signature verification failed",
            )
        if peer_card.trust_score < min_score:
            return TrustVerificationResult(
                trusted=False,
                trust_score=peer_card.trust_score,
                reason=f"Trust score {peer_card.trust_score} below minimum {min_score}",
            )
        verified_caps = peer_card.capabilities.copy()
        if required_capabilities:
            missing = set(required_capabilities) - set(peer_card.capabilities)
            if missing:
                return TrustVerificationResult(
                    trusted=False,
                    trust_score=peer_card.trust_score,
                    reason=f"Missing required capabilities: {missing}",
                    verified_capabilities=verified_caps,
                )
        result = TrustVerificationResult(
            trusted=True,
            trust_score=peer_card.trust_score,
            reason="Verification successful",
            verified_capabilities=verified_caps,
            warnings=warnings,
        )
        self._cache_result(peer_card.identity.did, result)
        return result

    def clear_cache(self) -> None:
        """Drop all cached verification results."""
        self._verified_peers.clear()
@dataclass
class Delegation:
    """A delegation of capabilities.

    One signed link in a DelegationChain: *delegator* grants
    *capabilities* to *delegatee*.
    """
    delegator: str  # DID of the granting identity
    delegatee: str  # DID of the receiving identity
    capabilities: List[str]  # capability names being delegated
    signature: Optional[CMVKSignature] = None  # delegator's signature over the canonical JSON payload
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))  # creation time (UTC)
    expires_at: Optional[datetime] = None  # None means the delegation never expires
class DelegationChain:
    """Manages a chain of trust delegations.

    Each delegation is signed over a canonical JSON payload; ``verify``
    re-derives the same payload and checks every link, including that each
    delegation's delegator is the previous delegation's delegatee.
    """

    def __init__(self, root_identity: CMVKIdentity):
        """Start an empty chain rooted at *root_identity*."""
        self.root_identity = root_identity
        self.delegations: List[Delegation] = []
        self._known_identities: Dict[str, CMVKIdentity] = {
            root_identity.did: root_identity
        }

    @staticmethod
    def _payload(
        delegator_did: str,
        delegatee_did: str,
        capabilities: List[str],
        expires_at: Optional[datetime],
    ) -> str:
        """Canonical JSON string that is signed for one delegation link."""
        return json.dumps(
            {
                "delegator": delegator_did,
                "delegatee": delegatee_did,
                "capabilities": sorted(capabilities),
                "expires_at": expires_at.isoformat() if expires_at else None,
            },
            sort_keys=True,
        )

    def add_delegation(
        self,
        delegatee: TrustedAgentCard,
        capabilities: List[str],
        expires_in_hours: Optional[int] = None,
        delegator_identity: Optional[CMVKIdentity] = None,
    ) -> Delegation:
        """Sign and append a delegation; the delegatee becomes a known identity.

        Raises:
            ValueError: when the delegatee card carries no identity.
        """
        if not delegatee.identity:
            raise ValueError("Delegatee must have identity")
        signer = delegator_identity or self.root_identity
        target_did = delegatee.identity.did
        expires_at = (
            datetime.now(timezone.utc) + timedelta(hours=expires_in_hours)
            if expires_in_hours
            else None
        )
        signature = signer.sign(
            self._payload(signer.did, target_did, capabilities, expires_at)
        )
        delegation = Delegation(
            delegator=signer.did,
            delegatee=target_did,
            capabilities=capabilities,
            signature=signature,
            expires_at=expires_at,
        )
        self.delegations.append(delegation)
        self._known_identities[target_did] = delegatee.identity
        return delegation

    def verify(self) -> bool:
        """Verify the entire delegation chain (expiry, signatures, linkage)."""
        previous: Optional[Delegation] = None
        for delegation in self.delegations:
            if delegation.expires_at and delegation.expires_at < datetime.now(
                timezone.utc
            ):
                return False
            if delegation.signature is None:
                return False
            signer = self._known_identities.get(delegation.delegator)
            if signer is None:
                return False
            payload = self._payload(
                delegation.delegator,
                delegation.delegatee,
                delegation.capabilities,
                delegation.expires_at,
            )
            if not signer.verify_signature(payload, delegation.signature):
                return False
            # every link after the first must be granted by the previous delegatee
            if previous is not None and delegation.delegator != previous.delegatee:
                return False
            previous = delegation
        return True
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/agent/llama-index-agent-agentmesh/llama_index/agent/agentmesh/trust.py",
"license": "MIT License",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/agent/llama-index-agent-agentmesh/llama_index/agent/agentmesh/worker.py | """Trusted agent worker for LlamaIndex."""
from __future__ import annotations
from typing import Any, List, Optional, Sequence
from llama_index.core.agent import AgentRunner
from llama_index.core.agent.types import BaseAgentWorker, Task, TaskStep, TaskStepOutput
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llms import LLM
from llama_index.core.tools import BaseTool
from llama_index.agent.agentmesh.identity import CMVKIdentity
from llama_index.agent.agentmesh.trust import (
TrustHandshake,
TrustPolicy,
TrustedAgentCard,
TrustVerificationResult,
)
class TrustedAgentWorker(BaseAgentWorker):
    """
    Agent worker with cryptographic identity and trust verification.

    This worker extends the base agent worker to add:
    - Cryptographic identity for authentication
    - Trust verification for peer agents
    - Capability-based access control
    - Audit logging of agent actions
    """

    def __init__(
        self,
        tools: Sequence[BaseTool],
        llm: LLM,
        identity: CMVKIdentity,
        policy: Optional[TrustPolicy] = None,
        callback_manager: Optional[CallbackManager] = None,
        verbose: bool = False,
    ):
        """
        Initialize trusted agent worker.

        Args:
            tools: List of tools available to the agent
            llm: Language model for the agent
            identity: Cryptographic identity for this agent
            policy: Trust policy to enforce
            callback_manager: Optional callback manager
            verbose: Enable verbose logging
        """
        self._tools = list(tools)
        self._llm = llm
        self._identity = identity
        self._policy = policy or TrustPolicy()
        self._callback_manager = callback_manager
        self._verbose = verbose
        self._handshake = TrustHandshake(identity, policy)
        self._agent_card = self._create_agent_card()

    def _create_agent_card(self) -> TrustedAgentCard:
        """Create and sign this agent's card for discovery."""
        card = TrustedAgentCard(
            name=self._identity.agent_name,
            description=f"LlamaIndex agent with tools: {[t.metadata.name for t in self._tools]}",
            capabilities=self._identity.capabilities,
        )
        card.sign(self._identity)
        return card

    @classmethod
    def from_tools(
        cls,
        tools: Sequence[BaseTool],
        llm: LLM,
        identity: CMVKIdentity,
        policy: Optional[TrustPolicy] = None,
        callback_manager: Optional[CallbackManager] = None,
        verbose: bool = False,
        **kwargs: Any,
    ) -> "TrustedAgentWorker":
        """
        Create a trusted agent worker from tools.

        Args:
            tools: List of tools for the agent
            llm: Language model
            identity: Agent's cryptographic identity
            policy: Trust policy
            callback_manager: Callback manager
            verbose: Verbose mode
            **kwargs: Additional arguments (currently unused)

        Returns:
            TrustedAgentWorker instance
        """
        return cls(
            tools=tools,
            llm=llm,
            identity=identity,
            policy=policy,
            callback_manager=callback_manager,
            verbose=verbose,
        )

    @property
    def identity(self) -> CMVKIdentity:
        """Get this agent's identity."""
        return self._identity

    @property
    def agent_card(self) -> TrustedAgentCard:
        """Get this agent's card for discovery."""
        return self._agent_card

    def verify_peer(
        self,
        peer_card: TrustedAgentCard,
        required_capabilities: Optional[List[str]] = None,
    ) -> TrustVerificationResult:
        """
        Verify a peer agent before accepting tasks.

        Args:
            peer_card: The peer's agent card
            required_capabilities: Capabilities required from peer

        Returns:
            Verification result
        """
        return self._handshake.verify_peer(peer_card, required_capabilities)

    def _enforce_invoker_trust(
        self, invoker_card: Optional[TrustedAgentCard]
    ) -> None:
        """Raise PermissionError when the policy blocks the invoker.

        Shared by ``run_step`` and ``arun_step`` (CONSISTENCY: the two
        previously duplicated this check inline). No-op when verification is
        not required or no card was supplied.

        Raises:
            PermissionError: when verification fails and the policy blocks
                unverified invokers.
        """
        if not (self._policy.require_verification and invoker_card):
            return
        result = self.verify_peer(invoker_card)
        if not result.trusted and self._policy.block_unverified:
            raise PermissionError(
                f"Task rejected: invoker verification failed - {result.reason}"
            )

    def initialize_step(self, task: Task, **kwargs: Any) -> TaskStep:
        """
        Initialize a new task step.

        Args:
            task: The task to execute
            **kwargs: Additional arguments

        Returns:
            Initial task step carrying this agent's trust context
        """
        return TaskStep(
            task_id=task.task_id,
            step_id=f"{task.task_id}_step_0",
            input=task.input,
            step_state={
                "agent_did": self._identity.did,
                "trust_verified": True,
            },
        )

    def run_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput:
        """
        Run a single step of the task.

        Args:
            step: Current step
            task: Parent task
            **kwargs: Additional arguments; ``invoker_card`` is checked
                against the trust policy when present

        Returns:
            Step output

        Raises:
            PermissionError: if the invoker fails trust verification
        """
        self._enforce_invoker_trust(kwargs.get("invoker_card"))
        # Use the LLM directly to generate a response
        messages = [ChatMessage(role=MessageRole.USER, content=step.input)]
        response = self._llm.chat(messages)
        return TaskStepOutput(
            output=str(response),
            task_step=step,
            is_last=True,
            next_steps=[],
        )

    async def arun_step(
        self, step: TaskStep, task: Task, **kwargs: Any
    ) -> TaskStepOutput:
        """
        Async version of run_step.

        Args:
            step: Current step
            task: Parent task
            **kwargs: Additional arguments; ``invoker_card`` is checked
                against the trust policy when present

        Returns:
            Step output

        Raises:
            PermissionError: if the invoker fails trust verification
        """
        self._enforce_invoker_trust(kwargs.get("invoker_card"))
        # Use the LLM's native async method
        messages = [ChatMessage(role=MessageRole.USER, content=step.input)]
        response = await self._llm.achat(messages)
        return TaskStepOutput(
            output=str(response),
            task_step=step,
            is_last=True,
            next_steps=[],
        )

    def stream_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput:
        """
        Stream a task step (delegates to run_step; no token streaming).

        Args:
            step: Current step
            task: Parent task
            **kwargs: Additional arguments

        Returns:
            Step output
        """
        return self.run_step(step, task, **kwargs)

    async def astream_step(
        self, step: TaskStep, task: Task, **kwargs: Any
    ) -> TaskStepOutput:
        """
        Async stream a task step (delegates to arun_step; no token streaming).

        Args:
            step: Current step
            task: Parent task
            **kwargs: Additional arguments

        Returns:
            Step output
        """
        return await self.arun_step(step, task, **kwargs)

    def finalize_task(self, task: Task, **kwargs: Any) -> None:
        """
        Finalize a completed task.

        Args:
            task: The completed task
            **kwargs: Additional arguments
        """
        # Log completion if auditing
        if self._policy.audit_queries:
            # Would log to audit system
            pass

    def as_agent(self, **kwargs: Any) -> AgentRunner:
        """
        Create an agent runner from this worker.

        Args:
            **kwargs: Additional arguments for AgentRunner

        Returns:
            AgentRunner instance
        """
        return AgentRunner.from_llm(
            llm=self._llm,
            tools=self._tools,
            callback_manager=self._callback_manager,
            verbose=self._verbose,
            **kwargs,
        )
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/agent/llama-index-agent-agentmesh/llama_index/agent/agentmesh/worker.py",
"license": "MIT License",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/node_parser/llama-index-node-parser-chonkie/llama_index/node_parser/chonkie/chunkers.py | from typing import Any, Callable, List, Optional, Union
from chonkie.chunker.base import BaseChunker
from chonkie.pipeline import ComponentRegistry, ComponentType
from pydantic import Field
import logging
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.node_parser.interface import (
MetadataAwareTextSplitter,
)
from llama_index.core.node_parser.node_utils import default_id_func
# All chunker aliases registered with chonkie, minus the two this wrapper
# does not expose ("table" and "slumber").
# see https://github.com/chonkie-inc/chonkie/blob/cd8bd643bd7045686f0a8b73a64f1c9296c0dae2/src/chonkie/cli/cli_utils.py#L34-L36
CHUNKERS = sorted(
    component.alias
    for component in ComponentRegistry.list_components(
        component_type=ComponentType.CHUNKER
    )
    if component.alias not in ("table", "slumber")
)

logger = logging.getLogger(__name__)
class Chunker(MetadataAwareTextSplitter):
    """
    Wrapper for Chonkie's chunkers.

    This class integrates Chonkie's chunking functionality with LlamaIndex's
    MetadataAwareTextSplitter interface.
    """

    # this is related to the metadata schema in the super, or pydantic will fail
    # attributes need to be defined as pydantic fields
    chunker: Optional[BaseChunker] = Field(default=None, exclude=True)
    valid_chunker_aliases: List[str] = Field(
        default_factory=lambda: CHUNKERS, exclude=True
    )
    _logged_warning_for_incompatible_chunker: bool = False

    def __init__(
        self,
        chunker: Union[str, BaseChunker] = "recursive",
        callback_manager: Optional[CallbackManager] = None,
        include_metadata: bool = True,
        include_prev_next_rel: bool = True,
        id_func: Optional[Callable] = None,
        **kwargs: Any,
    ):
        # NOTE: __init__.__doc__ is assigned after the class definition (see
        # module bottom) so it can interpolate the runtime CHUNKERS list.
        id_func = id_func or default_id_func
        callback_manager = callback_manager or CallbackManager([])
        super().__init__(
            callback_manager=callback_manager,
            include_metadata=include_metadata,
            include_prev_next_rel=include_prev_next_rel,
            id_func=id_func,
        )
        if isinstance(chunker, str) and chunker not in self.valid_chunker_aliases:
            raise ValueError(
                f"Invalid chunker '{chunker}'. Must be one of: {self.valid_chunker_aliases}"
            )
        if isinstance(chunker, str):
            # flexible approach to pull chunker classes based on their alias
            ChunkingClass = ComponentRegistry.get_chunker(chunker).component_class
            self.chunker = ChunkingClass(**kwargs)
        else:
            self.chunker = chunker

    @classmethod
    def from_defaults(
        cls,
        callback_manager: Optional[CallbackManager] = None,
        include_metadata: bool = True,
        include_prev_next_rel: bool = True,
    ) -> "Chunker":
        """Initialize with parameters (default "recursive" chunker)."""
        callback_manager = callback_manager or CallbackManager([])
        return cls(
            callback_manager=callback_manager,
            include_metadata=include_metadata,
            include_prev_next_rel=include_prev_next_rel,
        )

    @classmethod
    def class_name(cls) -> str:
        """Return the serialization name of this node parser."""
        return "Chunker"

    def split_text_metadata_aware(self, text: str, metadata_str: str) -> List[str]:
        """Split text with metadata awareness.

        When the underlying chonkie chunker exposes a tokenizer and a
        ``chunk_size``, the chunk budget is temporarily reduced by the token
        cost of ``metadata_str`` before splitting.

        Raises:
            ValueError: if the metadata alone consumes the whole chunk budget.
        """
        # only apply metadata-aware chunking if the chunker is compatible (counter-example fast)
        if (
            hasattr(self.chunker, "_tokenizer")
            and (self.chunker._tokenizer is not None)
            and hasattr(self.chunker, "chunk_size")
            and self.chunker.chunk_size is not None
        ):
            # count tokens consumed by the metadata and shrink the budget
            num_tokens = self.chunker._tokenizer.count_tokens(metadata_str)
            original_chunk_size = self.chunker.chunk_size
            effective_chunk_size = original_chunk_size - num_tokens
            # BUG FIX: validate BEFORE mutating the chunker, so a failing call
            # cannot leave the shared chunker with a non-positive chunk_size.
            if effective_chunk_size <= 0:
                raise ValueError(
                    f"Metadata length ({num_tokens} tokens) is longer than or equal to "
                    f"chunk size ({original_chunk_size}). Consider increasing the chunk size or "
                    "decreasing the size of your metadata to avoid this."
                )
            # NOTE(review): mutating self.chunker.chunk_size is not
            # thread-safe if the same instance is shared across threads.
            self.chunker.chunk_size = effective_chunk_size
            try:
                # BUG FIX: restore in a finally block; previously an exception
                # from split_text left chunk_size permanently reduced.
                return self.split_text(text)
            finally:
                self.chunker.chunk_size = original_chunk_size
        # fallback mechanism for incompatible chunkers (only logs a warning once per instance)
        if not self._logged_warning_for_incompatible_chunker:
            logger.warning(
                "current chunker type does not support metadata awareness. Proceeding with regular chunking."
                " This warning will only be logged once per instance."
            )
            self._logged_warning_for_incompatible_chunker = True
        return self.split_text(text)

    def split_text(self, text: str) -> List[str]:
        """Split incoming text and return chunks using Chonkie chunker."""
        if text == "":
            return [text]
        if self.chunker is None:
            raise ValueError("Chunker not initialized")
        chunks = self.chunker.chunk(text)
        # extract attributes from chonkie Chunk dataclass
        # see https://github.com/chonkie-inc/chonkie/blob/cd8bd643bd7045686f0a8b73a64f1c9296c0dae2/src/chonkie/types/base.py#L32-L38
        if isinstance(chunks, list):
            return [
                chunk.text if hasattr(chunk, "text") else str(chunk) for chunk in chunks
            ]
        else:
            return [chunks.text if hasattr(chunks, "text") else str(chunks)]
# MonkeyPatch for https://github.com/run-llama/llama_index/pull/20622#discussion_r2764697454
Chunker.__init__.__doc__ = f"""
Initialize with a Chonkie chunker instance or create one if not provided.
Args:
chunker Union[str, BaseChunker]: The chunker to use. Must be one of {CHUNKERS} or a chonkie chunker instance.
callback_manager (Optional[CallbackManager]): Callback manager for handling callbacks.
include_metadata (bool): Whether to include metadata in the nodes.
include_prev_next_rel (bool): Whether to include previous/next relationships.
id_func (Optional[Callable]): Function to generate node IDs.
**kwargs: Additional keyword arguments for Chonkie's RecursiveChunker.
"""
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/node_parser/llama-index-node-parser-chonkie/llama_index/node_parser/chonkie/chunkers.py",
"license": "MIT License",
"lines": 133,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
run-llama/llama_index:llama-index-integrations/node_parser/llama-index-node-parser-chonkie/tests/test_chunkers.py | """Tests for Chonkie Chunker integration."""
import pytest
from typing import List
from llama_index.core.schema import Document, MetadataMode, TextNode
from llama_index.node_parser.chonkie.chunkers import Chunker, CHUNKERS
def test_chonkie_chunker_initialization() -> None:
    """A Chunker built from kwargs carries an underlying chonkie chunker."""
    wrapper = Chunker(chunk_size=512)
    assert wrapper is not None
    assert wrapper.chunker is not None
def test_chonkie_chunker_from_defaults() -> None:
    """from_defaults builds a Chunker backed by the default RecursiveChunker."""
    from chonkie import RecursiveChunker

    built = Chunker.from_defaults()
    assert built is not None
    assert isinstance(built, Chunker)
    # the default chunker alias is "recursive"
    assert isinstance(built.chunker, RecursiveChunker)
def test_chonkie_chunker_class_name() -> None:
    """class_name reports the wrapper's registered name."""
    assert Chunker(chunk_size=512).class_name() == "Chunker"
def test_split_text_empty() -> None:
    """An empty input yields a single empty-string chunk."""
    parser = Chunker(chunk_size=512)
    assert parser.split_text("") == [""]
def test_split_text_basic() -> None:
    """A long text is split into a non-empty list of strings."""
    parser = Chunker(chunk_size=100)
    # Repeat a sentence until the text clearly exceeds one chunk.
    pieces = parser.split_text("This is a test. " * 50)
    assert len(pieces) > 0
    assert isinstance(pieces, list)
    assert all(isinstance(piece, str) for piece in pieces)
def test_split_text_short() -> None:
    """Text already under the chunk size comes back unmodified as one chunk."""
    parser = Chunker(chunk_size=512)
    sample = "This is a short text."
    assert parser.split_text(sample) == [sample]
def test_split_text_with_paragraphs() -> None:
    """Multi-paragraph input is split into several non-empty chunks."""
    parser = Chunker(chunk_size=100)
    sample = "First paragraph. " * 20 + "\n\n" + "Second paragraph. " * 20
    pieces = parser.split_text(sample)
    assert len(pieces) > 1
    # Truthiness check: no chunk may be an empty string.
    assert all(piece for piece in pieces)
def test_split_text_metadata_aware_basic() -> None:
    """Metadata-aware splitting yields string chunks that reassemble the text."""
    parser = Chunker(chunker="recursive", chunk_size=200)
    sample = "This is a test document. " * 20
    pieces = parser.split_text_metadata_aware(
        sample, "title: Test Document\nauthor: Test Author\n"
    )
    assert pieces
    assert isinstance(pieces, list)
    assert all(isinstance(piece, str) for piece in pieces)
    # No overlap is configured, so joining the chunks restores the input.
    assert "".join(pieces).strip() == sample.strip()
def test_split_text_metadata_aware_more_chunks_with_metadata() -> None:
    """Metadata shrinks the effective chunk size, so chunk count never drops."""
    parser = Chunker(chunker="recursive", chunk_size=100)
    sample = "This is a test document. " * 30
    plain = parser.split_text(sample)
    aware = parser.split_text_metadata_aware(
        sample, "title: Test Document\nauthor: Test Author\ndate: 2024\n"
    )
    assert len(aware) >= len(plain)
    assert all(isinstance(piece, str) for piece in aware)
    assert "".join(aware).strip() == sample.strip()
def test_split_text_metadata_aware_empty_metadata() -> None:
    """With empty metadata the metadata-aware path matches plain split_text."""
    parser = Chunker(chunker="token", chunk_size=80)
    sample = "This is a test. " * 20
    aware = parser.split_text_metadata_aware(sample, "")
    plain = parser.split_text(sample)
    # Equality of the lists also implies equal chunk counts.
    assert aware == plain
def test_split_text_metadata_aware_value_error_when_metadata_too_long() -> None:
    """Oversized metadata (effective chunk size <= 0) raises ValueError."""
    parser = Chunker(chunker="recursive", chunk_size=5)
    oversized = "title: " + "very long title " * 20 + "author: " + "name " * 20
    with pytest.raises(ValueError) as excinfo:
        parser.split_text_metadata_aware("Short text.", oversized)
    message = str(excinfo.value)
    assert "Metadata length" in message
    assert "chunk size" in message.lower()
def test_split_text_metadata_aware_chunk_size_restored() -> None:
    """Repeated metadata-aware calls agree, proving chunk_size is restored."""
    parser = Chunker(chunker="recursive", chunk_size=60)
    sample = "This is a test document. " * 15
    first = parser.split_text_metadata_aware(sample, "title: Doc\n")
    second = parser.split_text_metadata_aware(sample, "title: Doc\n")
    assert first == second
    # Plain splitting still works after the metadata-aware calls.
    assert len(parser.split_text(sample)) >= 1
def test_split_text_metadata_aware_preserves_content() -> None:
    """No overlap is configured, so joined chunks reproduce the original text."""
    parser = Chunker(chunker="recursive", chunk_size=50)
    sample = "One. Two. Three. Four. Five. " * 10
    pieces = parser.split_text_metadata_aware(sample, "key: value\n")
    assert pieces
    assert "".join(pieces).strip() == sample.strip()
def test_split_text_metadata_aware_single_chunk_short_text() -> None:
    """Short text plus small metadata still fits into a single chunk."""
    parser = Chunker(chunker="recursive", chunk_size=512)
    sample = "Short piece of text."
    pieces = parser.split_text_metadata_aware(sample, "title: Short\n")
    assert pieces == [sample]
def test_split_text_metadata_aware_fallback_same_as_split_text() -> None:
    """Tokenizer-less chunkers fall back to split_text with identical output."""
    parser = Chunker(chunker="fast", chunk_size=10, forward_fallback=True)
    sample = "This is a test. " * 50
    aware = parser.split_text_metadata_aware(sample, "title: Test\n")
    assert aware == parser.split_text(sample)
def test_split_text_metadata_aware_fallback_logs_warning(
    caplog: pytest.LogCaptureFixture,
) -> None:
    """The incompatibility warning is emitted (once per instance) on fallback."""
    import logging

    with caplog.at_level(logging.WARNING):
        parser = Chunker(chunker="fast", chunk_size=20, forward_fallback=True)
        parser.split_text_metadata_aware("Some text." * 100, "meta: value\n")
        parser.split_text_metadata_aware("More text." * 100, "meta: value\n")
    # Despite two calls, the warning should appear only once per instance.
    hits = [
        record
        for record in caplog.records
        if "metadata awareness" in (record.message or "")
    ]
    assert len(hits) >= 1
    assert "does not support metadata awareness" in (hits[0].message or "")
def test_get_nodes_from_documents() -> None:
    """Documents are parsed into TextNodes and metadata survives the split."""
    parser = Chunker(chunk_size=100)
    nodes: List[TextNode] = parser.get_nodes_from_documents(
        [
            Document(text="This is document one. " * 30, metadata={"doc_id": "1"}),
            Document(text="This is document two. " * 30, metadata={"doc_id": "2"}),
        ]
    )
    assert nodes
    assert all(isinstance(node, TextNode) for node in nodes)
    # At least one node must carry metadata from the source documents.
    assert any(node.metadata.get("doc_id") in ["1", "2"] for node in nodes)
def test_start_end_char_idx() -> None:
    """Each node carries char offsets that exactly span its own content."""
    parser = Chunker(chunk_size=50)
    nodes: List[TextNode] = parser.get_nodes_from_documents(
        [Document(text="This is a test document. " * 20)]
    )
    for node in nodes:
        assert node.start_char_idx is not None
        assert node.end_char_idx is not None
        # The offset span must equal the node content length.
        content = node.get_content(metadata_mode=MetadataMode.NONE)
        assert node.end_char_idx - node.start_char_idx == len(content)
def test_nodes_preserve_document_metadata() -> None:
    """Every produced node inherits the source document's metadata values."""
    parser = Chunker(chunk_size=100)
    source_metadata = {"title": "Test Document", "author": "Test Author", "year": 2024}
    document = Document(text="This is a test. " * 30, metadata=source_metadata)
    for node in parser.get_nodes_from_documents([document]):
        for key in ("title", "author", "year"):
            assert node.metadata[key] == source_metadata[key]
def test_chunker_with_long_text() -> None:
    """Test chunker with a longer piece of text."""
    chunker = Chunker(chunk_size=200)
    # Create a long text that will require multiple chunks
    # NOTE: the literal's embedded newlines/indentation are intentional — the
    # chunker must cope with pre-formatted, line-wrapped prose.
    text = (
        """
    The quick brown fox jumps over the lazy dog. This is a test sentence.
    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed do eiusmod
    tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,
    quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo
    consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse
    cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat
    non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
    """
        * 10
    )
    chunks = chunker.split_text(text)
    assert len(chunks) > 1
    # Verify no chunks are empty
    assert all(len(chunk.strip()) > 0 for chunk in chunks)
def test_chunker_preserves_text_content() -> None:
    """
    Test that chunker preserves all text content across chunks.

    The original assertion only checked that the joined output was non-empty,
    which passes even if content is dropped. With the recursive chunker and no
    overlap, joining the chunks must reproduce the input (modulo surrounding
    whitespace), matching the behaviour pinned by the metadata-aware
    preservation tests in this module.
    """
    chunker = Chunker(chunker="recursive", chunk_size=100)
    text = "This is a test sentence. " * 50
    chunks = chunker.split_text(text)
    combined_text = "".join(chunks)
    # Nothing lost, nothing duplicated.
    assert combined_text.strip() == text.strip()
def test_include_metadata_flag() -> None:
    """Both include_metadata settings still produce nodes."""
    with_meta = Chunker(chunk_size=100, include_metadata=True)
    without_meta = Chunker(chunk_size=100, include_metadata=False)
    document = Document(
        text="This is a test. " * 30, metadata={"title": "Test", "author": "Author"}
    )
    # Neither setting may prevent node production.
    assert len(with_meta.get_nodes_from_documents([document])) > 0
    assert len(without_meta.get_nodes_from_documents([document])) > 0
def test_include_prev_next_rel_flag() -> None:
    """include_prev_next_rel controls sibling relationships between nodes."""
    with_rel = Chunker(chunk_size=100, include_prev_next_rel=True)
    without_rel = Chunker(chunk_size=100, include_prev_next_rel=False)
    document = Document(text="This is a test. " * 30)
    linked = with_rel.get_nodes_from_documents([document])
    unlinked = without_rel.get_nodes_from_documents([document])
    assert len(linked) > 0
    assert len(unlinked) > 0
    if len(linked) > 1:
        # Any node after the first should point back to its predecessor
        # when relationships are enabled.
        assert (
            linked[1].prev_node is not None
            or not with_rel.include_prev_next_rel
        )
def test_chunker_with_special_characters() -> None:
    """Punctuation, currency symbols and email addresses survive splitting."""
    parser = Chunker(chunk_size=100)
    sample = "Hello! How are you? I'm fine. This costs $10. Email: test@example.com"
    pieces = parser.split_text(sample)
    assert pieces
    rejoined = "".join(pieces)
    assert "@example.com" in rejoined or len(pieces) == 1
def test_chunker_with_unicode() -> None:
    """Non-ASCII characters and emoji survive splitting."""
    parser = Chunker(chunk_size=100)
    sample = "Hello 世界! This is a test with émojis 🎉 and special chars: café, naïve."
    pieces = parser.split_text(sample)
    assert pieces
    rejoined = "".join(pieces)
    assert "世界" in rejoined or "🎉" in rejoined or len(pieces) == 1
def test_multiple_documents() -> None:
    """Nodes are produced for every input document, each keeping its ID."""
    parser = Chunker(chunk_size=100)
    nodes = parser.get_nodes_from_documents(
        [
            Document(text="Document one. " * 30, metadata={"id": 1}),
            Document(text="Document two. " * 30, metadata={"id": 2}),
            Document(text="Document three. " * 30, metadata={"id": 3}),
        ]
    )
    assert len(nodes) > 3
    # All three document IDs must be represented among the nodes.
    assert {node.metadata.get("id") for node in nodes} == {1, 2, 3}
def test_chunker_consistency() -> None:
    """Splitting the same text twice yields identical chunk lists."""
    parser = Chunker(chunk_size=100)
    sample = "This is a test sentence. " * 20
    # List equality also implies equal chunk counts.
    assert parser.split_text(sample) == parser.split_text(sample)
def test_chunker_kwargs() -> None:
    """
    Extra keyword arguments are forwarded to the underlying chunker.

    TokenChunker is used because it supports chunk_overlap; a smaller chunk
    size must never yield fewer chunks than a larger one.
    """
    small = Chunker(chunker="token", chunk_size=50, chunk_overlap=10)
    large = Chunker(chunker="token", chunk_size=500, chunk_overlap=50)
    sample = "This is a test sentence. " * 50
    assert len(small.split_text(sample)) >= len(large.split_text(sample))
def test_available_chunkers() -> None:
    """Every registered chunker type can be instantiated.

    Initialization failures are re-raised as AssertionError naming the
    offending type, chained with ``from e`` so the original traceback is
    preserved (the original code dropped the cause). The per-type kwargs
    selection is hoisted out of the ``try`` so only the constructor call is
    attributed to a failure.
    """
    assert len(CHUNKERS) > 0
    for chunker_type in CHUNKERS:
        # Some chunkers need extra constructor arguments.
        kwargs = {"language": "python"} if chunker_type == "code" else {}
        try:
            chunker = Chunker(chunker=chunker_type, **kwargs)
        except Exception as e:
            raise AssertionError(
                f"Failed to initialize chunker '{chunker_type}': {e}"
            ) from e
        assert chunker is not None
        assert chunker.chunker is not None
def test_chunker_with_instance() -> None:
    """A pre-built chonkie chunker instance is accepted positionally."""
    from chonkie import RecursiveChunker

    inner = RecursiveChunker(chunk_size=2048)
    parser = Chunker(inner)
    assert parser is not None
    assert parser.chunker is not None
    assert isinstance(parser.chunker, RecursiveChunker)
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/node_parser/llama-index-node-parser-chonkie/tests/test_chunkers.py",
"license": "MIT License",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-moss/examples/moss_agent.py | import asyncio
import os
from typing import List
from dotenv import load_dotenv
load_dotenv()
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI
from llama_index.tools.moss import MossToolSpec, QueryOptions
from inferedge_moss import MossClient, DocumentInfo
async def main():
    """End-to-end Moss tool demo: index documents, list indexes, query via an
    agent, and clean up the created index.

    Requires MOSS_PROJECT_KEY, MOSS_PROJECT_ID and OPENAI_API_KEY in the
    environment (loaded from .env above).
    """
    # 1. Initialize Client
    MOSS_PROJECT_KEY = os.getenv("MOSS_PROJECT_KEY")
    MOSS_PROJECT_ID = os.getenv("MOSS_PROJECT_ID")
    client = MossClient(project_id=MOSS_PROJECT_ID, project_key=MOSS_PROJECT_KEY)
    # 2. Configure query settings (optional — defaults: top_k=5, alpha=0.5, model_id="moss-minilm")
    query_options = QueryOptions(top_k=5, alpha=0.5, model_id="moss-minilm")
    # 3. Initialize Tool
    moss_tool = MossToolSpec(
        client=client,
        index_name="knowledge_base_new",
        query_options=query_options,
    )
    # 4. List existing indexes before indexing
    print("\n[Step 4] Listing existing indexes...")
    print(await moss_tool.list_indexes())
    # 5. Index Documents
    print("\n[Step 5] Indexing Documents...")
    docs: List[DocumentInfo] = [
        DocumentInfo(
            id="123",
            text="LlamaIndex is a data framework for LLM-based applications.",
            metadata={"source": "docs", "category": "framework"},
        ),
        DocumentInfo(
            id="124",
            text="Moss is a real-time semantic search engine optimized for speed.",
            metadata={"source": "moss_website", "category": "engine"},
        ),
    ]
    await moss_tool.index_docs(docs)
    print(f"Indexed {len(docs)} documents.")
    # 6. List indexes again to confirm creation
    print("\n[Step 6] Listing indexes after indexing...")
    print(await moss_tool.list_indexes())
    # 7. Create agent with all exposed tools (query, list_indexes, delete_index)
    print("\n[Step 7] Creating Agent...")
    llm = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
    agent = ReActAgent(
        tools=moss_tool.to_tool_list(),
        llm=llm,
        verbose=True,
    )
    # 8. Run Agent — natural language query triggers the query tool
    print("\n[Step 8] Querying via Agent...")
    response = await agent.run(user_msg="What is Moss?")
    print("\nAgent Response:")
    print(response)
    # 9. Run Agent — ask it to list available indexes
    print("\n[Step 9] Listing indexes via Agent...")
    response = await agent.run(user_msg="What indexes are available?")
    print("\nAgent Response:")
    print(response)
    # 10. Clean up — delete the index directly (not via agent to avoid accidental deletion)
    print("\n[Step 10] Cleaning up...")
    # BUG FIX: previously deleted the hard-coded name "knowledge_base", which
    # did not match the "knowledge_base_new" index created above and left it
    # behind. Deleting via the tool's own index_name keeps the two in sync.
    print(await moss_tool.delete_index(moss_tool.index_name))
# Script entry point: the demo is fully async, so drive it with asyncio.run.
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-moss/examples/moss_agent.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-moss/llama_index/tools/moss/base.py | """Moss tool spec."""
from typing import List, Optional
from dataclasses import dataclass
from llama_index.core.tools.tool_spec.base import BaseToolSpec
from inferedge_moss import MossClient, DocumentInfo
from inferedge_moss import QueryOptions as MossQueryOptions
@dataclass
class QueryOptions:
    """Tunable parameters for Moss search queries.

    Attributes:
        top_k: How many hits to return per query (default 5).
        alpha: Hybrid-search blend; 0.0 is pure keyword, 1.0 pure semantic
            (default 0.5).
        model_id: Embedding model ID the index was created with
            (default "moss-minilm").
    """

    top_k: int = 5
    alpha: float = 0.5
    model_id: str = "moss-minilm"
class MossToolSpec(BaseToolSpec):
    """
    Moss Tool Spec.
    This tool allows agents to interact with the Moss search engine to index documents
    and query for relevant information.
    """

    # Methods exposed to the agent as tools. index_docs is deliberately not
    # listed, so an agent cannot (re)create indexes on its own.
    spec_functions: tuple[str, ...] = ("query", "list_indexes", "delete_index")

    def __init__(
        self,
        client: MossClient,
        index_name: str,
        query_options: Optional[QueryOptions] = None,
    ) -> None:
        """
        Initialize the Moss tool spec.
        Args:
            client (MossClient): The client to interact with the Moss service.
            index_name (str): The name of the index to use.
            query_options (Optional[QueryOptions]): Configuration options for the tool.
                Includes top_k (int), alpha (float), and model_id (str).
        Raises:
            ValueError: If alpha is outside [0, 1] or top_k is less than 1.
        """
        opt = query_options or QueryOptions()
        if not (0.0 <= opt.alpha <= 1.0):
            raise ValueError("alpha must be between 0 and 1")
        if opt.top_k < 1:
            raise ValueError("top_k must be greater than 0")
        self.top_k: int = opt.top_k
        self.alpha: float = opt.alpha
        self.model_id: str = opt.model_id
        self.client: MossClient = client
        self.index_name: str = index_name
        # Lazy-load flag: query() loads the index on first use; it is reset
        # whenever the index is recreated or deleted.
        self._index_loaded: bool = False

    async def index_docs(self, docs: List[DocumentInfo]) -> None:
        """Create the configured index from *docs* using the configured model,
        and reset the loaded flag so the next query reloads the fresh index."""
        await self.client.create_index(self.index_name, docs, model_id=self.model_id)
        self._index_loaded = False

    async def _load_index(self) -> None:
        """Load the index if it hasn't been loaded locally yet."""
        await self.client.load_index(self.index_name)
        self._index_loaded = True

    async def query(self, query: str) -> str:
        """
        Search the Moss knowledge base for information relevant to a specific query.
        This tool performs a hybrid semantic search to find the most relevant
        text snippets from the indexed documents. It is best used for answering
        technical questions, retrieving facts, or finding specific context
        within a large collection of documents.
        Args:
            query (str): The search terms or question to look up in the index.
        Returns:
            str: A formatted report containing the top matching text snippets,
                their relevance scores, and their source metadata (like filename).
        """
        # Lazily load the index on first query.
        if not self._index_loaded:
            await self._load_index()
        results = await self.client.query(
            self.index_name, query, MossQueryOptions(top_k=self.top_k, alpha=self.alpha)
        )
        answer = f"Search results for: '{query}'\n\n"
        for i, result in enumerate(results.docs):
            # Prefer filename, then source; fall back to a placeholder.
            source = (
                result.metadata.get("filename")
                or result.metadata.get("source")
                or "Unknown Source"
            )
            page = result.metadata.get("page", "N/A")
            answer += f"Match {i + 1} [Score: {result.score:.2f}]\n"
            answer += f"Source: {source} (Page: {page})\n"
            answer += f"Content: {result.text}\n"
            answer += "-" * 20 + "\n\n"
        return answer

    async def list_indexes(self) -> str:
        """
        List all available indexes in the Moss project.
        Use this tool to discover what indexes exist before querying or managing them.
        Returns:
            str: A formatted list of all index names in the project.
        """
        indexes = await self.client.list_indexes()
        if not indexes:
            return "No indexes found."
        result = "Available indexes:\n"
        for idx in indexes:
            result += f"  - {idx.name} (docs: {idx.doc_count}, status: {idx.status})\n"
        return result

    async def delete_index(self, index_name: str) -> str:
        """
        Delete an index from the Moss project.
        Use this tool to remove an index and all its documents when it is no longer needed.
        Args:
            index_name (str): The name of the index to delete.
        Returns:
            str: A confirmation message indicating the index was deleted.
        """
        await self.client.delete_index(index_name)
        # If the active index was deleted, force a reload on the next query.
        if index_name == self.index_name:
            self._index_loaded = False
        return f"Index '{index_name}' has been deleted."
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-moss/llama_index/tools/moss/base.py",
"license": "MIT License",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
run-llama/llama_index:llama-index-integrations/tools/llama-index-tools-moss/tests/test_base.py | import pytest
from unittest.mock import MagicMock, AsyncMock, patch
# Mock dependencies before import
# Stand-in modules so the unit under test can be imported without llama_index
# or the inferedge_moss SDK actually being installed.
mock_core = MagicMock()
mock_base_spec = MagicMock()


# Minimal BaseToolSpec replacement: the real base class only provides tool
# plumbing that these unit tests never exercise.
class MockBaseToolSpec:
    def __init__(self, *args, **kwargs):
        pass


mock_base_spec.BaseToolSpec = MockBaseToolSpec

# Every import path the module under test touches must resolve to a mock.
module_patches = {
    "inferedge_moss": MagicMock(),
    "llama_index.core": mock_core,
    "llama_index.core.tools": mock_core,
    "llama_index.core.tools.tool_spec": mock_core,
    "llama_index.core.tools.tool_spec.base": mock_base_spec,
}

# Apply patches before importing the unit under test
with patch.dict("sys.modules", module_patches):
    from llama_index.tools.moss.base import MossToolSpec, QueryOptions
def _make_mock_index(name: str, doc_count: int = 0, status: str = "ready") -> MagicMock:
idx = MagicMock()
idx.name = name
idx.doc_count = doc_count
idx.status = status
return idx
@pytest.fixture
def mock_client():
    """AsyncMock MossClient with canned query and list_indexes responses."""
    client = AsyncMock()
    client.create_index = AsyncMock()
    client.load_index = AsyncMock()
    client.delete_index = AsyncMock()

    # One fake hit for query().
    hit = MagicMock()
    hit.metadata = {"page": "33", "source": "mock_source"}
    hit.score = 10.00
    hit.text = "mock content"
    response = MagicMock()
    response.docs = [hit]
    response.score = 10.00
    client.query = AsyncMock(return_value=response)

    # Two fake indexes for list_indexes().
    client.list_indexes = AsyncMock(
        return_value=[
            _make_mock_index("index_a", doc_count=5, status="ready"),
            _make_mock_index("index_b", doc_count=12, status="ready"),
        ]
    )
    return client
@pytest.mark.asyncio
async def test_index_docs(mock_client):
    """index_docs forwards to create_index and marks the index as not loaded."""
    spec = MossToolSpec(client=mock_client, index_name="test")
    await spec.index_docs([])
    assert not spec._index_loaded
    # The configured model_id must be forwarded to the client.
    mock_client.create_index.assert_awaited_once_with(
        "test", [], model_id="moss-minilm"
    )
@pytest.mark.asyncio
async def test_query(mock_client):
    """query lazily loads the index and formats score/source/page/content."""
    spec = MossToolSpec(client=mock_client, index_name="test")
    report = await spec.query("mock")
    # _index_loaded starts False, so the first query must load the index.
    mock_client.load_index.assert_awaited_once()
    # Formatted output must surface score, source, query echo and page.
    for fragment in ("10.00", "mock_source", "mock", "33"):
        assert fragment in report
@pytest.mark.asyncio
async def test_query_passes_options_to_client(mock_client):
    """Configured QueryOptions reach client.query along with index and text."""
    spec = MossToolSpec(
        client=mock_client,
        index_name="test",
        query_options=QueryOptions(top_k=7, alpha=0.3),
    )
    await spec.query("something")
    call = mock_client.query.call_args
    assert call.args[0] == "test"
    assert call.args[1] == "something"
    # Third positional argument is the translated MossQueryOptions object.
    assert call.args[2] is not None
@pytest.mark.asyncio
async def test_query_skips_load_when_already_loaded(mock_client):
    """A pre-loaded index is not re-loaded on subsequent queries."""
    spec = MossToolSpec(client=mock_client, index_name="test")
    spec._index_loaded = True
    await spec.query("mock")
    mock_client.load_index.assert_not_awaited()
@pytest.mark.asyncio
async def test_list_indexes(mock_client):
    """list_indexes renders every index with its doc count and status."""
    spec = MossToolSpec(client=mock_client, index_name="test")
    listing = await spec.list_indexes()
    mock_client.list_indexes.assert_awaited_once()
    # Header plus both fixture indexes with their counts and status.
    for expected in ("index_a", "index_b", "5", "12", "ready", "Available indexes:"):
        assert expected in listing
@pytest.mark.asyncio
async def test_list_indexes_empty(mock_client):
    """An empty project produces the fixed 'No indexes found.' message."""
    mock_client.list_indexes = AsyncMock(return_value=[])
    spec = MossToolSpec(client=mock_client, index_name="test")
    assert await spec.list_indexes() == "No indexes found."
@pytest.mark.asyncio
async def test_list_indexes_formatting(mock_client):
    """Verify list_indexes returns properly formatted output with all index details."""
    spec = MossToolSpec(client=mock_client, index_name="test")
    listing = await spec.list_indexes()
    assert "Available indexes:" in listing
    expected_fragments = (
        "index_a",
        "docs: 5",
        "status: ready",
        "index_b",
        "docs: 12",
    )
    for fragment in expected_fragments:
        assert fragment in listing
    # Header plus one line per index means multi-line output.
    assert "\n" in listing
@pytest.mark.asyncio
async def test_delete_index(mock_client):
    """Deleting another index reports it without touching the loaded flag."""
    spec = MossToolSpec(client=mock_client, index_name="test")
    message = await spec.delete_index("other_index")
    mock_client.delete_index.assert_awaited_once_with("other_index")
    assert "other_index" in message
    # The active index "test" was untouched, so no reset happens.
    assert not spec._index_loaded
@pytest.mark.asyncio
async def test_delete_current_index_resets_loaded_state(mock_client):
    """Deleting the active index flips _index_loaded back to False."""
    spec = MossToolSpec(client=mock_client, index_name="test")
    spec._index_loaded = True
    message = await spec.delete_index("test")
    assert not spec._index_loaded
    # Confirmation message names the index and the action.
    assert "test" in message
    assert "deleted" in message
def test_query_options_application():
    """Custom QueryOptions values are copied onto the tool spec."""
    spec = MossToolSpec(
        client=MagicMock(),
        index_name="test",
        query_options=QueryOptions(top_k=10, alpha=0.8, model_id="custom-model"),
    )
    assert spec.top_k == 10
    assert spec.alpha == 0.8
    assert spec.model_id == "custom-model"
def test_initialization_validation():
    """Out-of-range alpha or top_k is rejected at construction time."""
    client = MagicMock()
    with pytest.raises(ValueError, match="alpha must be between 0 and 1"):
        MossToolSpec(client, "test", query_options=QueryOptions(alpha=2))
    with pytest.raises(ValueError, match="top_k must be greater than 0"):
        MossToolSpec(client, "test", query_options=QueryOptions(top_k=-2))
@pytest.mark.asyncio
async def test_delete_index_return_message(mock_client):
    """Verify delete_index returns the exact confirmation message."""
    spec = MossToolSpec(client=mock_client, index_name="test")
    message = await spec.delete_index("remove_me")
    assert message == "Index 'remove_me' has been deleted."
    mock_client.delete_index.assert_awaited_once_with("remove_me")
| {
"repo_id": "run-llama/llama_index",
"file_path": "llama-index-integrations/tools/llama-index-tools-moss/tests/test_base.py",
"license": "MIT License",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.