language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | falconry__falcon | tests/test_redirects.py | {
"start": 456,
"end": 1087
} | class ____:
# NOTE(kgriffs): You wouldn't necessarily use these types of
# http methods with these types of redirects; this is only
# done to simplify testing.
def on_get(self, req, resp):
raise falcon.HTTPMovedPermanently('/moved/perm')
def on_post(self, req, resp):
raise falcon.HTTPFound('/found')
def on_put(self, req, resp):
raise falcon.HTTPSeeOther('/see/other')
def on_delete(self, req, resp):
raise falcon.HTTPTemporaryRedirect('/tmp/redirect')
def on_head(self, req, resp):
raise falcon.HTTPPermanentRedirect('/perm/redirect')
| RedirectingResource |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 320379,
"end": 324439
} | class ____:
def _check(self, spec, wanted):
dt = np.dtype(wanted)
actual = _dtype_from_pep3118(spec)
assert_equal(actual, dt,
err_msg=f"spec {spec!r} != dtype {wanted!r}")
def test_native_padding(self):
align = np.dtype('i').alignment
for j in range(8):
if j == 0:
s = 'bi'
else:
s = 'b%dxi' % j
self._check('@' + s, {'f0': ('i1', 0),
'f1': ('i', align * (1 + j // align))})
self._check('=' + s, {'f0': ('i1', 0),
'f1': ('i', 1 + j)})
def test_native_padding_2(self):
# Native padding should work also for structs and sub-arrays
self._check('x3T{xi}', {'f0': (({'f0': ('i', 4)}, (3,)), 4)})
self._check('^x3T{xi}', {'f0': (({'f0': ('i', 1)}, (3,)), 1)})
def test_trailing_padding(self):
# Trailing padding should be included, *and*, the item size
# should match the alignment if in aligned mode
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return align * (1 + (n - 1) // align)
base = {"formats": ['i'], "names": ['f0']}
self._check('ix', dict(itemsize=aligned(size + 1), **base))
self._check('ixx', dict(itemsize=aligned(size + 2), **base))
self._check('ixxx', dict(itemsize=aligned(size + 3), **base))
self._check('ixxxx', dict(itemsize=aligned(size + 4), **base))
self._check('i7x', dict(itemsize=aligned(size + 7), **base))
self._check('^ix', dict(itemsize=size + 1, **base))
self._check('^ixx', dict(itemsize=size + 2, **base))
self._check('^ixxx', dict(itemsize=size + 3, **base))
self._check('^ixxxx', dict(itemsize=size + 4, **base))
self._check('^i7x', dict(itemsize=size + 7, **base))
def test_native_padding_3(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'),
('sub', np.dtype('b,i')), ('c', 'i')],
align=True)
self._check("T{b:a:xxxi:b:T{b:f0:=i:f1:}:sub:xxxi:c:}", dt)
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b'), ('d', 'b'),
('e', 'b'), ('sub', np.dtype('b,i', align=True))])
self._check("T{b:a:=i:b:b:c:b:d:b:e:T{b:f0:xxxi:f1:}:sub:}", dt)
def test_padding_with_array_inside_struct(self):
dt = np.dtype(
[('a', 'b'), ('b', 'i'), ('c', 'b', (3,)),
('d', 'i')],
align=True)
self._check("T{b:a:xxxi:b:3b:c:xi:d:}", dt)
def test_byteorder_inside_struct(self):
# The byte order after @T{=i} should be '=', not '@'.
# Check this by noting the absence of native alignment.
self._check('@T{^i}xi', {'f0': ({'f0': ('i', 0)}, 0),
'f1': ('i', 5)})
def test_intra_padding(self):
# Natively aligned sub-arrays may require some internal padding
align = np.dtype('i').alignment
size = np.dtype('i').itemsize
def aligned(n):
return (align * (1 + (n - 1) // align))
self._check('(3)T{ix}', ({
"names": ['f0'],
"formats": ['i'],
"offsets": [0],
"itemsize": aligned(size + 1)
}, (3,)))
def test_char_vs_string(self):
dt = np.dtype('c')
self._check('c', dt)
dt = np.dtype([('f0', 'S1', (4,)), ('f1', 'S4')])
self._check('4c4s', dt)
def test_field_order(self):
# gh-9053 - previously, we relied on dictionary key order
self._check("(0)I:a:f:b:", [('a', 'I', (0,)), ('b', 'f')])
self._check("(0)I:b:f:a:", [('b', 'I', (0,)), ('a', 'f')])
def test_unnamed_fields(self):
self._check('ii', [('f0', 'i'), ('f1', 'i')])
self._check('ii:f0:', [('f1', 'i'), ('f0', 'i')])
self._check('i', 'i')
self._check('i:f0:', [('f0', 'i')])
| TestPEP3118Dtype |
python | redis__redis-py | redis/asyncio/multidb/client.py | {
"start": 976,
"end": 12420
} | class ____(AsyncRedisModuleCommands, AsyncCoreCommands):
"""
Client that operates on multiple logical Redis databases.
Should be used in Active-Active database setups.
"""
def __init__(self, config: MultiDbConfig):
self._databases = config.databases()
self._health_checks = (
config.default_health_checks()
if not config.health_checks
else config.health_checks
)
self._health_check_interval = config.health_check_interval
self._health_check_policy: HealthCheckPolicy = config.health_check_policy.value(
config.health_check_probes, config.health_check_delay
)
self._failure_detectors = (
config.default_failure_detectors()
if not config.failure_detectors
else config.failure_detectors
)
self._failover_strategy = (
config.default_failover_strategy()
if config.failover_strategy is None
else config.failover_strategy
)
self._failover_strategy.set_databases(self._databases)
self._auto_fallback_interval = config.auto_fallback_interval
self._event_dispatcher = config.event_dispatcher
self._command_retry = config.command_retry
self._command_retry.update_supported_errors([ConnectionRefusedError])
self.command_executor = DefaultCommandExecutor(
failure_detectors=self._failure_detectors,
databases=self._databases,
command_retry=self._command_retry,
failover_strategy=self._failover_strategy,
failover_attempts=config.failover_attempts,
failover_delay=config.failover_delay,
event_dispatcher=self._event_dispatcher,
auto_fallback_interval=self._auto_fallback_interval,
)
self.initialized = False
self._hc_lock = asyncio.Lock()
self._bg_scheduler = BackgroundScheduler()
self._config = config
self._recurring_hc_task = None
self._hc_tasks = []
self._half_open_state_task = None
async def __aenter__(self: "MultiDBClient") -> "MultiDBClient":
if not self.initialized:
await self.initialize()
return self
async def __aexit__(self, exc_type, exc_value, traceback):
if self._recurring_hc_task:
self._recurring_hc_task.cancel()
if self._half_open_state_task:
self._half_open_state_task.cancel()
for hc_task in self._hc_tasks:
hc_task.cancel()
async def initialize(self):
"""
Perform initialization of databases to define their initial state.
"""
async def raise_exception_on_failed_hc(error):
raise error
# Initial databases check to define initial state
await self._check_databases_health(on_error=raise_exception_on_failed_hc)
# Starts recurring health checks on the background.
self._recurring_hc_task = asyncio.create_task(
self._bg_scheduler.run_recurring_async(
self._health_check_interval,
self._check_databases_health,
)
)
is_active_db_found = False
for database, weight in self._databases:
# Set on state changed callback for each circuit.
database.circuit.on_state_changed(self._on_circuit_state_change_callback)
# Set states according to a weights and circuit state
if database.circuit.state == CBState.CLOSED and not is_active_db_found:
await self.command_executor.set_active_database(database)
is_active_db_found = True
if not is_active_db_found:
raise NoValidDatabaseException(
"Initial connection failed - no active database found"
)
self.initialized = True
def get_databases(self) -> Databases:
"""
Returns a sorted (by weight) list of all databases.
"""
return self._databases
async def set_active_database(self, database: AsyncDatabase) -> None:
"""
Promote one of the existing databases to become an active.
"""
exists = None
for existing_db, _ in self._databases:
if existing_db == database:
exists = True
break
if not exists:
raise ValueError("Given database is not a member of database list")
await self._check_db_health(database)
if database.circuit.state == CBState.CLOSED:
highest_weighted_db, _ = self._databases.get_top_n(1)[0]
await self.command_executor.set_active_database(database)
return
raise NoValidDatabaseException(
"Cannot set active database, database is unhealthy"
)
async def add_database(self, database: AsyncDatabase):
"""
Adds a new database to the database list.
"""
for existing_db, _ in self._databases:
if existing_db == database:
raise ValueError("Given database already exists")
await self._check_db_health(database)
highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
self._databases.add(database, database.weight)
await self._change_active_database(database, highest_weighted_db)
async def _change_active_database(
self, new_database: AsyncDatabase, highest_weight_database: AsyncDatabase
):
if (
new_database.weight > highest_weight_database.weight
and new_database.circuit.state == CBState.CLOSED
):
await self.command_executor.set_active_database(new_database)
async def remove_database(self, database: AsyncDatabase):
"""
Removes a database from the database list.
"""
weight = self._databases.remove(database)
highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
if (
highest_weight <= weight
and highest_weighted_db.circuit.state == CBState.CLOSED
):
await self.command_executor.set_active_database(highest_weighted_db)
async def update_database_weight(self, database: AsyncDatabase, weight: float):
"""
Updates a database from the database list.
"""
exists = None
for existing_db, _ in self._databases:
if existing_db == database:
exists = True
break
if not exists:
raise ValueError("Given database is not a member of database list")
highest_weighted_db, highest_weight = self._databases.get_top_n(1)[0]
self._databases.update_weight(database, weight)
database.weight = weight
await self._change_active_database(database, highest_weighted_db)
def add_failure_detector(self, failure_detector: AsyncFailureDetector):
"""
Adds a new failure detector to the database.
"""
self._failure_detectors.append(failure_detector)
async def add_health_check(self, healthcheck: HealthCheck):
"""
Adds a new health check to the database.
"""
async with self._hc_lock:
self._health_checks.append(healthcheck)
async def execute_command(self, *args, **options):
"""
Executes a single command and return its result.
"""
if not self.initialized:
await self.initialize()
return await self.command_executor.execute_command(*args, **options)
def pipeline(self):
"""
Enters into pipeline mode of the client.
"""
return Pipeline(self)
async def transaction(
self,
func: Callable[["Pipeline"], Union[Any, Awaitable[Any]]],
*watches: KeyT,
shard_hint: Optional[str] = None,
value_from_callable: bool = False,
watch_delay: Optional[float] = None,
):
"""
Executes callable as transaction.
"""
if not self.initialized:
await self.initialize()
return await self.command_executor.execute_transaction(
func,
*watches,
shard_hint=shard_hint,
value_from_callable=value_from_callable,
watch_delay=watch_delay,
)
async def pubsub(self, **kwargs):
"""
Return a Publish/Subscribe object. With this object, you can
subscribe to channels and listen for messages that get published to
them.
"""
if not self.initialized:
await self.initialize()
return PubSub(self, **kwargs)
async def _check_databases_health(
self,
on_error: Optional[Callable[[Exception], Coroutine[Any, Any, None]]] = None,
):
"""
Runs health checks as a recurring task.
Runs health checks against all databases.
"""
try:
self._hc_tasks = [
asyncio.create_task(self._check_db_health(database))
for database, _ in self._databases
]
results = await asyncio.wait_for(
asyncio.gather(
*self._hc_tasks,
return_exceptions=True,
),
timeout=self._health_check_interval,
)
except asyncio.TimeoutError:
raise asyncio.TimeoutError(
"Health check execution exceeds health_check_interval"
)
for result in results:
if isinstance(result, UnhealthyDatabaseException):
unhealthy_db = result.database
unhealthy_db.circuit.state = CBState.OPEN
logger.exception(
"Health check failed, due to exception",
exc_info=result.original_exception,
)
if on_error:
on_error(result.original_exception)
async def _check_db_health(self, database: AsyncDatabase) -> bool:
"""
Runs health checks on the given database until first failure.
"""
# Health check will setup circuit state
is_healthy = await self._health_check_policy.execute(
self._health_checks, database
)
if not is_healthy:
if database.circuit.state != CBState.OPEN:
database.circuit.state = CBState.OPEN
return is_healthy
elif is_healthy and database.circuit.state != CBState.CLOSED:
database.circuit.state = CBState.CLOSED
return is_healthy
def _on_circuit_state_change_callback(
self, circuit: CircuitBreaker, old_state: CBState, new_state: CBState
):
loop = asyncio.get_running_loop()
if new_state == CBState.HALF_OPEN:
self._half_open_state_task = asyncio.create_task(
self._check_db_health(circuit.database)
)
return
if old_state == CBState.CLOSED and new_state == CBState.OPEN:
loop.call_later(DEFAULT_GRACE_PERIOD, _half_open_circuit, circuit)
async def aclose(self):
if self.command_executor.active_database:
await self.command_executor.active_database.client.aclose()
def _half_open_circuit(circuit: CircuitBreaker):
circuit.state = CBState.HALF_OPEN
| MultiDBClient |
python | google__jax | tests/api_test.py | {
"start": 258597,
"end": 259074
} | class ____(jtu.JaxTestCase):
def test_autodidax_smoketest(self):
autodidax_file = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'docs',
'autodidax.py')
if not os.path.exists(autodidax_file):
self.skipTest("Cannot locate autodidax.py")
spec = importlib.util.spec_from_file_location('autodidax', autodidax_file)
autodidax_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(autodidax_module)
| AutodidaxTest |
python | django__django | tests/postgres_tests/test_aggregates.py | {
"start": 26962,
"end": 34211
} | class ____(PostgreSQLTestCase):
@classmethod
def setUpTestData(cls):
StatTestModel.objects.create(
int1=1,
int2=3,
related_field=AggregateTestModel.objects.create(integer_field=0),
)
StatTestModel.objects.create(
int1=2,
int2=2,
related_field=AggregateTestModel.objects.create(integer_field=1),
)
StatTestModel.objects.create(
int1=3,
int2=1,
related_field=AggregateTestModel.objects.create(integer_field=2),
)
# Tests for base class (StatAggregate)
def test_missing_arguments_raises_exception(self):
with self.assertRaisesMessage(ValueError, "Both y and x must be provided."):
StatAggregate(x=None, y=None)
def test_correct_source_expressions(self):
func = StatAggregate(x="test", y=13)
self.assertIsInstance(func.source_expressions[0], Value)
self.assertIsInstance(func.source_expressions[1], F)
def test_alias_is_required(self):
class SomeFunc(StatAggregate):
function = "TEST"
with self.assertRaisesMessage(TypeError, "Complex aggregates require an alias"):
StatTestModel.objects.aggregate(SomeFunc(y="int2", x="int1"))
# Test aggregates
def test_empty_result_set(self):
StatTestModel.objects.all().delete()
tests = [
(Corr(y="int2", x="int1"), None),
(CovarPop(y="int2", x="int1"), None),
(CovarPop(y="int2", x="int1", sample=True), None),
(RegrAvgX(y="int2", x="int1"), None),
(RegrAvgY(y="int2", x="int1"), None),
(RegrCount(y="int2", x="int1"), 0),
(RegrIntercept(y="int2", x="int1"), None),
(RegrR2(y="int2", x="int1"), None),
(RegrSlope(y="int2", x="int1"), None),
(RegrSXX(y="int2", x="int1"), None),
(RegrSXY(y="int2", x="int1"), None),
(RegrSYY(y="int2", x="int1"), None),
]
for aggregation, expected_result in tests:
with self.subTest(aggregation=aggregation):
# Empty result with non-execution optimization.
with self.assertNumQueries(0):
values = StatTestModel.objects.none().aggregate(
aggregation=aggregation,
)
self.assertEqual(values, {"aggregation": expected_result})
# Empty result when query must be executed.
with self.assertNumQueries(1):
values = StatTestModel.objects.aggregate(
aggregation=aggregation,
)
self.assertEqual(values, {"aggregation": expected_result})
def test_default_argument(self):
StatTestModel.objects.all().delete()
tests = [
(Corr(y="int2", x="int1", default=0), 0),
(CovarPop(y="int2", x="int1", default=0), 0),
(CovarPop(y="int2", x="int1", sample=True, default=0), 0),
(RegrAvgX(y="int2", x="int1", default=0), 0),
(RegrAvgY(y="int2", x="int1", default=0), 0),
# RegrCount() doesn't support the default argument.
(RegrIntercept(y="int2", x="int1", default=0), 0),
(RegrR2(y="int2", x="int1", default=0), 0),
(RegrSlope(y="int2", x="int1", default=0), 0),
(RegrSXX(y="int2", x="int1", default=0), 0),
(RegrSXY(y="int2", x="int1", default=0), 0),
(RegrSYY(y="int2", x="int1", default=0), 0),
]
for aggregation, expected_result in tests:
with self.subTest(aggregation=aggregation):
# Empty result with non-execution optimization.
with self.assertNumQueries(0):
values = StatTestModel.objects.none().aggregate(
aggregation=aggregation,
)
self.assertEqual(values, {"aggregation": expected_result})
# Empty result when query must be executed.
with self.assertNumQueries(1):
values = StatTestModel.objects.aggregate(
aggregation=aggregation,
)
self.assertEqual(values, {"aggregation": expected_result})
def test_corr_general(self):
values = StatTestModel.objects.aggregate(corr=Corr(y="int2", x="int1"))
self.assertEqual(values, {"corr": -1.0})
def test_covar_pop_general(self):
values = StatTestModel.objects.aggregate(covarpop=CovarPop(y="int2", x="int1"))
self.assertEqual(values, {"covarpop": Approximate(-0.66, places=1)})
def test_covar_pop_sample(self):
values = StatTestModel.objects.aggregate(
covarpop=CovarPop(y="int2", x="int1", sample=True)
)
self.assertEqual(values, {"covarpop": -1.0})
def test_regr_avgx_general(self):
values = StatTestModel.objects.aggregate(regravgx=RegrAvgX(y="int2", x="int1"))
self.assertEqual(values, {"regravgx": 2.0})
def test_regr_avgy_general(self):
values = StatTestModel.objects.aggregate(regravgy=RegrAvgY(y="int2", x="int1"))
self.assertEqual(values, {"regravgy": 2.0})
def test_regr_count_general(self):
values = StatTestModel.objects.aggregate(
regrcount=RegrCount(y="int2", x="int1")
)
self.assertEqual(values, {"regrcount": 3})
def test_regr_count_default(self):
msg = "RegrCount does not allow default."
with self.assertRaisesMessage(TypeError, msg):
RegrCount(y="int2", x="int1", default=0)
def test_regr_intercept_general(self):
values = StatTestModel.objects.aggregate(
regrintercept=RegrIntercept(y="int2", x="int1")
)
self.assertEqual(values, {"regrintercept": 4})
def test_regr_r2_general(self):
values = StatTestModel.objects.aggregate(regrr2=RegrR2(y="int2", x="int1"))
self.assertEqual(values, {"regrr2": 1})
def test_regr_slope_general(self):
values = StatTestModel.objects.aggregate(
regrslope=RegrSlope(y="int2", x="int1")
)
self.assertEqual(values, {"regrslope": -1})
def test_regr_sxx_general(self):
values = StatTestModel.objects.aggregate(regrsxx=RegrSXX(y="int2", x="int1"))
self.assertEqual(values, {"regrsxx": 2.0})
def test_regr_sxy_general(self):
values = StatTestModel.objects.aggregate(regrsxy=RegrSXY(y="int2", x="int1"))
self.assertEqual(values, {"regrsxy": -2.0})
def test_regr_syy_general(self):
values = StatTestModel.objects.aggregate(regrsyy=RegrSYY(y="int2", x="int1"))
self.assertEqual(values, {"regrsyy": 2.0})
def test_regr_avgx_with_related_obj_and_number_as_argument(self):
"""
This is more complex test to check if JOIN on field and
number as argument works as expected.
"""
values = StatTestModel.objects.aggregate(
complex_regravgx=RegrAvgX(y=5, x="related_field__integer_field")
)
self.assertEqual(values, {"complex_regravgx": 1.0})
| TestStatisticsAggregate |
python | celery__celery | t/unit/app/test_beat.py | {
"start": 22906,
"end": 30185
} | class ____:
@patch('os.remove')
def test_remove_db(self, remove):
s = create_persistent_scheduler()[0](app=self.app,
schedule_filename='schedule')
s._remove_db()
remove.assert_has_calls(
[call('schedule' + suffix) for suffix in s.known_suffixes]
)
err = OSError()
err.errno = errno.ENOENT
remove.side_effect = err
s._remove_db()
err.errno = errno.EPERM
with pytest.raises(OSError):
s._remove_db()
def test_create_schedule_corrupted(self):
"""
Test that any decoding errors that might happen when opening beat-schedule.db are caught
"""
s = create_persistent_scheduler()[0](app=self.app,
schedule_filename='schedule')
s._store = MagicMock()
s._destroy_open_corrupted_schedule = Mock()
s._destroy_open_corrupted_schedule.return_value = MagicMock()
# self._store['entries'] will throw a KeyError
s._store.__getitem__.side_effect = KeyError()
# then, when _create_schedule tries to reset _store['entries'], throw another error
expected_error = UnicodeDecodeError("ascii", b"ordinal not in range(128)", 0, 0, "")
s._store.__setitem__.side_effect = expected_error
s._create_schedule()
s._destroy_open_corrupted_schedule.assert_called_with(expected_error)
def test_create_schedule_corrupted_dbm_error(self):
"""
Test that any dbm.error that might happen when opening beat-schedule.db are caught
"""
s = create_persistent_scheduler()[0](app=self.app,
schedule_filename='schedule')
s._store = MagicMock()
s._destroy_open_corrupted_schedule = Mock()
s._destroy_open_corrupted_schedule.return_value = MagicMock()
# self._store['entries'] = {} will throw a KeyError
s._store.__getitem__.side_effect = KeyError()
# then, when _create_schedule tries to reset _store['entries'], throw another error, specifically dbm.error
expected_error = dbm.error[0]()
s._store.__setitem__.side_effect = expected_error
s._create_schedule()
s._destroy_open_corrupted_schedule.assert_called_with(expected_error)
def test_create_schedule_corrupted_pickle_error(self):
"""
Test that any UnpicklingError that might happen when opening beat-schedule.db is caught
"""
s = create_persistent_scheduler()[0](app=self.app,
schedule_filename='schedule')
s._store = MagicMock()
s._destroy_open_corrupted_schedule = Mock()
s._destroy_open_corrupted_schedule.return_value = MagicMock()
# self._store['entries'] = {} will throw a pickle.UnpicklingError
s._store.__getitem__.side_effect = pickle.UnpicklingError("test")
# then, when _create_schedule tries to reset _store['entries'],
# throw another error, specifically pickle.UnpicklingError
expected_error = pickle.UnpicklingError("test")
s._store.__setitem__.side_effect = expected_error
s._create_schedule()
s._destroy_open_corrupted_schedule.assert_called_with(expected_error)
def test_create_schedule_missing_entries(self):
"""
Test that if _create_schedule can't find the key "entries" in _store it will recreate it
"""
s = create_persistent_scheduler()[0](app=self.app, schedule_filename="schedule")
s._store = MagicMock()
# self._store['entries'] will throw a KeyError
s._store.__getitem__.side_effect = TypeError()
s._create_schedule()
s._store.__setitem__.assert_called_with("entries", {})
def test_setup_schedule(self):
s = create_persistent_scheduler()[0](app=self.app,
schedule_filename='schedule')
opens = s.persistence.open = Mock()
s._remove_db = Mock()
def effect(*args, **kwargs):
if opens.call_count > 1:
return s.sh
raise OSError()
opens.side_effect = effect
s.setup_schedule()
s._remove_db.assert_called_with()
s._store = {'__version__': 1}
s.setup_schedule()
s._store.clear = Mock()
op = s.persistence.open = Mock()
op.return_value = s._store
s._store['tz'] = 'FUNKY'
s.setup_schedule()
op.assert_called_with(s.schedule_filename, writeback=True)
s._store.clear.assert_called_with()
s._store['utc_enabled'] = False
s._store.clear = Mock()
s.setup_schedule()
s._store.clear.assert_called_with()
def test_get_schedule(self):
s = create_persistent_scheduler()[0](
schedule_filename='schedule', app=self.app,
)
s._store = {'entries': {}}
s.schedule = {'foo': 'bar'}
assert s.schedule == {'foo': 'bar'}
assert s._store['entries'] == s.schedule
def test_run_all_due_tasks_after_restart(self):
scheduler_class, shelve = create_persistent_scheduler_w_call_logging()
shelve['tz'] = 'UTC'
shelve['utc_enabled'] = True
shelve['__version__'] = __version__
cur_seconds = 20
def now_func():
return datetime(2018, 1, 1, 1, 11, cur_seconds)
app_schedule = {
'first_missed': {'schedule': crontab(
minute='*/10', nowfun=now_func), 'task': 'first_missed'},
'second_missed': {'schedule': crontab(
minute='*/1', nowfun=now_func), 'task': 'second_missed'},
'non_missed': {'schedule': crontab(
minute='*/13', nowfun=now_func), 'task': 'non_missed'}
}
shelve['entries'] = {
'first_missed': beat.ScheduleEntry(
'first_missed', 'first_missed',
last_run_at=now_func() - timedelta(minutes=2),
total_run_count=10,
app=self.app,
schedule=app_schedule['first_missed']['schedule']),
'second_missed': beat.ScheduleEntry(
'second_missed', 'second_missed',
last_run_at=now_func() - timedelta(minutes=2),
total_run_count=10,
app=self.app,
schedule=app_schedule['second_missed']['schedule']),
'non_missed': beat.ScheduleEntry(
'non_missed', 'non_missed',
last_run_at=now_func() - timedelta(minutes=2),
total_run_count=10,
app=self.app,
schedule=app_schedule['non_missed']['schedule']),
}
self.app.conf.beat_schedule = app_schedule
scheduler = scheduler_class(self.app)
max_iter_number = 5
for i in range(max_iter_number):
delay = scheduler.tick()
if delay > 0:
break
assert {'first_missed', 'second_missed'} == {
item['task'] for item in scheduler.sent}
# ensure next call on the beginning of next min
assert abs(60 - cur_seconds - delay) < 1
| test_PersistentScheduler |
python | wandb__wandb | wandb/sdk/artifacts/artifact_state.py | {
"start": 83,
"end": 272
} | class ____(Enum):
PENDING = "PENDING"
COMMITTED = "COMMITTED"
DELETED = "DELETED"
GARBAGE_COLLECTED = "GARBAGE_COLLECTED"
PENDING_DELETION = "PENDING_DELETION"
| ArtifactState |
python | facebook__pyre-check | client/language_server/code_navigation_request.py | {
"start": 1722,
"end": 1970
} | class ____:
client_id: str
def to_json(self) -> List[object]:
return [
"DisposeClient",
{
"client_id": self.client_id,
},
]
@dataclasses.dataclass(frozen=True)
| DisposeClient |
python | doocs__leetcode | solution/0900-0999/0901.Online Stock Span/Solution.py | {
"start": 0,
"end": 390
} | class ____:
def __init__(self):
self.stk = []
def next(self, price: int) -> int:
cnt = 1
while self.stk and self.stk[-1][0] <= price:
cnt += self.stk.pop()[1]
self.stk.append((price, cnt))
return cnt
# Your StockSpanner object will be instantiated and called as such:
# obj = StockSpanner()
# param_1 = obj.next(price)
| StockSpanner |
python | PyCQA__pylint | tests/functional/ext/docparams/parameter/missing_param_doc_required_Numpy.py | {
"start": 731,
"end": 2749
} | class ____:
def test_missing_method_params_in_numpy_docstring( # [missing-param-doc, missing-type-doc]
self, x, y
):
"""Example of a class method with missing parameter documentation in
the Numpy style docstring
missing parameter documentation
Parameters
----------
x:
bla
"""
def test_existing_func_params_in_numpy_docstring(xarg, yarg, zarg, warg):
"""Example of a function with correctly documented parameters and
return values (Numpy style)
Parameters
----------
xarg: int
bla xarg
yarg: my.qualified.type
bla yarg
zarg: int
bla zarg
warg: my.qualified.type
bla warg
Returns
-------
float
sum
"""
return xarg + yarg
def test_wrong_name_of_func_params_in_numpy_docstring( # [missing-param-doc, missing-type-doc, differing-param-doc, differing-type-doc]
xarg, yarg, zarg
):
"""Example of functions with inconsistent parameter names in the
signature and in the Numpy style documentation
Parameters
----------
xarg1: int
bla xarg
yarg: float
bla yarg
zarg1: str
bla zarg
"""
return xarg + yarg
def test_wrong_name_of_func_params_in_numpy_docstring_two( # [differing-param-doc, differing-type-doc]
xarg, yarg
):
"""Example of functions with inconsistent parameter names in the
signature and in the Numpy style documentation
Parameters
----------
yarg1: float
bla yarg
For the other parameters, see bla.
"""
return xarg + yarg
def test_see_sentence_for_func_params_in_numpy_docstring(xarg, yarg):
"""Example for the usage of "For the other parameters, see" to avoid
too many repetitions, e.g. in functions or methods adhering to a
given interface (Numpy style)
Parameters
----------
yarg: float
bla yarg
For the other parameters, see :func:`bla`
"""
return xarg + yarg
| Foo |
python | scipy__scipy | scipy/stats/tests/test_continuous.py | {
"start": 8946,
"end": 41972
} | class ____:
@pytest.mark.fail_slow(60) # need to break up check_moment_funcs
@settings(max_examples=20)
@pytest.mark.parametrize('family', families)
@given(data=strategies.data(), seed=strategies.integers(min_value=0))
def test_support_moments_sample(self, family, data, seed):
rng = np.random.default_rng(seed)
# relative proportions of valid, endpoint, out of bounds, and NaN params
proportions = (0.7, 0.1, 0.1, 0.1)
tmp = draw_distribution_from_family(family, data, rng, proportions)
dist, x, y, p, logp, result_shape, x_result_shape, xy_result_shape = tmp
sample_shape = data.draw(npst.array_shapes(min_dims=0, min_side=0,
max_side=20))
with np.errstate(invalid='ignore', divide='ignore'):
check_support(dist)
check_moment_funcs(dist, result_shape) # this needs to get split up
check_sample_shape_NaNs(dist, 'sample', sample_shape, result_shape, rng)
qrng = qmc.Halton(d=1, seed=rng)
check_sample_shape_NaNs(dist, 'sample', sample_shape, result_shape, qrng)
@pytest.mark.fail_slow(10)
@pytest.mark.parametrize('family', families)
@pytest.mark.parametrize('func, methods, arg',
[('entropy', {'log/exp', 'quadrature'}, None),
('logentropy', {'log/exp', 'quadrature'}, None),
('median', {'icdf'}, None),
('mode', {'optimization'}, None),
('mean', {'cache'}, None),
('variance', {'cache'}, None),
('skewness', {'cache'}, None),
('kurtosis', {'cache'}, None),
('pdf', {'log/exp'}, 'x'),
('logpdf', {'log/exp'}, 'x'),
('logcdf', {'log/exp', 'complement', 'quadrature'}, 'x'),
('cdf', {'log/exp', 'complement', 'quadrature'}, 'x'),
('logccdf', {'log/exp', 'complement', 'quadrature'}, 'x'),
('ccdf', {'log/exp', 'complement', 'quadrature'}, 'x'),
('ilogccdf', {'complement', 'inversion'}, 'logp'),
('iccdf', {'complement', 'inversion'}, 'p'),
])
@settings(max_examples=20)
@given(data=strategies.data(), seed=strategies.integers(min_value=0))
def test_funcs(self, family, data, seed, func, methods, arg):
if family == Uniform and func == 'mode':
pytest.skip("Mode is not unique; `method`s disagree.")
rng = np.random.default_rng(seed)
# relative proportions of valid, endpoint, out of bounds, and NaN params
proportions = (0.7, 0.1, 0.1, 0.1)
tmp = draw_distribution_from_family(family, data, rng, proportions)
dist, x, y, p, logp, result_shape, x_result_shape, xy_result_shape = tmp
args = {'x': x, 'p': p, 'logp': p}
with np.errstate(invalid='ignore', divide='ignore', over='ignore'):
if arg is None:
check_dist_func(dist, func, None, result_shape, methods)
elif arg in args:
check_dist_func(dist, func, args[arg], x_result_shape, methods)
if func == 'variance':
assert_allclose(dist.standard_deviation()**2, dist.variance())
# invalid and divide are to be expected; maybe look into over
with np.errstate(invalid='ignore', divide='ignore', over='ignore'):
if not isinstance(dist, ShiftedScaledDistribution):
if func == 'cdf':
methods = {'quadrature'}
check_cdf2(dist, False, x, y, xy_result_shape, methods)
check_cdf2(dist, True, x, y, xy_result_shape, methods)
elif func == 'ccdf':
methods = {'addition'}
check_ccdf2(dist, False, x, y, xy_result_shape, methods)
check_ccdf2(dist, True, x, y, xy_result_shape, methods)
def test_plot(self):
try:
import matplotlib.pyplot as plt
except ImportError:
return
X = Uniform(a=0., b=1.)
ax = X.plot()
assert ax == plt.gca()
@pytest.mark.parametrize('method_name', ['cdf', 'ccdf'])
def test_complement_safe(self, method_name):
X = stats.Normal()
X.tol = 1e-12
p = np.asarray([1e-4, 1e-3])
func = getattr(X, method_name)
ifunc = getattr(X, 'i'+method_name)
x = ifunc(p, method='formula')
p1 = func(x, method='complement_safe')
p2 = func(x, method='complement')
assert_equal(p1[1], p2[1])
assert p1[0] != p2[0]
assert_allclose(p1[0], p[0], rtol=X.tol)
@pytest.mark.parametrize('method_name', ['cdf', 'ccdf'])
def test_icomplement_safe(self, method_name):
X = stats.Normal()
X.tol = 1e-12
p = np.asarray([1e-4, 1e-3])
func = getattr(X, method_name)
ifunc = getattr(X, 'i'+method_name)
x1 = ifunc(p, method='complement_safe')
x2 = ifunc(p, method='complement')
assert_equal(x1[1], x2[1])
assert x1[0] != x2[0]
assert_allclose(func(x1[0]), p[0], rtol=X.tol)
def test_subtraction_safe(self):
X = stats.Normal()
X.tol = 1e-12
# Regular subtraction is fine in either tail (and of course, across tails)
x = [-11, -10, 10, 11]
y = [-10, -11, 11, 10]
p0 = X.cdf(x, y, method='quadrature')
p1 = X.cdf(x, y, method='subtraction_safe')
p2 = X.cdf(x, y, method='subtraction')
assert_equal(p2, p1)
assert_allclose(p1, p0, rtol=X.tol)
# Safe subtraction is needed in special cases
x = np.asarray([-1e-20, -1e-21, 1e-20, 1e-21, -1e-20])
y = np.asarray([-1e-21, -1e-20, 1e-21, 1e-20, 1e-20])
p0 = X.pdf(0)*(y-x)
p1 = X.cdf(x, y, method='subtraction_safe')
p2 = X.cdf(x, y, method='subtraction')
assert_equal(p2, 0)
assert_allclose(p1, p0, rtol=X.tol)
def test_logentropy_safe(self):
# simulate an `entropy` calculation over/underflowing with extreme parameters
class _Normal(stats.Normal):
def _entropy_formula(self, **params):
out = np.asarray(super()._entropy_formula(**params))
out[0] = 0
out[-1] = np.inf
return out
X = _Normal(sigma=[1, 2, 3])
with np.errstate(divide='ignore'):
res1 = X.logentropy(method='logexp_safe')
res2 = X.logentropy(method='logexp')
ref = X.logentropy(method='quadrature')
i_fl = [0, -1] # first and last
assert np.isinf(res2[i_fl]).all()
assert res1[1] == res2[1]
# quadrature happens to be perfectly accurate on some platforms
# assert res1[1] != ref[1]
assert_equal(res1[i_fl], ref[i_fl])
def test_logcdf2_safe(self):
# test what happens when 2-arg `cdf` underflows
X = stats.Normal(sigma=[1, 2, 3])
x = [-301, 1, 300]
y = [-300, 2, 301]
with np.errstate(divide='ignore'):
res1 = X.logcdf(x, y, method='logexp_safe')
res2 = X.logcdf(x, y, method='logexp')
ref = X.logcdf(x, y, method='quadrature')
i_fl = [0, -1] # first and last
assert np.isinf(res2[i_fl]).all()
assert res1[1] == res2[1]
# quadrature happens to be perfectly accurate on some platforms
# assert res1[1] != ref[1]
assert_equal(res1[i_fl], ref[i_fl])
@pytest.mark.parametrize('method_name', ['logcdf', 'logccdf'])
def test_logexp_safe(self, method_name):
# test what happens when `cdf`/`ccdf` underflows
X = stats.Normal(sigma=2)
x = [-301, 1] if method_name == 'logcdf' else [301, 1]
func = getattr(X, method_name)
with np.errstate(divide='ignore'):
res1 = func(x, method='logexp_safe')
res2 = func(x, method='logexp')
ref = func(x, method='quadrature')
assert res1[0] == ref[0]
assert res1[0] != res2[0]
assert res1[1] == res2[1]
assert res1[1] != ref[1]
def check_sample_shape_NaNs(dist, fname, sample_shape, result_shape, rng):
full_shape = sample_shape + result_shape
if fname == 'sample':
sample_method = dist.sample
methods = {'inverse_transform'}
if dist._overrides(f'_{fname}_formula') and not isinstance(rng, qmc.QMCEngine):
methods.add('formula')
for method in methods:
res = sample_method(sample_shape, method=method, rng=rng)
valid_parameters = np.broadcast_to(get_valid_parameters(dist),
res.shape)
assert_equal(res.shape, full_shape)
np.testing.assert_equal(res.dtype, dist._dtype)
if full_shape == ():
# NumPy random makes a distinction between a 0d array and a scalar.
# In stats, we consistently turn 0d arrays into scalars, so
# maintain that behavior here. (With Array API arrays, this will
# change.)
assert np.isscalar(res)
assert np.all(np.isfinite(res[valid_parameters]))
assert_equal(res[~valid_parameters], np.nan)
sample1 = sample_method(sample_shape, method=method, rng=42)
sample2 = sample_method(sample_shape, method=method, rng=42)
if not isinstance(dist, DiscreteDistribution):
# The idea is that it's very unlikely that the random sample
# for a randomly chosen seed will match that for seed 42,
# but it is not so unlikely if `dist` is a discrete distribution.
assert not np.any(np.equal(res, sample1))
assert_equal(sample1, sample2)
def check_support(dist):
a, b = dist.support()
check_nans_and_edges(dist, 'support', None, a)
check_nans_and_edges(dist, 'support', None, b)
assert a.shape == dist._shape
assert b.shape == dist._shape
assert a.dtype == dist._dtype
assert b.dtype == dist._dtype
def check_dist_func(dist, fname, arg, result_shape, methods):
# Check that all computation methods of all distribution functions agree
# with one another, effectively testing the correctness of the generic
# computation methods and confirming the consistency of specific
# distributions with their pdf/logpdf.
args = tuple() if arg is None else (arg,)
methods = methods.copy()
if "cache" in methods:
# If "cache" is specified before the value has been evaluated, it
# raises an error. After the value is evaluated, it will succeed.
with pytest.raises(NotImplementedError):
getattr(dist, fname)(*args, method="cache")
ref = getattr(dist, fname)(*args)
check_nans_and_edges(dist, fname, arg, ref)
# Remove this after fixing `draw`
tol_override = {'atol': 1e-15}
# Mean can be 0, which makes logmean -inf.
if fname in {'logmean', 'mean', 'logskewness', 'skewness'}:
tol_override = {'atol': 1e-15}
elif fname in {'mode'}:
# can only expect about half of machine precision for optimization
# because math
tol_override = {'atol': 1e-6}
elif fname in {'logcdf'}: # gh-22276
tol_override = {'rtol': 2e-7}
if dist._overrides(f'_{fname}_formula'):
methods.add('formula')
np.testing.assert_equal(ref.shape, result_shape)
# Until we convert to array API, let's do the familiar thing:
# 0d things are scalars, not arrays
if result_shape == tuple():
assert np.isscalar(ref)
for method in methods:
res = getattr(dist, fname)(*args, method=method)
if 'log' in fname:
np.testing.assert_allclose(np.exp(res), np.exp(ref),
**tol_override)
else:
np.testing.assert_allclose(res, ref, **tol_override)
# for now, make sure dtypes are consistent; later, we can check whether
# they are correct.
np.testing.assert_equal(res.dtype, ref.dtype)
np.testing.assert_equal(res.shape, result_shape)
if result_shape == tuple():
assert np.isscalar(res)
def check_cdf2(dist, log, x, y, result_shape, methods):
# Specialized test for 2-arg cdf since the interface is a bit different
# from the other methods. Here, we'll use 1-arg cdf as a reference, and
# since we have already checked 1-arg cdf in `check_nans_and_edges`, this
# checks the equivalent of both `check_dist_func` and
# `check_nans_and_edges`.
methods = methods.copy()
if log:
if dist._overrides('_logcdf2_formula'):
methods.add('formula')
if dist._overrides('_logcdf_formula') or dist._overrides('_logccdf_formula'):
methods.add('subtraction')
if (dist._overrides('_cdf_formula')
or dist._overrides('_ccdf_formula')):
methods.add('log/exp')
else:
if dist._overrides('_cdf2_formula'):
methods.add('formula')
if dist._overrides('_cdf_formula') or dist._overrides('_ccdf_formula'):
methods.add('subtraction')
if (dist._overrides('_logcdf_formula')
or dist._overrides('_logccdf_formula')):
methods.add('log/exp')
ref = dist.cdf(y) - dist.cdf(x)
np.testing.assert_equal(ref.shape, result_shape)
if result_shape == tuple():
assert np.isscalar(ref)
for method in methods:
if isinstance(dist, DiscreteDistribution):
message = ("Two argument cdf functions are currently only supported for "
"continuous distributions.")
with pytest.raises(NotImplementedError, match=message):
res = (np.exp(dist.logcdf(x, y, method=method)) if log
else dist.cdf(x, y, method=method))
continue
res = (np.exp(dist.logcdf(x, y, method=method)) if log
else dist.cdf(x, y, method=method))
np.testing.assert_allclose(res, ref, atol=1e-14)
if log:
np.testing.assert_equal(res.dtype, (ref + 0j).dtype)
else:
np.testing.assert_equal(res.dtype, ref.dtype)
np.testing.assert_equal(res.shape, result_shape)
if result_shape == tuple():
assert np.isscalar(res)
def check_ccdf2(dist, log, x, y, result_shape, methods):
# Specialized test for 2-arg ccdf since the interface is a bit different
# from the other methods. Could be combined with check_cdf2 above, but
# writing it separately is simpler.
methods = methods.copy()
if dist._overrides(f'_{"log" if log else ""}ccdf2_formula'):
methods.add('formula')
ref = dist.cdf(x) + dist.ccdf(y)
np.testing.assert_equal(ref.shape, result_shape)
if result_shape == tuple():
assert np.isscalar(ref)
for method in methods:
message = ("Two argument cdf functions are currently only supported for "
"continuous distributions.")
if isinstance(dist, DiscreteDistribution):
with pytest.raises(NotImplementedError, match=message):
res = (np.exp(dist.logccdf(x, y, method=method)) if log
else dist.ccdf(x, y, method=method))
continue
res = (np.exp(dist.logccdf(x, y, method=method)) if log
else dist.ccdf(x, y, method=method))
np.testing.assert_allclose(res, ref, atol=1e-14)
np.testing.assert_equal(res.dtype, ref.dtype)
np.testing.assert_equal(res.shape, result_shape)
if result_shape == tuple():
assert np.isscalar(res)
def check_nans_and_edges(dist, fname, arg, res):
valid_parameters = get_valid_parameters(dist)
if fname in {'icdf', 'iccdf'}:
arg_domain = _RealInterval(endpoints=(0, 1), inclusive=(True, True))
elif fname in {'ilogcdf', 'ilogccdf'}:
arg_domain = _RealInterval(endpoints=(-inf, 0), inclusive=(True, True))
else:
arg_domain = dist._variable.domain
classified_args = classify_arg(dist, arg, arg_domain)
valid_parameters, *classified_args = np.broadcast_arrays(valid_parameters,
*classified_args)
valid_arg, endpoint_arg, outside_arg, nan_arg = classified_args
all_valid = valid_arg & valid_parameters
# Check NaN pattern and edge cases
assert_equal(res[~valid_parameters], np.nan)
assert_equal(res[nan_arg], np.nan)
a, b = dist.support()
a = np.broadcast_to(a, res.shape)
b = np.broadcast_to(b, res.shape)
outside_arg_minus = (outside_arg == -1) & valid_parameters
outside_arg_plus = (outside_arg == 1) & valid_parameters
endpoint_arg_minus = (endpoint_arg == -1) & valid_parameters
endpoint_arg_plus = (endpoint_arg == 1) & valid_parameters
is_discrete = isinstance(dist, DiscreteDistribution)
# Writing this independently of how the are set in the distribution
# infrastructure. That is very compact; this is very verbose.
if fname in {'logpdf'}:
assert_equal(res[outside_arg_minus], -np.inf)
assert_equal(res[outside_arg_plus], -np.inf)
ref = -np.inf if not is_discrete else np.inf
assert_equal(res[endpoint_arg_minus & ~valid_arg], ref)
assert_equal(res[endpoint_arg_plus & ~valid_arg], ref)
elif fname in {'pdf'}:
assert_equal(res[outside_arg_minus], 0)
assert_equal(res[outside_arg_plus], 0)
ref = 0 if not is_discrete else np.inf
assert_equal(res[endpoint_arg_minus & ~valid_arg], ref)
assert_equal(res[endpoint_arg_plus & ~valid_arg], ref)
elif fname in {'logcdf'} and not is_discrete:
assert_equal(res[outside_arg_minus], -inf)
assert_equal(res[outside_arg_plus], 0)
assert_equal(res[endpoint_arg_minus], -inf)
assert_equal(res[endpoint_arg_plus], 0)
elif fname in {'cdf'} and not is_discrete:
assert_equal(res[outside_arg_minus], 0)
assert_equal(res[outside_arg_plus], 1)
assert_equal(res[endpoint_arg_minus], 0)
assert_equal(res[endpoint_arg_plus], 1)
elif fname in {'logccdf'} and not is_discrete:
assert_equal(res[outside_arg_minus], 0)
assert_equal(res[outside_arg_plus], -inf)
assert_equal(res[endpoint_arg_minus], 0)
assert_equal(res[endpoint_arg_plus], -inf)
elif fname in {'ccdf'} and not is_discrete:
assert_equal(res[outside_arg_minus], 1)
assert_equal(res[outside_arg_plus], 0)
assert_equal(res[endpoint_arg_minus], 1)
assert_equal(res[endpoint_arg_plus], 0)
elif fname in {'ilogcdf', 'icdf'} and not is_discrete:
assert_equal(res[outside_arg == -1], np.nan)
assert_equal(res[outside_arg == 1], np.nan)
assert_equal(res[endpoint_arg == -1], a[endpoint_arg == -1])
assert_equal(res[endpoint_arg == 1], b[endpoint_arg == 1])
elif fname in {'ilogccdf', 'iccdf'} and not is_discrete:
assert_equal(res[outside_arg == -1], np.nan)
assert_equal(res[outside_arg == 1], np.nan)
assert_equal(res[endpoint_arg == -1], b[endpoint_arg == -1])
assert_equal(res[endpoint_arg == 1], a[endpoint_arg == 1])
exclude = {'logmean', 'mean', 'logskewness', 'skewness', 'support'}
if isinstance(dist, DiscreteDistribution):
exclude.update({'pdf', 'logpdf'})
if (
fname not in exclude
and not (isinstance(dist, Binomial)
and np.any((dist.n == 0) | (dist.p == 0) | (dist.p == 1)))):
# This can fail in degenerate case where Binomial distribution is a point
# distribution. Further on, we could factor out an is_degenerate function
# for the tests, or think about storing info about degeneracy in the
# instances.
assert np.isfinite(res[all_valid & (endpoint_arg == 0)]).all()
def check_moment_funcs(dist, result_shape):
# Check that all computation methods of all distribution functions agree
# with one another, effectively testing the correctness of the generic
# computation methods and confirming the consistency of specific
# distributions with their pdf/logpdf.
atol = 1e-9 # make this tighter (e.g. 1e-13) after fixing `draw`
def check(order, kind, method=None, ref=None, success=True):
if success:
res = dist.moment(order, kind, method=method)
assert_allclose(res, ref, atol=atol*10**order)
assert res.shape == ref.shape
else:
with pytest.raises(NotImplementedError):
dist.moment(order, kind, method=method)
def has_formula(order, kind):
formula_name = f'_moment_{kind}_formula'
overrides = dist._overrides(formula_name)
if not overrides:
return False
formula = getattr(dist, formula_name)
orders = getattr(formula, 'orders', set(range(6)))
return order in orders
dist.reset_cache()
### Check Raw Moments ###
for i in range(6):
check(i, 'raw', 'cache', success=False) # not cached yet
ref = dist.moment(i, 'raw', method='quadrature')
check_nans_and_edges(dist, 'moment', None, ref)
assert ref.shape == result_shape
check(i, 'raw','cache', ref, success=True) # cached now
check(i, 'raw', 'formula', ref, success=has_formula(i, 'raw'))
check(i, 'raw', 'general', ref, success=(i == 0))
if dist.__class__ == stats.Normal:
check(i, 'raw', 'quadrature_icdf', ref, success=True)
# Clearing caches to better check their behavior
dist.reset_cache()
# If we have central or standard moment formulas, or if there are
# values in their cache, we can use method='transform'
dist.moment(0, 'central') # build up the cache
dist.moment(1, 'central')
for i in range(2, 6):
ref = dist.moment(i, 'raw', method='quadrature')
check(i, 'raw', 'transform', ref,
success=has_formula(i, 'central') or has_formula(i, 'standardized'))
dist.moment(i, 'central') # build up the cache
check(i, 'raw', 'transform', ref)
dist.reset_cache()
### Check Central Moments ###
for i in range(6):
check(i, 'central', 'cache', success=False)
ref = dist.moment(i, 'central', method='quadrature')
assert ref.shape == result_shape
check(i, 'central', 'cache', ref, success=True)
check(i, 'central', 'formula', ref, success=has_formula(i, 'central'))
check(i, 'central', 'general', ref, success=i <= 1)
if dist.__class__ == stats.Normal:
check(i, 'central', 'quadrature_icdf', ref, success=True)
if not (dist.__class__ == stats.Uniform and i == 5):
# Quadrature is not super accurate for 5th central moment when the
# support is really big. Skip this one failing test. We need to come
# up with a better system of skipping individual failures w/ hypothesis.
check(i, 'central', 'transform', ref,
success=has_formula(i, 'raw') or (i <= 1))
if not has_formula(i, 'raw'):
dist.moment(i, 'raw')
check(i, 'central', 'transform', ref)
variance = dist.variance()
dist.reset_cache()
# If we have standard moment formulas, or if there are
# values in their cache, we can use method='normalize'
dist.moment(0, 'standardized') # build up the cache
dist.moment(1, 'standardized')
dist.moment(2, 'standardized')
for i in range(3, 6):
ref = dist.moment(i, 'central', method='quadrature')
check(i, 'central', 'normalize', ref,
success=has_formula(i, 'standardized') and not np.any(variance == 0))
dist.moment(i, 'standardized') # build up the cache
check(i, 'central', 'normalize', ref, success=not np.any(variance == 0))
### Check Standardized Moments ###
var = dist.moment(2, 'central', method='quadrature')
dist.reset_cache()
for i in range(6):
check(i, 'standardized', 'cache', success=False)
ref = dist.moment(i, 'central', method='quadrature') / var ** (i / 2)
assert ref.shape == result_shape
check(i, 'standardized', 'formula', ref,
success=has_formula(i, 'standardized'))
if not (
isinstance(dist, Binomial)
and np.any((dist.n == 0) | (dist.p == 0) | (dist.p == 1))
):
# This test will fail for degenerate case where binomial distribution
# is a point distribution.
check(i, 'standardized', 'general', ref, success=i <= 2)
check(i, 'standardized', 'normalize', ref)
if isinstance(dist, ShiftedScaledDistribution):
# logmoment is not fully fleshed out; no need to test
# ShiftedScaledDistribution here
return
# logmoment is not very accuate, and it's not public, so skip for now
# ### Check Against _logmoment ###
# logmean = dist._logmoment(1, logcenter=-np.inf)
# for i in range(6):
# ref = np.exp(dist._logmoment(i, logcenter=-np.inf))
# assert_allclose(dist.moment(i, 'raw'), ref, atol=atol*10**i)
#
# ref = np.exp(dist._logmoment(i, logcenter=logmean))
# assert_allclose(dist.moment(i, 'central'), ref, atol=atol*10**i)
#
# ref = np.exp(dist._logmoment(i, logcenter=logmean, standardized=True))
# assert_allclose(dist.moment(i, 'standardized'), ref, atol=atol*10**i)
@pytest.mark.parametrize('family', (Normal,))
@pytest.mark.parametrize('x_shape', [tuple(), (2, 3)])
@pytest.mark.parametrize('dist_shape', [tuple(), (4, 1)])
@pytest.mark.parametrize('fname', ['sample'])
@pytest.mark.parametrize('rng_type', [np.random.Generator, qmc.Halton, qmc.Sobol])
def test_sample_against_cdf(family, dist_shape, x_shape, fname, rng_type):
rng = np.random.default_rng(842582438235635)
num_parameters = family._num_parameters()
if dist_shape and num_parameters == 0:
pytest.skip("Distribution can't have a shape without parameters.")
dist = family._draw(dist_shape, rng)
n = 1024
sample_size = (n,) + x_shape
sample_array_shape = sample_size + dist_shape
if fname == 'sample':
sample_method = dist.sample
if rng_type != np.random.Generator:
rng = rng_type(d=1, seed=rng)
x = sample_method(sample_size, rng=rng)
assert x.shape == sample_array_shape
# probably should give `axis` argument to ks_1samp, review that separately
statistic = _kolmogorov_smirnov(dist, x, axis=0)
pvalue = kolmogn(x.shape[0], statistic, cdf=False)
p_threshold = 0.01
num_pvalues = pvalue.size
num_small_pvalues = np.sum(pvalue < p_threshold)
assert num_small_pvalues < p_threshold * num_pvalues
def get_valid_parameters(dist):
# Given a distribution, return a logical array that is true where all
# distribution parameters are within their respective domains. The code
# here is probably quite similar to that used to form the `_invalid`
# attribute of the distribution, but this was written about a week later
# without referring to that code, so it is a somewhat independent check.
# Get all parameter values and `_Parameter` objects
parameter_values = dist._parameters
parameters = {}
for parameterization in dist._parameterizations:
parameters.update(parameterization.parameters)
all_valid = np.ones(dist._shape, dtype=bool)
for name, value in parameter_values.items():
if name not in parameters: # cached value not part of parameterization
continue
parameter = parameters[name]
# Check that the numerical endpoints and inclusivity attribute
# agree with the `contains` method about which parameter values are
# within the domain.
a, b = parameter.domain.get_numerical_endpoints(
parameter_values=parameter_values)
a_included, b_included = parameter.domain.inclusive
valid = (a <= value) if a_included else a < value
valid &= (value <= b) if b_included else value < b
assert_equal(valid, parameter.domain.contains(
value, parameter_values=parameter_values))
# Form `all_valid` mask that is True where *all* parameters are valid
all_valid &= valid
# Check that the `all_valid` mask formed here is the complement of the
# `dist._invalid` mask stored by the infrastructure
assert_equal(~all_valid, dist._invalid)
return all_valid
def classify_arg(dist, arg, arg_domain):
if arg is None:
valid_args = np.ones(dist._shape, dtype=bool)
endpoint_args = np.zeros(dist._shape, dtype=bool)
outside_args = np.zeros(dist._shape, dtype=bool)
nan_args = np.zeros(dist._shape, dtype=bool)
return valid_args, endpoint_args, outside_args, nan_args
a, b = arg_domain.get_numerical_endpoints(
parameter_values=dist._parameters)
a, b, arg = np.broadcast_arrays(a, b, arg)
a_included, b_included = arg_domain.inclusive
inside = (a <= arg) if a_included else a < arg
inside &= (arg <= b) if b_included else arg < b
# TODO: add `supported` method and check here
on = np.zeros(a.shape, dtype=int)
on[a == arg] = -1
on[b == arg] = 1
outside = np.zeros(a.shape, dtype=int)
outside[(arg < a) if a_included else arg <= a] = -1
outside[(b < arg) if b_included else b <= arg] = 1
nan = np.isnan(arg)
return inside, on, outside, nan
def test_input_validation():
class Test(ContinuousDistribution):
_variable = _RealParameter('x', domain=_RealInterval())
message = ("The `Test` distribution family does not accept parameters, "
"but parameters `{'a'}` were provided.")
with pytest.raises(ValueError, match=message):
Test(a=1, )
message = "Attribute `tol` of `Test` must be a positive float, if specified."
with pytest.raises(ValueError, match=message):
Test(tol=np.asarray([]))
with pytest.raises(ValueError, match=message):
Test(tol=[1, 2, 3])
with pytest.raises(ValueError, match=message):
Test(tol=np.nan)
with pytest.raises(ValueError, match=message):
Test(tol=-1)
message = ("Argument `order` of `Test.moment` must be a "
"finite, positive integer.")
with pytest.raises(ValueError, match=message):
Test().moment(-1)
with pytest.raises(ValueError, match=message):
Test().moment(np.inf)
message = "Argument `kind` of `Test.moment` must be one of..."
with pytest.raises(ValueError, match=message):
Test().moment(2, kind='coconut')
class Test2(ContinuousDistribution):
_p1 = _RealParameter('c', domain=_RealInterval())
_p2 = _RealParameter('d', domain=_RealInterval())
_parameterizations = [_Parameterization(_p1, _p2)]
_variable = _RealParameter('x', domain=_RealInterval())
message = ("The provided parameters `{a}` do not match a supported "
"parameterization of the `Test2` distribution family.")
with pytest.raises(ValueError, match=message):
Test2(a=1)
message = ("The `Test2` distribution family requires parameters, but none "
"were provided.")
with pytest.raises(ValueError, match=message):
Test2()
message = ("The parameters `{c, d}` provided to the `Test2` "
"distribution family cannot be broadcast to the same shape.")
with pytest.raises(ValueError, match=message):
Test2(c=[1, 2], d=[1, 2, 3])
message = ("The argument provided to `Test2.pdf` cannot be be broadcast to "
"the same shape as the distribution parameters.")
with pytest.raises(ValueError, match=message):
dist = Test2(c=[1, 2, 3], d=[1, 2, 3])
dist.pdf([1, 2])
message = "Parameter `c` must be of real dtype."
with pytest.raises(TypeError, match=message):
Test2(c=[1, object()], d=[1, 2])
message = "Parameter `convention` of `Test2.kurtosis` must be one of..."
with pytest.raises(ValueError, match=message):
dist = Test2(c=[1, 2, 3], d=[1, 2, 3])
dist.kurtosis(convention='coconut')
def test_rng_deepcopy_pickle():
# test behavior of `rng` attribute and copy behavior
kwargs = dict(a=[-1, 2], b=10)
dist1 = Uniform(**kwargs)
dist2 = deepcopy(dist1)
dist3 = pickle.loads(pickle.dumps(dist1))
res1, res2, res3 = dist1.sample(), dist2.sample(), dist3.sample()
assert np.all(res2 != res1)
assert np.all(res3 != res1)
res1, res2, res3 = dist1.sample(rng=42), dist2.sample(rng=42), dist3.sample(rng=42)
assert np.all(res2 == res1)
assert np.all(res3 == res1)
| TestDistributions |
python | getlogbook__logbook | src/logbook/_fallback.py | {
"start": 708,
"end": 1715
} | class ____:
def __init__(self, default, *, fallback=_missing):
self.default = default
self.fallback = fallback
self.prop_name = None
self.attr_name = None
def __set_name__(self, owner: type, name: str) -> None:
self.prop_name = name
self.attr_name = f"_{name}"
def __get__(self, instance: Any, owner: type) -> Any:
if instance is None:
return self
if self.attr_name is None:
raise TypeError("property is not bound to a class")
rv = getattr(instance, self.attr_name, _missing)
if rv is not _missing and rv != self.fallback:
return rv
if instance.group is None:
return self.default
return getattr(instance.group, self.prop_name)
def __set__(self, instance: Any, value: Any) -> None:
setattr(instance, self.attr_name, value)
def __delete__(self, instance: Any) -> None:
delattr(instance, self.attr_name)
| group_reflected_property |
python | google__jax | tests/pallas/gpu_ops_test.py | {
"start": 12433,
"end": 12509
} | class ____(FusedLayerNormTest):
INTERPRET = True
| FusedLayerNormInterpretTest |
python | huggingface__transformers | tests/utils/test_modeling_rope_utils.py | {
"start": 924,
"end": 24141
} | class ____(unittest.TestCase):
def test_rope_validation(self):
config = LlamaConfig()
all_rope_types = ROPE_INIT_FUNCTIONS.keys()
# The base config is always valid (default RoPE)
config.validate_rope()
# If we explicitly set the other RoPE types, then validation should fail
for rope_type in all_rope_types:
config.rope_parameters = {"rope_type": rope_type, "rope_theta": 10000.0}
with self.assertRaises(KeyError):
config.validate_rope()
# Parameters are exclusive to their own RoPE type, and should raise an exception if incorrectly passed
valid_param_mapping = {
"factor": ["linear", "dynamic", "yarn", "longrope"],
"attention_factor": ["yarn", "longrope"],
"beta_fast": ["yarn"],
"beta_slow": ["yarn"],
"short_factor": ["longrope"],
"long_factor": ["longrope"],
}
for rope_type in all_rope_types:
for param, valid_rope_types in valid_param_mapping.items():
# Set `param` with a dummy value -- we want to test the dict key
config.rope_parameters = {"rope_type": rope_type, "rope_theta": 10000.0, param: True}
if rope_type in valid_rope_types:
continue
else:
with self.assertRaises(KeyError):
config.validate_rope()
# Any other parameters passed to RoPE will raise a warning that a particular key is not used
# But sometimes we can have model-specific RoPE kwargs and bypass warning with `ignore_keys`
model_specific_kwarg = "mrope_sections" # e,g in Qwen2-VL
config.rope_parameters = {"rope_type": "default", "rope_theta": 10000.0, model_specific_kwarg: True}
config.validate_rope(ignore_keys={model_specific_kwarg})
with self.assertLogs("transformers.modeling_rope_utils", level="WARNING") as logs:
config.validate_rope()
self.assertEqual(len(logs.output), 1)
self.assertIn(model_specific_kwarg, logs.output[0])
# We can indicate Different RoPE params for each attention type
# We can also have only one RoPE params defined for all layer, we don't raise an error
# because it is not required to have separate RoPE per layer type
config.layer_types = ["full_attention", "sliding_attention"]
config.rope_parameters = {
"full_attention": {"rope_type": "default", "rope_theta": 10000},
"sliding_attention": {"rope_type": "linear", "rope_theta": 10000, "factor": 2.0},
}
config.validate_rope()
config.rope_parameters = config.rope_parameters["full_attention"]
config.validate_rope()
def test_yarn_original_original_max_position_embeddings_validation(self):
"""Tests that models with no/bad `original_max_position_embeddings` raise a warning"""
config = LlamaConfig()
# good rope config: has a factor AND original_max_position_embeddings -> no warnings
rope_config = {
"rope_type": "yarn",
"rope_theta": 10000.0,
"factor": 2.0,
"original_max_position_embeddings": int(config.max_position_embeddings / 2.0),
}
config.rope_parameters = rope_config
with self.assertRaises(AssertionError): # confirm that no warnings are thrown
with self.assertLogs("transformers.modeling_rope_utils", level="WARNING") as logs:
config.validate_rope()
# bad rope config, no `original_max_position_embeddings` -> warning
rope_config = {
"rope_type": "yarn",
"rope_theta": 10000.0,
"factor": 2.0,
}
config.rope_parameters = rope_config
with self.assertLogs("transformers.modeling_rope_utils", level="WARNING") as logs:
config.validate_rope()
self.assertEqual(len(logs.output), 1)
self.assertIn("is unset", logs.output[0])
# bad rope config, bad implicit fator -> warning
rope_config = {
"rope_type": "yarn",
"rope_theta": 10000.0,
"factor": 2.0,
"original_max_position_embeddings": 1,
}
config.rope_parameters = rope_config
with self.assertLogs("transformers.modeling_rope_utils", level="WARNING") as logs:
config.validate_rope()
self.assertEqual(len(logs.output), 1)
self.assertIn("implicit factor", logs.output[0])
def test_default_rope_numerically(self):
# Note: some RoPE scaling methods start off by calling the default RoPE frequencies. If this test fails, then
# multiple RoPE strategies will fail.
# fmt: off
EXPECTED_INV_FREQ = torch.tensor(
[
1.0000e+00, 8.6596e-01, 7.4989e-01, 6.4938e-01, 5.6234e-01, 4.8697e-01,
4.2170e-01, 3.6517e-01, 3.1623e-01, 2.7384e-01, 2.3714e-01, 2.0535e-01,
1.7783e-01, 1.5399e-01, 1.3335e-01, 1.1548e-01, 1.0000e-01, 8.6596e-02,
7.4989e-02, 6.4938e-02, 5.6234e-02, 4.8697e-02, 4.2170e-02, 3.6517e-02,
3.1623e-02, 2.7384e-02, 2.3714e-02, 2.0535e-02, 1.7783e-02, 1.5399e-02,
1.3335e-02, 1.1548e-02, 1.0000e-02, 8.6596e-03, 7.4989e-03, 6.4938e-03,
5.6234e-03, 4.8697e-03, 4.2170e-03, 3.6517e-03, 3.1623e-03, 2.7384e-03,
2.3714e-03, 2.0535e-03, 1.7783e-03, 1.5399e-03, 1.3335e-03, 1.1548e-03,
1.0000e-03, 8.6596e-04, 7.4989e-04, 6.4938e-04, 5.6234e-04, 4.8697e-04,
4.2170e-04, 3.6517e-04, 3.1623e-04, 2.7384e-04, 2.3714e-04, 2.0535e-04,
1.7783e-04, 1.5399e-04, 1.3335e-04, 1.1548e-04
], device=torch_device
)
# fmt: on
# input sanity checks: if these change, the output will also change
config = LlamaConfig()
self.assertEqual(config.rope_parameters, {"rope_type": "default", "rope_theta": 10000.0})
self.assertEqual(config.hidden_size, 4096)
self.assertEqual(config.num_attention_heads, 32)
self.assertFalse(hasattr(config, "partial_rotary_factor"))
rope_fn = LlamaRotaryEmbedding.compute_default_rope_parameters
inv_freq, attention_scale = rope_fn(config=config, device=torch_device)
self.assertEqual(attention_scale, 1.0) # attention scale is always 1 for default RoPE
torch.testing.assert_close(inv_freq, EXPECTED_INV_FREQ)
def test_linear_rope_numerically(self):
# This is a linear scaling strategy, the **frequencies** are scaled linearly with respect to the default
# frequencies (= the inverse frequencies are scaled **inversely**)
config = LlamaConfig()
default_rope_fn = LlamaRotaryEmbedding.compute_default_rope_parameters
default_inv_freq, _ = default_rope_fn(config=config, device=torch_device)
rope_fn = ROPE_INIT_FUNCTIONS["linear"]
for factor in (2.0, 10.0, 20.0):
config.rope_parameters = {"rope_type": "linear", "rope_theta": 10000.0, "factor": factor}
inv_freq, attention_scale = rope_fn(config=config, device=torch_device)
self.assertEqual(attention_scale, 1.0) # attention scale is always 1 for linear RoPE
torch.testing.assert_close(inv_freq, default_inv_freq / factor)
def test_dynamic_rope_numerically(self):
# fmt: off
EXPECTED_INV_FREQ = torch.tensor(
[
1.0000e+00, 8.0931e-01, 6.5498e-01, 5.3008e-01, 4.2900e-01, 3.4720e-01,
2.8099e-01, 2.2741e-01, 1.8404e-01, 1.4895e-01, 1.2055e-01, 9.7558e-02,
7.8955e-02, 6.3899e-02, 5.1714e-02, 4.1853e-02, 3.3872e-02, 2.7413e-02,
2.2185e-02, 1.7955e-02, 1.4531e-02, 1.1760e-02, 9.5176e-03, 7.7027e-03,
6.2339e-03, 5.0451e-03, 4.0831e-03, 3.3045e-03, 2.6744e-03, 2.1644e-03,
1.7517e-03, 1.4176e-03, 1.1473e-03, 9.2852e-04, 7.5146e-04, 6.0817e-04,
4.9220e-04, 3.9834e-04, 3.2238e-04, 2.6091e-04, 2.1115e-04, 1.7089e-04,
1.3830e-04, 1.1193e-04, 9.0585e-05, 7.3312e-05, 5.9332e-05, 4.8018e-05,
3.8861e-05, 3.1451e-05, 2.5453e-05, 2.0600e-05, 1.6672e-05, 1.3492e-05,
1.0920e-05, 8.8374e-06, 7.1522e-06, 5.7883e-06, 4.6845e-06, 3.7912e-06,
3.0683e-06, 2.4832e-06, 2.0097e-06, 1.6265e-06
], device=torch_device
)
# fmt: on
# input sanity checks: if these change, the output will also change
config = LlamaConfig()
self.assertEqual(config.rope_parameters, {"rope_type": "default", "rope_theta": 10000.0})
self.assertEqual(config.hidden_size, 4096)
self.assertEqual(config.num_attention_heads, 32)
self.assertFalse(hasattr(config, "partial_rotary_factor"))
rope_fn = LlamaRotaryEmbedding.compute_default_rope_parameters
default_inv_freq, _ = rope_fn(config=config, device=torch_device)
# Check 1: this is a dynamic scaling strategy, it will not scale unless we provide `seq_len` larger than the
# model's original training sequence length
rope_fn = ROPE_INIT_FUNCTIONS["dynamic"]
for factor in (2.0, 10.0, 20.0):
config.rope_parameters = {"rope_type": "dynamic", "rope_theta": 10000.0, "factor": factor}
inv_freq, attention_scale = rope_fn(config=config, device=torch_device)
self.assertEqual(attention_scale, 1.0) # attention scale is always 1 for dynamic RoPE
torch.testing.assert_close(inv_freq, default_inv_freq)
inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=1)
torch.testing.assert_close(inv_freq, default_inv_freq)
inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=torch.tensor(1, dtype=torch.int64))
torch.testing.assert_close(inv_freq, default_inv_freq)
# Check 2: if we provide `seq_len` larger than the model's original training sequence length, the frequencies
# will scale up (i.e., the inverse frequencies will scale down).
factor = 10.0
config.rope_parameters = {"rope_type": "dynamic", "rope_theta": 10000.0, "factor": factor}
inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=16384)
with self.assertRaises(AssertionError): # It is NOT a linear factor
torch.testing.assert_close(inv_freq, default_inv_freq / factor)
torch.testing.assert_close(inv_freq, EXPECTED_INV_FREQ)
def test_yarn_rope_numerically(self):
# fmt: off
EXPECTED_INV_FREQ = torch.tensor(
[
1.0000e+00, 8.6596e-01, 7.4989e-01, 6.4938e-01, 5.6234e-01, 4.8697e-01,
4.2170e-01, 3.6517e-01, 3.1623e-01, 2.7384e-01, 2.3714e-01, 2.0535e-01,
1.7783e-01, 1.5399e-01, 1.3335e-01, 1.1548e-01, 1.0000e-01, 8.3479e-02,
6.9590e-02, 5.7925e-02, 4.8136e-02, 3.9931e-02, 3.3061e-02, 2.7315e-02,
2.2515e-02, 1.8512e-02, 1.5177e-02, 1.2403e-02, 1.0101e-02, 8.1924e-03,
6.6143e-03, 5.3120e-03, 4.2400e-03, 3.3599e-03, 2.6396e-03, 2.0520e-03,
1.5746e-03, 1.1882e-03, 8.7713e-04, 6.2810e-04, 4.3007e-04, 2.7384e-04,
2.3714e-04, 2.0535e-04, 1.7783e-04, 1.5399e-04, 1.3335e-04, 1.1548e-04,
1.0000e-04, 8.6596e-05, 7.4989e-05, 6.4938e-05, 5.6234e-05, 4.8697e-05,
4.2170e-05, 3.6517e-05, 3.1623e-05, 2.7384e-05, 2.3714e-05, 2.0535e-05,
1.7783e-05, 1.5399e-05, 1.3335e-05, 1.1548e-05
], device=torch_device
)
# fmt: on
# input sanity checks: if these change, the output will also change
config = LlamaConfig()
self.assertEqual(config.rope_parameters, {"rope_type": "default", "rope_theta": 10000.0})
self.assertEqual(config.hidden_size, 4096)
self.assertEqual(config.num_attention_heads, 32)
self.assertFalse(hasattr(config, "partial_rotary_factor"))
rope_fn = LlamaRotaryEmbedding.compute_default_rope_parameters
default_inv_freq, _ = rope_fn(config=config, device=torch_device)
# Check 1: according to the paper, if `attention_factor` is not specified, then it has a specific default --
# `0.1 * math.log(factor) + 1.0`
rope_fn = ROPE_INIT_FUNCTIONS["yarn"]
for factor in (2.0, 10.0, 20.0):
config.rope_parameters = {"rope_type": "yarn", "rope_theta": 10000.0, "factor": factor}
_, attention_scale = rope_fn(config=config, device=torch_device)
self.assertEqual(attention_scale, 0.1 * math.log(factor) + 1.0)
config.rope_parameters = {
"rope_type": "yarn",
"rope_theta": 10000.0,
"factor": factor,
"attention_factor": 0.5,
}
_, attention_scale = rope_fn(config=config, device=torch_device, seq_len=1)
self.assertEqual(attention_scale, 0.5)
# Check 2: based on `beta_fast` and `beta_slow`, the frequencies will be scaled between 1 and `factor`.
# Increasing `beta_fast` will make RoPE more interpolative (apply scaling), and the other way around.
# `beta_slow` behaves the opposite way. Remember: `beta_fast` > `beta_slow`
# (note: adds a margin to the test for numerical stability)
factor = 10.0
margin = 1e-8
config.rope_parameters = {
"rope_type": "yarn",
"rope_theta": 10000.0,
"factor": factor,
"beta_fast": 32,
"beta_slow": 1,
}
inv_freq, _ = rope_fn(config=config, device=torch_device)
is_bounded_by_factor = [
((default_inv_freq[idx] / factor) - margin) <= yarn_inv_freq_value <= (default_inv_freq[idx] + margin)
for idx, yarn_inv_freq_value in enumerate(inv_freq)
]
self.assertTrue(all(is_bounded_by_factor))
# super high beta_fast = interpolation (i.e. scaling) in all but the first inverse frequency. The last ~20
# values (empirically checked for `beta_fast` = 1000) should be very small to linear scaling
config.rope_parameters = {
"rope_type": "yarn",
"rope_theta": 10000.0,
"factor": factor,
"beta_fast": 1000,
"beta_slow": 1,
}
inv_freq, _ = rope_fn(config=config, device=torch_device)
is_interpolating = [
yarn_inv_freq_value < (default_inv_freq[idx] + margin) for idx, yarn_inv_freq_value in enumerate(inv_freq)
]
self.assertFalse(is_interpolating[0])
self.assertTrue(all(is_interpolating[1:]))
torch.testing.assert_close(inv_freq[-20:], default_inv_freq[-20:] / factor)
# Check 3: numerical snapshot to avoid regressions
config.rope_parameters = {
"rope_type": "yarn",
"rope_theta": 10000.0,
"factor": factor,
"beta_fast": 32,
"beta_slow": 1,
}
inv_freq, _ = rope_fn(config=config, device=torch_device)
torch.testing.assert_close(inv_freq, EXPECTED_INV_FREQ)
def test_longrope_rope_numerically(self):
# input sanity checks: if these change, the output will also change
config = LlamaConfig()
self.assertEqual(config.rope_parameters, {"rope_type": "default", "rope_theta": 10000.0})
self.assertEqual(config.hidden_size, 4096)
self.assertEqual(config.num_attention_heads, 32)
self.assertFalse(hasattr(config, "partial_rotary_factor"))
# longrope applies scaling on EACH inv frequency, `short_factor` or `long_factor`, depending on the seq_len
dim = config.hidden_size // config.num_attention_heads
short_factor = [2.0] * (dim // 2) # scaling applied when seq_len <= max_position_embeddings
long_factor = torch.ones(dim // 2).cumsum(0).tolist() # scaling applied when seq_len > max_position_embeddings
rope_fn = LlamaRotaryEmbedding.compute_default_rope_parameters
default_inv_freq, _ = rope_fn(config=config, device=torch_device)
# Check 1: according to the paper, if `attention_factor` is not specified, then it has a specific default --
# `math.sqrt(1 + math.log(factor) / math.log(max_position_embeddings))`
rope_fn = ROPE_INIT_FUNCTIONS["longrope"]
max_position_embeddings = config.max_position_embeddings
for factor in (2.0, 10.0, 20.0):
config.rope_parameters = {
"rope_type": "longrope",
"rope_theta": 10000.0,
"factor": factor,
"short_factor": short_factor,
"long_factor": long_factor,
}
_, attention_scale = rope_fn(config=config, device=torch_device)
self.assertEqual(attention_scale, math.sqrt(1 + math.log(factor) / math.log(max_position_embeddings)))
config.rope_parameters = {
"rope_type": "longrope",
"rope_theta": 10000.0,
"factor": factor,
"short_factor": short_factor,
"long_factor": long_factor,
"attention_factor": 0.5,
}
_, attention_scale = rope_fn(config=config, device=torch_device, seq_len=1)
self.assertEqual(attention_scale, 0.5)
config.rope_parameters = {
"rope_type": "longrope",
"rope_theta": 10000.0,
"factor": factor,
"short_factor": short_factor,
"long_factor": long_factor,
}
self.assertEqual(config.rope_parameters.get("attention_factor"), None)
# Verify that "TypeError: '<' not supported between instances of 'NoneType' and 'int'" is not raised.
config.validate_rope()
# Check 2: seq_len == 0 -> short factor is applied to the default frequencies
config.rope_parameters = {
"rope_type": "longrope",
"rope_theta": 10000.0,
"factor": 1.0,
"short_factor": short_factor,
"long_factor": long_factor,
}
inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=0)
torch.testing.assert_close(inv_freq, default_inv_freq / torch.tensor(short_factor).to(torch_device))
# Check 3: seq_len > max_position_embeddings -> long factor is applied to the default frequencies
inv_freq, _ = rope_fn(config=config, device=torch_device, seq_len=config.max_position_embeddings + 1)
torch.testing.assert_close(inv_freq, default_inv_freq / torch.tensor(long_factor).to(torch_device))
def test_llama3_rope_numerically(self):
# fmt: off
EXPECTED_INV_FREQ = torch.tensor(
[
1.0000e+00, 8.6596e-01, 7.4989e-01, 6.4938e-01, 5.6234e-01, 4.8697e-01,
4.2170e-01, 3.6517e-01, 3.1623e-01, 2.7384e-01, 2.3714e-01, 2.0535e-01,
1.7783e-01, 1.5399e-01, 1.3335e-01, 1.1548e-01, 1.0000e-01, 8.6596e-02,
7.4989e-02, 6.4938e-02, 5.6234e-02, 4.8697e-02, 4.2170e-02, 3.6517e-02,
3.1623e-02, 2.7384e-02, 2.3714e-02, 2.0535e-02, 1.7783e-02, 1.5399e-02,
1.3335e-02, 1.0730e-02, 7.7785e-03, 5.6009e-03, 3.9991e-03, 2.8248e-03,
1.9675e-03, 1.3449e-03, 8.9549e-04, 5.7363e-04, 3.4539e-04, 2.7384e-04,
2.3714e-04, 2.0535e-04, 1.7783e-04, 1.5399e-04, 1.3335e-04, 1.1548e-04,
1.0000e-04, 8.6596e-05, 7.4989e-05, 6.4938e-05, 5.6234e-05, 4.8697e-05,
4.2170e-05, 3.6517e-05, 3.1623e-05, 2.7384e-05, 2.3714e-05, 2.0535e-05,
1.7783e-05, 1.5399e-05, 1.3335e-05, 1.1548e-05
], device=torch_device
)
# fmt: on
# input sanity checks: if these change, the output will also change
config = LlamaConfig()
self.assertEqual(config.rope_parameters, {"rope_type": "default", "rope_theta": 10000.0})
self.assertEqual(config.hidden_size, 4096)
self.assertEqual(config.num_attention_heads, 32)
self.assertFalse(hasattr(config, "partial_rotary_factor"))
rope_fn = LlamaRotaryEmbedding.compute_default_rope_parameters
default_inv_freq, _ = rope_fn(config=config, device=torch_device)
# Check 1: `attention_factor` is always 1
rope_fn = ROPE_INIT_FUNCTIONS["llama3"]
for factor in (2.0, 10.0, 20.0):
config.rope_parameters = {
"rope_type": "llama3",
"rope_theta": 10000.0,
"factor": factor,
"original_max_position_embeddings": 2048,
"low_freq_factor": 1,
"high_freq_factor": 4,
}
_, attention_scale = rope_fn(config=config, device=torch_device)
self.assertEqual(attention_scale, 1.0)
# Check 2: based on `low_freq_factor` and `high_freq_factor`, the frequencies will be scaled between 1 and
# `factor` (similar to yarn). Low frequencies get scaled by `factor`, high frequencies see no change, medium
# frequencies are scaled by a value in between. Changing `low_freq_factor` and `high_freq_factor` changes what
# is considered low, medium, and high frequencies.
factor = 10.0
config.rope_parameters = {
"rope_type": "llama3",
"rope_theta": 10000.0,
"factor": factor,
"original_max_position_embeddings": 2048,
"low_freq_factor": 1,
"high_freq_factor": 4,
}
inv_freq, _ = rope_fn(config=config, device=torch_device)
is_bounded_by_factor = [
(default_inv_freq[idx] / factor) <= llama3_inv_freq_value <= default_inv_freq[idx]
for idx, llama3_inv_freq_value in enumerate(inv_freq)
]
self.assertTrue(all(is_bounded_by_factor))
# if we change `high_freq_factor` to a very high value, none is considered high-frequency -> ALL values will be
# scaled
config.rope_parameters = config.rope_parameters = {
"rope_type": "llama3",
"rope_theta": 10000.0,
"factor": factor,
"original_max_position_embeddings": 2048,
"low_freq_factor": 1,
"high_freq_factor": 1000,
}
inv_freq, _ = rope_fn(config=config, device=torch_device)
is_scaled = [yarn_inv_freq_value < default_inv_freq[idx] for idx, yarn_inv_freq_value in enumerate(inv_freq)]
self.assertTrue(all(is_scaled))
# Check 3: numerical snapshot to avoid regressions
config.rope_parameters = {
"rope_type": "llama3",
"rope_theta": 10000.0,
"factor": factor,
"original_max_position_embeddings": 2048,
"low_freq_factor": 1,
"high_freq_factor": 4,
}
inv_freq, _ = rope_fn(config=config, device=torch_device)
torch.testing.assert_close(inv_freq, EXPECTED_INV_FREQ)
| RopeTest |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/masked_input.py | {
"start": 85,
"end": 343
} | class ____(App[None]):
def compose(self) -> ComposeResult:
yield MaskedInput(">NNNNN-NNNNN-NNNNN-NNNNN;_")
yield MaskedInput("9999-99-99", placeholder="YYYY-MM-DD")
if __name__ == "__main__":
app = TemplateApp()
app.run() | TemplateApp |
python | walkccc__LeetCode | solutions/2180. Count Integers With Even Digit Sum/2180.py | {
"start": 0,
"end": 200
} | class ____:
def countEven(self, num: int) -> int:
return (num - self._getDigitSum(num) % 2) // 2
def _getDigitSum(self, num: int) -> int:
return sum(int(digit) for digit in str(num))
| Solution |
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 31573,
"end": 32513
} | class ____(GroupByShift):
_parameters = GroupByApply._parameters + ["split_every"]
default = {**GroupByShift._defaults, "split_every": None}
@functools.cached_property
def grp_func(self):
return functools.partial(_median_groupby_aggregate)
def _shuffle_grp_func(self, shuffled=False):
return self.grp_func
def _simplify_up(self, parent, dependents):
if isinstance(parent, Projection):
return groupby_projection(self, parent, dependents)
@functools.cached_property
def npartitions(self):
npartitions = self.frame.npartitions
if self.split_every is not None:
npartitions = npartitions // self.split_every
return npartitions
def groupby_get_group(df, *by_key, get_key=None, columns=None):
if PANDAS_GE_300 and is_scalar(get_key):
get_key = (get_key,)
return _groupby_get_group(df, list(by_key), get_key, columns)
| Median |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-friendli/llama_index/llms/friendli/base.py | {
"start": 757,
"end": 8897
} | class ____(LLM):
"""Friendli LLM."""
model: str = Field(description="The friendli model to use.")
max_tokens: int = Field(description="The maximum number of tokens to generate.")
temperature: Optional[float] = Field(
description="The temperature to use for sampling."
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the Friendli API."
)
_client: Any = PrivateAttr()
_aclient: Any = PrivateAttr()
def __init__(
self,
model: str = "mixtral-8x7b-instruct-v0-1",
friendli_token: Optional[str] = None,
max_tokens: int = 256,
temperature: Optional[float] = 0.1,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
super().__init__(
model=model,
max_tokens=max_tokens,
temperature=temperature,
additional_kwargs=additional_kwargs,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
self._client = friendli.Friendli(token=friendli_token)
self._aclient = friendli.AsyncFriendli(token=friendli_token)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Friendli_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=friendli_modelname_to_contextsize(self.model),
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.model,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
}
return {**base_kwargs, **self.additional_kwargs}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
response = self._client.chat.completions.create(
stream=False,
**get_chat_request(messages),
**all_kwargs,
)
return ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT, content=response.choices[0].message.content
),
raw=response.__dict__,
additional_kwargs={"usage": response.usage.__dict__},
)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
response = self._client.completions.create(
prompt=prompt,
stream=False,
**all_kwargs,
)
return CompletionResponse(
text=response.choices[0].text,
additional_kwargs={"usage": response.usage.__dict__},
raw=response.__dict__,
)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
all_kwargs = self._get_all_kwargs(**kwargs)
stream = self._client.chat.completions.create(
stream=True,
**get_chat_request(messages),
**all_kwargs,
)
def gen() -> ChatResponseGen:
content = ""
for chunk in stream:
content_delta = chunk.choices[0].delta.content or ""
content += content_delta
yield ChatResponse(
message=ChatMessage(role=MessageRole.ASSISTANT, content=content),
delta=content_delta,
raw=chunk.__dict__,
)
return gen()
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
all_kwargs = self._get_all_kwargs(**kwargs)
stream = self._client.completions.create(
prompt=prompt,
stream=True,
**all_kwargs,
)
def gen() -> CompletionResponseGen:
content = ""
for chunk in stream:
content_delta = chunk.text
content += content_delta
yield CompletionResponse(
text=content,
delta=content_delta,
raw=chunk.__dict__,
)
return gen()
@llm_chat_callback()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
response = await self._aclient.chat.completions.create(
stream=False,
**get_chat_request(messages),
**all_kwargs,
)
return ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT, content=response.choices[0].message.content
),
raw=response.__dict__,
additional_kwargs={"usage": response.usage.__dict__},
)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
response = await self._aclient.completions.create(
prompt=prompt,
stream=False,
**all_kwargs,
)
return CompletionResponse(
text=response.choices[0].text,
additional_kwargs={"usage": response.usage.__dict__},
raw=response.__dict__,
)
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
all_kwargs = self._get_all_kwargs(**kwargs)
stream = await self._aclient.chat.completions.create(
stream=True,
**get_chat_request(messages),
**all_kwargs,
)
async def gen() -> ChatResponseAsyncGen:
content = ""
async for chunk in stream:
content_delta = chunk.choices[0].delta.content or ""
content += content_delta
yield ChatResponse(
message=ChatMessage(role=MessageRole.ASSISTANT, content=content),
delta=content_delta,
raw=chunk.__dict__,
)
return gen()
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
all_kwargs = self._get_all_kwargs(**kwargs)
stream = await self._aclient.completions.create(
prompt=prompt,
stream=True,
**all_kwargs,
)
async def gen() -> CompletionResponseAsyncGen:
content = ""
async for chunk in stream:
content_delta = chunk.text
content += content_delta
yield CompletionResponse(
text=content,
delta=content_delta,
raw=chunk.__dict__,
)
return gen()
| Friendli |
python | huggingface__transformers | tests/utils/test_expectations.py | {
"start": 71,
"end": 1408
} | class ____(unittest.TestCase):
def test_expectations(self):
# We use the expectations below to make sure the right expectations are found for the right devices.
# Each value is just a unique ID.
expectations = Expectations(
{
(None, None): 1,
("cuda", 8): 2,
("cuda", 7): 3,
("rocm", 8): 4,
("rocm", None): 5,
("cpu", None): 6,
("xpu", 3): 7,
}
)
def check(expected_id, device_prop):
found_id = expectations.find_expectation(device_prop)
assert found_id == expected_id, f"Expected {expected_id} for {device_prop}, found {found_id}"
# npu has no matches so should find default expectation
check(1, ("npu", None, None))
check(7, ("xpu", 3, None))
check(2, ("cuda", 8, None))
check(3, ("cuda", 7, None))
check(4, ("rocm", 9, None))
check(4, ("rocm", None, None))
check(2, ("cuda", 2, None))
# We also test that if there is no default excpectation and no match is found, a ValueError is raised.
expectations = Expectations({("cuda", 8): 1})
with self.assertRaises(ValueError):
expectations.find_expectation(("xpu", None))
| ExpectationsTest |
python | huggingface__transformers | tests/models/clip/test_image_processing_clip.py | {
"start": 3210,
"end": 5104
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = CLIPImageProcessor if is_vision_available() else None
fast_image_processing_class = CLIPImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = CLIPImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_center_crop"))
self.assertTrue(hasattr(image_processing, "center_crop"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"shortest_edge": 20})
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
| CLIPImageProcessingTest |
python | huggingface__transformers | src/transformers/trainer_utils.py | {
"start": 6512,
"end": 6688
} | class ____(NamedTuple):
predictions: np.ndarray | tuple[np.ndarray]
label_ids: np.ndarray | tuple[np.ndarray] | None
metrics: dict[str, float] | None
| PredictionOutput |
python | apache__airflow | devel-common/src/sphinx_exts/providers_commits.py | {
"start": 8742,
"end": 10057
} | class ____(BaseJinjaReferenceDirective):
"""Generate list of classes supporting OpenLineage"""
def render_content(self, *, tags: set[str] | None, header_separator: str = DEFAULT_HEADER_SEPARATOR):
package_name = os.environ.get("AIRFLOW_PACKAGE_NAME")
if not package_name:
raise ValueError("AIRFLOW_PACKAGE_NAME environment variable is not set.")
if not package_name.startswith("apache-airflow-providers-"):
raise ValueError(
"AIRFLOW_PACKAGE_NAME environment variable should start with 'apache-airflow-providers-'"
)
provider_id = package_name.replace("apache-airflow-providers-", "").replace("-", ".")
if os.environ.get("INCLUDE_COMMITS", "") == "true":
return _get_all_changes_for_package_as_rst(provider_id)
return (
"When you add --include-commits to the build command, this "
"will be replaced with the list of commits.\n\n"
)
def setup(app):
"""Setup plugin"""
app.add_directive("airflow-providers-commits", ProviderCommitsClassesDirective)
if shutil.which("git") is None:
raise RuntimeError("Git is not installed or not found in PATH")
return {"parallel_read_safe": True, "parallel_write_safe": True}
| ProviderCommitsClassesDirective |
python | dask__distributed | distributed/objects.py | {
"start": 305,
"end": 496
} | class ____(dict):
"""A dictionary of all workers and which keys that worker has."""
def _repr_html_(self):
return get_template("has_what.html.j2").render(has_what=self)
| HasWhat |
python | numpy__numpy | numpy/lib/_iotools.py | {
"start": 13437,
"end": 30876
} | class ____:
"""
Factory class for function transforming a string into another object
(int, float).
After initialization, an instance can be called to transform a string
into another object. If the string is recognized as representing a
missing value, a default value is returned.
Attributes
----------
func : function
Function used for the conversion.
default : any
Default value to return when the input corresponds to a missing
value.
type : type
Type of the output.
_status : int
Integer representing the order of the conversion.
_mapper : sequence of tuples
Sequence of tuples (dtype, function, default value) to evaluate in
order.
_locked : bool
Holds `locked` parameter.
Parameters
----------
dtype_or_func : {None, dtype, function}, optional
If a `dtype`, specifies the input data type, used to define a basic
function and a default value for missing data. For example, when
`dtype` is float, the `func` attribute is set to `float` and the
default value to `np.nan`. If a function, this function is used to
convert a string to another object. In this case, it is recommended
to give an associated default value as input.
default : any, optional
Value to return by default, that is, when the string to be
converted is flagged as missing. If not given, `StringConverter`
tries to supply a reasonable default value.
missing_values : {None, sequence of str}, optional
``None`` or sequence of strings indicating a missing value. If ``None``
then missing values are indicated by empty entries. The default is
``None``.
locked : bool, optional
Whether the StringConverter should be locked to prevent automatic
upgrade or not. Default is False.
"""
_mapper = [(nx.bool, str2bool, False),
(nx.int_, int, -1),]
# On 32-bit systems, we need to make sure that we explicitly include
# nx.int64 since ns.int_ is nx.int32.
if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
_mapper.append((nx.int64, int, -1))
_mapper.extend([(nx.float64, float, nx.nan),
(nx.complex128, complex, nx.nan + 0j),
(nx.longdouble, nx.longdouble, nx.nan),
# If a non-default dtype is passed, fall back to generic
# ones (should only be used for the converter)
(nx.integer, int, -1),
(nx.floating, float, nx.nan),
(nx.complexfloating, complex, nx.nan + 0j),
# Last, try with the string types (must be last, because
# `_mapper[-1]` is used as default in some cases)
(nx.str_, asunicode, '???'),
(nx.bytes_, asbytes, '???'),
])
@classmethod
def _getdtype(cls, val):
"""Returns the dtype of the input variable."""
return np.array(val).dtype
@classmethod
def _getsubdtype(cls, val):
"""Returns the type of the dtype of the input variable."""
return np.array(val).dtype.type
@classmethod
def _dtypeortype(cls, dtype):
"""Returns dtype for datetime64 and type of dtype otherwise."""
# This is a bit annoying. We want to return the "general" type in most
# cases (ie. "string" rather than "S10"), but we want to return the
# specific type for datetime64 (ie. "datetime64[us]" rather than
# "datetime64").
if dtype.type == np.datetime64:
return dtype
return dtype.type
@classmethod
def upgrade_mapper(cls, func, default=None):
"""
Upgrade the mapper of a StringConverter by adding a new function and
its corresponding default.
The input function (or sequence of functions) and its associated
default value (if any) is inserted in penultimate position of the
mapper. The corresponding type is estimated from the dtype of the
default value.
Parameters
----------
func : var
Function, or sequence of functions
Examples
--------
>>> import dateutil.parser
>>> import datetime
>>> dateparser = dateutil.parser.parse
>>> defaultdate = datetime.date(2000, 1, 1)
>>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
"""
# Func is a single functions
if callable(func):
cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
return
elif hasattr(func, '__iter__'):
if isinstance(func[0], (tuple, list)):
for _ in func:
cls._mapper.insert(-1, _)
return
if default is None:
default = [None] * len(func)
else:
default = list(default)
default.append([None] * (len(func) - len(default)))
for fct, dft in zip(func, default):
cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
@classmethod
def _find_map_entry(cls, dtype):
# if a converter for the specific dtype is available use that
for i, (deftype, func, default_def) in enumerate(cls._mapper):
if dtype.type == deftype:
return i, (deftype, func, default_def)
# otherwise find an inexact match
for i, (deftype, func, default_def) in enumerate(cls._mapper):
if np.issubdtype(dtype.type, deftype):
return i, (deftype, func, default_def)
raise LookupError
def __init__(self, dtype_or_func=None, default=None, missing_values=None,
locked=False):
# Defines a lock for upgrade
self._locked = bool(locked)
# No input dtype: minimal initialization
if dtype_or_func is None:
self.func = str2bool
self._status = 0
self.default = default or False
dtype = np.dtype('bool')
else:
# Is the input a np.dtype ?
try:
self.func = None
dtype = np.dtype(dtype_or_func)
except TypeError:
# dtype_or_func must be a function, then
if not callable(dtype_or_func):
errmsg = ("The input argument `dtype` is neither a"
" function nor a dtype (got '%s' instead)")
raise TypeError(errmsg % type(dtype_or_func))
# Set the function
self.func = dtype_or_func
# If we don't have a default, try to guess it or set it to
# None
if default is None:
try:
default = self.func('0')
except ValueError:
default = None
dtype = self._getdtype(default)
# find the best match in our mapper
try:
self._status, (_, func, default_def) = self._find_map_entry(dtype)
except LookupError:
# no match
self.default = default
_, func, _ = self._mapper[-1]
self._status = 0
else:
# use the found default only if we did not already have one
if default is None:
self.default = default_def
else:
self.default = default
# If the input was a dtype, set the function to the last we saw
if self.func is None:
self.func = func
# If the status is 1 (int), change the function to
# something more robust.
if self.func == self._mapper[1][1]:
if issubclass(dtype.type, np.uint64):
self.func = np.uint64
elif issubclass(dtype.type, np.int64):
self.func = np.int64
else:
self.func = lambda x: int(float(x))
# Store the list of strings corresponding to missing values.
if missing_values is None:
self.missing_values = {''}
else:
if isinstance(missing_values, str):
missing_values = missing_values.split(",")
self.missing_values = set(list(missing_values) + [''])
self._callingfunction = self._strict_call
self.type = self._dtypeortype(dtype)
self._checked = False
self._initial_default = default
def _loose_call(self, value):
try:
return self.func(value)
except ValueError:
return self.default
def _strict_call(self, value):
try:
# We check if we can convert the value using the current function
new_value = self.func(value)
# In addition to having to check whether func can convert the
# value, we also have to make sure that we don't get overflow
# errors for integers.
if self.func is int:
try:
np.array(value, dtype=self.type)
except OverflowError:
raise ValueError
# We're still here so we can now return the new value
return new_value
except ValueError:
if value.strip() in self.missing_values:
if not self._status:
self._checked = False
return self.default
raise ValueError(f"Cannot convert string '{value}'")
def __call__(self, value):
return self._callingfunction(value)
def _do_upgrade(self):
# Raise an exception if we locked the converter...
if self._locked:
errmsg = "Converter is locked and cannot be upgraded"
raise ConverterLockError(errmsg)
_statusmax = len(self._mapper)
# Complains if we try to upgrade by the maximum
_status = self._status
if _status == _statusmax:
errmsg = "Could not find a valid conversion function"
raise ConverterError(errmsg)
elif _status < _statusmax - 1:
_status += 1
self.type, self.func, default = self._mapper[_status]
self._status = _status
if self._initial_default is not None:
self.default = self._initial_default
else:
self.default = default
def upgrade(self, value):
"""
Find the best converter for a given string, and return the result.
The supplied string `value` is converted by testing different
converters in order. First the `func` method of the
`StringConverter` instance is tried, if this fails other available
converters are tried. The order in which these other converters
are tried is determined by the `_status` attribute of the instance.
Parameters
----------
value : str
The string to convert.
Returns
-------
out : any
The result of converting `value` with the appropriate converter.
"""
self._checked = True
try:
return self._strict_call(value)
except ValueError:
self._do_upgrade()
return self.upgrade(value)
def iterupgrade(self, value):
self._checked = True
if not hasattr(value, '__iter__'):
value = (value,)
_strict_call = self._strict_call
try:
for _m in value:
_strict_call(_m)
except ValueError:
self._do_upgrade()
self.iterupgrade(value)
def update(self, func, default=None, testing_value=None,
missing_values='', locked=False):
"""
Set StringConverter attributes directly.
Parameters
----------
func : function
Conversion function.
default : any, optional
Value to return by default, that is, when the string to be
converted is flagged as missing. If not given,
`StringConverter` tries to supply a reasonable default value.
testing_value : str, optional
A string representing a standard input value of the converter.
This string is used to help defining a reasonable default
value.
missing_values : {sequence of str, None}, optional
Sequence of strings indicating a missing value. If ``None``, then
the existing `missing_values` are cleared. The default is ``''``.
locked : bool, optional
Whether the StringConverter should be locked to prevent
automatic upgrade or not. Default is False.
Notes
-----
`update` takes the same parameters as the constructor of
`StringConverter`, except that `func` does not accept a `dtype`
whereas `dtype_or_func` in the constructor does.
"""
self.func = func
self._locked = locked
# Don't reset the default to None if we can avoid it
if default is not None:
self.default = default
self.type = self._dtypeortype(self._getdtype(default))
else:
try:
tester = func(testing_value or '1')
except (TypeError, ValueError):
tester = None
self.type = self._dtypeortype(self._getdtype(tester))
# Add the missing values to the existing set or clear it.
if missing_values is None:
# Clear all missing values even though the ctor initializes it to
# set(['']) when the argument is None.
self.missing_values = set()
else:
if not np.iterable(missing_values):
missing_values = [missing_values]
if not all(isinstance(v, str) for v in missing_values):
raise TypeError("missing_values must be strings or unicode")
self.missing_values.update(missing_values)
def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
"""
Convenience function to create a `np.dtype` object.
The function processes the input `dtype` and matches it with the given
names.
Parameters
----------
ndtype : var
Definition of the dtype. Can be any string or dictionary recognized
by the `np.dtype` function, or a sequence of types.
names : str or sequence, optional
Sequence of strings to use as field names for a structured dtype.
For convenience, `names` can be a string of a comma-separated list
of names.
defaultfmt : str, optional
Format string used to define missing names, such as ``"f%i"``
(default) or ``"fields_%02i"``.
validationargs : optional
A series of optional arguments used to initialize a
`NameValidator`.
Examples
--------
>>> import numpy as np
>>> np.lib._iotools.easy_dtype(float)
dtype('float64')
>>> np.lib._iotools.easy_dtype("i4, f8")
dtype([('f0', '<i4'), ('f1', '<f8')])
>>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
dtype([('field_000', '<i4'), ('field_001', '<f8')])
>>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
>>> np.lib._iotools.easy_dtype(float, names="a,b,c")
dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
"""
try:
ndtype = np.dtype(ndtype)
except TypeError:
validate = NameValidator(**validationargs)
nbfields = len(ndtype)
if names is None:
names = [''] * len(ndtype)
elif isinstance(names, str):
names = names.split(",")
names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
ndtype = np.dtype({"formats": ndtype, "names": names})
else:
# Explicit names
if names is not None:
validate = NameValidator(**validationargs)
if isinstance(names, str):
names = names.split(",")
# Simple dtype: repeat to match the nb of names
if ndtype.names is None:
formats = tuple([ndtype.type] * len(names))
names = validate(names, defaultfmt=defaultfmt)
ndtype = np.dtype(list(zip(names, formats)))
# Structured dtype: just validate the names as needed
else:
ndtype.names = validate(names, nbfields=len(ndtype.names),
defaultfmt=defaultfmt)
# No implicit names
elif ndtype.names is not None:
validate = NameValidator(**validationargs)
# Default initial names : should we change the format ?
numbered_names = tuple(f"f{i}" for i in range(len(ndtype.names)))
if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
ndtype.names = validate([''] * len(ndtype.names),
defaultfmt=defaultfmt)
# Explicit initial names : just validate
else:
ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
return ndtype
| StringConverter |
python | keras-team__keras | keras/src/metrics/iou_metrics.py | {
"start": 19272,
"end": 23616
} | class ____(IoU):
"""Computes the Intersection-Over-Union metric for one-hot encoded labels.
Formula:
```python
iou = true_positives / (true_positives + false_positives + false_negatives)
```
Intersection-Over-Union is a common evaluation metric for semantic image
segmentation.
To compute IoUs, the predictions are accumulated in a confusion matrix,
weighted by `sample_weight` and the metric is then calculated from it.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
This class can be used to compute IoU for multi-class classification tasks
where the labels are one-hot encoded (the last axis should have one
dimension per class). Note that the predictions should also have the same
shape. To compute the IoU, first the labels and predictions are converted
back into integer format by taking the argmax over the class axis. Then the
same computation steps as for the base `IoU` class apply.
Note, if there is only one channel in the labels and predictions, this class
is the same as class `IoU`. In this case, use `IoU` instead.
Also, make sure that `num_classes` is equal to the number of classes in the
data, to avoid a "labels out of bound" error when the confusion matrix is
computed.
Args:
num_classes: The possible number of labels the prediction task can have.
target_class_ids: A tuple or list of target class ids for which the
metric is returned. To compute IoU for a specific class, a list
(or tuple) of a single id value should be provided.
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
ignore_class: Optional integer. The ID of a class to be ignored during
metric computation. This is useful, for example, in segmentation
problems featuring a "void" class (commonly -1 or 255) in
segmentation maps. By default (`ignore_class=None`), all classes are
considered.
sparse_y_pred: Whether predictions are encoded using integers or
dense floating point vectors. If `False`, the `argmax` function
is used to determine each sample's most likely associated label.
axis: (Optional) The dimension containing the logits. Defaults to `-1`.
Example:
>>> y_true = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0], [1, 0, 0]])
>>> y_pred = np.array([[0.2, 0.3, 0.5], [0.1, 0.2, 0.7], [0.5, 0.3, 0.1],
... [0.1, 0.4, 0.5]])
>>> sample_weight = [0.1, 0.2, 0.3, 0.4]
>>> m = keras.metrics.OneHotIoU(num_classes=3, target_class_ids=[0, 2])
>>> m.update_state(
... y_true=y_true, y_pred=y_pred, sample_weight=sample_weight)
>>> # cm = [[0, 0, 0.2+0.4],
>>> # [0.3, 0, 0],
>>> # [0, 0, 0.1]]
>>> # sum_row = [0.3, 0, 0.7], sum_col = [0.6, 0.3, 0.1]
>>> # true_positives = [0, 0, 0.1]
>>> # single_iou = true_positives / (sum_row + sum_col - true_positives))
>>> # mean_iou = (0 / (0.3 + 0.6 - 0) + 0.1 / (0.7 + 0.1 - 0.1)) / 2
>>> m.result()
0.071
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[keras.metrics.OneHotIoU(
num_classes=3,
target_class_id=[1]
)]
)
```
"""
def __init__(
self,
num_classes,
target_class_ids,
name=None,
dtype=None,
ignore_class=None,
sparse_y_pred=False,
axis=-1,
):
super().__init__(
num_classes=num_classes,
target_class_ids=target_class_ids,
name=name,
dtype=dtype,
ignore_class=ignore_class,
sparse_y_true=False,
sparse_y_pred=sparse_y_pred,
axis=axis,
)
def get_config(self):
return {
"num_classes": self.num_classes,
"target_class_ids": self.target_class_ids,
"name": self.name,
"dtype": self._dtype,
"ignore_class": self.ignore_class,
"sparse_y_pred": self.sparse_y_pred,
"axis": self.axis,
}
@keras_export("keras.metrics.OneHotMeanIoU")
| OneHotIoU |
python | coleifer__peewee | peewee.py | {
"start": 174202,
"end": 174969
} | class ____(_BaseFormattedField):
field_type = 'DATE'
formats = [
'%Y-%m-%d',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
]
def adapt(self, value):
if value and isinstance(value, basestring):
pp = lambda x: x.date()
return format_date_time(value, self.formats, pp)
elif value and isinstance(value, datetime.datetime):
return value.date()
return value
def to_timestamp(self):
return self.model._meta.database.to_timestamp(self)
def truncate(self, part):
return self.model._meta.database.truncate_date(part, self)
year = property(_date_part('year'))
month = property(_date_part('month'))
day = property(_date_part('day'))
| DateField |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/coverage/erase.py | {
"start": 817,
"end": 916
} | class ____(CoverageConfig):
"""Configuration for the coverage erase command."""
| CoverageEraseConfig |
python | PyCQA__pylint | tests/functional/ext/docparams/return/missing_return_doc_Google.py | {
"start": 2818,
"end": 3119
} | class ____:
"""test_ignores_ignored_argument_names_google
Example of a method documenting the return type that an
implementation should return.
"""
def foo_method(self, arg, _):
"""docstring ...
Args:
arg (int): An argument.
"""
pass
| Foo |
python | django__django | tests/transactions/tests.py | {
"start": 10436,
"end": 10751
} | class ____(AtomicTests):
"""
All basic tests for atomic should also pass within an existing transaction.
"""
def setUp(self):
self.atomic = transaction.atomic()
self.atomic.__enter__()
def tearDown(self):
self.atomic.__exit__(*sys.exc_info())
| AtomicInsideTransactionTests |
python | jina-ai__jina | jina/serve/consensus/add_voter/pb/add_voter_pb2_grpc.py | {
"start": 3129,
"end": 5398
} | class ____(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def AddVoter(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
'/RaftAdmin/AddVoter',
add__voter__pb2.AddVoterRequest.SerializeToString,
add__voter__pb2.Future.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def Await(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
'/RaftAdmin/Await',
add__voter__pb2.Future.SerializeToString,
add__voter__pb2.AwaitResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
@staticmethod
def Forget(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
'/RaftAdmin/Forget',
add__voter__pb2.Future.SerializeToString,
add__voter__pb2.ForgetResponse.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
| RaftAdmin |
python | simonw__datasette | datasette/utils/__init__.py | {
"start": 30886,
"end": 31499
} | class ____(ConnectionProblem):
pass
def check_connection(conn):
tables = [
r[0]
for r in conn.execute(
"select name from sqlite_master where type='table'"
).fetchall()
]
for table in tables:
try:
conn.execute(
f"PRAGMA table_info({escape_sqlite(table)});",
)
except sqlite3.OperationalError as e:
if e.args[0] == "no such module: VirtualSpatialIndex":
raise SpatialiteConnectionProblem(e)
else:
raise ConnectionProblem(e)
| SpatialiteConnectionProblem |
python | wandb__wandb | wandb/vendor/pygments/lexers/whiley.py | {
"start": 425,
"end": 4012
} | class ____(RegexLexer):
"""
Lexer for the Whiley programming language.
.. versionadded:: 2.2
"""
name = 'Whiley'
filenames = ['*.whiley']
aliases = ['whiley']
mimetypes = ['text/x-whiley']
# See the language specification:
# http://whiley.org/download/WhileyLanguageSpec.pdf
tokens = {
'root': [
# Whitespace
(r'\s+', Text),
# Comments
(r'//.*', Comment.Single),
# don't parse empty comment as doc comment
(r'/\*\*/', Comment.Multiline),
(r'(?s)/\*\*.*?\*/', String.Doc),
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Keywords
(words((
'if', 'else', 'while', 'for', 'do', 'return',
'switch', 'case', 'default', 'break', 'continue',
'requires', 'ensures', 'where', 'assert', 'assume',
'all', 'no', 'some', 'in', 'is', 'new',
'throw', 'try', 'catch', 'debug', 'skip', 'fail',
'finite', 'total'), suffix=r'\b'), Keyword.Reserved),
(words((
'function', 'method', 'public', 'private', 'protected',
'export', 'native'), suffix=r'\b'), Keyword.Declaration),
# "constant" & "type" are not keywords unless used in declarations
(r'(constant|type)(\s+)([a-zA-Z_]\w*)(\s+)(is)\b',
bygroups(Keyword.Declaration, Text, Name, Text, Keyword.Reserved)),
(r'(true|false|null)\b', Keyword.Constant),
(r'(bool|byte|int|real|any|void)\b', Keyword.Type),
# "from" is not a keyword unless used with import
(r'(import)(\s+)(\*)([^\S\n]+)(from)\b',
bygroups(Keyword.Namespace, Text, Punctuation, Text, Keyword.Namespace)),
(r'(import)(\s+)([a-zA-Z_]\w*)([^\S\n]+)(from)\b',
bygroups(Keyword.Namespace, Text, Name, Text, Keyword.Namespace)),
(r'(package|import)\b', Keyword.Namespace),
# standard library: https://github.com/Whiley/WhileyLibs/
(words((
# types defined in whiley.lang.Int
'i8', 'i16', 'i32', 'i64',
'u8', 'u16', 'u32', 'u64',
'uint', 'nat',
# whiley.lang.Any
'toString'), suffix=r'\b'), Name.Builtin),
# byte literal
(r'[01]+b', Number.Bin),
# decimal literal
(r'[0-9]+\.[0-9]+', Number.Float),
# match "1." but not ranges like "3..5"
(r'[0-9]+\.(?!\.)', Number.Float),
# integer literal
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
# character literal
(r"""'[^\\]'""", String.Char),
(r"""(')(\\['"\\btnfr])(')""",
bygroups(String.Char, String.Escape, String.Char)),
# string literal
(r'"', String, 'string'),
# operators and punctuation
(r'[{}()\[\],.;]', Punctuation),
(u'[+\\-*/%&|<>^!~@=:?'
# unicode operators
u'\u2200\u2203\u2205\u2282\u2286\u2283\u2287'
u'\u222A\u2229\u2264\u2265\u2208\u2227\u2228'
u']', Operator),
# identifier
(r'[a-zA-Z_]\w*', Name),
],
'string': [
(r'"', String, '#pop'),
(r'\\[btnfr]', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\.', String),
(r'[^\\"]+', String),
],
}
| WhileyLexer |
python | tox-dev__tox | src/tox/tox_env/python/api.py | {
"start": 546,
"end": 686
} | class ____(NamedTuple):
major: int
minor: int
micro: int
releaselevel: str
serial: int
@dataclass(frozen=True)
| VersionInfo |
python | huggingface__transformers | examples/modular-transformers/modeling_super.py | {
"start": 1981,
"end": 3737
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: SuperConfig, device=None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_parameters") and isinstance(config.rope_parameters, dict):
self.rope_type = config.rope_parameters.get("rope_type", config.rope_parameters.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
| SuperRotaryEmbedding |
python | huggingface__transformers | src/transformers/models/vitpose/image_processing_vitpose.py | {
"start": 1619,
"end": 13173
} | class ____(ImagesKwargs, total=False):
r"""
do_affine_transform (`bool`, *optional*):
Whether to apply an affine transformation to the input images based on the bounding boxes.
normalize_factor (`float`, *optional*, defaults to `200.0`):
Width and height scale factor used for normalization when computing center and scale from bounding boxes.
"""
do_affine_transform: Optional[bool]
normalize_factor: Optional[float]
# inspired by https://github.com/ViTAE-Transformer/ViTPose/blob/d5216452796c90c6bc29f5c5ec0bdba94366768a/mmpose/datasets/datasets/base/kpt_2d_sview_rgb_img_top_down_dataset.py#L132
def box_to_center_and_scale(
box: Union[tuple, list, np.ndarray],
image_width: int,
image_height: int,
normalize_factor: float = 200.0,
padding_factor: float = 1.25,
):
"""
Encodes a bounding box in COCO format into (center, scale).
Args:
box (`Tuple`, `List`, or `np.ndarray`):
Bounding box in COCO format (top_left_x, top_left_y, width, height).
image_width (`int`):
Image width.
image_height (`int`):
Image height.
normalize_factor (`float`):
Width and height scale factor.
padding_factor (`float`):
Bounding box padding factor.
Returns:
tuple: A tuple containing center and scale.
- `np.ndarray` [float32](2,): Center of the bbox (x, y).
- `np.ndarray` [float32](2,): Scale of the bbox width & height.
"""
top_left_x, top_left_y, width, height = box[:4]
aspect_ratio = image_width / image_height
center = np.array([top_left_x + width * 0.5, top_left_y + height * 0.5], dtype=np.float32)
if width > aspect_ratio * height:
height = width * 1.0 / aspect_ratio
elif width < aspect_ratio * height:
width = height * aspect_ratio
scale = np.array([width / normalize_factor, height / normalize_factor], dtype=np.float32)
scale = scale * padding_factor
return center, scale
def coco_to_pascal_voc(bboxes: np.ndarray) -> np.ndarray:
"""
Converts bounding boxes from the COCO format to the Pascal VOC format.
In other words, converts from (top_left_x, top_left_y, width, height) format
to (top_left_x, top_left_y, bottom_right_x, bottom_right_y).
Args:
bboxes (`np.ndarray` of shape `(batch_size, 4)):
Bounding boxes in COCO format.
Returns:
`np.ndarray` of shape `(batch_size, 4) in Pascal VOC format.
"""
bboxes[:, 2] = bboxes[:, 2] + bboxes[:, 0] - 1
bboxes[:, 3] = bboxes[:, 3] + bboxes[:, 1] - 1
return bboxes
def get_keypoint_predictions(heatmaps: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
"""Get keypoint predictions from score maps.
Args:
heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width)`):
Model predicted heatmaps.
Returns:
tuple: A tuple containing aggregated results.
- coords (`np.ndarray` of shape `(batch_size, num_keypoints, 2)`):
Predicted keypoint location.
- scores (`np.ndarray` of shape `(batch_size, num_keypoints, 1)`):
Scores (confidence) of the keypoints.
"""
if not isinstance(heatmaps, np.ndarray):
raise TypeError("Heatmaps should be np.ndarray")
if heatmaps.ndim != 4:
raise ValueError("Heatmaps should be 4-dimensional")
batch_size, num_keypoints, _, width = heatmaps.shape
heatmaps_reshaped = heatmaps.reshape((batch_size, num_keypoints, -1))
idx = np.argmax(heatmaps_reshaped, 2).reshape((batch_size, num_keypoints, 1))
scores = np.amax(heatmaps_reshaped, 2).reshape((batch_size, num_keypoints, 1))
preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
preds[:, :, 0] = preds[:, :, 0] % width
preds[:, :, 1] = preds[:, :, 1] // width
preds = np.where(np.tile(scores, (1, 1, 2)) > 0.0, preds, -1)
return preds, scores
def post_dark_unbiased_data_processing(coords: np.ndarray, batch_heatmaps: np.ndarray, kernel: int = 3) -> np.ndarray:
"""DARK post-pocessing. Implemented by unbiased_data_processing.
Paper references:
- Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
- Zhang et al. Distribution-Aware Coordinate Representation for Human Pose Estimation (CVPR 2020).
Args:
coords (`np.ndarray` of shape `(num_persons, num_keypoints, 2)`):
Initial coordinates of human pose.
batch_heatmaps (`np.ndarray` of shape `(batch_size, num_keypoints, height, width)`):
Batched heatmaps as predicted by the model.
A batch_size of 1 is used for the bottom up paradigm where all persons share the same heatmap.
A batch_size of `num_persons` is used for the top down paradigm where each person has its own heatmaps.
kernel (`int`, *optional*, defaults to 3):
Gaussian kernel size (K) for modulation.
Returns:
`np.ndarray` of shape `(num_persons, num_keypoints, 2)` ):
Refined coordinates.
"""
batch_size, num_keypoints, height, width = batch_heatmaps.shape
num_coords = coords.shape[0]
if not (batch_size == 1 or batch_size == num_coords):
raise ValueError("The batch size of heatmaps should be 1 or equal to the batch size of coordinates.")
radius = int((kernel - 1) // 2)
batch_heatmaps = np.array(
[
[gaussian_filter(heatmap, sigma=0.8, radius=(radius, radius), axes=(0, 1)) for heatmap in heatmaps]
for heatmaps in batch_heatmaps
]
)
batch_heatmaps = np.clip(batch_heatmaps, 0.001, 50)
batch_heatmaps = np.log(batch_heatmaps)
batch_heatmaps_pad = np.pad(batch_heatmaps, ((0, 0), (0, 0), (1, 1), (1, 1)), mode="edge").flatten()
# calculate indices for coordinates
index = coords[..., 0] + 1 + (coords[..., 1] + 1) * (width + 2)
index += (width + 2) * (height + 2) * np.arange(0, batch_size * num_keypoints).reshape(-1, num_keypoints)
index = index.astype(int).reshape(-1, 1)
i_ = batch_heatmaps_pad[index]
ix1 = batch_heatmaps_pad[index + 1]
iy1 = batch_heatmaps_pad[index + width + 2]
ix1y1 = batch_heatmaps_pad[index + width + 3]
ix1_y1_ = batch_heatmaps_pad[index - width - 3]
ix1_ = batch_heatmaps_pad[index - 1]
iy1_ = batch_heatmaps_pad[index - 2 - width]
# calculate refined coordinates using Newton's method
dx = 0.5 * (ix1 - ix1_)
dy = 0.5 * (iy1 - iy1_)
derivative = np.concatenate([dx, dy], axis=1)
derivative = derivative.reshape(num_coords, num_keypoints, 2, 1)
dxx = ix1 - 2 * i_ + ix1_
dyy = iy1 - 2 * i_ + iy1_
dxy = 0.5 * (ix1y1 - ix1 - iy1 + i_ + i_ - ix1_ - iy1_ + ix1_y1_)
hessian = np.concatenate([dxx, dxy, dxy, dyy], axis=1)
hessian = hessian.reshape(num_coords, num_keypoints, 2, 2)
hessian = np.linalg.inv(hessian + np.finfo(np.float32).eps * np.eye(2))
coords -= np.einsum("ijmn,ijnk->ijmk", hessian, derivative).squeeze()
return coords
def transform_preds(coords: np.ndarray, center: np.ndarray, scale: np.ndarray, output_size: np.ndarray) -> np.ndarray:
"""Get final keypoint predictions from heatmaps and apply scaling and
translation to map them back to the image.
Note:
num_keypoints: K
Args:
coords (`np.ndarray` of shape `(num_keypoints, ndims)`):
* If ndims=2, corrds are predicted keypoint location.
* If ndims=4, corrds are composed of (x, y, scores, tags)
* If ndims=5, corrds are composed of (x, y, scores, tags,
flipped_tags)
center (`np.ndarray` of shape `(2,)`):
Center of the bounding box (x, y).
scale (`np.ndarray` of shape `(2,)`):
Scale of the bounding box wrt original image of width and height.
output_size (`np.ndarray` of shape `(2,)`):
Size of the destination heatmaps in (height, width) format.
Returns:
np.ndarray: Predicted coordinates in the images.
"""
if coords.shape[1] not in (2, 4, 5):
raise ValueError("Coordinates need to have either 2, 4 or 5 dimensions.")
if len(center) != 2:
raise ValueError("Center needs to have 2 elements, one for x and one for y.")
if len(scale) != 2:
raise ValueError("Scale needs to consist of a width and height")
if len(output_size) != 2:
raise ValueError("Output size needs to consist of a height and width")
# Recover the scale which is normalized by a factor of 200.
scale = scale * 200.0
# We use unbiased data processing
scale_y = scale[1] / (output_size[0] - 1.0)
scale_x = scale[0] / (output_size[1] - 1.0)
target_coords = np.ones_like(coords)
target_coords[:, 0] = coords[:, 0] * scale_x + center[0] - scale[0] * 0.5
target_coords[:, 1] = coords[:, 1] * scale_y + center[1] - scale[1] * 0.5
return target_coords
def get_warp_matrix(theta: float, size_input: np.ndarray, size_dst: np.ndarray, size_target: np.ndarray):
"""
Calculate the transformation matrix under the constraint of unbiased. Paper ref: Huang et al. The Devil is in the
Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020).
Source: https://github.com/open-mmlab/mmpose/blob/master/mmpose/core/post_processing/post_transforms.py
Args:
theta (`float`):
Rotation angle in degrees.
size_input (`np.ndarray`):
Size of input image [width, height].
size_dst (`np.ndarray`):
Size of output image [width, height].
size_target (`np.ndarray`):
Size of ROI in input plane [w, h].
Returns:
`np.ndarray`: A matrix for transformation.
"""
theta = np.deg2rad(theta)
matrix = np.zeros((2, 3), dtype=np.float32)
scale_x = size_dst[0] / size_target[0]
scale_y = size_dst[1] / size_target[1]
matrix[0, 0] = math.cos(theta) * scale_x
matrix[0, 1] = -math.sin(theta) * scale_x
matrix[0, 2] = scale_x * (
-0.5 * size_input[0] * math.cos(theta) + 0.5 * size_input[1] * math.sin(theta) + 0.5 * size_target[0]
)
matrix[1, 0] = math.sin(theta) * scale_y
matrix[1, 1] = math.cos(theta) * scale_y
matrix[1, 2] = scale_y * (
-0.5 * size_input[0] * math.sin(theta) - 0.5 * size_input[1] * math.cos(theta) + 0.5 * size_target[1]
)
return matrix
def scipy_warp_affine(src, M, size):
"""
This function implements cv2.warpAffine function using affine_transform in scipy. See https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.affine_transform.html and https://docs.opencv.org/4.x/d4/d61/tutorial_warp_affine.html for more details.
Note: the original implementation of cv2.warpAffine uses cv2.INTER_LINEAR.
"""
channels = [src[..., i] for i in range(src.shape[-1])]
# Convert to a 3x3 matrix used by SciPy
M_scipy = np.vstack([M, [0, 0, 1]])
# If you have a matrix for the ‘push’ transformation, use its inverse (numpy.linalg.inv) in this function.
M_inv = inv(M_scipy)
M_inv[0, 0], M_inv[0, 1], M_inv[1, 0], M_inv[1, 1], M_inv[0, 2], M_inv[1, 2] = (
M_inv[1, 1],
M_inv[1, 0],
M_inv[0, 1],
M_inv[0, 0],
M_inv[1, 2],
M_inv[0, 2],
)
new_src = [affine_transform(channel, M_inv, output_shape=size, order=1) for channel in channels]
new_src = np.stack(new_src, axis=-1)
return new_src
| VitPoseImageProcessorKwargs |
python | huggingface__transformers | src/transformers/models/clap/modeling_clap.py | {
"start": 50056,
"end": 50760
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.align.modeling_align.AlignTextAttention with Align->Clap
| ClapTextSelfOutput |
python | google__jax | jax/_src/numpy/index_tricks.py | {
"start": 6550,
"end": 8735
} | class ____(_AxisConcat):
"""Concatenate slices, scalars and array-like objects along the first axis.
LAX-backend implementation of :obj:`numpy.r_`.
See Also:
``jnp.c_``: Concatenates slices, scalars and array-like objects along the last axis.
Examples:
Passing slices in the form ``[start:stop:step]`` generates ``jnp.arange`` objects:
>>> jnp.r_[-1:5:1, 0, 0, jnp.array([1,2,3])]
Array([-1, 0, 1, 2, 3, 4, 0, 0, 1, 2, 3], dtype=int32)
An imaginary value for ``step`` will create a ``jnp.linspace`` object instead,
which includes the right endpoint:
>>> jnp.r_[-1:1:6j, 0, jnp.array([1,2,3])] # doctest: +SKIP
Array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 1. , 2. , 3. ], dtype=float32)
Use a string directive of the form ``"axis,dims,trans1d"`` as the first argument to
specify concatenation axis, minimum number of dimensions, and the position of the
upgraded array's original dimensions in the resulting array's shape tuple:
>>> jnp.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, 2D output
Array([[1, 2, 3],
[4, 5, 6]], dtype=int32)
>>> jnp.r_['0,2,0', [1,2,3], [4,5,6]] # push last input axis to the front
Array([[1],
[2],
[3],
[4],
[5],
[6]], dtype=int32)
Negative values for ``trans1d`` offset the last axis towards the start
of the shape tuple:
>>> jnp.r_['0,2,-2', [1,2,3], [4,5,6]]
Array([[1],
[2],
[3],
[4],
[5],
[6]], dtype=int32)
Use the special directives ``"r"`` or ``"c"`` as the first argument on flat inputs
to create an array with an extra row or column axis, respectively:
>>> jnp.r_['r',[1,2,3], [4,5,6]]
Array([[1, 2, 3, 4, 5, 6]], dtype=int32)
>>> jnp.r_['c',[1,2,3], [4,5,6]]
Array([[1],
[2],
[3],
[4],
[5],
[6]], dtype=int32)
For higher-dimensional inputs (``dim >= 2``), both directives ``"r"`` and ``"c"``
give the same result.
"""
axis = 0
ndmin = 1
trans1d = -1
op_name = "r_"
r_ = export(RClass())
| RClass |
python | getsentry__sentry | tests/sentry/services/test_organization_actions.py | {
"start": 4957,
"end": 7335
} | class ____(TestCase):
def setUp(self) -> None:
self.org: Organization = self.create_organization(
slug="sluggy", name="barfoo", status=OrganizationStatus.ACTIVE
)
def test_mark_for_deletion_and_outbox_generation(self) -> None:
org_before_update = Organization.objects.get(id=self.org.id)
with outbox_context(flush=False):
updated_org = mark_organization_as_pending_deletion_with_outbox_message(
org_id=self.org.id
)
assert updated_org
self.org.refresh_from_db()
assert updated_org.status == self.org.status == OrganizationStatus.PENDING_DELETION
assert updated_org.name == self.org.name == org_before_update.name
assert updated_org.slug == self.org.slug == org_before_update.slug
assert_outbox_update_message_exists(self.org, 1)
def test_mark_for_deletion_on_already_deleted_org(self) -> None:
self.org.status = OrganizationStatus.PENDING_DELETION
self.org.save()
org_before_update = Organization.objects.get(id=self.org.id)
with outbox_context(flush=False):
updated_org = mark_organization_as_pending_deletion_with_outbox_message(
org_id=self.org.id
)
assert updated_org is None
self.org.refresh_from_db()
assert self.org.status == org_before_update.status
assert self.org.name == org_before_update.name
assert self.org.slug == org_before_update.slug
assert_outbox_update_message_exists(self.org, 0)
def test_mark_for_deletion_on_relocation_pending(self) -> None:
self.org.status = OrganizationStatus.RELOCATION_PENDING_APPROVAL
self.org.save()
org_before_update = Organization.objects.get(id=self.org.id)
with outbox_context(flush=False):
updated_org = mark_organization_as_pending_deletion_with_outbox_message(
org_id=self.org.id
)
assert updated_org, "Should update the org"
self.org.refresh_from_db()
assert self.org.status == OrganizationStatus.PENDING_DELETION
assert self.org.name == org_before_update.name
assert self.org.slug == org_before_update.slug
assert_outbox_update_message_exists(self.org, 1)
| OrganizationMarkOrganizationAsPendingDeletionWithOutboxMessageTest |
python | huggingface__transformers | src/transformers/models/llama4/modeling_llama4.py | {
"start": 28534,
"end": 29988
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size (batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
| Llama4CausalLMOutputWithPast |
python | Textualize__textual | tests/css/test_parse.py | {
"start": 34080,
"end": 35003
} | class ____:
@pytest.mark.parametrize(
"css_value, styles_value",
[
["-0.2", 0.0],
["0.4", 0.4],
["1.3", 1.0],
["-20%", 0.0],
["25%", 0.25],
["128%", 1.0],
],
)
def test_opacity_to_styles(self, css_value, styles_value):
css = f"#some-widget {{ text-opacity: {css_value} }}"
stylesheet = Stylesheet()
stylesheet.add_source(css)
assert stylesheet.rules[0].styles.text_opacity == styles_value
assert not stylesheet.rules[0].errors
def test_opacity_invalid_value(self):
css = "#some-widget { text-opacity: 123x }"
stylesheet = Stylesheet()
with pytest.raises(StylesheetParseError):
stylesheet.add_source(css)
stylesheet.parse()
rules = stylesheet._parse_rules(css, "foo")
assert rules[0].errors
| TestParseOpacity |
python | kamyu104__LeetCode-Solutions | Python/people-whose-list-of-favorite-companies-is-not-a-subset-of-another-list.py | {
"start": 1848,
"end": 2548
} | class ____(object):
def peopleIndexes(self, favoriteCompanies):
"""
:type favoriteCompanies: List[List[str]]
:rtype: List[int]
"""
lookup, comps = {}, []
for cs in favoriteCompanies:
comps.append(set())
for c in cs:
if c not in lookup:
lookup[c] = len(lookup)
comps[-1].add(lookup[c])
union_find = UnionFind(comps)
for i in xrange(len(comps)):
for j in xrange(len(comps)):
if j == i:
continue
union_find.union_set(i, j)
return [x for i, x in enumerate(union_find.set) if x == i]
| Solution2 |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 71985,
"end": 72219
} | class ____(Elemwise):
_parameters = ["frame"]
operation = staticmethod(_return_input)
_preserves_partitioning_information = True
def _divisions(self):
return (None,) * (self.frame.npartitions + 1)
| ClearDivisions |
python | sympy__sympy | sympy/simplify/hyperexpand.py | {
"start": 34461,
"end": 35163
} | class ____:
"""
This class holds a collection of meijer g formulae.
"""
def __init__(self):
formulae = []
add_meijerg_formulae(formulae)
self.formulae = defaultdict(list)
for formula in formulae:
self.formulae[formula.func.signature].append(formula)
self.formulae = dict(self.formulae)
def lookup_origin(self, func):
""" Try to find a formula that matches func. """
if func.signature not in self.formulae:
return None
for formula in self.formulae[func.signature]:
res = formula.try_instantiate(func)
if res is not None:
return res
| MeijerFormulaCollection |
python | pandas-dev__pandas | pandas/tests/indexes/datetimes/test_indexing.py | {
"start": 7359,
"end": 13794
} | class ____:
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_take_dont_lose_meta(self, tzstr):
rng = date_range("1/1/2000", periods=20, tz=tzstr)
result = rng.take(range(5))
assert result.tz == rng.tz
assert result.freq == rng.freq
def test_take_nan_first_datetime(self):
index = DatetimeIndex([pd.NaT, Timestamp("20130101"), Timestamp("20130102")])
result = index.take([-1, 0, 1])
expected = DatetimeIndex([index[-1], index[0], index[1]])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
def test_take(self, tz):
# GH#10295
idx = date_range("2011-01-01", "2011-01-31", freq="D", name="idx", tz=tz)
result = idx.take([0])
assert result == Timestamp("2011-01-01", tz=idx.tz)
result = idx.take([0, 1, 2])
expected = date_range(
"2011-01-01", "2011-01-03", freq="D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = date_range(
"2011-01-01", "2011-01-05", freq="2D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = date_range(
"2011-01-08", "2011-01-02", freq="-3D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = DatetimeIndex(
["2011-01-04", "2011-01-03", "2011-01-06"],
dtype=idx.dtype,
freq=None,
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(
["2011-01-29", "2011-01-03", "2011-01-06"],
dtype=idx.dtype,
freq=None,
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, mode="clip")
# TODO: This method came from test_datetime; de-dup with version above
@pytest.mark.parametrize("tz", [None, "US/Eastern", "Asia/Tokyo"])
def test_take2(self, tz):
dates = [
datetime(2010, 1, 1, 14),
datetime(2010, 1, 1, 15),
datetime(2010, 1, 1, 17),
datetime(2010, 1, 1, 21),
]
idx = date_range(
start="2010-01-01 09:00",
end="2010-02-01 09:00",
freq="h",
tz=tz,
name="idx",
)
expected = DatetimeIndex(dates, freq=None, name="idx", dtype=idx.dtype)
taken1 = idx.take([5, 6, 8, 12])
taken2 = idx[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, DatetimeIndex)
assert taken.freq is None
assert taken.tz == expected.tz
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx")
result = idx.take(np.array([1, 0, -1]))
expected = DatetimeIndex(["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx")
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx")
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = DatetimeIndex(["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx")
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "out of bounds"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
def test_take_fill_value_with_timezone(self):
idx = DatetimeIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", tz="US/Eastern"
)
result = idx.take(np.array([1, 0, -1]))
expected = DatetimeIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", tz="US/Eastern"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = DatetimeIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", tz="US/Eastern"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = DatetimeIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", tz="US/Eastern"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "out of bounds"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
| TestTake |
python | walkccc__LeetCode | solutions/217. Contains Duplicate/217.py | {
"start": 0,
"end": 111
} | class ____:
def containsDuplicate(self, nums: list[int]) -> bool:
return len(nums) != len(set(nums))
| Solution |
python | pypa__pip | src/pip/_internal/cache.py | {
"start": 3716,
"end": 6186
} | class ____(Cache):
"""A cache of wheels for future installs."""
def __init__(self, cache_dir: str) -> None:
super().__init__(cache_dir)
def get_path_for_link(self, link: Link) -> str:
"""Return a directory to store cached wheels for link
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were
not unique. E.g. ./package might have dozens of installs done for it
and build a version of 0.0...and if we built and cached a wheel, we'd
end up using the same wheel even if the source has been edited.
:param link: The link of the sdist for which this will cache wheels.
"""
parts = self._get_cache_path_parts(link)
assert self.cache_dir
# Store wheels within the root cache_dir
return os.path.join(self.cache_dir, "wheels", *parts)
def get(
self,
link: Link,
package_name: str | None,
supported_tags: list[Tag],
) -> Link:
candidates = []
if not package_name:
return link
canonical_package_name = canonicalize_name(package_name)
for wheel_name, wheel_dir in self._get_candidates(link, canonical_package_name):
try:
wheel = Wheel(wheel_name)
except InvalidWheelFilename:
continue
if wheel.name != canonical_package_name:
logger.debug(
"Ignoring cached wheel %s for %s as it "
"does not match the expected distribution name %s.",
wheel_name,
link,
package_name,
)
continue
if not wheel.supported(supported_tags):
# Built for a different python/arch/etc
continue
candidates.append(
(
wheel.support_index_min(supported_tags),
wheel_name,
wheel_dir,
)
)
if not candidates:
return link
_, wheel_name, wheel_dir = min(candidates)
return Link(path_to_url(os.path.join(wheel_dir, wheel_name)))
| SimpleWheelCache |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_rich_string10.py | {
"start": 315,
"end": 1027
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("rich_string10.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({"bold": 1})
italic = workbook.add_format({"italic": 1})
worksheet.write("A1", "Foo", bold)
worksheet.write("A2", "Bar", italic)
worksheet.write_rich_string("A3", " a", bold, "bc", "defg ")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/legacy_rnn/rnn_cell_impl.py | {
"start": 32870,
"end": 44823
} | class ____(LayerRNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on (Gers et al., 1999).
The peephole implementation is based on (Sak et al., 2014).
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
`tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
better performance on CPU.
References:
Long short-term memory recurrent neural network architectures for large
scale acoustic modeling:
[Sak et al., 2014]
(https://www.isca-speech.org/archive/interspeech_2014/i14_0338.html)
([pdf]
(https://www.isca-speech.org/archive/archive_papers/interspeech_2014/i14_0338.pdf))
Learning to forget:
[Gers et al., 1999]
(http://digital-library.theiet.org/content/conferences/10.1049/cp_19991218)
([pdf](https://arxiv.org/pdf/1409.2329.pdf))
Long Short-Term Memory:
[Hochreiter et al., 1997]
(https://www.mitpressjournals.org/doi/abs/10.1162/neco.1997.9.8.1735)
([pdf](http://ml.jku.at/publications/older/3504.pdf))
"""
def __init__(self,
num_units,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_proj=None,
proj_clip=None,
num_unit_shards=None,
num_proj_shards=None,
forget_bias=1.0,
state_is_tuple=True,
activation=None,
reuse=None,
name=None,
dtype=None,
**kwargs):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a
variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a
variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1 in
order to reduce the scale of forgetting at the beginning of the
training. Must set it manually to `0.0` when restoring from CudnnLSTM
trained checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of the
`c_state` and `m_state`. If False, they are concatenated along the
column axis. This latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`. It
could also be string that is within Keras activation function names.
reuse: (optional) Python boolean describing whether to reuse variables in
an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will share
weights, but to avoid mistakes we require reuse=True in such cases.
dtype: Default dtype of the layer (default of `None` means use the type of
the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
`trainable` etc when constructing the cell from configs of get_config().
When restoring from CudnnLSTM-trained checkpoints, use
`CudnnCompatibleLSTMCell` instead.
"""
warnings.warn("`tf.nn.rnn_cell.LSTMCell` is deprecated and will be "
"removed in a future version. This class "
"is equivalent as `tf.keras.layers.LSTMCell`, "
"and will be replaced by that in Tensorflow 2.0.")
super(LSTMCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
_check_supported_dtypes(self.dtype)
if not state_is_tuple:
logging.warning(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if num_unit_shards is not None or num_proj_shards is not None:
logging.warning(
"%s: The num_unit_shards and proj_unit_shards parameters are "
"deprecated and will be removed in Jan 2017. "
"Use a variable scope with a partitioner instead.", self)
if context.executing_eagerly() and tf_config.list_logical_devices("GPU"):
logging.warning(
"%s: Note that this cell is not optimized for performance. "
"Please use tf.contrib.cudnn_rnn.CudnnLSTM for better "
"performance on GPU.", self)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializers.get(initializer)
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
if num_proj:
self._state_size = (
LSTMStateTuple(num_units, num_proj) if state_is_tuple else num_units +
num_proj)
self._output_size = num_proj
else:
self._state_size = (
LSTMStateTuple(num_units, num_units) if state_is_tuple else 2 *
num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" %
str(inputs_shape))
_check_supported_dtypes(self.dtype)
input_depth = inputs_shape[-1]
h_depth = self._num_units if self._num_proj is None else self._num_proj
maybe_partitioner = (
partitioned_variables.fixed_size_partitioner(self._num_unit_shards)
if self._num_unit_shards is not None else None)
self._kernel = self.add_variable(
_WEIGHTS_VARIABLE_NAME,
shape=[input_depth + h_depth, 4 * self._num_units],
initializer=self._initializer,
partitioner=maybe_partitioner)
if self.dtype is None:
initializer = init_ops.zeros_initializer
else:
initializer = init_ops.zeros_initializer(dtype=self.dtype)
self._bias = self.add_variable(
_BIAS_VARIABLE_NAME,
shape=[4 * self._num_units],
initializer=initializer)
if self._use_peepholes:
self._w_f_diag = self.add_variable(
"w_f_diag", shape=[self._num_units], initializer=self._initializer)
self._w_i_diag = self.add_variable(
"w_i_diag", shape=[self._num_units], initializer=self._initializer)
self._w_o_diag = self.add_variable(
"w_o_diag", shape=[self._num_units], initializer=self._initializer)
if self._num_proj is not None:
maybe_proj_partitioner = (
partitioned_variables.fixed_size_partitioner(self._num_proj_shards)
if self._num_proj_shards is not None else None)
self._proj_kernel = self.add_variable(
"projection/%s" % _WEIGHTS_VARIABLE_NAME,
shape=[self._num_units, self._num_proj],
initializer=self._initializer,
partitioner=maybe_proj_partitioner)
self.built = True
def call(self, inputs, state):
"""Run one step of LSTM.
Args:
inputs: input Tensor, must be 2-D, `[batch, input_size]`.
state: if `state_is_tuple` is False, this must be a state Tensor, `2-D,
[batch, state_size]`. If `state_is_tuple` is True, this must be a tuple
of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`.
Returns:
A tuple containing:
- A `2-D, [batch, output_dim]`, Tensor representing the output of the
LSTM after reading `inputs` when previous state was `state`.
Here output_dim is:
num_proj if num_proj was set,
num_units otherwise.
- Tensor(s) representing the new state of LSTM after reading `inputs` when
the previous state was `state`. Same type and shape(s) as `state`.
Raises:
ValueError: If input size cannot be inferred from inputs via
static shape inference.
"""
_check_rnn_cell_input_dtypes([inputs, state])
num_proj = self._num_units if self._num_proj is None else self._num_proj
sigmoid = math_ops.sigmoid
if self._state_is_tuple:
(c_prev, m_prev) = state
else:
c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])
input_size = inputs.get_shape().with_rank(2).dims[1].value
if input_size is None:
raise ValueError("Could not infer input size from inputs.get_shape()[-1]")
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
lstm_matrix = math_ops.matmul(
array_ops.concat([inputs, m_prev], 1), self._kernel)
lstm_matrix = nn_ops.bias_add(lstm_matrix, self._bias)
i, j, f, o = array_ops.split(
value=lstm_matrix, num_or_size_splits=4, axis=1)
# Diagonal connections
if self._use_peepholes:
c = (
sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev +
sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))
else:
c = (
sigmoid(f + self._forget_bias) * c_prev +
sigmoid(i) * self._activation(j))
if self._cell_clip is not None:
# pylint: disable=invalid-unary-operand-type
c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
# pylint: enable=invalid-unary-operand-type
if self._use_peepholes:
m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
else:
m = sigmoid(o) * self._activation(c)
if self._num_proj is not None:
m = math_ops.matmul(m, self._proj_kernel)
if self._proj_clip is not None:
# pylint: disable=invalid-unary-operand-type
m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
# pylint: enable=invalid-unary-operand-type
new_state = (
LSTMStateTuple(c, m)
if self._state_is_tuple else array_ops.concat([c, m], 1))
return m, new_state
def get_config(self):
config = {
"num_units": self._num_units,
"use_peepholes": self._use_peepholes,
"cell_clip": self._cell_clip,
"initializer": initializers.serialize(self._initializer),
"num_proj": self._num_proj,
"proj_clip": self._proj_clip,
"num_unit_shards": self._num_unit_shards,
"num_proj_shards": self._num_proj_shards,
"forget_bias": self._forget_bias,
"state_is_tuple": self._state_is_tuple,
"activation": activations.serialize(self._activation),
"reuse": self._reuse,
}
base_config = super(LSTMCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| LSTMCell |
python | kamyu104__LeetCode-Solutions | Python/line-reflection.py | {
"start": 733,
"end": 1571
} | class ____(object):
def isReflected(self, points):
"""
:type points: List[List[int]]
:rtype: bool
"""
if not points:
return True
points.sort()
# Space: O(n)
points[len(points)/2:] = sorted(points[len(points)/2:], \
lambda x, y: y[1] - x[1] if x[0] == y[0] else \
x[0] - y[0])
mid = points[0][0] + points[-1][0]
left, right = 0, len(points) - 1
while left <= right:
if (mid != points[left][0] + points[right][0]) or \
(points[left][0] != points[right][0] and \
points[left][1] != points[right][1]):
return False
left += 1
right -= 1
return True
| Solution2 |
python | pypa__pip | src/pip/_internal/models/pylock.py | {
"start": 5269,
"end": 6211
} | class ____:
lock_version: str = "1.0"
# (not supported) environments: Optional[List[str]]
# (not supported) requires_python: Optional[str]
# (not supported) extras: List[str] = []
# (not supported) dependency_groups: List[str] = []
created_by: str = "pip"
packages: list[Package] = dataclasses.field(default_factory=list)
# (not supported) tool: Optional[Dict[str, Any]]
def as_toml(self) -> str:
return tomli_w.dumps(dataclasses.asdict(self, dict_factory=_toml_dict_factory))
@classmethod
def from_install_requirements(
cls, install_requirements: Iterable[InstallRequirement], base_dir: Path
) -> Self:
return cls(
packages=sorted(
(
Package.from_install_requirement(ireq, base_dir)
for ireq in install_requirements
),
key=lambda p: p.name,
)
)
| Pylock |
python | google__jax | jax/_src/export/serialization_generated.py | {
"start": 784,
"end": 896
} | class ____(object):
leaf = 0
none = 1
tuple = 2
list = 3
dict = 4
custom = 5
| PyTreeDefKind |
python | ansible__ansible | lib/ansible/modules/hostname.py | {
"start": 23493,
"end": 23622
} | class ____(Hostname):
platform = 'Linux'
distribution = 'Scientific'
strategy_class = RedHatStrategy
| ScientificHostname |
python | huggingface__transformers | src/transformers/models/dinat/modeling_dinat.py | {
"start": 3813,
"end": 4994
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
| DinatImageClassifierOutput |
python | huggingface__transformers | src/transformers/models/patchtsmixer/modeling_patchtsmixer.py | {
"start": 42213,
"end": 44945
} | class ____(PatchTSMixerPreTrainedModel):
"""
Encoder for PatchTSMixer which inputs patched time-series and outputs patched embeddings.
Args:
config (`PatchTSMixerConfig`):
Configuration.
"""
def __init__(self, config: PatchTSMixerConfig):
super().__init__(config)
self.use_return_dict = config.use_return_dict
self.patcher = nn.Linear(config.patch_length, config.d_model)
if config.use_positional_encoding:
self.positional_encoder = PatchTSMixerPositionalEncoding(config=config)
else:
self.positional_encoder = None
self.mlp_mixer_encoder = PatchTSMixerBlock(config=config)
# Initialize weights and apply final processing
if config.post_init:
self.post_init()
@auto_docstring
def forward(
self,
past_values: torch.Tensor,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, PatchTSMixerEncoderOutput]:
r"""
past_values (`torch.FloatTensor` of shape `(batch_size, seq_length, num_input_channels)`):
Context values of the time series. For a pretraining task, this denotes the input time series to
predict the masked portion. For a forecasting task, this denotes the history/past time series values.
Similarly, for classification or regression tasks, it denotes the appropriate context values of the
time series.
For univariate time series, `num_input_channels` dimension should be 1. For multivariate time series,
it is greater than 1.
Returns:
`torch.FloatTensor` of shape `(batch_size, n_vars, num_patches, d_model)`
"""
return_dict = return_dict if return_dict is not None else self.use_return_dict
# flatten [bs x num_patch x d_model]. common_channel/mix_channel: [bs x n_vars x num_patch x d_model]
patches = self.patcher(past_values)
# add positional encoder
if self.positional_encoder is not None:
patches = self.positional_encoder(patches)
last_hidden_state, hidden_states = self.mlp_mixer_encoder(patches, output_hidden_states=output_hidden_states)
if not return_dict:
return tuple(
v
for v in [
last_hidden_state,
hidden_states,
]
)
return PatchTSMixerEncoderOutput(last_hidden_state=last_hidden_state, hidden_states=hidden_states)
@dataclass
@auto_docstring(
custom_intro="""
Base class for model's outputs, with potential hidden states.
"""
)
| PatchTSMixerEncoder |
python | ray-project__ray | python/ray/llm/_internal/batch/stages/base.py | {
"start": 1825,
"end": 8109
} | class ____:
"""A stage UDF wrapper that processes the input and output columns
before and after the UDF.
Args:
data_column: The internal data column name of the processor. The
__call__ method takes the data column as the input of the UDF
method, and encapsulates the output of the UDF method into the data
column for the next stage.
expected_input_keys: The expected input keys of the stage.
"""
# The internal column name for the index of the row in the batch.
# This is used to align the output of the UDF with the input batch.
IDX_IN_BATCH_COLUMN: str = "__idx_in_batch"
def __init__(
self, data_column: str, expected_input_keys: Optional[List[str]] = None
):
self.data_column = data_column
self.expected_input_keys = set(expected_input_keys or [])
async def __call__(self, batch: Dict[str, Any]) -> AsyncIterator[Dict[str, Any]]:
"""A stage UDF wrapper that processes the input and output columns
before and after the UDF. The expected schema of "batch" is:
{data_column: {
dataset columns,
other intermediate columns
},
...other metadata columns...,
}.
The input of the UDF will then [dataset columns and other intermediate columns].
In addition, while the output of the UDF depends on the UDF implementation,
the output schema is expected to be
{data_column: {
dataset columns,
other intermediate columns,
UDF output columns (will override above columns if they have the same name)
},
...other metadata columns...,
}.
And this will become the input of the next stage.
Examples:
Input dataset columns: {A, B, C}
Preprocess: (lambda row: {"D": row["A"] + 1})
Input:
UDF input: {A, B, C}
UDF output: {D}
Output: {__data: {A, B, C, D}}
Stage 1:
Input: {__data: {A, B, C, D}}
UDF input: {A, B, C, D}
UDF output: {E}
Output: {__data: {A, B, C, D, E}}
Stage 2:
Input: {__data: {A, B, C, D, E}}
UDF input: {A, B, C, D, E}
UDF output: {F, E} # E is in-place updated.
Output: {__data: {A, B, C, D, E, F}}
Postprocess: (lambda row: {"G": row["F"], "A": row["A"], "E": row["E"]})
Input: {__data: {A, B, C, D, E, F}}
UDF input: {A, B, C, D, E, F}
UDF output: {G, A, E}
Output: {G, A, E} # User chooses to keep G, A, E.
Args:
batch: The input batch.
Returns:
An async iterator of the outputs.
"""
# Handle the case where the batch is empty.
# FIXME: This should not happen.
if isinstance(batch, pyarrow.lib.Table) and batch.num_rows == 0:
yield {}
return
if self.data_column not in batch:
raise ValueError(
f"[Internal] {self.data_column} not found in batch {batch}"
)
inputs = batch.pop(self.data_column)
if hasattr(inputs, "tolist"):
inputs = inputs.tolist()
self.validate_inputs(inputs)
# Assign the index of the row in the batch to the idx_in_batch_column.
# This is beacuse the UDF output may be out-of-order (if asyncio.as_completed
# is used interanlly for example), and we need to carry over unused input
# columns to the next stage. Thus, we use the row index in batch to match
# the output of the UDF with the input.
for idx, row in enumerate(inputs):
row[self.IDX_IN_BATCH_COLUMN] = idx
# Collect all outputs first, then return them in the original order
# This is a requirement set by https://github.com/ray-project/ray/pull/54190/
not_outputed_rows = set(range(len(inputs)))
async for output in self.udf(inputs):
if self.IDX_IN_BATCH_COLUMN not in output:
raise ValueError(
"The output of the UDF must contain the column "
f"{self.IDX_IN_BATCH_COLUMN}."
)
idx_in_batch = output.pop(self.IDX_IN_BATCH_COLUMN)
if idx_in_batch not in not_outputed_rows:
raise ValueError(
f"The row {idx_in_batch} is outputed twice. "
"This is likely due to the UDF is not one-to-one."
)
not_outputed_rows.remove(idx_in_batch)
# Add stage outputs to the data column of the row.
inputs[idx_in_batch].pop(self.IDX_IN_BATCH_COLUMN)
inputs[idx_in_batch].update(output)
if not_outputed_rows:
raise ValueError(f"The rows {not_outputed_rows} are not outputed.")
# Return all updated inputs in the original order
yield {self.data_column: inputs}
def validate_inputs(self, inputs: List[Dict[str, Any]]):
"""Validate the inputs to make sure the required keys are present.
Args:
inputs: The inputs.
Raises:
ValueError: If the required keys are not found.
"""
for inp in inputs:
input_keys = set(inp.keys())
if self.IDX_IN_BATCH_COLUMN in input_keys:
raise ValueError(
f"The input column {self.IDX_IN_BATCH_COLUMN} is reserved "
"for internal use."
)
if not self.expected_input_keys:
continue
missing_required = self.expected_input_keys - input_keys
if missing_required:
raise ValueError(
f"Required input keys {missing_required} not found at the input of "
f"{self.__class__.__name__}. Input keys: {input_keys}"
)
async def udf(self, rows: List[Dict[str, Any]]) -> AsyncIterator[Dict[str, Any]]:
raise NotImplementedError("StageUDF must implement the udf method")
| StatefulStageUDF |
python | getsentry__sentry | src/sentry/hybridcloud/services/organization_mapping/model.py | {
"start": 500,
"end": 831
} | class ____(RpcOrganizationSummary):
region_name: str = ""
date_created: datetime = Field(default_factory=timezone.now)
verified: bool = False
customer_id: str | None = None
status: int | None = None
flags: RpcOrganizationMappingFlags = Field(default_factory=RpcOrganizationMappingFlags)
| RpcOrganizationMapping |
python | huggingface__transformers | tests/utils/import_structures/import_structure_raw_register_with_versions.py | {
"start": 710,
"end": 851
} | class ____:
def __init__(self):
pass
@requires(backends=("torch>=2.5",))
def d0():
pass
@requires(backends=("torch>2.5",))
| D0 |
python | pyinstaller__pyinstaller | PyInstaller/building/makespec.py | {
"start": 4602,
"end": 4801
} | class ____(argparse.Action):
def __init__(self, *args, **kwargs):
kwargs["help"] = argparse.SUPPRESS
kwargs["nargs"] = 0
super().__init__(*args, **kwargs)
| _RemovedFlagAction |
python | walkccc__LeetCode | solutions/1388. Pizza With 3n Slices/1388.py | {
"start": 0,
"end": 980
} | class ____:
def maxSizeSlices(self, slices: list[int]) -> int:
@functools.lru_cache(None)
def dp(i: int, j: int, k: int) -> int:
"""
Returns the maximum the sum of slices if you can pick k slices from
slices[i..j).
"""
if k == 1:
return max(slices[i:j])
# Note that j - i is not the number of all the left slices. Since you
# Might have chosen not to take a slice in a previous step, there would be
# Leftovers outside [i:j]. If you take slices[i], one of the slices your
# Friends take will be outside of [i:j], so the length of [i:j] is reduced
# By 2 instead of 3. Therefore, the minimum # Is 2 * k - 1 (the last step only
# Requires one slice).
if j - i < 2 * k - 1:
return -math.inf
return max(slices[i] + dp(i + 2, j, k - 1),
dp(i + 1, j, k))
k = len(slices) // 3
return max(dp(0, len(slices) - 1, k),
dp(1, len(slices), k))
| Solution |
python | huggingface__transformers | tests/utils/test_modeling_utils.py | {
"start": 141119,
"end": 148258
} | class ____(unittest.TestCase):
def test_seq2seq_lm_get_encoder_returns_encoder(self):
cfg = BartConfig(
vocab_size=128,
d_model=32,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=4,
decoder_attention_heads=4,
encoder_ffn_dim=64,
decoder_ffn_dim=64,
)
model = BartForConditionalGeneration(cfg)
encoder = model.get_encoder()
assert encoder is model.model.encoder, (
f"Expected get_encoder() to return model.model.encoder, got {type(encoder)}"
)
def test_base_model_returns_encoder(self):
cfg = BartConfig(
vocab_size=128,
d_model=32,
encoder_layers=2,
decoder_layers=2,
encoder_attention_heads=4,
decoder_attention_heads=4,
encoder_ffn_dim=64,
decoder_ffn_dim=64,
)
model = BartModel(cfg)
encoder = model.get_encoder()
assert encoder is model.encoder, f"Expected get_encoder() to return model.encoder, got {type(encoder)}"
def test_decoder_only_model_returns_self(self):
"""Test that decoder-only models (no encoder) return self."""
cfg = MistralConfig(
vocab_size=128,
hidden_size=32,
intermediate_size=64,
num_hidden_layers=2,
num_attention_heads=4,
)
model = MistralForCausalLM(cfg)
encoder = model.get_encoder()
assert encoder is model, f"Base model get_encoder() should return self, got {type(encoder)}"
def test_when_encoder_has_different_name(self):
"""Test models with non-standard name for encoder modular (Musicgen has `self.model.text_encoder`)."""
cfg = MusicgenConfig(
text_encoder={
"model_type": "t5",
"vocab_size": 99,
"d_model": 32,
"d_ff": 37,
"num_layers": 2,
"num_heads": 2,
},
audio_encoder={
"model_type": "encodec",
"hidden_size": 99,
"compress": 1,
"num_filters": 2,
"codebook_size": 32,
"codebook_dim": 32,
},
decoder={
"vocab_size": 99,
"ffn_dim": 32,
"num_attention_heads": 2,
"hidden_size": 32,
"num_hidden_layers": 2,
},
)
model = MusicgenForConditionalGeneration(cfg)
encoder = model.get_encoder()
assert encoder is model.text_encoder, (
f"MusicgenForConditionalGeneration get_encoder() should return model.model.text_encoder, got {type(encoder)}"
)
def test_audio_encoder(self):
"""Test models with multiple modality encoders (Musicgen has `self.model.audio_encoder`)."""
cfg = MusicgenConfig(
text_encoder={
"model_type": "t5",
"vocab_size": 99,
"d_model": 32,
"d_ff": 37,
"num_layers": 2,
"num_heads": 2,
},
audio_encoder={
"model_type": "encodec",
"hidden_size": 99,
"compress": 1,
"num_filters": 2,
"codebook_size": 32,
"codebook_dim": 32,
},
decoder={
"vocab_size": 99,
"ffn_dim": 32,
"num_attention_heads": 2,
"hidden_size": 32,
"num_hidden_layers": 2,
},
)
model = MusicgenForConditionalGeneration(cfg)
encoder = model.get_encoder(modality="audio")
assert encoder is model.audio_encoder, (
f"MusicgenForConditionalGeneration get_encoder(modality='audio') should return model.model.audio_encoder, got {type(encoder)}"
)
def test_non_existant_modality_throws_error(self):
"""Test that an error is thrown when a rquested modality does not exist."""
cfg = MistralConfig(
vocab_size=128,
hidden_size=32,
intermediate_size=64,
num_hidden_layers=2,
num_attention_heads=4,
)
model = MistralModel(cfg)
with self.assertRaises(ValueError):
_ = model.get_encoder(modality="3d")
def test_encoder_return_self_when_modality_not_found(self):
"""Test that `self` is returned if the model has no encoder for requested modality."""
cfg = MistralConfig(
vocab_size=128,
hidden_size=32,
intermediate_size=64,
num_hidden_layers=2,
num_attention_heads=4,
)
model = MistralModel(cfg)
encoder = model.get_encoder(modality="image")
assert encoder is model, f"Mistral get_encoder(modality='image') should return self, got {type(encoder)}"
def test_model_without_get_encoder(self):
"""Test edge case where model has model attribute but no get_encoder method."""
class MockInnerModel:
"""Mock model without get_encoder method."""
pass
class MockWrapperModel:
"""Mock wrapper with model attribute but inner has no get_encoder."""
def __init__(self):
self.model = MockInnerModel()
def get_encoder(self):
if hasattr(self, "encoder"):
return self.encoder
if hasattr(self, "model"):
inner = self.model
if hasattr(inner, "get_encoder") and type(inner) is not type(self):
return inner.get_encoder()
return inner
return self
wrapper = MockWrapperModel()
encoder = wrapper.get_encoder()
assert encoder is wrapper.model, f"Should return inner model when no get_encoder, got {type(encoder)}"
def test_vision_language_model(self):
"""Test vision-language models like LLaVA can find the modality encoder ("image")."""
text_config = MistralConfig(
vocab_size=128,
hidden_size=32,
intermediate_size=64,
num_hidden_layers=2,
num_attention_heads=4,
)
vision_config = {
"hidden_size": 32,
"intermediate_size": 64,
"num_hidden_layers": 2,
"num_attention_heads": 4,
"num_channels": 3,
"image_size": 224,
"patch_size": 16,
}
cfg = LlavaConfig(
text_config=text_config.to_dict(),
vision_config=vision_config,
vocab_size=128,
)
model = LlavaForConditionalGeneration(cfg)
image_encoder = model.get_encoder(modality="image")
assert image_encoder is model.model.vision_tower, (
f"LLaVA get_encoder(modality='image') should return vision_tower, got {type(image_encoder)}"
)
| TestGetEncoder |
python | graphql-python__graphene | graphene/relay/id_type.py | {
"start": 1841,
"end": 2247
} | class ____(BaseGlobalIDType):
"""
UUID global ID type.
By definition UUID are global so they are used as they are.
"""
graphene_type = UUID
@classmethod
def resolve_global_id(cls, info, global_id):
_type = info.return_type.graphene_type._meta.name
return _type, global_id
@classmethod
def to_global_id(cls, _type, _id):
return _id
| UUIDGlobalIDType |
python | readthedocs__readthedocs.org | readthedocs/proxito/tests/test_full.py | {
"start": 1379,
"end": 18446
} | class ____(BaseDocServing):
# Test the full range of possible doc URL's
def test_health_check(self):
url = reverse("health_check")
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json(), {"status": 200})
# Test with IP address, which should still work
# since we're skipping middleware
host = "127.0.0.1"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.json(), {"status": 200})
self.assertEqual(resp["CDN-Cache-Control"], "private")
def test_subproject_serving(self):
url = "/projects/subproject/en/latest/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/awesome.html",
)
def test_subproject_single_version(self):
self.subproject.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
self.subproject.save()
url = "/projects/subproject/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject/latest/awesome.html",
)
def test_subproject_translation_serving(self):
url = "/projects/subproject/es/latest/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject-translation/latest/awesome.html",
)
def test_subproject_alias_serving(self):
url = "/projects/this-is-an-alias/en/latest/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/subproject-alias/latest/awesome.html",
)
def test_translation_serving(self):
url = "/es/latest/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/translation/latest/awesome.html",
)
def test_translation_zh_deprecated_code_serving(self):
self.translation.language = "zh"
self.translation.save()
url = "/zh/latest/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/translation/latest/awesome.html",
)
def test_normal_serving(self):
url = "/en/latest/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/awesome.html",
)
def test_single_version_serving(self):
self.project.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
self.project.save()
url = "/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/awesome.html",
)
def test_single_version_serving_looks_like_normal(self):
self.project.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
self.project.save()
url = "/en/stable/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/en/stable/awesome.html",
)
def test_index_serving(self):
host = "project.dev.readthedocs.io"
urls = ("/en/latest/awesome/", "/en/latest/awesome/index.html")
for url in urls:
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/awesome/index.html",
)
def test_single_version_external_serving(self):
self.project.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
self.project.save()
fixture.get(
Version,
verbose_name="10",
slug="10",
type=EXTERNAL,
active=True,
project=self.project,
)
url = "/awesome.html"
host = "project--10.dev.readthedocs.build"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/external/html/project/10/awesome.html",
)
def test_external_version_serving(self):
fixture.get(
Version,
verbose_name="10",
slug="10",
type=EXTERNAL,
active=True,
project=self.project,
)
url = "/en/10/awesome.html"
host = "project--10.dev.readthedocs.build"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/external/html/project/10/awesome.html",
)
def test_external_version_serving_old_slugs(self):
"""
Test external version serving with projects with `--` in their slug.
Some old projects may have been created with a slug containg `--`,
our current code doesn't allow these type of slugs.
"""
fixture.get(
Version,
verbose_name="10",
slug="10",
type=EXTERNAL,
active=True,
project=self.project,
)
self.project.slug = "test--project"
self.project.save()
host = "test--project--10.dev.readthedocs.build"
resp = self.client.get("/en/10/awesome.html", headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/external/html/test--project/10/awesome.html",
)
# Invalid tests
def test_non_existent_version(self):
url = "/en/non-existent-version/"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 404)
def test_non_existent_version_with_filename(self):
url = "/en/non-existent-version/doesnt-exist.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 404)
def test_inactive_version(self):
url = "/en/inactive/"
host = "project.dev.readthedocs.io"
fixture.get(
Version,
verbose_name="inactive",
slug="inactive",
active=False,
project=self.project,
)
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 404)
def test_serve_external_version_on_main_domain(self):
fixture.get(
Version,
verbose_name="10",
slug="10",
type=EXTERNAL,
active=True,
project=self.project,
)
url = "/en/10/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 404)
self.assertEqual(resp["X-RTD-Project"], "project")
self.assertEqual(resp["X-RTD-Version"], "10")
def test_serve_non_external_version_on_external_domain(self):
fixture.get(
Version,
verbose_name="10",
slug="10",
type=EXTERNAL,
active=True,
project=self.project,
)
url = "/en/latest/awesome.html"
host = "project--10.dev.readthedocs.build"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 404)
self.assertEqual(resp["X-RTD-Project"], "project")
self.assertEqual(resp["X-RTD-Version"], "10")
def test_serve_different_external_version_from_domain(self):
fixture.get(
Version,
verbose_name="10",
slug="10",
type=EXTERNAL,
active=True,
project=self.project,
)
fixture.get(
Version,
verbose_name="11",
slug="11",
type=EXTERNAL,
active=True,
project=self.project,
)
url = "/en/11/awesome.html"
host = "project--10.dev.readthedocs.build"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 404)
self.assertEqual(resp["X-RTD-Project"], "project")
self.assertEqual(resp["X-RTD-Version"], "10")
def test_invalid_language_for_project_with_versions(self):
url = "/foo/latest/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 404)
def test_invalid_translation_for_project_with_versions(self):
url = "/cs/latest/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 404)
def test_invalid_subproject(self):
url = "/projects/doesnt-exist/foo.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 404)
# https://github.com/readthedocs/readthedocs.org/pull/6226/files/596aa85a4886407f0eb65233ebf9c38ee3e8d485#r332445803
def test_valid_project_as_invalid_subproject(self):
url = "/projects/translation/es/latest/foo.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 404)
def test_public_domain_hsts(self):
host = "project.dev.readthedocs.io"
response = self.client.get("/", headers={"host": host})
self.assertFalse("strict-transport-security" in response)
response = self.client.get("/", headers={"host": host}, secure=True)
self.assertFalse("strict-transport-security" in response)
with override_settings(PUBLIC_DOMAIN_USES_HTTPS=True):
response = self.client.get("/", headers={"host": host})
self.assertFalse("strict-transport-security" in response)
response = self.client.get("/", headers={"host": host}, secure=True)
self.assertEqual(
response["strict-transport-security"],
"max-age=31536000; includeSubDomains; preload",
)
def test_custom_domain_response_hsts(self):
hostname = "docs.random.com"
domain = fixture.get(
Domain,
project=self.project,
domain=hostname,
hsts_max_age=0,
hsts_include_subdomains=False,
hsts_preload=False,
)
response = self.client.get("/", headers={"host": hostname})
self.assertFalse("strict-transport-security" in response)
response = self.client.get("/", headers={"host": hostname}, secure=True)
self.assertFalse("strict-transport-security" in response)
domain.hsts_max_age = 3600
domain.save()
response = self.client.get("/", headers={"host": hostname})
self.assertFalse("strict-transport-security" in response)
response = self.client.get("/", headers={"host": hostname}, secure=True)
self.assertTrue("strict-transport-security" in response)
self.assertEqual(
response["strict-transport-security"],
"max-age=3600",
)
domain.hsts_include_subdomains = True
domain.hsts_preload = True
domain.save()
response = self.client.get("/", headers={"host": hostname}, secure=True)
self.assertTrue("strict-transport-security" in response)
self.assertEqual(
response["strict-transport-security"],
"max-age=3600; includeSubDomains; preload",
)
def test_single_version_serving_projects_dir(self):
self.project.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
self.project.save()
url = "/projects/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/projects/awesome.html",
)
def test_single_version_serving_language_like_subdir(self):
self.project.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
self.project.save()
url = "/en/api/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/en/api/awesome.html",
)
def test_single_version_serving_language_like_dir(self):
self.project.versioning_scheme = SINGLE_VERSION_WITHOUT_TRANSLATIONS
self.project.save()
url = "/en/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/en/awesome.html",
)
def test_multiple_versions_without_translations_serving(self):
self.project.versioning_scheme = MULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS
self.project.save()
url = "/latest/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"], "/proxito/media/html/project/latest/awesome.html"
)
def test_multiple_versions_without_translations_serving_language_like_subdir(self):
self.project.versioning_scheme = MULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS
self.project.save()
url = "/en/api/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 404)
self.version.slug = "en"
self.version.save()
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/en/api/awesome.html",
)
def test_multiple_versions_without_translations_serving_subprojects_like_subdir(
self,
):
self.project.versioning_scheme = MULTIPLE_VERSIONS_WITHOUT_TRANSLATIONS
self.project.save()
url = "/projects/api/awesome.html"
host = "project.dev.readthedocs.io"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 404)
self.version.slug = "projects"
self.version.save()
resp = self.client.get(url, headers={"host": host})
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/projects/api/awesome.html",
)
def test_old_language_code(self):
self.project.language = "pt-br"
self.project.save()
host = "project.dev.readthedocs.io"
url = "/pt_BR/latest/index.html"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 302)
self.assertEqual(
resp["location"],
"http://project.dev.readthedocs.io/pt-br/latest/index.html",
)
url = "/pt-br/latest/index.html"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/index.html",
)
# Ambiguous path.
url = "/pt-br/latest/bt_BR/index.html"
resp = self.client.get(url, headers={"host": host})
self.assertEqual(resp.status_code, 200)
self.assertEqual(
resp["x-accel-redirect"],
"/proxito/media/html/project/latest/bt_BR/index.html",
)
@override_settings(
PUBLIC_DOMAIN="dev.readthedocs.io",
RTD_EXTERNAL_VERSION_DOMAIN="dev.readthedocs.build",
)
| TestFullDocServing |
python | huggingface__transformers | src/transformers/models/clipseg/modeling_clipseg.py | {
"start": 45776,
"end": 49880
} | class ____(CLIPSegPreTrainedModel):
def __init__(self, config: CLIPSegConfig):
super().__init__(config)
self.conditional_layer = config.conditional_layer
self.film_mul = nn.Linear(config.projection_dim, config.reduce_dim)
self.film_add = nn.Linear(config.projection_dim, config.reduce_dim)
if config.use_complex_transposed_convolution:
transposed_kernels = (config.vision_config.patch_size // 4, config.vision_config.patch_size // 4)
self.transposed_convolution = nn.Sequential(
nn.Conv2d(config.reduce_dim, config.reduce_dim, kernel_size=3, padding=1),
nn.ReLU(),
nn.ConvTranspose2d(
config.reduce_dim,
config.reduce_dim // 2,
kernel_size=transposed_kernels[0],
stride=transposed_kernels[0],
),
nn.ReLU(),
nn.ConvTranspose2d(
config.reduce_dim // 2, 1, kernel_size=transposed_kernels[1], stride=transposed_kernels[1]
),
)
else:
self.transposed_convolution = nn.ConvTranspose2d(
config.reduce_dim, 1, config.vision_config.patch_size, stride=config.vision_config.patch_size
)
depth = len(config.extract_layers)
self.reduces = nn.ModuleList(
[nn.Linear(config.vision_config.hidden_size, config.reduce_dim) for _ in range(depth)]
)
decoder_config = copy.deepcopy(config.vision_config)
decoder_config.hidden_size = config.reduce_dim
decoder_config.num_attention_heads = config.decoder_num_attention_heads
decoder_config.intermediate_size = config.decoder_intermediate_size
decoder_config.hidden_act = "relu"
self.layers = nn.ModuleList([CLIPSegDecoderLayer(decoder_config) for _ in range(len(config.extract_layers))])
def forward(
self,
hidden_states: tuple[torch.Tensor],
conditional_embeddings: torch.Tensor,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = True,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
activations = hidden_states[::-1]
output = None
for i, (activation, layer, reduce) in enumerate(zip(activations, self.layers, self.reduces)):
if output is not None:
output = reduce(activation) + output
else:
output = reduce(activation)
if i == self.conditional_layer:
output = self.film_mul(conditional_embeddings) * output.permute(1, 0, 2) + self.film_add(
conditional_embeddings
)
output = output.permute(1, 0, 2)
layer_outputs = layer(
output, attention_mask=None, causal_attention_mask=None, output_attentions=output_attentions
)
output = layer_outputs[0]
if output_hidden_states:
all_hidden_states += (output,)
if output_attentions:
all_attentions += (layer_outputs[1],)
output = output[:, 1:, :].permute(0, 2, 1) # remove cls token and reshape to [batch_size, reduce_dim, seq_len]
size = int(math.sqrt(output.shape[2]))
batch_size = conditional_embeddings.shape[0]
output = output.view(batch_size, output.shape[1], size, size)
logits = self.transposed_convolution(output).squeeze(1)
if not return_dict:
return tuple(v for v in [logits, all_hidden_states, all_attentions] if v is not None)
return CLIPSegDecoderOutput(
logits=logits,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
@auto_docstring(
custom_intro="""
CLIPSeg model with a Transformer-based decoder on top for zero-shot and one-shot image segmentation.
"""
)
| CLIPSegDecoder |
python | huggingface__transformers | src/transformers/models/wav2vec2_bert/modular_wav2vec2_bert.py | {
"start": 24129,
"end": 28268
} | class ____(PreTrainedModel):
config: Wav2Vec2BertConfig
base_model_prefix = "wav2vec2_bert"
main_input_name = "input_features"
input_modalities = "audio"
supports_gradient_checkpointing = True
@torch.no_grad()
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, Wav2Vec2BertSelfAttention):
if hasattr(module, "pos_bias_u"):
init.xavier_uniform_(module.pos_bias_u)
if hasattr(module, "pos_bias_v"):
init.xavier_uniform_(module.pos_bias_v)
elif isinstance(module, Wav2Vec2BertFeatureProjection):
k = math.sqrt(1 / module.projection.in_features)
init.uniform_(module.projection.weight, a=-k, b=k)
init.uniform_(module.projection.bias, a=-k, b=k)
elif isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, nn.Conv1d):
init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
init.uniform_(module.bias, a=-k, b=k)
elif isinstance(module, Wav2Vec2BertModel):
if hasattr(module, "masked_spec_embed"):
init.uniform_(module.masked_spec_embed)
elif isinstance(
module,
(Wav2Vec2BertForSequenceClassification, Wav2Vec2BertForAudioFrameClassification, Wav2Vec2BertForXVector),
):
if hasattr(module, "layer_weights"):
init.constant_(module.layer_weights, 1.0 / (self.config.num_hidden_layers + 1))
elif isinstance(module, AMSoftmaxLoss): # noqa: F821
init.normal_(module.weight)
# Ignore copy
def _get_feat_extract_output_lengths(
self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None
):
"""
Computes the output length of the convolutional layers
"""
add_adapter = self.config.add_adapter if add_adapter is None else add_adapter
def _conv_out_length(input_length, kernel_size, stride, padding):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return torch.div(input_length + 2 * padding - kernel_size, stride, rounding_mode="floor") + 1
if add_adapter:
padding = self.config.adapter_kernel_size // 2
for _ in range(self.config.num_adapter_layers):
input_lengths = _conv_out_length(
input_lengths, self.config.adapter_kernel_size, self.config.adapter_stride, padding
)
return input_lengths
def _get_feature_vector_attention_mask(
self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None
):
# Effectively attention_mask.sum(-1), but not inplace to be able to run
# on inference mode.
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter)
output_lengths = output_lengths.to(torch.long)
batch_size = attention_mask.shape[0]
attention_mask = torch.zeros(
(batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
)
# these two operations makes sure that all values before the output lengths idxs are attended to
attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
return attention_mask
Wav2Vec2BertBaseModelOutput = Wav2Vec2BaseModelOutput
| Wav2Vec2BertPreTrainedModel |
python | huggingface__transformers | src/transformers/models/falcon_h1/modeling_falcon_h1.py | {
"start": 48881,
"end": 49761
} | class ____(nn.Module):
def __init__(self, config: FalconH1Config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
self.gate_multiplier, self.down_multiplier = config.mlp_multipliers
def forward(self, x):
y = self.up_proj(x) * self.act_fn(self.gate_proj(x) * self.gate_multiplier)
y = self.down_proj(y) * self.down_multiplier
return y
@use_kernel_forward_from_hub("RMSNorm")
| FalconH1MLP |
python | huggingface__transformers | src/transformers/models/camembert/modeling_camembert.py | {
"start": 25041,
"end": 30376
} | class ____(CamembertPreTrainedModel):
_no_split_modules = ["CamembertEmbeddings", "CamembertLayer"]
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.gradient_checkpointing = False
self.embeddings = CamembertEmbeddings(config)
self.encoder = CamembertEncoder(config)
self.pooler = CamembertPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if use_cache and past_key_values is None:
past_key_values = (
EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if encoder_hidden_states is not None or self.config.is_encoder_decoder
else DynamicCache(config=self.config)
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if input_ids is not None:
device = input_ids.device
seq_length = input_ids.shape[1]
else:
device = inputs_embeds.device
seq_length = inputs_embeds.shape[1]
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(past_key_values_length, past_key_values_length + seq_length, device=device)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
attention_mask, encoder_attention_mask = self._create_attention_masks(
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
embedding_output=embedding_output,
encoder_hidden_states=encoder_hidden_states,
cache_position=cache_position,
past_key_values=past_key_values,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_ids=position_ids,
**kwargs,
)
sequence_output = encoder_outputs.last_hidden_state
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
)
def _create_attention_masks(
self,
attention_mask,
encoder_attention_mask,
embedding_output,
encoder_hidden_states,
cache_position,
past_key_values,
):
if self.config.is_decoder:
attention_mask = create_causal_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
)
else:
attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=attention_mask,
)
if encoder_attention_mask is not None:
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=embedding_output,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
return attention_mask, encoder_attention_mask
@auto_docstring
| CamembertModel |
python | huggingface__transformers | src/transformers/models/flava/modeling_flava.py | {
"start": 65383,
"end": 67690
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.global_backprop_contrastive = config.global_backprop_contrastive
def forward(self, image_embeddings, text_embeddings, logit_scale):
temperature = torch.exp(logit_scale)
if not torch.distributed.is_available() or not torch.distributed.is_initialized():
labels = torch.arange(image_embeddings.size(0), device=image_embeddings.device)
image_embeddings_all = [image_embeddings]
text_embeddings_all = [text_embeddings]
else:
local_batch_size = image_embeddings.size(0)
world_size = torch.distributed.get_world_size()
if self.global_backprop_contrastive:
# `torch.distributed.nn.functional.all_gather` does backprop on all active workers
# whereas `torch.distributed.all_gather` does only backpropagates on the current worker.
image_embeddings_all = torch.distributed.nn.functional.all_gather(image_embeddings)
text_embeddings_all = torch.distributed.nn.functional.all_gather(text_embeddings)
else:
image_embeddings_all = [torch.zeros_like(text_embeddings) for _ in range(world_size)]
text_embeddings_all = [torch.zeros_like(image_embeddings) for _ in range(world_size)]
torch.distributed.all_gather(image_embeddings_all, image_embeddings)
torch.distributed.all_gather(text_embeddings_all, text_embeddings)
labels = local_batch_size * torch.distributed.get_rank() + torch.arange(
local_batch_size, device=image_embeddings.device
)
image_embeddings_all = torch.cat(image_embeddings_all)
text_embeddings_all = torch.cat(text_embeddings_all)
logits_per_image = torch.matmul(image_embeddings, text_embeddings_all.transpose(0, 1)) * temperature
logits_per_text = torch.matmul(text_embeddings, image_embeddings_all.transpose(0, 1)) * temperature
return logits_per_image, logits_per_text, labels
@auto_docstring(
custom_intro="""
The FLAVA model for pretraining which outputs losses, embeddings, logits and transformer outputs.
"""
)
| FlavaGlobalContrastiveHead |
python | apache__airflow | providers/standard/tests/unit/standard/sensors/test_external_task_sensor.py | {
"start": 4504,
"end": 44503
} | class ____:
def setup_method(self):
self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True)
self.args = {"owner": "airflow", "start_date": DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, schedule=None, default_args=self.args)
self.dag_run_id = DagRunType.MANUAL.generate_run_id(DEFAULT_DATE)
def add_time_sensor(self, task_id=TEST_TASK_ID):
# TODO: Remove BaseOperator in https://github.com/apache/airflow/issues/47447
class TimeSensorNew(TimeSensor, BaseOperator):
def poke(self, context):
return True
op = TimeSensorNew(task_id=task_id, target_time=time(0), dag=self.dag)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def add_fake_task_group(self, target_states=None):
target_states = [State.SUCCESS] * 2 if target_states is None else target_states
with self.dag as dag:
with TaskGroup(group_id=TEST_TASK_GROUP_ID) as task_group:
_ = [EmptyOperator(task_id=f"task{i}") for i in range(len(target_states))]
dag.sync_to_db()
if AIRFLOW_V_3_0_PLUS:
SerializedDagModel.write_dag(dag, bundle_name="testing")
else:
SerializedDagModel.write_dag(dag)
for idx, task in enumerate(task_group):
if AIRFLOW_V_3_0_PLUS:
dag_version = DagVersion.get_latest_version(task_group[idx].dag_id)
ti = TaskInstance(task=task, run_id=self.dag_run_id, dag_version_id=dag_version.id)
else:
ti = TaskInstance(task=task, run_id=self.dag_run_id)
ti.run(ignore_ti_state=True, mark_success=True)
ti.set_state(target_states[idx])
def add_fake_task_group_with_dynamic_tasks(self, target_state=State.SUCCESS):
map_indexes = range(5)
with self.dag as dag:
with TaskGroup(group_id=TEST_TASK_GROUP_ID) as task_group:
@task_deco
def fake_task():
pass
@task_deco
def fake_mapped_task(x: int):
return x
fake_task()
fake_mapped_task.expand(x=list(map_indexes))
dag.sync_to_db()
if AIRFLOW_V_3_0_PLUS:
SerializedDagModel.write_dag(dag, bundle_name="testing")
else:
SerializedDagModel.write_dag(dag)
for task in task_group:
if task.task_id == "fake_mapped_task":
for map_index in map_indexes:
if AIRFLOW_V_3_0_PLUS:
dag_version = DagVersion.get_latest_version(dag.dag_id)
ti = TaskInstance(task=task, run_id=self.dag_run_id, dag_version_id=dag_version.id)
else:
ti = TaskInstance(task=task, run_id=self.dag_run_id, map_index=map_index)
ti.run(ignore_ti_state=True, mark_success=True)
ti.set_state(target_state)
else:
if AIRFLOW_V_3_0_PLUS:
dag_version = DagVersion.get_latest_version(dag.dag_id)
ti = TaskInstance(task=task, run_id=self.dag_run_id, dag_version_id=dag_version.id)
else:
ti = TaskInstance(task=task, run_id=self.dag_run_id)
ti.run(ignore_ti_state=True, mark_success=True)
ti.set_state(target_state)
def test_external_task_sensor(self):
self.add_time_sensor()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_multiple_task_ids(self):
self.add_time_sensor(task_id=TEST_TASK_ID)
self.add_time_sensor(task_id=TEST_TASK_ID_ALTERNATE)
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check_task_ids",
external_dag_id=TEST_DAG_ID,
external_task_ids=[TEST_TASK_ID, TEST_TASK_ID_ALTERNATE],
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_with_task_group(self):
self.add_time_sensor()
self.add_fake_task_group()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_task_group",
external_dag_id=TEST_DAG_ID,
external_task_group_id=TEST_TASK_GROUP_ID,
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_raise_with_external_task_sensor_task_id_and_task_ids(self):
with pytest.raises(
ValueError,
match=EXTERNAL_ID_AND_IDS_PROVIDE_ERROR,
):
ExternalTaskSensor(
task_id="test_external_task_sensor_task_id_with_task_ids_failed_status",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
external_task_ids=TEST_TASK_ID,
dag=self.dag,
)
def test_raise_with_external_task_sensor_task_group_and_task_id(self):
with pytest.raises(ValueError, match=EXTERNAL_IDS_AND_TASK_GROUP_ID_PROVIDE_ERROR):
ExternalTaskSensor(
task_id="test_external_task_sensor_task_group_with_task_id_failed_status",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
external_task_group_id=TEST_TASK_GROUP_ID,
dag=self.dag,
)
def test_raise_with_external_task_sensor_task_group_and_task_ids(self):
with pytest.raises(ValueError, match=EXTERNAL_IDS_AND_TASK_GROUP_ID_PROVIDE_ERROR):
ExternalTaskSensor(
task_id="test_external_task_sensor_task_group_with_task_ids_failed_status",
external_dag_id=TEST_DAG_ID,
external_task_ids=TEST_TASK_ID,
external_task_group_id=TEST_TASK_GROUP_ID,
dag=self.dag,
)
# by default i.e. check_existence=False, if task_group doesn't exist, the sensor will run till timeout,
# this behaviour is similar to external_task_id doesn't exists
def test_external_task_group_not_exists_without_check_existence(self):
self.add_time_sensor()
self.add_fake_task_group()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_group_id="fake-task-group",
timeout=0.001,
dag=self.dag,
poke_interval=0.1,
)
with pytest.raises(AirflowSensorTimeout, match="Sensor has timed out"):
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_group_sensor_success(self):
self.add_time_sensor()
self.add_fake_task_group()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_group_id=TEST_TASK_GROUP_ID,
failed_states=[State.FAILED],
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_group_sensor_failed_states(self):
ti_states = [State.FAILED, State.FAILED]
self.add_time_sensor()
self.add_fake_task_group(ti_states)
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_group_id=TEST_TASK_GROUP_ID,
failed_states=[State.FAILED],
dag=self.dag,
)
with pytest.raises(
ExternalTaskGroupFailedError,
match=f"The external task_group '{TEST_TASK_GROUP_ID}' in DAG '{TEST_DAG_ID}' failed.",
):
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_catch_overlap_allowed_failed_state(self):
with pytest.raises(DuplicateStateError):
ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=[State.SUCCESS],
failed_states=[State.SUCCESS],
dag=self.dag,
)
def test_external_task_sensor_wrong_failed_states(self):
with pytest.raises(
ValueError,
match="Valid values for `allowed_states`, `skipped_states` and `failed_states` when `external_task_id` or `external_task_ids` or `external_task_group_id` is not `None`",
):
ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
failed_states=["invalid_state"],
dag=self.dag,
)
def test_external_task_sensor_failed_states(self):
self.add_time_sensor()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
failed_states=["failed"],
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_failed_states_as_success(self, caplog):
self.add_time_sensor()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=["failed"],
failed_states=["success"],
dag=self.dag,
)
error_message = rf"Some of the external tasks \['{TEST_TASK_ID}'\] in DAG {TEST_DAG_ID} failed\."
with caplog.at_level(logging.INFO, logger=op.log.name):
caplog.clear()
with pytest.raises(ExternalTaskFailedError, match=error_message):
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
assert (
f"Poking for tasks ['{TEST_TASK_ID}'] in dag {TEST_DAG_ID} on {DEFAULT_DATE.isoformat()} ... "
) in caplog.messages
def test_external_task_sensor_soft_fail_failed_states_as_skipped(self):
self.add_time_sensor()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=[State.FAILED],
failed_states=[State.SUCCESS],
soft_fail=True,
dag=self.dag,
)
# when
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
# then
session = settings.Session()
task_instances: list[TI] = session.query(TI).filter(TI.task_id == op.task_id).all()
assert len(task_instances) == 1, "Unexpected number of task instances"
assert task_instances[0].state == State.SKIPPED, "Unexpected external task state"
def test_external_task_sensor_skipped_states_as_skipped(self, session):
self.add_time_sensor()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=[State.FAILED],
skipped_states=[State.SUCCESS],
dag=self.dag,
)
# when
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
# then
task_instances: list[TI] = session.query(TI).filter(TI.task_id == op.task_id).all()
assert len(task_instances) == 1, "Unexpected number of task instances"
assert task_instances[0].state == State.SKIPPED, "Unexpected external task state"
def test_external_task_sensor_external_task_id_param(self, caplog):
"""Test external_task_ids is set properly when external_task_id is passed as a template"""
self.add_time_sensor()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id="{{ params.dag_id }}",
external_task_id="{{ params.task_id }}",
params={"dag_id": TEST_DAG_ID, "task_id": TEST_TASK_ID},
dag=self.dag,
)
with caplog.at_level(logging.INFO, logger=op.log.name):
caplog.clear()
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
assert (
f"Poking for tasks ['{TEST_TASK_ID}'] in dag {TEST_DAG_ID} on {DEFAULT_DATE.isoformat()} ... "
) in caplog.messages
def test_external_task_sensor_external_task_ids_param(self, caplog):
"""Test external_task_ids rendering when a template is passed."""
self.add_time_sensor()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id="{{ params.dag_id }}",
external_task_ids=["{{ params.task_id }}"],
params={"dag_id": TEST_DAG_ID, "task_id": TEST_TASK_ID},
dag=self.dag,
)
with caplog.at_level(logging.INFO, logger=op.log.name):
caplog.clear()
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
assert (
f"Poking for tasks ['{TEST_TASK_ID}'] in dag {TEST_DAG_ID} on {DEFAULT_DATE.isoformat()} ... "
) in caplog.messages
def test_external_task_sensor_failed_states_as_success_mulitple_task_ids(self, caplog):
self.add_time_sensor(task_id=TEST_TASK_ID)
self.add_time_sensor(task_id=TEST_TASK_ID_ALTERNATE)
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check_task_ids",
external_dag_id=TEST_DAG_ID,
external_task_ids=[TEST_TASK_ID, TEST_TASK_ID_ALTERNATE],
allowed_states=["failed"],
failed_states=["success"],
dag=self.dag,
)
error_message = (
rf"Some of the external tasks \['{TEST_TASK_ID}'\, \'{TEST_TASK_ID_ALTERNATE}\'] "
rf"in DAG {TEST_DAG_ID} failed\."
)
with caplog.at_level(logging.INFO, logger=op.log.name):
caplog.clear()
with pytest.raises(ExternalTaskFailedError, match=error_message):
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
assert (
f"Poking for tasks ['{TEST_TASK_ID}', '{TEST_TASK_ID_ALTERNATE}'] "
f"in dag unit_test_dag on {DEFAULT_DATE.isoformat()} ... "
) in caplog.messages
def test_external_dag_sensor(self, dag_maker):
with dag_maker("other_dag", default_args=self.args, end_date=DEFAULT_DATE, schedule="@once"):
pass
dag_maker.create_dagrun(state=DagRunState.SUCCESS)
op = ExternalTaskSensor(
task_id="test_external_dag_sensor_check",
external_dag_id="other_dag",
external_task_id=None,
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_dag_sensor_log(self, caplog, dag_maker):
with dag_maker("other_dag", default_args=self.args, end_date=DEFAULT_DATE, schedule="@once"):
pass
dag_maker.create_dagrun(state=DagRunState.SUCCESS)
op = ExternalTaskSensor(
task_id="test_external_dag_sensor_check",
external_dag_id="other_dag",
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
assert (f"Poking for DAG 'other_dag' on {DEFAULT_DATE.isoformat()} ... ") in caplog.messages
def test_external_dag_sensor_soft_fail_as_skipped(self, dag_maker, session):
with dag_maker("other_dag", default_args=self.args, end_date=DEFAULT_DATE, schedule="@once"):
pass
dag_maker.create_dagrun(state=DagRunState.SUCCESS)
op = ExternalTaskSensor(
task_id="test_external_dag_sensor_check",
external_dag_id="other_dag",
external_task_id=None,
allowed_states=[State.FAILED],
failed_states=[State.SUCCESS],
soft_fail=True,
dag=self.dag,
)
# when
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
# then
task_instances: list[TI] = session.query(TI).filter(TI.task_id == op.task_id).all()
assert len(task_instances) == 1, "Unexpected number of task instances"
assert task_instances[0].state == State.SKIPPED, "Unexpected external task state"
def test_external_task_sensor_fn_multiple_logical_dates(self):
bash_command_code = """
{% set s=logical_date.time().second %}
echo "second is {{ s }}"
if [[ $(( {{ s }} % 60 )) == 1 ]]
then
exit 1
fi
exit 0
"""
dag_external_id = TEST_DAG_ID + "_external"
dag_external = DAG(dag_external_id, default_args=self.args, schedule=timedelta(seconds=1))
task_external_with_failure = BashOperator(
task_id="task_external_with_failure", bash_command=bash_command_code, retries=0, dag=dag_external
)
task_external_without_failure = EmptyOperator(
task_id="task_external_without_failure", retries=0, dag=dag_external
)
task_external_without_failure.run(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + timedelta(seconds=1), ignore_ti_state=True
)
session = settings.Session()
try:
task_external_with_failure.run(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + timedelta(seconds=1), ignore_ti_state=True
)
# The test_with_failure task is excepted to fail
# once per minute (the run on the first second of
# each minute).
except Exception as e:
failed_tis = (
session.query(TI)
.filter(
TI.dag_id == dag_external_id,
TI.state == State.FAILED,
TI.execution_date == DEFAULT_DATE + timedelta(seconds=1),
)
.all()
)
if len(failed_tis) == 1 and failed_tis[0].task_id == "task_external_with_failure":
pass
else:
raise e
dag_id = TEST_DAG_ID
dag = DAG(dag_id, default_args=self.args, schedule=timedelta(minutes=1))
task_without_failure = ExternalTaskSensor(
task_id="task_without_failure",
external_dag_id=dag_external_id,
external_task_id="task_external_without_failure",
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=["success"],
retries=0,
timeout=1,
poke_interval=1,
dag=dag,
)
task_with_failure = ExternalTaskSensor(
task_id="task_with_failure",
external_dag_id=dag_external_id,
external_task_id="task_external_with_failure",
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)],
allowed_states=["success"],
retries=0,
timeout=1,
poke_interval=1,
dag=dag,
)
task_without_failure.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
with pytest.raises(AirflowSensorTimeout):
task_with_failure.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
# Test to ensure that if one task in a chain of tasks fails, the
# ExternalTaskSensor will also report a failure and return without
# waiting for a timeout.
task_chain_with_failure = ExternalTaskSensor(
task_id="task_chain_with_failure",
external_dag_id=dag_external_id,
external_task_id="task_external_with_failure",
execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(3)],
allowed_states=["success"],
failed_states=["failed"],
retries=0,
timeout=5,
poke_interval=1,
dag=dag,
)
# We need to test for an ExternalTaskFailedError explicitly since
# AirflowSensorTimeout is a subclass that will be raised if this does
# not execute properly.
with pytest.raises(ExternalTaskFailedError) as ex_ctx:
task_chain_with_failure.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
assert type(ex_ctx.value) is ExternalTaskFailedError
def test_external_task_sensor_delta(self):
self.add_time_sensor()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check_delta",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_delta=timedelta(0),
allowed_states=["success"],
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_fn(self):
self.add_time_sensor()
# check that the execution_fn works
op1 = ExternalTaskSensor(
task_id="test_external_task_sensor_check_delta_1",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=lambda dt: dt + timedelta(0),
allowed_states=["success"],
dag=self.dag,
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
# double check that the execution is being called by failing the test
op2 = ExternalTaskSensor(
task_id="test_external_task_sensor_check_delta_2",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=lambda dt: dt + timedelta(days=1),
allowed_states=["success"],
timeout=1,
poke_interval=1,
dag=self.dag,
)
with pytest.raises(AirflowSensorTimeout):
op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_fn_multiple_args(self):
"""Check this task sensor passes multiple args with full context. If no failure, means clean run."""
self.add_time_sensor()
def my_func(dt, context):
assert context["logical_date"] == dt
return dt + timedelta(0)
op1 = ExternalTaskSensor(
task_id="test_external_task_sensor_multiple_arg_fn",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=my_func,
allowed_states=["success"],
dag=self.dag,
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_fn_kwargs(self):
"""Check this task sensor passes multiple args with full context. If no failure, means clean run."""
self.add_time_sensor()
def my_func(dt, ds_nodash):
assert ds_nodash == dt.strftime("%Y%m%d")
return dt + timedelta(0)
op1 = ExternalTaskSensor(
task_id="test_external_task_sensor_fn_kwargs",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_date_fn=my_func,
allowed_states=["success"],
dag=self.dag,
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_error_delta_and_fn(self):
self.add_time_sensor()
# Test that providing execution_delta and a function raises an error
with pytest.raises(
ValueError,
match="Only one of `execution_delta` or `execution_date_fn` may be provided to ExternalTaskSensor; not both.",
):
ExternalTaskSensor(
task_id="test_external_task_sensor_check_delta",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
execution_delta=timedelta(0),
execution_date_fn=lambda dt: dt,
allowed_states=["success"],
dag=self.dag,
)
def test_external_task_sensor_error_task_id_and_task_ids(self):
self.add_time_sensor()
# Test that providing execution_delta and a function raises an error
with pytest.raises(
ValueError,
match=EXTERNAL_ID_AND_IDS_PROVIDE_ERROR,
):
ExternalTaskSensor(
task_id="test_external_task_sensor_task_id_and_task_ids",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
external_task_ids=[TEST_TASK_ID],
allowed_states=["success"],
dag=self.dag,
)
def test_external_task_sensor_with_xcom_arg_does_not_fail_on_init(self):
self.add_time_sensor()
op1 = MockOperator(task_id="op1", dag=self.dag)
op2 = ExternalTaskSensor(
task_id="test_external_task_sensor_with_xcom_arg_does_not_fail_on_init",
external_dag_id=TEST_DAG_ID,
external_task_ids=XComArg(op1),
allowed_states=["success"],
dag=self.dag,
)
assert isinstance(op2.external_task_ids, XComArg)
def test_catch_duplicate_task_ids(self):
self.add_time_sensor()
op1 = ExternalTaskSensor(
task_id="test_external_task_duplicate_task_ids",
external_dag_id=TEST_DAG_ID,
external_task_ids=[TEST_TASK_ID, TEST_TASK_ID],
allowed_states=["success"],
dag=self.dag,
)
with pytest.raises(ValueError, match="Duplicate task_ids"):
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
def test_catch_duplicate_task_ids_with_xcom_arg(self):
self.add_time_sensor()
op1 = PythonOperator(
python_callable=lambda: ["dupe_value", "dupe_value"],
task_id="op1",
do_xcom_push=True,
dag=self.dag,
)
op2 = ExternalTaskSensor(
task_id="test_external_task_duplicate_task_ids_with_xcom_arg",
external_dag_id=TEST_DAG_ID,
external_task_ids=XComArg(op1),
allowed_states=["success"],
dag=self.dag,
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
with pytest.raises(ValueError, match="Duplicate task_ids"):
op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
def test_catch_duplicate_task_ids_with_multiple_xcom_args(self):
self.add_time_sensor()
op1 = PythonOperator(
python_callable=lambda: "value",
task_id="op1",
do_xcom_push=True,
dag=self.dag,
)
op2 = ExternalTaskSensor(
task_id="test_external_task_duplicate_task_ids_with_xcom_arg",
external_dag_id=TEST_DAG_ID,
external_task_ids=[XComArg(op1), XComArg(op1)],
allowed_states=["success"],
dag=self.dag,
)
op1.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
with pytest.raises(ValueError, match="Duplicate task_ids"):
op2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
def test_catch_invalid_allowed_states(self):
with pytest.raises(
ValueError,
match="Valid values for `allowed_states`, `skipped_states` and `failed_states`",
):
ExternalTaskSensor(
task_id="test_external_task_sensor_check_1",
external_dag_id=TEST_DAG_ID,
external_task_id=TEST_TASK_ID,
allowed_states=["invalid_state"],
dag=self.dag,
)
with pytest.raises(
ValueError,
match="Valid values for `allowed_states`, `skipped_states` and `failed_states`",
):
ExternalTaskSensor(
task_id="test_external_task_sensor_check_2",
external_dag_id=TEST_DAG_ID,
external_task_id=None,
allowed_states=["invalid_state"],
dag=self.dag,
)
def test_external_task_sensor_waits_for_task_check_existence(self):
self.add_time_sensor()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_id="non-existing-task",
check_existence=True,
dag=self.dag,
)
with pytest.raises(ExternalDagNotFoundError):
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_sensor_waits_for_dag_check_existence(self):
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id="non-existing-dag",
external_task_id=None,
check_existence=True,
dag=self.dag,
)
with pytest.raises(ExternalDagNotFoundError):
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_group_with_mapped_tasks_sensor_success(self):
self.add_time_sensor()
self.add_fake_task_group_with_dynamic_tasks()
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_group_id=TEST_TASK_GROUP_ID,
failed_states=[State.FAILED],
dag=self.dag,
)
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_group_with_mapped_tasks_failed_states(self):
self.add_time_sensor()
self.add_fake_task_group_with_dynamic_tasks(State.FAILED)
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_group_id=TEST_TASK_GROUP_ID,
failed_states=[State.FAILED],
dag=self.dag,
)
with pytest.raises(
ExternalTaskGroupFailedError,
match=f"The external task_group '{TEST_TASK_GROUP_ID}' in DAG '{TEST_DAG_ID}' failed.",
):
op.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_external_task_group_when_there_is_no_TIs(self):
"""Test that the sensor does not fail when there are no TIs to check."""
self.add_time_sensor()
self.add_fake_task_group_with_dynamic_tasks(State.FAILED)
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id=TEST_DAG_ID,
external_task_group_id=TEST_TASK_GROUP_ID,
failed_states=[State.FAILED],
dag=self.dag,
poke_interval=1,
timeout=3,
)
with pytest.raises(AirflowSensorTimeout):
op.run(
start_date=DEFAULT_DATE + timedelta(hours=1),
end_date=DEFAULT_DATE + timedelta(hours=1),
ignore_ti_state=True,
)
@pytest.mark.parametrize(
("kwargs", "expected_message"),
(
(
{
"external_task_ids": [TEST_TASK_ID, TEST_TASK_ID_ALTERNATE],
"failed_states": [State.FAILED],
},
f"Some of the external tasks {re.escape(str([TEST_TASK_ID, TEST_TASK_ID_ALTERNATE]))}"
f" in DAG {TEST_DAG_ID} failed.",
),
(
{
"external_task_group_id": [TEST_TASK_ID, TEST_TASK_ID_ALTERNATE],
"failed_states": [State.FAILED],
},
f"The external task_group '{re.escape(str([TEST_TASK_ID, TEST_TASK_ID_ALTERNATE]))}'"
f" in DAG '{TEST_DAG_ID}' failed.",
),
(
{"failed_states": [State.FAILED]},
f"The external DAG {TEST_DAG_ID} failed.",
),
),
)
@pytest.mark.parametrize(
("soft_fail", "expected_exception"),
(
(
False,
ExternalTaskFailedError,
),
(
True,
AirflowSkipException,
),
),
)
@mock.patch("airflow.providers.standard.sensors.external_task.ExternalTaskSensor.get_count")
@mock.patch("airflow.providers.standard.sensors.external_task.ExternalTaskSensor._get_dttm_filter")
def test_fail_poke(
self, _get_dttm_filter, get_count, soft_fail, expected_exception, kwargs, expected_message
):
_get_dttm_filter.return_value = [DEFAULT_DATE]
get_count.return_value = 1
op = ExternalTaskSensor(
task_id="test_external_task_duplicate_task_ids",
external_dag_id=TEST_DAG_ID,
allowed_states=["success"],
dag=self.dag,
soft_fail=soft_fail,
deferrable=False,
**kwargs,
)
assert op.external_dates_filter is None
# We need to handle the specific exception types based on kwargs
if not soft_fail:
expected_exc = expected_exception
if "external_task_ids" in kwargs:
expected_exc = ExternalTaskFailedError
elif "external_task_group_id" in kwargs:
expected_exc = ExternalTaskGroupFailedError
elif "failed_states" in kwargs and not any(
k in kwargs for k in ["external_task_ids", "external_task_group_id"]
):
expected_exc = ExternalDagFailedError
with pytest.raises(expected_exc, match=expected_message):
op.execute(context={})
else:
with pytest.raises(expected_exception, match=expected_message):
op.execute(context={})
assert op.external_dates_filter == DEFAULT_DATE.isoformat()
@pytest.mark.parametrize(
("response_get_current", "response_exists", "kwargs", "expected_message"),
(
(None, None, {}, f"The external DAG {TEST_DAG_ID} does not exist."),
(
DAG(dag_id="test", schedule=None),
False,
{},
f"The external DAG {TEST_DAG_ID} was deleted.",
),
(
DAG(dag_id="test", schedule=None),
True,
{"external_task_ids": [TEST_TASK_ID, TEST_TASK_ID_ALTERNATE]},
f"The external task {TEST_TASK_ID} in DAG {TEST_DAG_ID} does not exist.",
),
(
DAG(dag_id="test", schedule=None),
True,
{"external_task_group_id": [TEST_TASK_ID, TEST_TASK_ID_ALTERNATE]},
f"The external task group '{re.escape(str([TEST_TASK_ID, TEST_TASK_ID_ALTERNATE]))}'"
f" in DAG '{TEST_DAG_ID}' does not exist.",
),
),
)
@pytest.mark.parametrize(
("soft_fail", "expected_exception"),
(
(
False,
ExternalDagNotFoundError,
),
(
True,
ExternalDagNotFoundError,
),
),
)
@mock.patch("airflow.providers.standard.sensors.external_task.ExternalTaskSensor._get_dttm_filter")
@mock.patch("airflow.models.dagbag.DagBag.get_dag")
@mock.patch("os.path.exists")
@mock.patch("airflow.models.dag.DagModel.get_current")
def test_fail__check_for_existence(
self,
get_current,
exists,
get_dag,
_get_dttm_filter,
soft_fail,
expected_exception,
response_get_current,
response_exists,
kwargs,
expected_message,
):
_get_dttm_filter.return_value = []
get_current.return_value = response_get_current
exists.return_value = response_exists
get_dag_response = mock.MagicMock()
get_dag.return_value = get_dag_response
get_dag_response.has_task.return_value = False
get_dag_response.has_task_group.return_value = False
op = ExternalTaskSensor(
task_id="test_external_task_duplicate_task_ids",
external_dag_id=TEST_DAG_ID,
allowed_states=["success"],
dag=self.dag,
soft_fail=soft_fail,
check_existence=True,
**kwargs,
)
if not hasattr(op, "never_fail"):
expected_message = "Skipping due to soft_fail is set to True." if soft_fail else expected_message
specific_exception = expected_exception
if response_get_current is None:
specific_exception = ExternalDagNotFoundError
elif not response_exists:
specific_exception = ExternalDagDeletedError
elif "external_task_ids" in kwargs:
specific_exception = ExternalTaskNotFoundError
elif "external_task_group_id" in kwargs:
specific_exception = ExternalTaskGroupNotFoundError
with pytest.raises(specific_exception, match=expected_message):
op.execute(context={})
@pytest.mark.execution_timeout(10)
def test_external_task_sensor_deferrable(self, dag_maker):
context = {"execution_date": DEFAULT_DATE}
with dag_maker() as dag:
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id="test_dag_parent",
external_task_id="test_task",
deferrable=True,
allowed_states=["success"],
)
dr = dag.create_dagrun(
run_id="abcrhroceuh",
run_type=DagRunType.MANUAL,
state=None,
)
context.update(dag_run=dr, logical_date=DEFAULT_DATE)
with pytest.raises(TaskDeferred) as exc:
op.execute(context=context)
assert isinstance(exc.value.trigger, WorkflowTrigger)
assert exc.value.trigger.external_dag_id == "test_dag_parent"
assert exc.value.trigger.external_task_ids == ["test_task"]
assert exc.value.trigger.execution_dates == [DEFAULT_DATE]
def test_get_logical_date(self):
"""For AF 2, we check for execution_date in context."""
context = {"execution_date": DEFAULT_DATE}
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id="test_dag_parent",
external_task_id="test_task",
)
assert op._get_logical_date(context) == DEFAULT_DATE
def test_handle_execution_date_fn(self):
def func(dt, context):
assert context["execution_date"] == dt
return dt + timedelta(0)
op = ExternalTaskSensor(
task_id="test_external_task_sensor_check",
external_dag_id="test_dag_parent",
external_task_id="test_task",
execution_date_fn=func,
)
context = {"execution_date": DEFAULT_DATE}
assert op._handle_execution_date_fn(context) == DEFAULT_DATE
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Different test for AF 2")
@pytest.mark.usefixtures("testing_dag_bundle")
| TestExternalTaskSensorV2 |
python | dask__distributed | distributed/chaos.py | {
"start": 216,
"end": 1947
} | class ____(WorkerPlugin):
"""Kill Workers Randomly
This kills workers in a cluster randomly. It is intended to be used in
stress testing.
Parameters
----------
delay: str
The expected amount of time for a worker to live.
The actual time will vary, treating worker death as a poisson process.
mode: str
or "graceful" which calls worker.close(...)
Either "sys.exit" which calls sys.exit(0)
or "segfault" which triggers a segfault
"""
def __init__(
self,
delay: str | int | float = "100 s",
mode: Literal["sys.exit", "graceful", "segfault"] = "sys.exit",
):
self.delay = parse_timedelta(delay)
if mode not in ("sys.exit", "graceful", "segfault"):
raise ValueError(
f"Three modes supported, 'sys.exit', 'graceful', and 'segfault'. "
f"got {mode!r}"
)
self.mode = mode
async def setup(self, worker):
self.worker = worker
if self.mode == "graceful":
f = self.graceful
elif self.mode == "sys.exit":
f = self.sys_exit
elif self.mode == "segfault":
f = self.segfault
self.worker.loop.asyncio_loop.call_later(
delay=random.expovariate(1 / self.delay),
callback=f,
)
def graceful(self):
asyncio.create_task(self.worker.close(nanny=False, executor_wait=False))
def sys_exit(self):
sys.exit(0)
def segfault(self):
"""
Magic, from https://gist.github.com/coolreader18/6dbe0be2ae2192e90e1a809f1624c694?permalink_comment_id=3874116#gistcomment-3874116
"""
ctypes.string_at(0)
| KillWorker |
python | realpython__materials | python-sqlite-sqlalchemy/project/examples/example_3/app/models.py | {
"start": 2654,
"end": 3755
} | class ____(db.Model):
__tablename__ = "customers"
customer_id = db.Column("CustomerId", db.Integer, primary_key=True)
first_name = db.Column("FirstName", db.String(40), nullable=False)
last_name = db.Column("LastName", db.String(20), nullable=False)
company = db.Column("Company", db.String(80))
address = db.Column("Address", db.String(70))
city = db.Column("City", db.String(40))
state = db.Column("State", db.String(40))
country = db.Column("Country", db.String(40))
postal_code = db.Column("PostalCode", db.String(10))
phone = db.Column("Phone", db.String(24))
fax = db.Column("Fax", db.String(24))
email = db.Column("Email", db.String(60), nullable=False)
support_rep_id = db.Column(
"SupportRepId", db.ForeignKey("employees.EmployeeId"), index=True
)
support_rep = db.relationship(
"Employee", backref=db.backref("customers", uselist=False)
)
invoices = db.relationship("Invoice", backref="customer")
@hybrid_property
def full_name(self):
return f"{self.first_name} {self.last_name}"
| Customer |
python | google__pytype | pytype/tools/analyze_project/environment.py | {
"start": 136,
"end": 809
} | class ____(fs.ExtensionRemappingFileSystem):
"""File system that remaps .py file extensions to pytd."""
def __init__(self, underlying):
super().__init__(underlying, 'pytd')
def create_importlab_environment(conf, typeshed):
"""Create an importlab environment from the python version and path."""
python_version = utils.version_from_string(conf.python_version)
path = fs.Path()
for p in conf.pythonpath:
path.add_path(p, 'os')
for p in typeshed.get_pytd_paths():
path.add_fs(PytdFileSystem(fs.OSFileSystem(p)))
for p in typeshed.get_typeshed_paths():
path.add_path(p, 'pyi')
return environment.Environment(path, python_version)
| PytdFileSystem |
python | django-import-export__django-import-export | import_export/tmp_storages.py | {
"start": 515,
"end": 1189
} | class ____(BaseStorage):
def save(self, data):
with self._open(mode="w") as file:
file.write(data)
def read(self):
with self._open(mode=self.read_mode) as file:
return file.read()
def remove(self):
os.remove(self.get_full_path())
def get_full_path(self):
return os.path.join(tempfile.gettempdir(), self.name)
def _open(self, mode="r"):
if self.name:
return open(self.get_full_path(), mode, encoding=self.encoding)
else:
tmp_file = tempfile.NamedTemporaryFile(delete=False)
self.name = tmp_file.name
return tmp_file
| TempFolderStorage |
python | huggingface__transformers | tests/models/siglip2/test_modeling_siglip2.py | {
"start": 10235,
"end": 13658
} | class ____(Siglip2ModelTesterMixin, unittest.TestCase):
"""
Here we also overwrite some of the tests of test_modeling_common.py, as SIGLIP2 does not use input_ids, inputs_embeds,
attention_mask and seq_length.
"""
all_model_classes = (Siglip2VisionModel,) if is_torch_available() else ()
additional_model_inputs = ["pixel_attention_mask", "spatial_shapes"]
test_resize_embeddings = False
# MP works but offload doesn't work when the MultiheadAttention is offloaded
# TODO: One potential solution would be to add to set preload_module_classes = ["Siglip2MultiheadAttentionPoolingHead"]
# in the dispatch_model function
test_cpu_offload = False
test_disk_offload_safetensors = False
test_disk_offload_bin = False
def setUp(self):
self.model_tester = Siglip2VisionModelTester(self)
self.config_tester = ConfigTester(
self, config_class=Siglip2VisionConfig, has_text_modality=False, hidden_size=37
)
def test_config(self):
self.config_tester.run_common_tests()
@unittest.skip(reason="SIGLIP2 does not use inputs_embeds")
def test_inputs_embeds(self):
pass
def test_model_get_set_embeddings(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["pixel_values"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Siglip2VisionModel does not support standalone training")
def test_training(self):
pass
@unittest.skip(reason="Siglip2VisionModel does not support standalone training")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="Siglip2VisionModel does not support standalone training")
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(reason="Siglip2VisionModel does not support standalone training")
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@slow
def test_model_from_pretrained(self):
model_name = "google/siglip2-base-patch16-naflex"
model = Siglip2VisionModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION)
@is_flaky()
def test_eager_matches_sdpa_inference(self, *args):
# adding only flaky decorator here and call the parent test method
return getattr(ModelTesterMixin, self._testMethodName)(self)
| Siglip2VisionModelTest |
python | plotly__plotly.py | plotly/graph_objs/funnelarea/_domain.py | {
"start": 233,
"end": 5068
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "funnelarea"
_path_str = "funnelarea.domain"
_valid_props = {"column", "row", "x", "y"}
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this funnelarea trace .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this funnelarea trace .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
@property
def x(self):
"""
Sets the horizontal domain of this funnelarea trace (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
Sets the vertical domain of this funnelarea trace (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this funnelarea trace .
row
If there is a layout grid, use the domain for this row
in the grid for this funnelarea trace .
x
Sets the horizontal domain of this funnelarea trace (in
plot fraction).
y
Sets the vertical domain of this funnelarea trace (in
plot fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.funnelarea.Domain`
column
If there is a layout grid, use the domain for this
column in the grid for this funnelarea trace .
row
If there is a layout grid, use the domain for this row
in the grid for this funnelarea trace .
x
Sets the horizontal domain of this funnelarea trace (in
plot fraction).
y
Sets the vertical domain of this funnelarea trace (in
plot fraction).
Returns
-------
Domain
"""
super().__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.funnelarea.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnelarea.Domain`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("column", arg, column)
self._set_property("row", arg, row)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Domain |
python | huggingface__transformers | src/transformers/models/siglip/image_processing_siglip_fast.py | {
"start": 902,
"end": 1257
} | class ____(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = IMAGENET_STANDARD_MEAN
image_std = IMAGENET_STANDARD_STD
size = {"height": 224, "width": 224}
default_to_square = False
do_resize = True
do_rescale = True
do_normalize = True
__all__ = ["SiglipImageProcessorFast"]
| SiglipImageProcessorFast |
python | huggingface__transformers | tests/models/xlm/test_tokenization_xlm.py | {
"start": 856,
"end": 3703
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "FacebookAI/xlm-mlm-en-2048"
tokenizer_class = XLMTokenizer
test_rust_tokenizer = False
@classmethod
def setUpClass(cls):
super().setUpClass()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
cls.vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
def test_full_tokenizer(self):
"""Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdir:
vocab_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"])
merges_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["merges_file"])
with open(vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
tokenizer = XLMTokenizer(vocab_file, merges_file)
text = "lower"
bpe_tokens = ["low", "er</w>"]
tokens = tokenizer.tokenize(text)
self.assertListEqual(tokens, bpe_tokens)
input_tokens = tokens + ["<unk>"]
input_bpe_tokens = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
def test_sequence_builders(self):
tokenizer = XLMTokenizer.from_pretrained("FacebookAI/xlm-mlm-en-2048")
text = tokenizer.encode("sequence builders", add_special_tokens=False)
text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_2 + [1]
| XLMTokenizationTest |
python | langchain-ai__langchain | libs/partners/prompty/tests/unit_tests/fake_callback_handler.py | {
"start": 6584,
"end": 9217
} | class ____(AsyncCallbackHandler, BaseFakeCallbackHandlerMixin):
"""Fake async callback handler for testing."""
@property
def ignore_llm(self) -> bool:
"""Whether to ignore LLM callbacks."""
return self.ignore_llm_
@property
def ignore_chain(self) -> bool:
"""Whether to ignore chain callbacks."""
return self.ignore_chain_
@property
def ignore_agent(self) -> bool:
"""Whether to ignore agent callbacks."""
return self.ignore_agent_
async def on_retry(
self,
*args: Any,
**kwargs: Any,
) -> Any:
self.on_retry_common()
async def on_llm_start(
self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
) -> None:
self.on_llm_start_common()
async def on_llm_new_token(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_new_token_common()
async def on_llm_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_end_common()
async def on_llm_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_llm_error_common()
async def on_chain_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_start_common()
async def on_chain_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_end_common()
async def on_chain_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_chain_error_common()
async def on_tool_start(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_start_common()
async def on_tool_end(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_end_common()
async def on_tool_error(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_tool_error_common()
async def on_agent_action(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_agent_action_common()
async def on_agent_finish(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_agent_finish_common()
async def on_text(
self,
*args: Any,
**kwargs: Any,
) -> None:
self.on_text_common()
# Overriding since BaseModel has __deepcopy__ method as well
def __deepcopy__(self, memo: dict) -> "FakeAsyncCallbackHandler": # type: ignore
return self
| FakeAsyncCallbackHandler |
python | chroma-core__chroma | chromadb/test/api/test_schema.py | {
"start": 2123,
"end": 138134
} | class ____:
"""Test cases for the new Schema class."""
def test_default_schema_initialization(self) -> None:
"""Test that Schema() initializes with correct defaults."""
schema = Schema()
# Verify defaults are populated
assert schema.defaults is not None
# Verify string value type defaults
assert schema.defaults.string is not None
assert schema.defaults.string.fts_index is not None
assert schema.defaults.string.fts_index.enabled is False # Disabled by default
assert schema.defaults.string.string_inverted_index is not None
assert (
schema.defaults.string.string_inverted_index.enabled is True
) # Enabled by default
# Verify float_list value type defaults
assert schema.defaults.float_list is not None
assert schema.defaults.float_list.vector_index is not None
assert (
schema.defaults.float_list.vector_index.enabled is False
) # Disabled by default
# Verify sparse_vector value type defaults
assert schema.defaults.sparse_vector is not None
assert schema.defaults.sparse_vector.sparse_vector_index is not None
assert (
schema.defaults.sparse_vector.sparse_vector_index.enabled is False
) # Disabled by default
# Verify int_value type defaults
assert schema.defaults.int_value is not None
assert schema.defaults.int_value.int_inverted_index is not None
assert (
schema.defaults.int_value.int_inverted_index.enabled is True
) # Enabled by default
# Verify float_value type defaults
assert schema.defaults.float_value is not None
assert schema.defaults.float_value.float_inverted_index is not None
assert (
schema.defaults.float_value.float_inverted_index.enabled is True
) # Enabled by default
# Verify boolean type defaults
assert schema.defaults.boolean is not None
assert schema.defaults.boolean.bool_inverted_index is not None
assert (
schema.defaults.boolean.bool_inverted_index.enabled is True
) # Enabled by default
# Verify keys are populated
assert schema.keys is not None
assert len(schema.keys) == 2 # Should have #document and #embedding
# Verify #document key override (FTS enabled, string inverted disabled)
assert "#document" in schema.keys
assert schema.keys["#document"].string is not None
assert schema.keys["#document"].string.fts_index is not None
assert schema.keys["#document"].string.fts_index.enabled is True
assert schema.keys["#document"].string.string_inverted_index is not None
assert schema.keys["#document"].string.string_inverted_index.enabled is False
# Verify #embedding key override (vector index enabled)
assert "#embedding" in schema.keys
assert schema.keys["#embedding"].float_list is not None
assert schema.keys["#embedding"].float_list.vector_index is not None
assert schema.keys["#embedding"].float_list.vector_index.enabled is True
assert (
schema.keys["#embedding"].float_list.vector_index.config.source_key
== "#document"
)
def test_create_sparse_vector_index_on_key(self) -> None:
"""Test creating a sparse vector index on a specific key with default config."""
schema = Schema()
# Create sparse vector index on a custom key with default config
config = SparseVectorIndexConfig()
result = schema.create_index(config=config, key="custom_sparse_key")
# Should return self for chaining
assert result is schema
# Verify the key override was created
assert "custom_sparse_key" in schema.keys
# Verify sparse_vector type was set for this key
assert schema.keys["custom_sparse_key"].sparse_vector is not None
assert (
schema.keys["custom_sparse_key"].sparse_vector.sparse_vector_index
is not None
)
# Verify it's enabled and has the correct config
assert (
schema.keys["custom_sparse_key"].sparse_vector.sparse_vector_index.enabled
is True
)
assert (
schema.keys["custom_sparse_key"].sparse_vector.sparse_vector_index.config
== config
)
# Verify other value types for this key are None (not initialized)
assert schema.keys["custom_sparse_key"].string is None
assert schema.keys["custom_sparse_key"].float_list is None
assert schema.keys["custom_sparse_key"].int_value is None
assert schema.keys["custom_sparse_key"].float_value is None
assert schema.keys["custom_sparse_key"].boolean is None
# Verify defaults were not affected
assert schema.defaults.sparse_vector is not None
assert schema.defaults.sparse_vector.sparse_vector_index is not None
assert (
schema.defaults.sparse_vector.sparse_vector_index.enabled is False
) # Still disabled by default
def test_create_sparse_vector_index_with_custom_config(self) -> None:
"""Test creating a sparse vector index with custom config including embedding function."""
schema = Schema()
# Create custom sparse vector config with embedding function and source key
embedding_func = MockSparseEmbeddingFunction(name="custom_sparse_ef")
config = SparseVectorIndexConfig(
embedding_function=embedding_func, source_key="custom_document_field"
)
# Create sparse vector index on a custom key
result = schema.create_index(config=config, key="sparse_embeddings")
# Should return self for chaining
assert result is schema
# Verify the key override was created
assert "sparse_embeddings" in schema.keys
assert schema.keys["sparse_embeddings"].sparse_vector is not None
assert (
schema.keys["sparse_embeddings"].sparse_vector.sparse_vector_index
is not None
)
# Verify it's enabled
sparse_index = schema.keys[
"sparse_embeddings"
].sparse_vector.sparse_vector_index
assert sparse_index.enabled is True
# Verify the config has our custom settings
assert sparse_index.config.embedding_function == embedding_func
assert sparse_index.config.source_key == "custom_document_field"
# Verify the embedding function is the same instance
assert sparse_index.config.embedding_function.name() == "mock_sparse"
assert sparse_index.config.embedding_function.get_config() == {
"name": "custom_sparse_ef"
}
# Verify global defaults were not overridden
assert schema.defaults.sparse_vector is not None
assert schema.defaults.sparse_vector.sparse_vector_index is not None
assert (
schema.defaults.sparse_vector.sparse_vector_index.enabled is False
) # Still disabled by default
assert (
schema.defaults.sparse_vector.sparse_vector_index.config.embedding_function
is None
) # No custom embedding function
def test_delete_index_on_key(self) -> None:
"""Test disabling string inverted index on a specific key."""
schema = Schema()
# Create a config and disable it on a specific key
config = StringInvertedIndexConfig()
result = schema.delete_index(config=config, key="custom_text_key")
# Should return self for chaining
assert result is schema
# Verify the key override was created
assert "custom_text_key" in schema.keys
# Verify string inverted index is disabled for this key
assert schema.keys["custom_text_key"].string is not None
assert schema.keys["custom_text_key"].string.string_inverted_index is not None
assert (
schema.keys["custom_text_key"].string.string_inverted_index.enabled is False
)
# Verify other keys are not affected - check #document key
assert "#document" in schema.keys
assert schema.keys["#document"].string is not None
assert schema.keys["#document"].string.string_inverted_index is not None
assert (
schema.keys["#document"].string.string_inverted_index.enabled is False
) # Was disabled by default in #document
# Verify other keys are not affected - check #embedding key (shouldn't have string config)
assert "#embedding" in schema.keys
assert (
schema.keys["#embedding"].string is None
) # #embedding doesn't have string configs
# Verify global defaults are not affected
assert schema.defaults.string is not None
assert schema.defaults.string.string_inverted_index is not None
assert (
schema.defaults.string.string_inverted_index.enabled is True
) # Global default is still enabled
def test_chained_create_and_delete_operations(self) -> None:
"""Test chaining create_index() and delete_index() operations together."""
schema = Schema()
# Chain multiple operations:
# 1. Create sparse vector index on "embeddings_key"
# 2. Disable string inverted index on "text_key_1"
# 3. Disable string inverted index on "text_key_2"
sparse_config = SparseVectorIndexConfig(
source_key="raw_text", embedding_function=MockSparseEmbeddingFunction()
)
string_config = StringInvertedIndexConfig()
result = (
schema.create_index(config=sparse_config, key="embeddings_key")
.delete_index(config=string_config, key="text_key_1")
.delete_index(config=string_config, key="text_key_2")
)
# Should return self for chaining
assert result is schema
# Verify all three key overrides were created
assert "embeddings_key" in schema.keys
assert "text_key_1" in schema.keys
assert "text_key_2" in schema.keys
# Verify sparse vector index on "embeddings_key" is enabled
assert schema.keys["embeddings_key"].sparse_vector is not None
assert (
schema.keys["embeddings_key"].sparse_vector.sparse_vector_index is not None
)
assert (
schema.keys["embeddings_key"].sparse_vector.sparse_vector_index.enabled
is True
)
assert (
schema.keys[
"embeddings_key"
].sparse_vector.sparse_vector_index.config.source_key
== "raw_text"
)
# Verify only sparse_vector is set for embeddings_key (other types are None)
assert schema.keys["embeddings_key"].string is None
assert schema.keys["embeddings_key"].float_list is None
assert schema.keys["embeddings_key"].int_value is None
assert schema.keys["embeddings_key"].float_value is None
assert schema.keys["embeddings_key"].boolean is None
# Verify string inverted index on "text_key_1" is disabled
assert schema.keys["text_key_1"].string is not None
assert schema.keys["text_key_1"].string.string_inverted_index is not None
assert schema.keys["text_key_1"].string.string_inverted_index.enabled is False
# Verify only string is set for text_key_1 (other types are None)
assert schema.keys["text_key_1"].sparse_vector is None
assert schema.keys["text_key_1"].float_list is None
assert schema.keys["text_key_1"].int_value is None
assert schema.keys["text_key_1"].float_value is None
assert schema.keys["text_key_1"].boolean is None
# Verify string inverted index on "text_key_2" is disabled
assert schema.keys["text_key_2"].string is not None
assert schema.keys["text_key_2"].string.string_inverted_index is not None
assert schema.keys["text_key_2"].string.string_inverted_index.enabled is False
# Verify only string is set for text_key_2 (other types are None)
assert schema.keys["text_key_2"].sparse_vector is None
assert schema.keys["text_key_2"].float_list is None
assert schema.keys["text_key_2"].int_value is None
assert schema.keys["text_key_2"].float_value is None
assert schema.keys["text_key_2"].boolean is None
# Verify global defaults are not affected
assert schema.defaults.sparse_vector is not None
assert schema.defaults.sparse_vector.sparse_vector_index is not None
assert (
schema.defaults.sparse_vector.sparse_vector_index.enabled is False
) # Still disabled globally
assert schema.defaults.string is not None
assert schema.defaults.string.string_inverted_index is not None
assert (
schema.defaults.string.string_inverted_index.enabled is True
) # Still enabled globally
# Verify pre-existing key overrides (#document, #embedding) are not affected
assert "#document" in schema.keys
assert "#embedding" in schema.keys
assert schema.keys["#document"].string is not None
assert schema.keys["#document"].string.fts_index is not None
assert (
schema.keys["#document"].string.fts_index.enabled is True
) # Still enabled
assert schema.keys["#embedding"].float_list is not None
assert schema.keys["#embedding"].float_list.vector_index is not None
assert (
schema.keys["#embedding"].float_list.vector_index.enabled is True
) # Still enabled
def test_vector_index_config_and_restrictions(self) -> None:
"""Test vector index configuration and key restrictions."""
schema = Schema()
vector_config = VectorIndexConfig(space="cosine", source_key="custom_source")
# Test 1: CAN set vector config globally - applies to defaults and #embedding
result = schema.create_index(config=vector_config)
assert result is schema # Should return self for chaining
# Verify the vector config was applied to defaults (enabled state preserved as False)
assert schema.defaults.float_list is not None
assert schema.defaults.float_list.vector_index is not None
assert (
schema.defaults.float_list.vector_index.enabled is False
) # Still disabled in defaults
assert schema.defaults.float_list.vector_index.config.space == "cosine"
assert (
schema.defaults.float_list.vector_index.config.source_key == "custom_source"
)
# Verify the vector config was also applied to #embedding (enabled state preserved as True)
# Note: source_key should NOT be overridden on #embedding - it should stay as "#document"
assert schema.keys["#embedding"].float_list is not None
assert schema.keys["#embedding"].float_list.vector_index is not None
assert (
schema.keys["#embedding"].float_list.vector_index.enabled is True
) # Still enabled on #embedding
assert (
schema.keys["#embedding"].float_list.vector_index.config.space == "cosine"
)
assert (
schema.keys["#embedding"].float_list.vector_index.config.source_key
== "#document"
) # Preserved, NOT overridden
# Test 2: Cannot create vector index on custom key
vector_config2 = VectorIndexConfig(space="l2")
with pytest.raises(
ValueError, match="Vector index cannot be enabled on specific keys"
):
schema.create_index(config=vector_config2, key="my_vectors")
# Test 3: Cannot create vector index on #document key (special key blocked globally)
with pytest.raises(
ValueError, match="Cannot create index on special key '#document'"
):
schema.create_index(config=vector_config2, key="#document")
# Test 4: Cannot create vector index on #embedding key (special key blocked globally)
vector_config3 = VectorIndexConfig(space="ip")
with pytest.raises(
ValueError, match="Cannot create index on special key '#embedding'"
):
schema.create_index(config=vector_config3, key="#embedding")
def test_vector_index_with_embedding_function_and_hnsw(self) -> None:
"""Test setting embedding function and HNSW config for vector index."""
schema = Schema()
# Create a custom embedding function and HNSW config
mock_ef = MockEmbeddingFunction(model_name="custom_model_v2")
hnsw_config = HnswIndexConfig(
ef_construction=200, max_neighbors=32, ef_search=100
)
# Set vector config with embedding function, space, and HNSW config
vector_config = VectorIndexConfig(
embedding_function=mock_ef,
space="l2", # Override default space from EF
hnsw=hnsw_config,
source_key="custom_document_field",
)
result = schema.create_index(config=vector_config)
assert result is schema
# Verify defaults: should have EF, space, HNSW, and source_key
assert schema.defaults.float_list is not None
defaults_vector = schema.defaults.float_list.vector_index
assert defaults_vector is not None
assert defaults_vector.enabled is False
assert defaults_vector.config.embedding_function is mock_ef
assert defaults_vector.config.embedding_function.name() == "mock_embedding"
assert defaults_vector.config.embedding_function.get_config() == {
"model_name": "custom_model_v2"
}
assert defaults_vector.config.space == "l2"
assert defaults_vector.config.hnsw is not None
assert defaults_vector.config.hnsw.ef_construction == 200
assert defaults_vector.config.hnsw.max_neighbors == 32
assert defaults_vector.config.hnsw.ef_search == 100
assert defaults_vector.config.source_key == "custom_document_field"
# Verify #embedding: should have EF, space, HNSW, but source_key is preserved as "#document"
assert schema.keys["#embedding"].float_list is not None
embedding_vector = schema.keys["#embedding"].float_list.vector_index
assert embedding_vector is not None
assert embedding_vector.enabled is True
assert embedding_vector.config.embedding_function is mock_ef
assert embedding_vector.config.space == "l2"
assert embedding_vector.config.hnsw is not None
assert embedding_vector.config.hnsw.ef_construction == 200
assert (
embedding_vector.config.source_key == "#document"
) # Preserved, NOT overridden by user config
def test_fts_index_config_and_restrictions(self) -> None:
"""Test FTS index configuration and key restrictions."""
schema = Schema()
fts_config = FtsIndexConfig()
# Test 1: CAN set FTS config globally - applies to defaults and #document
result = schema.create_index(config=fts_config)
assert result is schema # Should return self for chaining
# Verify the FTS config was applied to defaults (enabled state preserved as False)
assert schema.defaults.string is not None
assert schema.defaults.string.fts_index is not None
assert (
schema.defaults.string.fts_index.enabled is False
) # Still disabled in defaults
assert schema.defaults.string.fts_index.config == fts_config
# Verify the FTS config was also applied to #document (enabled state preserved as True)
assert schema.keys["#document"].string is not None
assert schema.keys["#document"].string.fts_index is not None
assert (
schema.keys["#document"].string.fts_index.enabled is True
) # Still enabled on #document
assert schema.keys["#document"].string.fts_index.config == fts_config
# Test 2: Cannot create FTS index on custom key
fts_config2 = FtsIndexConfig()
with pytest.raises(
ValueError, match="FTS index cannot be enabled on specific keys"
):
schema.create_index(config=fts_config2, key="custom_text_field")
# Test 3: Cannot create FTS index on #embedding key (special key blocked globally)
with pytest.raises(
ValueError, match="Cannot create index on special key '#embedding'"
):
schema.create_index(config=fts_config2, key="#embedding")
# Test 4: Cannot create FTS index on #document key (special key blocked globally)
with pytest.raises(
ValueError, match="Cannot create index on special key '#document'"
):
schema.create_index(config=fts_config2, key="#document")
def test_special_keys_blocked_for_all_index_types(self) -> None:
"""Test that #embedding and #document keys are blocked for all index types."""
schema = Schema()
# Test with StringInvertedIndexConfig on #document
string_config = StringInvertedIndexConfig()
with pytest.raises(
ValueError, match="Cannot create index on special key '#document'"
):
schema.create_index(config=string_config, key="#document")
# Test with StringInvertedIndexConfig on #embedding
with pytest.raises(
ValueError, match="Cannot create index on special key '#embedding'"
):
schema.create_index(config=string_config, key="#embedding")
# Test with SparseVectorIndexConfig on #document
sparse_config = SparseVectorIndexConfig()
with pytest.raises(
ValueError, match="Cannot create index on special key '#document'"
):
schema.create_index(config=sparse_config, key="#document")
# Test with SparseVectorIndexConfig on #embedding
with pytest.raises(
ValueError, match="Cannot create index on special key '#embedding'"
):
schema.create_index(config=sparse_config, key="#embedding")
def test_cannot_enable_all_indexes_for_key(self) -> None:
"""Test that enabling all indexes for a key is not allowed."""
schema = Schema()
# Try to enable all indexes for a custom key (config=None, key="my_key")
with pytest.raises(
ValueError, match="Cannot enable all index types for key 'my_key'"
):
schema.create_index(key="my_key")
# Try to disable all indexes for a custom key (config=None, key="my_key")
with pytest.raises(
ValueError, match="Cannot disable all index types for key 'my_key'"
):
schema.delete_index(key="my_key")
def test_cannot_delete_vector_or_fts_index(self) -> None:
"""Test that deleting vector and FTS indexes is not allowed."""
schema = Schema()
# Try to delete vector index globally
vector_config = VectorIndexConfig()
with pytest.raises(
ValueError, match="Deleting vector index is not currently supported"
):
schema.delete_index(config=vector_config)
# Try to delete vector index on a custom key
with pytest.raises(
ValueError, match="Deleting vector index is not currently supported"
):
schema.delete_index(config=vector_config, key="my_vectors")
# Try to delete FTS index globally
fts_config = FtsIndexConfig()
with pytest.raises(
ValueError, match="Deleting FTS index is not currently supported"
):
schema.delete_index(config=fts_config)
# Try to delete FTS index on a custom key
with pytest.raises(
ValueError, match="Deleting FTS index is not currently supported"
):
schema.delete_index(config=fts_config, key="my_text")
def test_disable_string_inverted_index_globally(self) -> None:
"""Test disabling string inverted index globally."""
schema = Schema()
# Verify string inverted index is enabled by default in global defaults
assert schema.defaults.string is not None
assert schema.defaults.string.string_inverted_index is not None
assert schema.defaults.string.string_inverted_index.enabled is True
# Disable string inverted index globally
string_config = StringInvertedIndexConfig()
result = schema.delete_index(config=string_config)
assert result is schema # Should return self for chaining
# Verify it's now disabled in defaults
assert schema.defaults.string.string_inverted_index is not None
assert schema.defaults.string.string_inverted_index.enabled is False
assert schema.defaults.string.string_inverted_index.config == string_config
# Verify key overrides are not affected (e.g., #document still has its config)
assert schema.keys["#document"].string is not None
assert schema.keys["#document"].string.string_inverted_index is not None
assert (
schema.keys["#document"].string.string_inverted_index.enabled is False
) # #document has it disabled
def test_disable_string_inverted_index_on_key(self) -> None:
"""Test disabling string inverted index on a specific key."""
schema = Schema()
# Disable string inverted index on a custom key
string_config = StringInvertedIndexConfig()
result = schema.delete_index(config=string_config, key="my_text_field")
assert result is schema
# Verify it's disabled on the custom key
assert "my_text_field" in schema.keys
assert schema.keys["my_text_field"].string is not None
assert schema.keys["my_text_field"].string.string_inverted_index is not None
assert (
schema.keys["my_text_field"].string.string_inverted_index.enabled is False
)
assert (
schema.keys["my_text_field"].string.string_inverted_index.config
== string_config
)
# Verify other value types on this key are None (sparse override)
assert schema.keys["my_text_field"].float_list is None
assert schema.keys["my_text_field"].sparse_vector is None
assert schema.keys["my_text_field"].int_value is None
# Verify global defaults are not affected
assert schema.defaults.string is not None
assert schema.defaults.string.string_inverted_index is not None
assert schema.defaults.string.string_inverted_index.enabled is True
# Verify other key overrides are not affected
assert schema.keys["#document"].string is not None
assert schema.keys["#document"].string.string_inverted_index is not None
assert schema.keys["#document"].string.string_inverted_index.enabled is False
assert schema.keys["#embedding"].float_list is not None
assert schema.keys["#embedding"].float_list.vector_index is not None
assert schema.keys["#embedding"].float_list.vector_index.enabled is True
def test_disable_int_inverted_index(self) -> None:
"""Test disabling int inverted index globally and on a specific key."""
schema = Schema()
# Verify int inverted index is enabled by default
assert schema.defaults.int_value is not None
assert schema.defaults.int_value.int_inverted_index is not None
assert schema.defaults.int_value.int_inverted_index.enabled is True
# Test 1: Disable int inverted index globally
int_config = IntInvertedIndexConfig()
result = schema.delete_index(config=int_config)
assert result is schema
# Verify it's now disabled in defaults
assert schema.defaults.int_value.int_inverted_index.enabled is False
assert schema.defaults.int_value.int_inverted_index.config == int_config
# Test 2: Disable int inverted index on a specific key
int_config2 = IntInvertedIndexConfig()
result = schema.delete_index(config=int_config2, key="age_field")
assert result is schema
# Verify it's disabled on the custom key
assert "age_field" in schema.keys
assert schema.keys["age_field"].int_value is not None
assert schema.keys["age_field"].int_value.int_inverted_index is not None
assert schema.keys["age_field"].int_value.int_inverted_index.enabled is False
assert (
schema.keys["age_field"].int_value.int_inverted_index.config == int_config2
)
# Verify sparse override (only int_value is set)
assert schema.keys["age_field"].string is None
assert schema.keys["age_field"].float_list is None
assert schema.keys["age_field"].sparse_vector is None
assert schema.keys["age_field"].float_value is None
assert schema.keys["age_field"].boolean is None
# Verify other keys are not affected
assert schema.keys["#document"].string is not None
assert schema.keys["#embedding"].float_list is not None
    def test_serialize_deserialize_default_schema(self) -> None:
        """Test serialization and deserialization of a default Schema.

        Exhaustively checks the serialized JSON layout (global defaults plus
        the #document / #embedding key overrides), then verifies that
        deserializing that JSON reproduces the original schema state.
        """
        # Create a default schema
        original = Schema()
        # Serialize to JSON
        json_data = original.serialize_to_json()
        # Verify the top-level structure
        assert "defaults" in json_data
        assert "keys" in json_data
        assert isinstance(json_data["defaults"], dict)
        assert isinstance(json_data["keys"], dict)
        # Verify defaults structure in detail
        defaults = json_data["defaults"]
        # Check string: FTS off by default, string inverted index on
        assert "string" in defaults
        assert "fts_index" in defaults["string"]
        assert defaults["string"]["fts_index"]["enabled"] is False
        assert defaults["string"]["fts_index"]["config"] == {}
        assert "string_inverted_index" in defaults["string"]
        assert defaults["string"]["string_inverted_index"]["enabled"] is True
        assert defaults["string"]["string_inverted_index"]["config"] == {}
        # Check float_list: vector index disabled in defaults
        assert "float_list" in defaults
        assert "vector_index" in defaults["float_list"]
        assert defaults["float_list"]["vector_index"]["enabled"] is False
        vector_config = defaults["float_list"]["vector_index"]["config"]
        assert "space" in vector_config
        assert vector_config["space"] == "l2"  # Default space
        assert "embedding_function" in vector_config
        assert vector_config["embedding_function"]["type"] == "known"
        assert vector_config["embedding_function"]["name"] == "default"
        assert vector_config["embedding_function"]["config"] == {}
        # Check sparse_vector
        assert "sparse_vector" in defaults
        assert "sparse_vector_index" in defaults["sparse_vector"]
        assert defaults["sparse_vector"]["sparse_vector_index"]["enabled"] is False
        sparse_vector_config = defaults["sparse_vector"]["sparse_vector_index"][
            "config"
        ]
        # SparseVectorIndexConfig has embedding_function field with unknown default
        assert "embedding_function" in sparse_vector_config
        assert sparse_vector_config["embedding_function"] == {"type": "unknown"}
        # Check int
        assert "int" in defaults
        assert "int_inverted_index" in defaults["int"]
        assert defaults["int"]["int_inverted_index"]["enabled"] is True
        assert defaults["int"]["int_inverted_index"]["config"] == {}
        # Check float
        assert "float" in defaults
        assert "float_inverted_index" in defaults["float"]
        assert defaults["float"]["float_inverted_index"]["enabled"] is True
        assert defaults["float"]["float_inverted_index"]["config"] == {}
        # Check bool
        assert "bool" in defaults
        assert "bool_inverted_index" in defaults["bool"]
        assert defaults["bool"]["bool_inverted_index"]["enabled"] is True
        assert defaults["bool"]["bool_inverted_index"]["config"] == {}
        # Verify key overrides structure in detail
        keys = json_data["keys"]
        # Check #document: FTS on, string inverted index off
        assert "#document" in keys
        assert "string" in keys["#document"]
        assert "fts_index" in keys["#document"]["string"]
        assert keys["#document"]["string"]["fts_index"]["enabled"] is True
        assert keys["#document"]["string"]["fts_index"]["config"] == {}
        assert "string_inverted_index" in keys["#document"]["string"]
        assert keys["#document"]["string"]["string_inverted_index"]["enabled"] is False
        assert keys["#document"]["string"]["string_inverted_index"]["config"] == {}
        # Check #embedding: vector index enabled, sourced from #document
        assert "#embedding" in keys
        assert "float_list" in keys["#embedding"]
        assert "vector_index" in keys["#embedding"]["float_list"]
        assert keys["#embedding"]["float_list"]["vector_index"]["enabled"] is True
        embedding_vector_config = keys["#embedding"]["float_list"]["vector_index"][
            "config"
        ]
        assert "space" in embedding_vector_config
        assert embedding_vector_config["space"] == "l2"  # Default space
        assert "source_key" in embedding_vector_config
        assert embedding_vector_config["source_key"] == "#document"
        assert "embedding_function" in embedding_vector_config
        assert embedding_vector_config["embedding_function"]["type"] == "known"
        assert embedding_vector_config["embedding_function"]["name"] == "default"
        assert embedding_vector_config["embedding_function"]["config"] == {}
        # Deserialize back to Schema
        deserialized = Schema.deserialize_from_json(json_data)
        # Verify deserialized schema matches original - exhaustive validation
        # Check defaults.string
        assert deserialized.defaults.string is not None
        assert deserialized.defaults.string.fts_index is not None
        assert deserialized.defaults.string.fts_index.enabled is False
        assert deserialized.defaults.string.fts_index.enabled == original.defaults.string.fts_index.enabled  # type: ignore[union-attr]
        assert deserialized.defaults.string.string_inverted_index is not None
        assert deserialized.defaults.string.string_inverted_index.enabled is True
        assert deserialized.defaults.string.string_inverted_index.enabled == original.defaults.string.string_inverted_index.enabled  # type: ignore[union-attr]
        # Check defaults.float_list (vector index)
        assert deserialized.defaults.float_list is not None
        assert deserialized.defaults.float_list.vector_index is not None
        assert deserialized.defaults.float_list.vector_index.enabled is False
        assert deserialized.defaults.float_list.vector_index.enabled == original.defaults.float_list.vector_index.enabled  # type: ignore[union-attr]
        # Space is resolved during serialization, so deserialized has explicit value
        assert deserialized.defaults.float_list.vector_index.config.space == "l2"
        # Check embedding function is preserved
        assert (
            deserialized.defaults.float_list.vector_index.config.embedding_function
            is not None
        )
        assert (
            deserialized.defaults.float_list.vector_index.config.embedding_function.name()
            == "default"
        )
        assert original.defaults.float_list.vector_index.config.embedding_function.name() == "default"  # type: ignore[union-attr]
        # Check defaults.sparse_vector
        assert deserialized.defaults.sparse_vector is not None
        assert deserialized.defaults.sparse_vector.sparse_vector_index is not None
        assert deserialized.defaults.sparse_vector.sparse_vector_index.enabled is False
        assert deserialized.defaults.sparse_vector.sparse_vector_index.enabled == original.defaults.sparse_vector.sparse_vector_index.enabled  # type: ignore[union-attr]
        # Check defaults.int_value
        assert deserialized.defaults.int_value is not None
        assert deserialized.defaults.int_value.int_inverted_index is not None
        assert deserialized.defaults.int_value.int_inverted_index.enabled is True
        assert deserialized.defaults.int_value.int_inverted_index.enabled == original.defaults.int_value.int_inverted_index.enabled  # type: ignore[union-attr]
        # Check defaults.float_value
        assert deserialized.defaults.float_value is not None
        assert deserialized.defaults.float_value.float_inverted_index is not None
        assert deserialized.defaults.float_value.float_inverted_index.enabled is True
        assert deserialized.defaults.float_value.float_inverted_index.enabled == original.defaults.float_value.float_inverted_index.enabled  # type: ignore[union-attr]
        # Check defaults.boolean
        assert deserialized.defaults.boolean is not None
        assert deserialized.defaults.boolean.bool_inverted_index is not None
        assert deserialized.defaults.boolean.bool_inverted_index.enabled is True
        assert deserialized.defaults.boolean.bool_inverted_index.enabled == original.defaults.boolean.bool_inverted_index.enabled  # type: ignore[union-attr]
        # Check keys.#document
        assert "#document" in deserialized.keys
        assert deserialized.keys["#document"].string is not None
        assert deserialized.keys["#document"].string.fts_index is not None
        assert deserialized.keys["#document"].string.fts_index.enabled is True
        assert deserialized.keys["#document"].string.fts_index.enabled == original.keys["#document"].string.fts_index.enabled  # type: ignore[union-attr]
        assert deserialized.keys["#document"].string.string_inverted_index is not None
        assert (
            deserialized.keys["#document"].string.string_inverted_index.enabled is False
        )
        assert deserialized.keys["#document"].string.string_inverted_index.enabled == original.keys["#document"].string.string_inverted_index.enabled  # type: ignore[union-attr]
        # Check keys.#embedding
        assert "#embedding" in deserialized.keys
        assert deserialized.keys["#embedding"].float_list is not None
        assert deserialized.keys["#embedding"].float_list.vector_index is not None
        assert deserialized.keys["#embedding"].float_list.vector_index.enabled is True
        assert deserialized.keys["#embedding"].float_list.vector_index.enabled == original.keys["#embedding"].float_list.vector_index.enabled  # type: ignore[union-attr]
        # Verify source_key is preserved
        assert (
            deserialized.keys["#embedding"].float_list.vector_index.config.source_key
            == "#document"
        )
        assert original.keys["#embedding"].float_list.vector_index.config.source_key == "#document"  # type: ignore[union-attr]
        # Verify space is preserved (resolved during serialization)
        assert (
            deserialized.keys["#embedding"].float_list.vector_index.config.space == "l2"
        )
        # Verify embedding function is preserved
        assert (
            deserialized.keys[
                "#embedding"
            ].float_list.vector_index.config.embedding_function
            is not None
        )
        assert (
            deserialized.keys[
                "#embedding"
            ].float_list.vector_index.config.embedding_function.name()
            == "default"
        )
        assert original.keys["#embedding"].float_list.vector_index.config.embedding_function.name() == "default"  # type: ignore[union-attr]
def test_serialize_deserialize_with_vector_config_no_ef(self) -> None:
"""Test serialization/deserialization of Schema with vector config where embedding_function=None."""
# Create a default schema and modify vector config with ef=None
original = Schema()
vector_config = VectorIndexConfig(
space="cosine", embedding_function=None # Explicitly set to None
)
original.create_index(config=vector_config)
# Serialize to JSON
json_data = original.serialize_to_json()
# Verify defaults structure - vector index should reflect the changes
defaults = json_data["defaults"]
assert "float_list" in defaults
assert "vector_index" in defaults["float_list"]
vector_json = defaults["float_list"]["vector_index"]
assert vector_json["enabled"] is False # Still disabled in defaults
assert vector_json["config"]["space"] == "cosine" # User-specified space
# When ef=None, it should serialize as legacy
assert vector_json["config"]["embedding_function"]["type"] == "legacy"
# Verify #embedding also has the updated config
keys = json_data["keys"]
assert "#embedding" in keys
embedding_vector_json = keys["#embedding"]["float_list"]["vector_index"]
assert embedding_vector_json["enabled"] is True # Still enabled on #embedding
assert (
embedding_vector_json["config"]["space"] == "cosine"
) # User-specified space
assert embedding_vector_json["config"]["source_key"] == "#document" # Preserved
# When ef=None, it should serialize as legacy
assert embedding_vector_json["config"]["embedding_function"]["type"] == "legacy"
# Deserialize back to Schema
deserialized = Schema.deserialize_from_json(json_data)
# Verify deserialized schema has the correct values
# Check defaults.float_list (vector index)
assert deserialized.defaults.float_list is not None
assert deserialized.defaults.float_list.vector_index is not None
assert deserialized.defaults.float_list.vector_index.enabled is False
assert (
deserialized.defaults.float_list.vector_index.config.space == "cosine"
) # User space preserved
# ef=None should deserialize as None (legacy)
assert (
deserialized.defaults.float_list.vector_index.config.embedding_function
is None
)
# Check #embedding vector index
assert "#embedding" in deserialized.keys
assert deserialized.keys["#embedding"].float_list is not None
assert deserialized.keys["#embedding"].float_list.vector_index is not None
assert deserialized.keys["#embedding"].float_list.vector_index.enabled is True
assert (
deserialized.keys["#embedding"].float_list.vector_index.config.space
== "cosine"
) # User space preserved
assert (
deserialized.keys["#embedding"].float_list.vector_index.config.source_key
== "#document"
) # Preserved
# ef=None should deserialize as None (legacy)
assert (
deserialized.keys[
"#embedding"
].float_list.vector_index.config.embedding_function
is None
)
    def test_serialize_deserialize_with_custom_ef(self) -> None:
        """Test serialization/deserialization of Schema with custom embedding function.

        Registers a mock EF under "mock_embedding" so deserialization can
        reconstruct it, then round-trips a schema configured with that EF,
        an "ip" space, and an explicit HNSW config, checking both the JSON
        shape and the deserialized object state.
        """
        # Register the mock embedding function so it can be deserialized
        from chromadb.utils.embedding_functions import known_embedding_functions
        known_embedding_functions["mock_embedding"] = MockEmbeddingFunction
        try:
            # Create a default schema and modify vector config with custom EF
            original = Schema()
            custom_ef = MockEmbeddingFunction(model_name="custom_model_v3")
            hnsw_config = HnswIndexConfig(
                ef_construction=256, max_neighbors=48, ef_search=128
            )
            vector_config = VectorIndexConfig(
                embedding_function=custom_ef,
                space="ip",  # Inner product
                hnsw=hnsw_config,
            )
            original.create_index(config=vector_config)
            # Serialize to JSON
            json_data = original.serialize_to_json()
            # Verify defaults structure - vector index should reflect the changes
            defaults = json_data["defaults"]
            assert "float_list" in defaults
            assert "vector_index" in defaults["float_list"]
            vector_json = defaults["float_list"]["vector_index"]
            assert vector_json["enabled"] is False  # Still disabled in defaults
            assert vector_json["config"]["space"] == "ip"  # User-specified space
            # Custom EF should serialize as known type
            assert vector_json["config"]["embedding_function"]["type"] == "known"
            assert (
                vector_json["config"]["embedding_function"]["name"] == "mock_embedding"
            )
            assert (
                vector_json["config"]["embedding_function"]["config"]["model_name"]
                == "custom_model_v3"
            )
            # HNSW config should be present
            assert "hnsw" in vector_json["config"]
            assert vector_json["config"]["hnsw"]["ef_construction"] == 256
            assert vector_json["config"]["hnsw"]["max_neighbors"] == 48
            assert vector_json["config"]["hnsw"]["ef_search"] == 128
            # Verify #embedding also has the updated config
            keys = json_data["keys"]
            assert "#embedding" in keys
            embedding_vector_json = keys["#embedding"]["float_list"]["vector_index"]
            assert (
                embedding_vector_json["enabled"] is True
            )  # Still enabled on #embedding
            assert (
                embedding_vector_json["config"]["space"] == "ip"
            )  # User-specified space
            assert (
                embedding_vector_json["config"]["source_key"] == "#document"
            )  # Preserved
            # Custom EF should serialize as known type
            assert (
                embedding_vector_json["config"]["embedding_function"]["type"] == "known"
            )
            assert (
                embedding_vector_json["config"]["embedding_function"]["name"]
                == "mock_embedding"
            )
            assert (
                embedding_vector_json["config"]["embedding_function"]["config"][
                    "model_name"
                ]
                == "custom_model_v3"
            )
            # HNSW config should be present
            assert "hnsw" in embedding_vector_json["config"]
            assert embedding_vector_json["config"]["hnsw"]["ef_construction"] == 256
            assert embedding_vector_json["config"]["hnsw"]["max_neighbors"] == 48
            assert embedding_vector_json["config"]["hnsw"]["ef_search"] == 128
            # Deserialize back to Schema
            deserialized = Schema.deserialize_from_json(json_data)
            # Verify deserialized schema has the correct values
            # Check defaults.float_list (vector index)
            assert deserialized.defaults.float_list is not None
            assert deserialized.defaults.float_list.vector_index is not None
            assert deserialized.defaults.float_list.vector_index.enabled is False
            assert (
                deserialized.defaults.float_list.vector_index.config.space == "ip"
            )  # User space preserved
            # Custom EF should be reconstructed
            assert (
                deserialized.defaults.float_list.vector_index.config.embedding_function
                is not None
            )
            assert (
                deserialized.defaults.float_list.vector_index.config.embedding_function.name()
                == "mock_embedding"
            )
            # Verify the EF config is correct
            ef_config = (
                deserialized.defaults.float_list.vector_index.config.embedding_function.get_config()
            )
            assert ef_config["model_name"] == "custom_model_v3"
            # HNSW config should be preserved
            assert deserialized.defaults.float_list.vector_index.config.hnsw is not None
            assert (
                deserialized.defaults.float_list.vector_index.config.hnsw.ef_construction
                == 256
            )
            assert (
                deserialized.defaults.float_list.vector_index.config.hnsw.max_neighbors
                == 48
            )
            assert (
                deserialized.defaults.float_list.vector_index.config.hnsw.ef_search
                == 128
            )
            # Check #embedding vector index
            assert "#embedding" in deserialized.keys
            assert deserialized.keys["#embedding"].float_list is not None
            assert deserialized.keys["#embedding"].float_list.vector_index is not None
            assert (
                deserialized.keys["#embedding"].float_list.vector_index.enabled is True
            )
            assert (
                deserialized.keys["#embedding"].float_list.vector_index.config.space
                == "ip"
            )  # User space preserved
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.source_key
                == "#document"
            )  # Preserved
            # Custom EF should be reconstructed
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.embedding_function
                is not None
            )
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.embedding_function.name()
                == "mock_embedding"
            )
            # Verify the EF config is correct
            ef_config_embedding = deserialized.keys[
                "#embedding"
            ].float_list.vector_index.config.embedding_function.get_config()
            assert ef_config_embedding["model_name"] == "custom_model_v3"
            # HNSW config should be preserved
            assert (
                deserialized.keys["#embedding"].float_list.vector_index.config.hnsw
                is not None
            )
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.hnsw.ef_construction
                == 256
            )
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.hnsw.max_neighbors
                == 48
            )
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.hnsw.ef_search
                == 128
            )
        finally:
            # Clean up: remove the mock function from known_embedding_functions
            if "mock_embedding" in known_embedding_functions:
                del known_embedding_functions["mock_embedding"]
    def test_serialize_deserialize_with_spann_config(self) -> None:
        """Test serialization/deserialization of Schema with SPANN index config.

        Mirrors the HNSW round-trip test but configures a SPANN index and
        additionally verifies that `spann` is serialized/restored while
        `hnsw` stays absent/None throughout.
        """
        # Register the mock embedding function so it can be deserialized
        from chromadb.utils.embedding_functions import known_embedding_functions
        known_embedding_functions["mock_embedding"] = MockEmbeddingFunction
        try:
            # Create a default schema and modify vector config with SPANN
            original = Schema()
            custom_ef = MockEmbeddingFunction(model_name="spann_model")
            spann_config = SpannIndexConfig(
                search_nprobe=100, write_nprobe=50, ef_construction=200, ef_search=150
            )
            vector_config = VectorIndexConfig(
                embedding_function=custom_ef, space="cosine", spann=spann_config
            )
            original.create_index(config=vector_config)
            # Serialize to JSON
            json_data = original.serialize_to_json()
            # Verify defaults structure - vector index should reflect the changes
            defaults = json_data["defaults"]
            assert "float_list" in defaults
            assert "vector_index" in defaults["float_list"]
            vector_json = defaults["float_list"]["vector_index"]
            assert vector_json["enabled"] is False  # Still disabled in defaults
            assert vector_json["config"]["space"] == "cosine"  # User-specified space
            # Custom EF should serialize as known type
            assert vector_json["config"]["embedding_function"]["type"] == "known"
            assert (
                vector_json["config"]["embedding_function"]["name"] == "mock_embedding"
            )
            assert (
                vector_json["config"]["embedding_function"]["config"]["model_name"]
                == "spann_model"
            )
            # SPANN config should be present
            assert "spann" in vector_json["config"]
            assert vector_json["config"]["spann"]["search_nprobe"] == 100
            assert vector_json["config"]["spann"]["write_nprobe"] == 50
            assert vector_json["config"]["spann"]["ef_construction"] == 200
            assert vector_json["config"]["spann"]["ef_search"] == 150
            # HNSW should not be present
            assert vector_json["config"].get("hnsw") is None
            # Verify #embedding also has the updated config
            keys = json_data["keys"]
            assert "#embedding" in keys
            embedding_vector_json = keys["#embedding"]["float_list"]["vector_index"]
            assert (
                embedding_vector_json["enabled"] is True
            )  # Still enabled on #embedding
            assert (
                embedding_vector_json["config"]["space"] == "cosine"
            )  # User-specified space
            assert (
                embedding_vector_json["config"]["source_key"] == "#document"
            )  # Preserved
            # Custom EF should serialize as known type
            assert (
                embedding_vector_json["config"]["embedding_function"]["type"] == "known"
            )
            assert (
                embedding_vector_json["config"]["embedding_function"]["name"]
                == "mock_embedding"
            )
            assert (
                embedding_vector_json["config"]["embedding_function"]["config"][
                    "model_name"
                ]
                == "spann_model"
            )
            # SPANN config should be present
            assert "spann" in embedding_vector_json["config"]
            assert embedding_vector_json["config"]["spann"]["search_nprobe"] == 100
            assert embedding_vector_json["config"]["spann"]["write_nprobe"] == 50
            assert embedding_vector_json["config"]["spann"]["ef_construction"] == 200
            assert embedding_vector_json["config"]["spann"]["ef_search"] == 150
            # HNSW should not be present
            assert embedding_vector_json["config"].get("hnsw") is None
            # Deserialize back to Schema
            deserialized = Schema.deserialize_from_json(json_data)
            # Verify deserialized schema has the correct values
            # Check defaults.float_list (vector index)
            assert deserialized.defaults.float_list is not None
            assert deserialized.defaults.float_list.vector_index is not None
            assert deserialized.defaults.float_list.vector_index.enabled is False
            assert (
                deserialized.defaults.float_list.vector_index.config.space == "cosine"
            )  # User space preserved
            # Custom EF should be reconstructed
            assert (
                deserialized.defaults.float_list.vector_index.config.embedding_function
                is not None
            )
            assert (
                deserialized.defaults.float_list.vector_index.config.embedding_function.name()
                == "mock_embedding"
            )
            # Verify the EF config is correct
            ef_config = (
                deserialized.defaults.float_list.vector_index.config.embedding_function.get_config()
            )
            assert ef_config["model_name"] == "spann_model"
            # SPANN config should be preserved
            assert (
                deserialized.defaults.float_list.vector_index.config.spann is not None
            )
            assert (
                deserialized.defaults.float_list.vector_index.config.spann.search_nprobe
                == 100
            )
            assert (
                deserialized.defaults.float_list.vector_index.config.spann.write_nprobe
                == 50
            )
            assert (
                deserialized.defaults.float_list.vector_index.config.spann.ef_construction
                == 200
            )
            assert (
                deserialized.defaults.float_list.vector_index.config.spann.ef_search
                == 150
            )
            # HNSW should be None
            assert deserialized.defaults.float_list.vector_index.config.hnsw is None
            # Check #embedding vector index
            assert "#embedding" in deserialized.keys
            assert deserialized.keys["#embedding"].float_list is not None
            assert deserialized.keys["#embedding"].float_list.vector_index is not None
            assert (
                deserialized.keys["#embedding"].float_list.vector_index.enabled is True
            )
            assert (
                deserialized.keys["#embedding"].float_list.vector_index.config.space
                == "cosine"
            )  # User space preserved
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.source_key
                == "#document"
            )  # Preserved
            # Custom EF should be reconstructed
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.embedding_function
                is not None
            )
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.embedding_function.name()
                == "mock_embedding"
            )
            # Verify the EF config is correct
            ef_config_embedding = deserialized.keys[
                "#embedding"
            ].float_list.vector_index.config.embedding_function.get_config()
            assert ef_config_embedding["model_name"] == "spann_model"
            # SPANN config should be preserved
            assert (
                deserialized.keys["#embedding"].float_list.vector_index.config.spann
                is not None
            )
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.spann.search_nprobe
                == 100
            )
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.spann.write_nprobe
                == 50
            )
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.spann.ef_construction
                == 200
            )
            assert (
                deserialized.keys[
                    "#embedding"
                ].float_list.vector_index.config.spann.ef_search
                == 150
            )
            # HNSW should be None
            assert (
                deserialized.keys["#embedding"].float_list.vector_index.config.hnsw
                is None
            )
        finally:
            # Clean up: remove the mock function from known_embedding_functions
            if "mock_embedding" in known_embedding_functions:
                del known_embedding_functions["mock_embedding"]
def test_serialize_deserialize_complex_mixed_modifications(self) -> None:
"""Test serialization/deserialization with multiple mixed schema modifications."""
# Register the mock embedding functions so they can be deserialized
from chromadb.utils.embedding_functions import known_embedding_functions
known_embedding_functions["mock_embedding"] = MockEmbeddingFunction
known_embedding_functions["mock_sparse"] = MockSparseEmbeddingFunction # type: ignore[assignment]
try:
# Create a default schema and apply multiple modifications
original = Schema()
# 1. Set custom vector config globally (space + HNSW)
custom_ef = MockEmbeddingFunction(model_name="mixed_test_model")
hnsw_config = HnswIndexConfig(ef_construction=300, max_neighbors=64)
vector_config = VectorIndexConfig(
embedding_function=custom_ef, space="ip", hnsw=hnsw_config
)
original.create_index(config=vector_config)
# 2. Enable sparse vector index on "embeddings_field" key
sparse_ef = MockSparseEmbeddingFunction(name="sparse_model")
sparse_config = SparseVectorIndexConfig(
embedding_function=sparse_ef, source_key="text_field"
)
original.create_index(config=sparse_config, key="embeddings_field")
# 3. Disable string_inverted_index on "tags" key
string_config = StringInvertedIndexConfig()
original.delete_index(config=string_config, key="tags")
# 4. Disable int_inverted_index on "count" key
int_config = IntInvertedIndexConfig()
original.delete_index(config=int_config, key="count")
# 5. Disable float_inverted_index on "price" key
float_config = FloatInvertedIndexConfig()
original.delete_index(config=float_config, key="price")
# Serialize to JSON
json_data = original.serialize_to_json()
# Verify JSON structure has all modifications
defaults = json_data["defaults"]
keys = json_data["keys"]
# Check defaults reflect global vector config changes
assert defaults["float_list"]["vector_index"]["config"]["space"] == "ip"
assert (
defaults["float_list"]["vector_index"]["config"]["hnsw"][
"ef_construction"
]
== 300
)
assert (
defaults["float_list"]["vector_index"]["config"]["hnsw"][
"max_neighbors"
]
== 64
)
# Check key overrides exist for all modified keys
assert "embeddings_field" in keys
assert "tags" in keys
assert "count" in keys
assert "price" in keys
assert "#document" in keys # Default key
assert "#embedding" in keys # Default key with vector config
# Exhaustive validation of embeddings_field
embeddings_field_json = keys["embeddings_field"]
assert "sparse_vector" in embeddings_field_json
assert (
embeddings_field_json["sparse_vector"]["sparse_vector_index"]["enabled"]
is True
)
assert (
embeddings_field_json["sparse_vector"]["sparse_vector_index"]["config"][
"source_key"
]
== "text_field"
)
assert (
embeddings_field_json["sparse_vector"]["sparse_vector_index"]["config"][
"embedding_function"
]["type"]
== "known"
)
assert (
embeddings_field_json["sparse_vector"]["sparse_vector_index"]["config"][
"embedding_function"
]["name"]
== "mock_sparse"
)
assert (
embeddings_field_json["sparse_vector"]["sparse_vector_index"]["config"][
"embedding_function"
]["config"]["name"]
== "sparse_model"
)
# Verify sparse override: only sparse_vector should be present
assert "string" not in embeddings_field_json
assert "float_list" not in embeddings_field_json
assert "int" not in embeddings_field_json
assert "float" not in embeddings_field_json
assert "bool" not in embeddings_field_json
# Exhaustive validation of tags
tags_json = keys["tags"]
assert "string" in tags_json
assert tags_json["string"]["string_inverted_index"]["enabled"] is False
assert tags_json["string"]["string_inverted_index"]["config"] == {}
# FTS should not be present (not modified)
assert "fts_index" not in tags_json["string"]
# Verify sparse override: only string should be present
assert "sparse_vector" not in tags_json
assert "float_list" not in tags_json
assert "int" not in tags_json
assert "float" not in tags_json
assert "bool" not in tags_json
# Exhaustive validation of count
count_json = keys["count"]
assert "int" in count_json
assert count_json["int"]["int_inverted_index"]["enabled"] is False
assert count_json["int"]["int_inverted_index"]["config"] == {}
# Verify sparse override: only int should be present
assert "string" not in count_json
assert "sparse_vector" not in count_json
assert "float_list" not in count_json
assert "float" not in count_json
assert "bool" not in count_json
# Exhaustive validation of price
price_json = keys["price"]
assert "float" in price_json
assert price_json["float"]["float_inverted_index"]["enabled"] is False
assert price_json["float"]["float_inverted_index"]["config"] == {}
# Verify sparse override: only float should be present
assert "string" not in price_json
assert "sparse_vector" not in price_json
assert "float_list" not in price_json
assert "int" not in price_json
assert "bool" not in price_json
# Exhaustive validation of #embedding
embedding_json = keys["#embedding"]
assert "float_list" in embedding_json
assert embedding_json["float_list"]["vector_index"]["enabled"] is True
assert (
embedding_json["float_list"]["vector_index"]["config"]["space"] == "ip"
)
assert (
embedding_json["float_list"]["vector_index"]["config"]["source_key"]
== "#document"
)
assert (
embedding_json["float_list"]["vector_index"]["config"][
"embedding_function"
]["type"]
== "known"
)
assert (
embedding_json["float_list"]["vector_index"]["config"][
"embedding_function"
]["name"]
== "mock_embedding"
)
assert (
embedding_json["float_list"]["vector_index"]["config"][
"embedding_function"
]["config"]["model_name"]
== "mixed_test_model"
)
assert (
embedding_json["float_list"]["vector_index"]["config"]["hnsw"][
"ef_construction"
]
== 300
)
assert (
embedding_json["float_list"]["vector_index"]["config"]["hnsw"][
"max_neighbors"
]
== 64
)
assert (
embedding_json["float_list"]["vector_index"]["config"].get("spann")
is None
)
# Verify sparse override: only float_list should be present
assert "string" not in embedding_json
assert "sparse_vector" not in embedding_json
assert "int" not in embedding_json
assert "float" not in embedding_json
assert "bool" not in embedding_json
# Exhaustive validation of #document (unchanged, but with FTS enabled)
document_json = keys["#document"]
assert "string" in document_json
assert document_json["string"]["fts_index"]["enabled"] is True
assert document_json["string"]["fts_index"]["config"] == {}
assert document_json["string"]["string_inverted_index"]["enabled"] is False
assert document_json["string"]["string_inverted_index"]["config"] == {}
# Verify sparse override: only string should be present
assert "sparse_vector" not in document_json
assert "float_list" not in document_json
assert "int" not in document_json
assert "float" not in document_json
assert "bool" not in document_json
# Deserialize back to Schema
deserialized = Schema.deserialize_from_json(json_data)
# Verify all modifications are preserved after deserialization
# 1. Check global vector config
assert deserialized.defaults.float_list is not None
assert deserialized.defaults.float_list.vector_index is not None
assert deserialized.defaults.float_list.vector_index.config.space == "ip"
assert deserialized.defaults.float_list.vector_index.config.hnsw is not None
assert (
deserialized.defaults.float_list.vector_index.config.hnsw.ef_construction
== 300
)
assert (
deserialized.defaults.float_list.vector_index.config.hnsw.max_neighbors
== 64
)
assert (
deserialized.defaults.float_list.vector_index.config.embedding_function
is not None
)
assert (
deserialized.defaults.float_list.vector_index.config.embedding_function.name()
== "mock_embedding"
)
# 2. Check embeddings_field sparse vector
assert "embeddings_field" in deserialized.keys
assert deserialized.keys["embeddings_field"].sparse_vector is not None
assert (
deserialized.keys["embeddings_field"].sparse_vector.sparse_vector_index
is not None
)
assert (
deserialized.keys[
"embeddings_field"
].sparse_vector.sparse_vector_index.enabled
is True
)
assert (
deserialized.keys[
"embeddings_field"
].sparse_vector.sparse_vector_index.config.source_key
== "text_field"
)
# Sparse override: other value types should be None
assert deserialized.keys["embeddings_field"].string is None
assert deserialized.keys["embeddings_field"].float_list is None
assert deserialized.keys["embeddings_field"].int_value is None
# 3. Check tags has string_inverted_index disabled
assert "tags" in deserialized.keys
assert deserialized.keys["tags"].string is not None
assert deserialized.keys["tags"].string.string_inverted_index is not None
assert (
deserialized.keys["tags"].string.string_inverted_index.enabled is False
)
# Sparse override: other value types should be None
assert deserialized.keys["tags"].sparse_vector is None
assert deserialized.keys["tags"].float_list is None
# 4. Check count has int_inverted_index disabled
assert "count" in deserialized.keys
assert deserialized.keys["count"].int_value is not None
assert deserialized.keys["count"].int_value.int_inverted_index is not None
assert (
deserialized.keys["count"].int_value.int_inverted_index.enabled is False
)
# Sparse override: other value types should be None
assert deserialized.keys["count"].string is None
assert deserialized.keys["count"].float_list is None
# 5. Check price has float_inverted_index disabled
assert "price" in deserialized.keys
assert deserialized.keys["price"].float_value is not None
assert (
deserialized.keys["price"].float_value.float_inverted_index is not None
)
assert (
deserialized.keys["price"].float_value.float_inverted_index.enabled
is False
)
# Sparse override: other value types should be None
assert deserialized.keys["price"].string is None
assert deserialized.keys["price"].sparse_vector is None
# 6. Check #embedding has updated vector config
assert "#embedding" in deserialized.keys
assert deserialized.keys["#embedding"].float_list is not None
assert deserialized.keys["#embedding"].float_list.vector_index is not None
assert (
deserialized.keys["#embedding"].float_list.vector_index.config.space
== "ip"
)
assert (
deserialized.keys[
"#embedding"
].float_list.vector_index.config.source_key
== "#document"
)
assert (
deserialized.keys["#embedding"].float_list.vector_index.config.hnsw
is not None
)
assert (
deserialized.keys[
"#embedding"
].float_list.vector_index.config.hnsw.ef_construction
== 300
)
# 7. Verify defaults for unchanged indexes remain correct
assert deserialized.defaults.string is not None
assert deserialized.defaults.string.string_inverted_index is not None
assert (
deserialized.defaults.string.string_inverted_index.enabled is True
) # Still enabled globally
assert deserialized.defaults.int_value is not None
assert deserialized.defaults.int_value.int_inverted_index is not None
assert (
deserialized.defaults.int_value.int_inverted_index.enabled is True
) # Still enabled globally
assert deserialized.defaults.sparse_vector is not None
assert deserialized.defaults.sparse_vector.sparse_vector_index is not None
assert (
deserialized.defaults.sparse_vector.sparse_vector_index.enabled is False
) # Still disabled globally
finally:
# Clean up: remove the mock functions from known_embedding_functions
if "mock_embedding" in known_embedding_functions:
del known_embedding_functions["mock_embedding"]
if "mock_sparse" in known_embedding_functions:
del known_embedding_functions["mock_sparse"]
def test_multiple_index_types_on_same_key(self) -> None:
"""Test that multiple index types can coexist on the same key."""
schema = Schema()
# Enable sparse vector on "multi_field"
sparse_config = SparseVectorIndexConfig(
source_key="source", embedding_function=MockSparseEmbeddingFunction()
)
schema.create_index(config=sparse_config, key="multi_field")
# Also enable string_inverted_index on the same key
string_config = StringInvertedIndexConfig()
schema.create_index(config=string_config, key="multi_field")
# Verify both indexes exist on the same key
assert "multi_field" in schema.keys
multi_field = schema.keys["multi_field"]
assert multi_field.sparse_vector is not None
assert multi_field.sparse_vector.sparse_vector_index is not None
assert multi_field.sparse_vector.sparse_vector_index.enabled is True
assert multi_field.string is not None
assert multi_field.string.string_inverted_index is not None
assert multi_field.string.string_inverted_index.enabled is True
# Verify other value types are still None (sparse override)
assert schema.keys["multi_field"].float_list is None
assert schema.keys["multi_field"].int_value is None
assert schema.keys["multi_field"].float_value is None
assert schema.keys["multi_field"].boolean is None
# Serialize and verify both are present in JSON
json_data = schema.serialize_to_json()
multi_field_json = json_data["keys"]["multi_field"]
assert "sparse_vector" in multi_field_json
assert "string" in multi_field_json
assert (
multi_field_json["sparse_vector"]["sparse_vector_index"]["enabled"] is True
)
assert multi_field_json["string"]["string_inverted_index"]["enabled"] is True
# Deserialize and verify both survive roundtrip
deserialized = Schema.deserialize_from_json(json_data)
assert "multi_field" in deserialized.keys
des_multi_field = deserialized.keys["multi_field"]
assert des_multi_field.sparse_vector is not None
assert des_multi_field.sparse_vector.sparse_vector_index is not None
assert des_multi_field.sparse_vector.sparse_vector_index.enabled is True
assert des_multi_field.string is not None
assert des_multi_field.string.string_inverted_index is not None
assert des_multi_field.string.string_inverted_index.enabled is True
def test_override_then_revert_to_default(self) -> None:
"""Test that disabling an index reverts to default behavior (key may still exist with disabled state)."""
schema = Schema()
# Enable string_inverted_index on "temp_field"
string_config = StringInvertedIndexConfig()
schema.create_index(config=string_config, key="temp_field")
# Verify it's enabled
assert "temp_field" in schema.keys
temp_field_initial = schema.keys["temp_field"]
assert temp_field_initial.string is not None
assert temp_field_initial.string.string_inverted_index is not None
assert temp_field_initial.string.string_inverted_index.enabled is True
# Now disable it
schema.delete_index(config=string_config, key="temp_field")
# Verify it's now disabled (key still exists but with disabled state)
assert "temp_field" in schema.keys
temp_field = schema.keys["temp_field"]
assert temp_field.string is not None
assert temp_field.string.string_inverted_index is not None
assert temp_field.string.string_inverted_index.enabled is False
# Serialize and verify disabled state is preserved
json_data = schema.serialize_to_json()
assert "temp_field" in json_data["keys"]
temp_field_json = json_data["keys"]["temp_field"]
assert "string" in temp_field_json
assert temp_field_json["string"]["string_inverted_index"]["enabled"] is False
# Deserialize and verify disabled state survives roundtrip
deserialized = Schema.deserialize_from_json(json_data)
assert "temp_field" in deserialized.keys
des_temp_field = deserialized.keys["temp_field"]
assert des_temp_field.string is not None
assert des_temp_field.string.string_inverted_index is not None
assert des_temp_field.string.string_inverted_index.enabled is False
def test_error_handling_invalid_operations(self) -> None:
"""Test that invalid operations raise appropriate errors."""
schema = Schema()
# Test 1: Cannot create index on #embedding key
vector_config = VectorIndexConfig()
with pytest.raises(
ValueError, match="Cannot create index on special key '#embedding'"
):
schema.create_index(config=vector_config, key="#embedding")
# Test 2: Cannot create index on #document key
fts_config = FtsIndexConfig()
with pytest.raises(
ValueError, match="Cannot create index on special key '#document'"
):
schema.create_index(config=fts_config, key="#document")
# Test 3: Cannot enable all indexes globally
with pytest.raises(ValueError, match="Cannot enable all index types globally"):
schema.create_index()
# Test 4: Cannot enable all indexes for a specific key
with pytest.raises(
ValueError, match="Cannot enable all index types for key 'mykey'"
):
schema.create_index(key="mykey")
# Test 5: Cannot disable all indexes for a specific key
with pytest.raises(
ValueError, match="Cannot disable all index types for key 'mykey'"
):
schema.delete_index(key="mykey")
# Test 6: Cannot delete vector index
with pytest.raises(
ValueError, match="Deleting vector index is not currently supported"
):
schema.delete_index(config=vector_config)
# Test 7: Cannot delete FTS index
with pytest.raises(
ValueError, match="Deleting FTS index is not currently supported"
):
schema.delete_index(config=fts_config)
# Test 8: Cannot create vector index on custom key
with pytest.raises(
ValueError, match="Vector index cannot be enabled on specific keys"
):
schema.create_index(config=vector_config, key="custom_field")
# Test 9: Cannot create FTS index on custom key
with pytest.raises(
ValueError, match="FTS index cannot be enabled on specific keys"
):
schema.create_index(config=fts_config, key="custom_field")
def test_empty_schema_serialization(self) -> None:
"""Test serialization/deserialization of an unmodified schema."""
# Create a schema without any modifications
original = Schema()
# Serialize
json_data = original.serialize_to_json()
# Verify only default keys exist in keys
assert len(json_data["keys"]) == 2
assert "#document" in json_data["keys"]
assert "#embedding" in json_data["keys"]
# Deserialize
deserialized = Schema.deserialize_from_json(json_data)
# Verify defaults match
defaults = deserialized.defaults
assert defaults.string is not None
assert defaults.string.string_inverted_index is not None
assert defaults.string.string_inverted_index.enabled is True
assert defaults.string.fts_index is not None
assert defaults.string.fts_index.enabled is False
assert defaults.float_list is not None
assert defaults.float_list.vector_index is not None
assert defaults.float_list.vector_index.enabled is False
assert defaults.sparse_vector is not None
assert defaults.sparse_vector.sparse_vector_index is not None
assert defaults.sparse_vector.sparse_vector_index.enabled is False
assert defaults.int_value is not None
assert defaults.int_value.int_inverted_index is not None
assert defaults.int_value.int_inverted_index.enabled is True
assert defaults.float_value is not None
assert defaults.float_value.float_inverted_index is not None
assert defaults.float_value.float_inverted_index.enabled is True
assert defaults.boolean is not None
assert defaults.boolean.bool_inverted_index is not None
assert defaults.boolean.bool_inverted_index.enabled is True
# Verify only default keys exist in keys
assert len(deserialized.keys) == 2
assert "#document" in deserialized.keys
assert "#embedding" in deserialized.keys
def test_multiple_serialize_deserialize_roundtrips(self) -> None:
"""Test that multiple serialization/deserialization cycles preserve schema integrity."""
# Register the mock embedding function
from chromadb.utils.embedding_functions import known_embedding_functions
known_embedding_functions["mock_embedding"] = MockEmbeddingFunction
try:
# Create a complex schema
original = Schema()
custom_ef = MockEmbeddingFunction(model_name="roundtrip_model")
hnsw_config = HnswIndexConfig(ef_construction=150, max_neighbors=40)
vector_config = VectorIndexConfig(
embedding_function=custom_ef, space="cosine", hnsw=hnsw_config
)
original.create_index(config=vector_config)
original.create_index(
config=SparseVectorIndexConfig(
source_key="text", embedding_function=MockSparseEmbeddingFunction()
),
key="embeddings",
)
original.delete_index(config=StringInvertedIndexConfig(), key="tags")
# First roundtrip
json1 = original.serialize_to_json()
schema1 = Schema.deserialize_from_json(json1)
# Second roundtrip
json2 = schema1.serialize_to_json()
schema2 = Schema.deserialize_from_json(json2)
# Third roundtrip
json3 = schema2.serialize_to_json()
schema3 = Schema.deserialize_from_json(json3)
# Verify all schemas are identical
# Check vector config persists
for schema in [schema1, schema2, schema3]:
assert schema.defaults.float_list is not None
assert schema.defaults.float_list.vector_index is not None
assert schema.defaults.float_list.vector_index.config.space == "cosine"
assert schema.defaults.float_list.vector_index.config.hnsw is not None
assert (
schema.defaults.float_list.vector_index.config.hnsw.ef_construction
== 150
)
assert (
schema.defaults.float_list.vector_index.config.hnsw.max_neighbors
== 40
)
assert (
schema.defaults.float_list.vector_index.config.embedding_function
is not None
)
assert (
schema.defaults.float_list.vector_index.config.embedding_function.name()
== "mock_embedding"
)
# Check sparse vector on embeddings key
assert "embeddings" in schema.keys
embeddings_override = schema.keys["embeddings"]
assert embeddings_override.sparse_vector is not None
assert embeddings_override.sparse_vector.sparse_vector_index is not None
assert (
embeddings_override.sparse_vector.sparse_vector_index.enabled
is True
)
assert (
embeddings_override.sparse_vector.sparse_vector_index.config.source_key
== "text"
)
# Check disabled string index on tags key
assert "tags" in schema.keys
tags_override = schema.keys["tags"]
assert tags_override.string is not None
assert tags_override.string.string_inverted_index is not None
assert tags_override.string.string_inverted_index.enabled is False
# Verify semantic equivalence: all three schemas should have same number of overrides
assert len(schema1.keys) == len(schema2.keys) == len(schema3.keys)
assert (
set(schema1.keys.keys())
== set(schema2.keys.keys())
== set(schema3.keys.keys())
)
finally:
# Clean up
if "mock_embedding" in known_embedding_functions:
del known_embedding_functions["mock_embedding"]
def test_many_keys_stress(self) -> None:
"""Test schema with many key overrides (stress test)."""
schema = Schema()
# Create 50 key overrides with different configurations
for i in range(50):
key_name = f"field_{i}"
if i == 0:
# Enable sparse vector on ONE key only
schema.create_index(
config=SparseVectorIndexConfig(
source_key=f"source_{i}",
embedding_function=MockSparseEmbeddingFunction(),
),
key=key_name,
)
elif i % 2 == 1:
# Disable string inverted index
schema.delete_index(config=StringInvertedIndexConfig(), key=key_name)
else:
# Disable int inverted index
schema.delete_index(config=IntInvertedIndexConfig(), key=key_name)
# Verify all 50 keys + 2 defaults exist
assert len(schema.keys) == 52 # 50 custom + #document + #embedding
# Verify a sample of keys
assert "field_0" in schema.keys
field_0 = schema.keys["field_0"]
assert field_0.sparse_vector is not None
assert field_0.sparse_vector.sparse_vector_index is not None
assert field_0.sparse_vector.sparse_vector_index.enabled is True
assert "field_1" in schema.keys
field_1 = schema.keys["field_1"]
assert field_1.string is not None
assert field_1.string.string_inverted_index is not None
assert field_1.string.string_inverted_index.enabled is False
assert "field_2" in schema.keys
field_2 = schema.keys["field_2"]
assert field_2.int_value is not None
assert field_2.int_value.int_inverted_index is not None
assert field_2.int_value.int_inverted_index.enabled is False
# Serialize
json_data = schema.serialize_to_json()
assert len(json_data["keys"]) == 52
# Deserialize
deserialized = Schema.deserialize_from_json(json_data)
assert len(deserialized.keys) == 52
# Spot check deserialized values
assert "field_0" in deserialized.keys # i == 0 -> sparse vector
des_field_0 = deserialized.keys["field_0"]
assert des_field_0.sparse_vector is not None
assert des_field_0.sparse_vector.sparse_vector_index is not None
assert des_field_0.sparse_vector.sparse_vector_index.enabled is True
assert (
des_field_0.sparse_vector.sparse_vector_index.config.source_key
== "source_0"
)
assert "field_49" in deserialized.keys # 49 % 2 == 1 -> string disabled
des_field_49 = deserialized.keys["field_49"]
assert des_field_49.string is not None
assert des_field_49.string.string_inverted_index is not None
assert des_field_49.string.string_inverted_index.enabled is False
assert "field_48" in deserialized.keys # 48 % 2 == 0 -> int disabled
des_field_48 = deserialized.keys["field_48"]
assert des_field_48.int_value is not None
assert des_field_48.int_value.int_inverted_index is not None
assert des_field_48.int_value.int_inverted_index.enabled is False
def test_chained_operations(self) -> None:
"""Test chaining multiple create_index and delete_index operations."""
schema = Schema()
# Chain multiple operations
result = (
schema.create_index(
config=SparseVectorIndexConfig(
source_key="text", embedding_function=MockSparseEmbeddingFunction()
),
key="field1",
)
.delete_index(config=StringInvertedIndexConfig(), key="field2")
.delete_index(config=StringInvertedIndexConfig(), key="field3")
.delete_index(config=IntInvertedIndexConfig(), key="field4")
)
# Verify chaining returns the same schema object
assert result is schema
# Verify all operations were applied
assert "field1" in schema.keys
field1 = schema.keys["field1"]
assert field1.sparse_vector is not None
assert field1.sparse_vector.sparse_vector_index is not None
assert field1.sparse_vector.sparse_vector_index.enabled is True
assert "field2" in schema.keys
field2 = schema.keys["field2"]
assert field2.string is not None
assert field2.string.string_inverted_index is not None
assert field2.string.string_inverted_index.enabled is False
assert "field3" in schema.keys
field3 = schema.keys["field3"]
assert field3.string is not None
assert field3.string.string_inverted_index is not None
assert field3.string.string_inverted_index.enabled is False
assert "field4" in schema.keys
field4 = schema.keys["field4"]
assert field4.int_value is not None
assert field4.int_value.int_inverted_index is not None
assert field4.int_value.int_inverted_index.enabled is False
def test_float_and_bool_inverted_indexes(self) -> None:
"""Test enabling/disabling float and bool inverted indexes."""
schema = Schema()
# Verify defaults
assert schema.defaults.float_value is not None
assert schema.defaults.float_value.float_inverted_index is not None
assert schema.defaults.float_value.float_inverted_index.enabled is True
assert schema.defaults.boolean is not None
assert schema.defaults.boolean.bool_inverted_index is not None
assert schema.defaults.boolean.bool_inverted_index.enabled is True
# Disable float inverted index globally
float_config = FloatInvertedIndexConfig()
schema.delete_index(config=float_config)
assert schema.defaults.float_value.float_inverted_index is not None
assert schema.defaults.float_value.float_inverted_index.enabled is False
# Disable bool inverted index globally
bool_config = BoolInvertedIndexConfig()
schema.delete_index(config=bool_config)
assert schema.defaults.boolean.bool_inverted_index is not None
assert schema.defaults.boolean.bool_inverted_index.enabled is False
# Enable float inverted index on a specific key
schema.create_index(config=FloatInvertedIndexConfig(), key="price")
assert "price" in schema.keys
assert schema.keys["price"].float_value.float_inverted_index.enabled is True
# Disable bool inverted index on a specific key
schema.delete_index(config=BoolInvertedIndexConfig(), key="is_active")
assert "is_active" in schema.keys
assert schema.keys["is_active"].boolean.bool_inverted_index.enabled is False
# Serialize and verify
json_data = schema.serialize_to_json()
assert (
json_data["defaults"]["float"]["float_inverted_index"]["enabled"] is False
)
assert json_data["defaults"]["bool"]["bool_inverted_index"]["enabled"] is False
assert (
json_data["keys"]["price"]["float"]["float_inverted_index"]["enabled"]
is True
)
assert (
json_data["keys"]["is_active"]["bool"]["bool_inverted_index"]["enabled"]
is False
)
# Deserialize and verify
deserialized = Schema.deserialize_from_json(json_data)
assert deserialized.defaults.float_value.float_inverted_index.enabled is False
assert deserialized.defaults.boolean.bool_inverted_index.enabled is False
assert (
deserialized.keys["price"].float_value.float_inverted_index.enabled is True
)
assert (
deserialized.keys["is_active"].boolean.bool_inverted_index.enabled is False
)
def test_space_inference_from_embedding_function(self) -> None:
"""Test that space is correctly inferred from embedding function when not explicitly set."""
# Register the mock embedding function
from chromadb.utils.embedding_functions import known_embedding_functions
known_embedding_functions["mock_embedding"] = MockEmbeddingFunction
try:
schema = Schema()
# Create vector config with EF but WITHOUT explicit space
# MockEmbeddingFunction has default_space() = "cosine"
custom_ef = MockEmbeddingFunction(model_name="space_inference_test")
vector_config = VectorIndexConfig(
embedding_function=custom_ef
# Note: space is NOT specified, should be inferred from EF
)
schema.create_index(config=vector_config)
# Serialize to JSON
json_data = schema.serialize_to_json()
# Verify that space was inferred and set to "cosine" in serialized JSON
defaults_vector = json_data["defaults"]["float_list"]["vector_index"]
assert defaults_vector["config"]["space"] == "cosine" # Inferred from EF
# Verify #embedding key also has inferred space
embedding_vector = json_data["keys"]["#embedding"]["float_list"][
"vector_index"
]
assert embedding_vector["config"]["space"] == "cosine" # Inferred from EF
# Deserialize and verify space is preserved
deserialized = Schema.deserialize_from_json(json_data)
assert deserialized.defaults.float_list is not None
assert deserialized.defaults.float_list.vector_index is not None
assert (
deserialized.defaults.float_list.vector_index.config.space == "cosine"
)
assert deserialized.keys["#embedding"].float_list is not None
assert deserialized.keys["#embedding"].float_list.vector_index is not None
assert (
deserialized.keys["#embedding"].float_list.vector_index.config.space
== "cosine"
)
finally:
# Clean up
if "mock_embedding" in known_embedding_functions:
del known_embedding_functions["mock_embedding"]
def test_explicit_space_overrides_embedding_function_default(self) -> None:
"""Test that explicit space parameter overrides the embedding function's default space."""
# Register the mock embedding function
from chromadb.utils.embedding_functions import known_embedding_functions
known_embedding_functions["mock_embedding"] = MockEmbeddingFunction
try:
schema = Schema()
# Create vector config with EF and EXPLICIT space that differs from EF default
# MockEmbeddingFunction has default_space() = "cosine"
# But we explicitly set space = "l2"
custom_ef = MockEmbeddingFunction(model_name="override_test")
vector_config = VectorIndexConfig(
embedding_function=custom_ef,
space="l2", # Explicitly override the EF's default
)
schema.create_index(config=vector_config)
# Serialize to JSON
json_data = schema.serialize_to_json()
# Verify that explicit space overrode the EF default
defaults_vector = json_data["defaults"]["float_list"]["vector_index"]
assert (
defaults_vector["config"]["space"] == "l2"
) # User-specified, not "cosine"
embedding_vector = json_data["keys"]["#embedding"]["float_list"][
"vector_index"
]
assert (
embedding_vector["config"]["space"] == "l2"
) # User-specified, not "cosine"
# Deserialize and verify explicit space is preserved
deserialized = Schema.deserialize_from_json(json_data)
assert deserialized.defaults.float_list is not None
assert deserialized.defaults.float_list.vector_index is not None
assert deserialized.defaults.float_list.vector_index.config.space == "l2"
assert deserialized.keys["#embedding"].float_list is not None
assert deserialized.keys["#embedding"].float_list.vector_index is not None
assert (
deserialized.keys["#embedding"].float_list.vector_index.config.space
== "l2"
)
finally:
# Clean up
if "mock_embedding" in known_embedding_functions:
del known_embedding_functions["mock_embedding"]
def test_space_inference_with_no_embedding_function(self) -> None:
"""Test space handling when no embedding function is provided (legacy mode)."""
schema = Schema()
# Create vector config with explicit space but NO embedding function (legacy)
vector_config = VectorIndexConfig(
embedding_function=None,
space="ip", # Must be explicit since no EF to infer from
)
schema.create_index(config=vector_config)
# Serialize to JSON
json_data = schema.serialize_to_json()
# Verify space is correctly set
defaults_vector = json_data["defaults"]["float_list"]["vector_index"]
assert defaults_vector["config"]["space"] == "ip"
assert defaults_vector["config"]["embedding_function"]["type"] == "legacy"
embedding_vector = json_data["keys"]["#embedding"]["float_list"]["vector_index"]
assert embedding_vector["config"]["space"] == "ip"
assert embedding_vector["config"]["embedding_function"]["type"] == "legacy"
# Deserialize and verify
deserialized = Schema.deserialize_from_json(json_data)
assert deserialized.defaults.float_list is not None
assert deserialized.defaults.float_list.vector_index is not None
assert deserialized.defaults.float_list.vector_index.config.space == "ip"
assert (
deserialized.defaults.float_list.vector_index.config.embedding_function
is None
)
def test_space_inference_multiple_roundtrips(self) -> None:
"""Test that inferred space remains stable across multiple serialization roundtrips."""
# Register the mock embedding function
from chromadb.utils.embedding_functions import known_embedding_functions
known_embedding_functions["mock_embedding"] = MockEmbeddingFunction
try:
# Create schema with inferred space (no explicit space)
original = Schema()
custom_ef = MockEmbeddingFunction(model_name="roundtrip_space_test")
vector_config = VectorIndexConfig(
embedding_function=custom_ef
) # No explicit space
original.create_index(config=vector_config)
# First roundtrip
json1 = original.serialize_to_json()
assert (
json1["defaults"]["float_list"]["vector_index"]["config"]["space"]
== "cosine"
)
schema1 = Schema.deserialize_from_json(json1)
# Second roundtrip
json2 = schema1.serialize_to_json()
assert (
json2["defaults"]["float_list"]["vector_index"]["config"]["space"]
== "cosine"
)
schema2 = Schema.deserialize_from_json(json2)
# Third roundtrip
json3 = schema2.serialize_to_json()
assert (
json3["defaults"]["float_list"]["vector_index"]["config"]["space"]
== "cosine"
)
# Verify all schemas have the inferred space
for schema in [schema1, schema2]:
assert schema.defaults.float_list is not None
assert schema.defaults.float_list.vector_index is not None
assert schema.defaults.float_list.vector_index.config.space == "cosine"
finally:
# Clean up
if "mock_embedding" in known_embedding_functions:
del known_embedding_functions["mock_embedding"]
def test_keys_have_independent_configs(self) -> None:
"""Test that each key override has its own independent config (no inheritance from defaults)."""
schema = Schema()
# Enable sparse vector on a key - it gets exactly what we specify
sparse_config = SparseVectorIndexConfig(
source_key="default_source",
embedding_function=MockSparseEmbeddingFunction(),
)
schema.create_index(config=sparse_config, key="field1")
# Verify field1 has the sparse vector with the specified source_key
assert "field1" in schema.keys
field1 = schema.keys["field1"]
assert field1.sparse_vector is not None
assert field1.sparse_vector.sparse_vector_index is not None
assert field1.sparse_vector.sparse_vector_index.enabled is True
assert (
field1.sparse_vector.sparse_vector_index.config.source_key
== "default_source"
)
# Now create another key with a DIFFERENT config (use string_inverted_index instead)
string_config = StringInvertedIndexConfig()
schema.create_index(config=string_config, key="field2")
# Verify field2 has its own config
assert "field2" in schema.keys
field2 = schema.keys["field2"]
assert field2.string is not None
assert field2.string.string_inverted_index is not None
assert field2.string.string_inverted_index.enabled is True
# Verify field1 is unchanged
assert (
field1.sparse_vector.sparse_vector_index.config.source_key
== "default_source"
)
def test_global_default_changes_dont_affect_existing_overrides(self) -> None:
"""Test that changes to global defaults don't affect already-created key overrides."""
# Register the mock embedding function
from chromadb.utils.embedding_functions import known_embedding_functions
known_embedding_functions["mock_embedding"] = MockEmbeddingFunction
try:
schema = Schema()
# Create initial vector config with HNSW
ef1 = MockEmbeddingFunction(model_name="initial_model")
hnsw1 = HnswIndexConfig(ef_construction=100, max_neighbors=16)
vector_config1 = VectorIndexConfig(
embedding_function=ef1, space="cosine", hnsw=hnsw1
)
schema.create_index(config=vector_config1)
# Capture the initial state of #embedding
initial_embedding_hnsw = schema.keys["#embedding"].float_list.vector_index.config.hnsw # type: ignore[union-attr]
assert initial_embedding_hnsw is not None
assert initial_embedding_hnsw.ef_construction == 100
assert initial_embedding_hnsw.max_neighbors == 16
# Now change the global vector config to different values
ef2 = MockEmbeddingFunction(model_name="updated_model")
hnsw2 = HnswIndexConfig(ef_construction=200, max_neighbors=32)
vector_config2 = VectorIndexConfig(
embedding_function=ef2, space="l2", hnsw=hnsw2
)
schema.create_index(config=vector_config2)
# Verify global defaults changed
assert schema.defaults.float_list is not None
assert schema.defaults.float_list.vector_index is not None
assert schema.defaults.float_list.vector_index.config.space == "l2"
assert schema.defaults.float_list.vector_index.config.hnsw is not None
assert (
schema.defaults.float_list.vector_index.config.hnsw.ef_construction
== 200
)
assert (
schema.defaults.float_list.vector_index.config.hnsw.max_neighbors == 32
)
# Verify #embedding was also updated (since it's the target of vector config)
assert schema.keys["#embedding"].float_list is not None
assert schema.keys["#embedding"].float_list.vector_index is not None
updated_embedding_hnsw = schema.keys[
"#embedding"
].float_list.vector_index.config.hnsw
assert updated_embedding_hnsw is not None
assert updated_embedding_hnsw.ef_construction == 200
assert updated_embedding_hnsw.max_neighbors == 32
assert (
schema.keys["#embedding"].float_list.vector_index.config.space == "l2"
)
finally:
# Clean up
if "mock_embedding" in known_embedding_functions:
del known_embedding_functions["mock_embedding"]
def test_key_specific_overrides_are_independent(self) -> None:
"""Test that modifying one key's overrides doesn't affect other keys."""
schema = Schema()
# Create sparse vector on one key and string indexes on others
schema.create_index(
config=SparseVectorIndexConfig(
source_key="source_a", embedding_function=MockSparseEmbeddingFunction()
),
key="key_a",
)
schema.create_index(config=StringInvertedIndexConfig(), key="key_b")
schema.create_index(config=StringInvertedIndexConfig(), key="key_c")
# Verify each key has its own config
assert schema.keys["key_a"].sparse_vector.sparse_vector_index.config.source_key == "source_a" # type: ignore[union-attr]
assert schema.keys["key_b"].string.string_inverted_index.enabled is True # type: ignore[union-attr]
assert schema.keys["key_c"].string.string_inverted_index.enabled is True # type: ignore[union-attr]
# Now disable string inverted index on key_b
schema.delete_index(config=StringInvertedIndexConfig(), key="key_b")
# Verify key_b is disabled
assert schema.keys["key_b"].string.string_inverted_index.enabled is False # type: ignore[union-attr]
# Verify key_a and key_c are unaffected
key_a = schema.keys["key_a"]
assert key_a.sparse_vector is not None
assert key_a.sparse_vector.sparse_vector_index is not None
assert key_a.sparse_vector.sparse_vector_index.enabled is True
assert key_a.sparse_vector.sparse_vector_index.config.source_key == "source_a"
key_c = schema.keys["key_c"]
assert key_c.string is not None
assert key_c.string.string_inverted_index is not None
assert key_c.string.string_inverted_index.enabled is True
# Serialize and deserialize to ensure independence is preserved
json_data = schema.serialize_to_json()
deserialized = Schema.deserialize_from_json(json_data)
# Verify after roundtrip
assert (
deserialized.keys[
"key_a"
].sparse_vector.sparse_vector_index.config.source_key
== "source_a"
)
assert deserialized.keys["key_b"].string.string_inverted_index.enabled is False
assert deserialized.keys["key_c"].string.string_inverted_index.enabled is True
def test_global_default_disable_then_key_enable(self) -> None:
"""Test disabling an index globally, then enabling it on specific keys."""
schema = Schema()
# Verify string_inverted_index is enabled by default
assert schema.defaults.string is not None
assert schema.defaults.string.string_inverted_index is not None
assert schema.defaults.string.string_inverted_index.enabled is True
# Disable string_inverted_index globally
schema.delete_index(config=StringInvertedIndexConfig())
assert schema.defaults.string.string_inverted_index.enabled is False
# Now enable it on specific keys
schema.create_index(config=StringInvertedIndexConfig(), key="important_field")
schema.create_index(config=StringInvertedIndexConfig(), key="searchable_field")
# Verify global default is still disabled
assert schema.defaults.string.string_inverted_index.enabled is False
# Verify specific keys have it enabled
important = schema.keys["important_field"]
assert important.string is not None
assert important.string.string_inverted_index is not None
assert important.string.string_inverted_index.enabled is True
searchable = schema.keys["searchable_field"]
assert searchable.string is not None
assert searchable.string.string_inverted_index is not None
assert searchable.string.string_inverted_index.enabled is True
# Verify other keys would inherit the disabled global default
# (by checking serialization - keys without overrides shouldn't appear)
json_data = schema.serialize_to_json()
# Only our explicitly modified keys + defaults (#document, #embedding) should be in overrides
assert "important_field" in json_data["keys"]
assert "searchable_field" in json_data["keys"]
assert "#document" in json_data["keys"]
assert "#embedding" in json_data["keys"]
# A hypothetical "other_field" would NOT be in overrides (uses global default)
assert "other_field" not in json_data["keys"]
def test_partial_override_fills_from_defaults(self) -> None:
"""Test that when you override one aspect of a value type, other indexes still follow defaults."""
schema = Schema()
# Enable sparse vector on a key
schema.create_index(
config=SparseVectorIndexConfig(
source_key="my_source", embedding_function=MockSparseEmbeddingFunction()
),
key="multi_index_field",
)
# This key now has sparse_vector overridden, but string, int, etc. should still follow global defaults
field = schema.keys["multi_index_field"]
# Sparse vector is explicitly set
assert field.sparse_vector is not None
assert field.sparse_vector.sparse_vector_index is not None
assert field.sparse_vector.sparse_vector_index.enabled is True
# Other value types are None (will fall back to global defaults)
assert field.string is None
assert field.int_value is None
assert field.float_value is None
assert field.boolean is None
assert field.float_list is None
# Serialize to verify sparse override behavior
json_data = schema.serialize_to_json()
field_json = json_data["keys"]["multi_index_field"]
# Only sparse_vector should be in the JSON for this key
assert "sparse_vector" in field_json
assert "string" not in field_json # Falls back to global
assert "int" not in field_json
assert "float" not in field_json
assert "bool" not in field_json
assert "float_list" not in field_json
# Deserialize and verify
deserialized = Schema.deserialize_from_json(json_data)
des_field = deserialized.keys["multi_index_field"]
# Sparse vector is set
assert des_field.sparse_vector is not None
assert des_field.sparse_vector.sparse_vector_index is not None
assert des_field.sparse_vector.sparse_vector_index.enabled is True
# Others are None (sparse override)
assert des_field.string is None
assert des_field.int_value is None
def test_sparse_vector_cannot_be_created_globally() -> None:
"""Test that sparse vector index cannot be created globally (without a key)."""
schema = Schema()
sparse_config = SparseVectorIndexConfig()
# Try to enable sparse vector globally - should fail
with pytest.raises(
ValueError, match="Sparse vector index must be created on a specific key"
):
schema.create_index(config=sparse_config)
def test_sparse_vector_cannot_be_deleted() -> None:
"""Test that sparse vector index cannot be deleted (temporarily disallowed)."""
schema = Schema()
sparse_config = SparseVectorIndexConfig()
# Create sparse vector on a key first
schema.create_index(config=sparse_config, key="my_key")
assert schema.keys["my_key"].sparse_vector is not None
assert schema.keys["my_key"].sparse_vector.sparse_vector_index is not None
assert schema.keys["my_key"].sparse_vector.sparse_vector_index.enabled is True
# Try to delete it - should fail
with pytest.raises(
ValueError, match="Deleting sparse vector index is not currently supported"
):
schema.delete_index(config=sparse_config, key="my_key")
def test_create_index_accepts_key_type() -> None:
"""Test that create_index accepts both str and Key types for the key parameter."""
schema = Schema()
# Test with string key
string_config = StringInvertedIndexConfig()
schema.create_index(config=string_config, key="test_field_str")
# Verify the index was created with string key
assert "test_field_str" in schema.keys
assert schema.keys["test_field_str"].string is not None
assert schema.keys["test_field_str"].string.string_inverted_index is not None
assert schema.keys["test_field_str"].string.string_inverted_index.enabled is True
# Test with Key type
int_config = IntInvertedIndexConfig()
schema.create_index(config=int_config, key=Key("test_field_key"))
# Verify the index was created with Key type (should be stored as string internally)
assert "test_field_key" in schema.keys
assert schema.keys["test_field_key"].int_value is not None
assert schema.keys["test_field_key"].int_value.int_inverted_index is not None
assert schema.keys["test_field_key"].int_value.int_inverted_index.enabled is True
# Test that both approaches produce equivalent results
schema2 = Schema()
schema2.create_index(config=string_config, key="same_field")
schema3 = Schema()
schema3.create_index(config=string_config, key=Key("same_field"))
# Both should have the same configuration
assert schema2.keys["same_field"].string is not None
assert schema2.keys["same_field"].string.string_inverted_index is not None
assert schema3.keys["same_field"].string is not None
assert schema3.keys["same_field"].string.string_inverted_index is not None
assert (
schema2.keys["same_field"].string.string_inverted_index.enabled
== schema3.keys["same_field"].string.string_inverted_index.enabled
)
def test_delete_index_accepts_key_type() -> None:
"""Test that delete_index accepts both str and Key types for the key parameter."""
schema = Schema()
# First, create some indexes to delete
string_config = StringInvertedIndexConfig()
int_config = IntInvertedIndexConfig()
# Test delete with string key
schema.delete_index(config=string_config, key="test_field_str")
# Verify the index was disabled with string key
assert "test_field_str" in schema.keys
assert schema.keys["test_field_str"].string is not None
assert schema.keys["test_field_str"].string.string_inverted_index is not None
assert schema.keys["test_field_str"].string.string_inverted_index.enabled is False
# Test delete with Key type
schema.delete_index(config=int_config, key=Key("test_field_key"))
# Verify the index was disabled with Key type (should be stored as string internally)
assert "test_field_key" in schema.keys
assert schema.keys["test_field_key"].int_value is not None
assert schema.keys["test_field_key"].int_value.int_inverted_index is not None
assert schema.keys["test_field_key"].int_value.int_inverted_index.enabled is False
# Test that both approaches produce equivalent results
schema2 = Schema()
schema2.delete_index(config=string_config, key="same_field")
schema3 = Schema()
schema3.delete_index(config=string_config, key=Key("same_field"))
# Both should have the same configuration
assert schema2.keys["same_field"].string is not None
assert schema2.keys["same_field"].string.string_inverted_index is not None
assert schema3.keys["same_field"].string is not None
assert schema3.keys["same_field"].string.string_inverted_index is not None
assert (
schema2.keys["same_field"].string.string_inverted_index.enabled
== schema3.keys["same_field"].string.string_inverted_index.enabled
)
def test_create_index_rejects_special_keys() -> None:
"""Test that create_index rejects special keys like Key.DOCUMENT and Key.EMBEDDING."""
schema = Schema()
string_config = StringInvertedIndexConfig()
# Test that Key.DOCUMENT is rejected (first check catches it)
with pytest.raises(
ValueError, match="Cannot create index on special key '#document'"
):
schema.create_index(config=string_config, key=Key.DOCUMENT)
# Test that Key.EMBEDDING is rejected (first check catches it)
with pytest.raises(
ValueError, match="Cannot create index on special key '#embedding'"
):
schema.create_index(config=string_config, key=Key.EMBEDDING)
# Test that string "#document" is also rejected (for consistency)
with pytest.raises(
ValueError, match="Cannot create index on special key '#document'"
):
schema.create_index(config=string_config, key="#document")
# Test that any other key starting with # is rejected (second check)
with pytest.raises(ValueError, match="key cannot begin with '#'"):
schema.create_index(config=string_config, key="#custom_key")
# Test with Key object for custom special key
with pytest.raises(ValueError, match="key cannot begin with '#'"):
schema.create_index(config=string_config, key=Key("#custom"))
def test_delete_index_rejects_special_keys() -> None:
"""Test that delete_index rejects special keys like Key.DOCUMENT and Key.EMBEDDING."""
schema = Schema()
string_config = StringInvertedIndexConfig()
# Test that Key.DOCUMENT is rejected (first check catches it)
with pytest.raises(
ValueError, match="Cannot delete index on special key '#document'"
):
schema.delete_index(config=string_config, key=Key.DOCUMENT)
# Test that Key.EMBEDDING is rejected (first check catches it)
with pytest.raises(
ValueError, match="Cannot delete index on special key '#embedding'"
):
schema.delete_index(config=string_config, key=Key.EMBEDDING)
# Test that string "#embedding" is also rejected (for consistency)
with pytest.raises(
ValueError, match="Cannot delete index on special key '#embedding'"
):
schema.delete_index(config=string_config, key="#embedding")
# Test that any other key starting with # is rejected (second check)
with pytest.raises(ValueError, match="key cannot begin with '#'"):
schema.delete_index(config=string_config, key="#custom_key")
# Test with Key object for custom special key
with pytest.raises(ValueError, match="key cannot begin with '#'"):
schema.delete_index(config=string_config, key=Key("#custom"))
def test_vector_index_config_source_key_accepts_key_type() -> None:
"""Test that VectorIndexConfig.source_key accepts both str and Key types."""
# Test with string
config1 = VectorIndexConfig(source_key="my_field")
assert config1.source_key == "my_field"
assert isinstance(config1.source_key, str)
# Test with Key object
config2 = VectorIndexConfig(source_key=Key("my_field")) # type: ignore[arg-type]
assert config2.source_key == "my_field"
assert isinstance(config2.source_key, str)
# Test with Key.DOCUMENT
config3 = VectorIndexConfig(source_key=Key.DOCUMENT) # type: ignore[arg-type]
assert config3.source_key == "#document"
assert isinstance(config3.source_key, str)
# Test that both approaches produce the same result
config4 = VectorIndexConfig(source_key="test")
config5 = VectorIndexConfig(source_key=Key("test")) # type: ignore[arg-type]
assert config4.source_key == config5.source_key
# Test with None
config6 = VectorIndexConfig(source_key=None)
assert config6.source_key is None
# Test serialization works correctly
config7 = VectorIndexConfig(source_key=Key("serialize_test")) # type: ignore[arg-type]
config_dict = config7.model_dump()
assert config_dict["source_key"] == "serialize_test"
assert isinstance(config_dict["source_key"], str)
def test_sparse_vector_index_config_source_key_accepts_key_type() -> None:
"""Test that SparseVectorIndexConfig.source_key accepts both str and Key types."""
# Test with string
config1 = SparseVectorIndexConfig(source_key="my_field")
assert config1.source_key == "my_field"
assert isinstance(config1.source_key, str)
# Test with Key object
config2 = SparseVectorIndexConfig(source_key=Key("my_field")) # type: ignore[arg-type]
assert config2.source_key == "my_field"
assert isinstance(config2.source_key, str)
# Test with Key.DOCUMENT
config3 = SparseVectorIndexConfig(source_key=Key.DOCUMENT) # type: ignore[arg-type]
assert config3.source_key == "#document"
assert isinstance(config3.source_key, str)
# Test that both approaches produce the same result
config4 = SparseVectorIndexConfig(source_key="test")
config5 = SparseVectorIndexConfig(source_key=Key("test")) # type: ignore[arg-type]
assert config4.source_key == config5.source_key
# Test with None
config6 = SparseVectorIndexConfig(source_key=None)
assert config6.source_key is None
# Test serialization works correctly
config7 = SparseVectorIndexConfig(source_key=Key("serialize_test")) # type: ignore[arg-type]
config_dict = config7.model_dump()
assert config_dict["source_key"] == "serialize_test"
assert isinstance(config_dict["source_key"], str)
def test_config_source_key_rejects_invalid_types() -> None:
"""Test that config validators reject invalid types for source_key."""
# Test VectorIndexConfig rejects invalid types
with pytest.raises(ValueError, match="source_key must be str or Key"):
VectorIndexConfig(source_key=123) # type: ignore[arg-type]
with pytest.raises(ValueError, match="source_key must be str or Key"):
VectorIndexConfig(source_key=["not", "valid"]) # type: ignore[arg-type]
# Test SparseVectorIndexConfig rejects invalid types
with pytest.raises(ValueError, match="source_key must be str or Key"):
SparseVectorIndexConfig(source_key=123) # type: ignore[arg-type]
with pytest.raises(ValueError, match="source_key must be str or Key"):
SparseVectorIndexConfig(source_key={"not": "valid"}) # type: ignore[arg-type]
def test_config_source_key_validates_special_keys() -> None:
"""Test that source_key only allows #document, rejects other special keys."""
# Test VectorIndexConfig
# #document is allowed (string)
config1 = VectorIndexConfig(source_key="#document")
assert config1.source_key == "#document"
# #document is allowed (Key)
config2 = VectorIndexConfig(source_key=Key.DOCUMENT) # type: ignore[arg-type]
assert config2.source_key == "#document"
# #embedding is rejected (string)
with pytest.raises(ValueError, match="source_key cannot begin with '#'"):
VectorIndexConfig(source_key="#embedding")
# #embedding is rejected (Key)
with pytest.raises(ValueError, match="source_key cannot begin with '#'"):
VectorIndexConfig(source_key=Key.EMBEDDING) # type: ignore[arg-type]
# #metadata is rejected
with pytest.raises(ValueError, match="source_key cannot begin with '#'"):
VectorIndexConfig(source_key="#metadata")
# #score is rejected
with pytest.raises(ValueError, match="source_key cannot begin with '#'"):
VectorIndexConfig(source_key="#score")
# Any other key starting with # is rejected
with pytest.raises(ValueError, match="source_key cannot begin with '#'"):
VectorIndexConfig(source_key="#custom")
# Regular keys (no #) are allowed
config3 = VectorIndexConfig(source_key="my_field")
assert config3.source_key == "my_field"
# Test SparseVectorIndexConfig
# #document is allowed (string)
config4 = SparseVectorIndexConfig(source_key="#document")
assert config4.source_key == "#document"
# #document is allowed (Key)
config5 = SparseVectorIndexConfig(source_key=Key.DOCUMENT) # type: ignore[arg-type]
assert config5.source_key == "#document"
# #embedding is rejected (string)
with pytest.raises(ValueError, match="source_key cannot begin with '#'"):
SparseVectorIndexConfig(source_key="#embedding")
# #embedding is rejected (Key)
with pytest.raises(ValueError, match="source_key cannot begin with '#'"):
SparseVectorIndexConfig(source_key=Key.EMBEDDING) # type: ignore[arg-type]
# #metadata is rejected
with pytest.raises(ValueError, match="source_key cannot begin with '#'"):
SparseVectorIndexConfig(source_key="#metadata")
# Regular keys (no #) are allowed
config6 = SparseVectorIndexConfig(source_key="my_field")
assert config6.source_key == "my_field"
def test_sparse_vector_config_requires_ef_with_source_key() -> None:
"""Test that SparseVectorIndexConfig raises ValueError when source_key is provided without embedding_function."""
schema = Schema()
# Attempt to create sparse vector index with source_key but no embedding_function
with pytest.raises(ValueError) as exc_info:
schema.create_index(
key="invalid_sparse",
config=SparseVectorIndexConfig(
source_key="text_field",
# No embedding_function provided - should raise ValueError
),
)
# Verify the error message mentions both source_key and embedding_function
error_msg = str(exc_info.value)
assert "source_key" in error_msg.lower()
assert "embedding_function" in error_msg.lower()
def test_config_classes_reject_invalid_fields() -> None:
"""Test that all config classes reject invalid/unknown fields."""
# Test SparseVectorIndexConfig rejects invalid field 'key' instead of 'source_key'
with pytest.raises((ValueError, ValidationError)) as exc_info:
SparseVectorIndexConfig(key=Key.DOCUMENT) # type: ignore[call-arg]
error_msg = str(exc_info.value)
assert "key" in error_msg.lower()
assert "extra" in error_msg.lower() or "permitted" in error_msg.lower()
# Test VectorIndexConfig rejects invalid fields
with pytest.raises((ValueError, ValidationError)) as exc_info:
VectorIndexConfig(invalid_field="test") # type: ignore[call-arg]
error_msg = str(exc_info.value)
assert "invalid_field" in error_msg.lower()
assert "extra" in error_msg.lower() or "permitted" in error_msg.lower()
# Test FtsIndexConfig rejects invalid fields
with pytest.raises((ValueError, ValidationError)) as exc_info:
FtsIndexConfig(invalid_field="test") # type: ignore[call-arg]
error_msg = str(exc_info.value)
assert "invalid_field" in error_msg.lower()
assert "extra" in error_msg.lower() or "permitted" in error_msg.lower()
# Test StringInvertedIndexConfig rejects invalid fields
with pytest.raises((ValueError, ValidationError)) as exc_info:
StringInvertedIndexConfig(invalid_field="test") # type: ignore[call-arg]
error_msg = str(exc_info.value)
assert "invalid_field" in error_msg.lower()
assert "extra" in error_msg.lower() or "permitted" in error_msg.lower()
# Test IntInvertedIndexConfig rejects invalid fields
with pytest.raises((ValueError, ValidationError)) as exc_info:
IntInvertedIndexConfig(invalid_field=123) # type: ignore[call-arg]
error_msg = str(exc_info.value)
assert "invalid_field" in error_msg.lower()
assert "extra" in error_msg.lower() or "permitted" in error_msg.lower()
# Test FloatInvertedIndexConfig rejects invalid fields
with pytest.raises((ValueError, ValidationError)) as exc_info:
FloatInvertedIndexConfig(invalid_field=1.23) # type: ignore[call-arg]
error_msg = str(exc_info.value)
assert "invalid_field" in error_msg.lower()
assert "extra" in error_msg.lower() or "permitted" in error_msg.lower()
# Test BoolInvertedIndexConfig rejects invalid fields
with pytest.raises((ValueError, ValidationError)) as exc_info:
BoolInvertedIndexConfig(invalid_field=True) # type: ignore[call-arg]
error_msg = str(exc_info.value)
assert "invalid_field" in error_msg.lower()
assert "extra" in error_msg.lower() or "permitted" in error_msg.lower()
# Test HnswIndexConfig rejects invalid fields
with pytest.raises((ValueError, ValidationError)) as exc_info:
HnswIndexConfig(invalid_field=123) # type: ignore[call-arg]
error_msg = str(exc_info.value)
assert "invalid_field" in error_msg.lower()
# Test HnswIndexConfig accepts all valid fields (all are defined in the model)
# This should not raise an error
config = HnswIndexConfig(
ef_construction=100,
max_neighbors=16,
ef_search=100,
num_threads=4,
batch_size=100,
sync_threshold=1000,
resize_factor=1.2,
)
assert config.ef_construction == 100
assert config.max_neighbors == 16
# Test SpannIndexConfig rejects invalid fields
with pytest.raises((ValueError, ValidationError)) as exc_info:
SpannIndexConfig(invalid_field=123) # type: ignore[call-arg]
error_msg = str(exc_info.value)
assert "invalid_field" in error_msg.lower()
# Test SpannIndexConfig accepts internal fields (allowed by validator but not stored)
# These should not raise an error but won't be stored as attributes
spann_config = SpannIndexConfig(
search_nprobe=64,
search_rng_factor=1.0, # type: ignore[call-arg] # internal field - allowed but not stored
search_rng_epsilon=10.0, # type: ignore[call-arg] # internal field - allowed but not stored
nreplica_count=8, # type: ignore[call-arg] # internal field - allowed but not stored
write_nprobe=32,
write_rng_factor=1.0, # type: ignore[call-arg] # internal field - allowed but not stored
write_rng_epsilon=5.0, # type: ignore[call-arg] # internal field - allowed but not stored
split_threshold=50,
num_samples_kmeans=1000, # type: ignore[call-arg] # internal field - allowed but not stored
initial_lambda=100.0, # type: ignore[call-arg] # internal field - allowed but not stored
reassign_neighbor_count=64,
merge_threshold=25,
num_centers_to_merge_to=8, # type: ignore[call-arg] # internal field - allowed but not stored
ef_construction=200,
ef_search=200,
max_neighbors=64,
)
# Verify defined fields are stored
assert spann_config.search_nprobe == 64
assert spann_config.write_nprobe == 32
assert spann_config.ef_construction == 200
# Verify internal fields are not stored (they're ignored due to "extra": "ignore")
assert not hasattr(spann_config, "search_rng_factor")
assert not hasattr(spann_config, "nreplica_count")
assert not hasattr(spann_config, "num_samples_kmeans")
| TestNewSchema |
python | cython__cython | Cython/Compiler/StringEncoding.py | {
"start": 3891,
"end": 8486
} | class ____(bytes):
# bytes subclass that is compatible with EncodedString
encoding = None
def __deepcopy__(self, memo):
return self
def byteencode(self):
return bytes(self)
def utf8encode(self):
assert False, "this is not a unicode string: %r" % self
def __str__(self):
"""Fake-decode the byte string to unicode to support %
formatting of unicode strings.
"""
return self.decode('ISO-8859-1')
is_unicode = False
def as_c_string_literal(self):
value = split_string_literal(escape_byte_string(self))
return '"%s"' % value
def bytes_literal(s, encoding):
assert isinstance(s, bytes)
s = BytesLiteral(s)
s.encoding = encoding
return s
def encoded_string(s, encoding):
assert isinstance(s, (str, bytes))
s = EncodedString(s)
if encoding is not None:
s.encoding = encoding
return s
def encoded_string_or_bytes_literal(s, encoding):
if isinstance(s, bytes):
return bytes_literal(s, encoding)
else:
return encoded_string(s, encoding)
char_from_escape_sequence = {
r'\a' : '\a',
r'\b' : '\b',
r'\f' : '\f',
r'\n' : '\n',
r'\r' : '\r',
r'\t' : '\t',
r'\v' : '\v',
}.get
_c_special = ('\\', '??', '"') + tuple(map(chr, range(32)))
def _to_escape_sequence(s):
if s in '\n\r\t':
return repr(s)[1:-1]
elif s == '"':
return r'\"'
elif s == '\\':
return r'\\'
else:
# within a character sequence, oct passes much better than hex
return ''.join([f'\\{ord(c):03o}' for c in s])
def _build_specials_replacer():
subexps = []
replacements = {}
for special in _c_special:
regexp = ''.join(['[%s]' % c.replace('\\', '\\\\') for c in special])
subexps.append(regexp)
replacements[special.encode('ASCII')] = _to_escape_sequence(special).encode('ASCII')
sub = re.compile(('(%s)' % '|'.join(subexps)).encode('ASCII')).sub
def replace_specials(m):
return replacements[m.group(1)]
def replace(s):
return sub(replace_specials, s)
return replace
_replace_specials = _build_specials_replacer()
def escape_char(c):
c = c.decode('ISO-8859-1')
if c in '\n\r\t\\':
return repr(c)[1:-1]
elif c == "'":
return "\\'"
n = ord(c)
if n < 32 or n >= 127:
# hex works well for characters
return "\\x%02X" % n
else:
# strictly £, @ and ` (which fall in this list) are only allowed
# in C23. But practically they're well-supported earlier.
return c
def escape_byte_string(s):
"""Escape a byte string so that it can be written into C code.
Note that this returns a Unicode string instead which, when
encoded as ASCII, will result in the correct byte sequence
being written.
"""
s = _replace_specials(s)
try:
return s.decode("ASCII") # trial decoding: plain ASCII => done
except UnicodeDecodeError:
pass
s_new = bytearray()
append, extend = s_new.append, s_new.extend
for b in s:
if b >= 127:
extend(b'\\%03o' % b)
else:
append(b)
return s_new.decode('ASCII')
def split_string_literal(s, limit=2000):
# MSVC can't handle long string literals.
if len(s) < limit:
return s
else:
start = 0
chunks = []
while start < len(s):
end = start + limit
if len(s) > end-4 and '\\' in s[end-4:end]:
end -= 4 - s[end-4:end].find('\\') # just before the backslash
while s[end-1] == '\\':
end -= 1
if end == start:
# must have been a long line of backslashes
end = start + limit - (limit % 2) - 4
break
chunks.append(s[start:end])
start = end
return '""'.join(chunks)
def encode_pyunicode_string(characters):
"""Create Py_UNICODE[] representation of a given unicode string.
"""
characters = list(map(ord, characters))
characters.append(0)
utf16, utf32 = [], characters
for code_point in characters:
if code_point >= 0x10000: # outside of BMP
high, low = divmod(code_point - 0x10000, 1024)
utf16.append(high + 0xD800)
utf16.append(low + 0xDC00)
else:
utf16.append(code_point)
if utf16 == utf32:
utf16 = []
return ",".join(map(str, utf16)), ",".join(map(str, utf32))
| BytesLiteral |
python | ray-project__ray | python/ray/air/tests/test_integration_comet.py | {
"start": 6362,
"end": 7161
} | class ____(unittest.TestCase):
@patch("comet_ml.Experiment")
def test_kwargs_passthrough(self, experiment):
"""Test that additional keyword arguments to CometLoggerCallback get
passed through to comet_ml.Experiment on log_trial_start
"""
experiment_kwargs = {"kwarg_1": "val_1"}
logger = CometLoggerCallback(**experiment_kwargs)
trial = MockTrial({"parameter": 1}, "trial2", 1, "artifact")
logger.log_trial_start(trial)
# These are the default kwargs that get passed to create the experiment
expected_kwargs = {kwarg: False for kwarg in logger._exclude_autolog}
expected_kwargs.update(experiment_kwargs)
experiment.assert_called_with(**expected_kwargs)
@patch("comet_ml.Experiment")
| ExperimentKwargsTest |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 38854,
"end": 39064
} | class ____(Sky2PixProjection, QuadCube):
r"""
COBE quadrilateralized spherical cube projection - sky to pixel.
Corresponds to the ``CSC`` projection in FITS WCS.
"""
| Sky2Pix_COBEQuadSphericalCube |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/constant_op_eager_test.py | {
"start": 12066,
"end": 12368
} | class ____(test.TestCase):
def testAsTensorForTensorInput(self):
t = constant_op.constant(10.0)
x = ops.convert_to_tensor(t)
self.assertIs(t, x)
def testAsTensorForNonTensorInput(self):
x = ops.convert_to_tensor(10.0)
self.assertTrue(isinstance(x, ops.EagerTensor))
| AsTensorTest |
python | huggingface__transformers | src/transformers/models/yolos/modeling_yolos.py | {
"start": 6652,
"end": 8201
} | class ____(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.config = config
def forward(self, pos_embed, img_size=(800, 1344)) -> torch.Tensor:
cls_pos_embed = pos_embed[:, :, 0, :]
cls_pos_embed = cls_pos_embed[:, None]
det_pos_embed = pos_embed[:, :, -self.config.num_detection_tokens :, :]
patch_pos_embed = pos_embed[:, :, 1 : -self.config.num_detection_tokens, :]
patch_pos_embed = patch_pos_embed.transpose(2, 3)
depth, batch_size, hidden_size, seq_len = patch_pos_embed.shape
patch_height, patch_width = (
self.config.image_size[0] // self.config.patch_size,
self.config.image_size[1] // self.config.patch_size,
)
patch_pos_embed = patch_pos_embed.view(depth * batch_size, hidden_size, patch_height, patch_width)
height, width = img_size
new_patch_height, new_patch_width = height // self.config.patch_size, width // self.config.patch_size
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed, size=(new_patch_height, new_patch_width), mode="bicubic", align_corners=False
)
patch_pos_embed = (
patch_pos_embed.flatten(2)
.transpose(1, 2)
.contiguous()
.view(depth, batch_size, new_patch_height * new_patch_width, hidden_size)
)
scale_pos_embed = torch.cat((cls_pos_embed, patch_pos_embed, det_pos_embed), dim=2)
return scale_pos_embed
| InterpolateMidPositionEmbeddings |
python | walkccc__LeetCode | solutions/3289. The Two Sneaky Numbers of Digitville/3289.py | {
"start": 0,
"end": 169
} | class ____:
def getSneakyNumbers(self, nums: list[int]) -> list[int]:
return [num for num, freq in collections.Counter(nums).items()
if freq == 2]
| Solution |
python | explosion__spaCy | spacy/pipeline/textcat.py | {
"start": 2063,
"end": 14038
} | class ____(TrainablePipe):
"""Pipeline component for single-label text classification.
DOCS: https://spacy.io/api/textcategorizer
"""
def __init__(
self,
vocab: Vocab,
model: Model,
name: str = "textcat",
*,
threshold: float,
scorer: Optional[Callable] = textcat_score,
) -> None:
"""Initialize a text categorizer for single-label classification.
vocab (Vocab): The shared vocabulary.
model (thinc.api.Model): The Thinc Model powering the pipeline component.
name (str): The component instance name, used to add entries to the
losses during training.
threshold (float): Unused, not needed for single-label (exclusive
classes) classification.
scorer (Optional[Callable]): The scoring method. Defaults to
Scorer.score_cats for the attribute "cats".
DOCS: https://spacy.io/api/textcategorizer#init
"""
self.vocab = vocab
self.model = model
self.name = name
self._rehearsal_model = None
cfg: Dict[str, Any] = {
"labels": [],
"threshold": threshold,
"positive_label": None,
}
self.cfg = dict(cfg)
self.scorer = scorer
@property
def support_missing_values(self):
# There are no missing values as the textcat should always
# predict exactly one label. All other labels are 0.0
# Subclasses may override this property to change internal behaviour.
return False
@property
def labels(self) -> Tuple[str]:
"""RETURNS (Tuple[str]): The labels currently added to the component.
DOCS: https://spacy.io/api/textcategorizer#labels
"""
return tuple(self.cfg["labels"]) # type: ignore[arg-type, return-value]
@property
def label_data(self) -> List[str]:
"""RETURNS (List[str]): Information about the component's labels.
DOCS: https://spacy.io/api/textcategorizer#label_data
"""
return self.labels # type: ignore[return-value]
def predict(self, docs: Iterable[Doc]):
"""Apply the pipeline's model to a batch of docs, without modifying them.
docs (Iterable[Doc]): The documents to predict.
RETURNS: The models prediction for each document.
DOCS: https://spacy.io/api/textcategorizer#predict
"""
if not any(len(doc) for doc in docs):
# Handle cases where there are no tokens in any docs.
tensors = [doc.tensor for doc in docs]
xp = self.model.ops.xp
scores = xp.zeros((len(list(docs)), len(self.labels)))
return scores
scores = self.model.predict(docs)
scores = self.model.ops.asarray(scores)
return scores
def set_annotations(self, docs: Iterable[Doc], scores) -> None:
"""Modify a batch of Doc objects, using pre-computed scores.
docs (Iterable[Doc]): The documents to modify.
scores: The scores to set, produced by TextCategorizer.predict.
DOCS: https://spacy.io/api/textcategorizer#set_annotations
"""
for i, doc in enumerate(docs):
for j, label in enumerate(self.labels):
doc.cats[label] = float(scores[i, j])
def update(
self,
examples: Iterable[Example],
*,
drop: float = 0.0,
sgd: Optional[Optimizer] = None,
losses: Optional[Dict[str, float]] = None,
) -> Dict[str, float]:
"""Learn from a batch of documents and gold-standard information,
updating the pipe's model. Delegates to predict and get_loss.
examples (Iterable[Example]): A batch of Example objects.
drop (float): The dropout rate.
sgd (thinc.api.Optimizer): The optimizer.
losses (Dict[str, float]): Optional record of the loss during training.
Updated using the component name as the key.
RETURNS (Dict[str, float]): The updated losses dictionary.
DOCS: https://spacy.io/api/textcategorizer#update
"""
if losses is None:
losses = {}
losses.setdefault(self.name, 0.0)
validate_examples(examples, "TextCategorizer.update")
self._validate_categories(examples)
if not any(len(eg.predicted) if eg.predicted else 0 for eg in examples):
# Handle cases where there are no tokens in any docs.
return losses
set_dropout_rate(self.model, drop)
scores, bp_scores = self.model.begin_update([eg.predicted for eg in examples])
loss, d_scores = self.get_loss(examples, scores)
bp_scores(d_scores)
if sgd is not None:
self.finish_update(sgd)
losses[self.name] += loss
return losses
def rehearse(
self,
examples: Iterable[Example],
*,
drop: float = 0.0,
sgd: Optional[Optimizer] = None,
losses: Optional[Dict[str, float]] = None,
) -> Dict[str, float]:
"""Perform a "rehearsal" update from a batch of data. Rehearsal updates
teach the current model to make predictions similar to an initial model,
to try to address the "catastrophic forgetting" problem. This feature is
experimental.
examples (Iterable[Example]): A batch of Example objects.
drop (float): The dropout rate.
sgd (thinc.api.Optimizer): The optimizer.
losses (Dict[str, float]): Optional record of the loss during training.
Updated using the component name as the key.
RETURNS (Dict[str, float]): The updated losses dictionary.
DOCS: https://spacy.io/api/textcategorizer#rehearse
"""
if losses is None:
losses = {}
losses.setdefault(self.name, 0.0)
if self._rehearsal_model is None:
return losses
validate_examples(examples, "TextCategorizer.rehearse")
self._validate_categories(examples)
docs = [eg.predicted for eg in examples]
if not any(len(doc) for doc in docs):
# Handle cases where there are no tokens in any docs.
return losses
set_dropout_rate(self.model, drop)
scores, bp_scores = self.model.begin_update(docs)
target, _ = self._rehearsal_model.begin_update(docs)
gradient = scores - target
bp_scores(gradient)
if sgd is not None:
self.finish_update(sgd)
losses[self.name] += (gradient**2).sum()
return losses
def _examples_to_truth(
self, examples: Iterable[Example]
) -> Tuple[numpy.ndarray, numpy.ndarray]:
nr_examples = len(list(examples))
truths = numpy.zeros((nr_examples, len(self.labels)), dtype="f")
not_missing = numpy.ones((nr_examples, len(self.labels)), dtype="f")
for i, eg in enumerate(examples):
for j, label in enumerate(self.labels):
if label in eg.reference.cats:
truths[i, j] = eg.reference.cats[label]
elif self.support_missing_values:
not_missing[i, j] = 0.0
truths = self.model.ops.asarray(truths) # type: ignore
return truths, not_missing # type: ignore
def get_loss(self, examples: Iterable[Example], scores) -> Tuple[float, float]:
"""Find the loss and gradient of loss for the batch of documents and
their predicted scores.
examples (Iterable[Examples]): The batch of examples.
scores: Scores representing the model's predictions.
RETURNS (Tuple[float, float]): The loss and the gradient.
DOCS: https://spacy.io/api/textcategorizer#get_loss
"""
validate_examples(examples, "TextCategorizer.get_loss")
self._validate_categories(examples)
truths, not_missing = self._examples_to_truth(examples)
not_missing = self.model.ops.asarray(not_missing) # type: ignore
d_scores = scores - truths
d_scores *= not_missing
mean_square_error = (d_scores**2).mean()
return float(mean_square_error), d_scores
def add_label(self, label: str) -> int:
"""Add a new label to the pipe.
label (str): The label to add.
RETURNS (int): 0 if label is already present, otherwise 1.
DOCS: https://spacy.io/api/textcategorizer#add_label
"""
if not isinstance(label, str):
raise ValueError(Errors.E187)
if label in self.labels:
return 0
self._allow_extra_label()
self.cfg["labels"].append(label) # type: ignore[attr-defined]
if self.model and "resize_output" in self.model.attrs:
self.model = self.model.attrs["resize_output"](self.model, len(self.labels))
self.vocab.strings.add(label)
return 1
def initialize(
self,
get_examples: Callable[[], Iterable[Example]],
*,
nlp: Optional[Language] = None,
labels: Optional[Iterable[str]] = None,
positive_label: Optional[str] = None,
) -> None:
"""Initialize the pipe for training, using a representative set
of data examples.
get_examples (Callable[[], Iterable[Example]]): Function that
returns a representative sample of gold-standard Example objects.
nlp (Language): The current nlp object the component is part of.
labels (Optional[Iterable[str]]): The labels to add to the component, typically generated by the
`init labels` command. If no labels are provided, the get_examples
callback is used to extract the labels from the data.
positive_label (Optional[str]): The positive label for a binary task with exclusive classes,
`None` otherwise and by default.
DOCS: https://spacy.io/api/textcategorizer#initialize
"""
validate_get_examples(get_examples, "TextCategorizer.initialize")
self._validate_categories(get_examples())
if labels is None:
for example in get_examples():
for cat in example.y.cats:
self.add_label(cat)
else:
for label in labels:
self.add_label(label)
if len(self.labels) < 2:
raise ValueError(Errors.E867)
if positive_label is not None:
if positive_label not in self.labels:
err = Errors.E920.format(pos_label=positive_label, labels=self.labels)
raise ValueError(err)
if len(self.labels) != 2:
err = Errors.E919.format(pos_label=positive_label, labels=self.labels)
raise ValueError(err)
self.cfg["positive_label"] = positive_label
subbatch = list(islice(get_examples(), 10))
doc_sample = [eg.reference for eg in subbatch]
label_sample, _ = self._examples_to_truth(subbatch)
self._require_labels()
assert len(doc_sample) > 0, Errors.E923.format(name=self.name)
assert len(label_sample) > 0, Errors.E923.format(name=self.name)
self.model.initialize(X=doc_sample, Y=label_sample)
def _validate_categories(self, examples: Iterable[Example]):
"""Check whether the provided examples all have single-label cats annotations."""
for ex in examples:
vals = list(ex.reference.cats.values())
if vals.count(1.0) > 1:
raise ValueError(Errors.E895.format(value=ex.reference.cats))
for val in vals:
if not (val == 1.0 or val == 0.0):
raise ValueError(Errors.E851.format(val=val))
# Setup backwards compatibility hook for factories
def __getattr__(name):
if name == "make_textcat":
module = importlib.import_module("spacy.pipeline.factories")
return module.make_textcat
raise AttributeError(f"module {__name__} has no attribute {name}")
| TextCategorizer |
python | facelessuser__soupsieve | tests/test_level3/test_target.py | {
"start": 51,
"end": 704
} | class ____(util.TestCase):
"""Test target selectors."""
MARKUP = """
<div>
<h2 id="head-1">Header 1</h1>
<div><p>content</p></div>
<h2 id="head-2">Header 2</h1>
<div><p>content</p></div>
</div>
"""
def test_target(self):
"""Test target."""
self.assert_selector(
self.MARKUP,
"#head-2:target",
[],
flags=util.HTML
)
def test_not_target(self):
"""Test not target."""
self.assert_selector(
self.MARKUP,
"#head-2:not(:target)",
["head-2"],
flags=util.HTML
)
| TestTarget |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/ddl.py | {
"start": 29347,
"end": 33290
} | class ____(BaseDDLElement):
"""Represent a :class:`_schema.Column`
as rendered in a CREATE TABLE statement,
via the :class:`.CreateTable` construct.
This is provided to support custom column DDL within the generation
of CREATE TABLE statements, by using the
compiler extension documented in :ref:`sqlalchemy.ext.compiler_toplevel`
to extend :class:`.CreateColumn`.
Typical integration is to examine the incoming :class:`_schema.Column`
object, and to redirect compilation if a particular flag or condition
is found::
from sqlalchemy import schema
from sqlalchemy.ext.compiler import compiles
@compiles(schema.CreateColumn)
def compile(element, compiler, **kw):
column = element.element
if "special" not in column.info:
return compiler.visit_create_column(element, **kw)
text = "%s SPECIAL DIRECTIVE %s" % (
column.name,
compiler.type_compiler.process(column.type),
)
default = compiler.get_column_default_string(column)
if default is not None:
text += " DEFAULT " + default
if not column.nullable:
text += " NOT NULL"
if column.constraints:
text += " ".join(
compiler.process(const) for const in column.constraints
)
return text
The above construct can be applied to a :class:`_schema.Table`
as follows::
from sqlalchemy import Table, Metadata, Column, Integer, String
from sqlalchemy import schema
metadata = MetaData()
table = Table(
"mytable",
MetaData(),
Column("x", Integer, info={"special": True}, primary_key=True),
Column("y", String(50)),
Column("z", String(20), info={"special": True}),
)
metadata.create_all(conn)
Above, the directives we've added to the :attr:`_schema.Column.info`
collection
will be detected by our custom compilation scheme:
.. sourcecode:: sql
CREATE TABLE mytable (
x SPECIAL DIRECTIVE INTEGER NOT NULL,
y VARCHAR(50),
z SPECIAL DIRECTIVE VARCHAR(20),
PRIMARY KEY (x)
)
The :class:`.CreateColumn` construct can also be used to skip certain
columns when producing a ``CREATE TABLE``. This is accomplished by
creating a compilation rule that conditionally returns ``None``.
This is essentially how to produce the same effect as using the
``system=True`` argument on :class:`_schema.Column`, which marks a column
as an implicitly-present "system" column.
For example, suppose we wish to produce a :class:`_schema.Table`
which skips
rendering of the PostgreSQL ``xmin`` column against the PostgreSQL
backend, but on other backends does render it, in anticipation of a
triggered rule. A conditional compilation rule could skip this name only
on PostgreSQL::
from sqlalchemy.schema import CreateColumn
@compiles(CreateColumn, "postgresql")
def skip_xmin(element, compiler, **kw):
if element.element.name == "xmin":
return None
else:
return compiler.visit_create_column(element, **kw)
my_table = Table(
"mytable",
metadata,
Column("id", Integer, primary_key=True),
Column("xmin", Integer),
)
Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE``
which only includes the ``id`` column in the string; the ``xmin`` column
will be omitted, but only against the PostgreSQL backend.
"""
__visit_name__ = "create_column"
element: Column[Any]
def __init__(self, element: Column[Any]) -> None:
self.element = element
| CreateColumn |
python | apache__airflow | providers/databricks/tests/unit/databricks/hooks/test_databricks.py | {
"start": 66840,
"end": 75192
} | class ____:
"""
Tests for async functionality of DatabricksHook.
"""
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id=DEFAULT_CONN_ID,
conn_type="databricks",
host=HOST,
login=LOGIN,
password=PASSWORD,
extra=None,
)
)
self.hook = DatabricksHook(retry_args=DEFAULT_RETRY_ARGS)
@pytest.mark.asyncio
async def test_init_async_session(self):
async with self.hook:
assert isinstance(self.hook._session, aiohttp.ClientSession)
assert self.hook._session is None
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_do_api_call_retries_with_client_connector_error(self, mock_get):
mock_get.side_effect = aiohttp.ClientConnectorError(
connection_key=None,
os_error=ssl.SSLError(
"SSL handshake is taking longer than 60.0 seconds: aborting the connection"
),
)
with mock.patch.object(self.hook.log, "error") as mock_errors:
async with self.hook:
with pytest.raises(AirflowException):
await self.hook._a_do_api_call(GET_RUN_ENDPOINT, {})
assert mock_errors.call_count == DEFAULT_RETRY_NUMBER
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_do_api_call_retries_with_client_timeout_error(self, mock_get):
mock_get.side_effect = TimeoutError()
with mock.patch.object(self.hook.log, "error") as mock_errors:
async with self.hook:
with pytest.raises(AirflowException):
await self.hook._a_do_api_call(GET_RUN_ENDPOINT, {})
assert mock_errors.call_count == DEFAULT_RETRY_NUMBER
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_do_api_call_retries_with_retryable_error(self, mock_get):
mock_get.side_effect = aiohttp.ClientResponseError(None, None, status=500)
with mock.patch.object(self.hook.log, "error") as mock_errors:
async with self.hook:
with pytest.raises(AirflowException):
await self.hook._a_do_api_call(GET_RUN_ENDPOINT, {})
assert mock_errors.call_count == DEFAULT_RETRY_NUMBER
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_do_api_call_does_not_retry_with_non_retryable_error(self, mock_get):
mock_get.side_effect = aiohttp.ClientResponseError(None, None, status=400)
with mock.patch.object(self.hook.log, "error") as mock_errors:
async with self.hook:
with pytest.raises(AirflowException):
await self.hook._a_do_api_call(GET_RUN_ENDPOINT, {})
mock_errors.assert_not_called()
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_do_api_call_succeeds_after_retrying(self, mock_get):
mock_get.side_effect = [
aiohttp.ClientResponseError(None, None, status=500),
create_valid_response_mock({"run_id": "1"}),
]
with mock.patch.object(self.hook.log, "error") as mock_errors:
async with self.hook:
response = await self.hook._a_do_api_call(GET_RUN_ENDPOINT, {})
assert mock_errors.call_count == 1
assert response == {"run_id": "1"}
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_do_api_call_waits_between_retries(self, mock_get):
self.hook = DatabricksHook(retry_args=DEFAULT_RETRY_ARGS)
mock_get.side_effect = aiohttp.ClientResponseError(None, None, status=500)
with mock.patch.object(self.hook.log, "error") as mock_errors:
async with self.hook:
with pytest.raises(AirflowException):
await self.hook._a_do_api_call(GET_RUN_ENDPOINT, {})
assert mock_errors.call_count == DEFAULT_RETRY_NUMBER
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.patch")
async def test_do_api_call_patch(self, mock_patch):
mock_patch.return_value.__aenter__.return_value.json = AsyncMock(
return_value={"cluster_name": "new_name"}
)
data = {"cluster_name": "new_name"}
async with self.hook:
patched_cluster_name = await self.hook._a_do_api_call(("PATCH", "2.1/jobs/runs/submit"), data)
assert patched_cluster_name["cluster_name"] == "new_name"
mock_patch.assert_called_once_with(
submit_run_endpoint(HOST),
json={"cluster_name": "new_name"},
auth=aiohttp.BasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_get_run_page_url(self, mock_get):
mock_get.return_value.__aenter__.return_value.json = AsyncMock(return_value=GET_RUN_RESPONSE)
async with self.hook:
run_page_url = await self.hook.a_get_run_page_url(RUN_ID)
assert run_page_url == RUN_PAGE_URL
mock_get.assert_called_once_with(
get_run_endpoint(HOST),
json={"run_id": RUN_ID},
auth=aiohttp.BasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_get_run_state(self, mock_get):
mock_get.return_value.__aenter__.return_value.json = AsyncMock(return_value=GET_RUN_RESPONSE)
async with self.hook:
run_state = await self.hook.a_get_run_state(RUN_ID)
assert run_state == RunState(LIFE_CYCLE_STATE, RESULT_STATE, STATE_MESSAGE)
mock_get.assert_called_once_with(
get_run_endpoint(HOST),
json={"run_id": RUN_ID},
auth=aiohttp.BasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_get_cluster_state(self, mock_get):
mock_get.return_value.__aenter__.return_value.json = AsyncMock(return_value=GET_CLUSTER_RESPONSE)
async with self.hook:
cluster_state = await self.hook.a_get_cluster_state(CLUSTER_ID)
assert cluster_state == ClusterState(CLUSTER_STATE, CLUSTER_STATE_MESSAGE)
mock_get.assert_called_once_with(
get_cluster_endpoint(HOST),
json={"cluster_id": CLUSTER_ID},
auth=aiohttp.BasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
async def test_get_run_output(self, mock_get):
mock_get.return_value.__aenter__.return_value.json = AsyncMock(return_value=GET_RUN_OUTPUT_RESPONSE)
async with self.hook:
run_output = await self.hook.a_get_run_output(RUN_ID)
run_output_error = run_output.get("error")
assert run_output_error == ERROR_MESSAGE
mock_get.assert_called_once_with(
get_run_output_endpoint(HOST),
json={"run_id": RUN_ID},
auth=aiohttp.BasicAuth(LOGIN, PASSWORD),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
@pytest.mark.db_test
| TestDatabricksHookAsyncMethods |
python | sphinx-doc__sphinx | sphinx/cmd/quickstart.py | {
"start": 3449,
"end": 5479
} | class ____(Exception):
"""Raised for validation errors."""
def is_path(x: str) -> str:
x = os.path.expanduser(x)
if not os.path.isdir(x):
raise ValidationError(__('Please enter a valid path name.'))
return x
def is_path_or_empty(x: str) -> str:
if not x:
return ''
return is_path(x)
def allow_empty(x: str) -> str:
return x
def nonempty(x: str) -> str:
if not x:
raise ValidationError(__('Please enter some text.'))
return x
def choice(*l: str) -> Callable[[str], str]:
def val(x: str) -> str:
if x not in l:
raise ValidationError(__('Please enter one of %s.') % ', '.join(l))
return x
return val
def boolean(x: str) -> bool:
if x.upper() not in {'Y', 'YES', 'N', 'NO'}:
raise ValidationError(__("Please enter either 'y' or 'n'."))
return x.upper() in {'Y', 'YES'}
def suffix(x: str) -> str:
if not (x[0:1] == '.' and len(x) > 1):
raise ValidationError(__("Please enter a file suffix, e.g. '.rst' or '.txt'."))
return x
def ok(x: str) -> str:
return x
def do_prompt(
text: str,
default: str | None = None,
validator: Callable[[str], Any] = nonempty,
) -> str | bool:
while True:
if default is not None:
prompt = PROMPT_PREFIX + f'{text} [{default}]: '
else:
prompt = PROMPT_PREFIX + text + ': '
if USE_LIBEDIT:
# Note: libedit has a problem for combination of ``input()``
# and escape sequences.
# To avoid the problem, all prompts are not colored on libedit.
# See https://github.com/sphinx-doc/sphinx/issues/5335
pass
else:
prompt = _question_colour(prompt)
x = term_input(prompt).strip()
if default and not x:
x = default
try:
x = validator(x)
except ValidationError as err:
print(red('* ' + str(err)))
continue
break
return x
| ValidationError |
python | openai__openai-python | src/openai/types/responses/tool.py | {
"start": 6031,
"end": 6227
} | class ____(BaseModel):
file_id: Optional[str] = None
"""File ID for the mask image."""
image_url: Optional[str] = None
"""Base64-encoded mask image."""
| ImageGenerationInputImageMask |
python | vyperlang__vyper | vyper/semantics/environment.py | {
"start": 1058,
"end": 1857
} | class ____(_EnvType):
_id = "tx"
_type_members = {"origin": AddressT(), "gasprice": UINT256_T}
CONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())}
def get_constant_vars() -> Dict:
"""
Get a dictionary of constant environment variables.
"""
result = {}
for k, v in CONSTANT_ENVIRONMENT_VARS.items():
result[k] = VarInfo(v, modifiability=Modifiability.RUNTIME_CONSTANT)
return result
MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": SelfT}
def get_mutable_vars() -> Dict:
"""
Get a dictionary of mutable environment variables (those that are
modified during the course of contract execution, such as `self`).
"""
return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()}
| _Tx |
python | ray-project__ray | rllib/evaluation/postprocessing.py | {
"start": 360,
"end": 12131
} | class ____:
"""Constant definitions for postprocessing."""
ADVANTAGES = "advantages"
VALUE_TARGETS = "value_targets"
@OldAPIStack
def adjust_nstep(n_step: int, gamma: float, batch: SampleBatch) -> None:
"""Rewrites `batch` to encode n-step rewards, terminateds, truncateds, and next-obs.
Observations and actions remain unaffected. At the end of the trajectory,
n is truncated to fit in the traj length.
Args:
n_step: The number of steps to look ahead and adjust.
gamma: The discount factor.
batch: The SampleBatch to adjust (in place).
Examples:
n-step=3
Trajectory=o0 r0 d0, o1 r1 d1, o2 r2 d2, o3 r3 d3, o4 r4 d4=True o5
gamma=0.9
Returned trajectory:
0: o0 [r0 + 0.9*r1 + 0.9^2*r2 + 0.9^3*r3] d3 o0'=o3
1: o1 [r1 + 0.9*r2 + 0.9^2*r3 + 0.9^3*r4] d4 o1'=o4
2: o2 [r2 + 0.9*r3 + 0.9^2*r4] d4 o1'=o5
3: o3 [r3 + 0.9*r4] d4 o3'=o5
4: o4 r4 d4 o4'=o5
"""
assert (
batch.is_single_trajectory()
), "Unexpected terminated|truncated in middle of trajectory!"
len_ = len(batch)
# Shift NEXT_OBS, TERMINATEDS, and TRUNCATEDS.
batch[SampleBatch.NEXT_OBS] = np.concatenate(
[
batch[SampleBatch.OBS][n_step:],
np.stack([batch[SampleBatch.NEXT_OBS][-1]] * min(n_step, len_)),
],
axis=0,
)
batch[SampleBatch.TERMINATEDS] = np.concatenate(
[
batch[SampleBatch.TERMINATEDS][n_step - 1 :],
np.tile(batch[SampleBatch.TERMINATEDS][-1], min(n_step - 1, len_)),
],
axis=0,
)
# Only fix `truncateds`, if present in the batch.
if SampleBatch.TRUNCATEDS in batch:
batch[SampleBatch.TRUNCATEDS] = np.concatenate(
[
batch[SampleBatch.TRUNCATEDS][n_step - 1 :],
np.tile(batch[SampleBatch.TRUNCATEDS][-1], min(n_step - 1, len_)),
],
axis=0,
)
# Change rewards in place.
for i in range(len_):
for j in range(1, n_step):
if i + j < len_:
batch[SampleBatch.REWARDS][i] += (
gamma**j * batch[SampleBatch.REWARDS][i + j]
)
@OldAPIStack
def compute_advantages(
rollout: SampleBatch,
last_r: float,
gamma: float = 0.9,
lambda_: float = 1.0,
use_gae: bool = True,
use_critic: bool = True,
rewards: TensorType = None,
vf_preds: TensorType = None,
):
"""Given a rollout, compute its value targets and the advantages.
Args:
rollout: SampleBatch of a single trajectory.
last_r: Value estimation for last observation.
gamma: Discount factor.
lambda_: Parameter for GAE.
use_gae: Using Generalized Advantage Estimation.
use_critic: Whether to use critic (value estimates). Setting
this to False will use 0 as baseline.
rewards: Override the reward values in rollout.
vf_preds: Override the value function predictions in rollout.
Returns:
SampleBatch with experience from rollout and processed rewards.
"""
assert (
SampleBatch.VF_PREDS in rollout or not use_critic
), "use_critic=True but values not found"
assert use_critic or not use_gae, "Can't use gae without using a value function"
last_r = convert_to_numpy(last_r)
if rewards is None:
rewards = rollout[SampleBatch.REWARDS]
if vf_preds is None and use_critic:
vf_preds = rollout[SampleBatch.VF_PREDS]
if use_gae:
vpred_t = np.concatenate([vf_preds, np.array([last_r])])
delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]
# This formula for the advantage comes from:
# "Generalized Advantage Estimation": https://arxiv.org/abs/1506.02438
rollout[Postprocessing.ADVANTAGES] = discount_cumsum(delta_t, gamma * lambda_)
rollout[Postprocessing.VALUE_TARGETS] = (
rollout[Postprocessing.ADVANTAGES] + vf_preds
).astype(np.float32)
else:
rewards_plus_v = np.concatenate([rewards, np.array([last_r])])
discounted_returns = discount_cumsum(rewards_plus_v, gamma)[:-1].astype(
np.float32
)
if use_critic:
rollout[Postprocessing.ADVANTAGES] = discounted_returns - vf_preds
rollout[Postprocessing.VALUE_TARGETS] = discounted_returns
else:
rollout[Postprocessing.ADVANTAGES] = discounted_returns
rollout[Postprocessing.VALUE_TARGETS] = np.zeros_like(
rollout[Postprocessing.ADVANTAGES]
)
rollout[Postprocessing.ADVANTAGES] = rollout[Postprocessing.ADVANTAGES].astype(
np.float32
)
return rollout
@OldAPIStack
def compute_gae_for_sample_batch(
    policy: Policy,
    sample_batch: SampleBatch,
    other_agent_batches: Optional[Dict[AgentID, SampleBatch]] = None,
    episode=None,
) -> SampleBatch:
    """Adds GAE (generalized advantage estimations) to a trajectory.

    The trajectory contains only data from one episode and from one agent.
    - If `config.batch_mode=truncate_episodes` (default), sample_batch may
      contain a truncated (at-the-end) episode, in case the
      `config.rollout_fragment_length` was reached by the sampler.
    - If `config.batch_mode=complete_episodes`, sample_batch will contain
      exactly one episode (no matter how long).
    New columns can be added to sample_batch and existing ones may be altered.

    Args:
        policy: The Policy used to generate the trajectory (`sample_batch`).
        sample_batch: The SampleBatch to postprocess.
        other_agent_batches: Optional dict of AgentIDs mapping to other
            agents' trajectory data (from the same episode).
            NOTE: The other agents use the same policy.
        episode: Optional multi-agent episode object in which the agents
            operated.

    Returns:
        The postprocessed, modified SampleBatch (or a new one).
    """
    # First append the SampleBatch.VALUES_BOOTSTRAPPED column; its last entry
    # is consumed below as the `last_r` argument of `compute_advantages()`.
    sample_batch = compute_bootstrap_value(sample_batch, policy)

    value_preds = np.array(sample_batch[SampleBatch.VF_PREDS])
    reward_seq = np.array(sample_batch[SampleBatch.REWARDS])

    # If a trailing time axis is present, drop it so the GAE math below runs
    # on 1D arrays. Sanity-check that both columns share the same shape.
    had_time_dim = value_preds.ndim == 2
    if had_time_dim:
        assert value_preds.shape == reward_seq.shape
        value_preds = value_preds.squeeze(axis=1)
        reward_seq = reward_seq.squeeze(axis=1)

    # Adds the policy logits, VF preds, and advantages to the batch,
    # using GAE ("generalized advantage estimation") or not.
    cfg = policy.config
    batch = compute_advantages(
        rollout=sample_batch,
        last_r=sample_batch[SampleBatch.VALUES_BOOTSTRAPPED][-1],
        gamma=cfg["gamma"],
        lambda_=cfg["lambda"],
        use_gae=cfg["use_gae"],
        use_critic=cfg.get("use_critic", True),
        vf_preds=value_preds,
        rewards=reward_seq,
    )

    # Restore the time axis on the advantages so the column's shape matches
    # the (unsqueezed) rest of the batch again.
    if had_time_dim:
        batch[Postprocessing.ADVANTAGES] = batch[Postprocessing.ADVANTAGES][:, None]

    return batch
@OldAPIStack
def compute_bootstrap_value(sample_batch: SampleBatch, policy: Policy) -> SampleBatch:
    """Performs a value function computation at the end of a trajectory.

    If the trajectory is terminated (not truncated), will not use the value function,
    but assume that the value of the last timestep is 0.0.
    In all other cases, will use the given policy's value function to compute the
    "bootstrapped" value estimate at the end of the given trajectory. To do so, the
    very last observation (sample_batch[NEXT_OBS][-1]) and - if applicable -
    the very last state output (sample_batch[STATE_OUT][-1]) will be used as inputs to
    the value function.

    The thus computed value estimate will be stored in a new column of the
    `sample_batch`: SampleBatch.VALUES_BOOTSTRAPPED. Thereby, values at all timesteps
    in this column are set to 0.0, except for the last timestep, which receives the
    computed bootstrapped value.
    This is done, such that in any loss function (which processes raw, intact
    trajectories, such as those of IMPALA and APPO) can use this new column as follows:

    Example: numbers=ts in episode, '|'=episode boundary (terminal),
    X=bootstrapped value (!= 0.0 b/c ts=12 is not a terminal).
    ts=5 is NOT a terminal.
    T:                     8   9  10  11  12 <- no terminal
    VF_PREDS:              .   .   .   .   .
    VALUES_BOOTSTRAPPED:   0   0   0   0   X

    Args:
        sample_batch: The SampleBatch (single trajectory) for which to compute the
            bootstrap value at the end. This SampleBatch will be altered in place
            (by adding a new column: SampleBatch.VALUES_BOOTSTRAPPED).
        policy: The Policy object, whose value function to use.

    Returns:
        The altered SampleBatch (with the extra SampleBatch.VALUES_BOOTSTRAPPED
        column).
    """
    # Trajectory is actually complete -> last r=0.0.
    if sample_batch[SampleBatch.TERMINATEDS][-1]:
        last_r = 0.0
    # Trajectory has been truncated -> last r=VF estimate of last obs.
    else:
        # Input dict is provided to us automatically via the Model's
        # requirements. It's a single-timestep (last one in trajectory)
        # input_dict.
        # Create an input dict according to the Policy's requirements.
        input_dict = sample_batch.get_single_step_input_dict(
            policy.view_requirements, index="last"
        )
        last_r = policy._value(**input_dict)

    vf_preds = np.array(sample_batch[SampleBatch.VF_PREDS])
    # We need to squeeze out the time dimension if there is one.
    # NOTE(review): unlike `compute_gae_for_sample_batch()`, no shape
    # sanity-check is done here before squeezing axis 1.
    if len(vf_preds.shape) == 2:
        vf_preds = np.squeeze(vf_preds, axis=1)
        squeezed = True
    else:
        squeezed = False

    # Set the SampleBatch.VALUES_BOOTSTRAPPED field to VF_PREDS[1:] + the
    # very last timestep (where this bootstrapping value is actually needed),
    # which we set to the computed `last_r`.
    sample_batch[SampleBatch.VALUES_BOOTSTRAPPED] = np.concatenate(
        [
            convert_to_numpy(vf_preds[1:]),
            # Single-element float32 array holding the bootstrap estimate.
            np.array([convert_to_numpy(last_r)], dtype=np.float32),
        ],
        axis=0,
    )

    # Restore the time axis on both columns so their shapes match the rest
    # of the (2D) batch again.
    if squeezed:
        sample_batch[SampleBatch.VF_PREDS] = np.expand_dims(vf_preds, axis=1)
        sample_batch[SampleBatch.VALUES_BOOTSTRAPPED] = np.expand_dims(
            sample_batch[SampleBatch.VALUES_BOOTSTRAPPED], axis=1
        )

    return sample_batch
@OldAPIStack
def discount_cumsum(x: np.ndarray, gamma: float) -> np.ndarray:
    """Calculates the discounted cumulative sum over a reward sequence `x`.

    The returned sequence `y` satisfies the recurrence:

    y[t] - discount*y[t+1] = x[t]
    reversed(y)[t] - discount*reversed(y)[t-1] = reversed(x)[t]

    Args:
        x: The sequence (e.g. of rewards) to compute the discounted
            cumulative sum over.
        gamma: The discount factor gamma.

    Returns:
        The sequence containing the discounted cumulative sums
        for each individual reward in `x` till the end of the trajectory.

    .. testcode::
        :skipif: True

        x = np.array([0.0, 1.0, 2.0, 3.0])
        gamma = 0.9
        discount_cumsum(x, gamma)

    .. testoutput::

        array([0.0 + 0.9*1.0 + 0.9^2*2.0 + 0.9^3*3.0,
               1.0 + 0.9*2.0 + 0.9^2*3.0,
               2.0 + 0.9*3.0,
               3.0])
    """
    # Running the IIR filter y[n] = x[n] + gamma*y[n-1] over the *reversed*
    # sequence accumulates future rewards; reversing the result once more
    # restores the original time order. This is a vectorized (C-speed)
    # equivalent of the naive backward Python loop.
    return scipy.signal.lfilter([1], [1, float(-gamma)], x[::-1], axis=0)[::-1]
| Postprocessing |
python | Textualize__textual | src/textual/widgets/_markdown.py | {
"start": 3796,
"end": 5692
} | class ____:
"""Manages a stack of paths like a browser."""
def __init__(self) -> None:
self.stack: list[Path] = []
self.index = 0
@property
def location(self) -> Path:
"""The current location.
Returns:
A path for the current document.
"""
if not self.stack:
return Path(".")
return self.stack[self.index]
@property
def start(self) -> bool:
"""Is the current location at the start of the stack?"""
return self.index == 0
@property
def end(self) -> bool:
"""Is the current location at the end of the stack?"""
return self.index >= len(self.stack) - 1
def go(self, path: str | PurePath) -> Path:
"""Go to a new document.
Args:
path: Path to new document.
Returns:
New location.
"""
location, anchor = Markdown.sanitize_location(str(path))
if location == Path(".") and anchor:
current_file, _ = Markdown.sanitize_location(str(self.location))
path = f"{current_file}#{anchor}"
new_path = self.location.parent / Path(path)
self.stack = self.stack[: self.index + 1]
new_path = new_path.absolute()
self.stack.append(new_path)
self.index = len(self.stack) - 1
return new_path
def back(self) -> bool:
"""Go back in the stack.
Returns:
True if the location changed, otherwise False.
"""
if self.index:
self.index -= 1
return True
return False
def forward(self) -> bool:
"""Go forward in the stack.
Returns:
True if the location changed, otherwise False.
"""
if self.index < len(self.stack) - 1:
self.index += 1
return True
return False
| Navigator |
python | huggingface__transformers | src/transformers/quantizers/quantizer_hqq.py | {
"start": 1565,
"end": 11171
} | class ____(HfQuantizer):
"""
HQQ quantizer base HF class.
nn.Linear modules are first tagged with quant_config in _process_model_before_weight_loading().
"""
use_keep_in_fp32_modules = False
requires_parameters_quantization = True
requires_calibration = False
required_packages = ["hqq"]
def __init__(self, quantization_config, **kwargs):
if not is_hqq_available():
raise ImportError(
"A valid HQQ version (>=0.2.1) is not available. Please follow the instructions to install it: `https://github.com/mobiusml/hqq/`."
)
super().__init__(quantization_config, **kwargs)
self.dtype = None
self.using_multi_gpu = False
# Keys that are serialized specifically by hqq
self.hqq_keys = HQQLinear(None, None).state_dict_keys() - {"bias"}
def validate_environment(self, *args, **kwargs):
if self.dtype is None:
if "dtype" in kwargs:
self.dtype = kwargs["dtype"]
else:
self.dtype = torch.float32
logger.info("Setting dtype to torch.float32 as the default value since it was not specified.")
device_map = kwargs.get("device_map")
if isinstance(device_map, dict):
if "cpu" in device_map.values() or "disk" in device_map.values():
raise ValueError(
"You are attempting to use an HQQ model with a device_map that contains a CPU or disk device."
" This is not supported. Please remove the CPU or disk device from the device_map."
)
else:
self.using_multi_gpu = len(set(device_map.values())) > 1
def update_missing_keys(
self, model: "PreTrainedModel", missing_keys: list[str], prefix: str, **kwargs
) -> list[str]:
if self.pre_quantized:
return [key for key in missing_keys if ("weight" not in key)]
else:
return missing_keys
# Adds missing keys for HQQLinear modules that are loaded but the model with initialized with torch.nn.Linear
def update_expected_keys(
self, model: "PreTrainedModel", expected_keys: list[str], loaded_keys: list[str]
) -> list[str]:
if not self.pre_quantized:
return expected_keys
# Collects all quantizable (linear) layers
def _find_hqq_quantizable_layers(model, layers):
for name, module in model.named_children():
if isinstance(module, (torch.nn.Linear)):
layers.add(module.name)
_find_hqq_quantizable_layers(module, layers)
new_keys = set(expected_keys)
# Name modules
for name, module in model.named_modules():
module.name = name
# valid modules are Linear layers that have HQQLinear state_dict. We ignore skip_modules and any layers with Linear state_dict() params
_valid_modules = set()
_find_hqq_quantizable_layers(model, _valid_modules)
# Remove skipped modules
_skipped_modules = set()
for _module in _valid_modules:
for _skip_module in model.config.quantization_config["skip_modules"]:
if _skip_module in _module:
_skipped_modules.add(_module)
_valid_modules -= _skipped_modules
# Append new expected layers based on _ref_keys
_ref_keys = HQQLinear(
linear_layer=None,
quant_config=None,
compute_dtype=torch.float16,
device="cpu",
del_orig=False,
).state_dict_keys() - {"bias"}
# Clean-up
_rm_keys = set()
for key in new_keys:
if any(_module in key for _module in _valid_modules):
_rm_keys.add(key)
new_keys -= _rm_keys
# At this point, new_keys contains all the keys of the layers that are NOT HQQLinear or torch.nn.Linear
# Re-populate Linear/HQQLinear
for _module in _valid_modules:
if _module + ".weight" in loaded_keys:
new_keys.add(_module + ".weight")
else:
new_keys.update({_module + "." + _ref_key for _ref_key in _ref_keys})
if _module + ".bias" in loaded_keys:
new_keys.add(_module + ".bias")
return list(new_keys)
def param_needs_quantization(self, model: "PreTrainedModel", param_name: str, **kwargs) -> bool:
module, _ = get_module_from_name(model, param_name)
# Since we do not prepare the modules in advance, we need every param of the Linear layer to go through
# `create_quantized_param`, even when `self.is_quantized == True`
return isinstance(module, torch.nn.Linear)
def create_quantized_param(
self,
model: "PreTrainedModel",
param_value: "torch.Tensor",
param_name: str,
target_device: "torch.device",
**kwargs,
):
module, tensor_name = get_module_from_name(model, param_name)
module_name = param_name.rsplit(".", 1)[0]
parent_module, node = get_module_from_name(model, module_name)
quant_config = model.config.quantization_config["quant_config"]
skip_modules = model.config.quantization_config["skip_modules"]
# In this case we do not quantize this layer (it's explicitly skipped) -> simply load param
if any(skip_module in module.name for skip_module in skip_modules):
module.load_state_dict(
{tensor_name: param_value.to(device=target_device, dtype=self.dtype)}, strict=False, assign=True
)
return
# We need this hack as the model is not pre-prepared as an empty skeleton on meta device
if self.pre_quantized:
# Save them for later
if not hasattr(self, "hqq_params"):
self.hqq_params = defaultdict(dict)
self.hqq_params[module_name].update({tensor_name: param_value})
hqq_params = self.hqq_params[module_name]
# If they are all present and saved, make it a HQQLinear layer! (we cannot do it param after param because
# hqq does not support it...)
if all(k in hqq_params for k in self.hqq_keys) and ("bias" in hqq_params or module.bias is None):
hqq_layer = HQQLinear(
linear_layer=None,
quant_config=None,
compute_dtype=self.dtype,
device=target_device,
del_orig=False,
)
hqq_layer.load_state_dict(hqq_params)
if hqq_layer.bias is not None and isinstance(hqq_layer.bias, torch.Tensor):
hqq_layer.bias = torch.nn.Parameter(hqq_layer.bias)
if self.using_multi_gpu:
hqq_layer = self._patch_layer_for_multigpu(hqq_layer)
setattr(parent_module, node, hqq_layer)
del self.hqq_params[module_name], module
return
# Load param in the module (without caring about device or dtype, it will be changed later)
module.load_state_dict({tensor_name: param_value}, strict=False, assign=True)
# If both the weight and bias have already been loaded, time to quantize!
module_is_ready = module.weight.device.type != "meta" and (
module.bias is None or module.bias.device.type != "meta"
)
if module_is_ready:
module_tag = ".".join(module.name.split(".")[-2:])
if "weight_quant_params" in quant_config:
module_quant_config = quant_config
elif module_tag in quant_config:
module_quant_config = quant_config[module_tag]
hqq_layer = HQQLinear(
module,
quant_config=module_quant_config,
compute_dtype=self.dtype,
device=target_device,
del_orig=True,
)
if hqq_layer.bias is not None and isinstance(hqq_layer.bias, torch.Tensor):
hqq_layer.bias = torch.nn.Parameter(hqq_layer.bias)
if self.using_multi_gpu:
hqq_layer = self._patch_layer_for_multigpu(hqq_layer)
setattr(parent_module, node, hqq_layer)
def _patch_layer_for_multigpu(self, hqq_layer):
def forward_with_device(self, x):
out = torch.matmul(x.to(self.device), self.dequantize().t())
if self.bias is not None:
out += self.bias
return out
hqq_layer.forward = lambda x: forward_with_device(hqq_layer, x)
return hqq_layer
def _process_model_before_weight_loading(
self,
model: "PreTrainedModel",
**kwargs,
):
# Add the corresponding quant_config to each valid module. This allows us to do the actual nn.Linear -> HQQLinear conversion in create_quantized_param().
# prepare_for_hqq_linear() also sets the right quantization config inside the model (model.config.quantization_config) and the layers (hqq_layer.quant_config)
model = prepare_for_hqq_linear(model, quantization_config=self.quantization_config)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
model.is_hqq_quantized = True
model.is_hqq_serializable = self.is_serializable()
return model
def is_serializable(self, safe_serialization=None):
return True
@property
def is_trainable(self) -> bool:
return True
| HqqHfQuantizer |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_print_area06.py | {
"start": 315,
"end": 1188
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("print_area06.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/worksheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {
"[Content_Types].xml": ['<Default Extension="bin"'],
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with a print area."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.print_area("A1:F9")
worksheet.write("A1", "Foo")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | getsentry__sentry | src/sentry/lang/javascript/cache.py | {
"start": 1761,
"end": 2639
} | class ____:
"""
Stores mappings between
- the url of a file to be demangled and the url of its associated
source map, and
- a source map's url and the map's contents.
"""
def __init__(self):
self._cache = {}
self._mapping = {}
def __contains__(self, sourcemap_url):
return sourcemap_url in self._cache
def link(self, url, sourcemap_url):
self._mapping[url] = sourcemap_url
def add(self, sourcemap_url, sourcemap_view):
self._cache[sourcemap_url] = sourcemap_view
def get(self, sourcemap_url):
return self._cache.get(sourcemap_url)
def get_link(self, url):
sourcemap_url = self._mapping.get(url)
if sourcemap_url:
sourcemap = self.get(sourcemap_url)
return (sourcemap_url, sourcemap)
return (None, None)
| SourceMapCache |
python | scrapy__scrapy | tests/test_spidermiddleware_referer.py | {
"start": 3158,
"end": 3589
} | class ____:
scenarii: list[tuple[str, str, bytes | None]] = [
("https://example.com/page.html", "https://example.com/", None),
("http://www.example.com/", "https://scrapy.org/", None),
("http://www.example.com/", "http://scrapy.org/", None),
("https://www.example.com/", "http://scrapy.org/", None),
("file:///home/path/to/somefile.html", "http://scrapy.org/", None),
]
| MixinNoReferrer |
python | numba__numba | numba/tests/test_linalg.py | {
"start": 30490,
"end": 40358
} | class ____(TestLinalgBase):
"""
Tests for np.linalg.eig/eigvals.
"""
def sample_matrix(self, m, dtype, order):
# This is a tridiag with the same but skewed values on the diagonals
v = self.sample_vector(m, dtype)
Q = np.diag(v)
idx = np.nonzero(np.eye(Q.shape[0], Q.shape[1], 1))
Q[idx] = v[1:]
idx = np.nonzero(np.eye(Q.shape[0], Q.shape[1], -1))
Q[idx] = v[:-1]
Q = np.array(Q, dtype=dtype, order=order)
return Q
def assert_no_domain_change(self, name, cfunc, args):
msg = name + "() argument must not cause a domain change."
self.assert_error(cfunc, args, msg)
def _check_worker(self, cfunc, name, expected_res_len,
check_for_domain_change):
def check(*args):
expected = cfunc.py_func(*args)
got = cfunc(*args)
a = args[0]
# check that the returned tuple is same length
self.assertEqual(len(expected), len(got))
# and that dimension is correct
res_is_tuple = False
if isinstance(got, tuple):
res_is_tuple = True
self.assertEqual(len(got), expected_res_len)
else: # its an array
self.assertEqual(got.ndim, expected_res_len)
# and that the computed results are contig and in the same way
self.assert_contig_sanity(got, "F")
use_reconstruction = False
# try plain match of each array to np first
for k in range(len(expected)):
try:
np.testing.assert_array_almost_equal_nulp(
got[k], expected[k], nulp=10)
except AssertionError:
# plain match failed, test by reconstruction
use_reconstruction = True
# If plain match fails then reconstruction is used.
# this checks that A*V ~== V*diag(W)
# i.e. eigensystem ties out
# this is required as numpy uses only double precision lapack
# routines and computation of eigenvectors is numerically
# sensitive, numba uses the type specific routines therefore
# sometimes comes out with a different (but entirely
# valid) answer (eigenvectors are not unique etc.).
# This is only applicable if eigenvectors are computed
# along with eigenvalues i.e. result is a tuple.
resolution = 5 * np.finfo(a.dtype).resolution
if use_reconstruction:
if res_is_tuple:
w, v = got
# modify 'a' if hermitian eigensystem functionality is
# being tested. 'L' for use lower part is default and
# the only thing used at present so we conjugate transpose
# the lower part into the upper for use in the
# reconstruction. By construction the sample matrix is
# tridiag so this is just a question of copying the lower
# diagonal into the upper and conjugating on the way.
if name[-1] == 'h':
idxl = np.nonzero(np.eye(a.shape[0], a.shape[1], -1))
idxu = np.nonzero(np.eye(a.shape[0], a.shape[1], 1))
cfunc(*args)
# upper idx must match lower for default uplo="L"
# if complex, conjugate
a[idxu] = np.conj(a[idxl])
# also, only the real part of the diagonals is
# considered in the calculation so the imag is zeroed
# out for the purposes of use in reconstruction.
a[np.diag_indices(a.shape[0])] = np.real(np.diag(a))
lhs = np.dot(a, v)
rhs = np.dot(v, np.diag(w))
np.testing.assert_allclose(
lhs.real,
rhs.real,
rtol=resolution,
atol=resolution
)
if np.iscomplexobj(v):
np.testing.assert_allclose(
lhs.imag,
rhs.imag,
rtol=resolution,
atol=resolution
)
else:
# This isn't technically reconstruction but is here to
# deal with that the order of the returned eigenvalues
# may differ in the case of routines just returning
# eigenvalues and there's no true reconstruction
# available with which to perform a check.
np.testing.assert_allclose(
np.sort(expected),
np.sort(got),
rtol=resolution,
atol=resolution
)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(*args)
return check
def checker_for_linalg_eig(
self, name, func, expected_res_len, check_for_domain_change=None):
"""
Test np.linalg.eig
"""
n = 10
cfunc = jit(nopython=True)(func)
check = self._check_worker(cfunc, name, expected_res_len,
check_for_domain_change)
# The main test loop
for dtype, order in product(self.dtypes, 'FC'):
a = self.sample_matrix(n, dtype, order)
check(a)
# Test both a real and complex type as the impls are different
for ty in [np.float32, np.complex64]:
# 0 dimensioned matrix
check(np.empty((0, 0), dtype=ty))
# Non square matrices
self.assert_non_square(cfunc, (np.ones((2, 3), dtype=ty),))
# Wrong dtype
self.assert_wrong_dtype(name, cfunc,
(np.ones((2, 2), dtype=np.int32),))
# Dimension issue
self.assert_wrong_dimensions(name, cfunc, (np.ones(10, dtype=ty),))
# no nans or infs
self.assert_no_nan_or_inf(cfunc,
(np.array([[1., 2., ], [np.inf, np.nan]],
dtype=ty),))
if check_for_domain_change:
# By design numba does not support dynamic return types, numpy does
# and uses this in the case of returning eigenvalues/vectors of
# a real matrix. The return type of np.linalg.eig(), when
# operating on a matrix in real space depends on the values present
# in the matrix itself (recalling that eigenvalues are the roots of the
# characteristic polynomial of the system matrix, which will by
# construction depend on the values present in the system matrix).
# This test asserts that if a domain change is required on the return
# type, i.e. complex eigenvalues from a real input, an error is raised.
# For complex types, regardless of the value of the imaginary part of
# the returned eigenvalues, a complex type will be returned, this
# follows numpy and fits in with numba.
# First check that the computation is valid (i.e. in complex space)
A = np.array([[1, -2], [2, 1]])
check(A.astype(np.complex128))
# and that the imaginary part is nonzero
l, _ = func(A)
self.assertTrue(np.any(l.imag))
# Now check that the computation fails in real space
for ty in [np.float32, np.float64]:
self.assert_no_domain_change(name, cfunc, (A.astype(ty),))
@needs_lapack
def test_linalg_eig(self):
self.checker_for_linalg_eig("eig", eig_matrix, 2, True)
@needs_lapack
def test_linalg_eigvals(self):
self.checker_for_linalg_eig("eigvals", eigvals_matrix, 1, True)
@needs_lapack
def test_linalg_eigh(self):
self.checker_for_linalg_eig("eigh", eigh_matrix, 2, False)
@needs_lapack
def test_linalg_eigvalsh(self):
self.checker_for_linalg_eig("eigvalsh", eigvalsh_matrix, 1, False)
@needs_lapack
def test_no_input_mutation(self):
# checks inputs are not mutated
for c in (('eig', 2, True),
('eigvals', 1, True),
('eigh', 2, False),
('eigvalsh', 1, False)):
m, nout, domain_change = c
meth = getattr(np.linalg, m)
@jit(nopython=True)
def func(X, test):
if test:
# not executed, but necessary to trigger A ordering in X
X = X[1:2, :]
return meth(X)
check = self._check_worker(func, m, nout, domain_change)
for dtype in (np.float64, np.complex128):
with self.subTest(meth=meth, dtype=dtype):
# trivial system, doesn't matter, just checking if it gets
# mutated
X = np.array([[10., 1, 0, 1],
[1, 9, 0, 0],
[0, 0, 8, 0],
[1, 0, 0, 7],
], order='F', dtype=dtype)
X_orig = np.copy(X)
expected = func.py_func(X, False)
np.testing.assert_allclose(X, X_orig)
got = func(X, False)
np.testing.assert_allclose(X, X_orig)
check(X, False)
| TestLinalgEigenSystems |
python | doocs__leetcode | solution/0400-0499/0425.Word Squares/Solution.py | {
"start": 0,
"end": 584
} | class ____:
def __init__(self):
self.children = [None] * 26
self.v = []
def insert(self, w, i):
node = self
for c in w:
idx = ord(c) - ord('a')
if node.children[idx] is None:
node.children[idx] = Trie()
node = node.children[idx]
node.v.append(i)
def search(self, w):
node = self
for c in w:
idx = ord(c) - ord('a')
if node.children[idx] is None:
return []
node = node.children[idx]
return node.v
| Trie |
python | kubernetes-client__python | kubernetes/client/models/v1_daemon_set_spec.py | {
"start": 383,
"end": 7947
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'min_ready_seconds': 'int',
'revision_history_limit': 'int',
'selector': 'V1LabelSelector',
'template': 'V1PodTemplateSpec',
'update_strategy': 'V1DaemonSetUpdateStrategy'
}
attribute_map = {
'min_ready_seconds': 'minReadySeconds',
'revision_history_limit': 'revisionHistoryLimit',
'selector': 'selector',
'template': 'template',
'update_strategy': 'updateStrategy'
}
def __init__(self, min_ready_seconds=None, revision_history_limit=None, selector=None, template=None, update_strategy=None, local_vars_configuration=None): # noqa: E501
"""V1DaemonSetSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._min_ready_seconds = None
self._revision_history_limit = None
self._selector = None
self._template = None
self._update_strategy = None
self.discriminator = None
if min_ready_seconds is not None:
self.min_ready_seconds = min_ready_seconds
if revision_history_limit is not None:
self.revision_history_limit = revision_history_limit
self.selector = selector
self.template = template
if update_strategy is not None:
self.update_strategy = update_strategy
@property
def min_ready_seconds(self):
"""Gets the min_ready_seconds of this V1DaemonSetSpec. # noqa: E501
The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready). # noqa: E501
:return: The min_ready_seconds of this V1DaemonSetSpec. # noqa: E501
:rtype: int
"""
return self._min_ready_seconds
@min_ready_seconds.setter
def min_ready_seconds(self, min_ready_seconds):
"""Sets the min_ready_seconds of this V1DaemonSetSpec.
The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready). # noqa: E501
:param min_ready_seconds: The min_ready_seconds of this V1DaemonSetSpec. # noqa: E501
:type: int
"""
self._min_ready_seconds = min_ready_seconds
@property
def revision_history_limit(self):
"""Gets the revision_history_limit of this V1DaemonSetSpec. # noqa: E501
The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10. # noqa: E501
:return: The revision_history_limit of this V1DaemonSetSpec. # noqa: E501
:rtype: int
"""
return self._revision_history_limit
@revision_history_limit.setter
def revision_history_limit(self, revision_history_limit):
"""Sets the revision_history_limit of this V1DaemonSetSpec.
The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10. # noqa: E501
:param revision_history_limit: The revision_history_limit of this V1DaemonSetSpec. # noqa: E501
:type: int
"""
self._revision_history_limit = revision_history_limit
@property
def selector(self):
"""Gets the selector of this V1DaemonSetSpec. # noqa: E501
:return: The selector of this V1DaemonSetSpec. # noqa: E501
:rtype: V1LabelSelector
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1DaemonSetSpec.
:param selector: The selector of this V1DaemonSetSpec. # noqa: E501
:type: V1LabelSelector
"""
if self.local_vars_configuration.client_side_validation and selector is None: # noqa: E501
raise ValueError("Invalid value for `selector`, must not be `None`") # noqa: E501
self._selector = selector
@property
def template(self):
"""Gets the template of this V1DaemonSetSpec. # noqa: E501
:return: The template of this V1DaemonSetSpec. # noqa: E501
:rtype: V1PodTemplateSpec
"""
return self._template
@template.setter
def template(self, template):
"""Sets the template of this V1DaemonSetSpec.
:param template: The template of this V1DaemonSetSpec. # noqa: E501
:type: V1PodTemplateSpec
"""
if self.local_vars_configuration.client_side_validation and template is None: # noqa: E501
raise ValueError("Invalid value for `template`, must not be `None`") # noqa: E501
self._template = template
@property
def update_strategy(self):
"""Gets the update_strategy of this V1DaemonSetSpec. # noqa: E501
:return: The update_strategy of this V1DaemonSetSpec. # noqa: E501
:rtype: V1DaemonSetUpdateStrategy
"""
return self._update_strategy
@update_strategy.setter
def update_strategy(self, update_strategy):
"""Sets the update_strategy of this V1DaemonSetSpec.
:param update_strategy: The update_strategy of this V1DaemonSetSpec. # noqa: E501
:type: V1DaemonSetUpdateStrategy
"""
self._update_strategy = update_strategy
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DaemonSetSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DaemonSetSpec):
return True
return self.to_dict() != other.to_dict()
| V1DaemonSetSpec |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.