language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scipy__scipy | scipy/integrate/tests/test_quadrature.py | {
"start": 19537,
"end": 28534
} | class ____:
x0 = np.arange(4)
y0 = x0**2
@pytest.mark.parametrize('use_dx', (False, True))
@pytest.mark.parametrize('use_initial', (False, True))
def test_1d(self, use_dx, use_initial, xp):
# Test for exact agreement with polynomial of highest
# possible order (3 if `dx` is constant, 2 otherwise).
rng = np.random.default_rng(82456839535679456794)
n = 10
# Generate random polynomials and ground truth
# integral of appropriate order
order = 3 if use_dx else 2
dx = xp.asarray(rng.random())
if order == 2:
x = xp.asarray(np.sort(rng.random(n)))
else:
x = xp.arange(n, dtype=xp.float64)*dx + xp.asarray(rng.random())
i = xp.arange(order + 1, dtype=xp.float64)[:, xp.newaxis]
c = xp.asarray(rng.random(order + 1))[:, xp.newaxis]
y = xp.sum(c*x**i, axis=0)
Y = xp.sum(c*x**(i + 1)/(i + 1), axis=0)
ref = Y if use_initial else (Y-Y[0])[1:]
# Integrate with `cumulative_simpson`
initial = Y[0] if use_initial else None
kwarg = {'dx': dx} if use_dx else {'x': x}
res = cumulative_simpson(y, **kwarg, initial=initial)
# Compare result against reference
if not use_dx:
xp_assert_close(res, ref, rtol=2e-15)
else:
i0 = 0 if use_initial else 1
# all terms are "close"
xp_assert_close(res, ref, rtol=0.0025)
# only even-interval terms are "exact"
xp_assert_close(res[i0::2], ref[i0::2], rtol=2e-15)
@skip_xp_backends(cpu_only=True) # uses np.apply_along_axis
@pytest.mark.parametrize('axis', np.arange(-3, 3))
@pytest.mark.parametrize('x_ndim', (1, 3))
@pytest.mark.parametrize('x_len', (1, 2, 7))
@pytest.mark.parametrize('i_ndim', (None, 0, 3,))
@pytest.mark.parametrize('dx', (None, True))
def test_nd(self, axis, x_ndim, x_len, i_ndim, dx, xp):
# Test behavior of `cumulative_simpson` with N-D `y`
rng = np.random.default_rng(82456839535679456794)
# determine shapes
shape = [5, 6, x_len]
shape[axis], shape[-1] = shape[-1], shape[axis]
shape_len_1 = shape.copy()
shape_len_1[axis] = 1
i_shape = shape_len_1 if i_ndim == 3 else ()
# initialize arguments
y = xp.asarray(rng.random(size=shape))
x, dx = None, None
if dx:
dx = rng.random(size=shape_len_1) if x_ndim > 1 else rng.random()
dx = xp.asarray(dx)
else:
x = (np.sort(rng.random(size=shape), axis=axis) if x_ndim > 1
else np.sort(rng.random(size=shape[axis])))
x = xp.asarray(x)
initial = None if i_ndim is None else xp.asarray(rng.random(size=i_shape))
# compare results
res = cumulative_simpson(y, x=x, dx=dx, initial=initial, axis=axis)
# use np to generate `ref` as `cumulative_simpson_nd_ref`
# uses `apply_along_axis`
ref = cumulative_simpson_nd_reference(
np.asarray(y), x=np.asarray(x), dx=None if dx is None else np.asarray(dx),
initial=None if initial is None else np.asarray(initial), axis=axis
)
xp_assert_close(res, xp.asarray(ref), rtol=1e-15)
@pytest.mark.parametrize(('message', 'kwarg_update'), [
("x must be strictly increasing", dict(x=[2, 2, 3, 4])),
("x must be strictly increasing", dict(x=[x0, [2, 2, 4, 8]], y=[y0, y0])),
("x must be strictly increasing", dict(x=[x0, x0, x0], y=[y0, y0, y0], axis=0)),
("At least one point is required", dict(x=[], y=[])),
("`axis=4` is not valid for `y` with `y.ndim=1`", dict(axis=4)),
("shape of `x` must be the same as `y` or 1-D", dict(x=np.arange(5))),
("`initial` must either be a scalar or...", dict(initial=np.arange(5))),
("`dx` must either be a scalar or...", dict(x=None, dx=np.arange(5))),
])
def test_simpson_exceptions(self, message, kwarg_update, xp):
kwargs0 = dict(y=xp.asarray(self.y0), x=xp.asarray(self.x0), dx=None,
initial=None, axis=-1)
kwarg_update = {k: xp.asarray(np.asarray(v)) if isinstance(v, list) else v
for k, v in kwarg_update.items()}
with pytest.raises(ValueError, match=message):
cumulative_simpson(**dict(kwargs0, **kwarg_update))
def test_special_cases(self, xp):
# Test special cases not checked elsewhere
rng = np.random.default_rng(82456839535679456794)
y = xp.asarray(rng.random(size=10))
res = cumulative_simpson(y, dx=0.)
xp_assert_equal(res, xp.zeros(9, dtype=xp.float64))
# Should add tests of:
# - all elements of `x` identical
# These should work as they do for `simpson`
def _get_theoretical_diff_between_simps_and_cum_simps(self, y, x):
"""`cumulative_simpson` and `simpson` can be tested against other to verify
they give consistent results. `simpson` will iteratively be called with
successively higher upper limits of integration. This function calculates
the theoretical correction required to `simpson` at even intervals to match
with `cumulative_simpson`.
"""
d = np.diff(x, axis=-1)
sub_integrals_h1 = _cumulative_simpson_unequal_intervals(y, d)
sub_integrals_h2 = _cumulative_simpson_unequal_intervals(
y[..., ::-1], d[..., ::-1]
)[..., ::-1]
# Concatenate to build difference array
zeros_shape = (*y.shape[:-1], 1)
theoretical_difference = np.concatenate(
[
np.zeros(zeros_shape),
(sub_integrals_h1[..., 1:] - sub_integrals_h2[..., :-1]),
np.zeros(zeros_shape),
],
axis=-1,
)
# Differences only expected at even intervals. Odd intervals will
# match exactly so there is no correction
theoretical_difference[..., 1::2] = 0.0
# Note: the first interval will not match from this correction as
# `simpson` uses the trapezoidal rule
return theoretical_difference
@pytest.mark.fail_slow(10)
@pytest.mark.slow
@given(
y=hyp_num.arrays(
np.float64,
hyp_num.array_shapes(max_dims=4, min_side=3, max_side=10),
elements=st.floats(-10, 10, allow_nan=False).filter(lambda x: abs(x) > 1e-7)
)
)
def test_cumulative_simpson_against_simpson_with_default_dx(
self, y, xp
):
"""Theoretically, the output of `cumulative_simpson` will be identical
to `simpson` at all even indices and in the last index. The first index
will not match as `simpson` uses the trapezoidal rule when there are only two
data points. Odd indices after the first index are shown to match with
a mathematically-derived correction."""
def simpson_reference(y):
return np.stack(
[simpson(y[..., :i], dx=1.0) for i in range(2, y.shape[-1]+1)], axis=-1,
)
res = cumulative_simpson(xp.asarray(y), dx=1.0)
ref = simpson_reference(y)
theoretical_difference = self._get_theoretical_diff_between_simps_and_cum_simps(
y, x=np.arange(y.shape[-1])
)
xp_assert_close(
res[..., 1:], xp.asarray(ref[..., 1:] + theoretical_difference[..., 1:]),
atol=1e-16
)
@pytest.mark.fail_slow(10)
@pytest.mark.slow
@given(
y=hyp_num.arrays(
np.float64,
hyp_num.array_shapes(max_dims=4, min_side=3, max_side=10),
elements=st.floats(-10, 10, allow_nan=False).filter(lambda x: abs(x) > 1e-7)
)
)
def test_cumulative_simpson_against_simpson(
self, y, xp
):
"""Theoretically, the output of `cumulative_simpson` will be identical
to `simpson` at all even indices and in the last index. The first index
will not match as `simpson` uses the trapezoidal rule when there are only two
data points. Odd indices after the first index are shown to match with
a mathematically-derived correction."""
interval = 10/(y.shape[-1] - 1)
x = np.linspace(0, 10, num=y.shape[-1])
x[1:] = x[1:] + 0.2*interval*np.random.uniform(-1, 1, len(x) - 1)
def simpson_reference(y, x):
return np.stack(
[simpson(y[..., :i], x=x[..., :i]) for i in range(2, y.shape[-1]+1)],
axis=-1,
)
res = cumulative_simpson(xp.asarray(y), x=xp.asarray(x))
ref = simpson_reference(y, x)
theoretical_difference = self._get_theoretical_diff_between_simps_and_cum_simps(
y, x
)
xp_assert_close(
res[..., 1:], xp.asarray(ref[..., 1:] + theoretical_difference[..., 1:])
)
@make_xp_test_case(integrate.lebedev_rule)
| TestCumulativeSimpson |
python | scipy__scipy | scipy/stats/_resampling.py | {
"start": 93735,
"end": 94568
} | class ____:
"""Configuration information for a statistical resampling method.
Instances of this class can be passed into the `method` parameter of some
hypothesis test functions to perform a resampling or Monte Carlo version
of the hypothesis test.
Attributes
----------
n_resamples : int
The number of resamples to perform or Monte Carlo samples to draw.
batch : int, optional
The number of resamples to process in each vectorized call to
the statistic. Batch sizes >>1 tend to be faster when the statistic
is vectorized, but memory usage scales linearly with the batch size.
Default is ``None``, which processes all resamples in a single batch.
"""
n_resamples: int = 9999
batch: int = None # type: ignore[assignment]
@dataclass
| ResamplingMethod |
python | kamyu104__LeetCode-Solutions | Python/maximum-sum-of-two-non-overlapping-subarrays.py | {
"start": 29,
"end": 617
} | class ____(object):
def maxSumTwoNoOverlap(self, A, L, M):
"""
:type A: List[int]
:type L: int
:type M: int
:rtype: int
"""
for i in xrange(1, len(A)):
A[i] += A[i-1]
result, L_max, M_max = A[L+M-1], A[L-1], A[M-1]
for i in xrange(L+M, len(A)):
L_max = max(L_max, A[i-M] - A[i-L-M])
M_max = max(M_max, A[i-L] - A[i-L-M])
result = max(result,
L_max + A[i] - A[i-M],
M_max + A[i] - A[i-L])
return result
| Solution |
python | pytorch__pytorch | test/export/test_dynamic_shapes.py | {
"start": 158,
"end": 1373
} | class ____(TestCase):
def test_dimhint_repr(self):
hint = _DimHint(_DimHintType.DYNAMIC)
self.assertEqual(repr(hint), "DimHint(DYNAMIC)")
hint_with_bounds = _DimHint(_DimHintType.AUTO, min=1, max=64)
self.assertEqual(repr(hint_with_bounds), "DimHint(AUTO, min=1, max=64)")
non_factory_hint = _DimHint(_DimHintType.STATIC, min=16, _factory=False)
self.assertEqual(repr(non_factory_hint), "DimHint(STATIC, min=16)")
def test_dimhint_factory(self):
factory = _DimHint(_DimHintType.AUTO)
self.assertTrue(factory._factory)
result = factory(min=8, max=32)
self.assertEqual(result.type, _DimHintType.AUTO)
self.assertEqual(result.min, 8)
self.assertEqual(result.max, 32)
self.assertFalse(result._factory)
with self.assertRaises(TypeError) as cm:
result(min=1, max=10)
self.assertIn("object is not callable", str(cm.exception))
bounded = Dim.DYNAMIC(min=4, max=16)
self.assertEqual(repr(bounded), "DimHint(DYNAMIC, min=4, max=16)")
with self.assertRaises(AssertionError):
factory(min=-1)
if __name__ == "__main__":
run_tests()
| TestDimHint |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/convolutional.py | {
"start": 126087,
"end": 131584
} | class ____(Layer):
"""Cropping layer for 2D input (e.g. picture).
It crops along spatial dimensions, i.e. height and width.
Examples:
>>> input_shape = (2, 28, 28, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = tf.keras.layers.Cropping2D(cropping=((2, 2), (4, 4)))(x)
>>> print(y.shape)
(2, 24, 20, 3)
Args:
cropping: Int, or tuple of 2 ints, or tuple of 2 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to height and width.
- If tuple of 2 ints:
interpreted as two different
symmetric cropping values for height and width:
`(symmetric_height_crop, symmetric_width_crop)`.
- If tuple of 2 tuples of 2 ints:
interpreted as
`((top_crop, bottom_crop), (left_crop, right_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, rows, cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, rows, cols)`
Output shape:
4D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, cropped_rows, cropped_cols, channels)`
- If `data_format` is `"channels_first"`:
`(batch_size, channels, cropped_rows, cropped_cols)`
"""
def __init__(self, cropping=((0, 0), (0, 0)), data_format=None, **kwargs):
super(Cropping2D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 2:
raise ValueError('`cropping` should have two elements. '
'Found: ' + str(cropping))
height_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
width_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
self.cropping = (height_cropping, width_cropping)
else:
raise ValueError('`cropping` should be either an int, '
'a tuple of 2 ints '
'(symmetric_height_crop, symmetric_width_crop), '
'or a tuple of 2 tuples of 2 ints '
'((top_crop, bottom_crop), (left_crop, right_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
return tensor_shape.TensorShape([
input_shape[0], input_shape[1],
input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[2] else None,
input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[3] else None
])
else:
return tensor_shape.TensorShape([
input_shape[0],
input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
if input_shape[1] else None,
input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
if input_shape[2] else None, input_shape[3]
])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
| Cropping2D |
python | ansible__ansible | lib/ansible/playbook/play_context.py | {
"start": 1608,
"end": 13847
} | class ____(Base):
"""
This class is used to consolidate the connection information for
hosts in a play and child tasks, where the task may override some
connection/authentication information.
"""
_post_validate_object = True
# base
module_compression = FieldAttribute(isa='string', default=C.DEFAULT_MODULE_COMPRESSION)
shell = FieldAttribute(isa='string')
executable = FieldAttribute(isa='string', default=C.DEFAULT_EXECUTABLE)
# connection fields, some are inherited from Base:
# (connection, port, remote_user, environment, no_log)
remote_addr = FieldAttribute(isa='string')
password = FieldAttribute(isa='string')
timeout = FieldAttribute(isa='int', default=C.DEFAULT_TIMEOUT)
connection_user = FieldAttribute(isa='string')
private_key_file = FieldAttribute(isa='string', default=C.DEFAULT_PRIVATE_KEY_FILE)
pipelining = FieldAttribute(isa='bool', default=C.ANSIBLE_PIPELINING)
# networking modules
network_os = FieldAttribute(isa='string')
# FIXME: docker - remove these
docker_extra_args = FieldAttribute(isa='string')
# ???
connection_lockfd = FieldAttribute(isa='int')
# privilege escalation fields
become = FieldAttribute(isa='bool')
become_method = FieldAttribute(isa='string')
become_user = FieldAttribute(isa='string')
become_pass = FieldAttribute(isa='string')
become_exe = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_EXE)
become_flags = FieldAttribute(isa='string', default=C.DEFAULT_BECOME_FLAGS)
prompt = FieldAttribute(isa='string')
start_at_task = FieldAttribute(isa='string')
step = FieldAttribute(isa='bool', default=False)
# "PlayContext.force_handlers should not be used, the calling code should be using play itself instead"
force_handlers = FieldAttribute(isa='bool', default=False)
def __init__(self, play=None, passwords=None, connection_lockfd=None):
# Note: play is really not optional. The only time it could be omitted is when we create
# a PlayContext just so we can invoke its deserialize method to load it from a serialized
# data source.
super(PlayContext, self).__init__()
if passwords is None:
passwords = {}
self.password = passwords.get('conn_pass', '')
self.become_pass = passwords.get('become_pass', '')
self._become_plugin = None
self.prompt = ''
self.success_key = ''
# a file descriptor to be used during locking operations
self.connection_lockfd = connection_lockfd
# set options before play to allow play to override them
if context.CLIARGS:
self.set_attributes_from_cli()
else:
self._internal_verbosity = 0
if play:
self.set_attributes_from_play(play)
def set_attributes_from_plugin(self, plugin):
# generic derived from connection plugin, temporary for backwards compat, in the end we should not set play_context properties
# get options for plugins
options = C.config.get_configuration_definitions(plugin.plugin_type, plugin._load_name)
for option in options:
if option:
flag = options[option].get('name')
if flag:
setattr(self, flag, plugin.get_option(flag))
def set_attributes_from_play(self, play):
self.force_handlers = play.force_handlers
def set_attributes_from_cli(self):
"""
Configures this connection information instance with data from
options specified by the user on the command line. These have a
lower precedence than those set on the play or host.
"""
if context.CLIARGS.get('timeout', False):
self.timeout = int(context.CLIARGS['timeout'])
# From the command line. These should probably be used directly by plugins instead
# For now, they are likely to be moved to FieldAttribute defaults
self.private_key_file = context.CLIARGS.get('private_key_file') # Else default
self._internal_verbosity = context.CLIARGS.get('verbosity') # Else default
# Not every cli that uses PlayContext has these command line args so have a default
self.start_at_task = context.CLIARGS.get('start_at_task', None) # Else default
def set_task_and_variable_override(self, task, variables, templar):
"""
Sets attributes from the task if they are set, which will override
those from the play.
:arg task: the task object with the parameters that were set on it
:arg variables: variables from inventory
:arg templar: templar instance if templating variables is needed
"""
new_info = self.copy()
# loop through a subset of attributes on the task object and set
# connection fields based on their values
for attr in TASK_ATTRIBUTE_OVERRIDES:
if (attr_val := getattr(task, attr, None)) is not None:
setattr(new_info, attr, attr_val)
# next, use the MAGIC_VARIABLE_MAPPING dictionary to update this
# connection info object with 'magic' variables from the variable list.
# If the value 'ansible_delegated_vars' is in the variables, it means
# we have a delegated-to host, so we check there first before looking
# at the variables in general
if task.delegate_to is not None:
# In the case of a loop, the delegated_to host may have been
# templated based on the loop variable, so we try and locate
# the host name in the delegated variable dictionary here
delegated_vars = variables.get('ansible_delegated_vars', dict()).get(task.delegate_to, dict())
delegated_transport = C.DEFAULT_TRANSPORT
for transport_var in C.MAGIC_VARIABLE_MAPPING.get('connection'):
if transport_var in delegated_vars:
delegated_transport = delegated_vars[transport_var]
break
# make sure this delegated_to host has something set for its remote
# address, otherwise we default to connecting to it by name. This
# may happen when users put an IP entry into their inventory, or if
# they rely on DNS for a non-inventory hostname
for address_var in ('ansible_%s_host' % delegated_transport,) + C.MAGIC_VARIABLE_MAPPING.get('remote_addr'):
if address_var in delegated_vars:
break
else:
display.debug("no remote address found for delegated host %s\nusing its name, so success depends on DNS resolution" % task.delegate_to)
delegated_vars['ansible_host'] = task.delegate_to
# reset the port back to the default if none was specified, to prevent
# the delegated host from inheriting the original host's setting
for port_var in ('ansible_%s_port' % delegated_transport,) + C.MAGIC_VARIABLE_MAPPING.get('port'):
if port_var in delegated_vars:
break
else:
if delegated_transport == 'winrm':
delegated_vars['ansible_port'] = 5986
else:
delegated_vars['ansible_port'] = C.DEFAULT_REMOTE_PORT
# and likewise for the remote user
for user_var in ('ansible_%s_user' % delegated_transport,) + C.MAGIC_VARIABLE_MAPPING.get('remote_user'):
if user_var in delegated_vars and delegated_vars[user_var]:
break
else:
delegated_vars['ansible_user'] = task.remote_user or self.remote_user
else:
delegated_vars = dict()
# setup shell
for exe_var in C.MAGIC_VARIABLE_MAPPING.get('executable'):
if exe_var in variables:
setattr(new_info, 'executable', variables.get(exe_var))
attrs_considered = []
for (attr, variable_names) in C.MAGIC_VARIABLE_MAPPING.items():
for variable_name in variable_names:
if attr in attrs_considered:
continue
# if delegation task ONLY use delegated host vars, avoid delegated FOR host vars
if task.delegate_to is not None:
if isinstance(delegated_vars, dict) and variable_name in delegated_vars:
setattr(new_info, attr, delegated_vars[variable_name])
attrs_considered.append(attr)
elif variable_name in variables:
setattr(new_info, attr, variables[variable_name])
attrs_considered.append(attr)
# no else, as no other vars should be considered
# become legacy updates -- from inventory file (inventory overrides
# commandline)
for become_pass_name in C.MAGIC_VARIABLE_MAPPING.get('become_pass'):
if become_pass_name in variables:
break
# make sure we get port defaults if needed
if new_info.port is None and C.DEFAULT_REMOTE_PORT is not None:
new_info.port = int(C.DEFAULT_REMOTE_PORT)
# special overrides for the connection setting
if len(delegated_vars) > 0:
# in the event that we were using local before make sure to reset the
# connection type to the default transport for the delegated-to host,
# if not otherwise specified
for connection_type in C.MAGIC_VARIABLE_MAPPING.get('connection'):
if connection_type in delegated_vars:
break
else:
remote_addr_local = new_info.remote_addr in C.LOCALHOST
inv_hostname_local = delegated_vars.get('inventory_hostname') in C.LOCALHOST
if remote_addr_local and inv_hostname_local:
setattr(new_info, 'connection', 'local')
elif getattr(new_info, 'connection', None) == 'local' and (not remote_addr_local or not inv_hostname_local):
setattr(new_info, 'connection', C.DEFAULT_TRANSPORT)
# we store original in 'connection_user' for use of network/other modules that fallback to it as login user
# connection_user to be deprecated once connection=local is removed for, as local resets remote_user
if new_info.connection == 'local':
if not new_info.connection_user:
new_info.connection_user = new_info.remote_user
# for case in which connection plugin still uses pc.remote_addr and in it's own options
# specifies 'default: inventory_hostname', but never added to vars:
if new_info.remote_addr == 'inventory_hostname':
new_info.remote_addr = variables.get('inventory_hostname')
display.warning('The "%s" connection plugin has an improperly configured remote target value, '
'forcing "inventory_hostname" templated value instead of the string' % new_info.connection)
if task.check_mode is not None:
new_info.check_mode = task.check_mode
if task.diff is not None:
new_info.diff = task.diff
return new_info
def set_become_plugin(self, plugin):
self._become_plugin = plugin
def update_vars(self, variables):
"""
Adds 'magic' variables relating to connections to the variable dictionary provided.
In case users need to access from the play, this is a legacy from runner.
"""
for prop, var_list in C.MAGIC_VARIABLE_MAPPING.items():
try:
if 'become' in prop:
continue
var_val = getattr(self, prop)
for var_opt in var_list:
if var_opt not in variables and var_val is not None:
variables[var_opt] = var_val
except AttributeError:
continue
def deserialize(self, data):
"""Do not use this method. Backward compatibility for network connections plugins that rely on it."""
self.from_attrs(data)
| PlayContext |
python | plotly__plotly.py | plotly/graph_objs/scattercarpet/_hoverlabel.py | {
"start": 233,
"end": 11283
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattercarpet"
_path_str = "scattercarpet.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattercarpet.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scattercarpet.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattercarpet.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattercarpet.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | readthedocs__readthedocs.org | readthedocs/profiles/views.py | {
"start": 6068,
"end": 6612
} | class ____(PrivateViewMixin):
"""User token to access APIv3."""
model = Token
lookup_url_kwarg = "token_pk"
template_name = "profiles/private/token_list.html"
def get_queryset(self):
# NOTE: we are currently showing just one token since the DRF model has
# a OneToOneField relation with User. Although, we plan to have multiple
# scope-based tokens.
return Token.objects.filter(user__in=[self.request.user])
def get_success_url(self):
return reverse("profiles_tokens")
| TokenMixin |
python | kubernetes-client__python | kubernetes/base/watch/watch_test.py | {
"start": 776,
"end": 25694
} | class ____(unittest.TestCase):
def setUp(self):
# counter for a test that needs test global state
self.callcount = 0
def test_watch_with_decode(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=[
'{"type": "ADDED", "object": {"metadata": {"name": "test1",'
'"resourceVersion": "1"}, "spec": {}, "status": {}}}\n',
'{"type": "ADDED", "object": {"metadata": {"name": "test2",'
'"resourceVersion": "2"}, "spec": {}, "sta',
'tus": {}}}\n'
'{"type": "ADDED", "object": {"metadata": {"name": "test3",'
'"resourceVersion": "3"}, "spec": {}, "status": {}}}\n',
'should_not_happened\n'])
fake_api = Mock()
fake_api.get_namespaces = Mock(return_value=fake_resp)
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
w = Watch()
count = 1
for e in w.stream(fake_api.get_namespaces):
self.assertEqual("ADDED", e['type'])
# make sure decoder worked and we got a model with the right name
self.assertEqual("test%d" % count, e['object'].metadata.name)
# make sure decoder worked and updated Watch.resource_version
self.assertEqual(
"%d" % count, e['object'].metadata.resource_version)
self.assertEqual("%d" % count, w.resource_version)
count += 1
# make sure we can stop the watch and the last event with won't be
# returned
if count == 4:
w.stop()
# make sure that all three records were consumed by the stream
self.assertEqual(4, count)
fake_api.get_namespaces.assert_called_once_with(
_preload_content=False, watch=True)
fake_resp.stream.assert_called_once_with(
amt=None, decode_content=False)
fake_resp.close.assert_called_once()
fake_resp.release_conn.assert_called_once()
def test_watch_with_interspersed_newlines(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=[
'\n',
'{"type": "ADDED", "object": {"metadata":',
'{"name": "test1","resourceVersion": "1"}}}\n{"type": "ADDED", ',
'"object": {"metadata": {"name": "test2", "resourceVersion": "2"}}}\n',
'\n',
'',
'{"type": "ADDED", "object": {"metadata": {"name": "test3", "resourceVersion": "3"}}}\n',
'\n\n\n',
'\n',
])
fake_api = Mock()
fake_api.get_namespaces = Mock(return_value=fake_resp)
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
w = Watch()
count = 0
# Consume all test events from the mock service, stopping when no more data is available.
# Note that "timeout_seconds" below is not a timeout; rather, it disables retries and is
# the only way to do so. Without that, the stream will re-read the test data forever.
for e in w.stream(fake_api.get_namespaces, timeout_seconds=1):
# Here added a statement for exception for empty lines.
if e is None:
continue
count += 1
self.assertEqual("test%d" % count, e['object'].metadata.name)
self.assertEqual(3, count)
def test_watch_with_multibyte_utf8(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=[
# two-byte utf-8 character
'{"type":"MODIFIED","object":{"data":{"utf-8":"© 1"},"metadata":{"name":"test1","resourceVersion":"1"}}}\n',
# same copyright character expressed as bytes
b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xC2\xA9 2"},"metadata":{"name":"test2","resourceVersion":"2"}}}\n'
# same copyright character with bytes split across two stream chunks
b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xC2',
b'\xA9 3"},"metadata":{"n',
# more chunks of the same event, sent as a mix of bytes and strings
'ame":"test3","resourceVersion":"3"',
'}}}',
b'\n'
])
fake_api = Mock()
fake_api.get_configmaps = Mock(return_value=fake_resp)
fake_api.get_configmaps.__doc__ = ':return: V1ConfigMapList'
w = Watch()
count = 0
# Consume all test events from the mock service, stopping when no more data is available.
# Note that "timeout_seconds" below is not a timeout; rather, it disables retries and is
# the only way to do so. Without that, the stream will re-read the test data forever.
for event in w.stream(fake_api.get_configmaps, timeout_seconds=1):
count += 1
self.assertEqual("MODIFIED", event['type'])
self.assertEqual("test%d" % count, event['object'].metadata.name)
self.assertEqual("© %d" % count, event['object'].data["utf-8"])
self.assertEqual(
"%d" % count, event['object'].metadata.resource_version)
self.assertEqual("%d" % count, w.resource_version)
self.assertEqual(3, count)
def test_watch_with_invalid_utf8(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
# test 1 uses 1 invalid utf-8 byte
# test 2 uses a sequence of 2 invalid utf-8 bytes
# test 3 uses a sequence of 3 invalid utf-8 bytes
return_value=[
# utf-8 sequence for 😄 is \xF0\x9F\x98\x84
# all other sequences below are invalid
# ref: https://www.w3.org/2001/06/utf-8-wrong/UTF-8-test.html
b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xF0\x9F\x98\x84 1","invalid":"\x80 1"},"metadata":{"name":"test1"}}}\n',
b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xF0\x9F\x98\x84 2","invalid":"\xC0\xAF 2"},"metadata":{"name":"test2"}}}\n',
# mix bytes/strings and split byte sequences across chunks
b'{"type":"MODIFIED","object":{"data":{"utf-8":"\xF0\x9F\x98',
b'\x84 ',
b'',
b'3","invalid":"\xE0\x80',
b'\xAF ',
'3"},"metadata":{"n',
'ame":"test3"',
'}}}',
b'\n'
])
fake_api = Mock()
fake_api.get_configmaps = Mock(return_value=fake_resp)
fake_api.get_configmaps.__doc__ = ':return: V1ConfigMapList'
w = Watch()
count = 0
# Consume all test events from the mock service, stopping when no more data is available.
# Note that "timeout_seconds" below is not a timeout; rather, it disables retries and is
# the only way to do so. Without that, the stream will re-read the test data forever.
for event in w.stream(fake_api.get_configmaps, timeout_seconds=1):
count += 1
self.assertEqual("MODIFIED", event['type'])
self.assertEqual("test%d" % count, event['object'].metadata.name)
self.assertEqual("😄 %d" % count, event['object'].data["utf-8"])
# expect N replacement characters in test N
self.assertEqual("� %d".replace('�', '�'*count) %
count, event['object'].data["invalid"])
self.assertEqual(3, count)
def test_watch_for_follow(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=[
'log_line_1\n',
'log_line_2\n'])
fake_api = Mock()
fake_api.read_namespaced_pod_log = Mock(return_value=fake_resp)
fake_api.read_namespaced_pod_log.__doc__ = ':param bool follow:\n:return: str'
w = Watch()
count = 1
for e in w.stream(fake_api.read_namespaced_pod_log):
self.assertEqual("log_line_1", e)
count += 1
# make sure we can stop the watch and the last event with won't be
# returned
if count == 2:
w.stop()
fake_api.read_namespaced_pod_log.assert_called_once_with(
_preload_content=False, follow=True)
fake_resp.stream.assert_called_once_with(
amt=None, decode_content=False)
fake_resp.close.assert_called_once()
fake_resp.release_conn.assert_called_once()
def test_watch_resource_version_set(self):
# https://github.com/kubernetes-client/python/issues/700
# ensure watching from a resource version does reset to resource
# version 0 after k8s resets the watch connection
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
values = [
'{"type": "ADDED", "object": {"metadata": {"name": "test1",'
'"resourceVersion": "1"}, "spec": {}, "status": {}}}\n',
'{"type": "ADDED", "object": {"metadata": {"name": "test2",'
'"resourceVersion": "2"}, "spec": {}, "sta',
'tus": {}}}\n'
'{"type": "ADDED", "object": {"metadata": {"name": "test3",'
'"resourceVersion": "3"}, "spec": {}, "status": {}}}\n'
]
# return nothing on the first call and values on the second
# this emulates a watch from a rv that returns nothing in the first k8s
# watch reset and values later
def get_values(*args, **kwargs):
self.callcount += 1
if self.callcount == 1:
return []
else:
return values
fake_resp.stream = Mock(
side_effect=get_values)
fake_api = Mock()
fake_api.get_namespaces = Mock(return_value=fake_resp)
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
w = Watch()
# ensure we keep our requested resource version or the version latest
# returned version when the existing versions are older than the
# requested version
# needed for the list existing objects, then watch from there use case
calls = []
iterations = 2
# first two calls must use the passed rv, the first call is a
# "reset" and does not actually return anything
# the second call must use the same rv but will return values
# (with a wrong rv but a real cluster would behave correctly)
# calls following that will use the rv from those returned values
calls.append(call(_preload_content=False, watch=True,
resource_version="5"))
calls.append(call(_preload_content=False, watch=True,
resource_version="5"))
for i in range(iterations):
# ideally we want 5 here but as rv must be treated as an
# opaque value we cannot interpret it and order it so rely
# on k8s returning the events completely and in order
calls.append(call(_preload_content=False, watch=True,
resource_version="3"))
for c, e in enumerate(w.stream(fake_api.get_namespaces,
resource_version="5")):
if c == len(values) * iterations:
w.stop()
# check calls are in the list, gives good error output
fake_api.get_namespaces.assert_has_calls(calls)
# more strict test with worse error message
self.assertEqual(fake_api.get_namespaces.mock_calls, calls)
def test_watch_stream_twice(self):
w = Watch(float)
for step in ['first', 'second']:
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=['{"type": "ADDED", "object": 1}\n'] * 4)
fake_api = Mock()
fake_api.get_namespaces = Mock(return_value=fake_resp)
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
count = 1
for e in w.stream(fake_api.get_namespaces):
count += 1
if count == 3:
w.stop()
self.assertEqual(count, 3)
fake_api.get_namespaces.assert_called_once_with(
_preload_content=False, watch=True)
fake_resp.stream.assert_called_once_with(
amt=None, decode_content=False)
fake_resp.close.assert_called_once()
fake_resp.release_conn.assert_called_once()
def test_watch_stream_loop(self):
w = Watch(float)
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=['{"type": "ADDED", "object": 1}\n'])
fake_api = Mock()
fake_api.get_namespaces = Mock(return_value=fake_resp)
fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
count = 0
# when timeout_seconds is set, auto-exist when timeout reaches
for e in w.stream(fake_api.get_namespaces, timeout_seconds=1):
count = count + 1
self.assertEqual(count, 1)
# when no timeout_seconds, only exist when w.stop() is called
for e in w.stream(fake_api.get_namespaces):
count = count + 1
if count == 2:
w.stop()
self.assertEqual(count, 2)
self.assertEqual(fake_api.get_namespaces.call_count, 2)
self.assertEqual(fake_resp.stream.call_count, 2)
self.assertEqual(fake_resp.close.call_count, 2)
self.assertEqual(fake_resp.release_conn.call_count, 2)
def test_unmarshal_with_float_object(self):
w = Watch()
event = w.unmarshal_event('{"type": "ADDED", "object": 1}', 'float')
self.assertEqual("ADDED", event['type'])
self.assertEqual(1.0, event['object'])
self.assertTrue(isinstance(event['object'], float))
self.assertEqual(1, event['raw_object'])
def test_unmarshal_with_no_return_type(self):
w = Watch()
event = w.unmarshal_event('{"type": "ADDED", "object": ["test1"]}',
None)
self.assertEqual("ADDED", event['type'])
self.assertEqual(["test1"], event['object'])
self.assertEqual(["test1"], event['raw_object'])
def test_unmarshal_with_custom_object(self):
w = Watch()
event = w.unmarshal_event('{"type": "ADDED", "object": {"apiVersion":'
'"test.com/v1beta1","kind":"foo","metadata":'
'{"name": "bar", "resourceVersion": "1"}}}',
'object')
self.assertEqual("ADDED", event['type'])
# make sure decoder deserialized json into dictionary and updated
# Watch.resource_version
self.assertTrue(isinstance(event['object'], dict))
self.assertEqual("1", event['object']['metadata']['resourceVersion'])
self.assertEqual("1", w.resource_version)
def test_unmarshal_with_bookmark(self):
w = Watch()
event = w.unmarshal_event(
'{"type":"BOOKMARK","object":{"kind":"Job","apiVersion":"batch/v1"'
',"metadata":{"resourceVersion":"1"},"spec":{"template":{'
'"metadata":{},"spec":{"containers":null}}},"status":{}}}',
'V1Job')
self.assertEqual("BOOKMARK", event['type'])
# Watch.resource_version is *not* updated, as BOOKMARK is treated the
# same as ERROR for a quick fix of decoding exception,
# resource_version in BOOKMARK is *not* used at all.
self.assertEqual(None, w.resource_version)
def test_watch_with_exception(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(side_effect=KeyError('expected'))
fake_api = Mock()
fake_api.get_thing = Mock(return_value=fake_resp)
w = Watch()
try:
for _ in w.stream(fake_api.get_thing):
self.fail(self, "Should fail on exception.")
except KeyError:
pass
# expected
fake_api.get_thing.assert_called_once_with(
_preload_content=False, watch=True)
fake_resp.stream.assert_called_once_with(
amt=None, decode_content=False)
fake_resp.close.assert_called_once()
fake_resp.release_conn.assert_called_once()
def test_watch_with_error_event(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=[
'{"type": "ERROR", "object": {"code": 410, '
'"reason": "Gone", "message": "error message"}}\n'])
fake_api = Mock()
fake_api.get_thing = Mock(return_value=fake_resp)
w = Watch()
# No events are generated when no initial resourceVersion is passed
# No retry is attempted either, preventing an ApiException
assert not list(w.stream(fake_api.get_thing))
fake_api.get_thing.assert_called_once_with(
_preload_content=False, watch=True)
fake_resp.stream.assert_called_once_with(
amt=None, decode_content=False)
fake_resp.close.assert_called_once()
fake_resp.release_conn.assert_called_once()
def test_watch_retries_on_error_event(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=[
'{"type": "ERROR", "object": {"code": 410, '
'"reason": "Gone", "message": "error message"}}\n'])
fake_api = Mock()
fake_api.get_thing = Mock(return_value=fake_resp)
w = Watch()
try:
for _ in w.stream(fake_api.get_thing, resource_version=0):
self.fail(self, "Should fail with ApiException.")
except client.rest.ApiException:
pass
# Two calls should be expected during a retry
fake_api.get_thing.assert_has_calls(
[call(resource_version=0, _preload_content=False, watch=True)] * 2)
fake_resp.stream.assert_has_calls(
[call(amt=None, decode_content=False)] * 2)
assert fake_resp.close.call_count == 2
assert fake_resp.release_conn.call_count == 2
def test_watch_with_error_event_and_timeout_param(self):
fake_resp = Mock()
fake_resp.close = Mock()
fake_resp.release_conn = Mock()
fake_resp.stream = Mock(
return_value=[
'{"type": "ERROR", "object": {"code": 410, '
'"reason": "Gone", "message": "error message"}}\n'])
fake_api = Mock()
fake_api.get_thing = Mock(return_value=fake_resp)
w = Watch()
try:
for _ in w.stream(fake_api.get_thing, timeout_seconds=10):
self.fail(self, "Should fail with ApiException.")
except client.rest.ApiException:
pass
fake_api.get_thing.assert_called_once_with(
_preload_content=False, watch=True, timeout_seconds=10)
fake_resp.stream.assert_called_once_with(
amt=None, decode_content=False)
fake_resp.close.assert_called_once()
fake_resp.release_conn.assert_called_once()
@classmethod
def setUpClass(cls):
cls.api = Mock()
cls.namespace = "default"
def test_pod_log_empty_lines(self):
pod_name = "demo-bug"
try:
self.api.create_namespaced_pod = Mock()
self.api.read_namespaced_pod = Mock()
self.api.delete_namespaced_pod = Mock()
self.api.read_namespaced_pod_log = Mock()
#pod creating step
self.api.create_namespaced_pod.return_value = None
#Checking pod status
mock_pod = Mock()
mock_pod.status.phase = "Running"
self.api.read_namespaced_pod.return_value = mock_pod
# Printing at pod output
self.api.read_namespaced_pod_log.return_value = iter(["Hello from Docker\n"])
# Wait for the pod to reach 'Running'
timeout = 60
start_time = time.time()
while time.time() - start_time < timeout:
pod = self.api.read_namespaced_pod(name=pod_name, namespace=self.namespace)
if pod.status.phase == "Running":
break
time.sleep(2)
else:
self.fail("Pod did not reach 'Running' state within timeout")
# Reading and streaming logs using Watch (mocked)
w = Watch()
log_output = []
#Mock logs used for this test
w.stream = Mock(return_value=[
"Hello from Docker",
"",
"",
"\n\n",
"Another log line",
"",
"\n",
"Final log"
])
for event in w.stream(self.api.read_namespaced_pod_log, name=pod_name, namespace=self.namespace, follow=True):
log_output.append(event)
print(event)
# Print outputs
print(f"Captured logs: {log_output}")
# self.assertTrue(any("Hello from Docker" in line for line in log_output))
# self.assertTrue(any(line.strip() == "" for line in log_output), "No empty lines found in logs")
expected_log = [
"Hello from Docker",
"",
"",
"\n\n",
"Another log line",
"",
"\n",
"Final log"
]
self.assertEqual(log_output, expected_log, "Captured logs do not match expected logs")
except ApiException as e:
self.fail(f"Kubernetes API exception: {e}")
finally:
#checking pod is calling for delete
self.api.delete_namespaced_pod(name=pod_name, namespace=self.namespace)
self.api.delete_namespaced_pod.assert_called_once_with(name=pod_name, namespace=self.namespace)
# Comment out the test below, it does not work currently.
# def test_watch_with_deserialize_param(self):
# """test watch.stream() deserialize param"""
# # prepare test data
# test_json = '{"type": "ADDED", "object": {"metadata": {"name": "test1", "resourceVersion": "1"}, "spec": {}, "status": {}}}'
# fake_resp = Mock()
# fake_resp.close = Mock()
# fake_resp.release_conn = Mock()
# fake_resp.stream = Mock(return_value=[test_json + '\n'])
#
# fake_api = Mock()
# fake_api.get_namespaces = Mock(return_value=fake_resp)
# fake_api.get_namespaces.__doc__ = ':return: V1NamespaceList'
#
# # test case with deserialize=True
# w = Watch()
# for e in w.stream(fake_api.get_namespaces, deserialize=True):
# self.assertEqual("ADDED", e['type'])
# # Verify that the object is deserialized correctly
# self.assertTrue(hasattr(e['object'], 'metadata'))
# self.assertEqual("test1", e['object'].metadata.name)
# self.assertEqual("1", e['object'].metadata.resource_version)
# # Verify that the original object is saved
# self.assertEqual(json.loads(test_json)['object'], e['raw_object'])
#
# # test case with deserialize=False
# w = Watch()
# for e in w.stream(fake_api.get_namespaces, deserialize=False):
# self.assertEqual("ADDED", e['type'])
# # The validation object remains in the original dictionary format
# self.assertIsInstance(e['object'], dict)
# self.assertEqual("test1", e['object']['metadata']['name'])
# self.assertEqual("1", e['object']['metadata']['resourceVersion'])
#
# # verify the api is called twice
# fake_api.get_namespaces.assert_has_calls([
# call(_preload_content=False, watch=True),
# call(_preload_content=False, watch=True)
# ])
if __name__ == '__main__':
unittest.main()
| WatchTests |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 72196,
"end": 73534
} | class ____(Operation):
def __init__(self, axis=None, dtype=None, *, name=None):
super().__init__(name=name)
self.axis = axis
self.dtype = None if dtype is None else backend.standardize_dtype(dtype)
def call(self, x):
return backend.numpy.cumprod(x, axis=self.axis, dtype=self.dtype)
def compute_output_spec(self, x):
if self.axis is None:
if None in x.shape:
output_shape = (None,)
else:
output_shape = (int(np.prod(x.shape)),)
else:
output_shape = x.shape
output_dtype = (
backend.standardize_dtype(x.dtype)
if self.dtype is None
else self.dtype
)
if output_dtype == "bool":
output_dtype = "int32"
return KerasTensor(output_shape, output_dtype)
@keras_export(["keras.ops.cumprod", "keras.ops.numpy.cumprod"])
def cumprod(x, axis=None, dtype=None):
"""Return the cumulative product of elements along a given axis.
Args:
x: Input tensor.
axis: Axis along which the cumulative product is computed.
By default the input is flattened.
dtype: dtype of returned tensor. Defaults to x.dtype.
Returns:
Output tensor.
"""
return Cumprod(axis=axis, dtype=dtype)(x)
| Cumprod |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/auth/managers/models/batch_apis.py | {
"start": 1634,
"end": 1837
} | class ____(TypedDict, total=False):
"""Represent the parameters of ``is_authorized_pool`` API in the auth manager."""
method: ResourceMethod
details: PoolDetails | None
| IsAuthorizedPoolRequest |
python | google__flatbuffers | tests/MyGame/Example/NestedUnion/Vec3.py | {
"start": 280,
"end": 3847
} | class ____(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset: int = 0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Vec3()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsVec3(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# Vec3
def Init(self, buf: bytes, pos: int):
self._tab = flatbuffers.table.Table(buf, pos)
# Vec3
def X(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# Vec3
def Y(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# Vec3
def Z(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# Vec3
def Test1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# Vec3
def Test2(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Vec3
def Test3(self) -> Optional[Test]:
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
x = o + self._tab.Pos
obj = Test()
obj.Init(self._tab.Bytes, x)
return obj
return None
def Vec3Start(builder: flatbuffers.Builder):
builder.StartObject(6)
def Start(builder: flatbuffers.Builder):
Vec3Start(builder)
def Vec3AddX(builder: flatbuffers.Builder, x: float):
builder.PrependFloat64Slot(0, x, 0.0)
def AddX(builder: flatbuffers.Builder, x: float):
Vec3AddX(builder, x)
def Vec3AddY(builder: flatbuffers.Builder, y: float):
builder.PrependFloat64Slot(1, y, 0.0)
def AddY(builder: flatbuffers.Builder, y: float):
Vec3AddY(builder, y)
def Vec3AddZ(builder: flatbuffers.Builder, z: float):
builder.PrependFloat64Slot(2, z, 0.0)
def AddZ(builder: flatbuffers.Builder, z: float):
Vec3AddZ(builder, z)
def Vec3AddTest1(builder: flatbuffers.Builder, test1: float):
builder.PrependFloat64Slot(3, test1, 0.0)
def AddTest1(builder: flatbuffers.Builder, test1: float):
Vec3AddTest1(builder, test1)
def Vec3AddTest2(builder: flatbuffers.Builder, test2: int):
builder.PrependUint8Slot(4, test2, 0)
def AddTest2(builder: flatbuffers.Builder, test2: int):
Vec3AddTest2(builder, test2)
def Vec3AddTest3(builder: flatbuffers.Builder, test3: Any):
builder.PrependStructSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(test3), 0)
def AddTest3(builder: flatbuffers.Builder, test3: Any):
Vec3AddTest3(builder, test3)
def Vec3End(builder: flatbuffers.Builder) -> int:
return builder.EndObject()
def End(builder: flatbuffers.Builder) -> int:
return Vec3End(builder)
import MyGame.Example.NestedUnion.Test
try:
from typing import Optional
except:
pass
| Vec3 |
python | sqlalchemy__sqlalchemy | test/sql/test_defaults.py | {
"start": 42312,
"end": 46806
} | class ____(fixtures.TestBase):
__sparse_driver_backend__ = True
@testing.provide_metadata
def test_string_default_none_on_insert(self, connection):
"""Test that without implicit returning, we return None for
a string server default.
That is, we don't want to attempt to pre-execute "server_default"
generically - the user should use a Python side-default for a case
like this. Testing that all backends do the same thing here.
"""
metadata = self.metadata
t = Table(
"x",
metadata,
Column(
"y", String(10), server_default="key_one", primary_key=True
),
Column("data", String(10)),
implicit_returning=False,
)
metadata.create_all(connection)
r = connection.execute(t.insert(), dict(data="data"))
eq_(r.inserted_primary_key, (None,))
eq_(list(connection.execute(t.select())), [("key_one", "data")])
@testing.requires.insert_returning
@testing.provide_metadata
def test_string_default_on_insert_with_returning(self, connection):
"""With implicit_returning, we get a string PK default back no
problem."""
metadata = self.metadata
t = Table(
"x",
metadata,
Column(
"y", String(10), server_default="key_one", primary_key=True
),
Column("data", String(10)),
)
metadata.create_all(connection)
r = connection.execute(t.insert(), dict(data="data"))
eq_(r.inserted_primary_key, ("key_one",))
eq_(list(connection.execute(t.select())), [("key_one", "data")])
@testing.provide_metadata
def test_int_default_none_on_insert(self, connection):
metadata = self.metadata
t = Table(
"x",
metadata,
Column("y", Integer, server_default="5", primary_key=True),
Column("data", String(10)),
implicit_returning=False,
)
assert t._autoincrement_column is None
metadata.create_all(connection)
r = connection.execute(t.insert(), dict(data="data"))
eq_(r.inserted_primary_key, (None,))
if testing.against("sqlite"):
eq_(list(connection.execute(t.select())), [(1, "data")])
else:
eq_(list(connection.execute(t.select())), [(5, "data")])
@testing.provide_metadata
def test_autoincrement_reflected_from_server_default(self, connection):
metadata = self.metadata
t = Table(
"x",
metadata,
Column("y", Integer, server_default="5", primary_key=True),
Column("data", String(10)),
implicit_returning=False,
)
assert t._autoincrement_column is None
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("x", m2, autoload_with=connection, implicit_returning=False)
assert t2._autoincrement_column is None
@testing.provide_metadata
def test_int_default_none_on_insert_reflected(self, connection):
metadata = self.metadata
Table(
"x",
metadata,
Column("y", Integer, server_default="5", primary_key=True),
Column("data", String(10)),
implicit_returning=False,
)
metadata.create_all(connection)
m2 = MetaData()
t2 = Table("x", m2, autoload_with=connection, implicit_returning=False)
r = connection.execute(t2.insert(), dict(data="data"))
eq_(r.inserted_primary_key, (None,))
if testing.against("sqlite"):
eq_(list(connection.execute(t2.select())), [(1, "data")])
else:
eq_(list(connection.execute(t2.select())), [(5, "data")])
@testing.requires.insert_returning
@testing.provide_metadata
@testing.fails_on("sqlite", "sqlite doesn't like our default trick here")
def test_int_default_on_insert_with_returning(self, connection):
metadata = self.metadata
t = Table(
"x",
metadata,
Column("y", Integer, server_default="5", primary_key=True),
Column("data", String(10)),
)
metadata.create_all(connection)
r = connection.execute(t.insert(), dict(data="data"))
eq_(r.inserted_primary_key, (5,))
eq_(list(connection.execute(t.select())), [(5, "data")])
| ServerDefaultsOnPKTest |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 26553,
"end": 26714
} | class ____(strip_text_x, strip_text_y):
"""
Facet labels along both axes
Parameters
----------
theme_element : element_text
"""
| strip_text |
python | readthedocs__readthedocs.org | readthedocs/projects/tests/test_build_tasks.py | {
"start": 2541,
"end": 5326
} | class ____(BuildEnvironmentBase):
# Relative path to where a custom config file is assumed to exist in repo
config_file_name = "unique.yaml"
def _get_project(self):
return fixture.get(
Project,
slug="project",
readthedocs_yaml_path=self.config_file_name,
)
@mock.patch("readthedocs.doc_builder.director.load_yaml_config")
@mock.patch("readthedocs.doc_builder.director.BuildDirector.build_docs_class")
def test_config_is_stored(self, build_docs_class, load_yaml_config):
"""Test that a custom config file is stored"""
# We add the PDF format to this config so we can check that the
# config file is in use
config = get_build_config(
{
"version": 2,
"formats": ["pdf"],
"sphinx": {
"configuration": "docs/conf.py",
},
},
source_file=self.config_file_name,
validate=True,
)
load_yaml_config.return_value = config
build_docs_class.return_value = True # success
assert not BuildData.objects.all().exists()
self._trigger_update_docs_task()
# Assert that the director tries to load the custom config file
load_yaml_config.assert_called_once_with(
version=mock.ANY, readthedocs_yaml_path=self.config_file_name
)
# Assert that we are building a PDF, since that is what our custom config file says
build_docs_class.assert_called_with("sphinx_pdf")
@mock.patch("readthedocs.core.utils.filesystem.assert_path_is_inside_docroot")
@mock.patch("readthedocs.doc_builder.director.BuildDirector.build_docs_class")
def test_config_file_is_loaded(
self, build_docs_class, assert_path_is_inside_docroot
):
"""Test that a custom config file is loaded
The readthedocs_yaml_path field on Project should be loading the file that we add
to the repo."""
# While testing, we are unsure if temporary test files exist in the docroot
assert_path_is_inside_docroot.return_value = True
self.mocker.add_file_in_repo_checkout(
self.config_file_name,
textwrap.dedent(
"""
version: 2
build:
os: "ubuntu-22.04"
tools:
python: "3"
formats: [pdf]
sphinx:
configuration: docs/conf.py
"""
),
)
self._trigger_update_docs_task()
# Assert that we are building a PDF, since that is what our custom config file says
build_docs_class.assert_called_with("sphinx_pdf")
| TestCustomConfigFile |
python | langchain-ai__langchain | libs/core/tests/unit_tests/stores/test_in_memory.py | {
"start": 245,
"end": 533
} | class ____(BaseStoreSyncTests):
@pytest.fixture
@override
def kv_store(self) -> InMemoryStore:
return InMemoryStore()
@pytest.fixture
@override
def three_values(self) -> tuple[str, str, str]:
return "value1", "value2", "value3"
| TestSyncInMemoryStore |
python | catalyst-team__catalyst | catalyst/contrib/datasets/imagewang.py | {
"start": 75,
"end": 469
} | class ____(ImageClassificationDataset):
"""
`Imagewang <https://github.com/fastai/imagenette#image%E7%BD%91>`_ Dataset.
.. note::
catalyst[cv] required for this dataset.
"""
name = "imagewang"
resources = [
(
"https://s3.amazonaws.com/fast-ai-imageclas/imagewang.tgz",
"46f9749616a29837e7cd67b103396f6e",
)
]
| Imagewang |
python | lepture__authlib | authlib/jose/rfc7518/jwe_encs.py | {
"start": 3215,
"end": 5093
} | class ____(JWEEncAlgorithm):
# Use of an IV of size 96 bits is REQUIRED with this algorithm.
# https://tools.ietf.org/html/rfc7518#section-5.3
IV_SIZE = 96
def __init__(self, key_size):
self.name = f"A{key_size}GCM"
self.description = f"AES GCM using {key_size}-bit key"
self.key_size = key_size
self.CEK_SIZE = key_size
def encrypt(self, msg, aad, iv, key):
"""Key Encryption with AES GCM.
:param msg: text to be encrypt in bytes
:param aad: additional authenticated data in bytes
:param iv: initialization vector in bytes
:param key: encrypted key in bytes
:return: (ciphertext, iv, tag)
"""
self.check_iv(iv)
cipher = Cipher(AES(key), GCM(iv), backend=default_backend())
enc = cipher.encryptor()
enc.authenticate_additional_data(aad)
ciphertext = enc.update(msg) + enc.finalize()
return ciphertext, enc.tag
def decrypt(self, ciphertext, aad, iv, tag, key):
"""Key Decryption with AES GCM.
:param ciphertext: ciphertext in bytes
:param aad: additional authenticated data in bytes
:param iv: initialization vector in bytes
:param tag: authentication tag in bytes
:param key: encrypted key in bytes
:return: message
"""
self.check_iv(iv)
cipher = Cipher(AES(key), GCM(iv, tag), backend=default_backend())
d = cipher.decryptor()
d.authenticate_additional_data(aad)
return d.update(ciphertext) + d.finalize()
JWE_ENC_ALGORITHMS = [
CBCHS2EncAlgorithm(128, 256), # A128CBC-HS256
CBCHS2EncAlgorithm(192, 384), # A192CBC-HS384
CBCHS2EncAlgorithm(256, 512), # A256CBC-HS512
GCMEncAlgorithm(128), # A128GCM
GCMEncAlgorithm(192), # A192GCM
GCMEncAlgorithm(256), # A256GCM
]
| GCMEncAlgorithm |
python | numba__numba | numba/core/types/functions.py | {
"start": 24582,
"end": 25215
} | class ____(Callable, Opaque):
"""
Type class for namedtuple classes.
"""
def __init__(self, instance_class):
self.instance_class = instance_class
name = "class(%s)" % (instance_class)
super(NamedTupleClass, self).__init__(name)
def get_call_type(self, context, args, kws):
# Overridden by the __call__ constructor resolution in
# typing.collections
return None
def get_call_signatures(self):
return (), True
def get_impl_key(self, sig):
return type(self)
@property
def key(self):
return self.instance_class
| NamedTupleClass |
python | pytorch__pytorch | test/inductor/test_split_cat_fx_aten_passes.py | {
"start": 518,
"end": 1462
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x: torch.Tensor, y: torch.Tensor, z: torch.Tensor):
cat = torch.ops.aten.cat.default([x, y], 1)
split = torch.ops.aten.split.Tensor(cat, 32, 1)
getitem = split[0]
getitem_1 = split[1]
getitem_2 = split[2]
getitem_3 = split[3]
getitem_4 = split[4]
getitem_5 = split[5]
getitem_6 = split[6]
getitem_7 = split[7]
cat_1 = torch.ops.aten.cat.default(
[
getitem,
getitem_1,
getitem_2,
getitem_3,
getitem_4,
getitem_5,
getitem_6,
getitem_7,
],
1,
)
cat_2 = torch.ops.aten.cat.default([getitem, z], 1)
return torch.ops.aten.cat.default([cat_1, cat_2], 1)
| TestSplitCat |
python | tiangolo__fastapi | docs_src/schema_extra_example/tutorial005_an_py310.py | {
"start": 114,
"end": 1523
} | class ____(BaseModel):
name: str
description: str | None = None
price: float
tax: float | None = None
@app.put("/items/{item_id}")
async def update_item(
*,
item_id: int,
item: Annotated[
Item,
Body(
openapi_examples={
"normal": {
"summary": "A normal example",
"description": "A **normal** item works correctly.",
"value": {
"name": "Foo",
"description": "A very nice Item",
"price": 35.4,
"tax": 3.2,
},
},
"converted": {
"summary": "An example with converted data",
"description": "FastAPI can convert price `strings` to actual `numbers` automatically",
"value": {
"name": "Bar",
"price": "35.4",
},
},
"invalid": {
"summary": "Invalid data is rejected with an error",
"value": {
"name": "Baz",
"price": "thirty five point four",
},
},
},
),
],
):
results = {"item_id": item_id, "item": item}
return results
| Item |
python | doocs__leetcode | solution/2900-2999/2961.Double Modular Exponentiation/Solution.py | {
"start": 0,
"end": 247
} | class ____:
def getGoodIndices(self, variables: List[List[int]], target: int) -> List[int]:
return [
i
for i, (a, b, c, m) in enumerate(variables)
if pow(pow(a, b, 10), c, m) == target
]
| Solution |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 48587,
"end": 49195
} | class ____(system_info):
section = 'armpl'
dir_env_var = 'ARMPL_DIR'
_lib_armpl = ['armpl_lp64_mp']
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
armpl_libs = self.get_libs('armpl_libs', self._lib_armpl)
info = self.check_libs2(lib_dirs, armpl_libs)
if info is None:
return
dict_append(info,
define_macros=[('SCIPY_MKL_H', None),
('HAVE_CBLAS', None)],
include_dirs=incl_dirs)
self.set_info(**info)
| armpl_info |
python | numba__numba | numba/cuda/codegen.py | {
"start": 1502,
"end": 11350
} | class ____(serialize.ReduceMixin, CodeLibrary):
"""
The CUDACodeLibrary generates PTX, SASS, cubins for multiple different
compute capabilities. It also loads cubins to multiple devices (via
get_cufunc), which may be of different compute capabilities.
"""
def __init__(self, codegen, name, entry_name=None, max_registers=None,
nvvm_options=None):
"""
codegen:
Codegen object.
name:
Name of the function in the source.
entry_name:
Name of the kernel function in the binary, if this is a global
kernel and not a device function.
max_registers:
The maximum register usage to aim for when linking.
nvvm_options:
Dict of options to pass to NVVM.
"""
super().__init__(codegen, name)
# The llvmlite module for this library.
self._module = None
# CodeLibrary objects that will be "linked" into this library. The
# modules within them are compiled from NVVM IR to PTX along with the
# IR from this module - in that sense they are "linked" by NVVM at PTX
# generation time, rather than at link time.
self._linking_libraries = set()
# Files to link with the generated PTX. These are linked using the
# Driver API at link time.
self._linking_files = set()
# Should we link libcudadevrt?
self.needs_cudadevrt = False
# Cache the LLVM IR string
self._llvm_strs = None
# Maps CC -> PTX string
self._ptx_cache = {}
# Maps CC -> LTO-IR
self._ltoir_cache = {}
# Maps CC -> cubin
self._cubin_cache = {}
# Maps CC -> linker info output for cubin
self._linkerinfo_cache = {}
# Maps Device numeric ID -> cufunc
self._cufunc_cache = {}
self._max_registers = max_registers
if nvvm_options is None:
nvvm_options = {}
self._nvvm_options = nvvm_options
self._entry_name = entry_name
@property
def llvm_strs(self):
if self._llvm_strs is None:
self._llvm_strs = [str(mod) for mod in self.modules]
return self._llvm_strs
def get_llvm_str(self):
return "\n\n".join(self.llvm_strs)
def _ensure_cc(self, cc):
if cc is not None:
return cc
device = devices.get_context().device
return device.compute_capability
def get_asm_str(self, cc=None):
cc = self._ensure_cc(cc)
ptxes = self._ptx_cache.get(cc, None)
if ptxes:
return ptxes
arch = nvvm.get_arch_option(*cc)
options = self._nvvm_options.copy()
options['arch'] = arch
irs = self.llvm_strs
ptx = nvvm.compile_ir(irs, **options)
# Sometimes the result from NVVM contains trailing whitespace and
# nulls, which we strip so that the assembly dump looks a little
# tidier.
ptx = ptx.decode().strip('\x00').strip()
if config.DUMP_ASSEMBLY:
print(("ASSEMBLY %s" % self._name).center(80, '-'))
print(ptx)
print('=' * 80)
self._ptx_cache[cc] = ptx
return ptx
def get_ltoir(self, cc=None):
cc = self._ensure_cc(cc)
ltoir = self._ltoir_cache.get(cc, None)
if ltoir is not None:
return ltoir
arch = nvvm.get_arch_option(*cc)
options = self._nvvm_options.copy()
options['arch'] = arch
options['gen-lto'] = None
irs = self.llvm_strs
ltoir = nvvm.compile_ir(irs, **options)
self._ltoir_cache[cc] = ltoir
return ltoir
def get_cubin(self, cc=None):
cc = self._ensure_cc(cc)
cubin = self._cubin_cache.get(cc, None)
if cubin:
return cubin
linker = driver.Linker.new(max_registers=self._max_registers, cc=cc)
if linker.lto:
ltoir = self.get_ltoir(cc=cc)
linker.add_ltoir(ltoir)
else:
ptx = self.get_asm_str(cc=cc)
linker.add_ptx(ptx.encode())
for path in self._linking_files:
linker.add_file_guess_ext(path)
if self.needs_cudadevrt:
linker.add_file_guess_ext(get_cudalib('cudadevrt', static=True))
cubin = linker.complete()
self._cubin_cache[cc] = cubin
self._linkerinfo_cache[cc] = linker.info_log
return cubin
def get_cufunc(self):
if self._entry_name is None:
msg = "Missing entry_name - are you trying to get the cufunc " \
"for a device function?"
raise RuntimeError(msg)
ctx = devices.get_context()
device = ctx.device
cufunc = self._cufunc_cache.get(device.id, None)
if cufunc:
return cufunc
cubin = self.get_cubin(cc=device.compute_capability)
module = ctx.create_module_image(cubin)
# Load
cufunc = module.get_function(self._entry_name)
# Populate caches
self._cufunc_cache[device.id] = cufunc
return cufunc
def get_linkerinfo(self, cc):
try:
return self._linkerinfo_cache[cc]
except KeyError:
raise KeyError(f'No linkerinfo for CC {cc}')
def get_sass(self, cc=None):
return disassemble_cubin(self.get_cubin(cc=cc))
def get_sass_cfg(self, cc=None):
return disassemble_cubin_for_cfg(self.get_cubin(cc=cc))
def add_ir_module(self, mod):
self._raise_if_finalized()
if self._module is not None:
raise RuntimeError('CUDACodeLibrary only supports one module')
self._module = mod
def add_linking_library(self, library):
library._ensure_finalized()
# We don't want to allow linking more libraries in after finalization
# because our linked libraries are modified by the finalization, and we
# won't be able to finalize again after adding new ones
self._raise_if_finalized()
self._linking_libraries.add(library)
def add_linking_file(self, filepath):
self._linking_files.add(filepath)
def get_function(self, name):
for fn in self._module.functions:
if fn.name == name:
return fn
raise KeyError(f'Function {name} not found')
@property
def modules(self):
return [self._module] + [mod for lib in self._linking_libraries
for mod in lib.modules]
@property
def linking_libraries(self):
# Libraries we link to may link to other libraries, so we recursively
# traverse the linking libraries property to build up a list of all
# linked libraries.
libs = []
for lib in self._linking_libraries:
libs.extend(lib.linking_libraries)
libs.append(lib)
return libs
def finalize(self):
# Unlike the CPUCodeLibrary, we don't invoke the binding layer here -
# we only adjust the linkage of functions. Global kernels (with
# external linkage) have their linkage untouched. Device functions are
# set linkonce_odr to prevent them appearing in the PTX.
self._raise_if_finalized()
# Note in-place modification of the linkage of functions in linked
# libraries. This presently causes no issues as only device functions
# are shared across code libraries, so they would always need their
# linkage set to linkonce_odr. If in a future scenario some code
# libraries require linkonce_odr linkage of functions in linked
# modules, and another code library requires another linkage, each code
# library will need to take its own private copy of its linked modules.
#
# See also discussion on PR #890:
# https://github.com/numba/numba/pull/890
for library in self._linking_libraries:
for mod in library.modules:
for fn in mod.functions:
if not fn.is_declaration:
fn.linkage = 'linkonce_odr'
self._finalized = True
def _reduce_states(self):
"""
Reduce the instance for serialization. We retain the PTX and cubins,
but loaded functions are discarded. They are recreated when needed
after deserialization.
"""
if self._linking_files:
msg = 'Cannot pickle CUDACodeLibrary with linking files'
raise RuntimeError(msg)
if not self._finalized:
raise RuntimeError('Cannot pickle unfinalized CUDACodeLibrary')
return dict(
codegen=None,
name=self.name,
entry_name=self._entry_name,
llvm_strs=self.llvm_strs,
ptx_cache=self._ptx_cache,
cubin_cache=self._cubin_cache,
linkerinfo_cache=self._linkerinfo_cache,
max_registers=self._max_registers,
nvvm_options=self._nvvm_options,
needs_cudadevrt=self.needs_cudadevrt
)
@classmethod
def _rebuild(cls, codegen, name, entry_name, llvm_strs, ptx_cache,
cubin_cache, linkerinfo_cache, max_registers, nvvm_options,
needs_cudadevrt):
"""
Rebuild an instance.
"""
instance = cls(codegen, name, entry_name=entry_name)
instance._llvm_strs = llvm_strs
instance._ptx_cache = ptx_cache
instance._cubin_cache = cubin_cache
instance._linkerinfo_cache = linkerinfo_cache
instance._max_registers = max_registers
instance._nvvm_options = nvvm_options
instance.needs_cudadevrt = needs_cudadevrt
instance._finalized = True
return instance
| CUDACodeLibrary |
python | networkx__networkx | networkx/algorithms/approximation/tests/test_traveling_salesman.py | {
"start": 9438,
"end": 32048
} | class ____(TestSimulatedAnnealingTSP):
tsp = staticmethod(nx_app.threshold_accepting_tsp)
def test_failure_of_costs_too_high_when_iterations_low(self):
# Threshold Version:
# set number of moves low and number of iterations low
cycle = self.tsp(
self.DG2,
"greedy",
source="D",
move="1-0",
N_inner=1,
max_iterations=1,
seed=4,
)
cost = sum(self.DG2[n][nbr]["weight"] for n, nbr in pairwise(cycle))
assert cost > self.DG2_cost
# set threshold too low
initial_sol = ["D", "A", "B", "C", "D"]
cycle = self.tsp(
self.DG, initial_sol, source="D", move="1-0", threshold=-3, seed=42
)
cost = sum(self.DG[n][nbr]["weight"] for n, nbr in pairwise(cycle))
assert cost > self.DG_cost
# Tests for function traveling_salesman_problem
def test_TSP_method():
G = nx.cycle_graph(9)
G[4][5]["weight"] = 10
# Test using the old currying method
def sa_tsp(G, weight):
return nx_app.simulated_annealing_tsp(G, "greedy", weight, source=4, seed=1)
path = nx_app.traveling_salesman_problem(
G,
method=sa_tsp,
cycle=False,
)
assert path == [4, 3, 2, 1, 0, 8, 7, 6, 5]
def test_TSP_unweighted():
G = nx.cycle_graph(9)
path = nx_app.traveling_salesman_problem(G, nodes=[3, 6], cycle=False)
assert path in ([3, 4, 5, 6], [6, 5, 4, 3])
cycle = nx_app.traveling_salesman_problem(G, nodes=[3, 6])
assert cycle in ([3, 4, 5, 6, 5, 4, 3], [6, 5, 4, 3, 4, 5, 6])
def test_TSP_weighted():
G = nx.cycle_graph(9)
G[0][1]["weight"] = 2
G[1][2]["weight"] = 2
G[2][3]["weight"] = 2
G[3][4]["weight"] = 4
G[4][5]["weight"] = 5
G[5][6]["weight"] = 4
G[6][7]["weight"] = 2
G[7][8]["weight"] = 2
G[8][0]["weight"] = 2
tsp = nx_app.traveling_salesman_problem
# path between 3 and 6
expected_paths = ([3, 2, 1, 0, 8, 7, 6], [6, 7, 8, 0, 1, 2, 3])
# cycle between 3 and 6
expected_cycles = (
[3, 2, 1, 0, 8, 7, 6, 7, 8, 0, 1, 2, 3],
[6, 7, 8, 0, 1, 2, 3, 2, 1, 0, 8, 7, 6],
)
# path through all nodes
expected_tourpaths = ([5, 6, 7, 8, 0, 1, 2, 3, 4], [4, 3, 2, 1, 0, 8, 7, 6, 5])
# Check default method
cycle = tsp(G, nodes=[3, 6], weight="weight")
assert cycle in expected_cycles
path = tsp(G, nodes=[3, 6], weight="weight", cycle=False)
assert path in expected_paths
tourpath = tsp(G, weight="weight", cycle=False)
assert tourpath in expected_tourpaths
# Check all methods
methods = [
(nx_app.christofides, {}),
(nx_app.greedy_tsp, {}),
(
nx_app.simulated_annealing_tsp,
{"init_cycle": "greedy"},
),
(
nx_app.threshold_accepting_tsp,
{"init_cycle": "greedy"},
),
]
for method, kwargs in methods:
cycle = tsp(G, nodes=[3, 6], weight="weight", method=method, **kwargs)
assert cycle in expected_cycles
path = tsp(
G, nodes=[3, 6], weight="weight", method=method, cycle=False, **kwargs
)
assert path in expected_paths
tourpath = tsp(G, weight="weight", method=method, cycle=False, **kwargs)
assert tourpath in expected_tourpaths
def test_TSP_incomplete_graph_short_path():
G = nx.cycle_graph(9)
G.add_edges_from([(4, 9), (9, 10), (10, 11), (11, 0)])
G[4][5]["weight"] = 5
cycle = nx_app.traveling_salesman_problem(G)
assert len(cycle) == 17 and len(set(cycle)) == 12
# make sure that cutting one edge out of complete graph formulation
# cuts out many edges out of the path of the TSP
path = nx_app.traveling_salesman_problem(G, cycle=False)
assert len(path) == 13 and len(set(path)) == 12
def test_TSP_alternate_weight():
G = nx.complete_graph(9)
G[0][1]["weight"] = 2
G[1][2]["weight"] = 2
G[2][3]["weight"] = 2
G[3][4]["weight"] = 4
G[4][5]["weight"] = 5
G[5][6]["weight"] = 4
G[6][7]["weight"] = 2
G[7][8]["weight"] = 2
G[8][0]["weight"] = 2
H = nx.complete_graph(9)
H[0][1]["distance"] = 2
H[1][2]["distance"] = 2
H[2][3]["distance"] = 2
H[3][4]["distance"] = 4
H[4][5]["distance"] = 5
H[5][6]["distance"] = 4
H[6][7]["distance"] = 2
H[7][8]["distance"] = 2
H[8][0]["distance"] = 2
assert nx_app.traveling_salesman_problem(
G, weight="weight"
) == nx_app.traveling_salesman_problem(H, weight="distance")
def test_held_karp_ascent():
"""
Test the Held-Karp relaxation with the ascent method
"""
import networkx.algorithms.approximation.traveling_salesman as tsp
np = pytest.importorskip("numpy")
pytest.importorskip("scipy")
# Adjacency matrix from page 1153 of the 1970 Held and Karp paper
# which have been edited to be directional, but also symmetric
G_array = np.array(
[
[0, 97, 60, 73, 17, 52],
[97, 0, 41, 52, 90, 30],
[60, 41, 0, 21, 35, 41],
[73, 52, 21, 0, 95, 46],
[17, 90, 35, 95, 0, 81],
[52, 30, 41, 46, 81, 0],
]
)
solution_edges = [(1, 3), (2, 4), (3, 2), (4, 0), (5, 1), (0, 5)]
G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
opt_hk, z_star = tsp.held_karp_ascent(G)
# Check that the optimal weights are the same
assert round(opt_hk, 2) == 207.00
# Check that the z_stars are the same
solution = nx.DiGraph()
solution.add_edges_from(solution_edges)
# Use undirected edges for `edges_equal` because the graph is symmetric.
assert nx.utils.edges_equal(z_star.edges, solution.edges)
def test_ascent_fractional_solution():
"""
Test the ascent method using a modified version of Figure 2 on page 1140
in 'The Traveling Salesman Problem and Minimum Spanning Trees' by Held and
Karp
"""
import networkx.algorithms.approximation.traveling_salesman as tsp
np = pytest.importorskip("numpy")
pytest.importorskip("scipy")
# This version of Figure 2 has all of the edge weights multiplied by 100
# and is a complete directed graph with infinite edge weights for the
# edges not listed in the original graph
G_array = np.array(
[
[0, 100, 100, 100000, 100000, 1],
[100, 0, 100, 100000, 1, 100000],
[100, 100, 0, 1, 100000, 100000],
[100000, 100000, 1, 0, 100, 100],
[100000, 1, 100000, 100, 0, 100],
[1, 100000, 100000, 100, 100, 0],
]
)
solution_z_star = {
(0, 1): 5 / 12,
(0, 2): 5 / 12,
(0, 5): 5 / 6,
(1, 0): 5 / 12,
(1, 2): 1 / 3,
(1, 4): 5 / 6,
(2, 0): 5 / 12,
(2, 1): 1 / 3,
(2, 3): 5 / 6,
(3, 2): 5 / 6,
(3, 4): 1 / 3,
(3, 5): 1 / 2,
(4, 1): 5 / 6,
(4, 3): 1 / 3,
(4, 5): 1 / 2,
(5, 0): 5 / 6,
(5, 3): 1 / 2,
(5, 4): 1 / 2,
}
G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
opt_hk, z_star = tsp.held_karp_ascent(G)
# Check that the optimal weights are the same
assert round(opt_hk, 2) == 303.00
# Check that the z_stars are the same
assert {key: round(z_star[key], 4) for key in z_star} == {
key: round(solution_z_star[key], 4) for key in solution_z_star
}
def test_ascent_method_asymmetric():
"""
Tests the ascent method using a truly asymmetric graph for which the
solution has been brute forced
"""
import networkx.algorithms.approximation.traveling_salesman as tsp
np = pytest.importorskip("numpy")
pytest.importorskip("scipy")
G_array = np.array(
[
[0, 26, 63, 59, 69, 31, 41],
[62, 0, 91, 53, 75, 87, 47],
[47, 82, 0, 90, 15, 9, 18],
[68, 19, 5, 0, 58, 34, 93],
[11, 58, 53, 55, 0, 61, 79],
[88, 75, 13, 76, 98, 0, 40],
[41, 61, 55, 88, 46, 45, 0],
]
)
solution_edges = [(0, 1), (1, 3), (3, 2), (2, 5), (5, 6), (4, 0), (6, 4)]
G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
opt_hk, z_star = tsp.held_karp_ascent(G)
# Check that the optimal weights are the same
assert round(opt_hk, 2) == 190.00
# Check that the z_stars match.
solution = nx.DiGraph()
solution.add_edges_from(solution_edges)
assert nx.utils.edges_equal(z_star.edges, solution.edges, directed=True)
def test_ascent_method_asymmetric_2():
"""
Tests the ascent method using a truly asymmetric graph for which the
solution has been brute forced
"""
import networkx.algorithms.approximation.traveling_salesman as tsp
np = pytest.importorskip("numpy")
pytest.importorskip("scipy")
G_array = np.array(
[
[0, 45, 39, 92, 29, 31],
[72, 0, 4, 12, 21, 60],
[81, 6, 0, 98, 70, 53],
[49, 71, 59, 0, 98, 94],
[74, 95, 24, 43, 0, 47],
[56, 43, 3, 65, 22, 0],
]
)
solution_edges = [(0, 5), (5, 4), (1, 3), (3, 0), (2, 1), (4, 2)]
G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
opt_hk, z_star = tsp.held_karp_ascent(G)
# Check that the optimal weights are the same
assert round(opt_hk, 2) == 144.00
# Check that the z_stars match.
solution = nx.DiGraph()
solution.add_edges_from(solution_edges)
assert nx.utils.edges_equal(z_star.edges, solution.edges, directed=True)
def test_held_karp_ascent_asymmetric_3():
"""
Tests the ascent method using a truly asymmetric graph with a fractional
solution for which the solution has been brute forced.
In this graph their are two different optimal, integral solutions (which
are also the overall atsp solutions) to the Held Karp relaxation. However,
this particular graph has two different tours of optimal value and the
possible solutions in the held_karp_ascent function are not stored in an
ordered data structure.
"""
import networkx.algorithms.approximation.traveling_salesman as tsp
np = pytest.importorskip("numpy")
pytest.importorskip("scipy")
G_array = np.array(
[
[0, 1, 5, 2, 7, 4],
[7, 0, 7, 7, 1, 4],
[4, 7, 0, 9, 2, 1],
[7, 2, 7, 0, 4, 4],
[5, 5, 4, 4, 0, 3],
[3, 9, 1, 3, 4, 0],
]
)
solution1_edges = [(0, 3), (1, 4), (2, 5), (3, 1), (4, 2), (5, 0)]
solution2_edges = [(0, 3), (3, 1), (1, 4), (4, 5), (2, 0), (5, 2)]
G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
opt_hk, z_star = tsp.held_karp_ascent(G)
assert round(opt_hk, 2) == 13.00
# Check that the z_stars are the same
solution1 = nx.DiGraph()
solution1.add_edges_from(solution1_edges)
solution2 = nx.DiGraph()
solution2.add_edges_from(solution2_edges)
assert nx.utils.edges_equal(
z_star.edges, solution1.edges, directed=True
) or nx.utils.edges_equal(z_star.edges, solution2.edges, directed=True)
def test_held_karp_ascent_fractional_asymmetric():
"""
Tests the ascent method using a truly asymmetric graph with a fractional
solution for which the solution has been brute forced
"""
import networkx.algorithms.approximation.traveling_salesman as tsp
np = pytest.importorskip("numpy")
pytest.importorskip("scipy")
G_array = np.array(
[
[0, 100, 150, 100000, 100000, 1],
[150, 0, 100, 100000, 1, 100000],
[100, 150, 0, 1, 100000, 100000],
[100000, 100000, 1, 0, 150, 100],
[100000, 2, 100000, 100, 0, 150],
[2, 100000, 100000, 150, 100, 0],
]
)
solution_z_star = {
(0, 1): 5 / 12,
(0, 2): 5 / 12,
(0, 5): 5 / 6,
(1, 0): 5 / 12,
(1, 2): 5 / 12,
(1, 4): 5 / 6,
(2, 0): 5 / 12,
(2, 1): 5 / 12,
(2, 3): 5 / 6,
(3, 2): 5 / 6,
(3, 4): 5 / 12,
(3, 5): 5 / 12,
(4, 1): 5 / 6,
(4, 3): 5 / 12,
(4, 5): 5 / 12,
(5, 0): 5 / 6,
(5, 3): 5 / 12,
(5, 4): 5 / 12,
}
G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
opt_hk, z_star = tsp.held_karp_ascent(G)
# Check that the optimal weights are the same
assert round(opt_hk, 2) == 304.00
# Check that the z_stars are the same
assert {key: round(z_star[key], 4) for key in z_star} == {
key: round(solution_z_star[key], 4) for key in solution_z_star
}
def test_spanning_tree_distribution():
"""
Test that we can create an exponential distribution of spanning trees such
that the probability of each tree is proportional to the product of edge
weights.
Results of this test have been confirmed with hypothesis testing from the
created distribution.
This test uses the symmetric, fractional Held Karp solution.
"""
import networkx.algorithms.approximation.traveling_salesman as tsp
pytest.importorskip("numpy")
pytest.importorskip("scipy")
z_star = {
(0, 1): 5 / 12,
(0, 2): 5 / 12,
(0, 5): 5 / 6,
(1, 0): 5 / 12,
(1, 2): 1 / 3,
(1, 4): 5 / 6,
(2, 0): 5 / 12,
(2, 1): 1 / 3,
(2, 3): 5 / 6,
(3, 2): 5 / 6,
(3, 4): 1 / 3,
(3, 5): 1 / 2,
(4, 1): 5 / 6,
(4, 3): 1 / 3,
(4, 5): 1 / 2,
(5, 0): 5 / 6,
(5, 3): 1 / 2,
(5, 4): 1 / 2,
}
solution_gamma = {
(0, 1): -0.6383,
(0, 2): -0.6827,
(0, 5): 0,
(1, 2): -1.0781,
(1, 4): 0,
(2, 3): 0,
(5, 3): -0.2820,
(5, 4): -0.3327,
(4, 3): -0.9927,
}
# The undirected support of z_star
G = nx.MultiGraph()
for u, v in z_star:
if (u, v) in G.edges or (v, u) in G.edges:
continue
G.add_edge(u, v)
gamma = tsp.spanning_tree_distribution(G, z_star)
assert {key: round(gamma[key], 4) for key in gamma} == solution_gamma
def test_asadpour_tsp():
"""
Test the complete asadpour tsp algorithm with the fractional, symmetric
Held Karp solution. This test also uses an incomplete graph as input.
"""
# This version of Figure 2 has all of the edge weights multiplied by 100
# and the 0 weight edges have a weight of 1.
pytest.importorskip("numpy")
pytest.importorskip("scipy")
edge_list = [
(0, 1, 100),
(0, 2, 100),
(0, 5, 1),
(1, 2, 100),
(1, 4, 1),
(2, 3, 1),
(3, 4, 100),
(3, 5, 100),
(4, 5, 100),
(1, 0, 100),
(2, 0, 100),
(5, 0, 1),
(2, 1, 100),
(4, 1, 1),
(3, 2, 1),
(4, 3, 100),
(5, 3, 100),
(5, 4, 100),
]
G = nx.DiGraph()
G.add_weighted_edges_from(edge_list)
tour = nx_app.traveling_salesman_problem(
G, weight="weight", method=nx_app.asadpour_atsp, seed=19
)
# Check that the returned list is a valid tour. Because this is an
# incomplete graph, the conditions are not as strict. We need the tour to
#
# Start and end at the same node
# Pass through every vertex at least once
# Have a total cost at most ln(6) / ln(ln(6)) = 3.0723 times the optimal
#
# For the second condition it is possible to have the tour pass through the
# same vertex more then. Imagine that the tour on the complete version takes
# an edge not in the original graph. In the output this is substituted with
# the shortest path between those vertices, allowing vertices to appear more
# than once.
#
# Even though we are using a fixed seed, multiple tours have been known to
# be returned. The first two are from the original development of this test,
# and the third one from issue #5913 on GitHub. If other tours are returned,
# add it on the list of expected tours.
expected_tours = [
[1, 4, 5, 0, 2, 3, 2, 1],
[3, 2, 0, 1, 4, 5, 3],
[3, 2, 1, 0, 5, 4, 3],
]
assert tour in expected_tours
def test_asadpour_real_world():
"""
This test uses airline prices between the six largest cities in the US.
* New York City -> JFK
* Los Angeles -> LAX
* Chicago -> ORD
* Houston -> IAH
* Phoenix -> PHX
* Philadelphia -> PHL
Flight prices from August 2021 using Delta or American airlines to get
nonstop flight. The brute force solution found the optimal tour to cost $872
This test also uses the `source` keyword argument to ensure that the tour
always starts at city 0.
"""
np = pytest.importorskip("numpy")
pytest.importorskip("scipy")
G_array = np.array(
[
# JFK LAX ORD IAH PHX PHL
[0, 243, 199, 208, 169, 183], # JFK
[277, 0, 217, 123, 127, 252], # LAX
[297, 197, 0, 197, 123, 177], # ORD
[303, 169, 197, 0, 117, 117], # IAH
[257, 127, 160, 117, 0, 319], # PHX
[183, 332, 217, 117, 319, 0], # PHL
]
)
node_list = ["JFK", "LAX", "ORD", "IAH", "PHX", "PHL"]
expected_tours = [
["JFK", "LAX", "PHX", "ORD", "IAH", "PHL", "JFK"],
["JFK", "ORD", "PHX", "LAX", "IAH", "PHL", "JFK"],
]
G = nx.from_numpy_array(G_array, nodelist=node_list, create_using=nx.DiGraph)
tour = nx_app.traveling_salesman_problem(
G, weight="weight", method=nx_app.asadpour_atsp, seed=37, source="JFK"
)
assert tour in expected_tours
def test_asadpour_real_world_path():
"""
This test uses airline prices between the six largest cities in the US. This
time using a path, not a cycle.
* New York City -> JFK
* Los Angeles -> LAX
* Chicago -> ORD
* Houston -> IAH
* Phoenix -> PHX
* Philadelphia -> PHL
Flight prices from August 2021 using Delta or American airlines to get
nonstop flight. The brute force solution found the optimal tour to cost $872
"""
np = pytest.importorskip("numpy")
pytest.importorskip("scipy")
G_array = np.array(
[
# JFK LAX ORD IAH PHX PHL
[0, 243, 199, 208, 169, 183], # JFK
[277, 0, 217, 123, 127, 252], # LAX
[297, 197, 0, 197, 123, 177], # ORD
[303, 169, 197, 0, 117, 117], # IAH
[257, 127, 160, 117, 0, 319], # PHX
[183, 332, 217, 117, 319, 0], # PHL
]
)
node_list = ["JFK", "LAX", "ORD", "IAH", "PHX", "PHL"]
expected_paths = [
["ORD", "PHX", "LAX", "IAH", "PHL", "JFK"],
["JFK", "PHL", "IAH", "ORD", "PHX", "LAX"],
]
G = nx.from_numpy_array(G_array, nodelist=node_list, create_using=nx.DiGraph)
path = nx_app.traveling_salesman_problem(
G, weight="weight", cycle=False, method=nx_app.asadpour_atsp, seed=56
)
assert path in expected_paths
def test_asadpour_disconnected_graph():
"""
Test that the proper exception is raised when asadpour_atsp is given an
disconnected graph.
"""
G = nx.complete_graph(4, create_using=nx.DiGraph)
# have to set edge weights so that if the exception is not raised, the
# function will complete and we will fail the test
nx.set_edge_attributes(G, 1, "weight")
G.add_node(5)
pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G)
def test_asadpour_incomplete_graph():
"""
Test that the proper exception is raised when asadpour_atsp is given an
incomplete graph
"""
G = nx.complete_graph(4, create_using=nx.DiGraph)
# have to set edge weights so that if the exception is not raised, the
# function will complete and we will fail the test
nx.set_edge_attributes(G, 1, "weight")
G.remove_edge(0, 1)
pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G)
def test_asadpour_empty_graph():
"""
Test the asadpour_atsp function with an empty graph
"""
G = nx.DiGraph()
pytest.raises(nx.NetworkXError, nx_app.asadpour_atsp, G)
def test_asadpour_small_graphs():
# 1 node
G = nx.path_graph(1, create_using=nx.DiGraph)
with pytest.raises(nx.NetworkXError, match="at least two nodes"):
nx_app.asadpour_atsp(G)
# 2 nodes
G = nx.DiGraph()
G.add_weighted_edges_from([(0, 1, 7), (1, 0, 8)])
assert nx_app.asadpour_atsp(G) in [[0, 1], [1, 0]]
assert nx_app.asadpour_atsp(G, source=1) == [1, 0]
assert nx_app.asadpour_atsp(G, source=0) == [0, 1]
@pytest.mark.slow
def test_asadpour_integral_held_karp():
"""
This test uses an integral held karp solution and the held karp function
will return a graph rather than a dict, bypassing most of the asadpour
algorithm.
At first glance, this test probably doesn't look like it ensures that we
skip the rest of the asadpour algorithm, but it does. We are not fixing a
see for the random number generator, so if we sample any spanning trees
the approximation would be different basically every time this test is
executed but it is not since held karp is deterministic and we do not
reach the portion of the code with the dependence on random numbers.
"""
np = pytest.importorskip("numpy")
G_array = np.array(
[
[0, 26, 63, 59, 69, 31, 41],
[62, 0, 91, 53, 75, 87, 47],
[47, 82, 0, 90, 15, 9, 18],
[68, 19, 5, 0, 58, 34, 93],
[11, 58, 53, 55, 0, 61, 79],
[88, 75, 13, 76, 98, 0, 40],
[41, 61, 55, 88, 46, 45, 0],
]
)
G = nx.from_numpy_array(G_array, create_using=nx.DiGraph)
for _ in range(2):
tour = nx_app.traveling_salesman_problem(G, method=nx_app.asadpour_atsp)
assert [1, 3, 2, 5, 2, 6, 4, 0, 1] == tour
def test_directed_tsp_impossible():
"""
Test the asadpour algorithm with a graph without a hamiltonian circuit
"""
pytest.importorskip("numpy")
# In this graph, once we leave node 0 we cannot return
edges = [
(0, 1, 10),
(0, 2, 11),
(0, 3, 12),
(1, 2, 4),
(1, 3, 6),
(2, 1, 3),
(2, 3, 2),
(3, 1, 5),
(3, 2, 1),
]
G = nx.DiGraph()
G.add_weighted_edges_from(edges)
pytest.raises(nx.NetworkXError, nx_app.traveling_salesman_problem, G)
| TestThresholdAcceptingTSP |
python | ansible__ansible | lib/ansible/module_utils/errors.py | {
"start": 2416,
"end": 2501
} | class ____(AnsibleValidationError):
"""Error converting no_log values"""
| NoLogError |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/events.py | {
"start": 6572,
"end": 7042
} | class ____(FileSystemEvent):
"""File system event representing directory deletion on the file system."""
event_type = EVENT_TYPE_DELETED
is_directory = True
def __init__(self, src_path):
super(DirDeletedEvent, self).__init__(src_path)
def __repr__(self):
return ("<%(class_name)s: src_path=%(src_path)r>"
) % (dict(class_name=self.__class__.__name__,
src_path=self.src_path))
| DirDeletedEvent |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 64847,
"end": 72931
} | class ____:
async def test_task_input_hash_within_flows(
self,
):
@task(
cache_key_fn=task_input_hash,
persist_result=True,
)
def foo(x):
return x
@flow
def bar():
return (
foo(1, return_state=True),
foo(2, return_state=True),
foo(1, return_state=True),
)
first_state, second_state, third_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Completed"
assert third_state.name == "Cached"
assert await first_state.result() != await second_state.result()
assert await first_state.result() == await third_state.result()
assert await first_state.result() == 1
async def test_task_input_hash_between_flows(
self,
):
@task(
cache_key_fn=task_input_hash,
persist_result=True,
)
def foo(x):
return x
@flow
def bar(x):
return foo(x, return_state=True)
first_state = bar(1)
second_state = bar(2)
third_state = bar(1)
assert first_state.name == "Completed"
assert second_state.name == "Completed"
assert third_state.name == "Cached"
assert await first_state.result() != await second_state.result()
assert await first_state.result() == await third_state.result() == 1
async def test_task_input_hash_works_with_object_return_types(
self,
):
"""
This is a regression test for a weird bug where `task_input_hash` would always
use cloudpickle to generate the hash since we were passing in the raw function
which is not JSON serializable. In this case, the return value could affect
the pickle which would change the hash across runs. To fix this,
`task_input_hash` hashes the function before passing data to `hash_objects` so
the JSON serializer can be used.
"""
class TestClass:
def __init__(self, x):
self.x = x
def __eq__(self, other) -> bool:
return type(self) is type(other) and self.x == other.x
@task(
cache_key_fn=task_input_hash,
persist_result=True,
)
def foo(x):
return TestClass(x)
@flow
def bar():
return (
foo(1, return_state=True),
foo(2, return_state=True),
foo(1, return_state=True),
)
first_state, second_state, third_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Completed"
assert third_state.name == "Cached"
assert await first_state.result() != await second_state.result()
assert await first_state.result() == await third_state.result()
async def test_task_input_hash_works_with_object_input_types(
self,
):
class TestClass:
def __init__(self, x):
self.x = x
def __eq__(self, other) -> bool:
return type(self) is type(other) and self.x == other.x
@task(
cache_key_fn=task_input_hash,
persist_result=True,
)
def foo(instance):
return instance.x
@flow
def bar():
return (
foo(TestClass(1), return_state=True),
foo(TestClass(2), return_state=True),
foo(TestClass(1), return_state=True),
)
first_state, second_state, third_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Completed"
assert third_state.name == "Cached"
assert await first_state.result() != await second_state.result()
assert await first_state.result() == await third_state.result() == 1
async def test_task_input_hash_works_with_block_input_types(
self,
):
class TestBlock(Block):
x: int
y: int
z: int
@task(
cache_key_fn=task_input_hash,
persist_result=True,
)
def foo(instance):
return instance.x
@flow
def bar():
return (
foo(TestBlock(x=1, y=2, z=3), return_state=True),
foo(TestBlock(x=4, y=2, z=3), return_state=True),
foo(TestBlock(x=1, y=2, z=3), return_state=True),
)
first_state, second_state, third_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Completed"
assert third_state.name == "Cached"
assert await first_state.result() != await second_state.result()
assert await first_state.result() == await third_state.result() == 1
async def test_task_input_hash_depends_on_task_key_and_code(
self,
):
@task(
cache_key_fn=task_input_hash,
persist_result=True,
)
def foo(x):
return x
def foo_new_code(x):
return x + 1
def foo_same_code(x):
return x
@task(
cache_key_fn=task_input_hash,
persist_result=True,
)
def bar(x):
return x
@flow
def my_flow():
first = foo(1, return_state=True)
foo.fn = foo_same_code
second = foo(1, return_state=True)
foo.fn = foo_new_code
third = foo(1, return_state=True)
fourth = bar(1, return_state=True)
fifth = bar(1, return_state=True)
return first, second, third, fourth, fifth
(
first_state,
second_state,
third_state,
fourth_state,
fifth_state,
) = my_flow()
assert first_state.name == "Completed"
assert second_state.name == "Cached"
assert third_state.name == "Completed"
assert fourth_state.name == "Completed"
assert fifth_state.name == "Cached"
assert await first_state.result() == await second_state.result() == 1
assert await first_state.result() != await third_state.result()
assert await fourth_state.result() == await fifth_state.result() == 1
async def test_task_input_hash_works_with_block_input_types_and_bytes(
self,
):
class TestBlock(Block):
x: int
y: int
z: bytes
@task(
cache_key_fn=task_input_hash,
persist_result=True,
)
def foo(instance):
return instance.x
@flow
def bar():
return (
foo(
TestBlock(x=1, y=2, z="dog".encode("utf-8")), return_state=True
), # same
foo(
TestBlock(x=4, y=2, z="dog".encode("utf-8")), return_state=True
), # different x
foo(
TestBlock(x=1, y=2, z="dog".encode("utf-8")), return_state=True
), # same
foo(
TestBlock(x=1, y=2, z="dog".encode("latin-1")), return_state=True
), # same
foo(
TestBlock(x=1, y=2, z="cat".encode("utf-8")), return_state=True
), # different z
)
first_state, second_state, third_state, fourth_state, fifth_state = bar()
assert first_state.name == "Completed"
assert second_state.name == "Completed"
assert third_state.name == "Cached"
assert fourth_state.name == "Cached"
assert fifth_state.name == "Completed"
assert await first_state.result() != await second_state.result()
assert (
await first_state.result()
== await third_state.result()
== await fourth_state.result()
== 1
)
| TestCacheFunctionBuiltins |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared/check/builder.py | {
"start": 674,
"end": 873
} | class ____(NamedTuple):
"""A pointer to where to lazily import from to resolve a ForwardRef.
Used with Annotated ie: Annotated['Foo', ImportFrom('baz.bar')]
"""
module: str
| ImportFrom |
python | optuna__optuna | optuna/_gp/acqf.py | {
"start": 11389,
"end": 13050
} | class ____(BaseAcquisitionFunc):
def __init__(
self,
gpr_list: list[GPRegressor],
search_space: SearchSpace,
Y_feasible: torch.Tensor | None,
n_qmc_samples: int,
qmc_seed: int | None,
constraints_gpr_list: list[GPRegressor],
constraints_threshold_list: list[float],
stabilizing_noise: float = 1e-12,
) -> None:
assert (
len(constraints_gpr_list) == len(constraints_threshold_list) and constraints_gpr_list
)
self._acqf = (
LogEHVI(gpr_list, search_space, Y_feasible, n_qmc_samples, qmc_seed, stabilizing_noise)
if Y_feasible is not None
else None
)
self._constraints_acqf_list = [
LogPI(_gpr, search_space, _threshold, stabilizing_noise)
for _gpr, _threshold in zip(constraints_gpr_list, constraints_threshold_list)
]
# Since all the objectives are equally important, we simply use the mean of
# inverse of squared mean lengthscales over all the objectives.
# inverse_squared_lengthscales is used in optim_mixed.py.
# cf. https://github.com/optuna/optuna/blob/v4.3.0/optuna/_gp/optim_mixed.py#L200-L209
super().__init__(np.mean([gpr.length_scales for gpr in gpr_list], axis=0), search_space)
def eval_acqf(self, x: torch.Tensor) -> torch.Tensor:
constraints_acqf_values = sum(acqf.eval_acqf(x) for acqf in self._constraints_acqf_list)
if self._acqf is None:
return cast(torch.Tensor, constraints_acqf_values)
return constraints_acqf_values + self._acqf.eval_acqf(x)
| ConstrainedLogEHVI |
python | great-expectations__great_expectations | contrib/great_expectations_ethical_ai_expectations/great_expectations_ethical_ai_expectations/expectations/expect_table_linear_feature_importances_to_be.py | {
"start": 1206,
"end": 2745
} | class ____(TableMetricProvider):
metric_name = "table.modeling.linear.feature_importances"
value_keys = ("y_column",)
@metric_value(engine=PandasExecutionEngine)
def _pandas(
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
df, _, _ = execution_engine.get_compute_domain(
metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
)
X, y = (
df.drop(columns=[metric_value_kwargs["y_column"]]),
df[metric_value_kwargs["y_column"]],
)
model = LinearRegression().fit(X, y)
importances = permutation_importance(
model,
X,
y,
n_repeats=30,
random_state=42,
scoring="neg_mean_absolute_percentage_error",
)
return {i: j for i, j in zip(X.columns, importances.importances_mean, strict=False)}
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine=None,
runtime_configuration=None,
):
return {
"table.columns": MetricConfiguration("table.columns", metric.metric_domain_kwargs),
}
# This class defines the Expectation itself
# The main business logic for calculation lives here.
| TableModelingRidgeFeatureImportances |
python | numba__numba | numba/tests/test_linalg.py | {
"start": 59972,
"end": 65269
} | class ____(TestLinalgSystems):
"""
Tests for np.linalg.solve.
"""
@needs_lapack
def test_linalg_solve(self):
"""
Test np.linalg.solve
"""
cfunc = jit(nopython=True)(solve_system)
def check(a, b, **kwargs):
expected = solve_system(a, b, **kwargs)
got = cfunc(a, b, **kwargs)
# check that the computed results are contig and in the same way
self.assert_contig_sanity(got, "F")
use_reconstruction = False
# try plain match of the result first
try:
np.testing.assert_array_almost_equal_nulp(
got, expected, nulp=10)
except AssertionError:
# plain match failed, test by reconstruction
use_reconstruction = True
# If plain match fails then reconstruction is used,
# this checks that AX ~= B.
# Plain match can fail due to numerical fuzziness associated
# with system size and conditioning, or more simply from
# numpy using double precision routines for computation that
# could be done in single precision (which is what numba does).
# Therefore minor differences in results can appear due to
# e.g. numerical roundoff being different between two precisions.
if use_reconstruction:
# check they are dimensionally correct
self.assertEqual(got.shape, expected.shape)
# check AX=B
rec = np.dot(a, got)
resolution = np.finfo(a.dtype).resolution
np.testing.assert_allclose(
b,
rec,
rtol=10 * resolution,
atol=100 * resolution # zeros tend to be fuzzy
)
# Ensure proper resource management
with self.assertNoNRTLeak():
cfunc(a, b, **kwargs)
# test: prime size squares
sizes = [(1, 1), (3, 3), (7, 7)]
# test loop
for size, dtype, order in \
product(sizes, self.dtypes, 'FC'):
A = self.specific_sample_matrix(size, dtype, order)
b_sizes = (1, 13)
for b_size, b_order in product(b_sizes, 'FC'):
# check 2D B
B = self.specific_sample_matrix(
(A.shape[0], b_size), dtype, b_order)
check(A, B)
# check 1D B
tmp = B[:, 0].copy(order=b_order)
check(A, tmp)
# check empty
cfunc(np.empty((0, 0)), np.empty((0,)))
# Test input validation
ok = np.array([[1., 0.], [0., 1.]], dtype=np.float64)
# check ok input is ok
cfunc(ok, ok)
# check bad inputs
rn = "solve"
# Wrong dtype
bad = np.array([[1, 0], [0, 1]], dtype=np.int32)
self.assert_wrong_dtype(rn, cfunc, (ok, bad))
self.assert_wrong_dtype(rn, cfunc, (bad, ok))
# different dtypes
bad = np.array([[1, 2], [3, 4]], dtype=np.float32)
self.assert_homogeneous_dtypes(rn, cfunc, (ok, bad))
self.assert_homogeneous_dtypes(rn, cfunc, (bad, ok))
# Dimension issue
bad = np.array([1, 0], dtype=np.float64)
self.assert_wrong_dimensions(rn, cfunc, (bad, ok))
# no nans or infs
bad = np.array([[1., 0., ], [np.inf, np.nan]], dtype=np.float64)
self.assert_no_nan_or_inf(cfunc, (ok, bad))
self.assert_no_nan_or_inf(cfunc, (bad, ok))
# check 1D is accepted for B (2D is done previously)
# and then that anything of higher dimension raises
ok_oneD = np.array([1., 2.], dtype=np.float64)
cfunc(ok, ok_oneD)
bad = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float64)
self.assert_wrong_dimensions_1D(rn, cfunc, (ok, bad))
# check an invalid system raises (1D and 2D cases checked)
bad1D = np.array([1.], dtype=np.float64)
bad2D = np.array([[1.], [2.], [3.]], dtype=np.float64)
self.assert_dimensionally_invalid(cfunc, (ok, bad1D))
self.assert_dimensionally_invalid(cfunc, (ok, bad2D))
# check that a singular system raises
bad2D = self.specific_sample_matrix((2, 2), np.float64, 'C', rank=1)
self.assert_raise_on_singular(cfunc, (bad2D, ok))
@needs_lapack
def test_no_input_mutation(self):
X = np.array([[1., 1, 1, 1],
[0., 1, 1, 1],
[0., 0, 1, 1],
[1., 0, 0, 1],], order='F')
X_orig = np.copy(X)
y = np.array([1., 2., 3., 4])
y_orig = np.copy(y)
@jit(nopython=True)
def func(X, y, test):
if test:
# not executed, triggers A order in X
X = X[1:2, :]
return np.linalg.solve(X, y)
expected = func.py_func(X, y, False)
np.testing.assert_allclose(X, X_orig)
np.testing.assert_allclose(y, y_orig)
got = func(X, y, False)
np.testing.assert_allclose(X, X_orig)
np.testing.assert_allclose(y, y_orig)
np.testing.assert_allclose(expected, got)
| TestLinalgSolve |
python | django__django | django/db/migrations/autodetector.py | {
"start": 1153,
"end": 89926
} | class ____:
"""
Take a pair of ProjectStates and compare them to see what the first would
need doing to make it match the second (the second usually being the
project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
self.existing_apps = {app for app, model in from_state.models}
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of applicable changes.
Take a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes(convert_apps, graph)
changes = self.arrange_for_graph(changes, graph, migration_name)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def deep_deconstruct(self, obj):
"""
Recursive deconstruction for a field and its arguments.
Used for full comparison for rename/alter; sometimes a single-level
deconstruction will not compare correctly.
"""
if isinstance(obj, list):
return [self.deep_deconstruct(value) for value in obj]
elif isinstance(obj, tuple):
return tuple(self.deep_deconstruct(value) for value in obj)
elif isinstance(obj, dict):
return {key: self.deep_deconstruct(value) for key, value in obj.items()}
elif isinstance(obj, functools.partial):
return (
obj.func,
self.deep_deconstruct(obj.args),
self.deep_deconstruct(obj.keywords),
)
elif isinstance(obj, COMPILED_REGEX_TYPE):
return RegexObject(obj)
elif isinstance(obj, type):
# If this is a type that implements 'deconstruct' as an instance
# method, avoid treating this as being deconstructible itself - see
# #22951
return obj
elif hasattr(obj, "deconstruct"):
deconstructed = obj.deconstruct()
if isinstance(obj, models.Field):
# we have a field which also returns a name
deconstructed = deconstructed[1:]
path, args, kwargs = deconstructed
return (
path,
[self.deep_deconstruct(value) for value in args],
{key: self.deep_deconstruct(value) for key, value in kwargs.items()},
)
else:
return obj
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to. Used for detecting renames (as
the related fields change during renames).
"""
fields_def = []
for name, field in sorted(fields.items()):
deconstruction = self.deep_deconstruct(field)
if field.remote_field and field.remote_field.model:
deconstruction[2].pop("to", None)
fields_def.append(deconstruction)
return fields_def
def _detect_changes(self, convert_apps=None, graph=None):
"""
Return a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
convert_apps is the list of apps to convert to use migrations
(i.e. to make initial migrations for, in the usual case)
graph is an optional argument that, if provided, can help improve
dependency generation and avoid potential circular dependencies.
"""
# The first phase is generating all the operations for each app
# and gathering them into a big per-app list.
# Then go through that list, order it, and split into migrations to
# resolve dependencies caused by M2Ms and FKs.
self.generated_operations = {}
self.altered_indexes = {}
self.altered_constraints = {}
self.renamed_fields = {}
# Prepare some old/new state and model lists, separating
# proxy models and ignoring unmigrated apps.
self.old_model_keys = set()
self.old_proxy_keys = set()
self.old_unmanaged_keys = set()
self.new_model_keys = set()
self.new_proxy_keys = set()
self.new_unmanaged_keys = set()
for (app_label, model_name), model_state in self.from_state.models.items():
if not model_state.options.get("managed", True):
self.old_unmanaged_keys.add((app_label, model_name))
elif app_label not in self.from_state.real_apps:
if model_state.options.get("proxy"):
self.old_proxy_keys.add((app_label, model_name))
else:
self.old_model_keys.add((app_label, model_name))
for (app_label, model_name), model_state in self.to_state.models.items():
if not model_state.options.get("managed", True):
self.new_unmanaged_keys.add((app_label, model_name))
elif app_label not in self.from_state.real_apps or (
convert_apps and app_label in convert_apps
):
if model_state.options.get("proxy"):
self.new_proxy_keys.add((app_label, model_name))
else:
self.new_model_keys.add((app_label, model_name))
self.from_state.resolve_fields_and_relations()
self.to_state.resolve_fields_and_relations()
# Renames have to come first
self.generate_renamed_models()
# Prepare lists of fields and generate through model map
self._prepare_field_lists()
self._generate_through_model_map()
# Generate non-rename model operations
self.generate_deleted_models()
self.generate_created_models()
self.generate_deleted_proxies()
self.generate_created_proxies()
self.generate_altered_options()
self.generate_altered_managers()
self.generate_altered_db_table_comment()
# Create the renamed fields and store them in self.renamed_fields.
# They are used by create_altered_indexes(), generate_altered_fields(),
# generate_removed_altered_unique_together(), and
# generate_altered_unique_together().
self.create_renamed_fields()
# Create the altered indexes and store them in self.altered_indexes.
# This avoids the same computation in generate_removed_indexes()
# and generate_added_indexes().
self.create_altered_indexes()
self.create_altered_constraints()
# Generate index removal operations before field is removed
self.generate_removed_constraints()
self.generate_removed_indexes()
# Generate field renaming operations.
self.generate_renamed_fields()
self.generate_renamed_indexes()
# Generate removal of foo together.
self.generate_removed_altered_unique_together()
# Generate field operations.
self.generate_removed_fields()
self.generate_added_fields()
self.generate_altered_fields()
self.generate_altered_order_with_respect_to()
self.generate_altered_unique_together()
self.generate_added_indexes()
self.generate_added_constraints()
self.generate_altered_constraints()
self.generate_altered_db_table()
self._sort_migrations()
self._build_migration_list(graph)
self._optimize_migrations()
return self.migrations
def _prepare_field_lists(self):
"""
Prepare field lists and a list of the fields that used through models
in the old state so dependencies can be made from the through model
deletion to the field that uses it.
"""
self.kept_model_keys = self.old_model_keys & self.new_model_keys
self.kept_proxy_keys = self.old_proxy_keys & self.new_proxy_keys
self.kept_unmanaged_keys = self.old_unmanaged_keys & self.new_unmanaged_keys
self.through_users = {}
self.old_field_keys = {
(app_label, model_name, field_name)
for app_label, model_name in self.kept_model_keys
for field_name in self.from_state.models[
app_label, self.renamed_models.get((app_label, model_name), model_name)
].fields
}
self.new_field_keys = {
(app_label, model_name, field_name)
for app_label, model_name in self.kept_model_keys
for field_name in self.to_state.models[app_label, model_name].fields
}
def _generate_through_model_map(self):
"""Through model map generation."""
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
for field_name, field in old_model_state.fields.items():
if hasattr(field, "remote_field") and getattr(
field.remote_field, "through", None
):
through_key = resolve_relation(
field.remote_field.through, app_label, model_name
)
self.through_users[through_key] = (
app_label,
old_model_name,
field_name,
)
@staticmethod
def _resolve_dependency(dependency):
"""
Return the resolved dependency and a boolean denoting whether or not
it was swappable.
"""
if dependency.app_label != "__setting__":
return dependency, False
resolved_app_label, resolved_object_name = getattr(
settings, dependency.model_name
).split(".")
return (
OperationDependency(
resolved_app_label,
resolved_object_name.lower(),
dependency.field_name,
dependency.type,
),
True,
)
def _build_migration_list(self, graph=None):
"""
Chop the lists of operations up into migrations with dependencies on
each other. Do this by going through an app's list of operations until
one is found that has an outgoing dependency that isn't in another
app's migration yet (hasn't been chopped off its list). Then chop off
the operations before it into a migration and move onto the next app.
If the loops completes without doing anything, there's a circular
dependency (which _should_ be impossible as the operations are
all split at this point so they can't depend and be depended on).
"""
self.migrations = {}
num_ops = sum(len(x) for x in self.generated_operations.values())
chop_mode = False
while num_ops:
# On every iteration, we step through all the apps and see if there
# is a completed set of operations.
# If we find that a subset of the operations are complete we can
# try to chop it off from the rest and continue, but we only
# do this if we've already been through the list once before
# without any chopping and nothing has changed.
for app_label in sorted(self.generated_operations):
chopped = []
dependencies = set()
for operation in list(self.generated_operations[app_label]):
deps_satisfied = True
operation_dependencies = set()
for dep in operation._auto_deps:
# Temporarily resolve the swappable dependency to
# prevent circular references. While keeping the
# dependency checks on the resolved model, add the
# swappable dependencies.
original_dep = dep
dep, is_swappable_dep = self._resolve_dependency(dep)
if dep.app_label != app_label:
# External app dependency. See if it's not yet
# satisfied.
for other_operation in self.generated_operations.get(
dep.app_label, []
):
if self.check_dependency(other_operation, dep):
deps_satisfied = False
break
if not deps_satisfied:
break
else:
if is_swappable_dep:
operation_dependencies.add(
(
original_dep.app_label,
original_dep.model_name,
)
)
elif dep.app_label in self.migrations:
operation_dependencies.add(
(
dep.app_label,
self.migrations[dep.app_label][-1].name,
)
)
else:
# If we can't find the other app, we add a
# first/last dependency, but only if we've
# already been through once and checked
# everything.
if chop_mode:
# If the app already exists, we add a
# dependency on the last migration, as
# we don't know which migration
# contains the target field. If it's
# not yet migrated or has no
# migrations, we use __first__.
if graph and graph.leaf_nodes(dep.app_label):
operation_dependencies.add(
graph.leaf_nodes(dep.app_label)[0]
)
else:
operation_dependencies.add(
(dep.app_label, "__first__")
)
else:
deps_satisfied = False
if deps_satisfied:
chopped.append(operation)
dependencies.update(operation_dependencies)
del self.generated_operations[app_label][0]
else:
break
# Make a migration! Well, only if there's stuff to put in it
if dependencies or chopped:
if not self.generated_operations[app_label] or chop_mode:
subclass = type(
"Migration",
(Migration,),
{"operations": [], "dependencies": []},
)
instance = subclass(
"auto_%i" % (len(self.migrations.get(app_label, [])) + 1),
app_label,
)
instance.dependencies = list(dependencies)
instance.operations = chopped
instance.initial = app_label not in self.existing_apps
self.migrations.setdefault(app_label, []).append(instance)
chop_mode = False
else:
self.generated_operations[app_label] = (
chopped + self.generated_operations[app_label]
)
new_num_ops = sum(len(x) for x in self.generated_operations.values())
if new_num_ops == num_ops:
if not chop_mode:
chop_mode = True
else:
raise ValueError(
"Cannot resolve operation dependencies: %r"
% self.generated_operations
)
num_ops = new_num_ops
def _sort_migrations(self):
"""
Reorder to make things possible. Reordering may be needed so FKs work
nicely inside the same app.
"""
for app_label, ops in sorted(self.generated_operations.items()):
ts = TopologicalSorter()
for op in ops:
ts.add(op)
for dep in op._auto_deps:
# Resolve intra-app dependencies to handle circular
# references involving a swappable model.
dep = self._resolve_dependency(dep)[0]
if dep.app_label != app_label:
continue
ts.add(op, *(x for x in ops if self.check_dependency(x, dep)))
self.generated_operations[app_label] = list(ts.static_order())
def _optimize_migrations(self):
# Add in internal dependencies among the migrations
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# De-dupe dependencies
for migrations in self.migrations.values():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
# Optimize migrations
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.operations = MigrationOptimizer().optimize(
migration.operations, app_label
)
def check_dependency(self, operation, dependency):
"""
Return True if the given operation depends on the given dependency,
False otherwise.
"""
# Created model
if (
dependency.field_name is None
and dependency.type == OperationDependency.Type.CREATE
):
return (
isinstance(operation, operations.CreateModel)
and operation.name_lower == dependency.model_name_lower
)
# Created field
elif (
dependency.field_name is not None
and dependency.type == OperationDependency.Type.CREATE
):
return (
isinstance(operation, operations.CreateModel)
and operation.name_lower == dependency.model_name_lower
and any(dependency.field_name == x for x, y in operation.fields)
) or (
isinstance(operation, operations.AddField)
and operation.model_name_lower == dependency.model_name_lower
and operation.name_lower == dependency.field_name_lower
)
# Removed field
elif (
dependency.field_name is not None
and dependency.type == OperationDependency.Type.REMOVE
):
return (
isinstance(operation, operations.RemoveField)
and operation.model_name_lower == dependency.model_name_lower
and operation.name_lower == dependency.field_name_lower
)
# Removed model
elif (
dependency.field_name is None
and dependency.type == OperationDependency.Type.REMOVE
):
return (
isinstance(operation, operations.DeleteModel)
and operation.name_lower == dependency.model_name_lower
)
# Field being altered
elif (
dependency.field_name is not None
and dependency.type == OperationDependency.Type.ALTER
):
return (
isinstance(operation, operations.AlterField)
and operation.model_name_lower == dependency.model_name_lower
and operation.name_lower == dependency.field_name_lower
)
# order_with_respect_to being unset for a field
elif (
dependency.field_name is not None
and dependency.type == OperationDependency.Type.REMOVE_ORDER_WRT
):
return (
isinstance(operation, operations.AlterOrderWithRespectTo)
and operation.name_lower == dependency.model_name_lower
and (operation.order_with_respect_to or "").lower()
!= dependency.field_name_lower
)
# Field is removed and part of an index/unique_together
elif (
dependency.field_name is not None
and dependency.type == OperationDependency.Type.ALTER_FOO_TOGETHER
):
return (
isinstance(
operation,
(operations.AlterUniqueTogether, operations.AlterIndexTogether),
)
and operation.name_lower == dependency.model_name_lower
)
# Field is removed and part of an index/constraint.
elif (
dependency.field_name is not None
and dependency.type == OperationDependency.Type.REMOVE_INDEX_OR_CONSTRAINT
):
return (
isinstance(
operation,
(operations.RemoveIndex, operations.RemoveConstraint),
)
and operation.model_name_lower == dependency.model_name_lower
)
# Unknown dependency. Raise an error.
else:
raise ValueError("Can't handle dependency %r" % (dependency,))
def add_operation(self, app_label, operation, dependencies=None, beginning=False):
# Dependencies are
# (app_label, model_name, field_name, create/delete as True/False)
operation._auto_deps = dependencies or []
if beginning:
self.generated_operations.setdefault(app_label, []).insert(0, operation)
else:
self.generated_operations.setdefault(app_label, []).append(operation)
def swappable_first_key(self, item):
"""
Place potential swappable models first in lists of created models (only
real way to solve #22783).
"""
try:
model_state = self.to_state.models[item]
base_names = {
base if isinstance(base, str) else base.__name__
for base in model_state.bases
}
string_version = "%s.%s" % (item[0], item[1])
if (
model_state.options.get("swappable")
or "AbstractUser" in base_names
or "AbstractBaseUser" in base_names
or settings.AUTH_USER_MODEL.lower() == string_version.lower()
):
return ("___" + item[0], "___" + item[1])
except LookupError:
pass
return item
    def generate_renamed_models(self):
        """
        Find any renamed models, generate the operations for them, and remove
        the old entry from the model lists. Must be run before other
        model-level generation.
        """
        self.renamed_models = {}
        self.renamed_models_rel = {}
        added_models = self.new_model_keys - self.old_model_keys
        for app_label, model_name in sorted(added_models):
            model_state = self.to_state.models[app_label, model_name]
            # Compare field definitions with relation details stripped so a
            # rename is still detected when the related models were also
            # renamed in the same run.
            model_fields_def = self.only_relation_agnostic_fields(model_state.fields)
            removed_models = self.old_model_keys - self.new_model_keys
            for rem_app_label, rem_model_name in removed_models:
                # Only models removed from the *same* app are rename
                # candidates.
                if rem_app_label == app_label:
                    rem_model_state = self.from_state.models[
                        rem_app_label, rem_model_name
                    ]
                    rem_model_fields_def = self.only_relation_agnostic_fields(
                        rem_model_state.fields
                    )
                    if model_fields_def == rem_model_fields_def:
                        # The questioner confirms ambiguous renames (it may be
                        # interactive or a non-interactive policy object).
                        if self.questioner.ask_rename_model(
                            rem_model_state, model_state
                        ):
                            dependencies = []
                            # Both the model's own fields and every incoming
                            # relation must exist before the rename runs.
                            fields = list(model_state.fields.values()) + [
                                field.remote_field
                                for relations in self.to_state.relations[
                                    app_label, model_name
                                ].values()
                                for field in relations.values()
                            ]
                            for field in fields:
                                if field.is_relation:
                                    dependencies.extend(
                                        self._get_dependencies_for_foreign_key(
                                            app_label,
                                            model_name,
                                            field,
                                            self.to_state,
                                        )
                                    )
                            self.add_operation(
                                app_label,
                                operations.RenameModel(
                                    old_name=rem_model_state.name,
                                    new_name=model_state.name,
                                ),
                                dependencies=dependencies,
                            )
                            # Record the rename so later phases can map old
                            # references ("app.model" strings) to new ones.
                            self.renamed_models[app_label, model_name] = rem_model_name
                            renamed_models_rel_key = "%s.%s" % (
                                rem_model_state.app_label,
                                rem_model_state.name_lower,
                            )
                            self.renamed_models_rel[renamed_models_rel_key] = (
                                "%s.%s"
                                % (
                                    model_state.app_label,
                                    model_state.name_lower,
                                )
                            )
                            # Treat the pair as "kept" from now on: swap the
                            # old key for the new one in old_model_keys.
                            self.old_model_keys.remove((rem_app_label, rem_model_name))
                            self.old_model_keys.add((app_label, model_name))
                            break
    def generate_created_models(self):
        """
        Find all new models (both managed and unmanaged) and make create
        operations for them as well as separate operations to create any
        foreign key or M2M relationships (these are optimized later, if
        possible).
        Defer any model options that refer to collections of fields that might
        be deferred (e.g. unique_together).
        """
        old_keys = self.old_model_keys | self.old_unmanaged_keys
        added_models = self.new_model_keys - old_keys
        added_unmanaged_models = self.new_unmanaged_keys - old_keys
        # Swappable-candidate models go first in each group; see
        # swappable_first_key().
        all_added_models = chain(
            sorted(added_models, key=self.swappable_first_key, reverse=True),
            sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True),
        )
        for app_label, model_name in all_added_models:
            model_state = self.to_state.models[app_label, model_name]
            # Gather related fields
            related_fields = {}
            primary_key_rel = None
            for field_name, field in model_state.fields.items():
                if field.remote_field:
                    if field.remote_field.model:
                        if field.primary_key:
                            # A relational PK (e.g. OneToOne parent link used
                            # as pk) must stay in CreateModel itself.
                            primary_key_rel = field.remote_field.model
                        elif not field.remote_field.parent_link:
                            related_fields[field_name] = field
                    if getattr(field.remote_field, "through", None):
                        related_fields[field_name] = field
            # Are there indexes/unique_together to defer?
            indexes = model_state.options.pop("indexes")
            constraints = model_state.options.pop("constraints")
            unique_together = model_state.options.pop("unique_together", None)
            order_with_respect_to = model_state.options.pop(
                "order_with_respect_to", None
            )
            # Depend on the deletion of any possible proxy version of us
            dependencies = [
                OperationDependency(
                    app_label, model_name, None, OperationDependency.Type.REMOVE
                ),
            ]
            # Depend on all bases
            for base in model_state.bases:
                if isinstance(base, str) and "." in base:
                    base_app_label, base_name = base.split(".", 1)
                    dependencies.append(
                        OperationDependency(
                            base_app_label,
                            base_name,
                            None,
                            OperationDependency.Type.CREATE,
                        )
                    )
                    # Depend on the removal of base fields if the new model has
                    # a field with the same name.
                    old_base_model_state = self.from_state.models.get(
                        (base_app_label, base_name)
                    )
                    new_base_model_state = self.to_state.models.get(
                        (base_app_label, base_name)
                    )
                    if old_base_model_state and new_base_model_state:
                        removed_base_fields = (
                            set(old_base_model_state.fields)
                            .difference(
                                new_base_model_state.fields,
                            )
                            .intersection(model_state.fields)
                        )
                        for removed_base_field in removed_base_fields:
                            dependencies.append(
                                OperationDependency(
                                    base_app_label,
                                    base_name,
                                    removed_base_field,
                                    OperationDependency.Type.REMOVE,
                                )
                            )
            # Depend on the other end of the primary key if it's a relation
            if primary_key_rel:
                dependencies.append(
                    OperationDependency(
                        *resolve_relation(primary_key_rel, app_label, model_name),
                        None,
                        OperationDependency.Type.CREATE,
                    ),
                )
            # Generate creation operation
            self.add_operation(
                app_label,
                operations.CreateModel(
                    name=model_state.name,
                    # Related fields (other than a relational pk) are added by
                    # separate AddField operations below.
                    fields=[
                        d
                        for d in model_state.fields.items()
                        if d[0] not in related_fields
                    ],
                    options=model_state.options,
                    bases=model_state.bases,
                    managers=model_state.managers,
                ),
                dependencies=dependencies,
                beginning=True,
            )
            # Don't add operations which modify the database for unmanaged
            # models
            if not model_state.options.get("managed", True):
                continue
            # Generate operations for each related field
            for name, field in sorted(related_fields.items()):
                dependencies = self._get_dependencies_for_foreign_key(
                    app_label,
                    model_name,
                    field,
                    self.to_state,
                )
                # Depend on our own model being created
                dependencies.append(
                    OperationDependency(
                        app_label, model_name, None, OperationDependency.Type.CREATE
                    )
                )
                # Make operation
                self.add_operation(
                    app_label,
                    operations.AddField(
                        model_name=model_name,
                        name=name,
                        field=field,
                    ),
                    # Deduplicate; FK helper may return repeated entries.
                    dependencies=list(set(dependencies)),
                )
            # Generate other opns
            if order_with_respect_to:
                self.add_operation(
                    app_label,
                    operations.AlterOrderWithRespectTo(
                        name=model_name,
                        order_with_respect_to=order_with_respect_to,
                    ),
                    dependencies=[
                        OperationDependency(
                            app_label,
                            model_name,
                            order_with_respect_to,
                            OperationDependency.Type.CREATE,
                        ),
                        OperationDependency(
                            app_label, model_name, None, OperationDependency.Type.CREATE
                        ),
                    ],
                )
            # Deferred index/constraint/unique_together operations must wait
            # for every related field added above.
            related_dependencies = [
                OperationDependency(
                    app_label, model_name, name, OperationDependency.Type.CREATE
                )
                for name in sorted(related_fields)
            ]
            related_dependencies.append(
                OperationDependency(
                    app_label, model_name, None, OperationDependency.Type.CREATE
                )
            )
            for index in indexes:
                self.add_operation(
                    app_label,
                    operations.AddIndex(
                        model_name=model_name,
                        index=index,
                    ),
                    dependencies=related_dependencies,
                )
            for constraint in constraints:
                self.add_operation(
                    app_label,
                    operations.AddConstraint(
                        model_name=model_name,
                        constraint=constraint,
                    ),
                    dependencies=related_dependencies,
                )
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=unique_together,
                    ),
                    dependencies=related_dependencies,
                )
            # Fix relationships if the model changed from a proxy model to a
            # concrete model.
            relations = self.to_state.relations
            if (app_label, model_name) in self.old_proxy_keys:
                for related_model_key, related_fields in relations[
                    app_label, model_name
                ].items():
                    related_model_state = self.to_state.models[related_model_key]
                    for related_field_name, related_field in related_fields.items():
                        self.add_operation(
                            related_model_state.app_label,
                            operations.AlterField(
                                model_name=related_model_state.name,
                                name=related_field_name,
                                field=related_field,
                            ),
                            dependencies=[
                                OperationDependency(
                                    app_label,
                                    model_name,
                                    None,
                                    OperationDependency.Type.CREATE,
                                )
                            ],
                        )
def generate_created_proxies(self):
"""
Make CreateModel statements for proxy models. Use the same statements
as that way there's less code duplication, but for proxy models it's
safe to skip all the pointless field stuff and chuck out an operation.
"""
added = self.new_proxy_keys - self.old_proxy_keys
for app_label, model_name in sorted(added):
model_state = self.to_state.models[app_label, model_name]
assert model_state.options.get("proxy")
# Depend on the deletion of any possible non-proxy version of us
dependencies = [
OperationDependency(
app_label, model_name, None, OperationDependency.Type.REMOVE
),
]
# Depend on all bases
for base in model_state.bases:
if isinstance(base, str) and "." in base:
base_app_label, base_name = base.split(".", 1)
dependencies.append(
OperationDependency(
base_app_label,
base_name,
None,
OperationDependency.Type.CREATE,
)
)
# Generate creation operation
self.add_operation(
app_label,
operations.CreateModel(
name=model_state.name,
fields=[],
options=model_state.options,
bases=model_state.bases,
managers=model_state.managers,
),
# Depend on the deletion of any possible non-proxy version of
# us
dependencies=dependencies,
)
    def generate_deleted_models(self):
        """
        Find all deleted models (managed and unmanaged) and make delete
        operations for them as well as separate operations to delete any
        foreign key or M2M relationships (these are optimized later, if
        possible).
        Also bring forward removal of any model options that refer to
        collections of fields - the inverse of generate_created_models().
        """
        new_keys = self.new_model_keys | self.new_unmanaged_keys
        deleted_models = self.old_model_keys - new_keys
        deleted_unmanaged_models = self.old_unmanaged_keys - new_keys
        all_deleted_models = chain(
            sorted(deleted_models), sorted(deleted_unmanaged_models)
        )
        for app_label, model_name in all_deleted_models:
            model_state = self.from_state.models[app_label, model_name]
            # Gather related fields
            related_fields = {}
            for field_name, field in model_state.fields.items():
                if field.remote_field:
                    if field.remote_field.model:
                        related_fields[field_name] = field
                    if getattr(field.remote_field, "through", None):
                        related_fields[field_name] = field
            # Generate option removal first
            unique_together = model_state.options.pop("unique_together", None)
            if unique_together:
                self.add_operation(
                    app_label,
                    operations.AlterUniqueTogether(
                        name=model_name,
                        unique_together=None,
                    ),
                )
            if indexes := model_state.options.pop("indexes", None):
                for index in indexes:
                    self.add_operation(
                        app_label,
                        operations.RemoveIndex(
                            model_name=model_name,
                            name=index.name,
                        ),
                    )
            if constraints := model_state.options.pop("constraints", None):
                for constraint in constraints:
                    self.add_operation(
                        app_label,
                        operations.RemoveConstraint(
                            model_name=model_name,
                            name=constraint.name,
                        ),
                    )
            # Then remove each related field
            for name in sorted(related_fields):
                self.add_operation(
                    app_label,
                    operations.RemoveField(
                        model_name=model_name,
                        name=name,
                    ),
                    # Indexes/constraints covering the field must go first.
                    dependencies=[
                        OperationDependency(
                            app_label,
                            model_name,
                            name,
                            OperationDependency.Type.REMOVE_INDEX_OR_CONSTRAINT,
                        ),
                    ],
                )
            # Finally, remove the model.
            # This depends on both the removal/alteration of all incoming
            # fields and the removal of all its own related fields, and if it's
            # a through model the field that references it.
            dependencies = []
            relations = self.from_state.relations
            for (
                related_object_app_label,
                object_name,
            ), relation_related_fields in relations[app_label, model_name].items():
                for field_name, field in relation_related_fields.items():
                    dependencies.append(
                        OperationDependency(
                            related_object_app_label,
                            object_name,
                            field_name,
                            OperationDependency.Type.REMOVE,
                        ),
                    )
                    if not field.many_to_many:
                        dependencies.append(
                            OperationDependency(
                                related_object_app_label,
                                object_name,
                                field_name,
                                OperationDependency.Type.ALTER,
                            ),
                        )
            for name in sorted(related_fields):
                dependencies.append(
                    OperationDependency(
                        app_label, model_name, name, OperationDependency.Type.REMOVE
                    )
                )
            # We're referenced in another field's through=
            through_user = self.through_users.get((app_label, model_state.name_lower))
            if through_user:
                dependencies.append(
                    OperationDependency(*through_user, OperationDependency.Type.REMOVE),
                )
            # Finally, make the operation, deduping any dependencies
            self.add_operation(
                app_label,
                operations.DeleteModel(
                    name=model_state.name,
                ),
                dependencies=list(set(dependencies)),
            )
def generate_deleted_proxies(self):
"""Make DeleteModel options for proxy models."""
deleted = self.old_proxy_keys - self.new_proxy_keys
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
assert model_state.options.get("proxy")
self.add_operation(
app_label,
operations.DeleteModel(
name=model_state.name,
),
)
    def create_renamed_fields(self):
        """Work out renamed fields."""
        self.renamed_operations = []
        # Work on a copy: confirmed renames move keys from "removed" to
        # "kept" as we iterate.
        old_field_keys = self.old_field_keys.copy()
        for app_label, model_name, field_name in sorted(
            self.new_field_keys - old_field_keys
        ):
            old_model_name = self.renamed_models.get(
                (app_label, model_name), model_name
            )
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            field = new_model_state.get_field(field_name)
            # Scan to see if this is actually a rename!
            field_dec = self.deep_deconstruct(field)
            for rem_app_label, rem_model_name, rem_field_name in sorted(
                old_field_keys - self.new_field_keys
            ):
                if rem_app_label == app_label and rem_model_name == model_name:
                    old_field = old_model_state.get_field(rem_field_name)
                    old_field_dec = self.deep_deconstruct(old_field)
                    # Remap the "to" argument through any model renames so the
                    # deconstructed forms can compare equal.
                    if (
                        field.remote_field
                        and field.remote_field.model
                        and "to" in old_field_dec[2]
                    ):
                        old_rel_to = old_field_dec[2]["to"]
                        if old_rel_to in self.renamed_models_rel:
                            old_field_dec[2]["to"] = self.renamed_models_rel[old_rel_to]
                    old_field.set_attributes_from_name(rem_field_name)
                    old_db_column = old_field.get_attname_column()[1]
                    if old_field_dec == field_dec or (
                        # Was the field renamed and db_column equal to the
                        # old field's column added?
                        old_field_dec[0:2] == field_dec[0:2]
                        and dict(old_field_dec[2], db_column=old_db_column)
                        == field_dec[2]
                    ):
                        if self.questioner.ask_rename(
                            model_name, rem_field_name, field_name, field
                        ):
                            # Record the rename; RenameField operations are
                            # emitted later by generate_renamed_fields().
                            self.renamed_operations.append(
                                (
                                    rem_app_label,
                                    rem_model_name,
                                    old_field.db_column,
                                    rem_field_name,
                                    app_label,
                                    model_name,
                                    field,
                                    field_name,
                                )
                            )
                            old_field_keys.remove(
                                (rem_app_label, rem_model_name, rem_field_name)
                            )
                            old_field_keys.add((app_label, model_name, field_name))
                            self.renamed_fields[app_label, model_name, field_name] = (
                                rem_field_name
                            )
                            # Each added field matches at most one removal.
                            break
def generate_renamed_fields(self):
"""Generate RenameField operations."""
for (
rem_app_label,
rem_model_name,
rem_db_column,
rem_field_name,
app_label,
model_name,
field,
field_name,
) in self.renamed_operations:
# A db_column mismatch requires a prior noop AlterField for the
# subsequent RenameField to be a noop on attempts at preserving the
# old name.
if rem_db_column != field.db_column:
altered_field = field.clone()
altered_field.name = rem_field_name
self.add_operation(
app_label,
operations.AlterField(
model_name=model_name,
name=rem_field_name,
field=altered_field,
),
)
self.add_operation(
app_label,
operations.RenameField(
model_name=model_name,
old_name=rem_field_name,
new_name=field_name,
),
)
self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name))
self.old_field_keys.add((app_label, model_name, field_name))
def generate_added_fields(self):
"""Make AddField operations."""
for app_label, model_name, field_name in sorted(
self.new_field_keys - self.old_field_keys
):
self._generate_added_field(app_label, model_name, field_name)
    def _generate_added_field(self, app_label, model_name, field_name):
        """Emit a single AddField operation (plus its dependencies)."""
        field = self.to_state.models[app_label, model_name].get_field(field_name)
        # Adding a field always depends at least on its removal.
        dependencies = [
            OperationDependency(
                app_label, model_name, field_name, OperationDependency.Type.REMOVE
            )
        ]
        # Fields that are foreignkeys/m2ms depend on stuff.
        if field.remote_field and field.remote_field.model:
            dependencies.extend(
                self._get_dependencies_for_foreign_key(
                    app_label,
                    model_name,
                    field,
                    self.to_state,
                )
            )
        if field.generated:
            dependencies.extend(self._get_dependencies_for_generated_field(field))
        # You can't just add NOT NULL fields with no default or fields
        # which don't allow empty strings as default.
        time_fields = (models.DateField, models.DateTimeField, models.TimeField)
        auto_fields = (models.AutoField, models.SmallAutoField, models.BigAutoField)
        preserve_default = (
            field.null
            or field.has_default()
            or field.has_db_default()
            or field.many_to_many
            or (field.blank and field.empty_strings_allowed)
            or (isinstance(field, time_fields) and field.auto_now)
            or (isinstance(field, auto_fields))
        )
        if not preserve_default:
            # Ask the questioner for a one-off default; clone first so the
            # state's field object is left untouched.
            field = field.clone()
            if isinstance(field, time_fields) and field.auto_now_add:
                field.default = self.questioner.ask_auto_now_add_addition(
                    field_name, model_name
                )
            else:
                field.default = self.questioner.ask_not_null_addition(
                    field_name, model_name
                )
        if field.unique and field.has_default() and callable(field.default):
            # A callable default on a unique field will likely collide; warn.
            self.questioner.ask_unique_callable_default_addition(field_name, model_name)
        self.add_operation(
            app_label,
            operations.AddField(
                model_name=model_name,
                name=field_name,
                field=field,
                preserve_default=preserve_default,
            ),
            dependencies=dependencies,
        )
def generate_removed_fields(self):
"""Make RemoveField operations."""
for app_label, model_name, field_name in sorted(
self.old_field_keys - self.new_field_keys
):
self._generate_removed_field(app_label, model_name, field_name)
def _generate_removed_field(self, app_label, model_name, field_name):
self.add_operation(
app_label,
operations.RemoveField(
model_name=model_name,
name=field_name,
),
# Include dependencies such as order_with_respect_to, constraints,
# and any generated fields that may depend on this field. These
# are safely ignored if not present.
dependencies=[
OperationDependency(
app_label,
model_name,
field_name,
OperationDependency.Type.REMOVE_ORDER_WRT,
),
OperationDependency(
app_label,
model_name,
field_name,
OperationDependency.Type.ALTER_FOO_TOGETHER,
),
OperationDependency(
app_label,
model_name,
field_name,
OperationDependency.Type.REMOVE_INDEX_OR_CONSTRAINT,
),
*self._get_generated_field_dependencies_for_removed_field(
app_label, model_name, field_name
),
],
)
    def generate_altered_fields(self):
        """
        Make AlterField operations, or possibly RemovedField/AddField if alter
        isn't possible.
        """
        for app_label, model_name, field_name in sorted(
            self.old_field_keys & self.new_field_keys
        ):
            # Did the field change?
            old_model_name = self.renamed_models.get(
                (app_label, model_name), model_name
            )
            old_field_name = self.renamed_fields.get(
                (app_label, model_name, field_name), field_name
            )
            old_field = self.from_state.models[app_label, old_model_name].get_field(
                old_field_name
            )
            new_field = self.to_state.models[app_label, model_name].get_field(
                field_name
            )
            dependencies = []
            # Implement any model renames on relations; these are handled by
            # RenameModel so we need to exclude them from the comparison
            if hasattr(new_field, "remote_field") and getattr(
                new_field.remote_field, "model", None
            ):
                rename_key = resolve_relation(
                    new_field.remote_field.model, app_label, model_name
                )
                if rename_key in self.renamed_models:
                    # Repoint to the old target so a pure model rename doesn't
                    # register as a field change.
                    new_field.remote_field.model = old_field.remote_field.model
                # Handle ForeignKey which can only have a single to_field.
                remote_field_name = getattr(new_field.remote_field, "field_name", None)
                if remote_field_name:
                    to_field_rename_key = (*rename_key, remote_field_name)
                    if to_field_rename_key in self.renamed_fields:
                        # Repoint both model and field name because to_field
                        # inclusion in ForeignKey.deconstruct() is based on
                        # both.
                        new_field.remote_field.model = old_field.remote_field.model
                        new_field.remote_field.field_name = (
                            old_field.remote_field.field_name
                        )
                # Handle ForeignObjects which can have multiple
                # from_fields/to_fields.
                from_fields = getattr(new_field, "from_fields", None)
                if from_fields:
                    from_rename_key = (app_label, model_name)
                    new_field.from_fields = tuple(
                        [
                            self.renamed_fields.get(
                                (*from_rename_key, from_field), from_field
                            )
                            for from_field in from_fields
                        ]
                    )
                    new_field.to_fields = tuple(
                        [
                            self.renamed_fields.get((*rename_key, to_field), to_field)
                            for to_field in new_field.to_fields
                        ]
                    )
                    # Normalize to tuples so deconstruction compares equal.
                    if old_from_fields := getattr(old_field, "from_fields", None):
                        old_field.from_fields = tuple(old_from_fields)
                        old_field.to_fields = tuple(old_field.to_fields)
                dependencies.extend(
                    self._get_dependencies_for_foreign_key(
                        app_label,
                        model_name,
                        new_field,
                        self.to_state,
                    )
                )
            if hasattr(new_field, "remote_field") and getattr(
                new_field.remote_field, "through", None
            ):
                rename_key = resolve_relation(
                    new_field.remote_field.through, app_label, model_name
                )
                if rename_key in self.renamed_models:
                    new_field.remote_field.through = old_field.remote_field.through
            old_field_dec = self.deep_deconstruct(old_field)
            new_field_dec = self.deep_deconstruct(new_field)
            # If the field was confirmed to be renamed it means that only
            # db_column was allowed to change which generate_renamed_fields()
            # already accounts for by adding an AlterField operation.
            if old_field_dec != new_field_dec and old_field_name == field_name:
                both_m2m = old_field.many_to_many and new_field.many_to_many
                neither_m2m = not old_field.many_to_many and not new_field.many_to_many
                if both_m2m or neither_m2m:
                    # Either both fields are m2m or neither is
                    preserve_default = True
                    if (
                        old_field.null
                        and not new_field.null
                        and not new_field.has_default()
                        and not new_field.has_db_default()
                        and not new_field.many_to_many
                    ):
                        # null -> NOT NULL without a default needs a one-off
                        # default from the questioner.
                        field = new_field.clone()
                        new_default = self.questioner.ask_not_null_alteration(
                            field_name, model_name
                        )
                        if new_default is not models.NOT_PROVIDED:
                            field.default = new_default
                            preserve_default = False
                    else:
                        field = new_field
                    self.add_operation(
                        app_label,
                        operations.AlterField(
                            model_name=model_name,
                            name=field_name,
                            field=field,
                            preserve_default=preserve_default,
                        ),
                        dependencies=dependencies,
                    )
                else:
                    # We cannot alter between m2m and concrete fields
                    self._generate_removed_field(app_label, model_name, field_name)
                    self._generate_added_field(app_label, model_name, field_name)
    def create_altered_indexes(self):
        """
        Compute added/removed/renamed indexes for every kept model and stash
        them in self.altered_indexes for the generate_*_indexes() passes.
        """
        option_name = operations.AddIndex.option_name
        self.renamed_index_together_values = defaultdict(list)
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get(
                (app_label, model_name), model_name
            )
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            old_indexes = old_model_state.options[option_name]
            new_indexes = new_model_state.options[option_name]
            added_indexes = [idx for idx in new_indexes if idx not in old_indexes]
            removed_indexes = [idx for idx in old_indexes if idx not in new_indexes]
            renamed_indexes = []
            # Find renamed indexes.
            remove_from_added = []
            remove_from_removed = []
            for new_index in added_indexes:
                new_index_dec = new_index.deconstruct()
                # Pop the name so the rest of the definition can be compared.
                new_index_name = new_index_dec[2].pop("name")
                for old_index in removed_indexes:
                    old_index_dec = old_index.deconstruct()
                    old_index_name = old_index_dec[2].pop("name")
                    # Indexes are the same except for the names.
                    if (
                        new_index_dec == old_index_dec
                        and new_index_name != old_index_name
                    ):
                        renamed_indexes.append((old_index_name, new_index_name, None))
                        remove_from_added.append(new_index)
                        remove_from_removed.append(old_index)
            # Find index_together changed to indexes.
            for (
                old_value,
                new_value,
                index_together_app_label,
                index_together_model_name,
                dependencies,
            ) in self._get_altered_foo_together_operations(
                operations.AlterIndexTogether.option_name
            ):
                if (
                    app_label != index_together_app_label
                    or model_name != index_together_model_name
                ):
                    continue
                removed_values = old_value.difference(new_value)
                for removed_index_together in removed_values:
                    renamed_index_together_indexes = []
                    for new_index in added_indexes:
                        _, args, kwargs = new_index.deconstruct()
                        # Ensure only 'fields' are defined in the Index.
                        if (
                            not args
                            and new_index.fields == list(removed_index_together)
                            and set(kwargs) == {"name", "fields"}
                        ):
                            renamed_index_together_indexes.append(new_index)
                    # Only an unambiguous (single) match counts as a rename.
                    if len(renamed_index_together_indexes) == 1:
                        renamed_index = renamed_index_together_indexes[0]
                        remove_from_added.append(renamed_index)
                        renamed_indexes.append(
                            (None, renamed_index.name, removed_index_together)
                        )
                        self.renamed_index_together_values[
                            index_together_app_label, index_together_model_name
                        ].append(removed_index_together)
            # Remove renamed indexes from the lists of added and removed
            # indexes.
            added_indexes = [
                idx for idx in added_indexes if idx not in remove_from_added
            ]
            removed_indexes = [
                idx for idx in removed_indexes if idx not in remove_from_removed
            ]
            self.altered_indexes.update(
                {
                    (app_label, model_name): {
                        "added_indexes": added_indexes,
                        "removed_indexes": removed_indexes,
                        "renamed_indexes": renamed_indexes,
                    }
                }
            )
def generate_added_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
dependencies = self._get_dependencies_for_model(app_label, model_name)
for index in alt_indexes["added_indexes"]:
self.add_operation(
app_label,
operations.AddIndex(
model_name=model_name,
index=index,
),
dependencies=dependencies,
)
def generate_removed_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for index in alt_indexes["removed_indexes"]:
self.add_operation(
app_label,
operations.RemoveIndex(
model_name=model_name,
name=index.name,
),
)
def generate_renamed_indexes(self):
for (app_label, model_name), alt_indexes in self.altered_indexes.items():
for old_index_name, new_index_name, old_fields in alt_indexes[
"renamed_indexes"
]:
self.add_operation(
app_label,
operations.RenameIndex(
model_name=model_name,
new_name=new_index_name,
old_name=old_index_name,
old_fields=old_fields,
),
)
def _constraint_should_be_dropped_and_recreated(
self, old_constraint, new_constraint
):
old_path, old_args, old_kwargs = old_constraint.deconstruct()
new_path, new_args, new_kwargs = new_constraint.deconstruct()
for attr in old_constraint.non_db_attrs:
old_kwargs.pop(attr, None)
for attr in new_constraint.non_db_attrs:
new_kwargs.pop(attr, None)
# Replace renamed fields if the db_column is preserved.
for (
_,
_,
rem_db_column,
rem_field_name,
_,
_,
field,
field_name,
) in self.renamed_operations:
if field.db_column and rem_db_column == field.db_column:
new_fields = new_kwargs["fields"]
try:
new_field_idx = new_fields.index(field_name)
except ValueError:
continue
new_kwargs["fields"] = tuple(
new_fields[:new_field_idx]
+ (rem_field_name,)
+ new_fields[new_field_idx + 1 :]
)
return (old_path, old_args, old_kwargs) != (new_path, new_args, new_kwargs)
    def create_altered_constraints(self):
        """
        Compute added/removed/altered constraints for every kept model and
        stash them in self.altered_constraints for the generate_*_constraints()
        passes.
        """
        option_name = operations.AddConstraint.option_name
        for app_label, model_name in sorted(self.kept_model_keys):
            old_model_name = self.renamed_models.get(
                (app_label, model_name), model_name
            )
            old_model_state = self.from_state.models[app_label, old_model_name]
            new_model_state = self.to_state.models[app_label, model_name]
            old_constraints = old_model_state.options[option_name]
            new_constraints = new_model_state.options[option_name]
            alt_constraints = []
            alt_constraints_name = []
            for old_c in old_constraints:
                for new_c in new_constraints:
                    old_c_dec = old_c.deconstruct()
                    new_c_dec = new_c.deconstruct()
                    # Same name but different definition, and the difference
                    # has no database impact -> express it as AlterConstraint.
                    if (
                        old_c_dec != new_c_dec
                        and old_c.name == new_c.name
                        and not self._constraint_should_be_dropped_and_recreated(
                            old_c, new_c
                        )
                    ):
                        alt_constraints.append(new_c)
                        alt_constraints_name.append(new_c.name)
            # Everything else that changed is a drop and/or re-create.
            add_constraints = [
                c
                for c in new_constraints
                if c not in old_constraints and c.name not in alt_constraints_name
            ]
            rem_constraints = [
                c
                for c in old_constraints
                if c not in new_constraints and c.name not in alt_constraints_name
            ]
            self.altered_constraints.update(
                {
                    (app_label, model_name): {
                        "added_constraints": add_constraints,
                        "removed_constraints": rem_constraints,
                        "altered_constraints": alt_constraints,
                    }
                }
            )
def generate_added_constraints(self):
for (
app_label,
model_name,
), alt_constraints in self.altered_constraints.items():
dependencies = self._get_dependencies_for_model(app_label, model_name)
for constraint in alt_constraints["added_constraints"]:
self.add_operation(
app_label,
operations.AddConstraint(
model_name=model_name,
constraint=constraint,
),
dependencies=dependencies,
)
def generate_removed_constraints(self):
for (
app_label,
model_name,
), alt_constraints in self.altered_constraints.items():
for constraint in alt_constraints["removed_constraints"]:
self.add_operation(
app_label,
operations.RemoveConstraint(
model_name=model_name,
name=constraint.name,
),
)
def generate_altered_constraints(self):
for (
app_label,
model_name,
), alt_constraints in self.altered_constraints.items():
dependencies = self._get_dependencies_for_model(app_label, model_name)
for constraint in alt_constraints["altered_constraints"]:
self.add_operation(
app_label,
operations.AlterConstraint(
model_name=model_name,
name=constraint.name,
constraint=constraint,
),
dependencies=dependencies,
)
    @staticmethod
    def _get_dependencies_for_foreign_key(app_label, model_name, field, project_state):
        """
        Return the OperationDependency list a relational field needs: the
        remote model (or its swappable setting) and, for m2m, the through
        model.
        """
        remote_field_model = None
        if hasattr(field.remote_field, "model"):
            remote_field_model = field.remote_field.model
        else:
            # No "model" attr: search the state's relation map for the model
            # this field points at.
            relations = project_state.relations[app_label, model_name]
            for (remote_app_label, remote_model_name), fields in relations.items():
                if any(
                    field == related_field.remote_field
                    for related_field in fields.values()
                ):
                    remote_field_model = f"{remote_app_label}.{remote_model_name}"
                    break
        # Account for FKs to swappable models
        swappable_setting = getattr(field, "swappable_setting", None)
        if swappable_setting is not None:
            # Depend on the setting, not the concrete model it resolves to.
            dep_app_label = "__setting__"
            dep_object_name = swappable_setting
        else:
            dep_app_label, dep_object_name = resolve_relation(
                remote_field_model,
                app_label,
                model_name,
            )
        dependencies = [
            OperationDependency(
                dep_app_label, dep_object_name, None, OperationDependency.Type.CREATE
            )
        ]
        if getattr(field.remote_field, "through", None):
            # M2M with an explicit through model: that model must exist too.
            through_app_label, through_object_name = resolve_relation(
                field.remote_field.through,
                app_label,
                model_name,
            )
            dependencies.append(
                OperationDependency(
                    through_app_label,
                    through_object_name,
                    None,
                    OperationDependency.Type.CREATE,
                )
            )
        return dependencies
    def _get_dependencies_for_generated_field(self, field):
        """
        Return CREATE dependencies for a generated field: every newly added
        field its expression references, plus any newly added relational field.
        """
        dependencies = []
        # Base (non-lookup) field names referenced by the expression.
        referenced_base_fields = [
            name
            for name, *lookups in models.Model._get_expr_references(field.expression)
        ]
        newly_added_fields = sorted(self.new_field_keys - self.old_field_keys)
        for app_label, model_name, added_field_name in newly_added_fields:
            added_field = self.to_state.models[app_label, model_name].get_field(
                added_field_name
            )
            # Relational fields are depended on unconditionally because the
            # expression may reference them through the attname ("fk_id").
            if (
                added_field.remote_field and added_field.remote_field.model
            ) or added_field.name in referenced_base_fields:
                dependencies.append(
                    OperationDependency(
                        app_label,
                        model_name,
                        added_field.name,
                        OperationDependency.Type.CREATE,
                    )
                )
        return dependencies
    def _get_generated_field_dependencies_for_removed_field(
        self, app_label, model_name, field_name
    ):
        """
        Return REMOVE dependencies on every generated field of the model whose
        expression references *field_name* — those must be removed first.
        """
        dependencies = []
        model_state = self.from_state.models[app_label, model_name]
        generated_fields = (f for f in model_state.fields.values() if f.generated)
        for field in generated_fields:
            # Match on the base name of each expression reference; lookups
            # and transforms after the name are irrelevant here.
            if any(
                field_name == name
                for name, *_ in models.Model._get_expr_references(field.expression)
            ):
                dependencies.append(
                    OperationDependency(
                        app_label,
                        model_name,
                        field.name,
                        OperationDependency.Type.REMOVE,
                    )
                )
        return dependencies
def _get_dependencies_for_model(self, app_label, model_name):
"""Return foreign key dependencies of the given model."""
dependencies = []
model_state = self.to_state.models[app_label, model_name]
for field in model_state.fields.values():
if field.is_relation:
dependencies.extend(
self._get_dependencies_for_foreign_key(
app_label,
model_name,
field,
self.to_state,
)
)
return dependencies
def _get_altered_foo_together_operations(self, option_name):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
# We run the old version through the field renames to account for
# those
old_value = old_model_state.options.get(option_name)
old_value = (
{
tuple(
self.renamed_fields.get((app_label, model_name, n), n)
for n in unique
)
for unique in old_value
}
if old_value
else set()
)
new_value = new_model_state.options.get(option_name)
new_value = set(new_value) if new_value else set()
if old_value != new_value:
dependencies = []
for foo_togethers in new_value:
for field_name in foo_togethers:
field = new_model_state.get_field(field_name)
if field.remote_field and field.remote_field.model:
dependencies.extend(
self._get_dependencies_for_foreign_key(
app_label,
model_name,
field,
self.to_state,
)
)
yield (
old_value,
new_value,
app_label,
model_name,
dependencies,
)
def _generate_removed_altered_foo_together(self, operation):
for (
old_value,
new_value,
app_label,
model_name,
dependencies,
) in self._get_altered_foo_together_operations(operation.option_name):
if operation == operations.AlterIndexTogether:
old_value = {
value
for value in old_value
if value
not in self.renamed_index_together_values[app_label, model_name]
}
removal_value = new_value.intersection(old_value)
if removal_value or old_value:
self.add_operation(
app_label,
operation(
name=model_name, **{operation.option_name: removal_value}
),
dependencies=dependencies,
)
def generate_removed_altered_unique_together(self):
self._generate_removed_altered_foo_together(operations.AlterUniqueTogether)
def _generate_altered_foo_together(self, operation):
for (
old_value,
new_value,
app_label,
model_name,
dependencies,
) in self._get_altered_foo_together_operations(operation.option_name):
removal_value = new_value.intersection(old_value)
if new_value != removal_value:
self.add_operation(
app_label,
operation(name=model_name, **{operation.option_name: new_value}),
dependencies=dependencies,
)
def generate_altered_unique_together(self):
self._generate_altered_foo_together(operations.AlterUniqueTogether)
def generate_altered_db_table(self):
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys, self.kept_unmanaged_keys
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_name = old_model_state.options.get("db_table")
new_db_table_name = new_model_state.options.get("db_table")
if old_db_table_name != new_db_table_name:
self.add_operation(
app_label,
operations.AlterModelTable(
name=model_name,
table=new_db_table_name,
),
)
def generate_altered_db_table_comment(self):
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys, self.kept_unmanaged_keys
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_db_table_comment = old_model_state.options.get("db_table_comment")
new_db_table_comment = new_model_state.options.get("db_table_comment")
if old_db_table_comment != new_db_table_comment:
self.add_operation(
app_label,
operations.AlterModelTableComment(
name=model_name,
table_comment=new_db_table_comment,
),
)
def generate_altered_options(self):
"""
Work out if any non-schema-affecting options have changed and make an
operation to represent them in state changes (in case Python code in
migrations needs them).
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys,
self.kept_unmanaged_keys,
# unmanaged converted to managed
self.old_unmanaged_keys & self.new_model_keys,
# managed converted to unmanaged
self.old_model_keys & self.new_unmanaged_keys,
)
for app_label, model_name in sorted(models_to_check):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
old_options = {
key: value
for key, value in old_model_state.options.items()
if key in AlterModelOptions.ALTER_OPTION_KEYS
}
new_options = {
key: value
for key, value in new_model_state.options.items()
if key in AlterModelOptions.ALTER_OPTION_KEYS
}
if old_options != new_options:
self.add_operation(
app_label,
operations.AlterModelOptions(
name=model_name,
options=new_options,
),
)
def generate_altered_order_with_respect_to(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.options.get(
"order_with_respect_to"
) != new_model_state.options.get("order_with_respect_to"):
# Make sure it comes second if we're adding
# (removal dependency is part of RemoveField)
dependencies = []
if new_model_state.options.get("order_with_respect_to"):
dependencies.append(
OperationDependency(
app_label,
model_name,
new_model_state.options["order_with_respect_to"],
OperationDependency.Type.CREATE,
)
)
# Actually generate the operation
self.add_operation(
app_label,
operations.AlterOrderWithRespectTo(
name=model_name,
order_with_respect_to=new_model_state.options.get(
"order_with_respect_to"
),
),
dependencies=dependencies,
)
def generate_altered_managers(self):
for app_label, model_name in sorted(self.kept_model_keys):
old_model_name = self.renamed_models.get(
(app_label, model_name), model_name
)
old_model_state = self.from_state.models[app_label, old_model_name]
new_model_state = self.to_state.models[app_label, model_name]
if old_model_state.managers != new_model_state.managers:
self.add_operation(
app_label,
operations.AlterModelManagers(
name=model_name,
managers=new_model_state.managers,
),
)
def arrange_for_graph(self, changes, graph, migration_name=None):
"""
Take a result from changes() and a MigrationGraph, and fix the names
and dependencies of the changes so they extend the graph from the leaf
nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
new_name_parts = ["%04i" % next_number]
if migration_name:
new_name_parts.append(migration_name)
elif i == 0 and not app_leaf:
new_name_parts.append("initial")
else:
new_name_parts.append(migration.suggest_name()[:100])
new_name = "_".join(new_name_parts)
name_map[(app_label, migration.name)] = (app_label, new_name)
next_number += 1
migration.name = new_name
# Now fix dependencies
for migrations in changes.values():
for migration in migrations:
migration.dependencies = [
name_map.get(d, d) for d in migration.dependencies
]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
Take changes from arrange_for_graph() and set of app labels, and return
a modified set of changes which trims out as many migrations that are
not in app_labels as possible. Note that some other migrations may
still be present as they may be required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
required_apps.update(
*[app_dependencies.get(app_label, ()) for app_label in required_apps]
)
# Remove all migrations that aren't needed
for app_label in list(changes):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def parse_number(cls, name):
"""
Given a migration name, try to extract a number from the beginning of
it. For a squashed migration such as '0001_squashed_0004…', return the
second number. If no number is found, return None.
"""
if squashed_match := re.search(r".*_squashed_(\d+)", name):
return int(squashed_match[1])
match = re.match(r"^\d+", name)
if match:
return int(match[0])
return None
| MigrationAutodetector |
python | huggingface__transformers | src/transformers/models/nougat/image_processing_nougat.py | {
"start": 1535,
"end": 2090
} | class ____(ImagesKwargs, total=False):
r"""
do_crop_margin (`bool`, *optional*, defaults to `True`):
Whether to crop the image margins.
do_thumbnail (`bool`, *optional*, defaults to `True`):
Whether to resize the image using thumbnail method.
do_align_long_axis (`bool`, *optional*, defaults to `False`):
Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.
"""
do_crop_margin: bool
do_thumbnail: bool
do_align_long_axis: bool
| NougatImageProcessorKwargs |
python | pydata__xarray | xarray/computation/arithmetic.py | {
"start": 4155,
"end": 4263
} | class ____(
SupportsArithmetic,
DatasetGroupByOpsMixin,
):
__slots__ = ()
| DatasetGroupbyArithmetic |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/sparse/csr_sparse_matrix_sparse_mat_mul_grad_test.py | {
"start": 1817,
"end": 5045
} | class ____(test.TestCase):
@classmethod
def setUpClass(cls):
super(CSRSparseMatrixGradTest, cls).setUpClass()
cls._gpu_available = test_util.is_gpu_available()
# TODO(penporn): Make these tests runnable on eager mode.
# (tf.gradients and gradient_checker only run in graph mode.)
@test_util.run_deprecated_v1
def _testLargeBatchSparseMatrixSparseMatMulGrad(self, datatype, transpose_a,
transpose_b, adjoint_a,
adjoint_b):
if not self._gpu_available:
return
sparsify = lambda m: m * (m > 0)
a_mats_val = sparsify(
np.random.randn(3, 5, 11) +
1.j * np.random.randn(3, 5, 11)).astype(datatype)
if transpose_a or adjoint_a:
a_mats_val = np.transpose(a_mats_val, (0, 2, 1))
if adjoint_a:
a_mats_val = np.conj(a_mats_val)
b_mats_val = sparsify(
np.random.randn(3, 11, 13) +
1.j * np.random.randn(3, 11, 13)).astype(datatype)
if transpose_b or adjoint_b:
b_mats_val = np.transpose(b_mats_val, (0, 2, 1))
if adjoint_b:
b_mats_val = np.conj(b_mats_val)
with self.test_session():
a_mats, a_sm = dense_and_sparse_from_vals(a_mats_val, datatype)
b_mats, b_sm = dense_and_sparse_from_vals(b_mats_val, datatype)
c_sm = sparse_csr_matrix_ops.sparse_matrix_sparse_mat_mul(
a_sm,
b_sm,
transpose_a=transpose_a,
transpose_b=transpose_b,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b,
type=datatype)
c_dense = sparse_csr_matrix_ops.csr_sparse_matrix_to_dense(
c_sm, type=datatype)
for ten, val, nn in [[a_mats, a_mats_val, "a"], [b_mats, b_mats_val,
"b"]]:
tf_logging.info("Testing gradients for %s" % nn)
theoretical, numerical = gradient_checker.compute_gradient(
ten,
ten.get_shape().as_list(),
c_dense,
c_dense.get_shape().as_list(),
x_init_value=val,
delta=1e-3)
self.assertAllClose(theoretical, numerical, atol=1e-3, rtol=1e-3)
# These tests are refactored from sparse_csr_matrix_grad_test to keep its size
# "medium".
for dtype in (np.float32, np.complex64):
for (t_a, t_b, adj_a, adj_b) in itertools.product(*(([False, True],) * 4)):
def create_sparse_mat_mul_test_fn(dtype_, t_a_, t_b_, adj_a_, adj_b_):
# Skip invalid cases.
if (t_a_ and adj_a_) or (t_b_ and adj_b_):
return
# Skip cases where we conjugate real matrices.
if dtype_ == np.float32 and (adj_a_ or adj_b_):
return
def test_fn(self):
self._testLargeBatchSparseMatrixSparseMatMulGrad(
dtype_, t_a_, t_b_, adj_a_, adj_b_)
return test_fn
name = (
"_testLargeBatchSparseMatrixSparseMatMulGrad_dtype_%s_t_a_%s_t_b_%s_"
"adj_a_%s_adj_b_%s" % (dtype.__name__, t_a, t_b, adj_a, adj_b))
_add_test(CSRSparseMatrixGradTest, "CSRSparseMatrixSparseGradTest", name,
create_sparse_mat_mul_test_fn(dtype, t_a, t_b, adj_a, adj_b))
if __name__ == "__main__":
test.main()
| CSRSparseMatrixGradTest |
python | ray-project__ray | python/ray/tune/tests/test_searchers.py | {
"start": 10914,
"end": 16523
} | class ____(unittest.TestCase):
"""
Test add_evaluated_point method in searchers that support it.
"""
def setUp(self):
self.param_name = "report"
self.valid_value = 1.0
self.space = {self.param_name: tune.uniform(0.0, 5.0)}
self.analysis = tune.run(
_dummy_objective,
config=self.space,
metric="metric",
num_samples=4,
verbose=0,
)
def tearDown(self):
pass
@classmethod
def setUpClass(cls):
ray.init(num_cpus=4, num_gpus=0, include_dashboard=False)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def run_add_evaluated_point(self, point, searcher, get_len_X, get_len_y):
searcher = deepcopy(searcher)
len_X = get_len_X(searcher)
len_y = get_len_y(searcher)
self.assertEqual(len_X, 0)
self.assertEqual(len_y, 0)
searcher.add_evaluated_point(point, 1.0)
len_X = get_len_X(searcher)
len_y = get_len_y(searcher)
self.assertEqual(len_X, 1)
self.assertEqual(len_y, 1)
searcher.suggest("1")
def run_add_evaluated_trials(self, searcher, get_len_X, get_len_y):
searcher_copy = deepcopy(searcher)
searcher_copy.add_evaluated_trials(self.analysis, "metric")
self.assertEqual(get_len_X(searcher_copy), 4)
self.assertEqual(get_len_y(searcher_copy), 4)
searcher_copy.suggest("1")
searcher_copy = deepcopy(searcher)
searcher_copy.add_evaluated_trials(self.analysis.trials, "metric")
self.assertEqual(get_len_X(searcher_copy), 4)
self.assertEqual(get_len_y(searcher_copy), 4)
searcher_copy.suggest("1")
searcher_copy = deepcopy(searcher)
searcher_copy.add_evaluated_trials(self.analysis.trials[0], "metric")
self.assertEqual(get_len_X(searcher_copy), 1)
self.assertEqual(get_len_y(searcher_copy), 1)
searcher_copy.suggest("1")
def testOptuna(self):
from optuna.storages import JournalStorage
from optuna.storages.journal import JournalFileBackend
from optuna.trial import TrialState
from ray.tune.search.optuna import OptunaSearch
# OptunaSearch with in-memory storage
searcher = OptunaSearch(
space=self.space,
storage=None,
metric="metric",
mode="max",
points_to_evaluate=[{self.param_name: self.valid_value}],
evaluated_rewards=[1.0],
)
get_len = lambda s: len(s._ot_study.trials) # noqa E731
self.assertGreater(get_len(searcher), 0)
# OptunaSearch with external storage
storage_file_path = "/tmp/my_test_study.log"
searcher = OptunaSearch(
space=self.space,
study_name="my_test_study",
storage=JournalStorage(JournalFileBackend(file_path=storage_file_path)),
metric="metric",
mode="max",
points_to_evaluate=[{self.param_name: self.valid_value}],
evaluated_rewards=[1.0],
)
get_len = lambda s: len(s._ot_study.trials) # noqa E731
self.assertGreater(get_len(searcher), 0)
self.assertTrue(os.path.exists(storage_file_path))
searcher = OptunaSearch(
space=self.space,
metric="metric",
mode="max",
)
point = {
self.param_name: self.valid_value,
}
self.assertEqual(get_len(searcher), 0)
searcher.add_evaluated_point(point, 1.0, intermediate_values=[0.8, 0.9])
self.assertEqual(get_len(searcher), 1)
self.assertTrue(searcher._ot_study.trials[-1].state == TrialState.COMPLETE)
searcher.add_evaluated_point(
point, 1.0, intermediate_values=[0.8, 0.9], error=True
)
self.assertEqual(get_len(searcher), 2)
self.assertTrue(searcher._ot_study.trials[-1].state == TrialState.FAIL)
searcher.add_evaluated_point(
point, 1.0, intermediate_values=[0.8, 0.9], pruned=True
)
self.assertEqual(get_len(searcher), 3)
self.assertTrue(searcher._ot_study.trials[-1].state == TrialState.PRUNED)
searcher.suggest("1")
searcher = OptunaSearch(
space=self.space,
metric="metric",
mode="max",
)
self.run_add_evaluated_trials(searcher, get_len, get_len)
def dbr_space(trial):
return {self.param_name: trial.suggest_float(self.param_name, 0.0, 5.0)}
dbr_searcher = OptunaSearch(
space=dbr_space,
metric="metric",
mode="max",
)
with self.assertRaises(TypeError):
dbr_searcher.add_evaluated_point(point, 1.0)
@pytest.mark.skipif(
sys.version_info >= (3, 12), reason="HEBO doesn't support py312"
)
def testHEBO(self):
if Version(pandas.__version__) >= Version("2.0.0"):
pytest.skip("HEBO does not support pandas>=2.0.0")
from ray.tune.search.hebo import HEBOSearch
searcher = HEBOSearch(
space=self.space,
metric="metric",
mode="max",
)
point = {
self.param_name: self.valid_value,
}
get_len_X = lambda s: len(s._opt.X) # noqa E731
get_len_y = lambda s: len(s._opt.y) # noqa E731
self.run_add_evaluated_point(point, searcher, get_len_X, get_len_y)
self.run_add_evaluated_trials(searcher, get_len_X, get_len_y)
| AddEvaluatedPointTest |
python | huggingface__transformers | src/transformers/models/voxtral/processing_voxtral.py | {
"start": 1448,
"end": 1956
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding": True,
},
"audio_kwargs": {
"sampling_rate": 16000,
"padding": True,
"truncation": False,
"pad_to_multiple_of": 480000,
"max_source_positions": 3000,
},
"common_kwargs": {
"return_tensors": "pt",
"return_dict": True,
"tokenize": True,
},
}
| VoxtralProcessorKwargs |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_device_class_spec.py | {
"start": 383,
"end": 7302
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'config': 'list[V1beta1DeviceClassConfiguration]',
'extended_resource_name': 'str',
'selectors': 'list[V1beta1DeviceSelector]'
}
attribute_map = {
'config': 'config',
'extended_resource_name': 'extendedResourceName',
'selectors': 'selectors'
}
def __init__(self, config=None, extended_resource_name=None, selectors=None, local_vars_configuration=None): # noqa: E501
"""V1beta1DeviceClassSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config = None
self._extended_resource_name = None
self._selectors = None
self.discriminator = None
if config is not None:
self.config = config
if extended_resource_name is not None:
self.extended_resource_name = extended_resource_name
if selectors is not None:
self.selectors = selectors
@property
def config(self):
"""Gets the config of this V1beta1DeviceClassSpec. # noqa: E501
Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver. They are passed to the driver, but are not considered while allocating the claim. # noqa: E501
:return: The config of this V1beta1DeviceClassSpec. # noqa: E501
:rtype: list[V1beta1DeviceClassConfiguration]
"""
return self._config
@config.setter
def config(self, config):
"""Sets the config of this V1beta1DeviceClassSpec.
Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver. They are passed to the driver, but are not considered while allocating the claim. # noqa: E501
:param config: The config of this V1beta1DeviceClassSpec. # noqa: E501
:type: list[V1beta1DeviceClassConfiguration]
"""
self._config = config
@property
def extended_resource_name(self):
"""Gets the extended_resource_name of this V1beta1DeviceClassSpec. # noqa: E501
ExtendedResourceName is the extended resource name for the devices of this class. The devices of this class can be used to satisfy a pod's extended resource requests. It has the same format as the name of a pod's extended resource. It should be unique among all the device classes in a cluster. If two device classes have the same name, then the class created later is picked to satisfy a pod's extended resource requests. If two classes are created at the same time, then the name of the class lexicographically sorted first is picked. This is an alpha field. # noqa: E501
:return: The extended_resource_name of this V1beta1DeviceClassSpec. # noqa: E501
:rtype: str
"""
return self._extended_resource_name
@extended_resource_name.setter
def extended_resource_name(self, extended_resource_name):
"""Sets the extended_resource_name of this V1beta1DeviceClassSpec.
ExtendedResourceName is the extended resource name for the devices of this class. The devices of this class can be used to satisfy a pod's extended resource requests. It has the same format as the name of a pod's extended resource. It should be unique among all the device classes in a cluster. If two device classes have the same name, then the class created later is picked to satisfy a pod's extended resource requests. If two classes are created at the same time, then the name of the class lexicographically sorted first is picked. This is an alpha field. # noqa: E501
:param extended_resource_name: The extended_resource_name of this V1beta1DeviceClassSpec. # noqa: E501
:type: str
"""
self._extended_resource_name = extended_resource_name
@property
def selectors(self):
"""Gets the selectors of this V1beta1DeviceClassSpec. # noqa: E501
Each selector must be satisfied by a device which is claimed via this class. # noqa: E501
:return: The selectors of this V1beta1DeviceClassSpec. # noqa: E501
:rtype: list[V1beta1DeviceSelector]
"""
return self._selectors
@selectors.setter
def selectors(self, selectors):
"""Sets the selectors of this V1beta1DeviceClassSpec.
Each selector must be satisfied by a device which is claimed via this class. # noqa: E501
:param selectors: The selectors of this V1beta1DeviceClassSpec. # noqa: E501
:type: list[V1beta1DeviceSelector]
"""
self._selectors = selectors
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1DeviceClassSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1DeviceClassSpec):
return True
return self.to_dict() != other.to_dict()
| V1beta1DeviceClassSpec |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_embedding_v2_utils_test.py | {
"start": 5811,
"end": 7336
} | class ____(test.TestCase):
def test_table_config_repr(self):
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=2, dim=4,
combiner='sum', name='table')
self.assertEqual(
repr(table),
'TableConfig(vocabulary_size=2, dim=4, initializer=None, '
'optimizer=None, combiner=\'sum\', name=\'table\', '
'quantization_config=None)')
def test_feature_config_repr(self):
table = tpu_embedding_v2_utils.TableConfig(
vocabulary_size=2, dim=4, initializer=None,
combiner='sum', name='table')
feature_config = tpu_embedding_v2_utils.FeatureConfig(
table=table, output_shape=[16, 4], name='feature')
self.assertEqual(
repr(feature_config),
'FeatureConfig(table=TableConfig(vocabulary_size=2, dim=4, '
'initializer=None, optimizer=None, combiner=\'sum\', '
'name=\'table\', quantization_config=None), max_sequence_length=0, '
'validate_weights_and_indices=True, output_shape=TensorShape([16, 4]), '
'name=\'feature\')')
def test_quantization_config_num_buckets(self):
with self.assertRaisesRegex(ValueError, 'num_buckets'):
tpu_embedding_v2_utils.QuantizationConfig(0, -1, 1)
def test_quantization_config_repr(self):
quantization_config = tpu_embedding_v2_utils.QuantizationConfig(
num_buckets=10, lower=-1.0, upper=1.0)
self.assertEqual(
repr(quantization_config),
'QuantizationConfig(num_buckets=10, lower=-1.0, upper=1.0)')
| ConfigTest |
python | rapidsai__cudf | python/cudf/cudf/core/udf/masked_typing.py | {
"start": 7061,
"end": 7456
} | class ____(models.StructModel):
def __init__(self, dmm, fe_type):
# This struct has two members, a value and a validity
# let the type of the `value` field be the same as the
# `value_type` and let `valid` be a boolean
members = [("value", fe_type.value_type), ("valid", types.bool_)]
models.StructModel.__init__(self, dmm, fe_type, members)
| MaskedModel |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_doughnut05.py | {
"start": 315,
"end": 1237
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_doughnut05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "doughnut"})
data = [
[2, 4, 6],
[60, 30, 10],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$3",
"values": "=Sheet1!$B$1:$B$3",
}
)
chart.set_rotation(360)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | bokeh__bokeh | src/bokeh/models/dom.py | {
"start": 8542,
"end": 10265
} | class ____(DOMElement):
""" A parsed HTML fragment with optional references to DOM nodes and UI elements. """
def __init__(self, *html: str | DOMNode | UIElement, **kwargs: Any) -> None:
if html and "html" in kwargs:
raise TypeError("'html' argument specified multiple times")
processed_html: Init[str | list[str | DOMNode | UIElement]]
if not html:
processed_html = kwargs.pop("html", Intrinsic)
else:
processed_html = list(html)
super().__init__(html=processed_html, **kwargs)
html = Required(Either(String, List(Either(String, Instance(DOMNode), Instance(UIElement)))), help="""
Either a parsed HTML string with optional references to Bokeh objects using
``<ref id="..."></ref>`` syntax. Or a list of parsed HTML interleaved with
Bokeh's objects. Any DOM node or UI element (even a plot) can be referenced
here.
""")
refs = List(Either(String, Instance(DOMNode), Instance(UIElement)), default=[], help="""
A collection of objects referenced by ``<ref id="..."></ref>`` from `the `html`` property.
Objects already included by instance in ``html`` don't have to be repeated here.
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| HTML |
python | modin-project__modin | modin/core/execution/ray/implementations/pandas_on_ray/partitioning/partition.py | {
"start": 14303,
"end": 15533
} | class ____(MaterializationHook):
"""
Used by mask() for the slilced length computation.
Parameters
----------
ref : ObjectIDType
Non-materialized length to be sliced.
slc : slice
The slice to be applied.
"""
def __init__(self, ref: ObjectIDType, slc: slice):
self.ref = ref
self.slc = slc
def pre_materialize(self):
"""
Get the sliced length or object ref if not materialized.
Returns
-------
int or ObjectIDType
"""
if isinstance(self.ref, MetaListHook):
len_or_ref = self.ref.pre_materialize()
return (
compute_sliced_len(self.slc, len_or_ref)
if isinstance(len_or_ref, int)
else len_or_ref
)
return self.ref
def post_materialize(self, materialized):
"""
Get the sliced length.
Parameters
----------
materialized : list or int
Returns
-------
int
"""
if isinstance(self.ref, MetaListHook):
materialized = self.ref.post_materialize(materialized)
return compute_sliced_len(self.slc, materialized)
| SlicerHook |
python | openai__openai-python | examples/realtime/push_to_talk_app.py | {
"start": 2051,
"end": 9480
} | class ____(App[None]):
CSS = """
Screen {
background: #1a1b26; /* Dark blue-grey background */
}
Container {
border: double rgb(91, 164, 91);
}
Horizontal {
width: 100%;
}
#input-container {
height: 5; /* Explicit height for input container */
margin: 1 1;
padding: 1 2;
}
Input {
width: 80%;
height: 3; /* Explicit height for input */
}
Button {
width: 20%;
height: 3; /* Explicit height for button */
}
#bottom-pane {
width: 100%;
height: 82%; /* Reduced to make room for session display */
border: round rgb(205, 133, 63);
content-align: center middle;
}
#status-indicator {
height: 3;
content-align: center middle;
background: #2a2b36;
border: solid rgb(91, 164, 91);
margin: 1 1;
}
#session-display {
height: 3;
content-align: center middle;
background: #2a2b36;
border: solid rgb(91, 164, 91);
margin: 1 1;
}
Static {
color: white;
}
"""
client: AsyncOpenAI
should_send_audio: asyncio.Event
audio_player: AudioPlayerAsync
last_audio_item_id: str | None
connection: AsyncRealtimeConnection | None
session: Session | None
connected: asyncio.Event
def __init__(self) -> None:
super().__init__()
self.connection = None
self.session = None
self.client = AsyncOpenAI()
self.audio_player = AudioPlayerAsync()
self.last_audio_item_id = None
self.should_send_audio = asyncio.Event()
self.connected = asyncio.Event()
@override
def compose(self) -> ComposeResult:
"""Create child widgets for the app."""
with Container():
yield SessionDisplay(id="session-display")
yield AudioStatusIndicator(id="status-indicator")
yield RichLog(id="bottom-pane", wrap=True, highlight=True, markup=True)
async def on_mount(self) -> None:
self.run_worker(self.handle_realtime_connection())
self.run_worker(self.send_mic_audio())
async def handle_realtime_connection(self) -> None:
async with self.client.realtime.connect(model="gpt-realtime") as conn:
self.connection = conn
self.connected.set()
# note: this is the default and can be omitted
# if you want to manually handle VAD yourself, then set `'turn_detection': None`
await conn.session.update(
session={
"audio": {
"input": {"turn_detection": {"type": "server_vad"}},
},
"model": "gpt-realtime",
"type": "realtime",
}
)
acc_items: dict[str, Any] = {}
async for event in conn:
if event.type == "session.created":
self.session = event.session
session_display = self.query_one(SessionDisplay)
assert event.session.id is not None
session_display.session_id = event.session.id
continue
if event.type == "session.updated":
self.session = event.session
continue
if event.type == "response.output_audio.delta":
if event.item_id != self.last_audio_item_id:
self.audio_player.reset_frame_count()
self.last_audio_item_id = event.item_id
bytes_data = base64.b64decode(event.delta)
self.audio_player.add_data(bytes_data)
continue
if event.type == "response.output_audio_transcript.delta":
try:
text = acc_items[event.item_id]
except KeyError:
acc_items[event.item_id] = event.delta
else:
acc_items[event.item_id] = text + event.delta
# Clear and update the entire content because RichLog otherwise treats each delta as a new line
bottom_pane = self.query_one("#bottom-pane", RichLog)
bottom_pane.clear()
bottom_pane.write(acc_items[event.item_id])
continue
async def _get_connection(self) -> AsyncRealtimeConnection:
await self.connected.wait()
assert self.connection is not None
return self.connection
async def send_mic_audio(self) -> None:
import sounddevice as sd # type: ignore
sent_audio = False
device_info = sd.query_devices()
print(device_info)
read_size = int(SAMPLE_RATE * 0.02)
stream = sd.InputStream(
channels=CHANNELS,
samplerate=SAMPLE_RATE,
dtype="int16",
)
stream.start()
status_indicator = self.query_one(AudioStatusIndicator)
try:
while True:
if stream.read_available < read_size:
await asyncio.sleep(0)
continue
await self.should_send_audio.wait()
status_indicator.is_recording = True
data, _ = stream.read(read_size)
connection = await self._get_connection()
if not sent_audio:
asyncio.create_task(connection.send({"type": "response.cancel"}))
sent_audio = True
await connection.input_audio_buffer.append(audio=base64.b64encode(cast(Any, data)).decode("utf-8"))
await asyncio.sleep(0)
except KeyboardInterrupt:
pass
finally:
stream.stop()
stream.close()
async def on_key(self, event: events.Key) -> None:
"""Handle key press events."""
if event.key == "enter":
self.query_one(Button).press()
return
if event.key == "q":
self.exit()
return
if event.key == "k":
status_indicator = self.query_one(AudioStatusIndicator)
if status_indicator.is_recording:
self.should_send_audio.clear()
status_indicator.is_recording = False
if self.session and self.session.turn_detection is None:
# The default in the API is that the model will automatically detect when the user has
# stopped talking and then start responding itself.
#
# However if we're in manual `turn_detection` mode then we need to
# manually tell the model to commit the audio buffer and start responding.
conn = await self._get_connection()
await conn.input_audio_buffer.commit()
await conn.response.create()
else:
self.should_send_audio.set()
status_indicator.is_recording = True
if __name__ == "__main__":
app = RealtimeApp()
app.run()
| RealtimeApp |
python | eventlet__eventlet | tests/isolated/patcher_existing_locks_preexisting.py | {
"start": 178,
"end": 1168
} | class ____:
lock = threading.RLock()
class NS2:
lock = threading.RLock()
dict = {1: 2, 12: threading.RLock()}
list = [0, threading.RLock()]
def ensure_upgraded(lock):
if not isinstance(lock, python_lock):
raise RuntimeError(lock)
if __name__ == '__main__':
# These extra print()s caused either test failures or segfaults until
# https://github.com/eventlet/eventlet/issues/864 was fixed.
if sys.version_info[:2] > (3, 9):
print(unittest.mock.NonCallableMock._lock)
print(NS.lock)
# unittest.mock imports asyncio, so clear out asyncio.
for name in list(sys.modules.keys()):
if name.startswith("asyncio"):
del sys.modules[name]
eventlet.monkey_patch()
ensure_upgraded(NS.lock)
ensure_upgraded(NS.NS2.lock)
ensure_upgraded(NS.dict[12])
ensure_upgraded(NS.list[1])
if sys.version_info[:2] > (3, 9):
ensure_upgraded(unittest.mock.NonCallableMock._lock)
print("pass")
| NS |
python | ray-project__ray | python/ray/data/_internal/datasource/tfrecords_datasource.py | {
"start": 680,
"end": 1160
} | class ____:
"""
Specifies read options when reading TFRecord files with TFX.
"""
# An int representing the number of consecutive elements of
# this dataset to combine in a single batch when tfx-bsl is used to read
# the tfrecord files.
batch_size: int = 2048
# Toggles the schema inference applied; applicable
# only if tfx-bsl is used and tf_schema argument is missing.
# Defaults to True.
auto_infer_schema: bool = True
| TFXReadOptions |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 688122,
"end": 688577
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of LinkProjectV2ToTeam"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "team")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
team = sgqlc.types.Field("Team", graphql_name="team")
"""The team the project is linked to"""
| LinkProjectV2ToTeamPayload |
python | getsentry__sentry | tests/sentry/release_health/release_monitor/__init__.py | {
"start": 3205,
"end": 5297
} | class ____(TestCase, BaseMetricsTestCase):
__test__ = Abstract(__module__, __qualname__)
backend_class: type[BaseReleaseMonitorBackend]
def setUp(self) -> None:
self.project1 = self.create_project()
self.project2 = self.create_project()
self.environment1 = self.create_environment(project=self.project1)
self.environment2 = self.create_environment(project=self.project2)
self.release1 = self.create_release(project=self.project1)
self.release2 = self.create_release(project=self.project2)
self.backend = self.backend_class()
def test(self) -> None:
self.bulk_store_sessions(
[
self.build_session(
project_id=self.project1,
environment=self.environment1.name,
release=self.release1.version,
)
for _ in range(5)
]
+ [
self.build_session(
project_id=self.project2,
environment=self.environment2.name,
release=self.release2.version,
)
]
)
totals = self.backend.fetch_project_release_health_totals(
self.organization.id,
[self.project.id, self.project1.id, self.project2.id],
)
assert totals == {
self.project1.id: {
self.environment1.name: {
"total_sessions": 5,
"releases": {self.release1.version: 5},
}
},
self.project2.id: {
self.environment2.name: {
"total_sessions": 1,
"releases": {self.release2.version: 1},
}
},
}, totals
def test_no_data(self) -> None:
totals = self.backend.fetch_project_release_health_totals(
self.organization.id,
[self.project.id, self.project1.id, self.project2.id],
)
assert totals == {}
| BaseFetchProjectReleaseHealthTotalsTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/_processors_cy.py | {
"start": 2104,
"end": 2498
} | class ____:
type_: type
format_: str
__slots__ = ("type_", "format_")
def __init__(self, type_: type, scale: int):
self.type_ = type_
self.format_ = f"%.{scale}f"
def __call__(self, value: Optional[Any]) -> object:
if value is None:
return None
else:
return self.type_(self.format_ % value)
| to_decimal_processor_factory |
python | marshmallow-code__apispec | tests/test_ext_marshmallow.py | {
"start": 50615,
"end": 51199
} | class ____:
def test_schema_with_default_values(self, spec):
spec.components.schema("DefaultValuesSchema", schema=DefaultValuesSchema)
definitions = get_schemas(spec)
props = definitions["DefaultValuesSchema"]["properties"]
assert props["number_auto_default"]["default"] == 12
assert props["number_manual_default"]["default"] == 42
assert "default" not in props["string_callable_default"]
assert props["string_manual_default"]["default"] == "Manual"
assert "default" not in props["numbers"]
| TestSchemaWithDefaultValues |
python | dask__distributed | distributed/spill.py | {
"start": 10721,
"end": 13767
} | class ____(zict.Func[Key, object, bytes]):
max_weight: int | Literal[False]
weight_by_key: dict[Key, SpilledSize]
total_weight: SpilledSize
def __init__(self, spill_directory: str, max_weight: int | Literal[False] = False):
compression = get_compression_settings(
"distributed.worker.memory.spill-compression"
)
# File is MutableMapping[str, bytes], but serialize_bytelist returns
# list[bytes | bytearray | memorymapping], which File.__setitem__ actually
# accepts despite its signature; File.__getitem__ actually returns
# bytearray. This headache is because MutableMapping doesn't allow for
# asymmetric VT in __getitem__ and __setitem__.
dump = cast(
Callable[[object], bytes],
partial(serialize_bytelist, compression=compression, on_error="raise"),
)
super().__init__(
dump,
deserialize_bytes,
cast(MutableMapping[Key, bytes], AnyKeyFile(spill_directory)),
)
self.max_weight = max_weight
self.weight_by_key = {}
self.total_weight = SpilledSize(0, 0)
def __getitem__(self, key: Key) -> object:
with context_meter.meter("disk-read", "seconds"):
pickled = self.d[key]
context_meter.digest_metric("disk-read", 1, "count")
context_meter.digest_metric("disk-read", len(pickled), "bytes")
out = self.load(pickled)
return out
def __setitem__(self, key: Key, value: object) -> None:
try:
pickled = self.dump(value)
except Exception as e:
# zict.LRU ensures that the key remains in fast if we raise.
# Wrap the exception so that it's recognizable by SpillBuffer,
# which will then unwrap it.
raise PickleError(key) from e
# Thanks to Buffer.__setitem__, we never update existing
# keys in slow, but always delete them and reinsert them.
assert key not in self.d
assert key not in self.weight_by_key
pickled_size = sum(map(nbytes, pickled))
if (
self.max_weight is not False
and self.total_weight.disk + pickled_size > self.max_weight
):
# Stop callbacks and ensure that the key ends up in SpillBuffer.fast
# To be caught by SpillBuffer.__setitem__
raise MaxSpillExceeded(key)
# Store to disk through File.
# This may raise OSError, which is caught by SpillBuffer above.
with context_meter.meter("disk-write", "seconds"):
self.d[key] = pickled
context_meter.digest_metric("disk-write", 1, "count")
context_meter.digest_metric("disk-write", pickled_size, "bytes")
weight = SpilledSize(safe_sizeof(value), pickled_size)
self.weight_by_key[key] = weight
self.total_weight += weight
def __delitem__(self, key: Key) -> None:
super().__delitem__(key)
self.total_weight -= self.weight_by_key.pop(key)
| Slow |
python | wandb__wandb | wandb/sdk/artifacts/_generated/link_artifact.py | {
"start": 333,
"end": 625
} | class ____(GQLResult):
version_index: Optional[int] = Field(alias="versionIndex")
artifact_membership: Optional[ArtifactMembershipFragment] = Field(
alias="artifactMembership", default=None
)
LinkArtifact.model_rebuild()
LinkArtifactResult.model_rebuild()
| LinkArtifactResult |
python | realpython__materials | python-async-iterators/large_file_iterator.py | {
"start": 34,
"end": 749
} | class ____:
def __init__(self, path, chunk_size=1024):
self.path = path
self.chunk_size = chunk_size
self.file = None
def __aiter__(self):
return self
async def __anext__(self):
if self.file is None:
self.file = await aiofiles.open(self.path, mode="rb")
chunk = await self.file.read(self.chunk_size)
if not chunk:
await self.file.close()
raise StopAsyncIteration
return chunk
async def main():
async for chunk in AsyncFileIterator("large-file.md"):
# Process the file chunk here...
await asyncio.sleep(0.2)
print(chunk.decode("utf-8"))
asyncio.run(main())
| AsyncFileIterator |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 403696,
"end": 409310
} | class ____(rv_continuous):
r"""A relativistic Breit-Wigner random variable.
%(before_notes)s
See Also
--------
cauchy: Cauchy distribution, also known as the Breit-Wigner distribution.
Notes
-----
The probability density function for `rel_breitwigner` is
.. math::
f(x, \rho) = \frac{k}{(x^2 - \rho^2)^2 + \rho^2}
where
.. math::
k = \frac{2\sqrt{2}\rho^2\sqrt{\rho^2 + 1}}
{\pi\sqrt{\rho^2 + \rho\sqrt{\rho^2 + 1}}}
The relativistic Breit-Wigner distribution is used in high energy physics
to model resonances [1]_. It gives the uncertainty in the invariant mass,
:math:`M` [2]_, of a resonance with characteristic mass :math:`M_0` and
decay-width :math:`\Gamma`, where :math:`M`, :math:`M_0` and :math:`\Gamma`
are expressed in natural units. In SciPy's parametrization, the shape
parameter :math:`\rho` is equal to :math:`M_0/\Gamma` and takes values in
:math:`(0, \infty)`.
Equivalently, the relativistic Breit-Wigner distribution is said to give
the uncertainty in the center-of-mass energy :math:`E_{\text{cm}}`. In
natural units, the speed of light :math:`c` is equal to 1 and the invariant
mass :math:`M` is equal to the rest energy :math:`Mc^2`. In the
center-of-mass frame, the rest energy is equal to the total energy [3]_.
%(after_notes)s
:math:`\rho = M/\Gamma` and :math:`\Gamma` is the scale parameter. For
example, if one seeks to model the :math:`Z^0` boson with :math:`M_0
\approx 91.1876 \text{ GeV}` and :math:`\Gamma \approx 2.4952\text{ GeV}`
[4]_ one can set ``rho=91.1876/2.4952`` and ``scale=2.4952``.
To ensure a physically meaningful result when using the `fit` method, one
should set ``floc=0`` to fix the location parameter to 0.
References
----------
.. [1] Relativistic Breit-Wigner distribution, Wikipedia,
https://en.wikipedia.org/wiki/Relativistic_Breit-Wigner_distribution
.. [2] Invariant mass, Wikipedia,
https://en.wikipedia.org/wiki/Invariant_mass
.. [3] Center-of-momentum frame, Wikipedia,
https://en.wikipedia.org/wiki/Center-of-momentum_frame
.. [4] M. Tanabashi et al. (Particle Data Group) Phys. Rev. D 98, 030001 -
Published 17 August 2018
%(example)s
"""
def _argcheck(self, rho):
return rho > 0
def _shape_info(self):
return [_ShapeInfo("rho", False, (0, np.inf), (False, False))]
def _pdf(self, x, rho):
# C = k / rho**2
C = np.sqrt(
2 * (1 + 1/rho**2) / (1 + np.sqrt(1 + 1/rho**2))
) * 2 / np.pi
with np.errstate(over='ignore'):
return C / (((x - rho)*(x + rho)/rho)**2 + 1)
def _cdf(self, x, rho):
# C = k / (2 * rho**2) / np.sqrt(1 + 1/rho**2)
C = np.sqrt(2/(1 + np.sqrt(1 + 1/rho**2)))/np.pi
result = (
np.sqrt(-1 + 1j/rho)
* np.arctan(x/np.sqrt(-rho*(rho + 1j)))
)
result = C * 2 * np.imag(result)
# Sometimes above formula produces values greater than 1.
return np.clip(result, None, 1)
def _munp(self, n, rho):
if n == 0:
return 1.
if n == 1:
# C = k / (2 * rho)
C = np.sqrt(
2 * (1 + 1/rho**2) / (1 + np.sqrt(1 + 1/rho**2))
) / np.pi * rho
return C * (np.pi/2 + np.arctan(rho))
if n == 2:
# C = pi * k / (4 * rho)
C = np.sqrt(
(1 + 1/rho**2) / (2 * (1 + np.sqrt(1 + 1/rho**2)))
) * rho
result = (1 - rho * 1j) / np.sqrt(-1 - 1j/rho)
return 2 * C * np.real(result)
else:
return np.inf
def _stats(self, rho):
# Returning None from stats makes public stats use _munp.
# nan values will be omitted from public stats. Skew and
# kurtosis are actually infinite.
return None, None, np.nan, np.nan
@inherit_docstring_from(rv_continuous)
def fit(self, data, *args, **kwds):
# Override rv_continuous.fit to better handle case where floc is set.
data, _, floc, fscale = _check_fit_input_parameters(
self, data, args, kwds
)
censored = isinstance(data, CensoredData)
if censored:
if data.num_censored() == 0:
# There are no censored values in data, so replace the
# CensoredData instance with a regular array.
data = data._uncensored
censored = False
if floc is None or censored:
return super().fit(data, *args, **kwds)
if fscale is None:
# The interquartile range approximates the scale parameter gamma.
# The median approximates rho * gamma.
p25, p50, p75 = np.quantile(data - floc, [0.25, 0.5, 0.75])
scale_0 = p75 - p25
rho_0 = p50 / scale_0
if not args:
args = [rho_0]
if "scale" not in kwds:
kwds["scale"] = scale_0
else:
M_0 = np.median(data - floc)
rho_0 = M_0 / fscale
if not args:
args = [rho_0]
return super().fit(data, *args, **kwds)
rel_breitwigner = rel_breitwigner_gen(a=0.0, name="rel_breitwigner")
# Collect names of classes and objects in this module.
pairs = list(globals().copy().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_continuous)
__all__ = _distn_names + _distn_gen_names + ['rv_histogram']
| rel_breitwigner_gen |
python | django__django | tests/model_fields/models.py | {
"start": 3506,
"end": 3586
} | class ____(models.Model):
value = models.SmallIntegerField()
| SmallIntegerModel |
python | huggingface__transformers | src/transformers/models/vitmatte/modeling_vitmatte.py | {
"start": 5896,
"end": 7560
} | class ____(nn.Module):
"""
Simple and lightweight Detail Capture Module for ViT Matting.
"""
def __init__(self, config):
super().__init__()
if len(config.fusion_hidden_sizes) != len(config.convstream_hidden_sizes) + 1:
raise ValueError(
"The length of fusion_hidden_sizes should be equal to the length of convstream_hidden_sizes + 1."
)
self.config = config
self.convstream = VitMatteConvStream(config)
self.conv_chans = self.convstream.conv_chans
self.fusion_blocks = nn.ModuleList()
self.fusion_channels = [config.hidden_size] + config.fusion_hidden_sizes
for i in range(len(self.fusion_channels) - 1):
self.fusion_blocks.append(
VitMatteFusionBlock(
config=config,
in_channels=self.fusion_channels[i] + self.conv_chans[-(i + 1)],
out_channels=self.fusion_channels[i + 1],
)
)
self.matting_head = VitMatteHead(config)
def forward(self, features, pixel_values):
detail_features = self.convstream(pixel_values)
for i in range(len(self.fusion_blocks)):
detailed_feature_map_name = "detailed_feature_map_" + str(len(self.fusion_blocks) - i - 1)
features = self.fusion_blocks[i](features, detail_features[detailed_feature_map_name])
alphas = torch.sigmoid(self.matting_head(features))
return alphas
@auto_docstring(
custom_intro="""
ViTMatte framework leveraging any vision backbone e.g. for ADE20k, CityScapes.
"""
)
| VitMatteDetailCaptureModule |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 257275,
"end": 257510
} | class ____(Response):
"""
Response of tasks.ping endpoint.
"""
_service = "tasks"
_action = "ping"
_version = "2.9"
_schema = {"additionalProperties": False, "definitions": {}, "type": "object"}
| PingResponse |
python | jina-ai__jina | jina/proto/docarray_v1/pb/jina_pb2_grpc.py | {
"start": 26117,
"end": 27013
} | class ____(object):
"""*
jina gRPC service to trigger a snapshot at the Executor Runtime.
"""
@staticmethod
def restore_status(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
'/jina.JinaExecutorRestoreProgress/restore_status',
jina__pb2.RestoreId.SerializeToString,
jina__pb2.RestoreSnapshotStatusProto.FromString,
options,
channel_credentials,
insecure,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
| JinaExecutorRestoreProgress |
python | getsentry__sentry | src/sentry/issues/occurrence_consumer.py | {
"start": 1682,
"end": 19675
} | class ____(Exception):
pass
def create_rate_limit_key(project_id: int, fingerprint: str) -> str:
rate_limit_key = f"occurrence_rate_limit:{project_id}-{fingerprint}"
return rate_limit_key
def is_rate_limited(
project_id: int,
fingerprint: str,
) -> bool:
try:
rate_limit_enabled = options.get("issues.occurrence-consumer.rate-limit.enabled")
if not rate_limit_enabled:
return False
rate_limit_key = create_rate_limit_key(project_id, fingerprint)
rate_limit_quota = Quota(**options.get("issues.occurrence-consumer.rate-limit.quota"))
granted_quota = rate_limiter.check_and_use_quotas(
[
RequestedQuota(
rate_limit_key,
1,
[rate_limit_quota],
)
]
)[0]
return not granted_quota.granted
except Exception:
logger.exception("Failed to check issue platform rate limiter")
return False
@sentry_sdk.tracing.trace
def save_event_from_occurrence(
data: dict[str, Any],
**kwargs: Any,
) -> Event:
from sentry.event_manager import EventManager
data["type"] = "generic"
project_id = data.pop("project_id")
with metrics.timer("occurrence_consumer.save_event_occurrence.event_manager.save"):
manager = EventManager(data, remove_other=False)
event = manager.save(project_id=project_id)
return event
@sentry_sdk.tracing.trace
def lookup_event(project_id: int, event_id: str) -> Event:
data = nodestore.backend.get(Event.generate_node_id(project_id, event_id))
if data is None:
raise EventLookupError(f"Failed to lookup event({event_id}) for project_id({project_id})")
event = Event(event_id=event_id, project_id=project_id)
event.data = data
return event
@sentry_sdk.tracing.trace
def create_event(project_id: int, event_id: str, event_data: dict[str, Any]) -> Event:
return Event(
event_id=event_id,
project_id=project_id,
snuba_data={
"event_id": event_data["event_id"],
"project_id": event_data["project_id"],
"timestamp": event_data["timestamp"],
"release": event_data.get("release"),
"environment": event_data.get("environment"),
"platform": event_data.get("platform"),
"tags.key": [tag[0] for tag in event_data.get("tags", [])],
"tags.value": [tag[1] for tag in event_data.get("tags", [])],
},
)
@sentry_sdk.tracing.trace
def create_event_and_issue_occurrence(
occurrence_data: IssueOccurrenceData, event_data: dict[str, Any]
) -> tuple[IssueOccurrence, GroupInfo | None]:
"""With standalone span ingestion, we won't be storing events in
nodestore, so instead we create a light-weight event with a small
set of fields that lets us create occurrences.
"""
project_id = occurrence_data["project_id"]
event_id = occurrence_data["event_id"]
if occurrence_data["event_id"] != event_data["event_id"]:
raise ValueError(
f"event_id in occurrence({occurrence_data['event_id']}) is different from event_id in event_data({event_data['event_id']})"
)
event = create_event(project_id, event_id, event_data)
with metrics.timer(
"occurrence_consumer._process_message.save_issue_occurrence",
tags={"method": "create_event_and_issue_occurrence"},
):
return save_issue_occurrence(occurrence_data, event)
@sentry_sdk.tracing.trace
def process_event_and_issue_occurrence(
occurrence_data: IssueOccurrenceData, event_data: dict[str, Any]
) -> tuple[IssueOccurrence, GroupInfo | None]:
if occurrence_data["event_id"] != event_data["event_id"]:
raise ValueError(
f"event_id in occurrence({occurrence_data['event_id']}) is different from event_id in event_data({event_data['event_id']})"
)
event = save_event_from_occurrence(event_data)
with metrics.timer(
"occurrence_consumer._process_message.save_issue_occurrence",
tags={"method": "process_event_and_issue_occurrence"},
):
return save_issue_occurrence(occurrence_data, event)
@sentry_sdk.tracing.trace
def lookup_event_and_process_issue_occurrence(
occurrence_data: IssueOccurrenceData,
) -> tuple[IssueOccurrence, GroupInfo | None]:
project_id = occurrence_data["project_id"]
event_id = occurrence_data["event_id"]
try:
event = lookup_event(project_id, event_id)
except Exception:
raise EventLookupError(f"Failed to lookup event({event_id}) for project_id({project_id})")
with metrics.timer(
"occurrence_consumer._process_message.save_issue_occurrence",
tags={"method": "lookup_event_and_process_issue_occurrence"},
):
return save_issue_occurrence(occurrence_data, event)
@sentry_sdk.tracing.trace
def _get_kwargs(payload: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Processes the incoming message payload into a format we can use.
:raises InvalidEventPayloadError: when payload contains invalid data
"""
try:
with metrics.timer("occurrence_ingest.duration", instance="_get_kwargs"):
metrics.distribution("occurrence.ingest.size.data", len(payload), unit="byte")
assignee_identifier = None
payload_assignee = payload.get("assignee")
if payload_assignee:
project = Project.objects.get_from_cache(id=payload["project_id"])
try:
assignee = parse_and_validate_actor(payload_assignee, project.organization_id)
if assignee:
assignee_identifier = assignee.identifier
except Exception:
logger.exception("Failed to validate assignee for occurrence")
occurrence_data = {
"id": UUID(payload["id"]).hex,
"project_id": payload["project_id"],
"fingerprint": payload["fingerprint"],
"issue_title": payload["issue_title"],
"subtitle": payload["subtitle"],
"resource_id": payload.get("resource_id"),
"evidence_data": payload.get("evidence_data"),
"evidence_display": payload.get("evidence_display"),
"type": payload["type"],
"detection_time": payload["detection_time"],
"level": payload.get("level", DEFAULT_LEVEL),
"assignee": assignee_identifier,
}
process_occurrence_data(occurrence_data)
if payload.get("event_id"):
occurrence_data["event_id"] = UUID(payload["event_id"]).hex
if payload.get("culprit"):
occurrence_data["culprit"] = payload["culprit"]
if payload.get("priority") is not None:
occurrence_data["priority"] = payload["priority"]
else:
group_type = get_group_type_by_type_id(occurrence_data["type"])
occurrence_data["priority"] = group_type.default_priority
if "event" in payload:
event_payload = payload["event"]
if payload["project_id"] != event_payload.get("project_id"):
raise InvalidEventPayloadError(
f"project_id in occurrence ({payload['project_id']}) is different from project_id in event ({event_payload.get('project_id')})"
)
if not payload.get("event_id") and not event_payload.get("event_id"):
raise InvalidEventPayloadError("Payload must contain an event_id")
if not payload.get("event_id"):
occurrence_data["event_id"] = event_payload.get("event_id")
event_data = {
"event_id": UUID(event_payload.get("event_id")).hex,
"level": occurrence_data["level"],
"project_id": event_payload.get("project_id"),
"platform": event_payload.get("platform"),
"received": event_payload.get("received", timezone.now()),
"tags": event_payload.get("tags"),
"timestamp": event_payload.get("timestamp"),
}
optional_params = [
"breadcrumbs",
"contexts",
"debug_meta",
"dist",
"environment",
"extra",
"modules",
"release",
"request",
"sdk",
"server_name",
"stacktrace",
"trace_id",
"transaction",
"user",
]
for optional_param in optional_params:
if optional_param in event_payload:
event_data[optional_param] = event_payload.get(optional_param)
try:
jsonschema.validate(event_data, EVENT_PAYLOAD_SCHEMA)
except jsonschema.exceptions.ValidationError:
metrics.incr(
"occurrence_ingest.event_payload_invalid",
sample_rate=1.0,
tags={"occurrence_type": occurrence_data["type"]},
)
logger.exception(
"Error validating event payload, falling back to legacy validation"
)
try:
jsonschema.validate(event_data, LEGACY_EVENT_PAYLOAD_SCHEMA)
except jsonschema.exceptions.ValidationError:
metrics.incr(
"occurrence_ingest.legacy_event_payload_invalid",
sample_rate=1.0,
tags={"occurrence_type": occurrence_data["type"]},
)
raise
event_data["metadata"] = {
# This allows us to show the title consistently in discover
"title": occurrence_data["issue_title"],
}
return {
"occurrence_data": occurrence_data,
"event_data": event_data,
"is_buffered_spans": payload.get("is_buffered_spans") is True,
}
else:
if not payload.get("event_id"):
raise InvalidEventPayloadError(
"Payload must contain either event_id or event_data"
)
return {"occurrence_data": occurrence_data}
except InvalidGroupTypeError:
raise
except (KeyError, ValueError) as e:
raise InvalidEventPayloadError(e)
@sentry_sdk.tracing.trace
@metrics.wraps("occurrence_consumer.process_occurrence_message")
def process_occurrence_message(
message: Mapping[str, Any], txn: Transaction | NoOpSpan | Span
) -> tuple[IssueOccurrence, GroupInfo | None] | None:
with metrics.timer("occurrence_consumer._process_message._get_kwargs"):
kwargs = _get_kwargs(message)
occurrence_data = kwargs["occurrence_data"]
metric_tags = {"occurrence_type": occurrence_data["type"]}
is_buffered_spans = kwargs.get("is_buffered_spans", False)
metrics.incr(
"occurrence_ingest.messages",
sample_rate=1.0,
tags=metric_tags,
)
txn.set_tag("occurrence_type", occurrence_data["type"])
project = Project.objects.get_from_cache(id=occurrence_data["project_id"])
organization = Organization.objects.get_from_cache(id=project.organization_id)
txn.set_tag("organization_id", organization.id)
txn.set_tag("organization_slug", organization.slug)
txn.set_tag("project_id", project.id)
txn.set_tag("project_slug", project.slug)
group_type = get_group_type_by_type_id(occurrence_data["type"])
if not group_type.allow_ingest(organization):
metrics.incr(
"occurrence_ingest.dropped_feature_disabled",
sample_rate=1.0,
tags=metric_tags,
)
txn.set_tag("result", "dropped_feature_disabled")
return None
if is_rate_limited(project.id, fingerprint=occurrence_data["fingerprint"][0]):
metrics.incr(
"occurrence_ingest.dropped_rate_limited",
sample_rate=1.0,
tags=metric_tags,
)
txn.set_tag("result", "dropped_rate_limited")
return None
if "event_data" in kwargs and is_buffered_spans:
return create_event_and_issue_occurrence(kwargs["occurrence_data"], kwargs["event_data"])
elif "event_data" in kwargs:
txn.set_tag("result", "success")
with metrics.timer(
"occurrence_consumer._process_message.process_event_and_issue_occurrence",
tags=metric_tags,
):
return process_event_and_issue_occurrence(
kwargs["occurrence_data"], kwargs["event_data"]
)
else:
txn.set_tag("result", "success")
with metrics.timer(
"occurrence_consumer._process_message.lookup_event_and_process_issue_occurrence",
tags=metric_tags,
):
return lookup_event_and_process_issue_occurrence(kwargs["occurrence_data"])
@sentry_sdk.tracing.trace
@metrics.wraps("occurrence_consumer.process_message")
def _process_message(
message: Mapping[str, Any],
) -> tuple[IssueOccurrence | None, GroupInfo | None] | None:
"""
:raises InvalidEventPayloadError: when the message is invalid
:raises EventLookupError: when the provided event_id in the message couldn't be found.
"""
with sentry_sdk.start_transaction(
op="_process_message",
name="issues.occurrence_consumer",
) as txn:
try:
# Messages without payload_type default to an OCCURRENCE payload
payload_type = message.get("payload_type", PayloadType.OCCURRENCE.value)
if payload_type == PayloadType.STATUS_CHANGE.value:
group = process_status_change_message(message, txn)
if not group:
return None
return None, GroupInfo(group=group, is_new=False, is_regression=False)
elif payload_type == PayloadType.OCCURRENCE.value:
return process_occurrence_message(message, txn)
else:
metrics.incr(
"occurrence_consumer._process_message.dropped_invalid_payload_type",
sample_rate=1.0,
tags={"payload_type": payload_type},
)
except InvalidGroupTypeError as e:
metrics.incr(
"occurrence_ingest.invalid_group_type", tags={"occurrence_type": e.group_type_id}
)
except (ValueError, KeyError) as e:
txn.set_tag("result", "error")
raise InvalidEventPayloadError(e)
return None
@sentry_sdk.tracing.trace
@metrics.wraps("occurrence_consumer.process_batch")
def process_occurrence_batch(
worker: ThreadPoolExecutor, message: Message[ValuesBatch[KafkaPayload]]
) -> None:
"""
Receives batches of occurrences. This function will take the batch
and group them together by fingerprint (ensuring order is preserved) and
execute each group using a ThreadPoolWorker.
By batching we're able to process occurrences in parallel while guaranteeing
that no occurrences are processed out of order per group.
"""
batch = message.payload
occcurrence_mapping: Mapping[str, list[Mapping[str, Any]]] = defaultdict(list)
for item in batch:
assert isinstance(item, BrokerValue)
try:
payload = orjson.loads(item.payload.value)
except Exception:
logger.exception("Failed to unpack message payload")
continue
# group by the fingerprint, there should only be one of them
partition_key: str = payload["fingerprint"][0] if payload["fingerprint"] else ""
occcurrence_mapping[partition_key].append(payload)
# Number of occurrences that are being processed in this batch
metrics.gauge("occurrence_consumer.checkin.parallel_batch_count", len(batch))
# Number of groups we've collected to be processed in parallel
metrics.gauge("occurrence_consumer.checkin.parallel_batch_groups", len(occcurrence_mapping))
# Submit occurrences & status changes for processing
with sentry_sdk.start_transaction(op="process_batch", name="occurrence.occurrence_consumer"):
futures = [
worker.submit(process_occurrence_group, group) for group in occcurrence_mapping.values()
]
wait(futures)
@metrics.wraps("occurrence_consumer.process_occurrence_group")
def process_occurrence_group(items: list[Mapping[str, Any]]) -> None:
"""
Process a group of related occurrences (all part of the same group)
completely serially.
"""
status_changes = [
item for item in items if item.get("payload_type") == PayloadType.STATUS_CHANGE.value
]
if status_changes:
items = [
item for item in items if item.get("payload_type") != PayloadType.STATUS_CHANGE.value
] + status_changes[-1:]
metrics.incr(
"occurrence_consumer.process_occurrence_group.dropped_status_changes",
amount=len(status_changes) - 1,
sample_rate=1.0,
)
for item in items:
cache_key = f"occurrence_consumer.process_occurrence_group.{item['id']}"
if cache.get(cache_key):
logger.info("Skipping processing of occurrence %s due to cache hit", item["id"])
continue
_process_message(item)
# just need a 300 second cache
cache.set(cache_key, 1, 300)
| EventLookupError |
python | numpy__numpy | numpy/distutils/fcompiler/vast.py | {
"start": 98,
"end": 1667
} | class ____(GnuFCompiler):
compiler_type = 'vast'
compiler_aliases = ()
description = 'Pacific-Sierra Research Fortran 90 Compiler'
version_pattern = (r'\s*Pacific-Sierra Research vf90 '
r'(Personal|Professional)\s+(?P<version>[^\s]*)')
# VAST f90 does not support -o with -c. So, object files are created
# to the current directory and then moved to build directory
object_switch = ' && function _mvfile { mv -v `basename $1` $1 ; } && _mvfile '
executables = {
'version_cmd' : ["vf90", "-v"],
'compiler_f77' : ["g77"],
'compiler_fix' : ["f90", "-Wv,-ya"],
'compiler_f90' : ["f90"],
'linker_so' : ["<F90>"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = None #XXX Fix me
module_include_switch = None #XXX Fix me
def find_executables(self):
pass
def get_version_cmd(self):
f90 = self.compiler_f90[0]
d, b = os.path.split(f90)
vf90 = os.path.join(d, 'v'+b)
return vf90
def get_flags_arch(self):
vast_version = self.get_version()
gnu = GnuFCompiler()
gnu.customize(None)
self.version = gnu.get_version()
opt = GnuFCompiler.get_flags_arch(self)
self.version = vast_version
return opt
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='vast').get_version())
| VastFCompiler |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 188386,
"end": 189624
} | class ____(GeneratedAirbyteSource):
class PATCredentials:
@public
def __init__(self, personal_access_token: str):
self.personal_access_token = check.str_param(
personal_access_token, "personal_access_token"
)
class OAuthCredentials:
@public
def __init__(self, client_id: str, client_secret: str, refresh_token: str):
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
@public
def __init__(
self,
name: str,
credentials: Union["AsanaSource.PATCredentials", "AsanaSource.OAuthCredentials"],
):
"""Airbyte Source for Asana.
Args:
name (str): The name of the destination.
credentials (Union[AsanaSource.PATCredentials, AsanaSource.OAuthCredentials]): Choose how to authenticate to Github
"""
self.credentials = check.inst_param(
credentials, "credentials", (AsanaSource.PATCredentials, AsanaSource.OAuthCredentials)
)
super().__init__("Asana", name)
| AsanaSource |
python | networkx__networkx | networkx/algorithms/isomorphism/vf2userfunc.py | {
"start": 3071,
"end": 4736
} | class ____(vf2.GraphMatcher):
"""VF2 isomorphism checker for undirected graphs."""
def __init__(self, G1, G2, node_match=None, edge_match=None):
"""Initialize graph matcher.
Parameters
----------
G1, G2: graph
The graphs to be tested.
node_match: callable
A function that returns True iff node n1 in G1 and n2 in G2
should be considered equal during the isomorphism test. The
function will be called like::
node_match(G1.nodes[n1], G2.nodes[n2])
That is, the function will receive the node attribute dictionaries
of the nodes under consideration. If None, then no attributes are
considered when testing for an isomorphism.
edge_match: callable
A function that returns True iff the edge attribute dictionary for
the pair of nodes (u1, v1) in G1 and (u2, v2) in G2 should be
considered equal during the isomorphism test. The function will be
called like::
edge_match(G1[u1][v1], G2[u2][v2])
That is, the function will receive the edge attribute dictionaries
of the edges under consideration. If None, then no attributes are
considered when testing for an isomorphism.
"""
vf2.GraphMatcher.__init__(self, G1, G2)
self.node_match = node_match
self.edge_match = edge_match
# These will be modified during checks to minimize code repeat.
self.G1_adj = self.G1.adj
self.G2_adj = self.G2.adj
semantic_feasibility = _semantic_feasibility
| GraphMatcher |
python | ansible__ansible | lib/ansible/_internal/_yaml/_loader.py | {
"start": 1297,
"end": 1886
} | class ____(_YamlParser, AnsibleInstrumentedConstructor, Resolver):
"""Ansible YAML loader which supports Ansible custom behavior such as `Origin` tagging, but no Ansible-specific YAML tags."""
def __init__(self, stream: str | bytes | _io.IOBase) -> None:
_YamlParser.__init__(self, stream)
AnsibleInstrumentedConstructor.__init__(
self,
origin=_tags.Origin.get_or_create_tag(stream, self.name),
trusted_as_template=_tags.TrustedAsTemplate.is_tagged_on(stream),
)
Resolver.__init__(self)
| AnsibleInstrumentedLoader |
python | google__jax | jax/experimental/jax2tf/examples/mnist_lib.py | {
"start": 6576,
"end": 11512
} | class ____:
"""An MNIST model using Flax."""
name = "mnist_flax"
class Module(nn.Module):
"""A simple CNN model for MNIST.
There is an option for the model to skip the classifier layer, for
demonstrating reuse of the classifier-less model into a larger model.
See README.md.
"""
@nn.compact
def __call__(self, x, with_classifier=True):
x = nn.Conv(features=32, kernel_size=(3, 3))(x)
x = nn.relu(x)
x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
x = nn.Conv(features=64, kernel_size=(3, 3))(x)
x = nn.relu(x)
x = nn.avg_pool(x, window_shape=(2, 2), strides=(2, 2))
x = x.reshape((x.shape[0], -1)) # flatten
x = nn.Dense(features=256)(x)
x = nn.relu(x)
if not with_classifier:
return x
x = nn.Dense(features=10)(x)
x = nn.log_softmax(x)
return x
# Create the model and save it
model = Module()
@staticmethod
def predict(params, inputs, with_classifier=True):
return FlaxMNIST.model.apply({"params": params},
inputs,
with_classifier=with_classifier)
@staticmethod
def loss(params, inputs, labels): # Same as the pure JAX example
# Must use the classifier layer because the labels are classes
predictions = FlaxMNIST.predict(params, inputs, with_classifier=True)
return -jnp.mean(jnp.sum(predictions * labels, axis=1))
@staticmethod
def update(tx, params, opt_state, inputs, labels):
grad = jax.grad(FlaxMNIST.loss)(params, inputs, labels)
updates, opt_state = tx.update(grad, opt_state)
params = optax.apply_updates(params, updates)
return params, opt_state
@staticmethod
def train(train_ds, test_ds, num_epochs, with_classifier=True):
"""Trains a pure JAX MNIST predictor.
Returns:
a tuple with two elements:
- a predictor function with signature "(Params, ImagesBatch) ->
Predictions".
If `with_classifier=False` then the output of the predictor function
is the last layer of logits.
- the parameters "Params" for the predictor function
"""
rng = jax.random.PRNGKey(0)
momentum_mass = 0.9
init_shape = jnp.ones((1,) + input_shape, jnp.float32)
params = FlaxMNIST.model.init(rng, init_shape)["params"]
tx = optax.sgd(learning_rate=step_size, momentum=momentum_mass)
opt_state = tx.init(params)
for epoch in range(num_epochs):
start_time = time.time()
for inputs, labels in tfds.as_numpy(train_ds):
params, opt_state = jax.jit(FlaxMNIST.update,
static_argnums=0)(tx, params, opt_state,
inputs, labels)
epoch_time = time.time() - start_time
# Same accuracy function as for the pure JAX example
train_acc = PureJaxMNIST.accuracy(FlaxMNIST.predict, params,
train_ds)
test_acc = PureJaxMNIST.accuracy(FlaxMNIST.predict, params,
test_ds)
logging.info("%s: Epoch %d in %0.2f sec", FlaxMNIST.name, epoch,
epoch_time)
logging.info("%s: Training set accuracy %0.2f%%", FlaxMNIST.name,
100. * train_acc)
logging.info("%s: Test set accuracy %0.2f%%", FlaxMNIST.name,
100. * test_acc)
# See discussion in README.md for packaging Flax models for conversion
predict_fn = functools.partial(FlaxMNIST.predict,
with_classifier=with_classifier)
return (predict_fn, params)
def plot_images(ds,
nr_rows: int,
nr_cols: int,
title: str,
inference_fn: Callable | None = None):
"""Plots a grid of images with their predictions.
Params:
ds: a tensorflow dataset from where to pick the images and labels.
nr_rows, nr_cols: the size of the grid to plot
title: the title of the plot
inference_fn: if None then print the existing label, else use this function
on the batch of images to produce a batch of inference results, which
get printed.
inference_batch_size: the size of the batch of images passed to
`inference_fn`.
"""
count = nr_rows * nr_cols
fig = plt.figure(figsize=(8., 4.), num=title)
# Get the first batch
(images, labels), = list(tfds.as_numpy(ds.take(1)))
if inference_fn:
inferred_labels = inference_fn(images)
for i, image in enumerate(images[:count]):
digit = fig.add_subplot(nr_rows, nr_cols, i + 1)
if inference_fn:
digit_title = f"infer: {np.argmax(inferred_labels[i])}\n"
else:
digit_title = ""
digit_title += f"label: {np.argmax(labels[i])}"
digit.set_title(digit_title)
plt.imshow(
(np.reshape(image, (28, 28)) * 255).astype(np.uint8),
interpolation="nearest")
plt.show()
| FlaxMNIST |
python | apache__airflow | scripts/in_container/run_generate_openapi_spec_providers.py | {
"start": 1622,
"end": 3634
} | class ____(NamedTuple):
openapi_spec_file: Path
app: FastAPI | None
prefix: str
sys.path.insert(0, str(Path(__file__).parent.resolve()))
ProvidersManager().initialize_providers_configuration()
PROVIDERS_DEFS = {
"fab": ProviderDef(
openapi_spec_file=Path(FAB_AUTHMGR_API_PATH).parent
/ "openapi"
/ "v2-fab-auth-manager-generated.yaml",
app=FabAuthManager().get_fastapi_app(),
prefix="/auth",
),
"edge": ProviderDef(
openapi_spec_file=Path(EDGE_PATH).parent / "openapi" / "v2-edge-generated.yaml",
app=create_edge_worker_api_app(),
prefix="/edge_worker",
),
"keycloak": ProviderDef(
openapi_spec_file=Path(KEYCLOAK_AUTHMGR_PATH).parent
/ "openapi"
/ "v2-keycloak-auth-manager-generated.yaml",
app=KeycloakAuthManager().get_fastapi_app(),
prefix="/auth",
),
}
# Generate FAB auth manager openapi spec
def generate_openapi_specs(provider_name: str):
provider_def = PROVIDERS_DEFS.get(provider_name)
if provider_def is None:
console.print(f"[red]Provider '{provider_name}' not found. Skipping OpenAPI spec generation.[/]")
sys.exit(1)
app = provider_def.app
openapi_spec_file = provider_def.openapi_spec_file
if app:
generate_openapi_file(app=app, file_path=openapi_spec_file, prefix=provider_def.prefix)
validate_openapi_file(openapi_spec_file)
else:
console.print(
f"[red]Provider '{provider_name}' has no FastAPI app. Skipping OpenAPI spec generation.[/]"
)
sys.exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Generate openapi-spec for the specified provider.")
parser.add_argument(
"provider",
type=str,
help="The name of the provider whose openapi-spec should be compiled.",
choices=list(PROVIDERS_DEFS.keys()),
)
args = parser.parse_args()
generate_openapi_specs(args.provider)
| ProviderDef |
python | doocs__leetcode | lcof2/剑指 Offer II 097. 子序列的数目/Solution.py | {
"start": 0,
"end": 414
} | class ____:
def numDistinct(self, s: str, t: str) -> int:
m, n = len(s), len(t)
f = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(m + 1):
f[i][0] = 1
for i, a in enumerate(s, 1):
for j, b in enumerate(t, 1):
f[i][j] = f[i - 1][j]
if a == b:
f[i][j] += f[i - 1][j - 1]
return f[m][n]
| Solution |
python | pytorch__pytorch | torch/distributed/tensor/placement_types.py | {
"start": 855,
"end": 17130
} | class ____(torch._C._distributed.Shard):
"""
The ``Shard(dim)`` placement describes the DTensor sharding on tensor dimension
``dim`` over a corresponding ``DeviceMesh`` dimension, where each rank on the
DeviceMesh dimension only holds a shard/piece of the global Tensor. The
``Shard(dim)`` placement follows the ``torch.chunk(dim)`` semantic, where the
last few shards on the DeviceMesh dimension might be empty when the tensor dimension
is not evenly divisible on the DeviceMesh dimension. The ``Shard`` placement can be
used by all DTensor APIs (i.e. distribute_tensor, from_local, etc.)
Args:
dim (int): The tensor dimension that describes the DTensor is sharded over its
corresponding DeviceMesh dimension.
.. warning:: sharding on a tensor dimension where the tensor dimension size is not
evenly divisible on a DeviceMesh dimension is currently experimental and subject to change.
"""
def _split_tensor(
self,
tensor: torch.Tensor,
num_chunks: int,
*,
with_padding: bool = True,
contiguous: bool = True,
) -> tuple[list[torch.Tensor], list[int]]:
"""
This function uses torch.chunk to split a tensor into num_chunks shards along
the Shard placement dimension, and return a list of shards with their pad sizes.
Keyword args:
with_padding (bool, optional): when True, we pad the tensor on the last
few ranks before calling the collectives (i.e. scatter/all_gather, etc.).
This is because collectives usually require equal size tensor inputs
"""
assert self.dim <= tensor.ndim, (
f"Sharding dim {self.dim} greater than tensor ndim {tensor.ndim}"
)
# chunk tensor over dimension `dim` into n slices
tensor_list = list(torch.chunk(tensor, num_chunks, dim=self.dim))
tensor_list = fill_empty_tensor_to_shards(
tensor_list, self.dim, num_chunks - len(tensor_list)
)
# compute the chunk size inline with ``torch.chunk`` to calculate padding
full_chunk_size = (tensor.size(self.dim) + num_chunks - 1) // num_chunks
shard_list: list[torch.Tensor] = []
pad_sizes: list[int] = []
for shard in tensor_list:
if with_padding:
pad_size = Shard._get_shard_pad_size(full_chunk_size, shard, self.dim)
shard = pad_tensor(shard, self.dim, pad_size)
pad_sizes.append(pad_size)
if contiguous:
shard = shard.contiguous()
shard_list.append(shard)
return shard_list, pad_sizes
@staticmethod
@maybe_run_for_local_tensor
def local_shard_size_and_offset(
curr_local_size: int,
num_chunks: int,
rank: int,
) -> tuple[int, int]:
"""
Given the size of the current local tensor (which may already be sharded on some dimensions),
computes the new local shard size and offset given the desired number of chunks
(num_chunks is generally equal to the size of the current sharding dim).
Note: new local shard offset is relative to the current sharded tensor, not the global tensor.
See `_utils.compute_local_shape_and_global_offset` for computing global offset.
Returns (new local shard size, offset)
"""
# Compute the chunk size inline with ``torch.chunk``
if curr_local_size % num_chunks == 0:
full_chunk_size = curr_local_size // num_chunks
return full_chunk_size, full_chunk_size * rank
# uneven sharding case
full_chunk_size = (curr_local_size + num_chunks - 1) // num_chunks
shard_starting_idx = full_chunk_size * rank
if curr_local_size < shard_starting_idx:
return 0, curr_local_size
else:
local_shard_size = (
min(curr_local_size, shard_starting_idx + full_chunk_size)
- shard_starting_idx
)
return local_shard_size, shard_starting_idx
def _local_shard_size_and_offset(
self,
curr_local_size: int,
num_chunks: int,
rank: int,
) -> tuple[int, int | None]:
return Shard.local_shard_size_and_offset(curr_local_size, num_chunks, rank)
@staticmethod
@maybe_run_for_local_tensor
def _maybe_unpad_tensor_with_sizes(
dim, local_tensor, pad_sizes, mesh_dim_local_rank, make_contiguous
) -> torch.Tensor:
# Only unpad if the local_tensor was padded on the dimension.
if pad_sizes[mesh_dim_local_rank] > 0:
local_tensor = unpad_tensor(
local_tensor, dim, pad_sizes[mesh_dim_local_rank]
)
if make_contiguous:
local_tensor = local_tensor.contiguous()
return local_tensor
def _shard_tensor(
self,
tensor: torch.Tensor,
mesh: DeviceMesh,
mesh_dim: int,
src_data_rank: int | None = 0,
) -> torch.Tensor:
"""
shard and scatter a tensor on a mesh dimension (use coordinate
0 on the mesh dimension as source of truth)
"""
my_coordinate = mesh.get_coordinate()
num_chunks = mesh.size(mesh_dim=mesh_dim)
if my_coordinate is None:
# if rank is not part of mesh, we simply return an empty tensor
return tensor.new_empty(0, requires_grad=tensor.requires_grad)
mesh_dim_local_rank = my_coordinate[mesh_dim]
if src_data_rank is None:
# src_data_rank specified as None explicitly means to skip the
# communications, simply split
scatter_list, _ = self._split_tensor(
tensor, num_chunks, with_padding=False, contiguous=True
)
return self._select_shard(scatter_list, mesh_dim_local_rank)
scatter_list, pad_sizes = self._split_tensor(
tensor, num_chunks, with_padding=True, contiguous=True
)
it = iter(scatter_list)
first = next(it)
# Tensors in the scatter list are expected to have the same shape because
# split is requested with padding.
assert all(first.shape == v.shape for v in it)
output = torch.empty_like(first)
# perform scatter from the src_data_rank as data source when it is not None
mesh_scatter(
output, scatter_list, mesh, mesh_dim=mesh_dim, group_src=src_data_rank
)
return Shard._maybe_unpad_tensor_with_sizes(
self.dim, output, pad_sizes, mesh_dim_local_rank, True
)
@classmethod
def _make_shard_tensor(
cls,
dim: int,
tensor: torch.Tensor,
mesh: DeviceMesh,
mesh_dim: int,
src_data_rank: int | None = 0,
) -> torch.Tensor:
shard_placement = cls(dim)
return shard_placement._shard_tensor(tensor, mesh, mesh_dim, src_data_rank)
def _reduce_shard_tensor(
self,
tensor: torch.Tensor,
mesh: DeviceMesh,
reduce_op: str,
mesh_dim: int,
) -> torch.Tensor:
"""
reduce and scatter a tensor on a mesh dimension
"""
my_coordinate = mesh.get_coordinate()
num_chunks = mesh.size(mesh_dim=mesh_dim)
if my_coordinate is None:
# if rank is not part of mesh, we simply return local_tensor,
# which should be an empty tensor
return tensor
is_padded = tensor.size(self.dim) % num_chunks != 0
pad_sizes = None
if is_padded:
scattered_list, pad_sizes = self._split_tensor(
tensor, num_chunks, with_padding=True, contiguous=True
)
tensor = torch.cat(scattered_list, dim=self.dim)
elif not tensor.is_contiguous():
tensor = tensor.contiguous()
output = funcol.reduce_scatter_tensor(
tensor, reduce_op, scatter_dim=self.dim, group=(mesh, mesh_dim)
)
if is_padded:
assert pad_sizes is not None
output = Shard._maybe_unpad_tensor_with_sizes(
self.dim, output, pad_sizes, my_coordinate[mesh_dim], False
)
return output
@maybe_run_for_local_tensor
def _maybe_pad_tensor(
self,
local_tensor: torch.Tensor,
logical_dim_size: int,
num_chunks: int,
) -> torch.Tensor:
is_padded = logical_dim_size % num_chunks != 0
if is_padded:
full_chunk_size = (logical_dim_size + num_chunks - 1) // num_chunks
pad_size = full_chunk_size - local_tensor.size(self.dim)
local_tensor = pad_tensor(local_tensor, self.dim, pad_size)
if not local_tensor.is_contiguous():
local_tensor = local_tensor.contiguous()
return local_tensor
@maybe_run_for_local_tensor
def _maybe_unpad_tensor(
self,
local_tensor: torch.Tensor,
logical_dim_size: int,
num_chunks: int,
) -> torch.Tensor:
is_padded = logical_dim_size % num_chunks != 0
if is_padded:
full_chunk_size = (logical_dim_size + num_chunks - 1) // num_chunks
unpad_size = full_chunk_size * num_chunks - logical_dim_size # type: ignore[possibly-undefined]
local_tensor = unpad_tensor(local_tensor, self.dim, unpad_size)
return local_tensor
def _to_replicate_tensor(
self,
local_tensor: torch.Tensor,
mesh: DeviceMesh,
mesh_dim: int,
current_logical_shape: list[int],
) -> torch.Tensor:
"""
This function all_gather all shards and return a tensor that
is replicated on the previously sharded mesh dimension
"""
num_chunks = mesh.size(mesh_dim=mesh_dim)
logical_dim_size = current_logical_shape[self.dim]
local_tensor = self._maybe_pad_tensor(
local_tensor, logical_dim_size, num_chunks
)
result = funcol.all_gather_tensor(
local_tensor,
gather_dim=self.dim,
group=(mesh, mesh_dim),
)
result = self._maybe_unpad_tensor(result, logical_dim_size, num_chunks)
return result
@staticmethod
@maybe_run_for_local_tensor
def _select_shard(shards: list[torch.Tensor], shard_index) -> torch.Tensor:
return shards[shard_index].clone()
def _replicate_to_shard(
self,
local_tensor: torch.Tensor,
mesh: DeviceMesh,
mesh_dim: int,
shard_index: int,
) -> torch.Tensor:
"""
transform from replicated tensor to a sharded tensor on
the current rank, which would perform a local chunk
"""
num_chunks = mesh.size(mesh_dim=mesh_dim)
shards, _ = self._split_tensor(
local_tensor,
num_chunks,
with_padding=False,
contiguous=False,
)
return Shard._select_shard(shards, shard_index)
@staticmethod
@maybe_run_for_local_tensor
def _get_shard_pad_size(
full_size: int, local_tensor: torch.Tensor, dim: int
) -> int:
"""
Get the padding size of the local tensor on the shard dimension.
"""
return full_size - local_tensor.size(dim)
@staticmethod
def _compute_padding_info(
current_logical_shape: list[int],
num_chunks: int,
old_shard_dim: int,
new_shard_dim: int,
) -> tuple[bool, int, int, bool, int, int]:
results = []
for shard_dim in [old_shard_dim, new_shard_dim]:
dim_logical_size = current_logical_shape[shard_dim]
dim_padding = dim_logical_size % num_chunks != 0
dim_full_chunk_size = (dim_logical_size + num_chunks - 1) // num_chunks
results.append((dim_padding, dim_logical_size, dim_full_chunk_size))
return results[0] + results[1]
@staticmethod
@maybe_run_for_local_tensor
def _pad_for_new_shard_dim(
current_logical_shape: list[int],
local_tensor: torch.Tensor,
num_chunks: int,
old_shard_dim: int,
new_shard_dim: int,
) -> torch.Tensor:
(
old_dim_padding,
_,
old_dim_full_chunk_size,
new_dim_padding,
_,
new_dim_full_chunk_size,
) = Shard._compute_padding_info(
current_logical_shape, num_chunks, old_shard_dim, new_shard_dim
)
if old_dim_padding:
old_dim_pad_size = Shard._get_shard_pad_size(
old_dim_full_chunk_size, local_tensor, old_shard_dim
)
local_tensor = pad_tensor(local_tensor, old_shard_dim, old_dim_pad_size)
if new_dim_padding:
new_dim_pad_size = Shard._get_shard_pad_size(
new_dim_full_chunk_size * num_chunks, local_tensor, new_shard_dim
)
local_tensor = pad_tensor(local_tensor, new_shard_dim, new_dim_pad_size)
if not local_tensor.is_contiguous():
local_tensor = local_tensor.contiguous()
return local_tensor
@staticmethod
@maybe_run_for_local_tensor
def _unpad_for_new_shard_dim(
current_logical_shape: list[int],
local_tensor: torch.Tensor,
num_chunks: int,
old_shard_dim: int,
new_shard_dim: int,
local_rank: int,
) -> torch.Tensor:
(
old_dim_padding,
_,
old_dim_full_chunk_size,
new_dim_padding,
new_dim_logical_size,
new_dim_full_chunk_size,
) = Shard._compute_padding_info(
current_logical_shape, num_chunks, old_shard_dim, new_shard_dim
)
if old_dim_padding:
old_dim_unpad_size = (
old_dim_full_chunk_size * num_chunks
- current_logical_shape[old_shard_dim] # type: ignore[possibly-undefined]
)
local_tensor = unpad_tensor(local_tensor, old_shard_dim, old_dim_unpad_size) # type: ignore[possibly-undefined]
if new_dim_padding:
local_shard_size_on_new_dim = Shard.local_shard_size_and_offset(
new_dim_logical_size, num_chunks, local_rank
)[0]
new_dim_unpad_size = new_dim_full_chunk_size - local_shard_size_on_new_dim # type: ignore[possibly-undefined]
local_tensor = unpad_tensor(local_tensor, new_shard_dim, new_dim_unpad_size) # type: ignore[possibly-undefined]
return local_tensor
def _to_new_shard_dim(
self,
local_tensor: torch.Tensor,
mesh: DeviceMesh,
mesh_dim: int,
current_logical_shape: list[int],
new_shard_dim: int,
) -> torch.Tensor:
"""
transform from existing sharded tensor to a new sharded tensor on
that shard on a new dimension, which performs an alltoall
"""
my_coordinate = mesh.get_coordinate()
if my_coordinate is None:
# if rank is not part of mesh, we simply return local_tensor,
# which should be an empty tensor
return local_tensor
num_chunks = mesh.size(mesh_dim=mesh_dim)
local_tensor = Shard._pad_for_new_shard_dim(
current_logical_shape, local_tensor, num_chunks, self.dim, new_shard_dim
)
new_tensor = shard_dim_alltoall(
local_tensor, self.dim, new_shard_dim, mesh, mesh_dim
)
new_tensor = Shard._unpad_for_new_shard_dim(
current_logical_shape,
new_tensor,
num_chunks,
self.dim,
new_shard_dim,
my_coordinate[mesh_dim],
)
return new_tensor
def __hash__(self) -> int:
return hash(self.dim)
def __repr__(self) -> str:
"""
machine readable representation of the Shard placement
"""
return f"Shard(dim={self.dim})"
def __str__(self) -> str:
"""human readable representation of the Shard placement"""
return f"S({self.dim})"
# Need to inherit from Shard here so that isinstance(some_strided_shard, Shard) will work.
| Shard |
python | getsentry__sentry | src/sentry/web/frontend/base.py | {
"start": 28693,
"end": 29062
} | class ____(AbstractOrganizationView):
"""A view which accesses organization objects over RPC.
Only endpoints on the control silo should use this class (but it works anywhere).
"""
def _get_organization(self) -> RpcOrganization | None:
return self.active_organization.organization if self.active_organization else None
| ControlSiloOrganizationView |
python | jazzband__django-simple-history | simple_history/tests/view.py | {
"start": 518,
"end": 605
} | class ____(CreateView):
model = Poll
fields = ["question", "pub_date"]
| PollCreate |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 498776,
"end": 499992
} | class ____(VegaLiteSchema):
"""
JoinAggregateFieldDef schema wrapper.
Parameters
----------
op : :class:`AggregateOp`, Literal['argmax', 'argmin', 'average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
The aggregation operation to apply (e.g., ``"sum"``, ``"average"`` or ``"count"``).
See the list of all supported operations `here
<https://vega.github.io/vega-lite/docs/aggregate.html#ops>`__.
field : str, :class:`FieldName`
The data field for which to compute the aggregate function. This can be omitted for
functions that do not operate over a field such as ``"count"``.
as : str, :class:`FieldName`
The output name for the join aggregate operation.
"""
_schema = {"$ref": "#/definitions/JoinAggregateFieldDef"}
def __init__(
self,
op: Optional[SchemaBase | AggregateOp_T] = Undefined,
field: Optional[str | SchemaBase] = Undefined,
**kwds,
):
super().__init__(op=op, field=field, **kwds)
| JoinAggregateFieldDef |
python | ray-project__ray | rllib/algorithms/sac/sac.py | {
"start": 26844,
"end": 27795
} | class ____(DQN):
"""Soft Actor Critic (SAC) Algorithm class.
This file defines the distributed Algorithm class for the soft actor critic
algorithm.
See `sac_[tf|torch]_policy.py` for the definition of the policy loss.
Detailed documentation:
https://docs.ray.io/en/master/rllib-algorithms.html#sac
"""
def __init__(self, *args, **kwargs):
self._allow_unknown_subkeys += ["policy_model_config", "q_model_config"]
super().__init__(*args, **kwargs)
@classmethod
@override(DQN)
def get_default_config(cls) -> SACConfig:
return SACConfig()
@classmethod
@override(DQN)
def get_default_policy_class(
cls, config: AlgorithmConfig
) -> Optional[Type[Policy]]:
if config["framework"] == "torch":
from ray.rllib.algorithms.sac.sac_torch_policy import SACTorchPolicy
return SACTorchPolicy
else:
return SACTFPolicy
| SAC |
python | huggingface__transformers | tests/models/glm4_moe/test_modeling_glm4_moe.py | {
"start": 1706,
"end": 2091
} | class ____(CausalLMModelTest, unittest.TestCase):
model_tester_class = Glm4MoeModelTester
# used in `test_torch_compile_for_training`. Skip as "Dynamic control flow in MoE"
_torch_compile_train_cls = None
model_split_percents = [0.5, 0.85, 0.9] # it tries to offload everything with the default value
@require_torch_accelerator
@require_read_token
@slow
| Glm4MoeModelTest |
python | kamyu104__LeetCode-Solutions | Python/flip-equivalent-binary-trees.py | {
"start": 1110,
"end": 1967
} | class ____(object):
def flipEquiv(self, root1, root2):
"""
:type root1: TreeNode
:type root2: TreeNode
:rtype: bool
"""
stk1, stk2 = [root1], [root2]
while stk1 and stk2:
node1, node2 = stk1.pop(), stk2.pop()
if not node1 and not node2:
continue
if not node1 or not node2 or node1.val != node2.val:
return False
if (not node1.left and not node2.right) or \
(node1.left and node2.right and node1.left.val == node2.right.val):
stk1.extend([node1.right, node1.left])
else:
stk1.extend([node1.left, node1.right])
stk2.extend([node2.left, node2.right])
return not stk1 and not stk2
# Time: O(n)
# Space: O(h)
# recursive dfs solution
| Solution2 |
python | pypa__pip | src/pip/_internal/exceptions.py | {
"start": 29170,
"end": 29619
} | class ____(DiagnosticPipError):
reference = "failed-wheel-build-for-install"
def __init__(self, failed: list[InstallRequirement]) -> None:
super().__init__(
message=(
"Failed to build installable wheels for some "
"pyproject.toml based projects"
),
context=", ".join(r.name for r in failed), # type: ignore
hint_stmt=None,
)
| InstallWheelBuildError |
python | mahmoud__boltons | tests/test_strutils.py | {
"start": 3030,
"end": 5568
} | class ____(TestCase):
def test_simple_substitutions(self):
"""Test replacing multiple values."""
m = strutils.MultiReplace({r'cat': 'kedi', r'purple': 'mor', })
self.assertEqual(m.sub('The cat is purple'), 'The kedi is mor')
def test_shortcut_function(self):
"""Test replacing multiple values."""
self.assertEqual(
strutils.multi_replace(
'The cat is purple',
{r'cat': 'kedi', r'purple': 'mor', }
),
'The kedi is mor'
)
def test_substitutions_in_word(self):
"""Test replacing multiple values that are substrings of a word."""
m = strutils.MultiReplace({r'cat': 'kedi', r'purple': 'mor', })
self.assertEqual(m.sub('Thecatispurple'), 'Thekediismor')
def test_sub_with_regex(self):
"""Test substitutions with a regular expression."""
m = strutils.MultiReplace({
r'cat': 'kedi',
r'purple': 'mor',
r'q\w+?t': 'dinglehopper'
}, regex=True)
self.assertEqual(
m.sub('The purple cat ate a quart of jelly'),
'The mor kedi ate a dinglehopper of jelly'
)
def test_sub_with_list(self):
"""Test substitutions from an iterable instead of a dictionary."""
m = strutils.MultiReplace([
(r'cat', 'kedi'),
(r'purple', 'mor'),
(r'q\w+?t', 'dinglehopper'),
], regex=True)
self.assertEqual(
m.sub('The purple cat ate a quart of jelly'),
'The mor kedi ate a dinglehopper of jelly'
)
def test_sub_with_compiled_regex(self):
"""Test substitutions where some regular expressiosn are compiled."""
exp = re.compile(r'q\w+?t')
m = strutils.MultiReplace([
(r'cat', 'kedi'),
(r'purple', 'mor'),
(exp, 'dinglehopper'),
])
self.assertEqual(
m.sub('The purple cat ate a quart of jelly'),
'The mor kedi ate a dinglehopper of jelly'
)
def test_substitutions_with_regex_chars(self):
"""Test replacing values that have special regex characters."""
m = strutils.MultiReplace({'cat.+': 'kedi', r'purple': 'mor', })
self.assertEqual(m.sub('The cat.+ is purple'), 'The kedi is mor')
def test_roundzip():
aaa = b'a' * 10000
assert strutils.gunzip_bytes(strutils.gzip_bytes(aaa)) == aaa
assert strutils.gunzip_bytes(strutils.gzip_bytes(b'')) == b''
| TestMultiReplace |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1217201,
"end": 1229346
} | class ____(sgqlc.types.Type, Node):
"""A listing in the GitHub integration marketplace."""
__schema__ = github_schema
__field_names__ = (
"app",
"company_url",
"configuration_resource_path",
"configuration_url",
"documentation_url",
"extended_description",
"extended_description_html",
"full_description",
"full_description_html",
"has_published_free_trial_plans",
"has_terms_of_service",
"has_verified_owner",
"how_it_works",
"how_it_works_html",
"installation_url",
"installed_for_viewer",
"is_archived",
"is_draft",
"is_paid",
"is_public",
"is_rejected",
"is_unverified",
"is_unverified_pending",
"is_verification_pending_from_draft",
"is_verification_pending_from_unverified",
"is_verified",
"logo_background_color",
"logo_url",
"name",
"normalized_short_description",
"pricing_url",
"primary_category",
"privacy_policy_url",
"resource_path",
"screenshot_urls",
"secondary_category",
"short_description",
"slug",
"status_url",
"support_email",
"support_url",
"terms_of_service_url",
"url",
"viewer_can_add_plans",
"viewer_can_approve",
"viewer_can_delist",
"viewer_can_edit",
"viewer_can_edit_categories",
"viewer_can_edit_plans",
"viewer_can_redraft",
"viewer_can_reject",
"viewer_can_request_approval",
"viewer_has_purchased",
"viewer_has_purchased_for_all_organizations",
"viewer_is_listing_admin",
)
app = sgqlc.types.Field(App, graphql_name="app")
"""The GitHub App this listing represents."""
company_url = sgqlc.types.Field(URI, graphql_name="companyUrl")
"""URL to the listing owner's company site."""
configuration_resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="configurationResourcePath")
"""The HTTP path for configuring access to the listing's integration
or OAuth app
"""
configuration_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="configurationUrl")
"""The HTTP URL for configuring access to the listing's integration
or OAuth app
"""
documentation_url = sgqlc.types.Field(URI, graphql_name="documentationUrl")
"""URL to the listing's documentation."""
extended_description = sgqlc.types.Field(String, graphql_name="extendedDescription")
"""The listing's detailed description."""
extended_description_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="extendedDescriptionHTML")
"""The listing's detailed description rendered to HTML."""
full_description = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="fullDescription")
"""The listing's introductory description."""
full_description_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="fullDescriptionHTML")
"""The listing's introductory description rendered to HTML."""
has_published_free_trial_plans = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="hasPublishedFreeTrialPlans")
"""Does this listing have any plans with a free trial?"""
has_terms_of_service = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="hasTermsOfService")
"""Does this listing have a terms of service link?"""
has_verified_owner = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="hasVerifiedOwner")
"""Whether the creator of the app is a verified org"""
how_it_works = sgqlc.types.Field(String, graphql_name="howItWorks")
"""A technical description of how this app works with GitHub."""
how_it_works_html = sgqlc.types.Field(sgqlc.types.non_null(HTML), graphql_name="howItWorksHTML")
"""The listing's technical description rendered to HTML."""
installation_url = sgqlc.types.Field(URI, graphql_name="installationUrl")
"""URL to install the product to the viewer's account or
organization.
"""
installed_for_viewer = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="installedForViewer")
"""Whether this listing's app has been installed for the current
viewer
"""
is_archived = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isArchived")
"""Whether this listing has been removed from the Marketplace."""
is_draft = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isDraft")
"""Whether this listing is still an editable draft that has not been
submitted for review and is not publicly visible in the
Marketplace.
"""
is_paid = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isPaid")
"""Whether the product this listing represents is available as part
of a paid plan.
"""
is_public = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isPublic")
"""Whether this listing has been approved for display in the
Marketplace.
"""
is_rejected = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isRejected")
"""Whether this listing has been rejected by GitHub for display in
the Marketplace.
"""
is_unverified = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isUnverified")
"""Whether this listing has been approved for unverified display in
the Marketplace.
"""
is_unverified_pending = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isUnverifiedPending")
"""Whether this draft listing has been submitted for review for
approval to be unverified in the Marketplace.
"""
is_verification_pending_from_draft = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isVerificationPendingFromDraft")
"""Whether this draft listing has been submitted for review from
GitHub for approval to be verified in the Marketplace.
"""
is_verification_pending_from_unverified = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isVerificationPendingFromUnverified"
)
"""Whether this unverified listing has been submitted for review from
GitHub for approval to be verified in the Marketplace.
"""
is_verified = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="isVerified")
"""Whether this listing has been approved for verified display in the
Marketplace.
"""
logo_background_color = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="logoBackgroundColor")
"""The hex color code, without the leading '#', for the logo
background.
"""
logo_url = sgqlc.types.Field(
URI, graphql_name="logoUrl", args=sgqlc.types.ArgDict((("size", sgqlc.types.Arg(Int, graphql_name="size", default=400)),))
)
"""URL for the listing's logo image.
Arguments:
* `size` (`Int`): The size in pixels of the resulting square
image. (default: `400`)
"""
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
"""The listing's full name."""
normalized_short_description = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="normalizedShortDescription")
"""The listing's very short description without a trailing period or
ampersands.
"""
pricing_url = sgqlc.types.Field(URI, graphql_name="pricingUrl")
"""URL to the listing's detailed pricing."""
primary_category = sgqlc.types.Field(sgqlc.types.non_null(MarketplaceCategory), graphql_name="primaryCategory")
"""The category that best describes the listing."""
privacy_policy_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="privacyPolicyUrl")
"""URL to the listing's privacy policy, may return an empty string
for listings that do not require a privacy policy URL.
"""
resource_path = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="resourcePath")
"""The HTTP path for the Marketplace listing."""
screenshot_urls = sgqlc.types.Field(sgqlc.types.non_null(sgqlc.types.list_of(String)), graphql_name="screenshotUrls")
"""The URLs for the listing's screenshots."""
secondary_category = sgqlc.types.Field(MarketplaceCategory, graphql_name="secondaryCategory")
"""An alternate category that describes the listing."""
short_description = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="shortDescription")
"""The listing's very short description."""
slug = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="slug")
"""The short name of the listing used in its URL."""
status_url = sgqlc.types.Field(URI, graphql_name="statusUrl")
"""URL to the listing's status page."""
support_email = sgqlc.types.Field(String, graphql_name="supportEmail")
"""An email address for support for this listing's app."""
support_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="supportUrl")
"""Either a URL or an email address for support for this listing's
app, may return an empty string for listings that do not require a
support URL.
"""
terms_of_service_url = sgqlc.types.Field(URI, graphql_name="termsOfServiceUrl")
"""URL to the listing's terms of service."""
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
"""The HTTP URL for the Marketplace listing."""
viewer_can_add_plans = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanAddPlans")
"""Can the current viewer add plans for this Marketplace listing."""
viewer_can_approve = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanApprove")
"""Can the current viewer approve this Marketplace listing."""
viewer_can_delist = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanDelist")
"""Can the current viewer delist this Marketplace listing."""
viewer_can_edit = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanEdit")
"""Can the current viewer edit this Marketplace listing."""
viewer_can_edit_categories = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanEditCategories")
"""Can the current viewer edit the primary and secondary category of
this Marketplace listing.
"""
viewer_can_edit_plans = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanEditPlans")
"""Can the current viewer edit the plans for this Marketplace
listing.
"""
viewer_can_redraft = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanRedraft")
"""Can the current viewer return this Marketplace listing to draft
state so it becomes editable again.
"""
viewer_can_reject = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanReject")
"""Can the current viewer reject this Marketplace listing by
returning it to an editable draft state or rejecting it entirely.
"""
viewer_can_request_approval = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerCanRequestApproval")
"""Can the current viewer request this listing be reviewed for
display in the Marketplace as verified.
"""
viewer_has_purchased = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerHasPurchased")
"""Indicates whether the current user has an active subscription to
this Marketplace listing.
"""
viewer_has_purchased_for_all_organizations = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerHasPurchasedForAllOrganizations"
)
"""Indicates if the current user has purchased a subscription to this
Marketplace listing for all of the organizations the user owns.
"""
viewer_is_listing_admin = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="viewerIsListingAdmin")
"""Does the current viewer role allow them to administer this
Marketplace listing.
"""
| MarketplaceListing |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-analytics-v4/source_google_analytics_v4/source.py | {
"start": 18198,
"end": 18949
} | class ____(GoogleAnalyticsV4Stream):
cursor_field = "ga_date"
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
return {self.cursor_field: max(latest_record.get(self.cursor_field, ""), current_stream_state.get(self.cursor_field, ""))}
def read_records(
self,
sync_mode: SyncMode,
cursor_field: List[str] = None,
stream_slice: Mapping[str, Any] = None,
stream_state: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
if not stream_slice:
return []
return super().read_records(sync_mode, cursor_field, stream_slice, stream_state)
| GoogleAnalyticsV4IncrementalObjectsBase |
python | django__django | tests/model_forms/models.py | {
"start": 3415,
"end": 3644
} | class ____(models.Model):
writer = models.OneToOneField(Writer, models.CASCADE, primary_key=True)
age = models.PositiveIntegerField()
def __str__(self):
return "%s is %s" % (self.writer, self.age)
| WriterProfile |
python | wandb__wandb | tests/system_tests/test_api/conftest.py | {
"start": 867,
"end": 3267
} | class ____:
"""Simple HTTP server for serving parquet files over HTTP."""
def __init__(self):
self.port = self.get_free_port()
self.server = None
self.thread = None
def get_free_port(self) -> int:
"""Get a free port."""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("localhost", 0))
return s.getsockname()[1]
def serve_data_as_parquet_file(self, path: str, data: dict[str, list]):
"""Coverts the given data to an in-memory parquet file and serves it at the given path.
Args:
path: The URL path to serve the parquet file at (e.g., "parquet/1.parquet")
data: The data to serve as a parquet file.
"""
table = pa.table(data)
buffer = io.BytesIO()
pq.write_table(table, buffer)
buffer.seek(0)
ParquetFileHandler.parquet_files[path] = buffer.read()
def start(self):
"""Starts the HTTP server in a background thread."""
self.server = socketserver.TCPServer(
("", self.port),
ParquetFileHandler,
bind_and_activate=False,
)
self.server.allow_reuse_address = True
self.server.server_bind()
self.server.server_activate()
self.thread = threading.Thread(target=self.server.serve_forever, daemon=True)
self.thread.start()
def stop(self):
if self.server:
self.server.shutdown()
self.server.server_close()
if self.thread:
self.thread.join(timeout=1)
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
def create_sample_parquet_file(
data: dict[str, list],
) -> bytes:
"""Create a sample parquet file with history data.
Returns:
Parquet file content as bytes
"""
table = pa.table(data)
# Write to bytes buffer
buffer = io.BytesIO()
pq.write_table(table, buffer)
buffer.seek(0)
return buffer.read()
@pytest.fixture()
def parquet_file_server() -> Generator[ParquetHTTPServer, None, None]:
"""Pytest fixture that provides an HTTP server for serving parquet files."""
server = ParquetHTTPServer()
server.start()
yield server
server.stop()
ParquetFileHandler.parquet_files.clear()
| ParquetHTTPServer |
python | google__jax | jax/_src/pallas/mosaic/sc_core.py | {
"start": 7455,
"end": 11134
} | class ____:
core_axis_name: str
subcore_axis_name: str
num_cores: int = dataclasses.field(default_factory=_get_num_cores)
num_subcores: int = dataclasses.field(
default_factory=_get_num_subcores, init=False
)
def __post_init__(self):
sc_info = get_sparse_core_info()
if self.num_cores > (num_expected := sc_info.num_cores):
raise ValueError(
f"Mesh has {self.num_cores} cores, but the current TPU chip has only"
f" {num_expected} SparseCores"
)
if self.num_subcores != sc_info.num_subcores:
raise ValueError(
f"Mesh has {self.num_subcores} subcores, but the current TPU chip has"
f" only {num_expected} subcores"
)
@property
def backend(self) -> str:
return "mosaic_tpu"
@property
def shape(self):
return collections.OrderedDict(
core=self.num_cores, subcore=self.num_subcores)
def discharges_effect(self, effect):
del effect # Unused.
return False
def _vector_subcore_mesh_discharge_rule(
in_avals,
out_avals,
*args,
mesh,
jaxpr,
compiler_params,
interpret,
debug,
cost_estimate,
name,
metadata,
):
if not isinstance(mesh, VectorSubcoreMesh):
raise TypeError(f"Mesh must be a VectorSubcoreMesh, got {type(mesh)}")
assert len(mesh.shape) == 2
sc_info = get_sparse_core_info().num_cores
if mesh.num_cores > (num_expected := sc_info):
raise ValueError(
f"Mesh has {mesh.num_cores} cores, but the current TPU chip has only"
f" {num_expected} SparseCores"
)
if compiler_params is None:
compiler_params = tpu_core.CompilerParams()
if compiler_params.dimension_semantics is not None:
raise ValueError("VectorSubcoreMesh does not support dimension_semantics=")
return pallas_core.default_mesh_discharge_rule(
in_avals,
out_avals,
*args,
mesh=mesh,
jaxpr=jaxpr,
compiler_params=dataclasses.replace(
compiler_params,
dimension_semantics=["core_parallel", "subcore_parallel"],
kernel_type=tpu_core.KernelType.SC_VECTOR_SUBCORE,
),
interpret=interpret,
debug=debug,
cost_estimate=cost_estimate,
name=name,
memory_space=tpu_core.MemorySpace.HBM,
metadata=metadata,
scratch_shapes=tree_util.tree_leaves(gather_global_allocations(jaxpr)),
)
pallas_core._core_map_mesh_rules[VectorSubcoreMesh] = (
_vector_subcore_mesh_discharge_rule
)
# TODO(slebedev): Only keep the shapes which do not require unrolling.
SUPPORTED_VECTOR_SHAPES = collections.defaultdict(list)
for dtype in [jnp.int32, jnp.uint32, jnp.float32]:
SUPPORTED_VECTOR_SHAPES[jnp.dtype(dtype)].extend([
# fmt: off
(8,), (16,), (32,), (64,),
(1, 8), (1, 16),
(2, 8), (2, 16),
(4, 8), (4, 16),
# fmt: on
])
for dtype in [jnp.int16, jnp.uint16, jnp.float16, jnp.bfloat16]:
SUPPORTED_VECTOR_SHAPES[jnp.dtype(dtype)].extend([
# fmt: off
(16,), (32,), (64,),
(2, 8), (2, 16),
# fmt: on
])
for dtype in [jnp.float16, jnp.bfloat16]:
SUPPORTED_VECTOR_SHAPES[jnp.dtype(dtype)].extend([
# fmt: off
(4, 8), (4, 16),
# fmt: on
])
for dtype in [jnp.int8, jnp.uint8]:
SUPPORTED_VECTOR_SHAPES[jnp.dtype(dtype)].extend([
# fmt: off
(32,), (64,),
(4, 8), (4, 16),
# fmt: on
])
# Make sure all combinations are divisible by the vector register size.
supported_shapes: list[Any] = []
for dtype, supported_shapes in SUPPORTED_VECTOR_SHAPES.items():
for shape in supported_shapes:
assert (math.prod(shape) * dtype.itemsize) % 32 == 0
del dtype, supported_shapes
| VectorSubcoreMesh |
python | pypa__setuptools | setuptools/_vendor/packaging/_manylinux.py | {
"start": 2316,
"end": 9612
} | class ____(NamedTuple):
major: int
minor: int
def _glibc_version_string_confstr() -> str | None:
"""
Primary implementation of glibc_version_string using os.confstr.
"""
# os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
# to be broken or missing. This strategy is used in the standard library
# platform module.
# https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
try:
# Should be a string like "glibc 2.17".
version_string: str | None = os.confstr("CS_GNU_LIBC_VERSION")
assert version_string is not None
_, version = version_string.rsplit()
except (AssertionError, AttributeError, OSError, ValueError):
# os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
return None
return version
def _glibc_version_string_ctypes() -> str | None:
"""
Fallback implementation of glibc_version_string using ctypes.
"""
try:
import ctypes
except ImportError:
return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
#
# We must also handle the special case where the executable is not a
# dynamically linked executable. This can occur when using musl libc,
# for example. In this situation, dlopen() will error, leading to an
# OSError. Interestingly, at least in the case of musl, there is no
# errno set on the OSError. The single string argument used to construct
# OSError comes from libc itself and is therefore not portable to
# hard code here. In any case, failure to call dlopen() means we
# can proceed, so we bail on our attempt.
try:
process_namespace = ctypes.CDLL(None)
except OSError:
return None
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str: str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
def _glibc_version_string() -> str | None:
"""Returns glibc version string, or None if not using glibc."""
return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
def _parse_glibc_version(version_str: str) -> tuple[int, int]:
"""Parse glibc version.
We use a regexp instead of str.split because we want to discard any
random junk that might come after the minor version -- this might happen
in patched/forked versions of glibc (e.g. Linaro's version of glibc
uses version strings like "2.20-2014.11"). See gh-3588.
"""
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn(
f"Expected glibc version with 2 components major.minor,"
f" got: {version_str}",
RuntimeWarning,
stacklevel=2,
)
return -1, -1
return int(m.group("major")), int(m.group("minor"))
@functools.lru_cache
def _get_glibc_version() -> tuple[int, int]:
version_str = _glibc_version_string()
if version_str is None:
return (-1, -1)
return _parse_glibc_version(version_str)
# From PEP 513, PEP 600
def _is_compatible(arch: str, version: _GLibCVersion) -> bool:
sys_glibc = _get_glibc_version()
if sys_glibc < version:
return False
# Check for presence of _manylinux module.
try:
import _manylinux
except ImportError:
return True
if hasattr(_manylinux, "manylinux_compatible"):
result = _manylinux.manylinux_compatible(version[0], version[1], arch)
if result is not None:
return bool(result)
return True
if version == _GLibCVersion(2, 5):
if hasattr(_manylinux, "manylinux1_compatible"):
return bool(_manylinux.manylinux1_compatible)
if version == _GLibCVersion(2, 12):
if hasattr(_manylinux, "manylinux2010_compatible"):
return bool(_manylinux.manylinux2010_compatible)
if version == _GLibCVersion(2, 17):
if hasattr(_manylinux, "manylinux2014_compatible"):
return bool(_manylinux.manylinux2014_compatible)
return True
_LEGACY_MANYLINUX_MAP = {
# CentOS 7 w/ glibc 2.17 (PEP 599)
(2, 17): "manylinux2014",
# CentOS 6 w/ glibc 2.12 (PEP 571)
(2, 12): "manylinux2010",
# CentOS 5 w/ glibc 2.5 (PEP 513)
(2, 5): "manylinux1",
}
def platform_tags(archs: Sequence[str]) -> Iterator[str]:
"""Generate manylinux tags compatible to the current platform.
:param archs: Sequence of compatible architectures.
The first one shall be the closest to the actual architecture and be the part of
platform tag after the ``linux_`` prefix, e.g. ``x86_64``.
The ``linux_`` prefix is assumed as a prerequisite for the current platform to
be manylinux-compatible.
:returns: An iterator of compatible manylinux tags.
"""
if not _have_compatible_abi(sys.executable, archs):
return
# Oldest glibc to be supported regardless of architecture is (2, 17).
too_old_glibc2 = _GLibCVersion(2, 16)
if set(archs) & {"x86_64", "i686"}:
# On x86/i686 also oldest glibc to be supported is (2, 5).
too_old_glibc2 = _GLibCVersion(2, 4)
current_glibc = _GLibCVersion(*_get_glibc_version())
glibc_max_list = [current_glibc]
# We can assume compatibility across glibc major versions.
# https://sourceware.org/bugzilla/show_bug.cgi?id=24636
#
# Build a list of maximum glibc versions so that we can
# output the canonical list of all glibc from current_glibc
# down to too_old_glibc2, including all intermediary versions.
for glibc_major in range(current_glibc.major - 1, 1, -1):
glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
for arch in archs:
for glibc_max in glibc_max_list:
if glibc_max.major == too_old_glibc2.major:
min_minor = too_old_glibc2.minor
else:
# For other glibc major versions oldest supported is (x, 0).
min_minor = -1
for glibc_minor in range(glibc_max.minor, min_minor, -1):
glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
tag = "manylinux_{}_{}".format(*glibc_version)
if _is_compatible(arch, glibc_version):
yield f"{tag}_{arch}"
# Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
if glibc_version in _LEGACY_MANYLINUX_MAP:
legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
if _is_compatible(arch, glibc_version):
yield f"{legacy_tag}_{arch}"
| _GLibCVersion |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_table03.py | {
"start": 315,
"end": 1003
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
# Turn off default URL format for testing.
workbook.default_url_format = None
worksheet = workbook.add_worksheet()
worksheet.set_column("C:F", 10.288)
worksheet.add_table("C3:F13")
worksheet.write("A1", "http://perl.com/")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | tests/test_utils/tests.py | {
"start": 4827,
"end": 7181
} | class ____(TransactionTestCase):
available_apps = []
def test_skip_class_unless_db_feature(self):
@skipUnlessDBFeature("__class__")
class NotSkippedTests(TestCase):
def test_dummy(self):
return
@skipUnlessDBFeature("missing")
@skipIfDBFeature("__class__")
class SkippedTests(TestCase):
def test_will_be_skipped(self):
self.fail("We should never arrive here.")
@skipIfDBFeature("__dict__")
class SkippedTestsSubclass(SkippedTests):
pass
test_suite = unittest.TestSuite()
test_suite.addTest(NotSkippedTests("test_dummy"))
try:
test_suite.addTest(SkippedTests("test_will_be_skipped"))
test_suite.addTest(SkippedTestsSubclass("test_will_be_skipped"))
except unittest.SkipTest:
self.fail("SkipTest should not be raised here.")
result = unittest.TextTestRunner(stream=StringIO()).run(test_suite)
# PY312: Python 3.12.1 does not include skipped tests in the number of
# running tests.
self.assertEqual(
result.testsRun, 1 if sys.version_info[:3] == (3, 12, 1) else 3
)
self.assertEqual(len(result.skipped), 2)
self.assertEqual(result.skipped[0][1], "Database has feature(s) __class__")
self.assertEqual(result.skipped[1][1], "Database has feature(s) __class__")
def test_missing_default_databases(self):
@skipIfDBFeature("missing")
class MissingDatabases(SimpleTestCase):
def test_assertion_error(self):
pass
suite = unittest.TestSuite()
try:
suite.addTest(MissingDatabases("test_assertion_error"))
except unittest.SkipTest:
self.fail("SkipTest should not be raised at this stage")
runner = unittest.TextTestRunner(stream=StringIO())
msg = (
"skipIfDBFeature cannot be used on <class 'test_utils.tests."
"SkippingClassTestCase.test_missing_default_databases.<locals>."
"MissingDatabases'> as it doesn't allow queries against the "
"'default' database."
)
with self.assertRaisesMessage(ValueError, msg):
runner.run(suite)
@override_settings(ROOT_URLCONF="test_utils.urls")
| SkippingClassTestCase |
python | django__django | tests/postgres_tests/test_search.py | {
"start": 6377,
"end": 6726
} | class ____(PostgreSQLSimpleTestCase):
def test_from_parameter(self):
self.assertIsNone(SearchConfig.from_parameter(None))
self.assertEqual(SearchConfig.from_parameter("foo"), SearchConfig("foo"))
self.assertEqual(
SearchConfig.from_parameter(SearchConfig("bar")), SearchConfig("bar")
)
| SearchConfigTests |
python | ansible__ansible | lib/ansible/_internal/_json/_profiles/_cache_persistence.py | {
"start": 1837,
"end": 1913
} | class ____(_profiles.AnsibleProfileJSONDecoder):
_profile = _Profile
| Decoder |
python | coleifer__peewee | playhouse/signals.py | {
"start": 1832,
"end": 2511
} | class ____(_Model):
def __init__(self, *args, **kwargs):
super(Model, self).__init__(*args, **kwargs)
pre_init.send(self)
def save(self, *args, **kwargs):
pk_value = self._pk if self._meta.primary_key else True
created = kwargs.get('force_insert', False) or not bool(pk_value)
pre_save.send(self, created=created)
ret = super(Model, self).save(*args, **kwargs)
post_save.send(self, created=created)
return ret
def delete_instance(self, *args, **kwargs):
pre_delete.send(self)
ret = super(Model, self).delete_instance(*args, **kwargs)
post_delete.send(self)
return ret
| Model |
python | walkccc__LeetCode | solutions/3375. Minimum Operations to Make Array Values Equal to K/3375.py | {
"start": 0,
"end": 218
} | class ____:
def minOperations(self, nums: list[int], k: int) -> int:
numsSet = set(nums)
mn = min(nums)
if mn < k:
return -1
if mn > k:
return len(numsSet)
return len(numsSet) - 1
| Solution |
python | django__django | tests/admin_views/admin.py | {
"start": 22056,
"end": 22254
} | class ____(admin.ModelAdmin):
list_filter = (("warm", CustomTemplateBooleanFieldListFilter),)
# For Selenium Prepopulated tests -------------------------------------
| CustomTemplateFilterColorAdmin |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_complex.py | {
"start": 3718,
"end": 41272
} | class ____(__TestCase):
def assertFloatIdentical(self, x, y):
"""Fail unless floats x and y are identical, in the sense that:
(1) both x and y are nans, or
(2) both x and y are infinities, with the same sign, or
(3) both x and y are zeros, with the same sign, or
(4) x and y are both finite and nonzero, and x == y
"""
msg = 'floats {!r} and {!r} are not identical'
if math.isnan(x) or math.isnan(y):
if math.isnan(x) and math.isnan(y):
return
elif x == y:
if x != 0.0:
return
# both zero; check that signs match
elif math.copysign(1.0, x) == math.copysign(1.0, y):
return
else:
msg += ': zeros have different signs'
self.fail(msg.format(x, y))
def assertComplexesAreIdentical(self, x, y):
"""Fail unless complex numbers x and y have equal values and signs.
In particular, if x and y both have real (or imaginary) part
zero, but the zeros have different signs, this test will fail.
"""
self.assertFloatIdentical(x.real, y.real)
self.assertFloatIdentical(x.imag, y.imag)
def assertAlmostEqual(self, a, b):
if isinstance(a, complex):
if isinstance(b, complex):
unittest.TestCase.assertAlmostEqual(self, a.real, b.real)
unittest.TestCase.assertAlmostEqual(self, a.imag, b.imag)
else:
unittest.TestCase.assertAlmostEqual(self, a.real, b)
unittest.TestCase.assertAlmostEqual(self, a.imag, 0.)
else:
if isinstance(b, complex):
unittest.TestCase.assertAlmostEqual(self, a, b.real)
unittest.TestCase.assertAlmostEqual(self, 0., b.imag)
else:
unittest.TestCase.assertAlmostEqual(self, a, b)
def assertCloseAbs(self, x, y, eps=1e-9):
"""Return true iff floats x and y "are close"."""
# put the one with larger magnitude second
if abs(x) > abs(y):
x, y = y, x
if y == 0:
return abs(x) < eps
if x == 0:
return abs(y) < eps
# check that relative difference < eps
self.assertTrue(abs((x-y)/y) < eps)
def assertFloatsAreIdentical(self, x, y):
"""assert that floats x and y are identical, in the sense that:
(1) both x and y are nans, or
(2) both x and y are infinities, with the same sign, or
(3) both x and y are zeros, with the same sign, or
(4) x and y are both finite and nonzero, and x == y
"""
msg = 'floats {!r} and {!r} are not identical'
if isnan(x) or isnan(y):
if isnan(x) and isnan(y):
return
elif x == y:
if x != 0.0:
return
# both zero; check that signs match
elif copysign(1.0, x) == copysign(1.0, y):
return
else:
msg += ': zeros have different signs'
self.fail(msg.format(x, y))
def assertClose(self, x, y, eps=1e-9):
"""Return true iff complexes x and y "are close"."""
self.assertCloseAbs(x.real, y.real, eps)
self.assertCloseAbs(x.imag, y.imag, eps)
def check_div(self, x, y):
"""Compute complex z=x*y, and check that z/x==y and z/y==x."""
z = x * y
if x != 0:
q = z / x
self.assertClose(q, y)
q = z.__truediv__(x)
self.assertClose(q, y)
if y != 0:
q = z / y
self.assertClose(q, x)
q = z.__truediv__(y)
self.assertClose(q, x)
@slowTest
def test_truediv(self):
simple_real = [float(i) for i in range(-5, 6)]
simple_complex = [complex(x, y) for x in simple_real for y in simple_real]
for x in simple_complex:
for y in simple_complex:
self.check_div(x, y)
# A naive complex division algorithm (such as in 2.0) is very prone to
# nonsense errors for these (overflows and underflows).
self.check_div(complex(1e200, 1e200), 1+0j)
self.check_div(complex(1e-200, 1e-200), 1+0j)
# Just for fun.
for i in range(100):
self.check_div(complex(random(), random()),
complex(random(), random()))
self.assertAlmostEqual(complex.__truediv__(2+0j, 1+1j), 1-1j)
self.assertRaises(TypeError, operator.truediv, 1j, None)
self.assertRaises(TypeError, operator.truediv, None, 1j)
for denom_real, denom_imag in [(0, NAN), (NAN, 0), (NAN, NAN)]:
z = complex(0, 0) / complex(denom_real, denom_imag)
self.assertTrue(isnan(z.real))
self.assertTrue(isnan(z.imag))
def test_truediv_zero_division(self):
for a, b in ZERO_DIVISION:
with self.assertRaises(ZeroDivisionError):
a / b
def test_floordiv(self):
with self.assertRaises(TypeError):
(1+1j) // (1+0j)
with self.assertRaises(TypeError):
(1+1j) // 1.0
with self.assertRaises(TypeError):
(1+1j) // 1
with self.assertRaises(TypeError):
1.0 // (1+0j)
with self.assertRaises(TypeError):
1 // (1+0j)
def test_floordiv_zero_division(self):
for a, b in ZERO_DIVISION:
with self.assertRaises(TypeError):
a // b
def test_richcompare(self):
self.assertIs(complex.__eq__(1+1j, 1<<10000), False)
self.assertIs(complex.__lt__(1+1j, None), NotImplemented)
self.assertIs(complex.__eq__(1+1j, None), NotImplemented)
self.assertIs(complex.__eq__(1+1j, 1+1j), True)
self.assertIs(complex.__eq__(1+1j, 2+2j), False)
self.assertIs(complex.__ne__(1+1j, 1+1j), False)
self.assertIs(complex.__ne__(1+1j, 2+2j), True)
for i in range(1, 100):
f = i / 100.0
self.assertIs(complex.__eq__(f+0j, f), True)
self.assertIs(complex.__ne__(f+0j, f), False)
self.assertIs(complex.__eq__(complex(f, f), f), False)
self.assertIs(complex.__ne__(complex(f, f), f), True)
self.assertIs(complex.__lt__(1+1j, 2+2j), NotImplemented)
self.assertIs(complex.__le__(1+1j, 2+2j), NotImplemented)
self.assertIs(complex.__gt__(1+1j, 2+2j), NotImplemented)
self.assertIs(complex.__ge__(1+1j, 2+2j), NotImplemented)
self.assertRaises(TypeError, operator.lt, 1+1j, 2+2j)
self.assertRaises(TypeError, operator.le, 1+1j, 2+2j)
self.assertRaises(TypeError, operator.gt, 1+1j, 2+2j)
self.assertRaises(TypeError, operator.ge, 1+1j, 2+2j)
self.assertIs(operator.eq(1+1j, 1+1j), True)
self.assertIs(operator.eq(1+1j, 2+2j), False)
self.assertIs(operator.ne(1+1j, 1+1j), False)
self.assertIs(operator.ne(1+1j, 2+2j), True)
self.assertIs(operator.eq(1+1j, 2.0), False)
def test_richcompare_boundaries(self):
def check(n, deltas, is_equal, imag = 0.0):
for delta in deltas:
i = n + delta
z = complex(i, imag)
self.assertIs(complex.__eq__(z, i), is_equal(delta))
self.assertIs(complex.__ne__(z, i), not is_equal(delta))
# For IEEE-754 doubles the following should hold:
# x in [2 ** (52 + i), 2 ** (53 + i + 1)] -> x mod 2 ** i == 0
# where the interval is representable, of course.
for i in range(1, 10):
pow = 52 + i
mult = 2 ** i
check(2 ** pow, range(1, 101), lambda delta: delta % mult == 0)
check(2 ** pow, range(1, 101), lambda delta: False, float(i))
check(2 ** 53, range(-100, 0), lambda delta: True)
def test_add(self):
self.assertEqual(1j + int(+1), complex(+1, 1))
self.assertEqual(1j + int(-1), complex(-1, 1))
self.assertRaises(OverflowError, operator.add, 1j, 10**1000)
self.assertRaises(TypeError, operator.add, 1j, None)
self.assertRaises(TypeError, operator.add, None, 1j)
def test_sub(self):
self.assertEqual(1j - int(+1), complex(-1, 1))
self.assertEqual(1j - int(-1), complex(1, 1))
self.assertRaises(OverflowError, operator.sub, 1j, 10**1000)
self.assertRaises(TypeError, operator.sub, 1j, None)
self.assertRaises(TypeError, operator.sub, None, 1j)
def test_mul(self):
self.assertEqual(1j * int(20), complex(0, 20))
self.assertEqual(1j * int(-1), complex(0, -1))
self.assertRaises(OverflowError, operator.mul, 1j, 10**1000)
self.assertRaises(TypeError, operator.mul, 1j, None)
self.assertRaises(TypeError, operator.mul, None, 1j)
def test_mod(self):
# % is no longer supported on complex numbers
with self.assertRaises(TypeError):
(1+1j) % (1+0j)
with self.assertRaises(TypeError):
(1+1j) % 1.0
with self.assertRaises(TypeError):
(1+1j) % 1
with self.assertRaises(TypeError):
1.0 % (1+0j)
with self.assertRaises(TypeError):
1 % (1+0j)
def test_mod_zero_division(self):
for a, b in ZERO_DIVISION:
with self.assertRaises(TypeError):
a % b
def test_divmod(self):
self.assertRaises(TypeError, divmod, 1+1j, 1+0j)
self.assertRaises(TypeError, divmod, 1+1j, 1.0)
self.assertRaises(TypeError, divmod, 1+1j, 1)
self.assertRaises(TypeError, divmod, 1.0, 1+0j)
self.assertRaises(TypeError, divmod, 1, 1+0j)
def test_divmod_zero_division(self):
for a, b in ZERO_DIVISION:
self.assertRaises(TypeError, divmod, a, b)
def test_pow(self):
self.assertAlmostEqual(pow(1+1j, 0+0j), 1.0)
self.assertAlmostEqual(pow(0+0j, 2+0j), 0.0)
self.assertEqual(pow(0+0j, 2000+0j), 0.0)
self.assertEqual(pow(0, 0+0j), 1.0)
self.assertEqual(pow(-1, 0+0j), 1.0)
self.assertRaises(ZeroDivisionError, pow, 0+0j, 1j)
self.assertRaises(ZeroDivisionError, pow, 0+0j, -1000)
self.assertAlmostEqual(pow(1j, -1), 1/1j)
self.assertAlmostEqual(pow(1j, 200), 1)
self.assertRaises(ValueError, pow, 1+1j, 1+1j, 1+1j)
self.assertRaises(OverflowError, pow, 1e200+1j, 1e200+1j)
self.assertRaises(TypeError, pow, 1j, None)
self.assertRaises(TypeError, pow, None, 1j)
self.assertAlmostEqual(pow(1j, 0.5), 0.7071067811865476+0.7071067811865475j)
a = 3.33+4.43j
self.assertEqual(a ** 0j, 1)
self.assertEqual(a ** 0.+0.j, 1)
self.assertEqual(3j ** 0j, 1)
self.assertEqual(3j ** 0, 1)
try:
0j ** a
except ZeroDivisionError:
pass
else:
self.fail("should fail 0.0 to negative or complex power")
try:
0j ** (3-2j)
except ZeroDivisionError:
pass
else:
self.fail("should fail 0.0 to negative or complex power")
# The following is used to exercise certain code paths
self.assertEqual(a ** 105, a ** 105)
self.assertEqual(a ** -105, a ** -105)
self.assertEqual(a ** -30, a ** -30)
self.assertEqual(0.0j ** 0, 1)
b = 5.1+2.3j
self.assertRaises(ValueError, pow, a, b, 0)
# Check some boundary conditions; some of these used to invoke
# undefined behaviour (https://bugs.python.org/issue44698). We're
# not actually checking the results of these operations, just making
# sure they don't crash (for example when using clang's
# UndefinedBehaviourSanitizer).
values = (sys.maxsize, sys.maxsize+1, sys.maxsize-1,
-sys.maxsize, -sys.maxsize+1, -sys.maxsize+1)
for real in values:
for imag in values:
with self.subTest(real=real, imag=imag):
c = complex(real, imag)
try:
c ** real
except OverflowError:
pass
try:
c ** c
except OverflowError:
pass
# gh-113841: possible undefined division by 0 in _Py_c_pow()
x, y = 9j, 33j**3
with self.assertRaises(OverflowError):
x**y
def test_pow_with_small_integer_exponents(self):
# Check that small integer exponents are handled identically
# regardless of their type.
values = [
complex(5.0, 12.0),
complex(5.0e100, 12.0e100),
complex(-4.0, INF),
complex(INF, 0.0),
]
exponents = [-19, -5, -3, -2, -1, 0, 1, 2, 3, 5, 19]
for value in values:
for exponent in exponents:
with self.subTest(value=value, exponent=exponent):
try:
int_pow = value**exponent
except OverflowError:
int_pow = "overflow"
try:
float_pow = value**float(exponent)
except OverflowError:
float_pow = "overflow"
try:
complex_pow = value**complex(exponent)
except OverflowError:
complex_pow = "overflow"
self.assertEqual(str(float_pow), str(int_pow))
self.assertEqual(str(complex_pow), str(int_pow))
def test_boolcontext(self):
for i in range(100):
with torch._dynamo.error_on_graph_break(False):
r1 = random()
r2 = random()
self.assertTrue(complex(r1 + 1e-6, r2 + 1e-6))
self.assertTrue(not complex(0.0, 0.0))
self.assertTrue(1j)
def test_conjugate(self):
self.assertClose(complex(5.3, 9.8).conjugate(), 5.3-9.8j)
def test_constructor(self):
def check(z, x, y):
self.assertIs(type(z), complex)
self.assertFloatsAreIdentical(z.real, x)
self.assertFloatsAreIdentical(z.imag, y)
check(complex(), 0.0, 0.0)
check(complex(10), 10.0, 0.0)
check(complex(4.25), 4.25, 0.0)
check(complex(4.25+0j), 4.25, 0.0)
check(complex(4.25+0.5j), 4.25, 0.5)
check(complex(ComplexSubclass(4.25+0.5j)), 4.25, 0.5)
check(complex(WithComplex(4.25+0.5j)), 4.25, 0.5)
check(complex(1, 10), 1.0, 10.0)
check(complex(1, 10.0), 1.0, 10.0)
check(complex(1, 4.25), 1.0, 4.25)
check(complex(1.0, 10), 1.0, 10.0)
check(complex(4.25, 10), 4.25, 10.0)
check(complex(1.0, 10.0), 1.0, 10.0)
check(complex(4.25, 0.5), 4.25, 0.5)
check(complex(4.25+0j, 0), 4.25, 0.0)
check(complex(ComplexSubclass(4.25+0j), 0), 4.25, 0.0)
check(complex(WithComplex(4.25+0j), 0), 4.25, 0.0)
check(complex(4.25j, 0), 0.0, 4.25)
check(complex(0j, 4.25), 0.0, 4.25)
check(complex(0, 4.25+0j), 0.0, 4.25)
check(complex(0, ComplexSubclass(4.25+0j)), 0.0, 4.25)
with self.assertRaisesRegex(TypeError,
"second argument must be a number, not 'WithComplex'"):
complex(0, WithComplex(4.25+0j))
check(complex(0.0, 4.25j), -4.25, 0.0)
check(complex(4.25+0j, 0j), 4.25, 0.0)
check(complex(4.25j, 0j), 0.0, 4.25)
check(complex(0j, 4.25+0j), 0.0, 4.25)
check(complex(0j, 4.25j), -4.25, 0.0)
check(complex(real=4.25), 4.25, 0.0)
check(complex(real=4.25+0j), 4.25, 0.0)
check(complex(real=4.25+1.5j), 4.25, 1.5)
check(complex(imag=1.5), 0.0, 1.5)
check(complex(real=4.25, imag=1.5), 4.25, 1.5)
check(complex(4.25, imag=1.5), 4.25, 1.5)
# check that the sign of a zero in the real or imaginary part
# is preserved when constructing from two floats.
for x in 1.0, -1.0:
for y in 0.0, -0.0:
check(complex(x, y), x, y)
check(complex(y, x), y, x)
c = complex(4.25, 1.5)
self.assertIs(complex(c), c)
c2 = ComplexSubclass(c)
self.assertEqual(c2, c)
self.assertIs(type(c2), ComplexSubclass)
del c, c2
self.assertRaisesRegex(TypeError,
"first argument must be a string or a number, not 'dict'",
complex, {})
self.assertRaisesRegex(TypeError,
"first argument must be a string or a number, not 'NoneType'",
complex, None)
self.assertRaisesRegex(TypeError,
"first argument must be a string or a number, not 'dict'",
complex, {1:2}, 0)
self.assertRaisesRegex(TypeError,
"can't take second arg if first is a string",
complex, '1', 0)
self.assertRaisesRegex(TypeError,
"second argument must be a number, not 'dict'",
complex, 0, {1:2})
self.assertRaisesRegex(TypeError,
"second arg can't be a string",
complex, 0, '1')
self.assertRaises(TypeError, complex, WithComplex(1.5))
self.assertRaises(TypeError, complex, WithComplex(1))
self.assertRaises(TypeError, complex, WithComplex(None))
self.assertRaises(TypeError, complex, WithComplex(4.25+0j), object())
self.assertRaises(TypeError, complex, WithComplex(1.5), object())
self.assertRaises(TypeError, complex, WithComplex(1), object())
self.assertRaises(TypeError, complex, WithComplex(None), object())
with torch._dynamo.error_on_graph_break(False):
class EvilExc(Exception):
pass
class evilcomplex:
def __complex__(self):
raise EvilExc
self.assertRaises(EvilExc, complex, evilcomplex())
check(complex(WithFloat(4.25)), 4.25, 0.0)
check(complex(WithFloat(4.25), 1.5), 4.25, 1.5)
check(complex(1.5, WithFloat(4.25)), 1.5, 4.25)
self.assertRaises(TypeError, complex, WithFloat(42))
self.assertRaises(TypeError, complex, WithFloat(42), 1.5)
self.assertRaises(TypeError, complex, 1.5, WithFloat(42))
self.assertRaises(TypeError, complex, WithFloat(None))
self.assertRaises(TypeError, complex, WithFloat(None), 1.5)
self.assertRaises(TypeError, complex, 1.5, WithFloat(None))
check(complex(WithIndex(42)), 42.0, 0.0)
check(complex(WithIndex(42), 1.5), 42.0, 1.5)
check(complex(1.5, WithIndex(42)), 1.5, 42.0)
self.assertRaises(OverflowError, complex, WithIndex(2**2000))
self.assertRaises(OverflowError, complex, WithIndex(2**2000), 1.5)
self.assertRaises(OverflowError, complex, 1.5, WithIndex(2**2000))
self.assertRaises(TypeError, complex, WithIndex(None))
self.assertRaises(TypeError, complex, WithIndex(None), 1.5)
self.assertRaises(TypeError, complex, 1.5, WithIndex(None))
with torch._dynamo.error_on_graph_break(False):
class MyInt:
def __int__(self):
return 42
self.assertRaises(TypeError, complex, MyInt())
self.assertRaises(TypeError, complex, MyInt(), 1.5)
self.assertRaises(TypeError, complex, 1.5, MyInt())
with torch._dynamo.error_on_graph_break(False):
class complex0(complex):
"""Test usage of __complex__() when inheriting from 'complex'"""
def __complex__(self):
return 42j
class complex1(complex):
"""Test usage of __complex__() with a __new__() method"""
def __new__(self, value=0j):
return complex.__new__(self, 2*value)
def __complex__(self):
return self
class complex2(complex):
"""Make sure that __complex__() calls fail if anything other than a
complex is returned"""
def __complex__(self):
return None
check(complex(complex0(1j)), 0.0, 42.0)
with self.assertWarns(DeprecationWarning):
check(complex(complex1(1j)), 0.0, 2.0)
self.assertRaises(TypeError, complex, complex2(1j))
def test___complex__(self):
z = 3 + 4j
self.assertEqual(z.__complex__(), z)
self.assertEqual(type(z.__complex__()), complex)
z = ComplexSubclass(3 + 4j)
self.assertEqual(z.__complex__(), 3 + 4j)
self.assertEqual(type(z.__complex__()), complex)
@support.requires_IEEE_754
def test_constructor_special_numbers(self):
for x in 0.0, -0.0, INF, -INF, NAN:
for y in 0.0, -0.0, INF, -INF, NAN:
with self.subTest(x=x, y=y):
z = complex(x, y)
self.assertFloatsAreIdentical(z.real, x)
self.assertFloatsAreIdentical(z.imag, y)
z = ComplexSubclass(x, y)
self.assertIs(type(z), ComplexSubclass)
self.assertFloatsAreIdentical(z.real, x)
self.assertFloatsAreIdentical(z.imag, y)
z = complex(ComplexSubclass(x, y))
self.assertIs(type(z), complex)
self.assertFloatsAreIdentical(z.real, x)
self.assertFloatsAreIdentical(z.imag, y)
z = ComplexSubclass(complex(x, y))
self.assertIs(type(z), ComplexSubclass)
self.assertFloatsAreIdentical(z.real, x)
self.assertFloatsAreIdentical(z.imag, y)
def test_constructor_from_string(self):
def check(z, x, y):
self.assertIs(type(z), complex)
self.assertFloatsAreIdentical(z.real, x)
self.assertFloatsAreIdentical(z.imag, y)
check(complex("1"), 1.0, 0.0)
check(complex("1j"), 0.0, 1.0)
check(complex("-1"), -1.0, 0.0)
check(complex("+1"), 1.0, 0.0)
check(complex("1+2j"), 1.0, 2.0)
check(complex("(1+2j)"), 1.0, 2.0)
check(complex("(1.5+4.25j)"), 1.5, 4.25)
check(complex("4.25+1J"), 4.25, 1.0)
check(complex(" ( +4.25-6J )"), 4.25, -6.0)
check(complex(" ( +4.25-J )"), 4.25, -1.0)
check(complex(" ( +4.25+j )"), 4.25, 1.0)
check(complex("J"), 0.0, 1.0)
check(complex("( j )"), 0.0, 1.0)
check(complex("+J"), 0.0, 1.0)
check(complex("( -j)"), 0.0, -1.0)
check(complex('1-1j'), 1.0, -1.0)
check(complex('1J'), 0.0, 1.0)
check(complex('1e-500'), 0.0, 0.0)
check(complex('-1e-500j'), 0.0, -0.0)
check(complex('1e-500+1e-500j'), 0.0, 0.0)
check(complex('-1e-500+1e-500j'), -0.0, 0.0)
check(complex('1e-500-1e-500j'), 0.0, -0.0)
check(complex('-1e-500-1e-500j'), -0.0, -0.0)
# SF bug 543840: complex(string) accepts strings with \0
# Fixed in 2.3.
self.assertRaises(ValueError, complex, '1+1j\0j')
self.assertRaises(ValueError, complex, "")
self.assertRaises(ValueError, complex, "\0")
self.assertRaises(ValueError, complex, "3\09")
self.assertRaises(ValueError, complex, "1+")
self.assertRaises(ValueError, complex, "1+1j+1j")
self.assertRaises(ValueError, complex, "--")
self.assertRaises(ValueError, complex, "(1+2j")
self.assertRaises(ValueError, complex, "1+2j)")
self.assertRaises(ValueError, complex, "1+(2j)")
self.assertRaises(ValueError, complex, "(1+2j)123")
self.assertRaises(ValueError, complex, "x")
self.assertRaises(ValueError, complex, "1j+2")
self.assertRaises(ValueError, complex, "1e1ej")
self.assertRaises(ValueError, complex, "1e++1ej")
self.assertRaises(ValueError, complex, ")1+2j(")
# the following three are accepted by Python 2.6
self.assertRaises(ValueError, complex, "1..1j")
self.assertRaises(ValueError, complex, "1.11.1j")
self.assertRaises(ValueError, complex, "1e1.1j")
# check that complex accepts long unicode strings
self.assertIs(type(complex("1"*500)), complex)
# check whitespace processing
self.assertEqual(complex('\N{EM SPACE}(\N{EN SPACE}1+1j ) '), 1+1j)
# Invalid unicode string
# See bpo-34087
self.assertRaises(ValueError, complex, '\u3053\u3093\u306b\u3061\u306f')
def test_constructor_negative_nans_from_string(self):
self.assertEqual(copysign(1., complex("-nan").real), -1.)
self.assertEqual(copysign(1., complex("-nanj").imag), -1.)
self.assertEqual(copysign(1., complex("-nan-nanj").real), -1.)
self.assertEqual(copysign(1., complex("-nan-nanj").imag), -1.)
def test_underscores(self):
# check underscores
for lit in VALID_UNDERSCORE_LITERALS:
if not any(ch in lit for ch in 'xXoObB'):
self.assertEqual(complex(lit), eval(lit))
self.assertEqual(complex(lit), complex(lit.replace('_', '')))
for lit in INVALID_UNDERSCORE_LITERALS:
if lit in ('0_7', '09_99'): # octals are not recognized here
continue
if not any(ch in lit for ch in 'xXoObB'):
self.assertRaises(ValueError, complex, lit)
def test_hash(self):
for x in range(-30, 30):
self.assertEqual(hash(x), hash(complex(x, 0)))
x /= 3.0 # now check against floating-point
self.assertEqual(hash(x), hash(complex(x, 0.)))
self.assertNotEqual(hash(2000005 - 1j), -1)
def test_abs(self):
nums = [complex(x/3., y/7.) for x in range(-9,9) for y in range(-9,9)]
for num in nums:
self.assertAlmostEqual((num.real**2 + num.imag**2) ** 0.5, abs(num))
self.assertRaises(OverflowError, abs, complex(DBL_MAX, DBL_MAX))
def test_repr_str(self):
def test(v, expected, test_fn=self.assertEqual):
test_fn(repr(v), expected)
test_fn(str(v), expected)
test(1+6j, '(1+6j)')
test(1-6j, '(1-6j)')
test(-(1+0j), '(-1+-0j)', test_fn=self.assertNotEqual)
test(complex(1., INF), "(1+infj)")
test(complex(1., -INF), "(1-infj)")
test(complex(INF, 1), "(inf+1j)")
test(complex(-INF, INF), "(-inf+infj)")
test(complex(NAN, 1), "(nan+1j)")
test(complex(1, NAN), "(1+nanj)")
test(complex(NAN, NAN), "(nan+nanj)")
test(complex(-NAN, -NAN), "(nan+nanj)")
test(complex(0, INF), "infj")
test(complex(0, -INF), "-infj")
test(complex(0, NAN), "nanj")
self.assertEqual(1-6j,complex(repr(1-6j)))
self.assertEqual(1+6j,complex(repr(1+6j)))
self.assertEqual(-6j,complex(repr(-6j)))
self.assertEqual(6j,complex(repr(6j)))
@support.requires_IEEE_754
def test_negative_zero_repr_str(self):
def test(v, expected, test_fn=self.assertEqual):
test_fn(repr(v), expected)
test_fn(str(v), expected)
test(complex(0., 1.), "1j")
test(complex(-0., 1.), "(-0+1j)")
test(complex(0., -1.), "-1j")
test(complex(-0., -1.), "(-0-1j)")
test(complex(0., 0.), "0j")
test(complex(0., -0.), "-0j")
test(complex(-0., 0.), "(-0+0j)")
test(complex(-0., -0.), "(-0-0j)")
def test_pos(self):
self.assertEqual(+(1+6j), 1+6j)
self.assertEqual(+ComplexSubclass(1, 6), 1+6j)
self.assertIs(type(+ComplexSubclass(1, 6)), complex)
def test_neg(self):
self.assertEqual(-(1+6j), -1-6j)
def test_getnewargs(self):
self.assertEqual((1+2j).__getnewargs__(), (1.0, 2.0))
self.assertEqual((1-2j).__getnewargs__(), (1.0, -2.0))
self.assertEqual((2j).__getnewargs__(), (0.0, 2.0))
self.assertEqual((-0j).__getnewargs__(), (0.0, -0.0))
self.assertEqual(complex(0, INF).__getnewargs__(), (0.0, INF))
self.assertEqual(complex(INF, 0).__getnewargs__(), (INF, 0.0))
@support.requires_IEEE_754
def test_plus_minus_0j(self):
# test that -0j and 0j literals are not identified
z1, z2 = 0j, -0j
self.assertFloatsAreIdentical(z1.imag, 0.0)
self.assertFloatsAreIdentical(z2.imag, -0.0)
@support.requires_IEEE_754
def test_negated_imaginary_literal(self):
z0 = -0j
z1 = -7j
z2 = -1e1000j
# Note: In versions of Python < 3.2, a negated imaginary literal
# accidentally ended up with real part 0.0 instead of -0.0, thanks to a
# modification during CST -> AST translation (see issue #9011). That's
# fixed in Python 3.2.
self.assertFloatsAreIdentical(z0.real, -0.0)
self.assertFloatsAreIdentical(z0.imag, -0.0)
self.assertFloatsAreIdentical(z1.real, -0.0)
self.assertFloatsAreIdentical(z1.imag, -7.0)
self.assertFloatsAreIdentical(z2.real, -0.0)
self.assertFloatsAreIdentical(z2.imag, -INF)
@support.requires_IEEE_754
def test_overflow(self):
self.assertEqual(complex("1e500"), complex(INF, 0.0))
self.assertEqual(complex("-1e500j"), complex(0.0, -INF))
self.assertEqual(complex("-1e500+1.8e308j"), complex(-INF, INF))
@support.requires_IEEE_754
def test_repr_roundtrip(self):
vals = [0.0, 1e-500, 1e-315, 1e-200, 0.0123, 3.1415, 1e50, INF, NAN]
vals += [-v for v in vals]
# complex(repr(z)) should recover z exactly, even for complex
# numbers involving an infinity, nan, or negative zero
for x in vals:
for y in vals:
z = complex(x, y)
roundtrip = complex(repr(z))
self.assertComplexesAreIdentical(z, roundtrip)
# if we predefine some constants, then eval(repr(z)) should
# also work, except that it might change the sign of zeros
inf, nan = float('inf'), float('nan')
infj, nanj = complex(0.0, inf), complex(0.0, nan)
for x in vals:
for y in vals:
z = complex(x, y)
roundtrip = eval(repr(z))
# adding 0.0 has no effect beside changing -0.0 to 0.0
self.assertFloatsAreIdentical(0.0 + z.real,
0.0 + roundtrip.real)
self.assertFloatsAreIdentical(0.0 + z.imag,
0.0 + roundtrip.imag)
def test_format(self):
# empty format string is same as str()
self.assertEqual(format(1+3j, ''), str(1+3j))
self.assertEqual(format(1.5+3.5j, ''), str(1.5+3.5j))
self.assertEqual(format(3j, ''), str(3j))
self.assertEqual(format(3.2j, ''), str(3.2j))
self.assertEqual(format(3+0j, ''), str(3+0j))
self.assertEqual(format(3.2+0j, ''), str(3.2+0j))
# empty presentation type should still be analogous to str,
# even when format string is nonempty (issue #5920).
self.assertEqual(format(3.2+0j, '-'), str(3.2+0j))
self.assertEqual(format(3.2+0j, '<'), str(3.2+0j))
z = 4/7. - 100j/7.
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '10'), str(z))
z = complex(0.0, 3.0)
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '2'), str(z))
z = complex(-0.0, 2.0)
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '3'), str(z))
self.assertEqual(format(1+3j, 'g'), '1+3j')
self.assertEqual(format(3j, 'g'), '0+3j')
self.assertEqual(format(1.5+3.5j, 'g'), '1.5+3.5j')
self.assertEqual(format(1.5+3.5j, '+g'), '+1.5+3.5j')
self.assertEqual(format(1.5-3.5j, '+g'), '+1.5-3.5j')
self.assertEqual(format(1.5-3.5j, '-g'), '1.5-3.5j')
self.assertEqual(format(1.5+3.5j, ' g'), ' 1.5+3.5j')
self.assertEqual(format(1.5-3.5j, ' g'), ' 1.5-3.5j')
self.assertEqual(format(-1.5+3.5j, ' g'), '-1.5+3.5j')
self.assertEqual(format(-1.5-3.5j, ' g'), '-1.5-3.5j')
self.assertEqual(format(-1.5-3.5e-20j, 'g'), '-1.5-3.5e-20j')
self.assertEqual(format(-1.5-3.5j, 'f'), '-1.500000-3.500000j')
self.assertEqual(format(-1.5-3.5j, 'F'), '-1.500000-3.500000j')
self.assertEqual(format(-1.5-3.5j, 'e'), '-1.500000e+00-3.500000e+00j')
self.assertEqual(format(-1.5-3.5j, '.2e'), '-1.50e+00-3.50e+00j')
self.assertEqual(format(-1.5-3.5j, '.2E'), '-1.50E+00-3.50E+00j')
self.assertEqual(format(-1.5e10-3.5e5j, '.2G'), '-1.5E+10-3.5E+05j')
self.assertEqual(format(1.5+3j, '<20g'), '1.5+3j ')
self.assertEqual(format(1.5+3j, '*<20g'), '1.5+3j**************')
self.assertEqual(format(1.5+3j, '>20g'), ' 1.5+3j')
self.assertEqual(format(1.5+3j, '^20g'), ' 1.5+3j ')
self.assertEqual(format(1.5+3j, '<20'), '(1.5+3j) ')
self.assertEqual(format(1.5+3j, '>20'), ' (1.5+3j)')
self.assertEqual(format(1.5+3j, '^20'), ' (1.5+3j) ')
self.assertEqual(format(1.123-3.123j, '^20.2'), ' (1.1-3.1j) ')
self.assertEqual(format(1.5+3j, '20.2f'), ' 1.50+3.00j')
self.assertEqual(format(1.5+3j, '>20.2f'), ' 1.50+3.00j')
self.assertEqual(format(1.5+3j, '<20.2f'), '1.50+3.00j ')
self.assertEqual(format(1.5e20+3j, '<20.2f'), '150000000000000000000.00+3.00j')
self.assertEqual(format(1.5e20+3j, '>40.2f'), ' 150000000000000000000.00+3.00j')
self.assertEqual(format(1.5e20+3j, '^40,.2f'), ' 150,000,000,000,000,000,000.00+3.00j ')
self.assertEqual(format(1.5e21+3j, '^40,.2f'), ' 1,500,000,000,000,000,000,000.00+3.00j ')
self.assertEqual(format(1.5e21+3000j, ',.2f'), '1,500,000,000,000,000,000,000.00+3,000.00j')
# Issue 7094: Alternate formatting (specified by #)
self.assertEqual(format(1+1j, '.0e'), '1e+00+1e+00j')
self.assertEqual(format(1+1j, '#.0e'), '1.e+00+1.e+00j')
self.assertEqual(format(1+1j, '.0f'), '1+1j')
self.assertEqual(format(1+1j, '#.0f'), '1.+1.j')
self.assertEqual(format(1.1+1.1j, 'g'), '1.1+1.1j')
self.assertEqual(format(1.1+1.1j, '#g'), '1.10000+1.10000j')
# Alternate doesn't make a difference for these, they format the same with or without it
self.assertEqual(format(1+1j, '.1e'), '1.0e+00+1.0e+00j')
self.assertEqual(format(1+1j, '#.1e'), '1.0e+00+1.0e+00j')
self.assertEqual(format(1+1j, '.1f'), '1.0+1.0j')
self.assertEqual(format(1+1j, '#.1f'), '1.0+1.0j')
# Misc. other alternate tests
self.assertEqual(format((-1.5+0.5j), '#f'), '-1.500000+0.500000j')
self.assertEqual(format((-1.5+0.5j), '#.0f'), '-2.+0.j')
self.assertEqual(format((-1.5+0.5j), '#e'), '-1.500000e+00+5.000000e-01j')
self.assertEqual(format((-1.5+0.5j), '#.0e'), '-2.e+00+5.e-01j')
self.assertEqual(format((-1.5+0.5j), '#g'), '-1.50000+0.500000j')
self.assertEqual(format((-1.5+0.5j), '.0g'), '-2+0.5j')
self.assertEqual(format((-1.5+0.5j), '#.0g'), '-2.+0.5j')
# zero padding is invalid
self.assertRaises(ValueError, (1.5+0.5j).__format__, '010f')
# '=' alignment is invalid
self.assertRaises(ValueError, (1.5+3j).__format__, '=20')
# integer presentation types are an error
for t in 'bcdoxX':
self.assertRaises(ValueError, (1.5+0.5j).__format__, t)
# make sure everything works in ''.format()
self.assertEqual('*{0:.3f}*'.format(3.14159+2.71828j), '*3.142+2.718j*')
# issue 3382
self.assertEqual(format(complex(NAN, NAN), 'f'), 'nan+nanj')
self.assertEqual(format(complex(1, NAN), 'f'), '1.000000+nanj')
self.assertEqual(format(complex(NAN, 1), 'f'), 'nan+1.000000j')
self.assertEqual(format(complex(NAN, -1), 'f'), 'nan-1.000000j')
self.assertEqual(format(complex(NAN, NAN), 'F'), 'NAN+NANj')
self.assertEqual(format(complex(1, NAN), 'F'), '1.000000+NANj')
self.assertEqual(format(complex(NAN, 1), 'F'), 'NAN+1.000000j')
self.assertEqual(format(complex(NAN, -1), 'F'), 'NAN-1.000000j')
self.assertEqual(format(complex(INF, INF), 'f'), 'inf+infj')
self.assertEqual(format(complex(1, INF), 'f'), '1.000000+infj')
self.assertEqual(format(complex(INF, 1), 'f'), 'inf+1.000000j')
self.assertEqual(format(complex(INF, -1), 'f'), 'inf-1.000000j')
self.assertEqual(format(complex(INF, INF), 'F'), 'INF+INFj')
self.assertEqual(format(complex(1, INF), 'F'), '1.000000+INFj')
self.assertEqual(format(complex(INF, 1), 'F'), 'INF+1.000000j')
self.assertEqual(format(complex(INF, -1), 'F'), 'INF-1.000000j')
if __name__ == "__main__":
run_tests()
| ComplexTest |
python | gevent__gevent | src/gevent/tests/test__ares_timeout.py | {
"start": 346,
"end": 1015
} | class ____(greentest.TestCase):
__timeout__ = 30
def test(self):
listener = self._close_on_teardown(udp_listener())
address = listener.getsockname()
def reader():
while True:
listener.recvfrom(10000)
greader = gevent.spawn(reader)
self._close_on_teardown(greader.kill)
r = Resolver(servers=[address[0]], timeout=0.001, tries=1,
udp_port=address[-1])
self._close_on_teardown(r)
with self.assertRaisesRegex(socket.herror, "ARES_ETIMEOUT"):
r.gethostbyname('www.google.com')
if __name__ == '__main__':
greentest.main()
| TestTimeout |
python | numba__numba | numba/tests/enum_usecases.py | {
"start": 33,
"end": 93
} | class ____(Enum):
red = 1
green = 2
blue = 3
| Color |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_sharding_test.py | {
"start": 875,
"end": 5177
} | class ____(test.TestCase):
def testFreeze(self):
"""Tests that freezing a policy applies default values."""
p1 = tpu_sharding.ShardingPolicy()
p1.freeze()
self.assertEqual(p1.number_of_shards,
tpu_sharding._DEFAULT_NUMBER_OF_SHARDS)
self.assertEqual(p1.shard_dimension, tpu_sharding._DEFAULT_SHARD_DIMENSION)
p2 = tpu_sharding.ShardingPolicy()
p2.set_number_of_shards(17)
p2.set_shard_dimension(23)
p2.freeze()
self.assertEqual(p2.number_of_shards, 17)
self.assertEqual(p2.shard_dimension, 23)
def testFrozen(self):
"""Tests that frozen policies can't be changed."""
p1 = tpu_sharding.ShardingPolicy()
p1.freeze()
with self.assertRaises(ValueError):
p1.set_number_of_shards(17)
with self.assertRaises(ValueError):
p1.set_shard_dimension(22)
def testStr(self):
"""Tests the string representation."""
p1 = tpu_sharding.ShardingPolicy()
self.assertEqual(str(p1), "ShardingPolicy(unset)")
p1.set_number_of_shards(17)
self.assertEqual(str(p1), "ShardingPolicy(unset)")
p1.set_shard_dimension(8)
self.assertEqual(str(p1), "ShardingPolicy(17 shards dimension 8)")
def testMerge(self):
"""Tests that merging works."""
p1 = tpu_sharding.ShardingPolicy()
p1.set_number_of_shards(17)
p1.set_shard_dimension(23)
p2 = tpu_sharding.ShardingPolicy()
p2.merge(p1)
self.assertEqual(p2.number_of_shards, 17)
self.assertEqual(p2.shard_dimension, 23)
p1 = tpu_sharding.ShardingPolicy()
p1.set_shard_dimension(12)
p2.merge(p1)
self.assertEqual(p2.number_of_shards, 17)
self.assertEqual(p2.shard_dimension, 12)
p2.freeze()
p2.merge(p1)
self.assertEqual(p2.number_of_shards, 17)
self.assertEqual(p2.shard_dimension, 12)
p1.set_number_of_shards(1)
with self.assertRaises(ValueError):
p2.merge(p1)
p1 = tpu_sharding.ShardingPolicy()
p1.set_number_of_shards(17)
p2.merge(p1)
p1.set_shard_dimension(2)
with self.assertRaises(ValueError):
p2.merge(p1)
def testGetShardedShape(self):
"""Tests getting a sharded shape."""
p = tpu_sharding.ShardingPolicy()
p.set_number_of_shards(3)
p.set_shard_dimension(1)
self.assertEqual(p.get_sharded_shape([4, 9]), [4, 3])
p.freeze()
with self.assertRaises(ValueError):
p.set_shard_dimension(0)
with self.assertRaises(ValueError):
_ = p.get_sharded_shape([4, 9], shard_index=4)
with self.assertRaises(ValueError):
_ = p.get_sharded_shape([4, 9], shard_index=-1)
with self.assertRaises(TypeError):
_ = p.get_sharded_shape("not_a_shape")
with self.assertRaises(ValueError):
_ = p.get_sharded_shape(tensor_shape.TensorShape(None))
with self.assertRaises(ValueError):
_ = p.get_sharded_shape([4, 10], shard_index=-1)
def testGetUnpartitionedShape(self):
"""Tests getting a sharded shape."""
p = tpu_sharding.ShardingPolicy()
p.set_number_of_shards(3)
p.set_shard_dimension(1)
p.set_number_of_partitions(4)
self.assertEqual(p.get_unpartitioned_shape([3, 5]), [3, 20])
p.freeze()
with self.assertRaises(ValueError):
_ = p.get_unpartitioned_shape([3, None])
def testGetUnshardedShape(self):
"""Tests getting an unsharded shape."""
p = tpu_sharding.ShardingPolicy()
p.set_number_of_shards(2)
p.set_shard_dimension(1)
self.assertEqual(p.get_unsharded_shape([[4, 3], [4, 3]]), [4, 6])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([[4, 3]])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([[4, 3], [4, 3], [4, 3]])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([[4, 3], [4, 2]])
with self.assertRaises(TypeError):
_ = p.get_unsharded_shape([[4, 3], "not_a_shape"])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([None, [4, 3]])
with self.assertRaises(ValueError):
_ = p.get_unsharded_shape([[2], [4, 3]])
def testScalar(self):
"""Tests sharding and unsharding scalars."""
p = tpu_sharding.ShardingPolicy()
p.freeze()
self.assertEqual(p.get_sharded_shape([]), [])
self.assertEqual(p.get_unsharded_shape([[]]), [])
if __name__ == "__main__":
test.main()
| ShardingTest |
python | getsentry__sentry | tests/sentry/utils/test_event_frames.py | {
"start": 16794,
"end": 41572
} | class ____(TestCase):
def test_crashing_event_with_exception_interface_but_no_frame_should_waterfall_to_thread_frames(
self,
) -> None:
event = self.store_event(
data={
"platform": "cocoa",
"exception": {
"values": [
{
"type": "C++ Exception",
"value": "NSt3__112system_errorE",
"thread_id": 9,
"mechanism": {
"type": "cpp_exception",
"handled": False,
"meta": {
"signal": {"number": 6, "code": 0, "name": "SIGABRT"},
"mach_exception": {
"exception": 10,
"code": 0,
"subcode": 0,
"name": "EXC_CRASH",
},
},
},
}
]
},
"threads": {
"values": [
{
"id": 0,
"stacktrace": {
"frames": [
{
"function": "<redacted>",
"in_app": False,
"data": {"symbolicator_status": "unknown_image"},
"image_addr": "0x0",
"instruction_addr": "0x1028d5aa4",
"symbol_addr": "0x0",
},
{
"function": "main",
"symbol": "main",
"package": "Runner",
"filename": "AppDelegate.swift",
"abs_path": "/Users/denis/Repos/sentry/sentry-mobile/ios/Runner/AppDelegate.swift",
"lineno": 5,
"in_app": True,
"data": {"symbolicator_status": "symbolicated"},
"image_addr": "0x102684000",
"instruction_addr": "0x10268ab9c",
"symbol_addr": "0x102684000",
},
{
"function": "UIApplicationMain",
"symbol": "UIApplicationMain",
"package": "UIKitCore",
"in_app": False,
"data": {
"category": "threadbase",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x183f6b000",
"instruction_addr": "0x184203954",
"symbol_addr": "0x18420312c",
},
{
"function": "-[UIApplication _run]",
"symbol": "-[UIApplication _run]",
"package": "UIKitCore",
"in_app": False,
"data": {
"category": "ui",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x183f6b000",
"instruction_addr": "0x184485084",
"symbol_addr": "0x184484c3c",
},
{
"function": "GSEventRunModal",
"symbol": "GSEventRunModal",
"package": "GraphicsServices",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x19d66d000",
"instruction_addr": "0x19d66e388",
"symbol_addr": "0x19d66e2e8",
},
{
"function": "CFRunLoopRunSpecific",
"symbol": "CFRunLoopRunSpecific",
"package": "CoreFoundation",
"in_app": False,
"data": {
"category": "indirection",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x181ac4000",
"instruction_addr": "0x181ae3464",
"symbol_addr": "0x181ae3210",
},
{
"function": "__CFRunLoopRun",
"symbol": "__CFRunLoopRun",
"package": "CoreFoundation",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x181ac4000",
"instruction_addr": "0x181acf8a0",
"symbol_addr": "0x181acf570",
},
{
"function": "__CFRunLoopDoSources0",
"symbol": "__CFRunLoopDoSources0",
"package": "CoreFoundation",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x181ac4000",
"instruction_addr": "0x181aca094",
"symbol_addr": "0x181ac9f8c",
},
{
"function": "__CFRunLoopDoSource0",
"symbol": "__CFRunLoopDoSource0",
"package": "CoreFoundation",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x181ac4000",
"instruction_addr": "0x181b8fd8c",
"symbol_addr": "0x181b8fcc0",
},
{
"function": "__CFRUNLOOP_IS_CALLING_OUT_TO_A_SOURCE0_PERFORM_FUNCTION__",
"symbol": "__CFRUNLOOP_IS_CALLING_OUT_TO_A_SOURCE0_PERFORM_FUNCTION__",
"package": "CoreFoundation",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x181ac4000",
"instruction_addr": "0x181b7f0cc",
"symbol_addr": "0x181b7f0b4",
},
{
"function": "-[FBSSerialQueue _performNextFromRunLoopSource]",
"symbol": "-[FBSSerialQueue _performNextFromRunLoopSource]",
"package": "FrontBoardServices",
"in_app": False,
"data": {"symbolicator_status": "symbolicated"},
"image_addr": "0x19378c000",
"instruction_addr": "0x19379b410",
"symbol_addr": "0x19379b3f8",
},
{
"function": "-[FBSSerialQueue _targetQueue_performNextIfPossible]",
"symbol": "-[FBSSerialQueue _targetQueue_performNextIfPossible]",
"package": "FrontBoardServices",
"in_app": False,
"data": {"symbolicator_status": "symbolicated"},
"image_addr": "0x19378c000",
"instruction_addr": "0x193796d88",
"symbol_addr": "0x193796cb0",
},
{
"function": "__FBSSERIALQUEUE_IS_CALLING_OUT_TO_A_BLOCK__",
"symbol": "__FBSSERIALQUEUE_IS_CALLING_OUT_TO_A_BLOCK__",
"package": "FrontBoardServices",
"in_app": False,
"data": {"symbolicator_status": "symbolicated"},
"image_addr": "0x19378c000",
"instruction_addr": "0x1937979c0",
"symbol_addr": "0x193797994",
},
{
"function": "_dispatch_block_invoke_direct",
"symbol": "_dispatch_block_invoke_direct",
"package": "libdispatch.dylib",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x1817cb000",
"instruction_addr": "0x1817d3124",
"symbol_addr": "0x1817d3020",
},
{
"function": "_dispatch_client_callout",
"symbol": "_dispatch_client_callout",
"package": "libdispatch.dylib",
"in_app": False,
"data": {
"category": "threadbase",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x1817cb000",
"instruction_addr": "0x1817cf66c",
"symbol_addr": "0x1817cf65c",
},
{
"function": "__63-[FBSWorkspaceScenesClient willTerminateWithTransitionContext:]_block_invoke",
"symbol": "__63-[FBSWorkspaceScenesClient willTerminateWithTransitionContext:]_block_invoke",
"package": "FrontBoardServices",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x19378c000",
"instruction_addr": "0x1937db714",
"symbol_addr": "0x1937db694",
},
{
"function": "-[FBSWorkspace _calloutQueue_executeCalloutFromSource:withBlock:]",
"symbol": "-[FBSWorkspace _calloutQueue_executeCalloutFromSource:withBlock:]",
"package": "FrontBoardServices",
"in_app": False,
"data": {"symbolicator_status": "symbolicated"},
"image_addr": "0x19378c000",
"instruction_addr": "0x193796068",
"symbol_addr": "0x193795f7c",
},
{
"function": "__63-[FBSWorkspaceScenesClient willTerminateWithTransitionContext:]_block_invoke_2",
"symbol": "__63-[FBSWorkspaceScenesClient willTerminateWithTransitionContext:]_block_invoke_2",
"package": "FrontBoardServices",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x19378c000",
"instruction_addr": "0x1937db77c",
"symbol_addr": "0x1937db730",
},
{
"function": "-[UIApplication workspaceShouldExit:withTransitionContext:]",
"symbol": "-[UIApplication workspaceShouldExit:withTransitionContext:]",
"package": "UIKitCore",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x183f6b000",
"instruction_addr": "0x184ebc298",
"symbol_addr": "0x184ebc1c8",
},
{
"function": "-[_UISceneLifecycleMultiplexer forceExitWithTransitionContext:scene:]",
"symbol": "-[_UISceneLifecycleMultiplexer forceExitWithTransitionContext:scene:]",
"package": "UIKitCore",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x183f6b000",
"instruction_addr": "0x18479ca7c",
"symbol_addr": "0x18479c9a0",
},
{
"function": "-[_UISceneLifecycleMultiplexer _evalTransitionToSettings:fromSettings:forceExit:withTransitionStore:]",
"symbol": "-[_UISceneLifecycleMultiplexer _evalTransitionToSettings:fromSettings:forceExit:withTransitionStore:]",
"package": "UIKitCore",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x183f6b000",
"instruction_addr": "0x1845a7b34",
"symbol_addr": "0x1845a7ab8",
},
{
"function": "-[UIApplication _terminateWithStatus:]",
"symbol": "-[UIApplication _terminateWithStatus:]",
"package": "UIKitCore",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x183f6b000",
"instruction_addr": "0x184ebf71c",
"symbol_addr": "0x184ebf528",
},
{
"function": "exit",
"symbol": "exit",
"package": "libsystem_c.dylib",
"in_app": False,
"data": {
"category": "shutdown",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x18ca01000",
"instruction_addr": "0x18ca1c224",
"symbol_addr": "0x18ca1c208",
},
{
"function": "__cxa_finalize_ranges",
"symbol": "__cxa_finalize_ranges",
"package": "libsystem_c.dylib",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x18ca01000",
"instruction_addr": "0x18ca218c0",
"symbol_addr": "0x18ca216f8",
},
{
"function": "__cxa_finalize_ranges",
"symbol": "__cxa_finalize_ranges",
"package": "libsystem_c.dylib",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x18ca01000",
"instruction_addr": "0x18ca218c0",
"symbol_addr": "0x18ca216f8",
},
{
"function": "<redacted>",
"package": "MetalPerformanceShadersGraph",
"in_app": False,
"data": {"symbolicator_status": "missing_symbol"},
"image_addr": "0x1bec7a000",
"instruction_addr": "0x1bf179c98",
"symbol_addr": "0x0",
},
]
},
"crashed": False,
"current": False,
},
{
"id": 1,
"stacktrace": {
"frames": [
{
"function": "_pthread_wqthread",
"symbol": "_pthread_wqthread",
"package": "libsystem_pthread.dylib",
"in_app": False,
"data": {
"category": "threadbase",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x1f2532000",
"instruction_addr": "0x1f253313c",
"symbol_addr": "0x1f2532fd4",
},
{
"function": "__workq_kernreturn",
"symbol": "__workq_kernreturn",
"package": "libsystem_kernel.dylib",
"in_app": False,
"data": {
"category": "internals",
"symbolicator_status": "symbolicated",
},
"image_addr": "0x1b9090000",
"instruction_addr": "0x1b9091b2c",
"symbol_addr": "0x1b9091b24",
},
]
},
"crashed": False,
"current": False,
},
]
},
},
project_id=self.project.id,
)
frames = find_stack_frames(event.data)
assert len(frames) == 0 # exception has no frames, no threads that are crashed, or current
| CocoaWaterFallTestCase |
python | Farama-Foundation__Gymnasium | gymnasium/vector/async_vector_env.py | {
"start": 1307,
"end": 34824
} | class ____(VectorEnv):
"""Vectorized environment that runs multiple environments in parallel.
It uses ``multiprocessing`` processes, and pipes for communication.
Example:
>>> import gymnasium as gym
>>> envs = gym.make_vec("Pendulum-v1", num_envs=2, vectorization_mode="async")
>>> envs
AsyncVectorEnv(Pendulum-v1, num_envs=2)
>>> envs = gym.vector.AsyncVectorEnv([
... lambda: gym.make("Pendulum-v1", g=9.81),
... lambda: gym.make("Pendulum-v1", g=1.62)
... ])
>>> envs
AsyncVectorEnv(num_envs=2)
>>> observations, infos = envs.reset(seed=42)
>>> observations
array([[-0.14995256, 0.9886932 , -0.12224312],
[ 0.5760367 , 0.8174238 , -0.91244936]], dtype=float32)
>>> infos
{}
>>> _ = envs.action_space.seed(123)
>>> observations, rewards, terminations, truncations, infos = envs.step(envs.action_space.sample())
>>> observations
array([[-0.1851753 , 0.98270553, 0.714599 ],
[ 0.6193494 , 0.7851154 , -1.0808398 ]], dtype=float32)
>>> rewards
array([-2.96495728, -1.00214607])
>>> terminations
array([False, False])
>>> truncations
array([False, False])
>>> infos
{}
"""
def __init__(
self,
env_fns: Sequence[Callable[[], Env]],
shared_memory: bool = True,
copy: bool = True,
context: str | None = None,
daemon: bool = True,
worker: (
Callable[
[int, Callable[[], Env], Connection, Connection, bool, Queue], None
]
| None
) = None,
observation_mode: str | Space = "same",
autoreset_mode: str | AutoresetMode = AutoresetMode.NEXT_STEP,
):
"""Vectorized environment that runs multiple environments in parallel.
Args:
env_fns: Functions that create the environments.
shared_memory: If ``True``, then the observations from the worker processes are communicated back through
shared variables. This can improve the efficiency if the observations are large (e.g. images).
copy: If ``True``, then the :meth:`AsyncVectorEnv.reset` and :meth:`AsyncVectorEnv.step` methods
return a copy of the observations.
context: Context for `multiprocessing`. If ``None``, then the default context is used.
daemon: If ``True``, then subprocesses have ``daemon`` flag turned on; that is, they will quit if
the head process quits. However, ``daemon=True`` prevents subprocesses to spawn children,
so for some environments you may want to have it set to ``False``.
worker: If set, then use that worker in a subprocess instead of a default one.
Can be useful to override some inner vector env logic, for instance, how resets on termination or truncation are handled.
observation_mode: Defines how environment observation spaces should be batched. 'same' defines that there should be ``n`` copies of identical spaces.
'different' defines that there can be multiple observation spaces with different parameters though requires the same shape and dtype,
warning, may raise unexpected errors. Passing a ``Tuple[Space, Space]`` object allows defining a custom ``single_observation_space`` and
``observation_space``, warning, may raise unexpected errors.
autoreset_mode: The Autoreset Mode used, see https://farama.org/Vector-Autoreset-Mode for more information.
Warnings:
worker is an advanced mode option. It provides a high degree of flexibility and a high chance
to shoot yourself in the foot; thus, if you are writing your own worker, it is recommended to start
from the code for ``_worker`` (or ``_async_worker``) method, and add changes.
Raises:
RuntimeError: If the observation space of some sub-environment does not match observation_space
(or, by default, the observation space of the first sub-environment).
ValueError: If observation_space is a custom space (i.e. not a default space in Gym,
such as gymnasium.spaces.Box, gymnasium.spaces.Discrete, or gymnasium.spaces.Dict) and shared_memory is True.
"""
self.env_fns = env_fns
self.shared_memory = shared_memory
self.copy = copy
self.context = context
self.daemon = daemon
self.worker = worker
self.observation_mode = observation_mode
self.autoreset_mode = (
autoreset_mode
if isinstance(autoreset_mode, AutoresetMode)
else AutoresetMode(autoreset_mode)
)
self.num_envs = len(env_fns)
# This would be nice to get rid of, but without it there's a deadlock between shared memory and pipes
# Create a dummy environment to gather the metadata and observation / action space of the environment
dummy_env = env_fns[0]()
# As we support `make_vec(spec)` then we can't include a `spec = dummy_env.spec` as this doesn't guarantee we can actual recreate the vector env.
self.metadata = dummy_env.metadata
self.metadata["autoreset_mode"] = self.autoreset_mode
self.render_mode = dummy_env.render_mode
self.single_action_space = dummy_env.action_space
self.action_space = batch_space(self.single_action_space, self.num_envs)
if isinstance(observation_mode, tuple) and len(observation_mode) == 2:
assert isinstance(observation_mode[0], Space)
assert isinstance(observation_mode[1], Space)
self.observation_space, self.single_observation_space = observation_mode
else:
if observation_mode == "same":
self.single_observation_space = dummy_env.observation_space
self.observation_space = batch_space(
self.single_observation_space, self.num_envs
)
elif observation_mode == "different":
# the environment is created and instantly destroy, might cause issues for some environment
# but I don't believe there is anything else we can do, for users with issues, pre-compute the spaces and use the custom option.
env_spaces = [env().observation_space for env in self.env_fns]
self.single_observation_space = env_spaces[0]
self.observation_space = batch_differing_spaces(env_spaces)
else:
raise ValueError(
f"Invalid `observation_mode`, expected: 'same' or 'different' or tuple of single and batch observation space, actual got {observation_mode}"
)
dummy_env.close()
del dummy_env
# Generate the multiprocessing context for the observation buffer
ctx = multiprocessing.get_context(context)
if self.shared_memory:
try:
_obs_buffer = create_shared_memory(
self.single_observation_space, n=self.num_envs, ctx=ctx
)
self.observations = read_from_shared_memory(
self.single_observation_space, _obs_buffer, n=self.num_envs
)
except CustomSpaceError as e:
raise ValueError(
"Using `AsyncVector(..., shared_memory=True)` caused an error, you can disable this feature with `shared_memory=False` however this is slower."
) from e
else:
_obs_buffer = None
self.observations = create_empty_array(
self.single_observation_space, n=self.num_envs, fn=np.zeros
)
self.parent_pipes, self.processes = [], []
self.error_queue = ctx.Queue()
target = worker or _async_worker
with clear_mpi_env_vars():
for idx, env_fn in enumerate(self.env_fns):
parent_pipe, child_pipe = ctx.Pipe()
process = ctx.Process(
target=target,
name=f"Worker<{type(self).__name__}>-{idx}",
args=(
idx,
CloudpickleWrapper(env_fn),
child_pipe,
parent_pipe,
_obs_buffer,
self.error_queue,
self.autoreset_mode,
),
)
self.parent_pipes.append(parent_pipe)
self.processes.append(process)
process.daemon = daemon
process.start()
child_pipe.close()
self._state = AsyncState.DEFAULT
self._check_spaces()
@property
def np_random_seed(self) -> tuple[int, ...]:
"""Returns a tuple of np_random seeds for all the wrapped envs."""
return self.get_attr("np_random_seed")
@property
def np_random(self) -> tuple[np.random.Generator, ...]:
"""Returns the tuple of the numpy random number generators for the wrapped envs."""
return self.get_attr("np_random")
def reset(
self,
*,
seed: int | list[int | None] | None = None,
options: dict[str, Any] | None = None,
) -> tuple[ObsType, dict[str, Any]]:
"""Resets all sub-environments in parallel and return a batch of concatenated observations and info.
Args:
seed: The environment reset seeds
options: If to return the options
Returns:
A batch of observations and info from the vectorized environment.
"""
self.reset_async(seed=seed, options=options)
return self.reset_wait()
def reset_async(
self,
seed: int | list[int | None] | None = None,
options: dict | None = None,
):
"""Send calls to the :obj:`reset` methods of the sub-environments.
To get the results of these calls, you may invoke :meth:`reset_wait`.
Args:
seed: List of seeds for each environment
options: The reset option
Raises:
ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
AlreadyPendingCallError: If the environment is already waiting for a pending call to another
method (e.g. :meth:`step_async`). This can be caused by two consecutive
calls to :meth:`reset_async`, with no call to :meth:`reset_wait` in between.
"""
self._assert_is_running()
if seed is None:
seed = [None for _ in range(self.num_envs)]
elif isinstance(seed, int):
seed = [seed + i for i in range(self.num_envs)]
assert (
len(seed) == self.num_envs
), f"If seeds are passed as a list the length must match num_envs={self.num_envs} but got length={len(seed)}."
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError(
f"Calling `reset_async` while waiting for a pending call to `{self._state.value}` to complete",
str(self._state.value),
)
if options is not None and "reset_mask" in options:
reset_mask = options.pop("reset_mask")
assert isinstance(
reset_mask, np.ndarray
), f"`options['reset_mask': mask]` must be a numpy array, got {type(reset_mask)}"
assert reset_mask.shape == (
self.num_envs,
), f"`options['reset_mask': mask]` must have shape `({self.num_envs},)`, got {reset_mask.shape}"
assert (
reset_mask.dtype == np.bool_
), f"`options['reset_mask': mask]` must have `dtype=np.bool_`, got {reset_mask.dtype}"
assert np.any(
reset_mask
), f"`options['reset_mask': mask]` must contain a boolean array, got reset_mask={reset_mask}"
for pipe, env_seed, env_reset in zip(self.parent_pipes, seed, reset_mask):
if env_reset:
env_kwargs = {"seed": env_seed, "options": options}
pipe.send(("reset", env_kwargs))
else:
pipe.send(("reset-noop", None))
else:
for pipe, env_seed in zip(self.parent_pipes, seed):
env_kwargs = {"seed": env_seed, "options": options}
pipe.send(("reset", env_kwargs))
self._state = AsyncState.WAITING_RESET
def reset_wait(
self,
timeout: int | float | None = None,
) -> tuple[ObsType, dict[str, Any]]:
"""Waits for the calls triggered by :meth:`reset_async` to finish and returns the results.
Args:
timeout: Number of seconds before the call to ``reset_wait`` times out. If `None`, the call to ``reset_wait`` never times out.
Returns:
A tuple of batched observations and list of dictionaries
Raises:
ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
NoAsyncCallError: If :meth:`reset_wait` was called without any prior call to :meth:`reset_async`.
TimeoutError: If :meth:`reset_wait` timed out.
"""
self._assert_is_running()
if self._state != AsyncState.WAITING_RESET:
raise NoAsyncCallError(
"Calling `reset_wait` without any prior " "call to `reset_async`.",
AsyncState.WAITING_RESET.value,
)
if not self._poll_pipe_envs(timeout):
self._state = AsyncState.DEFAULT
raise multiprocessing.TimeoutError(
f"The call to `reset_wait` has timed out after {timeout} second(s)."
)
results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
infos = {}
results, info_data = zip(*results)
for i, info in enumerate(info_data):
infos = self._add_info(infos, info, i)
if not self.shared_memory:
self.observations = concatenate(
self.single_observation_space, results, self.observations
)
self._state = AsyncState.DEFAULT
return (deepcopy(self.observations) if self.copy else self.observations), infos
def step(
self, actions: ActType
) -> tuple[ObsType, ArrayType, ArrayType, ArrayType, dict[str, Any]]:
"""Take an action for each parallel environment.
Args:
actions: element of :attr:`action_space` batch of actions.
Returns:
Batch of (observations, rewards, terminations, truncations, infos)
"""
self.step_async(actions)
return self.step_wait()
def step_async(self, actions: np.ndarray):
"""Send the calls to :meth:`Env.step` to each sub-environment.
Args:
actions: Batch of actions. element of :attr:`VectorEnv.action_space`
Raises:
ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
AlreadyPendingCallError: If the environment is already waiting for a pending call to another
method (e.g. :meth:`reset_async`). This can be caused by two consecutive
calls to :meth:`step_async`, with no call to :meth:`step_wait` in
between.
"""
self._assert_is_running()
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError(
f"Calling `step_async` while waiting for a pending call to `{self._state.value}` to complete.",
str(self._state.value),
)
iter_actions = iterate(self.action_space, actions)
for pipe, action in zip(self.parent_pipes, iter_actions, strict=True):
pipe.send(("step", action))
self._state = AsyncState.WAITING_STEP
def step_wait(
self, timeout: int | float | None = None
) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, dict]:
"""Wait for the calls to :obj:`step` in each sub-environment to finish.
Args:
timeout: Number of seconds before the call to :meth:`step_wait` times out. If ``None``, the call to :meth:`step_wait` never times out.
Returns:
The batched environment step information, (obs, reward, terminated, truncated, info)
Raises:
ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
NoAsyncCallError: If :meth:`step_wait` was called without any prior call to :meth:`step_async`.
TimeoutError: If :meth:`step_wait` timed out.
"""
self._assert_is_running()
if self._state != AsyncState.WAITING_STEP:
raise NoAsyncCallError(
"Calling `step_wait` without any prior call " "to `step_async`.",
AsyncState.WAITING_STEP.value,
)
if not self._poll_pipe_envs(timeout):
self._state = AsyncState.DEFAULT
raise multiprocessing.TimeoutError(
f"The call to `step_wait` has timed out after {timeout} second(s)."
)
observations, rewards, terminations, truncations, infos = [], [], [], [], {}
successes = []
for env_idx, pipe in enumerate(self.parent_pipes):
env_step_return, success = pipe.recv()
successes.append(success)
if success:
observations.append(env_step_return[0])
rewards.append(env_step_return[1])
terminations.append(env_step_return[2])
truncations.append(env_step_return[3])
infos = self._add_info(infos, env_step_return[4], env_idx)
self._raise_if_errors(successes)
if not self.shared_memory:
self.observations = concatenate(
self.single_observation_space,
observations,
self.observations,
)
self._state = AsyncState.DEFAULT
return (
deepcopy(self.observations) if self.copy else self.observations,
np.array(rewards, dtype=np.float64),
np.array(terminations, dtype=np.bool_),
np.array(truncations, dtype=np.bool_),
infos,
)
def call(self, name: str, *args: Any, **kwargs: Any) -> tuple[Any, ...]:
"""Call a method from each parallel environment with args and kwargs.
Args:
name (str): Name of the method or property to call.
*args: Position arguments to apply to the method call.
**kwargs: Keyword arguments to apply to the method call.
Returns:
List of the results of the individual calls to the method or property for each environment.
"""
self.call_async(name, *args, **kwargs)
return self.call_wait()
def render(self) -> tuple[RenderFrame, ...] | None:
"""Returns a list of rendered frames from the environments."""
return self.call("render")
def call_async(self, name: str, *args, **kwargs):
"""Calls the method with name asynchronously and apply args and kwargs to the method.
Args:
name: Name of the method or property to call.
*args: Arguments to apply to the method call.
**kwargs: Keyword arguments to apply to the method call.
Raises:
ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
AlreadyPendingCallError: Calling `call_async` while waiting for a pending call to complete
"""
self._assert_is_running()
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError(
f"Calling `call_async` while waiting for a pending call to `{self._state.value}` to complete.",
str(self._state.value),
)
for pipe in self.parent_pipes:
pipe.send(("_call", (name, args, kwargs)))
self._state = AsyncState.WAITING_CALL
def call_wait(self, timeout: int | float | None = None) -> tuple[Any, ...]:
"""Calls all parent pipes and waits for the results.
Args:
timeout: Number of seconds before the call to :meth:`step_wait` times out.
If ``None`` (default), the call to :meth:`step_wait` never times out.
Returns:
List of the results of the individual calls to the method or property for each environment.
Raises:
NoAsyncCallError: Calling :meth:`call_wait` without any prior call to :meth:`call_async`.
TimeoutError: The call to :meth:`call_wait` has timed out after timeout second(s).
"""
self._assert_is_running()
if self._state != AsyncState.WAITING_CALL:
raise NoAsyncCallError(
"Calling `call_wait` without any prior call to `call_async`.",
AsyncState.WAITING_CALL.value,
)
if not self._poll_pipe_envs(timeout):
self._state = AsyncState.DEFAULT
raise multiprocessing.TimeoutError(
f"The call to `call_wait` has timed out after {timeout} second(s)."
)
results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
self._state = AsyncState.DEFAULT
return results
def get_attr(self, name: str) -> tuple[Any, ...]:
"""Get a property from each parallel environment.
Args:
name (str): Name of the property to be get from each individual environment.
Returns:
The property with name
"""
return self.call(name)
def set_attr(self, name: str, values: list[Any] | tuple[Any] | object):
"""Sets an attribute of the sub-environments.
Args:
name: Name of the property to be set in each individual environment.
values: Values of the property to be set to. If ``values`` is a list or
tuple, then it corresponds to the values for each individual
environment, otherwise a single value is set for all environments.
Raises:
ValueError: Values must be a list or tuple with length equal to the number of environments.
AlreadyPendingCallError: Calling :meth:`set_attr` while waiting for a pending call to complete.
"""
self._assert_is_running()
if not isinstance(values, (list, tuple)):
values = [values for _ in range(self.num_envs)]
if len(values) != self.num_envs:
raise ValueError(
"Values must be a list or tuple with length equal to the number of environments. "
f"Got `{len(values)}` values for {self.num_envs} environments."
)
if self._state != AsyncState.DEFAULT:
raise AlreadyPendingCallError(
f"Calling `set_attr` while waiting for a pending call to `{self._state.value}` to complete.",
str(self._state.value),
)
for pipe, value in zip(self.parent_pipes, values):
pipe.send(("_setattr", (name, value)))
_, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
def close_extras(self, timeout: int | float | None = None, terminate: bool = False):
"""Close the environments & clean up the extra resources (processes and pipes).
Args:
timeout: Number of seconds before the call to :meth:`close` times out. If ``None``,
the call to :meth:`close` never times out. If the call to :meth:`close`
times out, then all processes are terminated.
terminate: If ``True``, then the :meth:`close` operation is forced and all processes are terminated.
Raises:
TimeoutError: If :meth:`close` timed out.
"""
timeout = 0 if terminate else timeout
try:
if self._state != AsyncState.DEFAULT:
logger.warn(
f"Calling `close` while waiting for a pending call to `{self._state.value}` to complete."
)
function = getattr(self, f"{self._state.value}_wait")
function(timeout)
except multiprocessing.TimeoutError:
terminate = True
if terminate:
for process in self.processes:
if process.is_alive():
process.terminate()
else:
for pipe in self.parent_pipes:
if (pipe is not None) and (not pipe.closed):
pipe.send(("close", None))
for pipe in self.parent_pipes:
if (pipe is not None) and (not pipe.closed):
pipe.recv()
for pipe in self.parent_pipes:
if pipe is not None:
pipe.close()
for process in self.processes:
process.join()
def _poll_pipe_envs(self, timeout: int | None = None):
self._assert_is_running()
if timeout is None:
return True
end_time = time.perf_counter() + timeout
for pipe in self.parent_pipes:
delta = max(end_time - time.perf_counter(), 0)
if pipe is None:
return False
if pipe.closed or (not pipe.poll(delta)):
return False
return True
def _check_spaces(self):
self._assert_is_running()
for pipe in self.parent_pipes:
pipe.send(
(
"_check_spaces",
(
self.observation_mode,
self.single_observation_space,
self.single_action_space,
),
)
)
results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
self._raise_if_errors(successes)
same_observation_spaces, same_action_spaces = zip(*results)
if not all(same_observation_spaces):
if self.observation_mode == "same":
raise RuntimeError(
"AsyncVectorEnv(..., observation_mode='same') however some of the sub-environments observation spaces are not equivalent. If this is intentional, use `observation_mode='different'` instead."
)
else:
raise RuntimeError(
"AsyncVectorEnv(..., observation_mode='different' or custom space) however the sub-environment's observation spaces do not share a common shape and dtype."
)
if not all(same_action_spaces):
raise RuntimeError(
f"Some environments have an action space different from `{self.single_action_space}`. "
"In order to batch actions, the action spaces from all environments must be equal."
)
def _assert_is_running(self):
if self.closed:
raise ClosedEnvironmentError(
f"Trying to operate on `{type(self).__name__}`, after a call to `close()`."
)
def _raise_if_errors(self, successes: list[bool] | tuple[bool]):
if all(successes):
return
num_errors = self.num_envs - sum(successes)
assert num_errors > 0
for i in range(num_errors):
index, exctype, value, trace = self.error_queue.get()
logger.error(
f"Received the following error from Worker-{index} - Shutting it down"
)
logger.error(f"{trace}")
self.parent_pipes[index].close()
self.parent_pipes[index] = None
if i == num_errors - 1:
logger.error("Raising the last exception back to the main process.")
self._state = AsyncState.DEFAULT
raise exctype(value)
def __del__(self):
"""On deleting the object, checks that the vector environment is closed."""
if not getattr(self, "closed", True) and hasattr(self, "_state"):
self.close(terminate=True)
def _async_worker(
    index: int,
    env_fn: Callable,
    pipe: Connection,
    parent_pipe: Connection,
    shared_memory: SynchronizedArray | dict[str, Any] | tuple[Any, ...],
    error_queue: Queue,
    autoreset_mode: AutoresetMode,
):
    """Worker-process loop: construct a sub-environment and service pipe commands.

    Args:
        index: Position of this worker within the vector environment.
        env_fn: Zero-argument callable that builds the sub-environment.
        pipe: Worker end of the duplex pipe; replies are ``(result, success)``.
        parent_pipe: Parent end of the pipe, closed immediately in this process.
        shared_memory: If truthy, observations are written into this buffer via
            ``write_to_shared_memory`` and ``None`` is sent through the pipe.
        error_queue: Receives ``(index, exc_type, message, traceback)`` on failure.
        autoreset_mode: Strategy for automatically resetting finished episodes.
    """
    env = env_fn()
    observation_space = env.observation_space
    action_space = env.action_space
    # Tracks whether the next `step` should instead reset (NEXT_STEP mode).
    autoreset = False
    observation = None
    parent_pipe.close()
    try:
        while True:
            command, data = pipe.recv()
            if command == "reset":
                observation, info = env.reset(**data)
                if shared_memory:
                    # Observation goes into shared memory; pipe carries None.
                    write_to_shared_memory(
                        observation_space, index, observation, shared_memory
                    )
                    observation = None
                autoreset = False
                pipe.send(((observation, info), True))
            elif command == "reset-noop":
                # Re-send the last observation with empty info, without resetting.
                pipe.send(((observation, {}), True))
            elif command == "step":
                if autoreset_mode == AutoresetMode.NEXT_STEP:
                    # Episode ended on the previous step: reset now and report
                    # zero reward / not-done for this transition.
                    if autoreset:
                        observation, info = env.reset()
                        reward, terminated, truncated = 0, False, False
                    else:
                        (
                            observation,
                            reward,
                            terminated,
                            truncated,
                            info,
                        ) = env.step(data)
                    autoreset = terminated or truncated
                elif autoreset_mode == AutoresetMode.SAME_STEP:
                    (
                        observation,
                        reward,
                        terminated,
                        truncated,
                        info,
                    ) = env.step(data)
                    if terminated or truncated:
                        # Reset within the same step; preserve the terminal
                        # observation/info under final_obs / final_info.
                        reset_observation, reset_info = env.reset()
                        info = {
                            "final_info": info,
                            "final_obs": observation,
                            **reset_info,
                        }
                        observation = reset_observation
                elif autoreset_mode == AutoresetMode.DISABLED:
                    assert autoreset is False
                    (
                        observation,
                        reward,
                        terminated,
                        truncated,
                        info,
                    ) = env.step(data)
                else:
                    raise ValueError(f"Unexpected autoreset_mode: {autoreset_mode}")
                if shared_memory:
                    write_to_shared_memory(
                        observation_space, index, observation, shared_memory
                    )
                    observation = None
                pipe.send(((observation, reward, terminated, truncated, info), True))
            elif command == "close":
                pipe.send((None, True))
                break
            elif command == "_call":
                name, args, kwargs = data
                # Core operations must use their dedicated commands, not `_call`.
                if name in ["reset", "step", "close", "_setattr", "_check_spaces"]:
                    raise ValueError(
                        f"Trying to call function `{name}` with `call`, use `{name}` directly instead."
                    )
                attr = env.get_wrapper_attr(name)
                if callable(attr):
                    pipe.send((attr(*args, **kwargs), True))
                else:
                    pipe.send((attr, True))
            elif command == "_setattr":
                name, value = data
                env.set_wrapper_attr(name, value)
                pipe.send((None, True))
            elif command == "_check_spaces":
                obs_mode, single_obs_space, single_action_space = data
                # Reply with (observation-space match, action-space match);
                # "same" mode requires equality, otherwise dtype/shape equivalence.
                pipe.send(
                    (
                        (
                            (
                                single_obs_space == observation_space
                                if obs_mode == "same"
                                else is_space_dtype_shape_equiv(
                                    single_obs_space, observation_space
                                )
                            ),
                            single_action_space == action_space,
                        ),
                        True,
                    )
                )
            else:
                raise RuntimeError(
                    f"Received unknown command `{command}`. Must be one of [`reset`, `step`, `close`, `_call`, `_setattr`, `_check_spaces`]."
                )
    except (KeyboardInterrupt, Exception):
        # Report the failure to the parent and reply unsuccessfully so the
        # parent's recv() does not block forever.
        error_type, error_message, _ = sys.exc_info()
        trace = traceback.format_exc()
        error_queue.put((index, error_type, error_message, trace))
        pipe.send((None, False))
    finally:
        env.close()
| AsyncVectorEnv |
python | openai__openai-python | src/openai/types/container_create_params.py | {
"start": 591,
"end": 814
class ____(TypedDict, total=False):
    """Expiration policy for a container, relative to a time anchor."""
    anchor: Required[Literal["last_active_at"]]
    """Time anchor for the expiration time.
    Currently only 'last_active_at' is supported.
    """
    # Number of minutes relative to the anchor — presumably the delay until
    # expiration; confirm against the API reference.
    minutes: Required[int]
| ExpiresAfter |
python | ansible__ansible | lib/ansible/module_utils/six/__init__.py | {
"start": 15342,
"end": 18368
class ____(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_request"""
# Mapping of six.moves.urllib.request names to their Python 2 / Python 3 source
# modules; each MovedAttribute resolves lazily on first attribute access.
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urll2", "urllib.request") if False else MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
    MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
    MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
# URLopener / FancyURLopener are only registered on Python < 3.14.
if sys.version_info[:2] < (3, 14):
    _urllib_request_moved_attributes.extend(
        [
            MovedAttribute("URLopener", "urllib", "urllib.request"),
            MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
        ]
    )
# Install each moved attribute on the lazy module class, then register the
# module instance under both the underscore and dotted names.
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")
| Module_six_moves_urllib_request |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 42940,
"end": 43169
class ____(sgqlc.types.Enum):
    """
    GraphQL enum of reasons for declining a suggested topic.

    Choices: ``NOT_RELEVANT``, ``PERSONAL_PREFERENCE``, ``TOO_GENERAL``,
    ``TOO_SPECIFIC``.
    """
    __schema__ = graphql_schema
    __choices__ = ("NOT_RELEVANT", "PERSONAL_PREFERENCE", "TOO_GENERAL", "TOO_SPECIFIC")
| TopicSuggestionDeclineReason |
python | run-llama__llama_index | llama-index-integrations/retrievers/llama-index-retrievers-bm25/llama_index/retrievers/bm25/base.py | {
"start": 1034,
"end": 9357
class ____(BaseRetriever):
    r"""
    A BM25 retriever that uses the BM25 algorithm to retrieve nodes.
    Args:
        nodes (List[BaseNode], optional):
            The nodes to index. If not provided, an existing BM25 object must be passed.
        stemmer (Stemmer.Stemmer, optional):
            The stemmer to use. Defaults to an english stemmer.
        language (str, optional):
            The language to use for stopword removal. Defaults to "en".
        existing_bm25 (bm25s.BM25, optional):
            An existing BM25 object to use. If not provided, nodes must be passed.
        similarity_top_k (int, optional):
            The number of results to return. Defaults to DEFAULT_SIMILARITY_TOP_K.
        callback_manager (CallbackManager, optional):
            The callback manager to use. Defaults to None.
        objects (List[IndexNode], optional):
            The objects to retrieve. Defaults to None.
        object_map (dict, optional):
            A map of object IDs to nodes. Defaults to None.
        token_pattern (str, optional):
            The token pattern to use. Defaults to (?u)\\b\\w\\w+\\b.
        skip_stemming (bool, optional):
            Whether to skip stemming. Defaults to False.
        verbose (bool, optional):
            Whether to show progress. Defaults to False.
    """
    def __init__(
        self,
        nodes: Optional[List[BaseNode]] = None,
        stemmer: Optional[Stemmer.Stemmer] = None,
        language: str = "en",
        existing_bm25: Optional[bm25s.BM25] = None,
        similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
        callback_manager: Optional[CallbackManager] = None,
        objects: Optional[List[IndexNode]] = None,
        object_map: Optional[dict] = None,
        verbose: bool = False,
        skip_stemming: bool = False,
        token_pattern: str = r"(?u)\b\w\w+\b",
        filters: Optional[MetadataFilters] = None,
        corpus_weight_mask: Optional[List[int]] = None,
    ) -> None:
        self.stemmer = stemmer or Stemmer.Stemmer("english")
        self.similarity_top_k = similarity_top_k
        self.token_pattern = token_pattern
        self.skip_stemming = skip_stemming
        if existing_bm25 is not None:
            # Reuse a prebuilt index and its stored corpus.
            self.bm25 = existing_bm25
            self.corpus = existing_bm25.corpus
        else:
            if nodes is None:
                raise ValueError("Please pass nodes or an existing BM25 object.")
            # Corpus entries are serialized node metadata plus the node id,
            # so retrieved indexes can be mapped back to nodes.
            self.corpus = [
                node_to_metadata_dict(node) | {"node_id": node.node_id}
                for node in nodes
            ]
            corpus_tokens = bm25s.tokenize(
                [node.get_content(metadata_mode=MetadataMode.EMBED) for node in nodes],
                stopwords=language,
                stemmer=self.stemmer if not skip_stemming else None,
                token_pattern=self.token_pattern,
                show_progress=verbose,
            )
            self.bm25 = bm25s.BM25()
            self.bm25.index(corpus_tokens, show_progress=verbose)
        # bm25s requires k <= number of indexed documents; clamp top-k down.
        if (
            self.bm25.scores.get("num_docs")
            and int(self.bm25.scores["num_docs"]) < self.similarity_top_k
        ):
            if int(self.bm25.scores["num_docs"]) == 0:
                raise ValueError(
                    "No nodes added to the retriever kindly add more data."
                )
            logger.warning(
                "As bm25s.BM25 requires k less than or equal to number of nodes added. Overriding the value of similarity_top_k to number of nodes added."
            )
            self.similarity_top_k = int(self.bm25.scores["num_docs"])
        self.corpus_weight_mask = corpus_weight_mask or None
        if filters and self.corpus:
            # Build a weight mask for each corpus to filter out only relevant nodes
            _corpus_dict = {
                corpus_token["node_id"]: corpus_token for corpus_token in self.corpus
            }
            _query_filter_fn = build_metadata_filter_fn(
                lambda node_id: _corpus_dict[node_id], filters
            )
            # 1 keeps the document, 0 filters it out of scoring.
            self.corpus_weight_mask = [
                int(_query_filter_fn(corpus_token["node_id"]))
                for corpus_token in self.corpus
            ]
        super().__init__(
            callback_manager=callback_manager,
            object_map=object_map,
            objects=objects,
            verbose=verbose,
        )
    @classmethod
    def from_defaults(
        cls,
        index: Optional[VectorStoreIndex] = None,
        nodes: Optional[List[BaseNode]] = None,
        docstore: Optional[BaseDocumentStore] = None,
        stemmer: Optional[Stemmer.Stemmer] = None,
        language: str = "en",
        similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
        verbose: bool = False,
        skip_stemming: bool = False,
        token_pattern: str = r"(?u)\b\w\w+\b",
        filters: Optional[MetadataFilters] = None,
        # deprecated
        tokenizer: Optional[Callable[[str], List[str]]] = None,
    ) -> "BM25Retriever":
        """Build a retriever from exactly one of *index*, *nodes*, or *docstore*."""
        if tokenizer is not None:
            logger.warning(
                "The tokenizer parameter is deprecated and will be removed in a future release. "
                "Use a stemmer from PyStemmer instead."
            )
        # ensure only one of index, nodes, or docstore is passed
        if sum(bool(val) for val in [index, nodes, docstore]) != 1:
            raise ValueError("Please pass exactly one of index, nodes, or docstore.")
        if index is not None:
            docstore = index.docstore
        if docstore is not None:
            nodes = cast(List[BaseNode], list(docstore.docs.values()))
        assert nodes is not None, (
            "Please pass exactly one of index, nodes, or docstore."
        )
        return cls(
            nodes=nodes,
            stemmer=stemmer,
            language=language,
            similarity_top_k=similarity_top_k,
            verbose=verbose,
            skip_stemming=skip_stemming,
            token_pattern=token_pattern,
            filters=filters,
        )
    def get_persist_args(self) -> Dict[str, Any]:
        """Get Persist Args Dict to Save."""
        return {
            DEFAULT_PERSIST_ARGS[key]: getattr(self, key)
            for key in DEFAULT_PERSIST_ARGS
            if hasattr(self, key)
        }
    def persist(self, path: str, encoding: str = "utf-8", **kwargs: Any) -> None:
        """Persist the retriever to a directory."""
        self.bm25.save(path, corpus=self.corpus, **kwargs)
        with open(
            os.path.join(path, DEFAULT_PERSIST_FILENAME), "w", encoding=encoding
        ) as f:
            json.dump(self.get_persist_args(), f, indent=2)
    @classmethod
    def from_persist_dir(
        cls, path: str, encoding: str = "utf-8", **kwargs: Any
    ) -> "BM25Retriever":
        """Load the retriever from a directory."""
        bm25 = bm25s.BM25.load(path, load_corpus=True, **kwargs)
        with open(os.path.join(path, DEFAULT_PERSIST_FILENAME), encoding=encoding) as f:
            retriever_data = json.load(f)
        return cls(existing_bm25=bm25, **retriever_data)
    def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        """Tokenize the query, score it with BM25, and return scored nodes."""
        query = query_bundle.query_str
        tokenized_query = bm25s.tokenize(
            query,
            stemmer=self.stemmer if not self.skip_stemming else None,
            token_pattern=self.token_pattern,
            show_progress=self._verbose,
        )
        indexes, scores = self.bm25.retrieve(
            tokenized_query,
            k=self.similarity_top_k,
            show_progress=self._verbose,
            weight_mask=np.array(self.corpus_weight_mask)
            if self.corpus_weight_mask
            else None,
        )
        # batched, but only one query
        indexes = indexes[0]
        scores = scores[0]
        nodes: List[NodeWithScore] = []
        for idx, score in zip(indexes, scores):
            # idx can be an int or a dict of the node
            if isinstance(idx, dict):
                node = metadata_dict_to_node(idx)
            else:
                node_dict = self.corpus[int(idx)]
                node = metadata_dict_to_node(node_dict)
            nodes.append(NodeWithScore(node=node, score=float(score)))
        return nodes
| BM25Retriever |
python | django__django | django/utils/autoreload.py | {
"start": 14771,
"end": 24804
class ____(BaseReloader):
    """File-watching reloader backed by the Watchman service (via pywatchman)."""
    def __init__(self):
        self.roots = defaultdict(set)
        # Set whenever a request finishes, prompting a watch refresh in tick().
        self.processed_request = threading.Event()
        self.client_timeout = int(os.environ.get("DJANGO_WATCHMAN_TIMEOUT", 5))
        super().__init__()
    @cached_property
    def client(self):
        # Lazily create one pywatchman client per reloader instance.
        return pywatchman.client(timeout=self.client_timeout)
    def _watch_root(self, root):
        # In practice this shouldn't occur, however, it's possible that a
        # directory that doesn't exist yet is being watched. If it's outside of
        # sys.path then this will end up a new root. How to handle this isn't
        # clear: Not adding the root will likely break when subscribing to the
        # changes, however, as this is currently an internal API, no files
        # will be being watched outside of sys.path. Fixing this by checking
        # inside watch_glob() and watch_dir() is expensive, instead this could
        # could fall back to the StatReloader if this case is detected? For
        # now, watching its parent, if possible, is sufficient.
        if not root.exists():
            if not root.parent.exists():
                logger.warning(
                    "Unable to watch root dir %s as neither it or its parent exist.",
                    root,
                )
                return
            root = root.parent
        result = self.client.query("watch-project", str(root.absolute()))
        if "warning" in result:
            logger.warning("Watchman warning: %s", result["warning"])
        logger.debug("Watchman watch-project result: %s", result)
        return result["watch"], result.get("relative_path")
    @lru_cache
    def _get_clock(self, root):
        # Cached per root: the clock marks "now" so subscriptions only report
        # changes that happen after they are created.
        return self.client.query("clock", root)["clock"]
    def _subscribe(self, directory, name, expression):
        """Create a named Watchman subscription for *expression* under *directory*."""
        root, rel_path = self._watch_root(directory)
        # Only receive notifications of files changing, filtering out other
        # types like special files:
        # https://facebook.github.io/watchman/docs/type
        only_files_expression = [
            "allof",
            ["anyof", ["type", "f"], ["type", "l"]],
            expression,
        ]
        query = {
            "expression": only_files_expression,
            "fields": ["name"],
            "since": self._get_clock(root),
            "dedup_results": True,
        }
        if rel_path:
            query["relative_root"] = rel_path
        logger.debug(
            "Issuing watchman subscription %s, for root %s. Query: %s",
            name,
            root,
            query,
        )
        self.client.query("subscribe", root, name, query)
    def _subscribe_dir(self, directory, filenames):
        """Subscribe to specific *filenames* inside *directory*.

        Falls back to watching the parent directory (with prefixed filenames)
        when *directory* does not exist yet.
        """
        if not directory.exists():
            if not directory.parent.exists():
                logger.warning(
                    "Unable to watch directory %s as neither it or its parent exist.",
                    directory,
                )
                return
            prefix = "files-parent-%s" % directory.name
            filenames = ["%s/%s" % (directory.name, filename) for filename in filenames]
            directory = directory.parent
            expression = ["name", filenames, "wholename"]
        else:
            prefix = "files"
            expression = ["name", filenames]
        self._subscribe(directory, "%s:%s" % (prefix, directory), expression)
    def _watch_glob(self, directory, patterns):
        """
        Watch a directory with a specific glob. If the directory doesn't yet
        exist, attempt to watch the parent directory and amend the patterns to
        include this. It's important this method isn't called more than one per
        directory when updating all subscriptions. Subsequent calls will
        overwrite the named subscription, so it must include all possible glob
        expressions.
        """
        prefix = "glob"
        if not directory.exists():
            if not directory.parent.exists():
                logger.warning(
                    "Unable to watch directory %s as neither it or its parent exist.",
                    directory,
                )
                return
            prefix = "glob-parent-%s" % directory.name
            patterns = ["%s/%s" % (directory.name, pattern) for pattern in patterns]
            directory = directory.parent
        expression = ["anyof"]
        for pattern in patterns:
            expression.append(["match", pattern, "wholename"])
        self._subscribe(directory, "%s:%s" % (prefix, directory), expression)
    def watched_roots(self, watched_files):
        """Return the frozenset of directories that need a Watchman root."""
        extra_directories = self.directory_globs.keys()
        watched_file_dirs = [f.parent for f in watched_files]
        sys_paths = list(sys_path_directories())
        return frozenset((*extra_directories, *watched_file_dirs, *sys_paths))
    def _update_watches(self):
        """(Re)create all roots and subscriptions for the current watch set."""
        watched_files = list(self.watched_files(include_globs=False))
        found_roots = common_roots(self.watched_roots(watched_files))
        logger.debug("Watching %s files", len(watched_files))
        logger.debug("Found common roots: %s", found_roots)
        # Setup initial roots for performance, shortest roots first.
        for root in sorted(found_roots):
            self._watch_root(root)
        for directory, patterns in self.directory_globs.items():
            self._watch_glob(directory, patterns)
        # Group sorted watched_files by their parent directory.
        sorted_files = sorted(watched_files, key=lambda p: p.parent)
        for directory, group in itertools.groupby(sorted_files, key=lambda p: p.parent):
            # These paths need to be relative to the parent directory.
            self._subscribe_dir(
                directory, [str(p.relative_to(directory)) for p in group]
            )
    def update_watches(self):
        """Wrapper around _update_watches() that tolerates a dead Watchman server."""
        try:
            self._update_watches()
        except Exception as ex:
            # If the service is still available, raise the original exception.
            if self.check_server_status(ex):
                raise
    def _check_subscription(self, sub):
        """Notify file changes reported by subscription *sub*, if any."""
        subscription = self.client.getSubscription(sub)
        if not subscription:
            return
        logger.debug("Watchman subscription %s has results.", sub)
        for result in subscription:
            # When using watch-project, it's not simple to get the relative
            # directory without storing some specific state. Store the full
            # path to the directory in the subscription name, prefixed by its
            # type (glob, files).
            root_directory = Path(result["subscription"].split(":", 1)[1])
            logger.debug("Found root directory %s", root_directory)
            for file in result.get("files", []):
                self.notify_file_changed(root_directory / file)
    def request_processed(self, **kwargs):
        # Signal handler for request_finished: schedule a watch refresh.
        logger.debug("Request processed. Setting update_watches event.")
        self.processed_request.set()
    def tick(self):
        """Generator driven by the base reloader: poll Watchman and yield."""
        request_finished.connect(self.request_processed)
        self.update_watches()
        while True:
            if self.processed_request.is_set():
                self.update_watches()
                self.processed_request.clear()
            try:
                self.client.receive()
            except pywatchman.SocketTimeout:
                pass
            except pywatchman.WatchmanError as ex:
                logger.debug("Watchman error: %s, checking server status.", ex)
                self.check_server_status(ex)
            else:
                for sub in list(self.client.subs.keys()):
                    self._check_subscription(sub)
            yield
            # Protect against busy loops.
            time.sleep(0.1)
    def stop(self):
        # Close the Watchman client before stopping the base reloader.
        self.client.close()
        super().stop()
    def check_server_status(self, inner_ex=None):
        """Return True if the server is available."""
        try:
            self.client.query("version")
        except Exception:
            raise WatchmanUnavailable(str(inner_ex)) from inner_ex
        return True
    @classmethod
    def check_availability(cls):
        """Raise WatchmanUnavailable unless a usable Watchman >= 4.9 is running."""
        if not pywatchman:
            raise WatchmanUnavailable("pywatchman not installed.")
        client = pywatchman.client(timeout=0.1)
        try:
            result = client.capabilityCheck()
        except Exception:
            # The service is down?
            raise WatchmanUnavailable("Cannot connect to the watchman service.")
        version = get_version_tuple(result["version"])
        # Watchman 4.9 includes multiple improvements to watching project
        # directories as well as case insensitive filesystems.
        logger.debug("Watchman version %s", version)
        if version < (4, 9):
            raise WatchmanUnavailable("Watchman 4.9 or later is required.")
def get_reloader():
    """Pick the best available reloader: Watchman when usable, otherwise stat polling."""
    try:
        WatchmanReloader.check_availability()
    except WatchmanUnavailable:
        pass
    else:
        return WatchmanReloader()
    return StatReloader()
def start_django(reloader, main_func, *args, **kwargs):
    """Run *main_func* on a daemon thread while *reloader* watches for changes."""
    ensure_echo_on()
    wrapped_main = check_errors(main_func)
    worker = threading.Thread(
        target=wrapped_main, args=args, kwargs=kwargs, name="django-main-thread"
    )
    worker.daemon = True
    worker.start()
    while not reloader.should_stop:
        reloader.run(worker)
def run_with_reloader(main_func, *args, **kwargs):
    """Entry point for autoreloading: run *main_func* under a file watcher.

    The child process (flagged via DJANGO_AUTORELOAD_ENV) runs *main_func*
    with a reloader; the parent process re-spawns the child in a loop and
    exits with the child's final exit code.
    """
    # Exit cleanly on SIGTERM so cleanup handlers run.
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    try:
        if os.environ.get(DJANGO_AUTORELOAD_ENV) == "true":
            reloader = get_reloader()
            logger.info(
                "Watching for file changes with %s", reloader.__class__.__name__
            )
            start_django(reloader, main_func, *args, **kwargs)
        else:
            exit_code = restart_with_reloader()
            sys.exit(exit_code)
    except KeyboardInterrupt:
        pass
| WatchmanReloader |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.