language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | tests/models/clipseg/test_processing_clipseg.py | {
"start": 991,
"end": 3489
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = CLIPSegProcessor
@classmethod
def _setup_tokenizer(cls):
tokenizer_class = cls._get_component_class_from_processor("tokenizer")
vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: skip
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
with open(merges_file, "w", encoding="utf-8") as fp:
fp.write("\n".join(merges))
return tokenizer_class.from_pretrained(cls.tmpdirname)
@classmethod
def _setup_image_processor(cls):
image_processor_class = cls._get_component_class_from_processor("image_processor")
image_processor_map = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
return image_processor_class(**image_processor_map)
def test_processor_text(self):
processor = self.get_processor()
input_str = "lower newer"
image_input = self.prepare_image_inputs()
inputs = processor(text=input_str, images=image_input)
self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
def test_processor_visual_prompt(self):
processor = self.get_processor()
image_input = self.prepare_image_inputs()
visual_prompt_input = self.prepare_image_inputs()
inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])
# test if it raises when no input is passed
with pytest.raises(ValueError):
processor()
| CLIPSegProcessorTest |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 16407,
"end": 16760
} | class ____(Pix2SkyProjection, Zenithal):
r"""
Zenithal equidistant projection - pixel to sky.
Corresponds to the ``ZEA`` projection in FITS WCS.
See `Zenithal` for a definition of the full transformation.
.. math::
\theta = 90^\circ - 2 \sin^{-1} \left(\frac{\pi R_\theta}{360^\circ}\right)
"""
| Pix2Sky_ZenithalEqualArea |
python | doocs__leetcode | solution/1700-1799/1751.Maximum Number of Events That Can Be Attended II/Solution.py | {
"start": 0,
"end": 476
} | class ____:
def maxValue(self, events: List[List[int]], k: int) -> int:
@cache
def dfs(i: int, k: int) -> int:
if i >= len(events):
return 0
_, ed, val = events[i]
ans = dfs(i + 1, k)
if k:
j = bisect_right(events, ed, lo=i + 1, key=lambda x: x[0])
ans = max(ans, dfs(j, k - 1) + val)
return ans
events.sort()
return dfs(0, k)
| Solution |
python | pandas-dev__pandas | pandas/tests/frame/test_subclass.py | {
"start": 26245,
"end": 26350
} | class ____(Series):
"""A subclass of Series that does not define a constructor."""
| SimpleSeriesSubClass |
python | has2k1__plotnine | plotnine/composition/_plot_spacer.py | {
"start": 126,
"end": 1859
} | class ____(ggplot):
"""
Blank area as wide or as tall as a plot
Parameters
----------
fill :
Background color. The default is a transparent area, but it
can be changed through this parameter.
The color can also be modified by adding a [](`~plotnine.theme`)
and setting the [](`~plotnine.themes.themeable.plot_background`).
See Also
--------
plotnine.composition.Beside : To arrange plots side by side
plotnine.composition.Stack : To arrange plots vertically
plotnine.composition.Compose : For more on composing plots
"""
def __init__(
self,
fill: (
str
| tuple[float, float, float]
| tuple[float, float, float, float]
| None
) = None,
):
super().__init__()
self.theme = theme_void()
if fill:
self.theme += theme(plot_background=element_rect(fill=fill))
def __add__(self, rhs) -> plot_spacer: # pyright: ignore[reportIncompatibleMethodOverride]
"""
Add to spacer
All added objects are no ops except the `plot_background` in
in a theme.
"""
self = deepcopy(self)
if isinstance(rhs, theme):
fill = rhs.getp(("plot_background", "facecolor"))
self.theme += theme(
plot_background=element_rect(fill=fill),
# When a spacer is the "last plot" in a composition,
# it is used to determine the figure size and dpi
# and therefore those aspects should be modifiable.
figure_size=rhs.getp("figure_size"),
dpi=rhs.getp("dpi"),
)
return self
| plot_spacer |
python | keras-team__keras | keras/src/layers/regularization/spatial_dropout.py | {
"start": 4853,
"end": 7300
} | class ____(BaseSpatialDropout):
"""Spatial 3D version of Dropout.
This version performs the same function as Dropout, however, it drops
entire 3D feature maps instead of individual elements. If adjacent voxels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout3D will help promote independence
between feature maps and should be used instead.
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
data_format: `"channels_first"` or `"channels_last"`.
In `"channels_first"` mode, the channels dimension (the depth)
is at index 1, in `"channels_last"` mode is it at index 4.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be `"channels_last"`.
Call arguments:
inputs: A 5D tensor.
training: Python boolean indicating whether the layer
should behave in training mode (applying dropout)
or in inference mode (pass-through).
Input shape:
5D tensor with shape: `(samples, channels, dim1, dim2, dim3)` if
data_format='channels_first'
or 5D tensor with shape: `(samples, dim1, dim2, dim3, channels)` if
data_format='channels_last'.
Output shape: Same as input.
Reference:
- [Tompson et al., 2014](https://arxiv.org/abs/1411.4280)
"""
def __init__(
self, rate, data_format=None, seed=None, name=None, dtype=None
):
super().__init__(rate, seed=seed, name=name, dtype=dtype)
self.data_format = backend.standardize_data_format(data_format)
self.input_spec = InputSpec(ndim=5)
def _get_noise_shape(self, inputs):
input_shape = ops.shape(inputs)
if self.data_format == "channels_first":
return (input_shape[0], input_shape[1], 1, 1, 1)
elif self.data_format == "channels_last":
return (input_shape[0], 1, 1, 1, input_shape[4])
def get_config(self):
base_config = super().get_config()
config = {
"data_format": self.data_format,
}
return {**base_config, **config}
| SpatialDropout3D |
python | numpy__numpy | numpy/_core/tests/test_scalar_methods.py | {
"start": 9668,
"end": 12511
} | class ____:
# test that scalar types have a valid __text_signature__ or __signature__ set
@pytest.mark.parametrize(
"sctype",
[
*sctypes["int"],
*sctypes["uint"],
*sctypes["float"],
*sctypes["complex"],
*sctypes["others"],
np.datetime64,
np.timedelta64,
],
)
def test_constructor_signatures(self, sctype: type[np.generic]):
try:
sig = inspect.signature(sctype)
except ValueError:
pytest.fail(f"missing signature: {sctype}")
assert sig.parameters
@pytest.mark.parametrize(
"sctype",
[np.integer, *sctypes["int"], *sctypes["uint"], *sctypes["float"]],
)
def test_method_signatures_is_integer(self, sctype: type[np.integer | np.floating]):
try:
sig = inspect.signature(sctype.is_integer)
except ValueError:
pytest.fail(f"missing signature: {sctype.__name__}.is_integer")
assert len(sig.parameters) == 1
assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY
@pytest.mark.parametrize("sctype", sctypes["float"])
def test_method_signatures_as_integer_ratio(self, sctype: type[np.floating]):
try:
sig = inspect.signature(sctype.as_integer_ratio)
except ValueError:
pytest.fail(f"missing signature: {sctype.__name__}.as_integer_ratio")
assert len(sig.parameters) == 1
assert sig.parameters["self"].kind == inspect.Parameter.POSITIONAL_ONLY
@pytest.mark.parametrize(
"method_name",
[
"__array_namespace__", "__copy__", "__deepcopy__", "all", "any", "argmax",
"argmin", "argsort", "astype", "byteswap", "choose", "clip", "compress",
"conj", "conjugate", "copy", "cumprod", "cumsum", "diagonal", "dump",
"dumps", "fill", "flatten", "getfield", "item", "max", "mean", "min",
"nonzero", "prod", "put", "ravel", "repeat", "reshape", "resize", "round",
"searchsorted", "setfield", "setflags", "sort", "squeeze", "std", "sum",
"swapaxes", "take", "to_device", "tobytes", "tofile", "tolist", "trace",
"transpose", "var", "view",
],
)
def test_array_scalar_method_signatures(self, method_name: str):
# methods shared by np.generic and np.ndarray should have the same signature
fn_generic = getattr(np.generic, method_name)
sig_generic = inspect.signature(fn_generic)
assert "self" in sig_generic.parameters
assert sig_generic.parameters["self"].kind is inspect.Parameter.POSITIONAL_ONLY
fn_ndarray = getattr(np.ndarray, method_name)
sig_ndarray = inspect.signature(fn_ndarray)
assert sig_generic == sig_ndarray
| TestSignature |
python | pytorch__pytorch | torch/ao/quantization/quantizer/xnnpack_quantizer_utils.py | {
"start": 1323,
"end": 2252
} | class ____:
input_activation: QuantizationSpec | None
output_activation: QuantizationSpec | None
weight: QuantizationSpec | None
bias: QuantizationSpec | None
# TODO: remove, since we can use observer_or_fake_quant_ctr to express this
is_qat: bool = False
# Use Annotated because list[Callable].__module__ is read-only.
OperatorPatternType = typing.Annotated[list[Callable], None]
OperatorPatternType.__module__ = (
"torch.ao.quantization.quantizer.xnnpack_quantizer_utils"
)
AnnotatorType = Callable[
[
torch.fx.GraphModule,
QuantizationConfig | None,
Callable[[Node], bool] | None,
],
list[list[Node]] | None,
]
OP_TO_ANNOTATOR: dict[str, AnnotatorType] = {}
def register_annotator(op: str) -> Callable[[AnnotatorType], None]:
def decorator(annotator: AnnotatorType) -> None:
OP_TO_ANNOTATOR[op] = annotator
return decorator
| QuantizationConfig |
python | google__jax | jax/example_libraries/optimizers.py | {
"start": 4308,
"end": 18725
} | class ____(NamedTuple):
init_fn: InitFn
update_fn: UpdateFn
params_fn: ParamsFn
Schedule = Callable[[Step], float]
def optimizer(opt_maker: Callable[...,
tuple[Callable[[Params], State],
Callable[[Step, Updates, Params], Params],
Callable[[State], Params]]]) -> Callable[..., Optimizer]:
"""Decorator to make an optimizer defined for arrays generalize to containers.
With this decorator, you can write init, update, and get_params functions that
each operate only on single arrays, and convert them to corresponding
functions that operate on pytrees of parameters. See the optimizers defined in
optimizers.py for examples.
Args:
opt_maker: a function that returns an ``(init_fun, update_fun, get_params)``
triple of functions that might only work with ndarrays, as per
.. code-block:: haskell
init_fun :: ndarray -> OptStatePytree ndarray
update_fun :: OptStatePytree ndarray -> OptStatePytree ndarray
get_params :: OptStatePytree ndarray -> ndarray
Returns:
An ``(init_fun, update_fun, get_params)`` triple of functions that work on
arbitrary pytrees, as per
.. code-block:: haskell
init_fun :: ParameterPytree ndarray -> OptimizerState
update_fun :: OptimizerState -> OptimizerState
get_params :: OptimizerState -> ParameterPytree ndarray
The OptimizerState pytree type used by the returned functions is isomorphic
to ``ParameterPytree (OptStatePytree ndarray)``, but may store the state
instead as e.g. a partially-flattened data structure for performance.
"""
@functools.wraps(opt_maker)
def tree_opt_maker(*args, **kwargs):
init, update, get_params = opt_maker(*args, **kwargs)
@functools.wraps(init)
def tree_init(x0_tree):
x0_flat, tree = jax.tree.flatten(x0_tree)
initial_states = [init(x0) for x0 in x0_flat]
states_flat, subtrees = unzip2(map(jax.tree.flatten, initial_states))
return OptimizerState(states_flat, tree, subtrees)
@functools.wraps(update)
def tree_update(i, grad_tree, opt_state):
states_flat, tree, subtrees = opt_state
grad_flat, tree2 = jax.tree.flatten(grad_tree)
if tree2 != tree:
msg = ("optimizer update function was passed a gradient tree that did "
"not match the parameter tree structure with which it was "
"initialized: parameter tree {} and grad tree {}.")
raise TypeError(msg.format(tree, tree2))
states = map(jax.tree.unflatten, subtrees, states_flat)
new_states = map(partial(update, i), grad_flat, states)
new_states_flat, subtrees2 = unzip2(map(jax.tree.flatten, new_states))
for subtree, subtree2 in zip(subtrees, subtrees2):
if subtree2 != subtree:
msg = ("optimizer update function produced an output structure that "
"did not match its input structure: input {} and output {}.")
raise TypeError(msg.format(subtree, subtree2))
return OptimizerState(new_states_flat, tree, subtrees)
@functools.wraps(get_params)
def tree_get_params(opt_state):
states_flat, tree, subtrees = opt_state
states = map(jax.tree.unflatten, subtrees, states_flat)
params = map(get_params, states)
return jax.tree.unflatten(tree, params)
return Optimizer(tree_init, tree_update, tree_get_params)
return tree_opt_maker
### optimizers
@optimizer
def sgd(step_size):
"""Construct optimizer triple for stochastic gradient descent.
Args:
step_size: positive scalar, or a callable representing a step size schedule
that maps the iteration index to a positive scalar.
Returns:
An (init_fun, update_fun, get_params) triple.
"""
step_size = make_schedule(step_size)
def init(x0):
return x0
def update(i, g, x):
return x - step_size(i) * g
def get_params(x):
return x
return Optimizer(init, update, get_params)
@optimizer
def momentum(step_size: Schedule, mass: float):
"""Construct optimizer triple for SGD with momentum.
Args:
step_size: positive scalar, or a callable representing a step size schedule
that maps the iteration index to a positive scalar.
mass: positive scalar representing the momentum coefficient.
Returns:
An (init_fun, update_fun, get_params) triple.
"""
step_size = make_schedule(step_size)
def init(x0):
v0 = jnp.zeros_like(x0)
return x0, v0
def update(i, g, state):
x, velocity = state
velocity = mass * velocity + g
x = x - step_size(i) * velocity
return x, velocity
def get_params(state):
x, _ = state
return x
return init, update, get_params
@optimizer
def nesterov(step_size: Schedule, mass: float):
"""Construct optimizer triple for SGD with Nesterov momentum.
Args:
step_size: positive scalar, or a callable representing a step size schedule
that maps the iteration index to a positive scalar.
mass: positive scalar representing the momentum coefficient.
Returns:
An (init_fun, update_fun, get_params) triple.
"""
step_size = make_schedule(step_size)
def init(x0):
v0 = jnp.zeros_like(x0)
return x0, v0
def update(i, g, state):
x, velocity = state
velocity = mass * velocity + g
x = x - step_size(i) * (mass * velocity + g)
return x, velocity
def get_params(state):
x, _ = state
return x
return init, update, get_params
@optimizer
def adagrad(step_size, momentum=0.9):
"""Construct optimizer triple for Adagrad.
Adaptive Subgradient Methods for Online Learning and Stochastic Optimization:
http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf
Args:
step_size: positive scalar, or a callable representing a step size schedule
that maps the iteration index to a positive scalar.
momentum: optional, a positive scalar value for momentum
Returns:
An (init_fun, update_fun, get_params) triple.
"""
step_size = make_schedule(step_size)
def init(x0):
g_sq = jnp.zeros_like(x0)
m = jnp.zeros_like(x0)
return x0, g_sq, m
def update(i, g, state):
x, g_sq, m = state
g_sq += jnp.square(g)
g_sq_inv_sqrt = jnp.where(g_sq > 0, 1. / jnp.sqrt(g_sq), 0.0)
m = (1. - momentum) * (g * g_sq_inv_sqrt) + momentum * m
x = x - step_size(i) * m
return x, g_sq, m
def get_params(state):
x, _, _ = state
return x
return init, update, get_params
@optimizer
def rmsprop(step_size, gamma=0.9, eps=1e-8):
"""Construct optimizer triple for RMSProp.
Args:
step_size: positive scalar, or a callable representing a step size schedule
that maps the iteration index to a positive scalar.
gamma: Decay parameter.
eps: Epsilon parameter.
Returns:
An (init_fun, update_fun, get_params) triple.
"""
step_size = make_schedule(step_size)
def init(x0):
avg_sq_grad = jnp.zeros_like(x0)
return x0, avg_sq_grad
def update(i, g, state):
x, avg_sq_grad = state
avg_sq_grad = avg_sq_grad * gamma + jnp.square(g) * (1. - gamma)
x = x - step_size(i) * g / jnp.sqrt(avg_sq_grad + eps)
return x, avg_sq_grad
def get_params(state):
x, _ = state
return x
return init, update, get_params
@optimizer
def rmsprop_momentum(step_size, gamma=0.9, eps=1e-8, momentum=0.9):
"""Construct optimizer triple for RMSProp with momentum.
This optimizer is separate from the rmsprop optimizer because it needs to
keep track of additional parameters.
Args:
step_size: positive scalar, or a callable representing a step size schedule
that maps the iteration index to a positive scalar.
gamma: Decay parameter.
eps: Epsilon parameter.
momentum: Momentum parameter.
Returns:
An (init_fun, update_fun, get_params) triple.
"""
step_size = make_schedule(step_size)
def init(x0):
avg_sq_grad = jnp.zeros_like(x0)
mom = jnp.zeros_like(x0)
return x0, avg_sq_grad, mom
def update(i, g, state):
x, avg_sq_grad, mom = state
avg_sq_grad = avg_sq_grad * gamma + jnp.square(g) * (1. - gamma)
mom = momentum * mom + step_size(i) * g / jnp.sqrt(avg_sq_grad + eps)
x = x - mom
return x, avg_sq_grad, mom
def get_params(state):
x, _, _ = state
return x
return init, update, get_params
@optimizer
def adam(step_size, b1=0.9, b2=0.999, eps=1e-8):
"""Construct optimizer triple for Adam.
Args:
step_size: positive scalar, or a callable representing a step size schedule
that maps the iteration index to a positive scalar.
b1: optional, a positive scalar value for beta_1, the exponential decay rate
for the first moment estimates (default 0.9).
b2: optional, a positive scalar value for beta_2, the exponential decay rate
for the second moment estimates (default 0.999).
eps: optional, a positive scalar value for epsilon, a small constant for
numerical stability (default 1e-8).
Returns:
An (init_fun, update_fun, get_params) triple.
"""
step_size = make_schedule(step_size)
def init(x0):
m0 = jnp.zeros_like(x0)
v0 = jnp.zeros_like(x0)
return x0, m0, v0
def update(i, g, state):
x, m, v = state
m = (1 - b1) * g + b1 * m # First moment estimate.
v = (1 - b2) * jnp.square(g) + b2 * v # Second moment estimate.
mhat = m / (1 - jnp.asarray(b1, m.dtype) ** (i + 1)) # Bias correction.
vhat = v / (1 - jnp.asarray(b2, m.dtype) ** (i + 1))
x = x - step_size(i) * mhat / (jnp.sqrt(vhat) + eps)
return x, m, v
def get_params(state):
x, _, _ = state
return x
return init, update, get_params
@optimizer
def adamax(step_size, b1=0.9, b2=0.999, eps=1e-8):
"""Construct optimizer triple for AdaMax (a variant of Adam based on infinity norm).
Args:
step_size: positive scalar, or a callable representing a step size schedule
that maps the iteration index to a positive scalar.
b1: optional, a positive scalar value for beta_1, the exponential decay rate
for the first moment estimates (default 0.9).
b2: optional, a positive scalar value for beta_2, the exponential decay rate
for the second moment estimates (default 0.999).
eps: optional, a positive scalar value for epsilon, a small constant for
numerical stability (default 1e-8).
Returns:
An (init_fun, update_fun, get_params) triple.
"""
step_size = make_schedule(step_size)
def init(x0):
m0 = jnp.zeros_like(x0)
u0 = jnp.zeros_like(x0)
return x0, m0, u0
def update(i, g, state):
x, m, u = state
m = (1 - b1) * g + b1 * m # First moment estimate.
u = jnp.maximum(b2 * u, jnp.abs(g)) # Update exponentially weighted infinity norm.
x = (x - (step_size(i) / (1 - jnp.asarray(b1, m.dtype) ** (i + 1))) * m
/ (u + eps))
return x, m, u
def get_params(state):
x, _, _ = state
return x
return init, update, get_params
@optimizer
def sm3(step_size, momentum=0.9):
"""Construct optimizer triple for SM3.
Memory-Efficient Adaptive Optimization for Large-Scale Learning.
https://arxiv.org/abs/1901.11150
Args:
step_size: positive scalar, or a callable representing a step size schedule
that maps the iteration index to a positive scalar.
momentum: optional, a positive scalar value for momentum
Returns:
An (init_fun, update_fun, get_params) triple.
"""
step_size = make_schedule(step_size)
def splice(seq, i, x):
lst = list(seq)
lst[i:i+1] = x
return lst
def broadcast_into(ndim, x, axis):
idx = splice([None] * ndim, axis, [slice(None)])
return x[tuple(idx)]
def init(x0):
x_shape = x0.shape
x0 = jnp.atleast_1d(x0)
vs = [jnp.zeros(sz, dtype=x0.dtype) for sz in x0.shape]
return x0, jnp.zeros_like(x0), vs, x_shape
def update(i, g, state):
x, m, vs, x_shape = state
vs = [broadcast_into(g.ndim, v, i) for i, v in enumerate(vs)]
accum = functools.reduce(jnp.minimum, vs) + jnp.square(g)
accum_inv_sqrt = jnp.where(accum > 0, 1. / jnp.sqrt(accum), 0)
m = (1. - momentum) * (g * accum_inv_sqrt) + momentum * m
x = x - step_size(i) * m
vs = [accum.max(splice(range(x.ndim), j, [])) for j in range(x.ndim)]
return x, m, vs, x_shape
def get_params(state):
x, _, _, x_shape = state
return x.reshape(x_shape)
return init, update, get_params
### learning rate schedules
def constant(step_size) -> Schedule:
def schedule(i):
return step_size
return schedule
def exponential_decay(step_size, decay_steps, decay_rate):
def schedule(i):
return step_size * decay_rate ** (i / decay_steps)
return schedule
def inverse_time_decay(step_size, decay_steps, decay_rate, staircase=False):
if staircase:
def schedule(i):
return step_size / (1 + decay_rate * jnp.floor(i / decay_steps))
else:
def schedule(i):
return step_size / (1 + decay_rate * i / decay_steps)
return schedule
def polynomial_decay(step_size, decay_steps, final_step_size, power=1.0):
def schedule(step_num):
step_num = jnp.minimum(step_num, decay_steps)
step_mult = (1 - step_num / decay_steps) ** power
return step_mult * (step_size - final_step_size) + final_step_size
return schedule
def piecewise_constant(boundaries: Any, values: Any):
boundaries = jnp.array(boundaries)
values = jnp.array(values)
if not boundaries.ndim == values.ndim == 1:
raise ValueError("boundaries and values must be sequences")
if not boundaries.shape[0] == values.shape[0] - 1:
raise ValueError("boundaries length must be one shorter than values length")
def schedule(i):
return values[jnp.sum(i > boundaries)]
return schedule
def make_schedule(scalar_or_schedule: float | Schedule) -> Schedule:
if callable(scalar_or_schedule):
return scalar_or_schedule
elif jnp.ndim(scalar_or_schedule) == 0:
return constant(scalar_or_schedule)
else:
raise TypeError(type(scalar_or_schedule))
### utilities
def l2_norm(tree):
"""Compute the l2 norm of a pytree of arrays. Useful for weight decay."""
leaves, _ = jax.tree.flatten(tree)
return jnp.sqrt(sum(jnp.vdot(x, x) for x in leaves))
def clip_grads(grad_tree, max_norm):
"""Clip gradients stored as a pytree of arrays to maximum norm `max_norm`."""
norm = l2_norm(grad_tree)
normalize = lambda g: jnp.where(norm < max_norm, g, g * (max_norm / norm))
return jax.tree.map(normalize, grad_tree)
### serialization utilities
| Optimizer |
python | pydantic__pydantic | pydantic/v1/fields.py | {
"start": 13552,
"end": 48882
} | class ____(Representation):
__slots__ = (
'type_',
'outer_type_',
'annotation',
'sub_fields',
'sub_fields_mapping',
'key_field',
'validators',
'pre_validators',
'post_validators',
'default',
'default_factory',
'required',
'final',
'model_config',
'name',
'alias',
'has_alias',
'field_info',
'discriminator_key',
'discriminator_alias',
'validate_always',
'allow_none',
'shape',
'class_validators',
'parse_json',
)
def __init__(
self,
*,
name: str,
type_: Type[Any],
class_validators: Optional[Dict[str, Validator]],
model_config: Type['BaseConfig'],
default: Any = None,
default_factory: Optional[NoArgAnyCallable] = None,
required: 'BoolUndefined' = Undefined,
final: bool = False,
alias: Optional[str] = None,
field_info: Optional[FieldInfo] = None,
) -> None:
self.name: str = name
self.has_alias: bool = alias is not None
self.alias: str = alias if alias is not None else name
self.annotation = type_
self.type_: Any = convert_generics(type_)
self.outer_type_: Any = type_
self.class_validators = class_validators or {}
self.default: Any = default
self.default_factory: Optional[NoArgAnyCallable] = default_factory
self.required: 'BoolUndefined' = required
self.final: bool = final
self.model_config = model_config
self.field_info: FieldInfo = field_info or FieldInfo(default)
self.discriminator_key: Optional[str] = self.field_info.discriminator
self.discriminator_alias: Optional[str] = self.discriminator_key
self.allow_none: bool = False
self.validate_always: bool = False
self.sub_fields: Optional[List[ModelField]] = None
self.sub_fields_mapping: Optional[Dict[str, 'ModelField']] = None # used for discriminated union
self.key_field: Optional[ModelField] = None
self.validators: 'ValidatorsList' = []
self.pre_validators: Optional['ValidatorsList'] = None
self.post_validators: Optional['ValidatorsList'] = None
self.parse_json: bool = False
self.shape: int = SHAPE_SINGLETON
self.model_config.prepare_field(self)
self.prepare()
def get_default(self) -> Any:
return smart_deepcopy(self.default) if self.default_factory is None else self.default_factory()
@staticmethod
def _get_field_info(
field_name: str, annotation: Any, value: Any, config: Type['BaseConfig']
) -> Tuple[FieldInfo, Any]:
"""
Get a FieldInfo from a root typing.Annotated annotation, value, or config default.
The FieldInfo may be set in typing.Annotated or the value, but not both. If neither contain
a FieldInfo, a new one will be created using the config.
:param field_name: name of the field for use in error messages
:param annotation: a type hint such as `str` or `Annotated[str, Field(..., min_length=5)]`
:param value: the field's assigned value
:param config: the model's config object
:return: the FieldInfo contained in the `annotation`, the value, or a new one from the config.
"""
field_info_from_config = config.get_field_info(field_name)
field_info = None
if get_origin(annotation) is Annotated:
field_infos = [arg for arg in get_args(annotation)[1:] if isinstance(arg, FieldInfo)]
if len(field_infos) > 1:
raise ValueError(f'cannot specify multiple `Annotated` `Field`s for {field_name!r}')
field_info = next(iter(field_infos), None)
if field_info is not None:
field_info = copy.copy(field_info)
field_info.update_from_config(field_info_from_config)
if field_info.default not in (Undefined, Required):
raise ValueError(f'`Field` default cannot be set in `Annotated` for {field_name!r}')
if value is not Undefined and value is not Required:
# check also `Required` because of `validate_arguments` that sets `...` as default value
field_info.default = value
if isinstance(value, FieldInfo):
if field_info is not None:
raise ValueError(f'cannot specify `Annotated` and value `Field`s together for {field_name!r}')
field_info = value
field_info.update_from_config(field_info_from_config)
elif field_info is None:
field_info = FieldInfo(value, **field_info_from_config)
value = None if field_info.default_factory is not None else field_info.default
field_info._validate()
return field_info, value
@classmethod
def infer(
cls,
*,
name: str,
value: Any,
annotation: Any,
class_validators: Optional[Dict[str, Validator]],
config: Type['BaseConfig'],
) -> 'ModelField':
from pydantic.v1.schema import get_annotation_from_field_info
field_info, value = cls._get_field_info(name, annotation, value, config)
required: 'BoolUndefined' = Undefined
if value is Required:
required = True
value = None
elif value is not Undefined:
required = False
annotation = get_annotation_from_field_info(annotation, field_info, name, config.validate_assignment)
return cls(
name=name,
type_=annotation,
alias=field_info.alias,
class_validators=class_validators,
default=value,
default_factory=field_info.default_factory,
required=required,
model_config=config,
field_info=field_info,
)
def set_config(self, config: Type['BaseConfig']) -> None:
self.model_config = config
info_from_config = config.get_field_info(self.name)
config.prepare_field(self)
new_alias = info_from_config.get('alias')
new_alias_priority = info_from_config.get('alias_priority') or 0
if new_alias and new_alias_priority >= (self.field_info.alias_priority or 0):
self.field_info.alias = new_alias
self.field_info.alias_priority = new_alias_priority
self.alias = new_alias
new_exclude = info_from_config.get('exclude')
if new_exclude is not None:
self.field_info.exclude = ValueItems.merge(self.field_info.exclude, new_exclude)
new_include = info_from_config.get('include')
if new_include is not None:
self.field_info.include = ValueItems.merge(self.field_info.include, new_include, intersect=True)
@property
def alt_alias(self) -> bool:
return self.name != self.alias
def prepare(self) -> None:
"""
Prepare the field but inspecting self.default, self.type_ etc.
Note: this method is **not** idempotent (because _type_analysis is not idempotent),
e.g. calling it it multiple times may modify the field and configure it incorrectly.
"""
self._set_default_and_type()
if self.type_.__class__ is ForwardRef or self.type_.__class__ is DeferredType:
# self.type_ is currently a ForwardRef and there's nothing we can do now,
# user will need to call model.update_forward_refs()
return
self._type_analysis()
if self.required is Undefined:
self.required = True
if self.default is Undefined and self.default_factory is None:
self.default = None
self.populate_validators()
def _set_default_and_type(self) -> None:
"""
Set the default value, infer the type if needed and check if `None` value is valid.
"""
if self.default_factory is not None:
if self.type_ is Undefined:
raise errors_.ConfigError(
f'you need to set the type of field {self.name!r} when using `default_factory`'
)
return
default_value = self.get_default()
if default_value is not None and self.type_ is Undefined:
self.type_ = default_value.__class__
self.outer_type_ = self.type_
self.annotation = self.type_
if self.type_ is Undefined:
raise errors_.ConfigError(f'unable to infer type for attribute "{self.name}"')
if self.required is False and default_value is None:
self.allow_none = True
def _type_analysis(self) -> None:  # noqa: C901 (ignore complexity)
    """
    Inspect ``self.type_`` and configure this field accordingly: unwrap
    wrapper types, set ``shape``, build ``sub_fields``/``key_field`` and
    flags such as ``allow_none``/``parse_json``.

    Recursive (re-invokes itself after unwrapping Json/Final/Annotated/
    Optional), and therefore not idempotent.
    """
    # typing interface is horrible, we have to do some ugly checks
    if lenient_issubclass(self.type_, JsonWrapper):
        self.type_ = self.type_.inner_type
        self.parse_json = True
    elif lenient_issubclass(self.type_, Json):
        self.type_ = Any
        self.parse_json = True
    elif isinstance(self.type_, TypeVar):
        # reduce a TypeVar to its bound, its constraints, or Any
        if self.type_.__bound__:
            self.type_ = self.type_.__bound__
        elif self.type_.__constraints__:
            self.type_ = Union[self.type_.__constraints__]
        else:
            self.type_ = Any
    elif is_new_type(self.type_):
        self.type_ = new_type_supertype(self.type_)

    if self.type_ is Any or self.type_ is object:
        # anything goes: optional unless explicitly required
        if self.required is Undefined:
            self.required = False
        self.allow_none = True
        return
    elif self.type_ is Pattern or self.type_ is re.Pattern:
        # python 3.7 only, Pattern is a typing object but without sub fields
        return
    elif is_literal_type(self.type_):
        return
    elif is_typeddict(self.type_):
        return

    if is_finalvar(self.type_):
        self.final = True

        if self.type_ is Final:
            self.type_ = Any
        else:
            self.type_ = get_args(self.type_)[0]

        self._type_analysis()
        return

    origin = get_origin(self.type_)

    if origin is Annotated or is_typeddict_special(origin):
        # unwrap and analyse the first argument (the actual type)
        self.type_ = get_args(self.type_)[0]
        self._type_analysis()
        return

    if self.discriminator_key is not None and not is_union(origin):
        raise TypeError('`discriminator` can only be used with `Union` type with more than one variant')

    # add extra check for `collections.abc.Hashable` for python 3.10+ where origin is not `None`
    if origin is None or origin is CollectionsHashable:
        # field is not "typing" object eg. Union, Dict, List etc.
        # allow None for virtual superclasses of NoneType, e.g. Hashable
        if isinstance(self.type_, type) and isinstance(None, self.type_):
            self.allow_none = True
        return
    elif origin is Callable:
        return
    elif is_union(origin):
        types_ = []
        for type_ in get_args(self.type_):
            if is_none_type(type_) or type_ is Any or type_ is object:
                if self.required is Undefined:
                    self.required = False
                self.allow_none = True
            if is_none_type(type_):
                # NoneType is dropped; it is represented by allow_none instead
                continue
            types_.append(type_)

        if len(types_) == 1:
            # Optional[]
            self.type_ = types_[0]
            # this is the one case where the "outer type" isn't just the original type
            self.outer_type_ = self.type_
            # re-run to correctly interpret the new self.type_
            self._type_analysis()
        else:
            self.sub_fields = [self._create_sub_type(t, f'{self.name}_{display_as_type(t)}') for t in types_]

            if self.discriminator_key is not None:
                self.prepare_discriminated_union_sub_fields()
        return
    elif issubclass(origin, Tuple):  # type: ignore
        # origin == Tuple without item type
        args = get_args(self.type_)
        if not args:  # plain tuple
            self.type_ = Any
            self.shape = SHAPE_TUPLE_ELLIPSIS
        elif len(args) == 2 and args[1] is Ellipsis:  # e.g. Tuple[int, ...]
            self.type_ = args[0]
            self.shape = SHAPE_TUPLE_ELLIPSIS
            self.sub_fields = [self._create_sub_type(args[0], f'{self.name}_0')]
        elif args == ((),):  # Tuple[()] means empty tuple
            self.shape = SHAPE_TUPLE
            self.type_ = Any
            self.sub_fields = []
        else:
            self.shape = SHAPE_TUPLE
            self.sub_fields = [self._create_sub_type(t, f'{self.name}_{i}') for i, t in enumerate(args)]
        return
    elif issubclass(origin, List):
        # Create self validators
        get_validators = getattr(self.type_, '__get_validators__', None)
        if get_validators:
            self.class_validators.update(
                {f'list_{i}': Validator(validator, pre=True) for i, validator in enumerate(get_validators())}
            )

        self.type_ = get_args(self.type_)[0]
        self.shape = SHAPE_LIST
    elif issubclass(origin, Set):
        # Create self validators
        get_validators = getattr(self.type_, '__get_validators__', None)
        if get_validators:
            self.class_validators.update(
                {f'set_{i}': Validator(validator, pre=True) for i, validator in enumerate(get_validators())}
            )

        self.type_ = get_args(self.type_)[0]
        self.shape = SHAPE_SET
    elif issubclass(origin, FrozenSet):
        # Create self validators
        get_validators = getattr(self.type_, '__get_validators__', None)
        if get_validators:
            self.class_validators.update(
                {f'frozenset_{i}': Validator(validator, pre=True) for i, validator in enumerate(get_validators())}
            )

        self.type_ = get_args(self.type_)[0]
        self.shape = SHAPE_FROZENSET
    elif issubclass(origin, Deque):
        self.type_ = get_args(self.type_)[0]
        self.shape = SHAPE_DEQUE
    elif issubclass(origin, Sequence):
        self.type_ = get_args(self.type_)[0]
        self.shape = SHAPE_SEQUENCE
    # priority to most common mapping: dict
    elif origin is dict or origin is Dict:
        self.key_field = self._create_sub_type(get_args(self.type_)[0], 'key_' + self.name, for_keys=True)
        self.type_ = get_args(self.type_)[1]
        self.shape = SHAPE_DICT
    elif issubclass(origin, DefaultDict):
        self.key_field = self._create_sub_type(get_args(self.type_)[0], 'key_' + self.name, for_keys=True)
        self.type_ = get_args(self.type_)[1]
        self.shape = SHAPE_DEFAULTDICT
    elif issubclass(origin, Counter):
        self.key_field = self._create_sub_type(get_args(self.type_)[0], 'key_' + self.name, for_keys=True)
        # Counter values are always ints
        self.type_ = int
        self.shape = SHAPE_COUNTER
    elif issubclass(origin, Mapping):
        self.key_field = self._create_sub_type(get_args(self.type_)[0], 'key_' + self.name, for_keys=True)
        self.type_ = get_args(self.type_)[1]
        self.shape = SHAPE_MAPPING
    # Equality check as almost everything inherits from Iterable, including str
    # check for Iterable and CollectionsIterable, as it could receive one even when declared with the other
    elif origin in {Iterable, CollectionsIterable}:
        self.type_ = get_args(self.type_)[0]
        self.shape = SHAPE_ITERABLE
        self.sub_fields = [self._create_sub_type(self.type_, f'{self.name}_type')]
    elif issubclass(origin, Type):  # type: ignore
        return
    elif hasattr(origin, '__get_validators__') or self.model_config.arbitrary_types_allowed:
        # Is a Pydantic-compatible generic that handles itself
        # or we have arbitrary_types_allowed = True
        self.shape = SHAPE_GENERIC
        self.sub_fields = [self._create_sub_type(t, f'{self.name}_{i}') for i, t in enumerate(get_args(self.type_))]
        self.type_ = origin
        return
    else:
        raise TypeError(f'Fields of type "{origin}" are not supported.')

    # type_ has been refined eg. as the type of a List and sub_fields needs to be populated
    self.sub_fields = [self._create_sub_type(self.type_, '_' + self.name)]
def prepare_discriminated_union_sub_fields(self) -> None:
    """
    Prepare the mapping <discriminator key> -> <ModelField> and update `sub_fields`
    Note that this process can be aborted if a `ForwardRef` is encountered
    """
    assert self.discriminator_key is not None

    if self.type_.__class__ is DeferredType:
        return

    assert self.sub_fields is not None
    mapping: Dict[str, 'ModelField'] = {}
    aliases: Set[str] = set()

    for sub_field in self.sub_fields:
        sub_type = sub_field.type_
        if sub_type.__class__ is ForwardRef:
            # Stopping everything...will need to call `update_forward_refs`
            return

        alias, discriminator_values = get_discriminator_alias_and_values(sub_type, self.discriminator_key)
        aliases.add(alias)
        # every declared discriminator value routes to this sub field
        mapping.update((value, sub_field) for value in discriminator_values)

    self.sub_fields_mapping = mapping
    self.discriminator_alias = get_unique_discriminator_alias(aliases, self.discriminator_key)
def _create_sub_type(self, type_: Type[Any], name: str, *, for_keys: bool = False) -> 'ModelField':
    """Build a child field used to validate sub-items (or mapping keys when ``for_keys``)."""
    if for_keys:
        item_validators = None
    else:
        # validators for sub items should not have `each_item` as we want to check only the first sublevel
        item_validators = {
            validator_name: Validator(
                func=validator.func,
                pre=validator.pre,
                each_item=False,
                always=validator.always,
                check_fields=validator.check_fields,
                skip_on_failure=validator.skip_on_failure,
            )
            for validator_name, validator in self.class_validators.items()
            if validator.each_item
        }

    field_info, _ = self._get_field_info(name, type_, None, self.model_config)

    return self.__class__(
        type_=type_,
        name=name,
        class_validators=item_validators,
        model_config=self.model_config,
        field_info=field_info,
    )
def populate_validators(self) -> None:
    """
    Prepare self.pre_validators, self.validators, and self.post_validators based on self.type_'s __get_validators__
    and class validators. This method should be idempotent, e.g. it should be safe to call multiple times
    without mis-configuring the field.
    """
    self.validate_always = getattr(self.type_, 'validate_always', False) or any(
        v.always for v in self.class_validators.values()
    )

    class_validators_ = self.class_validators.values()
    if not self.sub_fields or self.shape == SHAPE_GENERIC:
        # ordering matters: each_item pre validators, then the type's own
        # (or discovered) validators, then each_item post validators
        get_validators = getattr(self.type_, '__get_validators__', None)
        v_funcs = (
            *[v.func for v in class_validators_ if v.each_item and v.pre],
            *(get_validators() if get_validators else list(find_validators(self.type_, self.model_config))),
            *[v.func for v in class_validators_ if v.each_item and not v.pre],
        )
        self.validators = prep_validators(v_funcs)

    # rebuilt from scratch on every call to stay idempotent
    self.pre_validators = []
    self.post_validators = []

    if self.field_info and self.field_info.const:
        self.post_validators.append(make_generic_validator(constant_validator))

    if class_validators_:
        self.pre_validators += prep_validators(v.func for v in class_validators_ if not v.each_item and v.pre)
        self.post_validators += prep_validators(v.func for v in class_validators_ if not v.each_item and not v.pre)

    if self.parse_json:
        self.pre_validators.append(make_generic_validator(validate_json))

    # normalise empty lists to None so `validate` can cheaply test them
    self.pre_validators = self.pre_validators or None
    self.post_validators = self.post_validators or None
def validate(
    self, v: Any, values: Dict[str, Any], *, loc: 'LocStr', cls: Optional['ModelOrDc'] = None
) -> 'ValidateReturn':
    """
    Validate ``v`` against this field and return ``(converted_value, errors)``;
    ``errors`` is ``None`` when validation succeeded.

    :param values: previously-validated sibling values, passed through to validators
    :param loc: error location (key or tuple of keys) for reporting
    :param cls: owning model class, forwarded to validators
    :raises ConfigError: if the field's type is still an unresolved ForwardRef
    """
    assert self.type_.__class__ is not DeferredType

    if self.type_.__class__ is ForwardRef:
        assert cls is not None
        raise ConfigError(
            f'field "{self.name}" not yet prepared so type is still a ForwardRef, '
            f'you might need to call {cls.__name__}.update_forward_refs().'
        )

    errors: Optional['ErrorList']
    if self.pre_validators:
        v, errors = self._apply_validators(v, values, loc, cls, self.pre_validators)
        if errors:
            return v, errors

    if v is None:
        if is_none_type(self.type_):
            # keep validating
            pass
        elif self.allow_none:
            # None is accepted as-is; only post validators still run
            if self.post_validators:
                return self._apply_validators(v, values, loc, cls, self.post_validators)
            else:
                return None, None
        else:
            return v, ErrorWrapper(NoneIsNotAllowedError(), loc)

    # dispatch on the shape computed by _type_analysis
    if self.shape == SHAPE_SINGLETON:
        v, errors = self._validate_singleton(v, values, loc, cls)
    elif self.shape in MAPPING_LIKE_SHAPES:
        v, errors = self._validate_mapping_like(v, values, loc, cls)
    elif self.shape == SHAPE_TUPLE:
        v, errors = self._validate_tuple(v, values, loc, cls)
    elif self.shape == SHAPE_ITERABLE:
        v, errors = self._validate_iterable(v, values, loc, cls)
    elif self.shape == SHAPE_GENERIC:
        v, errors = self._apply_validators(v, values, loc, cls, self.validators)
    else:
        # sequence, list, set, generator, tuple with ellipsis, frozen set
        v, errors = self._validate_sequence_like(v, values, loc, cls)

    if not errors and self.post_validators:
        v, errors = self._apply_validators(v, values, loc, cls, self.post_validators)
    return v, errors
def _validate_sequence_like(  # noqa: C901 (ignore complexity)
    self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc']
) -> 'ValidateReturn':
    """
    Validate sequence-like containers: lists, tuples, sets and generators
    Note that large if-else blocks are necessary to enable Cython
    optimization, which is why we disable the complexity check above.
    """
    if not sequence_like(v):
        # pick the error matching the declared shape
        e: errors_.PydanticTypeError
        if self.shape == SHAPE_LIST:
            e = errors_.ListError()
        elif self.shape in (SHAPE_TUPLE, SHAPE_TUPLE_ELLIPSIS):
            e = errors_.TupleError()
        elif self.shape == SHAPE_SET:
            e = errors_.SetError()
        elif self.shape == SHAPE_FROZENSET:
            e = errors_.FrozenSetError()
        else:
            e = errors_.SequenceError()
        return v, ErrorWrapper(e, loc)

    loc = loc if isinstance(loc, tuple) else (loc,)
    result = []
    errors: List[ErrorList] = []
    # validate each item individually, appending its index to the error location
    for i, v_ in enumerate(v):
        v_loc = *loc, i
        r, ee = self._validate_singleton(v_, values, v_loc, cls)
        if ee:
            errors.append(ee)
        else:
            result.append(r)

    if errors:
        return v, errors

    # convert back to a container matching the declared shape (and, for
    # SHAPE_SEQUENCE, the input's own type)
    converted: Union[List[Any], Set[Any], FrozenSet[Any], Tuple[Any, ...], Iterator[Any], Deque[Any]] = result

    if self.shape == SHAPE_SET:
        converted = set(result)
    elif self.shape == SHAPE_FROZENSET:
        converted = frozenset(result)
    elif self.shape == SHAPE_TUPLE_ELLIPSIS:
        converted = tuple(result)
    elif self.shape == SHAPE_DEQUE:
        converted = deque(result, maxlen=getattr(v, 'maxlen', None))
    elif self.shape == SHAPE_SEQUENCE:
        if isinstance(v, tuple):
            converted = tuple(result)
        elif isinstance(v, set):
            converted = set(result)
        elif isinstance(v, Generator):
            converted = iter(result)
        elif isinstance(v, deque):
            converted = deque(result, maxlen=getattr(v, 'maxlen', None))
    return converted, None
def _validate_iterable(
    self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc']
) -> 'ValidateReturn':
    """
    Validate Iterables.

    This intentionally doesn't validate values to allow infinite generators.
    """
    try:
        return iter(v), None
    except TypeError:
        return v, ErrorWrapper(errors_.IterableError(), loc)
def _validate_tuple(
    self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc']
) -> 'ValidateReturn':
    """Validate a fixed-length (heterogeneous) tuple, item ``i`` against ``sub_fields[i]``."""
    e: Optional[Exception] = None
    if not sequence_like(v):
        e = errors_.TupleError()
    else:
        actual_length, expected_length = len(v), len(self.sub_fields)  # type: ignore
        if actual_length != expected_length:
            e = errors_.TupleLengthError(actual_length=actual_length, expected_length=expected_length)

    if e:
        return v, ErrorWrapper(e, loc)

    if not isinstance(loc, tuple):
        loc = (loc,)
    validated = []
    failures: List[ErrorList] = []
    for index, (item, sub_field) in enumerate(zip(v, self.sub_fields)):  # type: ignore
        value, error = sub_field.validate(item, values, loc=(*loc, index), cls=cls)
        if error:
            failures.append(error)
        else:
            validated.append(value)

    if failures:
        return v, failures
    return tuple(validated), None
def _validate_mapping_like(
    self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc']
) -> 'ValidateReturn':
    """
    Validate dict-like values: each key through ``self.key_field`` and each
    value through this field's own singleton validation, then rebuild a
    container matching the declared shape.
    """
    try:
        v_iter = dict_validator(v)
    except TypeError as exc:
        return v, ErrorWrapper(exc, loc)

    loc = loc if isinstance(loc, tuple) else (loc,)
    result, errors = {}, []
    for k, v_ in v_iter.items():
        # key errors are reported under the special '__key__' location
        v_loc = *loc, '__key__'
        key_result, key_errors = self.key_field.validate(k, values, loc=v_loc, cls=cls)  # type: ignore
        if key_errors:
            errors.append(key_errors)
            continue

        # value errors are reported under the key itself
        v_loc = *loc, k
        value_result, value_errors = self._validate_singleton(v_, values, v_loc, cls)
        if value_errors:
            errors.append(value_errors)
            continue

        result[key_result] = value_result
    if errors:
        return v, errors
    elif self.shape == SHAPE_DICT:
        return result, None
    elif self.shape == SHAPE_DEFAULTDICT:
        return defaultdict(self.type_, result), None
    elif self.shape == SHAPE_COUNTER:
        return CollectionCounter(result), None
    else:
        # SHAPE_MAPPING: try to preserve the input's own mapping class
        return self._get_mapping_value(v, result), None
def _get_mapping_value(self, original: T, converted: Dict[Any, Any]) -> Union[T, Dict[Any, Any]]:
    """
    When type is `Mapping[KT, KV]` (or another unsupported mapping), we try to avoid
    coercing to `dict` unwillingly.
    """
    mapping_cls = original.__class__
    if mapping_cls == dict or mapping_cls == Dict:
        return converted
    if mapping_cls in {defaultdict, DefaultDict}:
        return defaultdict(self.type_, converted)
    try:
        # Counter, OrderedDict, UserDict, ...
        return mapping_cls(converted)  # type: ignore
    except TypeError:
        raise RuntimeError(f'Could not convert dictionary to {mapping_cls.__name__!r}') from None
def _validate_singleton(
    self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc']
) -> 'ValidateReturn':
    """
    Validate a scalar (non-container) value. For Union fields, attempts each
    sub field; with ``smart_union`` enabled, exact-type and isinstance passes
    run before any coercion is tried.
    """
    if self.sub_fields:
        if self.discriminator_key is not None:
            return self._validate_discriminated_union(v, values, loc, cls)

        errors = []

        if self.model_config.smart_union and is_union(get_origin(self.type_)):
            # 1st pass: check if the value is an exact instance of one of the Union types
            # (e.g. to avoid coercing a bool into an int)
            for field in self.sub_fields:
                if v.__class__ is field.outer_type_:
                    return v, None

            # 2nd pass: check if the value is an instance of any subclass of the Union types
            for field in self.sub_fields:
                # This whole logic will be improved later on to support more complex `isinstance` checks
                # It will probably be done once a strict mode is added and be something like:
                # ```
                #     value, error = field.validate(v, values, strict=True)
                #     if error is None:
                #         return value, None
                # ```
                try:
                    if isinstance(v, field.outer_type_):
                        return v, None
                except TypeError:
                    # compound type
                    if lenient_isinstance(v, get_origin(field.outer_type_)):
                        value, error = field.validate(v, values, loc=loc, cls=cls)
                        if not error:
                            return value, None

        # 1st pass by default or 3rd pass with `smart_union` enabled:
        # check if the value can be coerced into one of the Union types
        for field in self.sub_fields:
            value, error = field.validate(v, values, loc=loc, cls=cls)
            if error:
                errors.append(error)
            else:
                return value, None
        # no variant accepted the value: report every variant's error
        return v, errors
    else:
        return self._apply_validators(v, values, loc, cls, self.validators)
def _validate_discriminated_union(
    self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc']
) -> 'ValidateReturn':
    """
    Validate a tagged (discriminated) Union: read the discriminator value
    from ``v`` and delegate to the matching sub field only.
    """
    assert self.discriminator_key is not None
    assert self.discriminator_alias is not None

    try:
        try:
            # mapping access: prefer the alias, optionally fall back to the field name
            discriminator_value = v[self.discriminator_alias]
        except KeyError:
            if self.model_config.allow_population_by_field_name:
                discriminator_value = v[self.discriminator_key]
            else:
                raise
    except KeyError:
        return v, ErrorWrapper(MissingDiscriminator(discriminator_key=self.discriminator_key), loc)
    except TypeError:
        try:
            # BaseModel or dataclass
            discriminator_value = getattr(v, self.discriminator_key)
        except (AttributeError, TypeError):
            return v, ErrorWrapper(MissingDiscriminator(discriminator_key=self.discriminator_key), loc)

    if self.sub_fields_mapping is None:
        assert cls is not None
        raise ConfigError(
            f'field "{self.name}" not yet prepared so type is still a ForwardRef, '
            f'you might need to call {cls.__name__}.update_forward_refs().'
        )

    try:
        sub_field = self.sub_fields_mapping[discriminator_value]
    except (KeyError, TypeError):
        # KeyError: `discriminator_value` is not in the dictionary.
        # TypeError: `discriminator_value` is unhashable.
        assert self.sub_fields_mapping is not None
        return v, ErrorWrapper(
            InvalidDiscriminator(
                discriminator_key=self.discriminator_key,
                discriminator_value=discriminator_value,
                allowed_values=list(self.sub_fields_mapping),
            ),
            loc,
        )
    else:
        if not isinstance(loc, tuple):
            loc = (loc,)
        # nest errors under the chosen variant's type name
        return sub_field.validate(v, values, loc=(*loc, display_as_type(sub_field.type_)), cls=cls)
def _apply_validators(
    self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc'], validators: 'ValidatorsList'
) -> 'ValidateReturn':
    """Run ``validators`` in order over ``v``, short-circuiting on the first failure."""
    for validate_ in validators:
        try:
            v = validate_(cls, v, values, self, self.model_config)
        except (ValueError, TypeError, AssertionError) as exc:
            # validators signal failure by raising; wrap into an ErrorWrapper
            return v, ErrorWrapper(exc, loc)
    return v, None
def is_complex(self) -> bool:
    """
    Whether the field is "complex" eg. env variables should be parsed as JSON.
    """
    from pydantic.v1.main import BaseModel

    if self.shape != SHAPE_SINGLETON:
        return True
    if hasattr(self.type_, '__pydantic_model__'):
        return True
    return lenient_issubclass(self.type_, (BaseModel, list, set, frozenset, dict))
def _type_display(self) -> PyObjectStr:
    """Human-readable rendering of this field's type, used by ``__repr_args__``."""
    t = display_as_type(self.type_)

    if self.shape in MAPPING_LIKE_SHAPES:
        t = f'Mapping[{display_as_type(self.key_field.type_)}, {t}]'  # type: ignore
    elif self.shape == SHAPE_TUPLE:
        inner = ', '.join(display_as_type(f.type_) for f in self.sub_fields)  # type: ignore
        t = f'Tuple[{inner}]'
    elif self.shape == SHAPE_GENERIC:
        assert self.sub_fields
        inner = ', '.join(display_as_type(f.type_) for f in self.sub_fields)
        t = f'{display_as_type(self.type_)}[{inner}]'
    elif self.shape != SHAPE_SINGLETON:
        t = SHAPE_NAME_LOOKUP[self.shape].format(t)

    if self.allow_none and (self.shape != SHAPE_SINGLETON or not self.sub_fields):
        t = f'Optional[{t}]'
    return PyObjectStr(t)
def __repr_args__(self) -> 'ReprArgs':
    """Name/value pairs shown in this field's ``repr``."""
    entries = [('name', self.name), ('type', self._type_display()), ('required', self.required)]

    if not self.required:
        if self.default_factory is not None:
            entries.append(('default_factory', f'<function {self.default_factory.__name__}>'))
        else:
            entries.append(('default', self.default))

    if self.alt_alias:
        entries.append(('alias', self.alias))
    return entries
| ModelField |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategies.py | {
"start": 21275,
"end": 40024
} | class ____(
    _AbstractRelationshipLoader, util.MemoizedSlots, log.Identified
):
    """Provide loading behavior for a :class:`.Relationship`
    with "lazy=True", that is loads when first accessed.

    """

    __slots__ = (
        "_lazywhere",
        "_rev_lazywhere",
        "_lazyload_reverse_option",
        "_order_by",
        "use_get",
        "is_aliased_class",
        "_bind_to_col",
        "_equated_columns",
        "_rev_bind_to_col",
        "_rev_equated_columns",
        "_simple_lazy_clause",
        "_raise_always",
        "_raise_on_sql",
    )

    # forward lazy-load criterion plus the bindparam-key -> parent-column map
    # that populates it at load time
    _lazywhere: ColumnElement[bool]
    _bind_to_col: Dict[str, ColumnElement[Any]]
    # same pair generated with reverse_direction=True
    _rev_lazywhere: ColumnElement[bool]
    _rev_bind_to_col: Dict[str, ColumnElement[Any]]

    parent_property: RelationshipProperty[Any]

    def __init__(
        self, parent: RelationshipProperty[Any], strategy_key: Tuple[Any, ...]
    ):
        super().__init__(parent, strategy_key)
        # lazy="raise" fails on any lazy access; lazy="raise_on_sql" only
        # fails when the load would actually emit SQL
        self._raise_always = self.strategy_opts["lazy"] == "raise"
        self._raise_on_sql = self.strategy_opts["lazy"] == "raise_on_sql"

        self.is_aliased_class = inspect(self.entity).is_aliased_class

        join_condition = self.parent_property._join_condition
        (
            self._lazywhere,
            self._bind_to_col,
            self._equated_columns,
        ) = join_condition.create_lazy_clause()

        (
            self._rev_lazywhere,
            self._rev_bind_to_col,
            self._rev_equated_columns,
        ) = join_condition.create_lazy_clause(reverse_direction=True)

        if self.parent_property.order_by:
            self._order_by = util.to_list(self.parent_property.order_by)
        else:
            self._order_by = None

        self.logger.info("%s lazy loading clause %s", self, self._lazywhere)

        # determine if our "lazywhere" clause is the same as the mapper's
        # get() clause. then we can just use mapper.get()
        #
        # TODO: the "not self.uselist" can be taken out entirely; a m2o
        # load that populates for a list (very unusual, but is possible with
        # the API) can still set for "None" and the attribute system will
        # populate as an empty list.
        self.use_get = (
            not self.is_aliased_class
            and not self.uselist
            and self.entity._get_clause[0].compare(
                self._lazywhere,
                use_proxies=True,
                compare_keys=False,
                equivalents=self.mapper._equivalent_columns,
            )
        )

        if self.use_get:
            # also register equivalent columns so the primary-key lookup
            # succeeds regardless of which equivalent column is present
            for col in list(self._equated_columns):
                if col in self.mapper._equivalent_columns:
                    for c in self.mapper._equivalent_columns[col]:
                        self._equated_columns[c] = self._equated_columns[col]

            self.logger.info(
                "%s will use Session.get() to optimize instance loads", self
            )

    def init_class_attribute(self, mapper):
        """Install the class-level lazy-loading attribute on the mapped class."""
        self.is_class_level = True

        _legacy_inactive_history_style = (
            self.parent_property._legacy_inactive_history_style
        )

        if self.parent_property.active_history:
            active_history = True
            _deferred_history = False
        elif (
            self.parent_property.direction is not interfaces.MANYTOONE
            or not self.use_get
        ):
            if _legacy_inactive_history_style:
                active_history = True
                _deferred_history = False
            else:
                active_history = False
                _deferred_history = True
        else:
            active_history = _deferred_history = False

        _register_attribute(
            self.parent_property,
            mapper,
            useobject=True,
            callable_=self._load_for_state,
            typecallable=self.parent_property.collection_class,
            active_history=active_history,
            _deferred_history=_deferred_history,
        )

    def _memoized_attr__simple_lazy_clause(self):
        """Memoized ``(criterion, params)`` pair derived from the lazy clause."""
        lazywhere = self._lazywhere

        criterion, bind_to_col = (lazywhere, self._bind_to_col)

        params = []

        # first pass: mutate the existing bindparams in place so they are
        # not treated as cache-unique
        def visit_bindparam(bindparam):
            bindparam.unique = False

        visitors.traverse(criterion, {}, {"bindparam": visit_bindparam})

        # second pass (the name is deliberately rebound): for each bindparam
        # record either the parent column it maps to, or its literal value,
        # while cloning the criterion
        def visit_bindparam(bindparam):
            if bindparam._identifying_key in bind_to_col:
                params.append(
                    (
                        bindparam.key,
                        bind_to_col[bindparam._identifying_key],
                        None,
                    )
                )
            elif bindparam.callable is None:
                params.append((bindparam.key, None, bindparam.value))

        criterion = visitors.cloned_traverse(
            criterion, {}, {"bindparam": visit_bindparam}
        )

        return criterion, params

    def _generate_lazy_clause(self, state, passive):
        """Return the criterion and its parameter dict for a specific parent state.

        With ``state=None``, the criterion is adapted to compare against NULL.
        """
        criterion, param_keys = self._simple_lazy_clause

        if state is None:
            return sql_util.adapt_criterion_to_null(
                criterion, [key for key, ident, value in param_keys]
            )

        mapper = self.parent_property.parent

        o = state.obj()  # strong ref
        dict_ = attributes.instance_dict(o)

        if passive & PassiveFlag.INIT_OK:
            passive ^= PassiveFlag.INIT_OK

        params = {}
        for key, ident, value in param_keys:
            if ident is not None:
                if passive and passive & PassiveFlag.LOAD_AGAINST_COMMITTED:
                    value = mapper._get_committed_state_attr_by_column(
                        state, dict_, ident, passive
                    )
                else:
                    value = mapper._get_state_attr_by_column(
                        state, dict_, ident, passive
                    )

            params[key] = value

        return criterion, params

    def _invoke_raise_load(self, state, passive, lazy):
        # raised for lazy="raise" / lazy="raise_on_sql" configurations
        raise sa_exc.InvalidRequestError(
            "'%s' is not available due to lazy='%s'" % (self, lazy)
        )

    def _load_for_state(
        self,
        state,
        passive,
        loadopt=None,
        extra_criteria=(),
        extra_options=(),
        alternate_effective_path=None,
        execution_options=util.EMPTY_DICT,
    ):
        """Attribute-system callable: load the related value for ``state``,
        honoring the ``passive`` flags (may return a status symbol instead of
        emitting SQL)."""
        if not state.key and (
            (
                not self.parent_property.load_on_pending
                and not state._load_pending
            )
            or not state.session_id
        ):
            return LoaderCallableStatus.ATTR_EMPTY

        pending = not state.key
        primary_key_identity = None

        use_get = self.use_get and (not loadopt or not loadopt._extra_criteria)

        if (not passive & PassiveFlag.SQL_OK and not use_get) or (
            not passive & attributes.NON_PERSISTENT_OK and pending
        ):
            return LoaderCallableStatus.PASSIVE_NO_RESULT

        if (
            # we were given lazy="raise"
            self._raise_always
            # the no_raise history-related flag was not passed
            and not passive & PassiveFlag.NO_RAISE
            and (
                # if we are use_get and related_object_ok is disabled,
                # which means we are at most looking in the identity map
                # for history purposes or otherwise returning
                # PASSIVE_NO_RESULT, don't raise.  This is also a
                # history-related flag
                not use_get
                or passive & PassiveFlag.RELATED_OBJECT_OK
            )
        ):
            self._invoke_raise_load(state, passive, "raise")

        session = _state_session(state)
        if not session:
            if passive & PassiveFlag.NO_RAISE:
                return LoaderCallableStatus.PASSIVE_NO_RESULT

            raise orm_exc.DetachedInstanceError(
                "Parent instance %s is not bound to a Session; "
                "lazy load operation of attribute '%s' cannot proceed"
                % (orm_util.state_str(state), self.key)
            )

        # if we have a simple primary key load, check the
        # identity map without generating a Query at all
        if use_get:
            primary_key_identity = self._get_ident_for_use_get(
                session, state, passive
            )
            if LoaderCallableStatus.PASSIVE_NO_RESULT in primary_key_identity:
                return LoaderCallableStatus.PASSIVE_NO_RESULT
            elif LoaderCallableStatus.NEVER_SET in primary_key_identity:
                return LoaderCallableStatus.NEVER_SET

            # test for None alone in primary_key_identity based on
            # allow_partial_pks preference.  PASSIVE_NO_RESULT and NEVER_SET
            # have already been tested above
            if not self.mapper.allow_partial_pks:
                if _none_only_set.intersection(primary_key_identity):
                    return None
            else:
                if _none_only_set.issuperset(primary_key_identity):
                    return None

            if (
                self.key in state.dict
                and not passive & PassiveFlag.DEFERRED_HISTORY_LOAD
            ):
                return LoaderCallableStatus.ATTR_WAS_SET

            # look for this identity in the identity map.  Delegate to the
            # Query class in use, as it may have special rules for how it
            # does this, including how it decides what the correct
            # identity_token would be for this identity.
            instance = session._identity_lookup(
                self.entity,
                primary_key_identity,
                passive=passive,
                lazy_loaded_from=state,
            )

            if instance is not None:
                if instance is LoaderCallableStatus.PASSIVE_CLASS_MISMATCH:
                    return None
                else:
                    return instance
            elif (
                not passive & PassiveFlag.SQL_OK
                or not passive & PassiveFlag.RELATED_OBJECT_OK
            ):
                return LoaderCallableStatus.PASSIVE_NO_RESULT

        return self._emit_lazyload(
            session,
            state,
            primary_key_identity,
            passive,
            loadopt,
            extra_criteria,
            extra_options,
            alternate_effective_path,
            execution_options,
        )

    def _get_ident_for_use_get(self, session, state, passive):
        """Assemble the related primary key identity from the parent's columns."""
        instance_mapper = state.manager.mapper

        if passive & PassiveFlag.LOAD_AGAINST_COMMITTED:
            get_attr = instance_mapper._get_committed_state_attr_by_column
        else:
            get_attr = instance_mapper._get_state_attr_by_column

        dict_ = state.dict

        return [
            get_attr(state, dict_, self._equated_columns[pk], passive=passive)
            for pk in self.mapper.primary_key
        ]

    @util.preload_module("sqlalchemy.orm.strategy_options")
    def _emit_lazyload(
        self,
        session,
        state,
        primary_key_identity,
        passive,
        loadopt,
        extra_criteria,
        extra_options,
        alternate_effective_path,
        execution_options,
    ):
        """Build and execute the SELECT that performs the lazy load."""
        strategy_options = util.preloaded.orm_strategy_options

        clauseelement = self.entity.__clause_element__()
        stmt = Select._create_raw_select(
            _raw_columns=[clauseelement],
            _propagate_attrs=clauseelement._propagate_attrs,
            _compile_options=_ORMCompileState.default_compile_options,
        )
        load_options = QueryContext.default_load_options

        load_options += {
            "_invoke_all_eagers": False,
            "_lazy_loaded_from": state,
        }

        if self.parent_property.secondary is not None:
            # many-to-many: join through the association table
            stmt = stmt.select_from(
                self.mapper, self.parent_property.secondary
            )

        pending = not state.key

        # don't autoflush on pending
        if pending or passive & attributes.NO_AUTOFLUSH:
            stmt._execution_options = util.immutabledict({"autoflush": False})

        use_get = self.use_get

        if state.load_options or (loadopt and loadopt._extra_criteria):
            if alternate_effective_path is None:
                effective_path = state.load_path[self.parent_property]
            else:
                effective_path = alternate_effective_path[self.parent_property]

            opts = state.load_options

            if loadopt and loadopt._extra_criteria:
                # extra criteria rules out the plain primary-key get()
                use_get = False
                opts += (
                    orm_util.LoaderCriteriaOption(self.entity, extra_criteria),
                )

            stmt._with_options = opts
        elif alternate_effective_path is None:
            # this path is used if there are not already any options
            # in the query, but an event may want to add them
            effective_path = state.mapper._path_registry[self.parent_property]
        else:
            # added by immediateloader
            effective_path = alternate_effective_path[self.parent_property]

        if extra_options:
            stmt._with_options += extra_options

        stmt._compile_options += {"_current_path": effective_path}

        if use_get:
            if self._raise_on_sql and not passive & PassiveFlag.NO_RAISE:
                self._invoke_raise_load(state, passive, "raise_on_sql")

            return loading._load_on_pk_identity(
                session,
                stmt,
                primary_key_identity,
                load_options=load_options,
                execution_options=execution_options,
            )

        if self._order_by:
            stmt._order_by_clauses = self._order_by

        def _lazyload_reverse(compile_context):
            for rev in self.parent_property._reverse_property:
                # reverse props that are MANYTOONE are loading *this*
                # object from get(), so don't need to eager out to those.
                if (
                    rev.direction is interfaces.MANYTOONE
                    and rev._use_get
                    and not isinstance(rev.strategy, _LazyLoader)
                ):
                    strategy_options.Load._construct_for_existing_path(
                        compile_context.compile_options._current_path[
                            rev.parent
                        ]
                    ).lazyload(rev).process_compile_state(compile_context)

        stmt = stmt._add_compile_state_func(
            _lazyload_reverse, self.parent_property
        )

        lazy_clause, params = self._generate_lazy_clause(state, passive)

        if execution_options:
            execution_options = util.EMPTY_DICT.merge_with(
                execution_options,
                {
                    "_sa_orm_load_options": load_options,
                },
            )
        else:
            execution_options = {
                "_sa_orm_load_options": load_options,
            }

        if (
            self.key in state.dict
            and not passive & PassiveFlag.DEFERRED_HISTORY_LOAD
        ):
            return LoaderCallableStatus.ATTR_WAS_SET

        if pending:
            # a pending parent with unset/None join values can't match anything
            if util.has_intersection(orm_util._none_set, params.values()):
                return None

        elif util.has_intersection(orm_util._never_set, params.values()):
            return None

        if self._raise_on_sql and not passive & PassiveFlag.NO_RAISE:
            self._invoke_raise_load(state, passive, "raise_on_sql")

        stmt._where_criteria = (lazy_clause,)

        result = session.execute(
            stmt, params, execution_options=execution_options
        )

        result = result.unique().scalars().all()

        if self.uselist:
            return result
        else:
            l = len(result)
            if l:
                if l > 1:
                    util.warn(
                        "Multiple rows returned with "
                        "uselist=False for lazily-loaded attribute '%s' "
                        % self.parent_property
                    )

                return result[0]
            else:
                return None

    def create_row_processor(
        self,
        context,
        query_entity,
        path,
        loadopt,
        mapper,
        result,
        adapter,
        populators,
    ):
        """Install per-row handling: either a per-instance lazy callable, a
        reset of existing state, or (for targeted user refresh) an immediate
        load."""
        key = self.key

        if (
            context.load_options._is_user_refresh
            and context.query._compile_options._only_load_props
            and self.key in context.query._compile_options._only_load_props
        ):
            return self._immediateload_create_row_processor(
                context,
                query_entity,
                path,
                loadopt,
                mapper,
                result,
                adapter,
                populators,
            )

        if not self.is_class_level or (loadopt and loadopt._extra_criteria):
            # we are not the primary manager for this attribute
            # on this class - set up a
            # per-instance lazyloader, which will override the
            # class-level behavior.
            # this currently only happens when using a
            # "lazyload" option on a "no load"
            # attribute - "eager" attributes always have a
            # class-level lazyloader installed.
            set_lazy_callable = (
                InstanceState._instance_level_callable_processor
            )(
                mapper.class_manager,
                _LoadLazyAttribute(
                    key,
                    self,
                    loadopt,
                    (
                        loadopt._generate_extra_criteria(context)
                        if loadopt._extra_criteria
                        else None
                    ),
                ),
                key,
            )

            populators["new"].append((self.key, set_lazy_callable))
        elif context.populate_existing or mapper.always_refresh:

            def reset_for_lazy_callable(state, dict_, row):
                # we are the primary manager for this attribute on
                # this class - reset its
                # per-instance attribute state, so that the class-level
                # lazy loader is
                # executed when next referenced on this instance.
                # this is needed in
                # populate_existing() types of scenarios to reset
                # any existing state.
                state._reset(dict_, key)

            populators["new"].append((self.key, reset_for_lazy_callable))
| _LazyLoader |
python | cython__cython | Cython/Plex/Lexicons.py | {
"start": 238,
"end": 559
} | class ____:
"""
This class is used as part of a Plex.Lexicon specification to
introduce a user-defined state.
Constructor:
State(name, token_specifications)
"""
name = None
tokens = None
def __init__(self, name, tokens):
self.name = name
self.tokens = tokens
| State |
python | apache__airflow | airflow-core/src/airflow/providers_manager.py | {
"start": 7299,
"end": 12298
} | class ____(NamedTuple):
"""Connection Form Widget information."""
hook_class_name: str
package_name: str
field: Any
field_name: str
is_sensitive: bool
def log_optional_feature_disabled(class_name, e, provider_package):
"""Log optional feature disabled."""
log.debug(
"Optional feature disabled on exception when importing '%s' from '%s' package",
class_name,
provider_package,
exc_info=e,
)
log.info(
"Optional provider feature disabled when importing '%s' from '%s' package",
class_name,
provider_package,
)
def log_import_warning(class_name, e, provider_package):
"""Log import warning."""
log.warning(
"Exception when importing '%s' from '%s' package",
class_name,
provider_package,
exc_info=e,
)
# This is a temporary measure until all community providers will add AirflowOptionalProviderFeatureException
# where they have optional features. We are going to add tests in our CI to catch all such cases and will
# fix them, but until now all "known unhandled optional feature errors" from community providers
# should be added here
KNOWN_UNHANDLED_OPTIONAL_FEATURE_ERRORS = [("apache-airflow-providers-google", "No module named 'paramiko'")]
def _correctness_check(provider_package: str, class_name: str, provider_info: ProviderInfo) -> Any:
"""
Perform coherence check on provider classes.
For apache-airflow providers - it checks if it starts with appropriate package. For all providers
it tries to import the provider - checking that there are no exceptions during importing.
It logs appropriate warning in case it detects any problems.
:param provider_package: name of the provider package
:param class_name: name of the class to import
:return the class if the class is OK, None otherwise.
"""
if not _check_builtin_provider_prefix(provider_package, class_name):
return None
try:
imported_class = import_string(class_name)
except AirflowOptionalProviderFeatureException as e:
# When the provider class raises AirflowOptionalProviderFeatureException
# this is an expected case when only some classes in provider are
# available. We just log debug level here and print info message in logs so that
# the user is aware of it
log_optional_feature_disabled(class_name, e, provider_package)
return None
except ImportError as e:
if "No module named 'airflow.providers." in e.msg:
# handle cases where another provider is missing. This can only happen if
# there is an optional feature, so we log debug and print information about it
log_optional_feature_disabled(class_name, e, provider_package)
return None
for known_error in KNOWN_UNHANDLED_OPTIONAL_FEATURE_ERRORS:
# Until we convert all providers to use AirflowOptionalProviderFeatureException
# we assume any problem with importing another "provider" is because this is an
# optional feature, so we log debug and print information about it
if known_error[0] == provider_package and known_error[1] in e.msg:
log_optional_feature_disabled(class_name, e, provider_package)
return None
# But when we have no idea - we print warning to logs
log_import_warning(class_name, e, provider_package)
return None
except Exception as e:
log_import_warning(class_name, e, provider_package)
return None
return imported_class
# We want to have better control over initialization of parameters and be able to debug and test it
# So we add our own decorator
def provider_info_cache(cache_name: str) -> Callable[[Callable[PS, None]], Callable[PS, None]]:
"""
Decorate and cache provider info.
Decorator factory that create decorator that caches initialization of provider's parameters
:param cache_name: Name of the cache
"""
def provider_info_cache_decorator(func: Callable[PS, None]) -> Callable[PS, None]:
@wraps(func)
def wrapped_function(*args: PS.args, **kwargs: PS.kwargs) -> None:
providers_manager_instance = args[0]
if TYPE_CHECKING:
assert isinstance(providers_manager_instance, ProvidersManager)
if cache_name in providers_manager_instance._initialized_cache:
return
start_time = perf_counter()
log.debug("Initializing Providers Manager[%s]", cache_name)
func(*args, **kwargs)
providers_manager_instance._initialized_cache[cache_name] = True
log.debug(
"Initialization of Providers Manager[%s] took %.2f seconds",
cache_name,
perf_counter() - start_time,
)
return wrapped_function
return provider_info_cache_decorator
| ConnectionFormWidgetInfo |
python | mlflow__mlflow | mlflow/entities/trace_info_v2.py | {
"start": 1185,
"end": 6083
} | class ____(_MlflowObject):
"""Metadata about a trace.
Args:
request_id: id of the trace.
experiment_id: id of the experiment.
timestamp_ms: start time of the trace, in milliseconds.
execution_time_ms: duration of the trace, in milliseconds.
status: status of the trace.
request_metadata: Key-value pairs associated with the trace. Request metadata are designed
for immutable values like run ID associated with the trace.
tags: Tags associated with the trace. Tags are designed for mutable values like trace name,
that can be updated by the users after the trace is created, unlike request_metadata.
"""
request_id: str
experiment_id: str
timestamp_ms: int
execution_time_ms: int | None
status: TraceStatus
request_metadata: dict[str, str] = field(default_factory=dict)
tags: dict[str, str] = field(default_factory=dict)
assessments: list[Assessment] = field(default_factory=list)
def __eq__(self, other):
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
@property
def trace_id(self) -> str:
"""Returns the trace ID of the trace info."""
return self.request_id
def to_proto(self):
proto = ProtoTraceInfo()
proto.request_id = self.request_id
proto.experiment_id = self.experiment_id
proto.timestamp_ms = self.timestamp_ms
# NB: Proto setter does not support nullable fields (even with 'optional' keyword),
# so we substitute None with 0 for execution_time_ms. This should be not too confusing
# as we only put None when starting a trace i.e. the execution time is actually 0.
proto.execution_time_ms = self.execution_time_ms or 0
proto.status = self.status.to_proto()
request_metadata = []
for key, value in _truncate_request_metadata(self.request_metadata).items():
attr = ProtoTraceRequestMetadata()
attr.key = key
attr.value = value
request_metadata.append(attr)
proto.request_metadata.extend(request_metadata)
tags = []
for key, value in _truncate_tags(self.tags).items():
tag = ProtoTraceTag()
tag.key = key
tag.value = str(value)
tags.append(tag)
proto.tags.extend(tags)
return proto
@classmethod
def from_proto(cls, proto, assessments=None):
return cls(
request_id=proto.request_id,
experiment_id=proto.experiment_id,
timestamp_ms=proto.timestamp_ms,
execution_time_ms=proto.execution_time_ms,
status=TraceStatus.from_proto(proto.status),
request_metadata={attr.key: attr.value for attr in proto.request_metadata},
tags={tag.key: tag.value for tag in proto.tags},
assessments=assessments or [],
)
def to_dict(self):
"""
Convert trace info to a dictionary for persistence.
Update status field to the string value for serialization.
"""
trace_info_dict = asdict(self)
trace_info_dict["status"] = self.status.value
# Client request ID field is only added for internal use, and should not be
# serialized for V2 TraceInfo.
trace_info_dict.pop("client_request_id", None)
return trace_info_dict
@classmethod
def from_dict(cls, trace_info_dict):
"""
Convert trace info dictionary to TraceInfo object.
"""
if "status" not in trace_info_dict:
raise ValueError("status is required in trace info dictionary.")
trace_info_dict["status"] = TraceStatus(trace_info_dict["status"])
return cls(**trace_info_dict)
def to_v3(self, request: str | None = None, response: str | None = None) -> TraceInfo:
return TraceInfo(
trace_id=self.request_id,
trace_location=TraceLocation.from_experiment_id(self.experiment_id),
request_preview=request,
response_preview=response,
request_time=self.timestamp_ms,
execution_duration=self.execution_time_ms,
state=self.status.to_state(),
trace_metadata=self.request_metadata.copy(),
tags=self.tags,
assessments=self.assessments,
)
@classmethod
def from_v3(cls, trace_info: TraceInfo) -> "TraceInfoV2":
return cls(
request_id=trace_info.trace_id,
experiment_id=trace_info.experiment_id,
timestamp_ms=trace_info.request_time,
execution_time_ms=trace_info.execution_duration,
status=TraceStatus.from_state(trace_info.state),
request_metadata=trace_info.trace_metadata.copy(),
tags=trace_info.tags,
)
| TraceInfoV2 |
python | apache__airflow | dev/breeze/src/airflow_breeze/global_constants.py | {
"start": 8724,
"end": 8807
} | class ____(SelectiveTestType):
PROVIDERS = "Providers"
| SelectiveProvidersTestType |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 73145,
"end": 73373
} | class ____(PassImageValidation):
def to_python(self, value):
if value.name == 'badimage.png':
raise serializers.ValidationError(self.error_messages['invalid_image'])
return value
| FailImageValidation |
python | huggingface__transformers | src/transformers/models/oneformer/modeling_oneformer.py | {
"start": 9500,
"end": 14813
} | class ____(nn.Module):
def __init__(
self, cost_class: float = 1.0, cost_mask: float = 1.0, cost_dice: float = 1.0, num_points: int = 12544
):
"""This class computes an assignment between the labels and the predictions of the network.
For efficiency reasons, the labels don't include the no_object. Because of this, in general, there are more
predictions than labels. In this case, we do a 1-to-1 matching of the best predictions, while the others are
un-matched (and thus treated as non-objects).
Params:
cost_class (float, *optional*, defaults to 1.0):
This is the relative weight of the classification error in the matching cost.
cost_mask (float, *optional*, defaults to 1.0):
This is the relative weight of the sigmoid ce loss of the binary mask in the matching cost.
cost_dice (float, *optional*, defaults to 1.0):
This is the relative weight of the dice loss of the binary mask in the matching cost
num_points (int, *optional*, defaults to 12544):
Number of points to be sampled for dice and mask loss matching cost.
"""
super().__init__()
if cost_class == 0 and cost_mask == 0 and cost_dice == 0:
raise ValueError("All costs can't be 0")
self.cost_class = cost_class
self.cost_mask = cost_mask
self.cost_dice = cost_dice
self.num_points = num_points
@torch.no_grad()
def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> list[tuple[Tensor]]:
"""Performs the matching
Params:
masks_queries_logits (`torch.Tensor`):
A tensor` of dim `batch_size, num_queries, num_labels` with the
classification logits.
class_queries_logits (`torch.Tensor`):
A tensor` of dim `batch_size, num_queries, height, width` with the
predicted masks.
class_labels (`torch.Tensor`):
A tensor` of dim `num_target_boxes` (where num_target_boxes is the number
of ground-truth objects in the target) containing the class labels.
mask_labels (`torch.Tensor`):
A tensor` of dim `num_target_boxes, height, width` containing the target
masks.
Returns:
`list[tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected labels (in order)
For each batch element, it holds:
len(index_i) = len(index_j) = min(num_queries, num_targets).
"""
indices: list[tuple[np.array]] = []
num_queries = class_queries_logits.shape[1]
preds_masks = masks_queries_logits
preds_probs = class_queries_logits
# iterate through batch size
for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels):
pred_probs = pred_probs.softmax(-1)
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
# The 1 is a constant that doesn't change the matching, it can be omitted.
cost_class = -pred_probs[:, labels]
pred_mask = pred_mask[:, None]
target_mask = target_mask[:, None].to(pred_mask.device)
# all masks share the same set of points for efficient matching!
point_coords = torch.rand(1, self.num_points, 2, device=pred_mask.device)
# get ground truth labels
target_mask = sample_point(
target_mask,
point_coords.repeat(target_mask.shape[0], 1, 1),
align_corners=False,
).squeeze(1)
pred_mask = sample_point(
pred_mask,
point_coords.repeat(pred_mask.shape[0], 1, 1),
align_corners=False,
).squeeze(1)
with torch.autocast(device_type="cuda", enabled=False):
pred_mask = pred_mask.float()
target_mask = target_mask.float()
# compute the sigmoid ce loss
cost_mask = pair_wise_sigmoid_cross_entropy_loss(pred_mask, target_mask)
# Compute the dice loss
cost_dice = pair_wise_dice_loss(pred_mask, target_mask)
# final cost matrix
cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice
cost_matrix = cost_matrix.reshape(num_queries, -1).cpu()
# do the assigmented using the hungarian algorithm in scipy
assigned_indices: tuple[np.array] = linear_sum_assignment(cost_matrix.cpu())
indices.append(assigned_indices)
# It could be stacked in one tensor
matched_indices = [
(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices
]
return matched_indices
| OneFormerHungarianMatcher |
python | huggingface__transformers | src/transformers/models/codegen/tokenization_codegen.py | {
"start": 1188,
"end": 9508
} | class ____(TokenizersBackend):
"""
Construct a CodeGen tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import CodeGenTokenizer
>>> tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
>>> tokenizer("Hello world")["input_ids"]
[15496, 995]
>>> tokenizer(" Hello world")["input_ids"]
[18435, 995]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
pad_token (`str`, *optional*):
The token used for padding, for example when batching sequences of different lengths.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (CodeGen tokenizer detect beginning of words by the preceding space).
add_bos_token (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial beginning of sentence token to the input.
return_token_type_ids (`bool`, *optional*, defaults to `False`):
Whether to return token type IDs.
vocab (`dict`, *optional*):
Custom vocabulary dictionary. If not provided, vocabulary is loaded from vocab_file.
merges (`list`, *optional*):
Custom merges list. If not provided, merges are loaded from merges_file.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
unk_token="<|endoftext|>",
bos_token="<|endoftext|>",
eos_token="<|endoftext|>",
pad_token=None,
add_prefix_space=False,
add_bos_token=False,
return_token_type_ids=False,
vocab: Optional[dict] = None,
merges: Optional[list] = None,
**kwargs,
):
self.return_token_type_ids = return_token_type_ids
if self.return_token_type_ids:
self.model_input_names.append("token_type_ids")
self.add_prefix_space = add_prefix_space
if vocab is not None:
self._vocab = (
{token: idx for idx, (token, _score) in enumerate(vocab)} if isinstance(vocab, list) else vocab
)
else:
self._vocab = {}
if merges is not None:
self._merges = merges
else:
self._merges = []
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
dropout=None,
continuing_subword_prefix="",
end_of_word_suffix="",
fuse_unk=False,
)
)
self._tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
self._tokenizer.decoder = decoders.ByteLevel()
self._tokenizer.post_processor = processors.ByteLevel(
add_prefix_space=True, use_regex=True, trim_offsets=False
)
tokenizer_object = self._tokenizer
# Set these before calling super().__init__() so the base class _post_init() can use them
self._add_bos_token = add_bos_token
self._add_eos_token = False
super().__init__(
tokenizer_object=tokenizer_object,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
add_prefix_space=add_prefix_space,
add_bos_token=add_bos_token,
return_token_type_ids=return_token_type_ids,
**kwargs,
)
self._post_init()
def _post_init(self):
self._tokenizer.post_processor = processors.ByteLevel(
add_prefix_space=True, use_regex=True, trim_offsets=False
)
# Ensure base class post-init runs to register special/extra tokens, etc.
super()._post_init()
def decode(
self,
token_ids: Union[int, list[int], np.ndarray, "torch.Tensor"],
skip_special_tokens: bool = False,
clean_up_tokenization_spaces: Optional[bool] = None,
truncate_before_pattern: Optional[list[str]] = None,
**kwargs,
) -> str:
"""
Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special
tokens and clean up tokenization spaces.
Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.
Args:
token_ids (`Union[int, List[int], np.ndarray, torch.Tensor]`):
List of tokenized input ids. Can be obtained using the `__call__` method.
skip_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not to remove special tokens in the decoding.
clean_up_tokenization_spaces (`bool`, *optional*):
Whether or not to clean up the tokenization spaces. If `None`, will default to
`self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
A list of regular expression strings that will be used to truncate the returned string. This can be
used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
kwargs (additional keyword arguments, *optional*):
Will be passed to the underlying model specific decode method.
Returns:
`str`: The decoded sentence.
"""
decoded_text = super().decode(
token_ids=token_ids,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
decoded_text = self.truncate(decoded_text, truncate_before_pattern)
return decoded_text
def truncate(self, completion, truncate_before_pattern):
def find_re(string, pattern, start_pos):
m = pattern.search(string, start_pos)
return m.start() if m else -1
terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]
prints = list(re.finditer("^print", completion, re.MULTILINE))
if len(prints) > 1:
completion = completion[: prints[1].start()]
defs = list(re.finditer("^def", completion, re.MULTILINE))
if len(defs) > 1:
completion = completion[: defs[1].start()]
start_pos = 0
terminals_pos = [
pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
]
if len(terminals_pos) > 0:
return completion[: min(terminals_pos)]
else:
return completion
__all__ = ["CodeGenTokenizer"]
| CodeGenTokenizer |
python | pandas-dev__pandas | asv_bench/benchmarks/groupby.py | {
"start": 3107,
"end": 3511
} | class ____:
def setup(self):
# GH 46527
# unsorted and non-unique index
idx = np.arange(100)[::-1]
idx = Index(np.repeat(idx, 200), name="key")
self.df = DataFrame(np.random.randn(len(idx), 10), index=idx)
def time_groupby_apply_non_unique_unsorted_index(self):
self.df.groupby("key", group_keys=False).apply(lambda x: x)
| ApplyNonUniqueUnsortedIndex |
python | tensorflow__tensorflow | tensorflow/python/ops/losses/losses_impl.py | {
"start": 1575,
"end": 48155
} | class ____:
"""Types of loss reduction.
Contains the following values:
* `NONE`: Un-reduced weighted losses with the same shape as input.
* `SUM`: Scalar sum of weighted losses.
* `MEAN`: Scalar `SUM` divided by sum of weights. DEPRECATED.
* `SUM_OVER_BATCH_SIZE`: Scalar `SUM` divided by number of elements in losses.
* `SUM_OVER_NONZERO_WEIGHTS`: Scalar `SUM` divided by number of non-zero
weights. DEPRECATED.
* `SUM_BY_NONZERO_WEIGHTS`: Same as `SUM_OVER_NONZERO_WEIGHTS`. DEPRECATED.
"""
NONE = "none"
SUM = "weighted_sum"
SUM_OVER_BATCH_SIZE = "weighted_sum_over_batch_size"
MEAN = "weighted_mean"
SUM_BY_NONZERO_WEIGHTS = "weighted_sum_by_nonzero_weights"
SUM_OVER_NONZERO_WEIGHTS = SUM_BY_NONZERO_WEIGHTS
@classmethod
def all(cls):
return (
cls.NONE,
cls.SUM,
cls.MEAN,
cls.SUM_OVER_BATCH_SIZE,
cls.SUM_OVER_NONZERO_WEIGHTS,
cls.SUM_BY_NONZERO_WEIGHTS)
@classmethod
def validate(cls, key):
if key not in cls.all():
raise ValueError(f"Invalid Reduction Key {key}. Key should be one of "
f"{cls.all()}.")
def _safe_mean(losses, num_present):
"""Computes a safe mean of the losses.
Args:
losses: `Tensor` whose elements contain individual loss measurements.
num_present: The number of measurable elements in `losses`.
Returns:
A scalar representing the mean of `losses`. If `num_present` is zero,
then zero is returned.
"""
total_loss = math_ops.reduce_sum(losses)
return math_ops.div_no_nan(total_loss, num_present, name="value")
def _num_present(losses, weights, per_batch=False):
"""Computes the number of elements in the loss function induced by `weights`.
A given weights tensor induces different numbers of usable elements in the
`losses` tensor. The `weights` tensor is broadcast across `losses` for all
possible dimensions. For example, if `losses` is a tensor of dimension
`[4, 5, 6, 3]` and `weights` is a tensor of shape `[4, 5]`, then `weights` is,
in effect, tiled to match the shape of `losses`. Following this effective
tile, the total number of present elements is the number of non-zero weights.
Args:
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
weights: `Tensor` of shape `[]`, `[batch_size]` or
`[batch_size, d1, ... dK]`, where K < N.
per_batch: Whether to return the number of elements per batch or as a sum
total.
Returns:
The number of present (non-zero) elements in the losses tensor. If
`per_batch` is `True`, the value is returned as a tensor of size
`[batch_size]`. Otherwise, a single scalar tensor is returned.
"""
if ((isinstance(weights, float) and weights != 0.0) or
(context.executing_eagerly() and weights._rank() == 0 # pylint: disable=protected-access
and not math_ops.equal(weights, 0.0))):
return _num_elements(losses)
with ops.name_scope(None, "num_present", (losses, weights)) as scope:
weights = math_ops.cast(weights, dtype=dtypes.float32)
present = array_ops.where(
math_ops.equal(weights, 0.0),
array_ops.zeros_like(weights),
array_ops.ones_like(weights))
present = weights_broadcast_ops.broadcast_weights(present, losses)
if per_batch:
return math_ops.reduce_sum(
present,
axis=math_ops.range(1, array_ops.rank(present)),
keepdims=True,
name=scope)
return math_ops.reduce_sum(present, name=scope)
def _num_elements(losses):
"""Computes the number of elements in `losses` tensor."""
with ops.name_scope(None, "num_elements", values=[losses]) as scope:
return math_ops.cast(array_ops.size(losses, name=scope), dtype=losses.dtype)
@tf_export(v1=["losses.compute_weighted_loss"])
@dispatch.add_dispatch_support
def compute_weighted_loss(
losses, weights=1.0, scope=None, loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Computes the weighted loss.
Args:
losses: `Tensor` of shape `[batch_size, d1, ... dN]`.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`losses`, and must be broadcastable to `losses` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: the scope for the operations performed in computing the loss.
loss_collection: the loss will be added to these collections.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `losses`. If `reduction` is
`NONE`, this has the same shape as `losses`; otherwise, it is scalar.
Raises:
ValueError: If `weights` is `None` or the shape is not compatible with
`losses`, or if the number of dimensions (rank) of either `losses` or
`weights` is missing.
Note:
When calculating the gradient of a weighted loss contributions from
both `losses` and `weights` are considered. If your `weights` depend
on some model parameters but you do not want this to affect the loss
gradient, you need to apply `tf.stop_gradient` to `weights` before
passing them to `compute_weighted_loss`.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
Reduction.validate(reduction)
with ops.name_scope(scope, "weighted_loss", (losses, weights)):
# Save the `reduction` argument for loss normalization when distributing
# to multiple replicas. Used only for estimator + v1 optimizer flow.
ops.get_default_graph()._last_loss_reduction = reduction # pylint: disable=protected-access
def compute_loss(losses, weights, loss_collection, reduction):
losses = ops.convert_to_tensor(losses)
input_dtype = losses.dtype
losses = math_ops.cast(losses, dtype=dtypes.float32)
weights = math_ops.cast(weights, dtype=dtypes.float32)
weighted_losses = math_ops.multiply(losses, weights)
if reduction == Reduction.NONE:
loss = weighted_losses
else:
loss = math_ops.reduce_sum(weighted_losses)
if reduction == Reduction.MEAN:
loss = _safe_mean(
loss, math_ops.reduce_sum(array_ops.ones_like(losses) * weights))
elif (reduction == Reduction.SUM_BY_NONZERO_WEIGHTS or
reduction == Reduction.SUM_OVER_NONZERO_WEIGHTS):
loss = _safe_mean(loss, _num_present(losses, weights))
elif reduction == Reduction.SUM_OVER_BATCH_SIZE:
loss = _safe_mean(loss, _num_elements(losses))
# Convert the result back to the input type.
loss = math_ops.cast(loss, input_dtype)
util.add_loss(loss, loss_collection)
return loss
# Skip the assert_broadcastable in XLA context because asserts are not
# supported so it only causes unnecessary ops. Also skip it because it uses
# a DenseToDenseSetOperation op that is incompatible with XLA when
# the shape(s) are dynamic.
if control_flow_ops.get_enclosing_xla_context() is not None:
return compute_loss(losses, weights, loss_collection, reduction)
else:
with ops.control_dependencies(
(weights_broadcast_ops.assert_broadcastable(weights, losses),)):
return compute_loss(losses, weights, loss_collection, reduction)
@tf_export(v1=["losses.absolute_difference"])
@dispatch.add_dispatch_support
def absolute_difference(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds an Absolute Difference loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a `Tensor` of
shape `[batch_size]`, then the total loss for each sample of the batch is
rescaled by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of
`labels` or if the shape of `weights` is invalid or if `labels`
or `predictions` is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if labels is None:
raise ValueError("Argument `labels` must not be None.")
if predictions is None:
raise ValueError("Argument `predictions` must not be None.")
with ops.name_scope(scope, "absolute_difference",
(predictions, labels, weights)) as scope:
predictions = math_ops.cast(predictions, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = math_ops.abs(math_ops.subtract(predictions, labels))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.cosine_distance"])
@dispatch.add_dispatch_support
@deprecated_args(None, "dim is deprecated, use axis instead", "dim")
def cosine_distance(
labels, predictions, axis=None, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS,
dim=None):
"""Adds a cosine-distance loss to the training procedure.
Note that the function assumes that `predictions` and `labels` are already
unit-normalized.
Args:
labels: `Tensor` whose shape matches 'predictions'
predictions: An arbitrary matrix.
axis: The dimension along which the cosine distance is computed.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: Type of reduction to apply to loss.
dim: The old (deprecated) name for `axis`.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If `predictions` shape doesn't match `labels` shape, or
`axis`, `labels`, `predictions` or `weights` is `None`.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
axis = deprecated_argument_lookup("axis", axis, "dim", dim)
if axis is None:
raise ValueError("You must specify argument `axis`.")
if labels is None:
raise ValueError("Argument `labels` must not be None.")
if predictions is None:
raise ValueError("Argument `predictions` must not be None.")
with ops.name_scope(scope, "cosine_distance_loss",
(predictions, labels, weights)) as scope:
predictions = math_ops.cast(predictions, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
radial_diffs = math_ops.multiply(predictions, labels)
losses = 1 - math_ops.reduce_sum(radial_diffs, axis=(axis,), keepdims=True)
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.hinge_loss"])
@dispatch.add_dispatch_support
def hinge_loss(labels, logits, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a hinge loss to the training procedure.
Args:
labels: The ground truth output tensor. Its shape should match the shape of
logits. The values of the tensor are expected to be 0.0 or 1.0. Internally
the {0,1} labels are converted to {-1,1} when calculating the hinge loss.
logits: The logits, a float tensor. Note that logits are assumed to be
unbounded and 0-centered. A value > 0 (resp. < 0) is considered a positive
(resp. negative) binary prediction.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shapes of `logits` and `labels` don't match or
if `labels` or `logits` is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if labels is None:
raise ValueError("Argument `labels` must not be None.")
if logits is None:
raise ValueError("Argument `logits` must not be None.")
with ops.name_scope(scope, "hinge_loss", (logits, labels, weights)) as scope:
logits = math_ops.cast(logits, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
logits.get_shape().assert_is_compatible_with(labels.get_shape())
# We first need to convert binary labels to -1/1 labels (as floats).
all_ones = array_ops.ones_like(labels)
labels = math_ops.subtract(2 * labels, all_ones)
losses = nn_ops.relu(
math_ops.subtract(all_ones, math_ops.multiply(labels, logits)))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.huber_loss"])
@dispatch.add_dispatch_support
def huber_loss(labels, predictions, weights=1.0, delta=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a [Huber Loss](https://en.wikipedia.org/wiki/Huber_loss) term to the training procedure.
For each value x in `error=labels-predictions`, the following is calculated:
```
0.5 * x^2 if |x| <= d
0.5 * d^2 + d * (|x| - d) if |x| > d
```
where d is `delta`.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
`[batch_size]`, then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
delta: `float`, the point where the huber loss function changes from a
quadratic to linear.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid. Also if `labels` or
`predictions` is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if labels is None:
raise ValueError("Argument `labels` must not be None.")
if predictions is None:
raise ValueError("Argument `predictions` must not be None.")
with ops.name_scope(scope, "huber_loss",
(predictions, labels, weights)) as scope:
predictions = math_ops.cast(predictions, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
error = math_ops.subtract(predictions, labels)
abs_error = math_ops.abs(error)
quadratic = math_ops.minimum(abs_error, delta)
# The following expression is the same in value as
# tf.maximum(abs_error - delta, 0), but importantly the gradient for the
# expression when abs_error == delta is 0 (for tf.maximum it would be 1).
# This is necessary to avoid doubling the gradient, since there is already a
# nonzero contribution to the gradient from the quadratic term.
linear = math_ops.subtract(abs_error, quadratic)
losses = math_ops.add(
math_ops.multiply(
ops.convert_to_tensor(0.5, dtype=quadratic.dtype),
math_ops.multiply(quadratic, quadratic)),
math_ops.multiply(delta, linear))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.log_loss"])
@dispatch.add_dispatch_support
def log_loss(labels, predictions, weights=1.0, epsilon=1e-7, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a Log Loss term to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
`[batch_size]`, then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
epsilon: A small increment to add to avoid taking a log of zero.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid. Also if `labels` or `predictions`
is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if labels is None:
raise ValueError("Argument `labels` must not be None.")
if predictions is None:
raise ValueError("Argument `predictions` must not be None.")
with ops.name_scope(scope, "log_loss",
(predictions, labels, weights)) as scope:
predictions = math_ops.cast(predictions, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = -math_ops.multiply(
labels,
math_ops.log(predictions + epsilon)) - math_ops.multiply(
(1 - labels), math_ops.log(1 - predictions + epsilon))
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
# TODO(b/37208492): Add reduction arg.
@tf_export(v1=["losses.mean_pairwise_squared_error"])
@dispatch.add_dispatch_support
def mean_pairwise_squared_error(
    labels, predictions, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES):
  """Adds a pairwise-errors-squared loss to the training procedure.
  Unlike `mean_squared_error`, which is a measure of the differences between
  corresponding elements of `predictions` and `labels`,
  `mean_pairwise_squared_error` is a measure of the differences between pairs
  of corresponding elements of `predictions` and `labels`.
  For example, if `labels`=[a, b, c] and `predictions`=[x, y, z], there are
  three pairs of differences are summed to compute the loss:
    loss = [ ((a-b) - (x-y)).^2 + ((a-c) - (x-z)).^2 + ((b-c) - (y-z)).^2 ] / 3
  Note that since the inputs are of shape `[batch_size, d0, ... dN]`, the
  corresponding pairs are computed within each batch sample but not across
  samples within a batch. For example, if `predictions` represents a batch of
  16 grayscale images of dimension [batch_size, 100, 200], then the set of
  pairs is drawn from each image, but not across images.
  `weights` acts as a coefficient for the loss. If a scalar is provided, then
  the loss is simply scaled by the given value. If `weights` is a tensor of
  size `[batch_size]`, then the total loss for each sample of the batch is
  rescaled by the corresponding element in the `weights` vector.
  Args:
    labels: The ground truth output tensor, whose shape must match the shape
      of `predictions`.
    predictions: The predicted outputs, a tensor of size
      `[batch_size, d0, .. dN]` where N+1 is the total number of dimensions
      in `predictions`.
    weights: Coefficients for the loss a scalar, a tensor of shape
      `[batch_size]` or a tensor whose shape matches `predictions`.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
  Returns:
    A scalar `Tensor` that returns the weighted loss.
  Raises:
    ValueError: If the shape of `predictions` doesn't match that of `labels`
      or if the shape of `weights` is invalid. Also if `labels` or
      `predictions` is None.
  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if labels is None:
    raise ValueError("Argument `labels` must not be None.")
  if predictions is None:
    raise ValueError("Argument `predictions` must not be None.")
  with ops.name_scope(scope, "mean_pairwise_squared_error",
                      (predictions, labels, weights)) as scope:
    weights = math_ops.cast(weights, dtype=dtypes.float32)
    labels = math_ops.cast(labels, dtype=dtypes.float32)
    def compute_loss(labels, predictions, weights, loss_collection):
      # Builds the loss graph and registers it in `loss_collection`; defined
      # as a closure so it can run with or without the broadcastability
      # assertion below (the XLA path skips the assert).
      predictions = math_ops.cast(predictions, dtype=dtypes.float32)
      predictions.get_shape().assert_is_compatible_with(labels.get_shape())
      diffs = math_ops.subtract(predictions, labels)
      # Reduce over every non-batch dimension (1..rank-1), keeping a
      # per-sample value in dimension 0.
      axis = math_ops.range(1, array_ops.rank(diffs))
      sum_squares_diff_per_batch = math_ops.reduce_sum(
          math_ops.square(diffs), axis=axis, keepdims=True)
      num_present_per_batch = _num_present(diffs, weights, per_batch=True)
      # The pairwise sum over (d_i - d_j)^2 expands into two terms:
      # term1 ~ sum(d^2) and term2 ~ (sum(d))^2, each normalized by pair
      # counts. div_no_nan yields 0 when a sample has < 2 present elements.
      term1 = 2.0 * math_ops.div_no_nan(
          sum_squares_diff_per_batch,
          math_ops.maximum(num_present_per_batch - 1, 0),
          name="value")
      sum_diff = math_ops.reduce_sum(diffs, axis=axis, keepdims=True)
      term2 = 2.0 * math_ops.div_no_nan(
          math_ops.square(sum_diff),
          math_ops.maximum(
              math_ops.multiply(num_present_per_batch,
                                num_present_per_batch - 1), 0),
          name="value")
      weighted_losses = math_ops.multiply(term1 - term2, weights)
      loss = math_ops.reduce_sum(weighted_losses)
      # If nothing is present at all, report a zero loss instead of the
      # (meaningless) reduced sum.
      mean_loss = array_ops.where(
          math_ops.reduce_sum(num_present_per_batch) > 0,
          loss,
          array_ops.zeros_like(loss),
          name="value")
      util.add_loss(mean_loss, loss_collection)
      return mean_loss
    # Skip the assert_broadcastable in XLA context because asserts are not
    # supported so it only causes unnecessary ops. Also skip it because it
    # uses a DenseToDenseSetOperation op that is incompatible with XLA when
    # the shape(s) are dynamic.
    if control_flow_ops.get_enclosing_xla_context() is not None:
      return compute_loss(labels, predictions, weights, loss_collection)
    else:
      with ops.control_dependencies(
          (weights_broadcast_ops.assert_broadcastable(weights, labels),)):
        return compute_loss(labels, predictions, weights, loss_collection)
@tf_export(v1=["losses.mean_squared_error"])
@dispatch.add_dispatch_support
def mean_squared_error(
labels, predictions, weights=1.0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Adds a Sum-of-Squares loss to the training procedure.
`weights` acts as a coefficient for the loss. If a scalar is provided, then
the loss is simply scaled by the given value. If `weights` is a tensor of size
`[batch_size]`, then the total loss for each sample of the batch is rescaled
by the corresponding element in the `weights` vector. If the shape of
`weights` matches the shape of `predictions`, then the loss of each
measurable element of `predictions` is scaled by the corresponding value of
`weights`.
Args:
labels: The ground truth output tensor, same dimensions as 'predictions'.
predictions: The predicted outputs.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`labels`, and must be broadcastable to `labels` (i.e., all dimensions must
be either `1`, or the same as the corresponding `losses` dimension).
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
shape as `labels`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `predictions` doesn't match that of `labels` or
if the shape of `weights` is invalid. Also if `labels` or `predictions`
is None.
@compatibility(TF2)
`tf.compat.v1.losses.mean_squared_error` is mostly compatible with eager
execution and `tf.function`. But, the `loss_collection` argument is
ignored when executing eagerly and no loss will be written to the loss
collections. You will need to either hold on to the return value manually
or rely on `tf.keras.Model` loss tracking.
To switch to native TF2 style, instantiate the
`tf.keras.losses.MeanSquaredError` class and call the object instead.
#### Structural Mapping to Native TF2
Before:
```python
loss = tf.compat.v1.losses.mean_squared_error(
labels=labels,
predictions=predictions,
weights=weights,
reduction=reduction)
```
After:
```python
loss_fn = tf.keras.losses.MeanSquaredError(
reduction=reduction)
loss = loss_fn(
y_true=labels,
y_pred=predictions,
sample_weight=weights)
```
#### How to Map Arguments
| TF1 Arg Name | TF2 Arg Name | Note |
| :-------------------- | :--------------- | :------------------------- |
| `labels` | `y_true` | In `__call__()` method |
| `predictions` | `y_pred` | In `__call__()` method |
| `weights` | `sample_weight` | In `__call__()` method. |
: : : The shape requirements for `sample_weight` is different from :
: : : `weights`. Please check the [argument definition][api_docs] for :
: : : details. :
| `scope` | Not supported | - |
| `loss_collection` | Not supported | Losses should be tracked |
: : : explicitly or with Keras APIs, for example, [add_loss][add_loss], :
: : : instead of via collections :
| `reduction` | `reduction` | In constructor. Value of |
: : : `tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE`, :
: : : `tf.compat.v1.losses.Reduction.SUM`, :
: : : `tf.compat.v1.losses.Reduction.NONE` in :
: : : `tf.compat.v1.losses.softmax_cross_entropy` correspond to :
: : : `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE`, :
: : : `tf.keras.losses.Reduction.SUM`, :
: : : `tf.keras.losses.Reduction.NONE`, respectively. If you :
: : : used other value for `reduction`, including the default value :
: : : `tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS`, there is :
: : : no directly corresponding value. Please modify the loss :
: : : implementation manually. :
[add_loss]:https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#add_loss
[api_docs]:https://www.tensorflow.org/api_docs/python/tf/keras/losses/MeanSquaredError#__call__
#### Before & After Usage Example
Before:
>>> y_true = [1, 2, 3]
>>> y_pred = [1, 3, 5]
>>> weights = [0, 1, 0.25]
>>> # samples with zero-weight are excluded from calculation when `reduction`
>>> # argument is set to default value `Reduction.SUM_BY_NONZERO_WEIGHTS`
>>> tf.compat.v1.losses.mean_squared_error(
... labels=y_true,
... predictions=y_pred,
... weights=weights).numpy()
1.0
>>> tf.compat.v1.losses.mean_squared_error(
... labels=y_true,
... predictions=y_pred,
... weights=weights,
... reduction=tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE).numpy()
0.66667
After:
>>> y_true = [[1.0], [2.0], [3.0]]
>>> y_pred = [[1.0], [3.0], [5.0]]
>>> weights = [1, 1, 0.25]
>>> mse = tf.keras.losses.MeanSquaredError(
... reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE)
>>> mse(y_true=y_true, y_pred=y_pred, sample_weight=weights).numpy()
0.66667
@end_compatibility
"""
if labels is None:
raise ValueError("Argument `labels` must not be None.")
if predictions is None:
raise ValueError("Argument `predictions` must not be None.")
with ops.name_scope(scope, "mean_squared_error",
(predictions, labels, weights)) as scope:
predictions = math_ops.cast(predictions, dtype=dtypes.float32)
labels = math_ops.cast(labels, dtype=dtypes.float32)
predictions.get_shape().assert_is_compatible_with(labels.get_shape())
losses = math_ops.squared_difference(predictions, labels)
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.sigmoid_cross_entropy"])
@dispatch.add_dispatch_support
def sigmoid_cross_entropy(
multi_class_labels, logits, weights=1.0, label_smoothing=0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
"""Creates a cross-entropy loss using tf.nn.sigmoid_cross_entropy_with_logits.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape `[batch_size]`, then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/2:
new_multiclass_labels = multiclass_labels * (1 - label_smoothing)
+ 0.5 * label_smoothing
Args:
multi_class_labels: `[batch_size, num_classes]` target integer labels in
`{0, 1}`.
logits: Float `[batch_size, num_classes]` logits outputs of the network.
weights: Optional `Tensor` whose rank is either 0, or the same rank as
`multi_class_labels`, and must be broadcastable to `multi_class_labels`
(i.e., all dimensions must be either `1`, or the same as the
corresponding `losses` dimension).
label_smoothing: If greater than `0` then smooth the labels.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
`NONE`, this has the same shape as `logits`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `logits` doesn't match that of
`multi_class_labels` or if the shape of `weights` is invalid, or if
`weights` is None. Also if `multi_class_labels` or `logits` is None.
@compatibility(eager)
The `loss_collection` argument is ignored when executing eagerly. Consider
holding on to the return value or collecting losses via a `tf.keras.Model`.
@end_compatibility
"""
if multi_class_labels is None:
raise ValueError("Argument `multi_class_labels` must not be None.")
if logits is None:
raise ValueError("Argument `logits` must not be None.")
with ops.name_scope(scope, "sigmoid_cross_entropy_loss",
(logits, multi_class_labels, weights)) as scope:
logits = ops.convert_to_tensor(logits)
multi_class_labels = math_ops.cast(multi_class_labels, logits.dtype)
logits.get_shape().assert_is_compatible_with(multi_class_labels.get_shape())
if label_smoothing > 0:
multi_class_labels = (multi_class_labels * (1 - label_smoothing) +
0.5 * label_smoothing)
losses = nn.sigmoid_cross_entropy_with_logits(labels=multi_class_labels,
logits=logits,
name="xentropy")
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
@tf_export(v1=["losses.softmax_cross_entropy"])
@dispatch.add_dispatch_support
def softmax_cross_entropy(
onehot_labels, logits, weights=1.0, label_smoothing=0, scope=None,
loss_collection=ops.GraphKeys.LOSSES,
reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
r"""Creates a cross-entropy loss using tf.nn.softmax_cross_entropy_with_logits_v2.
`weights` acts as a coefficient for the loss. If a scalar is provided,
then the loss is simply scaled by the given value. If `weights` is a
tensor of shape `[batch_size]`, then the loss weights apply to each
corresponding sample.
If `label_smoothing` is nonzero, smooth the labels towards 1/num_classes:
new_onehot_labels = onehot_labels * (1 - label_smoothing)
+ label_smoothing / num_classes
Note that `onehot_labels` and `logits` must have the same shape,
e.g. `[batch_size, num_classes]`. The shape of `weights` must be
broadcastable to loss, whose shape is decided by the shape of `logits`.
In case the shape of `logits` is `[batch_size, num_classes]`, loss is
a `Tensor` of shape `[batch_size]`.
Args:
onehot_labels: One-hot-encoded labels.
logits: Logits outputs of the network.
weights: Optional `Tensor` that is broadcastable to loss.
label_smoothing: If greater than 0 then smooth the labels.
scope: the scope for the operations performed in computing the loss.
loss_collection: collection to which the loss will be added.
reduction: Type of reduction to apply to loss.
Returns:
Weighted loss `Tensor` of the same type as `logits`. If `reduction` is
`NONE`, this has shape `[batch_size]`; otherwise, it is scalar.
Raises:
ValueError: If the shape of `logits` doesn't match that of `onehot_labels`
or if the shape of `weights` is invalid or if `weights` is None. Also if
`onehot_labels` or `logits` is None.
@compatibility(TF2)
`tf.compat.v1.losses.softmax_cross_entropy` is mostly compatible with eager
execution and `tf.function`. But, the `loss_collection` argument is
ignored when executing eagerly and no loss will be written to the loss
collections. You will need to either hold on to the return value manually
or rely on `tf.keras.Model` loss tracking.
To switch to native TF2 style, instantiate the
`tf.keras.losses.CategoricalCrossentropy` class with `from_logits` set
as `True` and call the object instead.
#### Structural Mapping to Native TF2
Before:
```python
loss = tf.compat.v1.losses.softmax_cross_entropy(
onehot_labels=onehot_labels,
logits=logits,
weights=weights,
label_smoothing=smoothing)
```
After:
```python
loss_fn = tf.keras.losses.CategoricalCrossentropy(
from_logits=True,
label_smoothing=smoothing)
loss = loss_fn(
y_true=onehot_labels,
y_pred=logits,
sample_weight=weights)
```
#### How to Map Arguments
| TF1 Arg Name | TF2 Arg Name | Note |
| :-------------------- | :--------------- | :------------------------- |
| - | `from_logits` | Set `from_logits` as True |
: : : to have identical behavior :
| `onehot_labels` | `y_true` | In `__call__()` method |
| `logits` | `y_pred` | In `__call__()` method |
| `weights` | `sample_weight` | In `__call__()` method |
| `label_smoothing` | `label_smoothing`| In constructor |
| `scope` | Not supported | - |
| `loss_collection` | Not supported | Losses should be tracked |
: : : explicitly or with Keras :
: : : APIs, for example, :
: : : [add_loss][add_loss], :
: : : instead of via collections :
| `reduction` | `reduction` | In constructor. Value of |
: : : `tf.compat.v1.losses.Reduction.SUM_OVER_BATCH_SIZE`, :
: : : `tf.compat.v1.losses.Reduction.SUM`, :
: : : `tf.compat.v1.losses.Reduction.NONE` in :
: : : `tf.compat.v1.losses.softmax_cross_entropy` correspond to :
: : : `tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE`, :
: : : `tf.keras.losses.Reduction.SUM`, :
: : : `tf.keras.losses.Reduction.NONE`, respectively. If you :
: : : used other value for `reduction`, including the default value :
: : : `tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS`, there is :
: : : no directly corresponding value. Please modify the loss :
: : : implementation manually. :
[add_loss]:https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#add_loss
#### Before & After Usage Example
Before:
>>> y_true = [[0, 1, 0], [0, 0, 1]]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> weights = [0.3, 0.7]
>>> smoothing = 0.2
>>> tf.compat.v1.losses.softmax_cross_entropy(y_true, y_pred, weights=weights,
... label_smoothing=smoothing).numpy()
0.57618
After:
>>> cce = tf.keras.losses.CategoricalCrossentropy(from_logits=True,
... label_smoothing=smoothing)
>>> cce(y_true, y_pred, sample_weight=weights).numpy()
0.57618
@end_compatibility
"""
if onehot_labels is None:
raise ValueError("Argument `onehot_labels` must not be None.")
if logits is None:
raise ValueError("Argument `logits` must not be None.")
with ops.name_scope(scope, "softmax_cross_entropy_loss",
(logits, onehot_labels, weights)) as scope:
logits = ops.convert_to_tensor(logits)
onehot_labels = math_ops.cast(onehot_labels, logits.dtype)
logits.get_shape().assert_is_compatible_with(onehot_labels.get_shape())
if label_smoothing > 0:
num_classes = math_ops.cast(
array_ops.shape(onehot_labels)[-1], logits.dtype)
smooth_positives = 1.0 - label_smoothing
smooth_negatives = label_smoothing / num_classes
onehot_labels = onehot_labels * smooth_positives + smooth_negatives
onehot_labels = array_ops.stop_gradient(
onehot_labels, name="labels_stop_gradient")
losses = nn.softmax_cross_entropy_with_logits_v2(
labels=onehot_labels, logits=logits, name="xentropy")
return compute_weighted_loss(
losses, weights, scope, loss_collection, reduction=reduction)
# TODO(ptucker): Merge this with similar method in metrics_impl.
def _remove_squeezable_dimensions(
    labels, predictions, weights=None, expected_rank_diff=0):
  """Internal version of _remove_squeezable_dimensions which handles weights.
  Squeezes `predictions` and `labels` if their ranks differ from expected by
  exactly 1, and squeezes `weights` if its rank is 1 more than the new rank
  of `predictions`. Static shape information is used when available;
  otherwise graph operations are added, which may cost some performance.
  Args:
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    weights: Optional weight `Tensor`. It will be squeezed if it's not
      scalar, and its rank is 1 more than the new rank of `labels`.
    expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
  Returns:
    Tuple of `predictions`, `labels` and `weights`, possibly with the last
    dimension squeezed.
  """
  labels, predictions = confusion_matrix.remove_squeezable_dimensions(
      labels, predictions, expected_rank_diff=expected_rank_diff)
  if weights is None:
    return labels, predictions, weights
  weights = ops.convert_to_tensor(weights)
  labels_rank = labels.get_shape().ndims
  weights_shape = weights.get_shape()
  weights_rank = weights_shape.ndims
  if (labels_rank is not None) and (weights_rank is not None):
    # Both ranks are statically known: squeeze only when weights carries
    # exactly one extra trailing dimension.
    if weights_rank - labels_rank == 1:
      weights = array_ops.squeeze(weights, [-1])
    return labels, predictions, weights
  # At least one rank is dynamic: compare ranks at runtime, but only
  # attempt the squeeze when the last static dimension could be 1 (or the
  # weights rank is fully unknown).
  rank_diff = array_ops.rank(weights) - array_ops.rank(labels)
  if (weights_rank is None) or (
      weights_rank > 0 and weights_shape.dims[-1].is_compatible_with(1)):
    weights = cond.cond(
        math_ops.equal(1, rank_diff),
        lambda: array_ops.squeeze(weights, [-1]),
        lambda: weights)
  return labels, predictions, weights
@tf_export(v1=["losses.sparse_softmax_cross_entropy"])
@dispatch.add_dispatch_support
def sparse_softmax_cross_entropy(
    labels, logits, weights=1.0, scope=None,
    loss_collection=ops.GraphKeys.LOSSES,
    reduction=Reduction.SUM_BY_NONZERO_WEIGHTS):
  """Cross-entropy loss using `tf.nn.sparse_softmax_cross_entropy_with_logits`.

  `weights` scales the loss: a scalar multiplies the whole loss by that value,
  while a `[batch_size]` tensor weights each sample individually.

  Args:
    labels: `Tensor` of shape `[d_0, d_1, ..., d_{r-1}]` (where `r` is the rank
      of `labels` and of the result), dtype `int32` or `int64`. Every entry
      must lie in `[0, num_classes)`; out-of-range values raise an exception on
      CPU and yield `NaN` loss/gradient rows on GPU.
    logits: Unscaled log probabilities of shape
      `[d_0, d_1, ..., d_{r-1}, num_classes]`, dtype `float16`, `float32` or
      `float64`.
    weights: Loss coefficients; a scalar, or broadcastable to `labels` (same
      rank with each dimension either 1 or matching).
    scope: the scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.

  Returns:
    Weighted loss `Tensor` of the same type as `logits`. With `reduction`
    `NONE` the shape matches `labels`; otherwise it is a scalar.

  Raises:
    ValueError: If the shapes of `logits`, `labels`, and `weights` are
      incompatible, or if any of them are None.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  # Reject missing required tensors up front with the canonical messages.
  for arg_name, arg_value in (("labels", labels), ("logits", logits)):
    if arg_value is None:
      raise ValueError(f"Argument `{arg_name}` must not be None.")
  with ops.name_scope(scope, "sparse_softmax_cross_entropy_loss",
                      (logits, labels, weights)) as scope:
    # `labels` holds one class ID per entry while `logits` carries one score
    # per class, so `logits` should have exactly one extra trailing dimension;
    # hence expected_rank_diff=1 when squeezing singleton dims.
    labels, logits, weights = _remove_squeezable_dimensions(
        labels, logits, weights, expected_rank_diff=1)
    per_entry_loss = nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name="xentropy")
    return compute_weighted_loss(
        per_entry_loss, weights, scope, loss_collection, reduction=reduction)
| Reduction |
python | joke2k__faker | faker/providers/geo/bn_BD/__init__.py | {
"start": 77,
"end": 70032
} | class ____(GeoProvider):
"""
Implement GEO provider for ``bn_BD`` locale.
"""
land_coords = (
("42.50729", "1.53414", "লেস এসকালডেস", "AD", "ইউরোপ/অ্যান্ডোরা"),
("36.21544", "65.93249", "সার-ই পুল", "AF", "এশিয়া/কাবুল"),
("40.49748", "44.7662", "হরাজদান", "AM", "এশিয়া/ইয়েরেভান"),
("-11.78333", "19.91667", "লুয়েনা", "AO", "আফ্রিকা/লুয়ান্ডা"),
("-37.32167", "-59.13316", "তান্ডিল", "AR", "আমেরিকা/আর্জেন্টিনা/বুয়েনস_আয়ারস"),
("-34.74785", "-58.70072", "পন্টেভেদ্রা", "AR", "আমেরিকা/আর্জেন্টিনা/বুয়েনস_আয়ারস"),
("-34.64966", "-58.38341", "বারাকাস", "AR", "আমেরিকা/আর্জেন্টিনা/বুয়েনস_আয়ারস"),
("-54.8", "-68.3", "উশুইয়া", "AR", "আমেরিকা/আর্জেন্টিনা/উশুয়া"),
("-31.25033", "-61.4867", "রাফায়েলা", "AR", "আমেরিকা/আর্জেন্টিনা/কর্ডোবা"),
("-31.4488", "-60.93173", "এসপেরানজা", "AR", "আমেরিকা/আর্জেন্টিনা/কর্ডোবা"),
("-34.64167", "-60.47389", "চাকাবুকো", "AR", "আমেরিকা/আর্জেন্টিনা/বুয়েনস_আয়ারস"),
("-27.4338", "-65.61427", "অ্যাগুইলারেস", "AR", "আমেরিকা/আর্জেন্টিনা/টুকুমান"),
("47.05", "15.46667", "সাঙ্কট পিটার", "AT", "ইউরোপ/ভিয়েনা"),
("48.25", "16.4", "ফ্লোরিডসডর্ফ", "AT", "ইউরোপ/ভিয়েনা"),
("-31.95224", "115.8614", "পার্থ", "AU", "অস্ট্রেলিয়া/পার্থ"),
("-37.9", "145.18333", "হুইলার হিল", "AU", "অস্ট্রেলিয়া/মেলবোর্ন"),
("-33.88096", "151.07986", "স্ট্র্যাথফিল্ড", "AU", "অস্ট্রেলিয়া/সিডনি"),
("-34.88422", "150.60036", "নওরা", "AU", "অস্ট্রেলিয়া/সিডনি"),
("-25.54073", "152.70493", "মেরিবরো", "AU", "অস্ট্রেলিয়া/ব্রিসবেন"),
("-34.28853", "146.05093", "গ্রিফিথ", "AU", "অস্ট্রেলিয়া/সিডনি"),
("-33.79176", "151.08057", "ইস্টউড", "AU", "অস্ট্রেলিয়া/সিডনি"),
("-37.88333", "145.06667", "কার্নেগি", "AU", "অস্ট্রেলিয়া/মেলবোর্ন"),
("-33.75881", "150.99292", "বৌলখাম পাহাড়", "AU", "অস্ট্রেলিয়া/সিডনি"),
("-27.50578", "153.10236", "ক্যারিন্ডেল", "AU", "অস্ট্রেলিয়া/ব্রিসবেন"),
("-32.05251", "115.88782", "উইলেটন", "AU", "অস্ট্রেলিয়া/পার্থ"),
("-38.16604", "145.13643", "ফ্রাঙ্কস্টন সাউথ", "AU", "অস্ট্রেলিয়া/মেলবোর্ন"),
("38.45598", "48.87498", "আস্তারা", "AZ", "এশিয়া/বাকু"),
("41.09246", "45.36561", "কাজ্যাক্স", "AZ", "এশিয়া/বাকু"),
("44.75874", "19.21437", "বিজেলজিনা", "BA", "ইউরোপ/সারায়েভো"),
("23.80700", "90.40971", "ঢাকা", "BD", "এশিয়া/ঢাকা"),
("24.37648", "88.60527", "রাজশাহী", "BD", "এশিয়া/ঢাকা"),
("22.36135", "91.78067", "চট্টগ্রাম", "BD", "এশিয়া/ঢাকা"),
("22.84686", "89.53730", "খুলনা", "BD", "এশিয়া/ঢাকা"),
("22.70250", "90.35243", "বরিশাল", "BD", "এশিয়া/ঢাকা"),
("24.89108", "91.86156", "সিলেট", "BD", "এশিয়া/ঢাকা"),
("25.74364", "89.27526", "রংপুর", "BD", "এশিয়া/ঢাকা"),
("24.74564", "90.41537", "ময়মনসিংহ", "BD", "এশিয়া/ঢাকা"),
("23.46092", "91.18056", "কুমিল্লা", "BD", "এশিয়া/ঢাকা"),
("23.60156", "89.83321", "ফরিদপুর", "BD", "এশিয়া/ঢাকা"),
("21.42798", "92.00831", "কক্সবাজার", "BD", "এশিয়া/ঢাকা"),
("23.15413", "89.21153", "যশোর", "BD", "এশিয়া/ঢাকা"),
("24.84920", "89.36662", "বগুড়া", "BD", "এশিয়া/ঢাকা"),
("24.58064", "88.27124", "চাঁপাই নবাবগঞ্জ", "BD", "এশিয়া/ঢাকা"),
("24.68209", "88.15827", "শিবগঞ্জ", "BD", "এশিয়া/ঢাকা"),
("23.9028", "89.11943", "কুষ্টিয়া", "BD", "এশিয়া/ঢাকা"),
("22.83957", "91.84128", "মানিকছড়ি", "BD", "এশিয়া/ঢাকা"),
("50.8", "3.16667", "ওয়েভেলজেম", "BE", "ইউরোপ/ব্রাসেলস"),
("51.12794", "4.21372", "তেমসে", "BE", "ইউরোপ/ব্রাসেলস"),
("50.71229", "4.52529", "রিক্সেনসার্ট", "BE", "ইউরোপ/ব্রাসেলস"),
("50.74497", "3.20639", "মাউসক্রন", "BE", "ইউরোপ/ব্রাসেলস"),
("51.24197", "4.82313", "লিলে", "BE", "ইউরোপ/ব্রাসেলস"),
("51.03427", "5.37429", "হাউথালেন", "BE", "ইউরোপ/ব্রাসেলস"),
("50.56149", "4.69889", "জেমব্লুক্স", "BE", "ইউরোপ/ব্রাসেলস"),
("50.88506", "4.07601", "ডেন্ডারলিউ", "BE", "ইউরোপ/ব্রাসেলস"),
("51.21187", "4.25633", "বেভারেন", "BE", "ইউরোপ/ব্রাসেলস"),
("41.57439", "24.71204", "স্মোলিয়ান", "BG", "ইউরোপ/সোফিয়া"),
("43.4125", "23.225", "মন্টানা", "BG", "ইউরোপ/সোফিয়া"),
("42.7", "27.25", "আয়টোস", "BG", "ইউরোপ/সোফিয়া"),
("8.88649", "2.59753", "চাওরো", "BJ", "আফ্রিকা/পোর্তো-নভো"),
("-21.44345", "-65.71875", "টুপিজা", "BO", "আমেরিকা/লা_পাজ"),
("-0.71667", "-48.52333", "সাউর", "BR", "আমেরিকা/বেলেম"),
("-8.05389", "-34.88111", "রেসিফ", "BR", "আমেরিকা/রেসিফ"),
("-4.42472", "-41.45861", "পেড্রো দ্বিতীয়", "BR", "আমেরিকা/ফর্তালেজা"),
("-3.14306", "-58.44417", "ইটাকোটিয়ারা", "BR", "আমেরিকা/মানাস"),
("-4.16694", "-40.7475", "গুয়ারসিয়াবা দো নর্তে", "BR", "আমেরিকা/ফর্তালেজা"),
("-8.66667", "-35.71667", "ক্যাটেন্ডে", "BR", "আমেরিকা/রেসিফ"),
("-8.28333", "-35.03333", "কাবো", "BR", "আমেরিকা/রেসিফ"),
("-4.24444", "-42.29444", "বারাস", "BR", "আমেরিকা/ফর্তালেজা"),
("-3.20333", "-52.20639", "আলতামিরা", "BR", "আমেরিকা/সান্তারেম"),
("-20.87306", "-48.29694", "ভিরাডুরো", "BR", "আমেরিকা/সাও_পাওলো"),
("-22.97056", "-46.99583", "ভালিনহোস", "BR", "আমেরিকা/সাও_পাওলো"),
("-10.95817", "-38.79084", "টুকানো", "BR", "আমেরিকা/বাহিয়া"),
("-28.81833", "-52.51028", "সোলেডে", "BR", "আমেরিকা/সাও_পাওলো"),
("-23.44361", "-51.87389", "সারন্দি", "BR", "আমেরিকা/সাও_পাওলো"),
("-22.45667", "-47.53028", "সান্তা গার্ট্রুডস", "BR", "আমেরিকা/সাও_পাওলো"),
("-11.48472", "-37.93278", "রিও রিয়াল", "BR", "আমেরিকা/বাহিয়া"),
("-19.32556", "-41.25528", "প্রতিশোধক", "BR", "আমেরিকা/সাও_পাওলো"),
("-26.22861", "-52.67056", "প্যাটো ব্রাঙ্কো", "BR", "আমেরিকা/সাও_পাওলো"),
("-25.42944", "-50.00639", "পালমেইরা", "BR", "আমেরিকা/সাও_পাওলো"),
("-12.91667", "-39.25", "মুরিতিবা", "BR", "আমেরিকা/বাহিয়া"),
("-21.41222", "-42.19667", "মিরাসেমা", "BR", "আমেরিকা/সাও_পাওলো"),
("-28.44917", "-52.2", "মারাউ", "BR", "আমেরিকা/সাও_পাওলো"),
("-22.92306", "-53.13722", "লোয়ান্ডা", "BR", "আমেরিকা/সাও_পাওলো"),
("-10.91722", "-37.65", "লাগারতো", "BR", "আমেরিকা/ম্যাসিও"),
("-19.72806", "-50.19556", "ইতুরামা", "BR", "আমেরিকা/সাও_পাওলো"),
("-21.205", "-41.88778", "ইটাপেরুনা", "BR", "আমেরিকা/সাও_পাওলো"),
("-20.25333", "-43.80139", "ইতাবিরিতো", "BR", "আমেরিকা/সাও_পাওলো"),
("-28.24", "-48.67028", "ইম্বিটুবা", "BR", "আমেরিকা/সাও_পাওলো"),
("-22.53722", "-42.98194", "গুয়াপিমিরিম", "BR", "আমেরিকা/সাও_পাওলো"),
("-19.7625", "-44.31389", "এসমেরালদাস", "BR", "আমেরিকা/সাও_পাওলো"),
("-25.42778", "-49.27306", "কিউরিটিবা", "BR", "আমেরিকা/সাও_পাওলো"),
("-14.66463", "-52.35558", "নোভা জাভান্তিনা", "BR", "আমেরিকা/কুয়াবা"),
("-29.2975", "-51.50361", "কার্লোস বারবোসা", "BR", "আমেরিকা/সাও_পাওলো"),
("-15.675", "-38.94722", "ক্যানভিইরাস", "BR", "আমেরিকা/বাহিয়া"),
("-17.74431", "-48.62789", "কালডাস নোভাস", "BR", "আমেরিকা/সাও_পাওলো"),
("-23.7975", "-48.59278", "বুড়ি", "BR", "আমেরিকা/সাও_পাওলো"),
("-10.90889", "-37.03861", "বারা ডস কোকিরোস", "BR", "আমেরিকা/ম্যাসিও"),
("-22.57306", "-47.1725", "আর্টুর নোগুইরা", "BR", "আমেরিকা/সাও_পাওলো"),
("-10.91111", "-37.07167", "আরাকাজু", "BR", "আমেরিকা/ম্যাসিও"),
("-21.42917", "-45.94722", "আলফেনাস", "BR", "আমেরিকা/সাও_পাওলো"),
("-8.76194", "-63.90389", "পোর্টো ভেলহো", "BR", "আমেরিকা/পোর্টো_ভেলহো"),
("-21.44236", "27.46153", "টোনোটা", "BW", "আফ্রিকা/গ্যাবোরোন"),
("55.1904", "30.2049", "ভিটেবস্ক", "BY", "ইউরোপ/মিনস্ক"),
("53.5942", "25.8191", "নভোগ্রুডোক", "BY", "ইউরোপ/মিনস্ক"),
("52.4089", "31.3237", "ডোব্রাশ", "BY", "ইউরোপ/মিনস্ক"),
("45.43341", "-73.86586", "বীকনসফিল্ড", "CA", "আমেরিকা/টরন্টো"),
("46.23899", "-63.13414", "শার্লটটাউন", "CA", "আমেরিকা/হ্যালিফ্যাক্স"),
("45.4473", "-73.75335", "ডোরভাল", "CA", "আমেরিকা/টরন্টো"),
("49.88307", "-119.48568", "কেলোনা", "CA", "আমেরিকা/ভ্যাঙ্কুভার"),
("43.86682", "-79.2663", "মার্কহাম", "CA", "আমেরিকা/টরন্টো"),
("42.8334", "-80.38297", "নরফোক কাউন্টি", "CA", "আমেরিকা/টরন্টো"),
("45.44868", "-73.81669", "পয়েন্ট-ক্লেয়ার", "CA", "আমেরিকা/টরন্টো"),
("45.40008", "-73.58248", "সেন্ট-ক্যাথরিন", "CA", "আমেরিকা/টরন্টো"),
("53.51684", "-113.3187", "শেরউড পার্ক", "CA", "আমেরিকা/এডমন্টন"),
("50.26729", "-119.27337", "ভার্নন", "CA", "আমেরিকা/ভ্যাঙ্কুভার"),
("46.1351", "-60.1831", "সিডনি", "CA", "আমেরিকা/গ্লেস_বে"),
("0.76755", "24.43973", "ইয়াংগাম্বি", "CD", "আফ্রিকা/লুবুম্বাশি"),
("-8.73508", "24.99798", "কামিনা", "CD", "আফ্রিকা/লুবুম্বাশি"),
("0.49113", "29.47306", "বেনী", "CD", "আফ্রিকা/লুবুম্বাশি"),
("-4.5833", "15.16554", "কাসাংগুলু", "CD", "আফ্রিকা/কিনশাসা"),
("4.94273", "15.87735", "কার্নট", "CF", "আফ্রিকা/বাঙ্গুই"),
("-4.26613", "15.28318", "ব্রাজাভিল", "CG", "আফ্রিকা/ব্রাজাভিল"),
("46.18396", "6.10237", "ওয়ানেক্স", "CH", "ইউরোপ/জুরিখ"),
("47.30997", "8.52462", "অ্যাডলিসউইল", "CH", "ইউরোপ/জুরিখ"),
("5.84752", "-5.682", "লাকোটা", "CI", "আফ্রিকা/আবিজান"),
("5.27247", "-3.59625", "বনুয়া", "CI", "আফ্রিকা/আবিজান"),
("-33.59217", "-70.6996", "সান বার্নার্ডো", "CL", "আমেরিকা/সান্টিয়াগো"),
("-30.60106", "-71.19901", "ওভালে", "CL", "আমেরিকা/সান্টিয়াগো"),
("-32.45242", "-71.23106", "লা লিগুয়া", "CL", "আমেরিকা/সান্টিয়াগো"),
("-36.9256", "-73.02841", "চিগুয়ান্তে", "CL", "আমেরিকা/সান্টিয়াগো"),
("4.96667", "10.7", "টোঙ্গা", "CM", "আফ্রিকা/ডুয়ালা"),
("3.51667", "11.5", "এমবালমায়ো", "CM", "আফ্রিকা/ডুয়ালা"),
("4.2475", "9.00472", "আইডেনাও", "CM", "আফ্রিকা/ডুয়ালা"),
("46.51872", "86.00214", "হক্সটলগে", "CN", "এশিয়া/উরুমকি"),
("36.81667", "117.81667", "ঝাউকুন", "CN", "এশিয়া/সাংহাই"),
("34.86472", "117.55417", "জাওজুয়াং", "CN", "এশিয়া/সাংহাই"),
("23.73333", "114.68333", "হেয়ুয়ান", "CN", "এশিয়া/সাংহাই"),
("34.65918", "109.22921", "ইয়ানলিয়াং", "CN", "এশিয়া/সাংহাই"),
("38.40917", "112.73333", "জিনঝো", "CN", "এশিয়া/সাংহাই"),
("33.78333", "114.51667", "ওয়াচেং", "CN", "এশিয়া/সাংহাই"),
("27.85", "112.9", "জিয়াংটান", "CN", "এশিয়া/সাংহাই"),
("37.19723", "122.05228", "তিয়ানফু", "CN", "এশিয়া/সাংহাই"),
("34.85", "117.33333", "তাওজুয়াং", "CN", "এশিয়া/সাংহাই"),
("35.64889", "117.27583", "শিশুই", "CN", "এশিয়া/সাংহাই"),
("27.34089", "117.4831", "শাওউ", "CN", "এশিয়া/সাংহাই"),
("37.30553", "120.82747", "ঝুয়াংইয়ুয়ান", "CN", "এশিয়া/সাংহাই"),
("35.50056", "117.63083", "পিঙ্গি", "CN", "এশিয়া/সাংহাই"),
("27.92333", "118.53333", "পুচেং", "CN", "এশিয়া/সাংহাই"),
("24.28859", "116.11768", "মেইঝো", "CN", "এশিয়া/সাংহাই"),
("37.65181", "120.33063", "লংগ্যাং", "CN", "এশিয়া/সাংহাই"),
("23.29549", "113.82465", "লিচেং", "CN", "এশিয়া/সাংহাই"),
("36.19278", "117.65694", "লাইউউ", "CN", "এশিয়া/সাংহাই"),
("30.35028", "112.19028", "জিংঝো", "CN", "এশিয়া/সাংহাই"),
("32.50611", "120.14278", "জিয়াংইয়ান", "CN", "এশিয়া/সাংহাই"),
("30.24706", "115.04814", "হুয়াংশি", "CN", "এশিয়া/সাংহাই"),
("37.73222", "115.70111", "হেংশুই", "CN", "এশিয়া/সাংহাই"),
("28.88162", "120.03308", "গুলি", "CN", "এশিয়া/সাংহাই"),
("23.02677", "113.13148", "ফোশান", "CN", "এশিয়া/সাংহাই"),
("35.85", "117.7", "ডংডু", "CN", "এশিয়া/সাংহাই"),
("32.54278", "111.50861", "দানজিয়াংকু", "CN", "এশিয়া/সাংহাই"),
("35.20889", "111.73861", "চাংঝি", "CN", "এশিয়া/সাংহাই"),
("34.56861", "105.89333", "বেইদাও", "CN", "এশিয়া/সাংহাই"),
("29.98869", "122.20488", "ঝুশান", "CN", "এশিয়া/সাংহাই"),
("40.66482", "122.22833", "ইংকু", "CN", "এশিয়া/সাংহাই"),
("46.08333", "122.08333", "উলানহট", "CN", "এশিয়া/সাংহাই"),
("45.35", "126.28333", "শুয়াংচেং", "CN", "এশিয়া/সাংহাই"),
("41.09822", "120.74792", "নানপিয়াও", "CN", "এশিয়া/সাংহাই"),
("41.27194", "123.17306", "লিয়াওইয়াং", "CN", "এশিয়া/সাংহাই"),
("41.94175", "123.50266", "হুশিতাই", "CN", "এশিয়া/সাংহাই"),
("40.85158", "122.74754", "হাইচেং", "CN", "এশিয়া/সাংহাই"),
("42.64031", "125.51176", "ডংফেং", "CN", "এশিয়া/সাংহাই"),
("45.75279", "130.57211", "বলি", "CN", "এশিয়া/সাংহাই"),
("31.64615", "120.74221", "চাংশু সিটি", "CN", "এশিয়া/সাংহাই"),
("7.83389", "-72.47417", "ভিলা দেল রোজারিও", "CO", "আমেরিকা/বোগোটা"),
("6.46838", "-73.26022", "সোকোরো", "CO", "আমেরিকা/বোগোটা"),
("8.79577", "-75.69947", "সান কার্লোস", "CO", "আমেরিকা/বোগোটা"),
("10.98778", "-74.95472", "পুয়ের্তো কলম্বিয়া", "CO", "আমেরিকা/বোগোটা"),
("4.73245", "-74.26419", "মাদ্রিদ", "CO", "আমেরিকা/বোগোটা"),
("5.20856", "-74.73584", "হোন্ডা", "CO", "আমেরিকা/বোগোটা"),
("10.15031", "-73.9614", "এল কোপে", "CO", "আমেরিকা/বোগোটা"),
("3.8801", "-77.03116", "বুয়েনাভেন্টুরা", "CO", "আমেরিকা/বোগোটা"),
("5.6561", "-75.87877", "আন্ডিস", "CO", "আমেরিকা/বোগোটা"),
("9.92787", "-84.13722", "সান রাফায়েল", "CR", "আমেরিকা/কোস্টারিকা"),
("10.63504", "-85.43772", "লাইবেরিয়া", "CR", "আমেরিকা/কোস্টারিকা"),
("23.15678", "-81.24441", "ভারাদেরো", "CU", "আমেরিকা/হাভানা"),
("20.14298", "-77.43532", "মিডিয়া লুনা", "CU", "আমেরিকা/হাভানা"),
("23.04419", "-82.00919", "জারুকো", "CU", "আমেরিকা/হাভানা"),
("22.98212", "-80.58556", "কোরালিলো", "CU", "আমেরিকা/হাভানা"),
("23.0072", "-82.4017", "বোয়েরোস", "CU", "আমেরিকা/হাভানা"),
("50.50301", "13.63617", "অধিকাংশ", "CZ", "ইউরোপ/প্রাগ"),
("50.23271", "12.87117", "কারলোভি ভ্যারি", "CZ", "ইউরোপ/প্রাগ"),
("51.04962", "12.1369", "জিৎজ", "DE", "ইউরোপ/বার্লিন"),
("52.59319", "13.32127", "উইটেনউ", "DE", "ইউরোপ/বার্লিন"),
("50.82709", "6.9747", "ওয়েসেলিং", "DE", "ইউরোপ/বার্লিন"),
("50.9803", "11.32903", "ওয়েইমার", "DE", "ইউরোপ/বার্লিন"),
("52.86147", "9.5926", "ওয়ালরোড", "DE", "ইউরোপ/বার্লিন"),
("51.88333", "8.51667", "ভার্ল", "DE", "ইউরোপ/বার্লিন"),
("48.07667", "8.64409", "ট্রোসিংজেন", "DE", "ইউরোপ/বার্লিন"),
("48.78232", "9.17702", "স্টুটগার্ট", "DE", "ইউরোপ/বার্লিন"),
("53.59337", "9.47629", "স্টেড", "DE", "ইউরোপ/বার্লিন"),
("50.80019", "7.20769", "সিগবার্গ", "DE", "ইউরোপ/বার্লিন"),
("51.21667", "6.26667", "Schwalmtal", "DE", "ইউরোপ/বার্লিন"),
("54.52156", "9.5586", "শ্লেসউইগ", "DE", "ইউরোপ/বার্লিন"),
("50.72043", "11.34046", "রুডলস্ট্যাড", "DE", "ইউরোপ/বার্লিন"),
("48.49144", "9.20427", "রিউটলিংজেন", "DE", "ইউরোপ/বার্লিন"),
("51.20219", "7.36027", "রাদেভর্মওয়াল্ড", "DE", "ইউরোপ/বার্লিন"),
("48.46458", "9.22796", "ফুলিংজেন", "DE", "ইউরোপ/বার্লিন"),
("51.30001", "13.10984", "ওশ্যাটজ", "DE", "ইউরোপ/বার্লিন"),
("51.47805", "6.8625", "ওবারহাউসেন", "DE", "ইউরোপ/বার্লিন"),
("50.23805", "8.86704", "নিদ্দেরউ", "DE", "ইউরোপ/বার্লিন"),
("48.73218", "11.18709", "নিউবার্গ আন ডার ডোনাউ", "DE", "ইউরোপ/বার্লিন"),
("47.98372", "10.18527", "মেমিনজেন", "DE", "ইউরোপ/বার্লিন"),
("50.80904", "8.77069", "মারবার্গ আন ডার লাহন", "DE", "ইউরোপ/বার্লিন"),
("49.5099", "6.74549", "লোশেইম", "DE", "ইউরোপ/বার্লিন"),
("48.52961", "12.16179", "ল্যান্ডশাট", "DE", "ইউরোপ/বার্লিন"),
("51.19139", "6.51352", "কর্শেনব্রোইচ", "DE", "ইউরোপ/বার্লিন"),
("52.2", "8.63333", "কির্চলেঙ্গার্ন", "DE", "ইউরোপ/বার্লিন"),
("50.23019", "8.77155", "কারবেন", "DE", "ইউরোপ/বার্লিন"),
("50.09019", "8.4493", "হফহেইম অ্যাম টাউনাস", "DE", "ইউরোপ/বার্লিন"),
("52.61131", "13.31783", "হার্মসডর্ফ", "DE", "ইউরোপ/বার্লিন"),
("48.35149", "8.96317", "হেচিংজেন", "DE", "ইউরোপ/বার্লিন"),
("53.63333", "9.85", "হালস্টেনবেক", "DE", "ইউরোপ/বার্লিন"),
("52.21099", "7.02238", "গ্রনাউ", "DE", "ইউরোপ/বার্লিন"),
("52.47774", "10.5511", "গিফহর্ন", "DE", "ইউরোপ/বার্লিন"),
("48.06919", "11.37703", "গাটিং", "DE", "ইউরোপ/বার্লিন"),
("48.35693", "10.98461", "ফ্রাইডবার্গ", "DE", "ইউরোপ/বার্লিন"),
("51.168", "7.973", "ফিনেনট্রপ", "DE", "ইউরোপ/বার্লিন"),
("49.13645", "8.91229", "এপিংজেন", "DE", "ইউরোপ/বার্লিন"),
("48.28259", "9.72749", "এহিংগেন", "DE", "ইউরোপ/বার্লিন"),
("52.4581", "13.28702", "ডাহলেম", "DE", "ইউরোপ/বার্লিন"),
("51.08468", "7.11393", "বার্শেইড", "DE", "ইউরোপ/বার্লিন"),
("49.03685", "8.70745", "ব্রেটেন", "DE", "ইউরোপ/বার্লিন"),
("49.68369", "8.61839", "বেনশেইম", "DE", "ইউরোপ/বার্লিন"),
("53.94313", "10.30215", "ব্যাড সেজেবার্গ", "DE", "ইউরোপ/বার্লিন"),
("50.64336", "7.2278", "খারাপ হোনেফ", "DE", "ইউরোপ/বার্লিন"),
("49.97704", "9.15214", "আসকাফেনবার্গ", "DE", "ইউরোপ/বার্লিন"),
("48.21644", "9.02596", "আলবস্ট্যাড", "DE", "ইউরোপ/বার্লিন"),
("52.53048", "13.29371", "শার্লটেনবার্গ-নর্ড", "DE", "ইউরোপ/বার্লিন"),
("53.6052", "10.03988", "বারম্বেক-নর্ড", "DE", "ইউরোপ/বার্লিন"),
("11.15583", "42.7125", "আলি সাবিহ", "DJ", "আফ্রিকা/জিবুতি"),
("55.67938", "12.53463", "ফ্রেডেরিকসবার্গ", "DK", "ইউরোপ/কোপেনহেগেন"),
("18.20854", "-71.10077", "সান্তা ক্রুজ ডি বারাহোনা", "DO", "আমেরিকা/সান্টো_ডোমিঙ্গো"),
("36.76639", "3.47717", "বউমারদাস", "DZ", "আফ্রিকা/আলজিয়ার্স"),
("36.72544", "3.55665", "থেনিয়া", "DZ", "আফ্রিকা/আলজিয়ার্স"),
("34.15429", "3.50309", "মেসাদ", "DZ", "আফ্রিকা/আলজিয়ার্স"),
("35.21222", "2.31889", "কসার চেল্লালা", "DZ", "আফ্রিকা/আলজিয়ার্স"),
("35.06544", "1.04945", "ফ্রেন্ডা", "DZ", "আফ্রিকা/আলজিয়ার্স"),
("36.06386", "4.62744", "এল আচির", "DZ", "আফ্রিকা/আলজিয়ার্স"),
("36.76775", "2.95924", "চেরাগা", "DZ", "আফ্রিকা/আলজিয়ার্স"),
("36.27462", "4.85668", "বোর্ডজ জেমুরা", "DZ", "আফ্রিকা/আলজিয়ার্স"),
("36.61954", "4.08282", "বেনি দুআলা", "DZ", "আফ্রিকা/আলজিয়ার্স"),
("-2.13404", "-79.59415", "মিলাগ্রো", "EC", "আমেরিকা/গুয়াকিল"),
("-2.90055", "-79.00453", "কুয়েনকা", "EC", "আমেরিকা/গুয়াকিল"),
("59.37722", "28.19028", "নারভা", "EE", "ইউরোপ/টালিন"),
("26.67319", "31.4976", "জুহায়নাহ", "EG", "আফ্রিকা/কায়রো"),
("31.20176", "29.91582", "আলেকজান্দ্রিয়া", "EG", "আফ্রিকা/কায়রো"),
("39.96348", "-4.83076", "তালাভেরা দে লা রেইনা", "ES", "ইউরোপ/মাদ্রিদ"),
("37.35813", "-6.03731", "সান জুয়ান দে আজনালফারচে", "ES", "ইউরোপ/মাদ্রিদ"),
("38.68712", "-4.10734", "পুয়ের্টোলানো", "ES", "ইউরোপ/মাদ্রিদ"),
("38.38479", "-0.76773", "নভেলদা", "ES", "ইউরোপ/মাদ্রিদ"),
("27.76056", "-15.58602", "মাসপালোমাস", "ES", "আটলান্টিক/ক্যানারি"),
("38.47917", "-1.325", "জুমিল্লা", "ES", "ইউরোপ/মাদ্রিদ"),
("38.96667", "-0.18333", "গান্ডিয়া", "ES", "ইউরোপ/মাদ্রিদ"),
("38.10558", "-1.86343", "কারাভাকা", "ES", "ইউরোপ/মাদ্রিদ"),
("37.49073", "-2.77259", "বাজা", "ES", "ইউরোপ/মাদ্রিদ"),
("42.64685", "-5.55835", "ভিলাকিলামব্রে", "ES", "ইউরোপ/মাদ্রিদ"),
("42.06166", "-1.60452", "টুডেলা", "ES", "ইউরোপ/মাদ্রিদ"),
("40.42386", "-3.53261", "সান ফার্নান্দো ডি হেনারেস", "ES", "ইউরোপ/মাদ্রিদ"),
("41.15612", "1.10687", "রিউস", "ES", "ইউরোপ/মাদ্রিদ"),
("41.91738", "3.1631", "প্যালাফ্রুগেল", "ES", "ইউরোপ/মাদ্রিদ"),
("43.32686", "-2.98884", "লিওয়া", "ES", "ইউরোপ/মাদ্রিদ"),
("43.31667", "-2.68333", "গেরনিকা-লুমো", "ES", "ইউরোপ/মাদ্রিদ"),
("43.48961", "-8.2194", "ফেরল", "ES", "ইউরোপ/মাদ্রিদ"),
("41.63976", "2.35739", "কার্ডেডিউ", "ES", "ইউরোপ/মাদ্রিদ"),
("40.70995", "0.57856", "অ্যাম্পোস্টা", "ES", "ইউরোপ/মাদ্রিদ"),
("37.13548", "-3.67029", "লাস গ্যাবিয়াস", "ES", "ইউরোপ/মাদ্রিদ"),
("42.8139", "-1.64295", "সেগুন্ডো এনসানচে", "ES", "ইউরোপ/মাদ্রিদ"),
("41.41204", "2.18247", "এল ক্যাম্প দে ল আর্পা দেল ক্লট", "ES", "ইউরোপ/মাদ্রিদ"),
("11.85", "38.01667", "ডেব্রে তাবর", "ET", "আফ্রিকা/আদিস_আবাবা"),
("6.03333", "37.55", "আরবা মিঞ্চ", "ET", "আফ্রিকা/আদিস_আবাবা"),
("65.84811", "24.14662", "টর্নিও", "FI", "ইউরোপ/হেলসিঙ্কি"),
("60.18427", "24.95034", "কালিও", "FI", "ইউরোপ/হেলসিঙ্কি"),
("60.2052", "24.6522", "এসপু", "FI", "ইউরোপ/হেলসিঙ্কি"),
("45.51667", "4.86667", "ভিয়েন", "FR", "ইউরোপ/প্যারিস"),
("44.92801", "4.8951", "ভ্যালেন্স", "FR", "ইউরোপ/প্যারিস"),
("44.80477", "-0.59543", "প্রতিভা", "FR", "ইউরোপ/প্যারিস"),
("48.77644", "2.29026", "স্কাউক্স", "FR", "ইউরোপ/প্যারিস"),
("50.75", "2.25", "সন্ত-ওমর", "FR", "ইউরোপ/প্যারিস"),
("45.69558", "4.7934", "সেন্ট-জেনিস-লাভাল", "FR", "ইউরোপ/প্যারিস"),
("48.8765", "2.18967", "রুয়েল-মালমাইসন", "FR", "ইউরোপ/প্যারিস"),
("48", "-4.1", "কুইম্পার", "FR", "ইউরোপ/প্যারিস"),
("43.11667", "1.6", "পামিয়ার্স", "FR", "ইউরোপ/প্যারিস"),
("46.32313", "-0.45877", "নিওর্ট", "FR", "ইউরোপ/প্যারিস"),
("43.61092", "3.87723", "মন্টপেলিয়ার", "FR", "ইউরোপ/প্যারিস"),
("48.98333", "2.61667", "মিত্রি-মরি", "FR", "ইউরোপ/প্যারিস"),
("48.86667", "2.08333", "মারলি-লে-রোই", "FR", "ইউরোপ/প্যারিস"),
("46.67535", "5.55575", "লন্স-লে-সাউনিয়ার", "FR", "ইউরোপ/প্যারিস"),
("43.32393", "5.4584", "লেস অলিভস", "FR", "ইউরোপ/প্যারিস"),
("48.8222", "2.12213", "লে চেসনে", "FR", "ইউরোপ/প্যারিস"),
("48.90472", "2.2469", "লা গ্যারেনে-কলম্বস", "FR", "ইউরোপ/প্যারিস"),
("48.98994", "2.1699", "হার্বলে", "FR", "ইউরোপ/প্যারিস"),
("48.98693", "2.44892", "গোনেসে", "FR", "ইউরোপ/প্যারিস"),
("48.79325", "2.29275", "ফন্টেনাই-অক্স-রোসেস", "FR", "ইউরোপ/প্যারিস"),
("49.28669", "1.00288", "এলবেউফ", "FR", "ইউরোপ/প্যারিস"),
("43.71032", "-1.05366", "ড্যাক্স", "FR", "ইউরোপ/প্যারিস"),
("43.61058", "1.33467", "কলোমিয়ার্স", "FR", "ইউরোপ/প্যারিস"),
("43.83125", "5.03586", "ক্যাভিলন", "FR", "ইউরোপ/প্যারিস"),
("45.73333", "4.91667", "ব্রন", "FR", "ইউরোপ/প্যারিস"),
("48.90982", "2.45012", "ববিগনি", "FR", "ইউরোপ/প্যারিস"),
("48.77275", "5.16108", "বার-লে-ডুক", "FR", "ইউরোপ/প্যারিস"),
("43.67681", "4.63031", "আর্লস", "FR", "ইউরোপ/প্যারিস"),
("41.91886", "8.73812", "আজাচিও", "FR", "ইউরোপ/প্যারিস"),
("43.2907", "5.4384", "মারসেইল 11", "FR", "ইউরোপ/প্যারিস"),
("-1.63333", "13.58357", "ফ্রান্সভিল", "GA", "আফ্রিকা/লিব্রেভিল"),
("53.19146", "-2.52398", "উইনসফোর্ড", "GB", "ইউরোপ/লন্ডন"),
("51.26", "-2.1875", "ওয়েস্টবেরি", "GB", "ইউরোপ/লন্ডন"),
("51.84819", "1.26738", "ওয়ালটন-অন-দ্য-নেজ", "GB", "ইউরোপ/লন্ডন"),
("52.41667", "0.75", "থেটফোর্ড", "GB", "ইউরোপ/লন্ডন"),
("51.39323", "0.47713", "স্ট্রুড", "GB", "ইউরোপ/লন্ডন"),
("50.79205", "-1.08593", "দক্ষিণ সাগর", "GB", "ইউরোপ/লন্ডন"),
("53.78333", "-1.06667", "সেলবি", "GB", "ইউরোপ/লন্ডন"),
("55.82885", "-4.21376", "রাদারগ্লেন", "GB", "ইউরোপ/লন্ডন"),
("53.00974", "-3.05814", "রোসলানারক্রুগগ", "GB", "ইউরোপ/লন্ডন"),
("53.83333", "-2.98333", "পল্টন-লে-ফিল্ড", "GB", "ইউরোপ/লন্ডন"),
("50.11861", "-5.53715", "পেনজান্স", "GB", "ইউরোপ/লন্ডন"),
("50.82882", "-0.32247", "ল্যান্সিং", "GB", "ইউরোপ/লন্ডন"),
("51.40148", "-1.32471", "নিউবেরি", "GB", "ইউরোপ/লন্ডন"),
("53.49389", "-1.29243", "মেক্সবরো", "GB", "ইউরোপ/লন্ডন"),
("50.75767", "-1.5443", "লিমিংটন", "GB", "ইউরোপ/লন্ডন"),
("53.69786", "-2.68758", "লেল্যান্ড", "GB", "ইউরোপ/লন্ডন"),
("53.7446", "-0.33525", "হাল উপর কিংসটন", "GB", "ইউরোপ/লন্ডন"),
("57.47908", "-4.22398", "ইনভারনেস", "GB", "ইউরোপ/লন্ডন"),
("51.62907", "-0.74934", "হাই ওয়াইকম্ব", "GB", "ইউরোপ/লন্ডন"),
("51.38673", "0.30367", "হার্টলি", "GB", "ইউরোপ/লন্ডন"),
("52.66277", "-2.01111", "গ্রেট উইরলি", "GB", "ইউরোপ/লন্ডন"),
("53.38333", "-0.76667", "গেইনসবরো", "GB", "ইউরোপ/লন্ডন"),
("50.7236", "-3.52751", "এক্সেটার", "GB", "ইউরোপ/লন্ডন"),
("52.68333", "0.93333", "ইস্ট ডেরেহাম", "GB", "ইউরোপ/লন্ডন"),
("51.35084", "-1.99421", "ডিভাইস", "GB", "ইউরোপ/লন্ডন"),
("50.76306", "-1.29772", "গরু", "GB", "ইউরোপ/লন্ডন"),
("51.78967", "1.15597", "ক্ল্যাকটন-অন-সি", "GB", "ইউরোপ/লন্ডন"),
("53.46506", "-1.47217", "চ্যাপলটাউন", "GB", "ইউরোপ/লন্ডন"),
("51.64316", "-0.36053", "বুশে", "GB", "ইউরোপ/লন্ডন"),
("52.48173", "-2.12139", "ব্রিয়ারলি হিল", "GB", "ইউরোপ/লন্ডন"),
("53.81667", "-3.05", "ব্ল্যাকপুল", "GB", "ইউরোপ/লন্ডন"),
("53.0233", "-1.48119", "বেলপার", "GB", "ইউরোপ/লন্ডন"),
("51.65", "-0.2", "বারনেট", "GB", "ইউরোপ/লন্ডন"),
("56.56317", "-2.58736", "আরব্রোথ", "GB", "ইউরোপ/লন্ডন"),
("57.14369", "-2.09814", "আবারডিন", "GB", "ইউরোপ/লন্ডন"),
("51.39148", "-0.29825", "সারবিটন", "GB", "ইউরোপ/লন্ডন"),
("51.42708", "-0.91979", "লোয়ার আর্লি", "GB", "ইউরোপ/লন্ডন"),
("55.82737", "-4.0573", "ভিউপার্ক", "GB", "ইউরোপ/লন্ডন"),
("41.82143", "41.77921", "কবুলেতি", "GE", "এশিয়া/টিবিলিসি"),
("5.30383", "-1.98956", "তারকওয়া", "GH", "আফ্রিকা/আকরা"),
("7.06273", "-1.4001", "ম্যাম্পং", "GH", "আফ্রিকা/আকরা"),
("6.46346", "-2.31938", "বিবিয়ানী", "GH", "আফ্রিকা/আকরা"),
("13.56667", "-15.6", "ফরাফেন্নি", "GM", "আফ্রিকা/বানজুল"),
("9.535", "-13.68778", "ক্যামেয়েন", "GN", "আফ্রিকা/কোনাক্রি"),
("14.93333", "-91.11667", "চিচিকাস্টেনাঙ্গো", "GT", "আমেরিকা/গুয়েতেমালা"),
("22.37066", "114.10479", "সুয়েন ওয়ান", "HK", "এশিয়া/হংকং"),
("15.48131", "-86.57415", "ওলানচিটো", "HN", "আমেরিকা/টেগুসিগালপা"),
("43.50891", "16.43915", "বিভক্ত", "HR", "ইউরোপ/জাগরেব"),
("18.65297", "-72.09391", "থমাজেউ", "HT", "আমেরিকা/পোর্ট-অ-প্রিন্স"),
("18.57677", "-72.22625", "কোরিস-দেস-বুকেটস", "HT", "আমেরিকা/পোর্ট-অ-প্রিন্স"),
("3.3285", "99.1625", "তেবিংটিংগি", "ID", "এশিয়া/জাকার্তা"),
("3.7278", "98.6738", "লাবুহান ডেলি", "ID", "এশিয়া/জাকার্তা"),
("-7.51611", "109.05389", "ওয়াঙ্গন", "ID", "এশিয়া/জাকার্তা"),
("3.31332", "117.59152", "তারকান", "ID", "এশিয়া/মাকাসার"),
("-6.91806", "106.92667", "সুকাবুমি", "ID", "এশিয়া/জাকার্তা"),
("-1.26424", "104.09701", "সিম্পাং", "ID", "এশিয়া/জাকার্তা"),
("-7.0981", "109.3243", "রান্দুডংকাল", "ID", "এশিয়া/জাকার্তা"),
("0.51667", "101.44167", "পেকানবারু", "ID", "এশিয়া/জাকার্তা"),
("-7.01833", "107.60389", "পামেউং পিক", "ID", "এশিয়া/জাকার্তা"),
("-8.43333", "114.33333", "মুনকার", "ID", "এশিয়া/জাকার্তা"),
("-3.5403", "118.9707", "মজেনে", "ID", "এশিয়া/মাকাসার"),
("-6.8048", "110.8405", "কুদুস", "ID", "এশিয়া/জাকার্তা"),
("-7.81667", "112.01667", "কেদিরি", "ID", "এশিয়া/জাকার্তা"),
("-1.6", "103.61667", "জাম্বি সিটি", "ID", "এশিয়া/জাকার্তা"),
("-7.57897", "112.23109", "দিউইক", "ID", "এশিয়া/জাকার্তা"),
("-6.48167", "106.85417", "সিবিনং", "ID", "এশিয়া/জাকার্তা"),
("-7.73379", "113.69785", "বেসুকি", "ID", "এশিয়া/জাকার্তা"),
("-1.26753", "116.82887", "বালিকপাপন", "ID", "এশিয়া/মাকাসার"),
("-7.54972", "110.71639", "এনগেমপ্লাক", "ID", "এশিয়া/জাকার্তা"),
("53.53333", "-7.35", "এন মুইলিয়ান জিকার", "IE", "ইউরোপ/ডাবলিন"),
("53.43333", "-7.95", "অ্যাথলোন", "IE", "ইউরোপ/ডাবলিন"),
("31.92923", "34.86563", "রমলা", "IL", "এশিয়া/জেরুজালেম"),
("32.05971", "34.8732", "গনেই টিকভা", "IL", "এশিয়া/জেরুজালেম"),
("31.39547", "34.75699", "রাহাত", "IL", "এশিয়া/জেরুজালেম"),
("18.87813", "72.93924", "উরান", "IN", "এশিয়া/কলকাতা"),
("10.58806", "77.24779", "উদুমালাইপেত্তাই", "IN", "এশিয়া/কলকাতা"),
("9.82564", "78.25795", "তিরুপুবনম", "IN", "এশিয়া/কলকাতা"),
("25.49043", "85.94001", "তেঘরা", "IN", "এশিয়া/কলকাতা"),
("12.04161", "75.35927", "তালিপারম্বা", "IN", "এশিয়া/কলকাতা"),
("26.11527", "86.59509", "সুপল", "IN", "এশিয়া/কলকাতা"),
("34.08565", "74.80555", "শ্রীনগর", "IN", "এশিয়া/কলকাতা"),
("25.92493", "73.66633", "সোজাত", "IN", "এশিয়া/কলকাতা"),
("14.62072", "74.83554", "সিরসি", "IN", "এশিয়া/কলকাতা"),
("25.13915", "73.06784", "শেওগঞ্জ", "IN", "এশিয়া/কলকাতা"),
("11.50526", "77.23826", "সত্যমঙ্গলম", "IN", "এশিয়া/কলকাতা"),
("21.46527", "83.97573", "সম্বলপুর", "IN", "এশিয়া/কলকাতা"),
("25.87498", "86.59611", "সহরসা", "IN", "এশিয়া/কলকাতা"),
("12.95629", "78.27539", "রবার্টসনপেট", "IN", "এশিয়া/কলকাতা"),
("26.44931", "91.61356", "রঙ্গিয়া", "IN", "এশিয়া/কলকাতা"),
("33.37526", "74.3092", "রাজাওরি", "IN", "এশিয়া/কলকাতা"),
("24.81757", "84.63445", "রফিগঞ্জ", "IN", "এশিয়া/কলকাতা"),
("18.51957", "73.85535", "পুনে", "IN", "এশিয়া/কলকাতা"),
("11.93381", "79.82979", "পুদুচেরি", "IN", "এশিয়া/কলকাতা"),
("28.71271", "77.656", "পিলখুয়া", "IN", "এশিয়া/কলকাতা"),
("10.12268", "77.54372", "পেরিয়াকুলাম", "IN", "এশিয়া/কলকাতা"),
("31.28092", "74.85849", "পট্টি", "IN", "এশিয়া/কলকাতা"),
("20.88098", "75.11937", "পরোলা", "IN", "এশিয়া/কলকাতা"),
("23.07492", "88.28637", "পান্ডুয়া", "IN", "এশিয়া/কলকাতা"),
("18.18158", "76.03889", "ওসমানবাদ", "IN", "এশিয়া/কলকাতা"),
("25.6439", "77.9129", "নারওয়ার", "IN", "এশিয়া/কলকাতা"),
("30.81383", "75.16878", "মোগা", "IN", "এশিয়া/কলকাতা"),
("28.98002", "77.70636", "মিরাট", "IN", "এশিয়া/কলকাতা"),
("11.12018", "76.11996", "মঞ্জেরি", "IN", "এশিয়া/কলকাতা"),
("30.21121", "74.4818", "মালাউত", "IN", "এশিয়া/কলকাতা"),
("25.92127", "86.79271", "মধীপুরা", "IN", "এশিয়া/কলকাতা"),
("24.05979", "77.40858", "লেটরি", "IN", "এশিয়া/কলকাতা"),
("21.34222", "71.30633", "কুন্ডলা", "IN", "এশিয়া/কলকাতা"),
("22.75218", "72.68533", "খেদা", "IN", "এশিয়া/কলকাতা"),
("23.1959", "86.51499", "কেন্দা", "IN", "এশিয়া/কলকাতা"),
("29.21399", "78.95693", "কাশিপুর", "IN", "এশিয়া/কলকাতা"),
("11.00599", "77.5609", "কাঙ্গায়ম", "IN", "এশিয়া/কলকাতা"),
("22.88783", "84.13864", "যশপুরনগর", "IN", "এশিয়া/কলকাতা"),
("26.2649", "81.54855", "যাইস", "IN", "এশিয়া/কলকাতা"),
("16.06213", "76.0586", "হুংগুন্ড", "IN", "এশিয়া/কলকাতা"),
("29.22254", "79.5286", "হলদওয়ানি", "IN", "এশিয়া/কলকাতা"),
("26.76628", "83.36889", "গোরখপুর", "IN", "এশিয়া/কলকাতা"),
("12.25282", "79.41727", "জিঞ্জি", "IN", "এশিয়া/কলকাতা"),
("21.53889", "71.57737", "গড়িয়াধর", "IN", "এশিয়া/কলকাতা"),
("15.73628", "75.96976", "গজেন্দ্রগড়", "IN", "এশিয়া/কলকাতা"),
("17.54907", "82.85749", "এলামঞ্চিলি", "IN", "এশিয়া/কলকাতা"),
("19.21667", "73.08333", "ডম্বিভলি", "IN", "এশিয়া/কলকাতা"),
("22.19303", "88.18466", "ডায়মন্ড হারবার", "IN", "এশিয়া/কলকাতা"),
("12.1277", "78.15794", "ধর্মপুরী", "IN", "এশিয়া/কলকাতা"),
("25.75728", "75.37991", "দেওলি", "IN", "এশিয়া/কলকাতা"),
("14.46693", "75.92694", "দাভাঙ্গেরে", "IN", "এশিয়া/কলকাতা"),
("25.66795", "85.83636", "দলসিং সরাই", "IN", "এশিয়া/কলকাতা"),
("15.5439", "73.7553", "ক্যালাঙ্গুট", "IN", "এশিয়া/কলকাতা"),
("27.9247", "78.40102", "ছারা", "IN", "এশিয়া/কলকাতা"),
("32.55531", "76.12647", "চাম্বা", "IN", "এশিয়া/কলকাতা"),
("20.88197", "85.83334", "ভুবন", "IN", "এশিয়া/কলকাতা"),
("19.30157", "72.85107", "ভায়ান্দর", "IN", "এশিয়া/কলকাতা"),
("15.45144", "78.14797", "বেতামেরলা", "IN", "এশিয়া/কলকাতা"),
("26.32293", "91.00632", "বারপেটা", "IN", "এশিয়া/কলকাতা"),
("28.92694", "78.23456", "বাছরাও", "IN", "এশিয়া/কলকাতা"),
("21.59983", "71.21169", "আমরেলি", "IN", "এশিয়া/কলকাতা"),
("10.10649", "76.35484", "সর্বদা", "IN", "এশিয়া/কলকাতা"),
("24.41288", "76.56719", "আকলেরা", "IN", "এশিয়া/কলকাতা"),
("23.49668", "86.68363", "আদ্রা", "IN", "এশিয়া/কলকাতা"),
("22.4711", "88.1453", "পূজালি", "IN", "এশিয়া/কলকাতা"),
("22.10194", "85.37752", "বারবিল", "IN", "এশিয়া/কলকাতা"),
("17.34769", "78.55757", "লাল বাহাদুর নগর", "IN", "এশিয়া/কলকাতা"),
("23.18", "88.58", "আইস্তালা", "IN", "এশিয়া/কলকাতা"),
("9.57046", "76.32756", "কালাভুর", "IN", "এশিয়া/কলকাতা"),
("32.61603", "44.02488", "কারবালা", "IQ", "এশিয়া/বাগদাদ"),
("35.6803", "51.0193", "শাহরে জাদিদে আন্দিসেহ", "IR", "এশিয়া/তেহরান"),
("36.64852", "51.49621", "নওশহর", "IR", "এশিয়া/তেহরান"),
("33.14447", "47.3799", "দারেহ শাহর", "IR", "এশিয়া/তেহরান"),
("33.86419", "48.26258", "আলেশতার", "IR", "এশিয়া/তেহরান"),
("32.65246", "51.67462", "ইসফাহান", "IR", "এশিয়া/তেহরান"),
("38.07789", "13.44275", "ভিলাবাতে", "IT", "ইউরোপ/রোম"),
("36.92574", "14.72443", "রাগুসা", "IT", "ইউরোপ/রোম"),
("37.51803", "15.00913", "মিস্টারবিয়ানকো", "IT", "ইউরোপ/রোম"),
("37.49223", "15.07041", "ক্যাটানিয়া", "IT", "ইউরোপ/রোম"),
("37.31065", "13.57661", "Agrigento", "IT", "ইউরোপ/রোম"),
("43.78956", "7.60872", "ভেন্টিমিগ্লিয়া", "IT", "ইউরোপ/রোম"),
("44.89784", "8.86374", "টরটোনা", "IT", "ইউরোপ/রোম"),
("40.87329", "14.43865", "সোমা ভেসুভিয়ানা", "IT", "ইউরোপ/রোম"),
("40.72586", "8.55552", "সাসারী", "IT", "ইউরোপ/রোম"),
("45.39402", "9.29109", "সান গিউলিয়ানো মিলানিস", "IT", "ইউরোপ/রোম"),
("42.67164", "14.01481", "রোসেটো দেগলি অ্যাব্রুজি", "IT", "ইউরোপ/রোম"),
("45.78071", "12.84052", "পোর্টোগুয়ারো", "IT", "ইউরোপ/রোম"),
("43.1122", "12.38878", "পেরুজিয়া", "IT", "ইউরোপ/রোম"),
("45.44694", "8.62118", "নোভারা", "IT", "ইউরোপ/রোম"),
("45.50369", "11.412", "মন্টেচিও ম্যাগিওর-আল্টে সেকাতো", "IT", "ইউরোপ/রোম"),
("40.55851", "17.80774", "মেসেগনে", "IT", "ইউরোপ/রোম"),
("45.79377", "8.88104", "মালনাট", "IT", "ইউরোপ/রোম"),
("42.22718", "14.39024", "ল্যান্সিয়ানো", "IT", "ইউরোপ/রোম"),
("45.53069", "9.40531", "গরগনজোলা", "IT", "ইউরোপ/রোম"),
("40.53123", "17.58522", "ফ্রাঙ্কাভিলা ফন্টানা", "IT", "ইউরোপ/রোম"),
("43.62558", "13.39954", "ফ্যালকোনারা মারিত্তিমা", "IT", "ইউরোপ/রোম"),
("45.9836", "12.70038", "কর্ডেননস", "IT", "ইউরোপ/রোম"),
("44.31771", "9.32241", "চিয়াওয়ারি", "IT", "ইউরোপ/রোম"),
("44.59445", "11.04979", "ক্যাস্টেলফ্রাঙ্কো এমিলিয়া", "IT", "ইউরোপ/রোম"),
("41.55947", "14.66737", "ক্যাম্পোবাসো", "IT", "ইউরোপ/রোম"),
("41.24264", "16.50104", "বিসেগলি", "IT", "ইউরোপ/রোম"),
("41.72063", "12.6723", "আরিকিয়া", "IT", "ইউরোপ/রোম"),
("40.92298", "14.30935", "আফরাগোলা", "IT", "ইউরোপ/রোম"),
("40.87363", "14.34085", "ভোল্লা", "IT", "ইউরোপ/রোম"),
("18.00747", "-76.78319", "নিউ কিংস্টন", "JM", "আমেরিকা/জ্যামাইকা"),
("35.8", "137.23333", "জিরো", "JP", "এশিয়া/টোকিও"),
("34.61667", "135.6", "ইয়াও", "JP", "এশিয়া/টোকিও"),
("34.75856", "136.13108", "উয়েনো-ইবিসুমাচি", "JP", "এশিয়া/টোকিও"),
("34.81667", "137.4", "টোয়োকাওয়া", "JP", "এশিয়া/টোকিও"),
("34.4833", "136.84186", "তোবা", "JP", "এশিয়া/টোকিও"),
("36.65", "138.31667", "সুজাকা", "JP", "এশিয়া/টোকিও"),
("34.9", "137.5", "শিনশিরো", "JP", "এশিয়া/টোকিও"),
("35.06667", "135.21667", "সসায়ামা", "JP", "এশিয়া/টোকিও"),
("36", "139.55722", "ওকেগাওয়া", "JP", "এশিয়া/টোকিও"),
("36.53333", "136.61667", "নোনোইচি", "JP", "এশিয়া/টোকিও"),
("36.75965", "137.36215", "নামেরিকাওয়া", "JP", "এশিয়া/টোকিও"),
("35", "136.51667", "কোমোনো", "JP", "এশিয়া/টোকিও"),
("33.4425", "129.96972", "কারাতসু", "JP", "এশিয়া/টোকিও"),
("35.30889", "139.55028", "কামাকুরা", "JP", "এশিয়া/টোকিও"),
("34.25", "135.31667", "আইওয়াড", "JP", "এশিয়া/টোকিও"),
("35.82756", "137.95378", "ইনা", "JP", "এশিয়া/টোকিও"),
("33.3213", "130.94098", "হিতা", "JP", "এশিয়া/টোকিও"),
("36.24624", "139.07204", "ফুজিওকা", "JP", "এশিয়া/টোকিও"),
("36.33011", "138.89585", "আন্নাকা", "JP", "এশিয়া/টোকিও"),
("35.815", "139.6853", "শিমোতোদা", "JP", "এশিয়া/টোকিও"),
("39.46667", "141.95", "ইয়ামাদা", "JP", "এশিয়া/টোকিও"),
("37.56667", "140.11667", "ইনাওয়াশিরো", "JP", "এশিয়া/টোকিও"),
("43.82634", "144.09638", "মোটোমাচি", "JP", "এশিয়া/টোকিও"),
("44.35056", "142.45778", "নায়োরো", "JP", "এশিয়া/টোকিও"),
("41.77583", "140.73667", "হাকোদতে", "JP", "এশিয়া/টোকিও"),
("35.48199", "137.02166", "মিনোকামো", "JP", "এশিয়া/টোকিও"),
("0.03813", "36.36339", "ন্যাহুরুরু", "KE", "আফ্রিকা/নাইরোবি"),
("3.11988", "35.59642", "লোডওয়ার", "KE", "আফ্রিকা/নাইরোবি"),
("0.46005", "34.11169", "বুসিয়া", "KE", "আফ্রিকা/নাইরোবি"),
("40.93333", "73", "জালাল-আবাদ", "KG", "এশিয়া/বিশকেক"),
("13.65805", "102.56365", "পায় পায়ে", "KH", "এশিয়া/ফনম_পেন"),
("36.82167", "128.63083", "আইজেন", "KR", "এশিয়া/সিউল"),
("37.1759", "128.9889", "টি আওবায়েক", "KR", "এশিয়া/সিউল"),
("36.20389", "127.08472", "ননসান", "KR", "এশিয়া/সিউল"),
("37.65639", "126.835", "গোয়াং-সি", "KR", "এশিয়া/সিউল"),
("36.6009", "126.665", "হংসিওং", "KR", "এশিয়া/সিউল"),
("34.8825", "128.62667", "সিনহিওন", "KR", "এশিয়া/সিউল"),
("47.83333", "59.6", "শালকার", "KZ", "এশিয়া/আকতোব"),
("47.46657", "84.87144", "জায়সান", "KZ", "এশিয়া/আলমাটি"),
("44.85278", "65.50917", "কাইজি লাঁদা", "KZ", "এশিয়া/কিউজিলর্ডা"),
("43.41949", "77.0202", "ওটেজেন বাটিরা", "KZ", "এশিয়া/আলমাটি"),
("6.84019", "79.87116", "দেহিওয়ালা-মাউন্ট লাভিনিয়া", "LK", "এশিয়া/কলম্বো"),
("6.9909", "79.883", "হেন্ডালা", "LK", "এশিয়া/কলম্বো"),
("7.57944", "-8.53778", "নিউ ইয়েকেপা", "LR", "আফ্রিকা/মনরোভিয়া"),
("55.25", "24.75", "উকমার্জ", "LT", "ইউরোপ/ভিলনিয়াস"),
("54.39635", "24.04142", "অ্যালিটাস", "LT", "ইউরোপ/ভিলনিয়াস"),
("30.75545", "20.22625", "আজদাবিয়া", "LY", "আফ্রিকা/ত্রিপোলি"),
("24.96334", "10.18003", "ঘাট", "LY", "আফ্রিকা/ত্রিপোলি"),
("33.92866", "-6.90656", "তেমারা", "MA", "আফ্রিকা/ক্যাসাব্লাঙ্কা"),
("33.42585", "-6.00137", "ওলমেস", "MA", "আফ্রিকা/ক্যাসাব্লাঙ্কা"),
("34.31", "-2.16", "জেরদা", "MA", "আফ্রিকা/ক্যাসাব্লাঙ্কা"),
("33.43443", "-5.22126", "আজরু", "MA", "আফ্রিকা/ক্যাসাব্লাঙ্কা"),
("48.15659", "28.28489", "সোরোকা", "MD", "ইউরোপ/চিসিনাউ"),
("42.28639", "18.84", "বুদভা", "ME", "ইউরোপ/পডগোরিকা"),
("-22.9", "44.53333", "সাকারহা", "MG", "ভারতীয়/আন্তানানারিভো"),
("-21.15", "46.58333", "ইকালমাভনি", "MG", "ভারতীয়/আন্তানানারিভো"),
("-19.65", "47.31667", "অ্যান্টানিফোটসি", "MG", "ভারতীয়/আন্তানানারিভো"),
("-17.83333", "48.41667", "আম্বাতোন্দ্রাজাকা", "MG", "ভারতীয়/আন্তানানারিভো"),
("42", "21.32778", "সরজ", "MK", "ইউরোপ/স্কোপজে"),
("41.92361", "20.91361", "বোগোভিঞ্জে", "MK", "ইউরোপ/স্কোপজে"),
("12.74409", "-8.07257", "কাটি", "ML", "আফ্রিকা/বামাকো"),
("14.0823", "98.19151", "দাউই", "MM", "এশিয়া/ইয়াঙ্গুন"),
("16.68911", "98.50893", "ম্যাওয়াদি", "MM", "এশিয়া/ইয়াঙ্গুন"),
("17.30858", "97.01124", "কাইক্টো", "MM", "এশিয়া/ইয়াঙ্গুন"),
("47.90771", "106.88324", "উলান বাটোর", "MN", "এশিয়া/উলানবাতার"),
("14.67751", "-60.94228", "লে রবার্ট", "MQ", "আমেরিকা/মার্টিনিক"),
("35.89972", "14.51472", "ভ্যালেটা", "MT", "ইউরোপ/মাল্টা"),
("-13.7804", "34.4587", "সালিমা", "MW", "আফ্রিকা/ব্লান্টার"),
("16.75973", "-93.11308", "টাক্সটলা", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("19.8173", "-97.35992", "তেজিউটলান", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("21.28306", "-89.66123", "প্রগ্রেসো", "MX", "আমেরিকা/মেরিডা"),
("17.06542", "-96.72365", "ওক্সাকা", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("25.87972", "-97.50417", "হিরোইকা মাতামোরোস", "MX", "আমেরিকা/মাতামোরোস"),
("19.32932", "-98.1664", "কন্টলা", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("17.94979", "-94.91386", "আকাইউকান", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("19.32889", "-99.32556", "সান লরেঞ্জো অ্যাকোপিলকো", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("20.22816", "-103.5687", "জাকোয়ালকো ডি টরেস", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("20.74122", "-100.44843", "সান্তা রোজা জাউরেগুই", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("20.21322", "-100.88023", "সালভাতিয়েরা", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("19.64745", "-102.04897", "প্যারাচো দে ভার্দুজকো", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("20.28527", "-103.42897", "জোকোটেপেক", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("21.01858", "-101.2591", "গুয়ানাজুয়াতো", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("22.49396", "-105.36369", "অ্যাকাপোনেটা", "MX", "আমেরিকা/মাজাতলান"),
("19.04222", "-98.11889", "কাসা ব্লাঙ্কা", "MX", "আমেরিকা/মেক্সিকো_সিটি"),
("1.6561", "103.6032", "কুলাই", "MY", "এশিয়া/কুয়ালালামপুর"),
("5.90702", "116.10146", "ডংগনগন", "MY", "এশিয়া/কুচিং"),
("4.88441", "101.96857", "গুয়া মুসাং", "MY", "এশিয়া/কুয়ালালামপুর"),
("5.4709", "100.24529", "বাতু ফেরিংগি", "MY", "এশিয়া/কুয়ালালামপুর"),
("4.02219", "101.02083", "তেলুক ইন্তান", "MY", "এশিয়া/কুয়ালালামপুর"),
("1.6", "103.81667", "উলু তিরাম", "MY", "এশিয়া/কুয়ালালামপুর"),
("2.2139", "102.3278", "কাম্পুং আয়ের মোলেক", "MY", "এশিয়া/কুয়ালালামপুর"),
("-23.85972", "35.34722", "ম্যাক্সি", "MZ", "আফ্রিকা/মাপুতো"),
("-21.98333", "16.91667", "ওকাহান্দজা", "NA", "আফ্রিকা/উইন্ডহোক"),
("13.70727", "9.15013", "মিররিয়া", "NE", "আফ্রিকা/নিয়ামে"),
("4.92675", "6.26764", "ইয়েনাগোয়া", "NG", "আফ্রিকা/লাগোস"),
("6.8485", "3.64633", "শাগামু", "NG", "আফ্রিকা/লাগোস"),
("7.6", "4.18333", "ওলুপোনা", "NG", "আফ্রিকা/লাগোস"),
("6.15038", "6.83042", "এনকেপোর", "NG", "আফ্রিকা/লাগোস"),
("6.45407", "3.39467", "লাগোস", "NG", "আফ্রিকা/লাগোস"),
("9.58126", "8.2926", "কাফঞ্চন", "NG", "আফ্রিকা/লাগোস"),
("7.62789", "4.74161", "ইলেসা", "NG", "আফ্রিকা/লাগোস"),
("7.50251", "5.06258", "ইগবারা-ওডো", "NG", "আফ্রিকা/লাগোস"),
("11.86064", "9.0027", "গয়া", "NG", "আফ্রিকা/লাগোস"),
("7.65649", "4.92235", "ইফন-আলায়ে", "NG", "আফ্রিকা/লাগোস"),
("10.61285", "12.19458", "বিউ", "NG", "আফ্রিকা/লাগোস"),
("12.74482", "4.52514", "আর্গুঙ্গু", "NG", "আফ্রিকা/লাগোস"),
("13.48082", "-86.58208", "সোমোটো", "NI", "আমেরিকা/মানাগুয়া"),
("11.84962", "-86.19903", "জিনোটেপ", "NI", "আমেরিকা/মানাগুয়া"),
("52.09", "5.23333", "জিস্ট", "NL", "ইউরোপ/আমস্টারডাম"),
("51.65333", "5.2875", "ভুত", "NL", "ইউরোপ/আমস্টারডাম"),
("51.44889", "5.51978", "টোঙ্গেলরে", "NL", "ইউরোপ/আমস্টারডাম"),
("51.95838", "4.47124", "স্কিব্রেক", "NL", "ইউরোপ/আমস্টারডাম"),
("52.31333", "6.92917", "ওল্ডেনজাল", "NL", "ইউরোপ/আমস্টারডাম"),
("52.26083", "7.00417", "পরাজয়কারী", "NL", "ইউরোপ/আমস্টারডাম"),
("53.16167", "6.76111", "হুগেজান্ড", "NL", "ইউরোপ/আমস্টারডাম"),
("52.57583", "6.61944", "হার্ডেনবার্গ", "NL", "ইউরোপ/আমস্টারডাম"),
("52.71083", "5.74861", "এমেলরড", "NL", "ইউরোপ/আমস্টারডাম"),
("51.955", "5.22778", "কুলেমবুর্গ", "NL", "ইউরোপ/আমস্টারডাম"),
("52.14", "5.58472", "বারনেভেল্ড", "NL", "ইউরোপ/আমস্টারডাম"),
("68.79833", "16.54165", "হারস্তাদ", "NO", "ইউরোপ/অসলো"),
("-44.39672", "171.25364", "তিমারু", "NZ", "প্যাসিফিক/অকল্যান্ড"),
("-38.65333", "178.00417", "গিসবোর্ন", "NZ", "প্যাসিফিক/অকল্যান্ড"),
("8.88988", "-79.62603", "ভেরাক্রুজ", "PA", "আমেরিকা/পানামা"),
("9.15093", "-79.62098", "চিলিব্রে", "PA", "আমেরিকা/পানামা"),
("-3.74912", "-73.25383", "ইকুইটোস", "PE", "আমেরিকা/লিমা"),
("-16.25", "-69.08333", "ইয়ুনগুইয়ো", "PE", "আমেরিকা/লিমা"),
("-15.21194", "-75.11028", "মিনাস ডি মার্কোনা", "PE", "আমেরিকা/লিমা"),
("-11.94306", "-76.70944", "চসিকা", "PE", "আমেরিকা/লিমা"),
("-5.85746", "144.23058", "মাউন্ট হেগেন", "PG", "প্যাসিফিক/পোর্ট_মোরসবি"),
("6.33444", "124.95278", "টুপি", "PH", "এশিয়া/ম্যানিলা"),
("10.7375", "122.9666", "তালিসে", "PH", "এশিয়া/ম্যানিলা"),
("12.97389", "123.99333", "সরসোগন", "PH", "এশিয়া/ম্যানিলা"),
("9.3337", "122.8637", "সান্তা ক্যাটালিনা", "PH", "এশিয়া/ম্যানিলা"),
("12.35275", "121.06761", "সান জোসে", "PH", "এশিয়া/ম্যানিলা"),
("6.95194", "121.96361", "রেকোডো", "PH", "এশিয়া/ম্যানিলা"),
("14.66", "120.56528", "পিলার", "PH", "এশিয়া/ম্যানিলা"),
("10.20898", "123.758", "নাগা", "PH", "এশিয়া/ম্যানিলা"),
("12.37169", "123.62494", "মাসবাতে", "PH", "এশিয়া/ম্যানিলা"),
("16.0438", "120.4861", "মানোয়াগ", "PH", "এশিয়া/ম্যানিলা"),
("10.13361", "124.84472", "মাসিন", "PH", "এশিয়া/ম্যানিলা"),
("16.455", "120.5875", "লা ত্রিনিদাদ", "PH", "এশিয়া/ম্যানিলা"),
("9.6531", "124.3697", "জাগ্না", "PH", "এশিয়া/ম্যানিলা"),
("14.8361", "120.97844", "গুইয়ং", "PH", "এশিয়া/ম্যানিলা"),
("8.56697", "123.33471", "ডিপোলগ", "PH", "এশিয়া/ম্যানিলা"),
("10.31672", "123.89071", "সেবু সিটি", "PH", "এশিয়া/ম্যানিলা"),
("14.14989", "121.3152", "কলাউয়ান", "PH", "এশিয়া/ম্যানিলা"),
("15.72892", "120.57224", "বার্গোস", "PH", "এশিয়া/ম্যানিলা"),
("14.95472", "120.89694", "বালিউয়াগ", "PH", "এশিয়া/ম্যানিলা"),
("14.62578", "121.12251", "অ্যান্টিপোলো", "PH", "এশিয়া/ম্যানিলা"),
("27.52948", "68.75915", "খয়েরপুর মিরস", "PK", "এশিয়া/করাচি"),
("26.9423", "68.11759", "থারু শাহ", "PK", "এশিয়া/করাচি"),
("31.82539", "72.54064", "সিল্লানওয়ালি", "PK", "এশিয়া/করাচি"),
("31.71667", "73.38333", "সাংলা পাহাড়", "PK", "এশিয়া/করাচি"),
("30.29184", "71.67164", "কাদিরপুর রান", "PK", "এশিয়া/করাচি"),
("31.96258", "73.97117", "নৌশাহরা বিরকান", "PK", "এশিয়া/করাচি"),
("32.57756", "71.52847", "মিয়ানওয়ালি", "PK", "এশিয়া/করাচি"),
("27.55898", "68.21204", "লারকানা", "PK", "এশিয়া/করাচি"),
("30.46907", "70.96699", "কোট আদ্দু", "PK", "এশিয়া/করাচি"),
("30.76468", "74.12286", "কাঙ্গনপুর", "PK", "এশিয়া/করাচি"),
("25.95533", "68.88871", "ঝোল", "PK", "এশিয়া/করাচি"),
("29.69221", "72.54566", "হাসিলপুর", "PK", "এশিয়া/করাচি"),
("32.17629", "75.06583", "ফাজিলপুর", "PK", "এশিয়া/করাচি"),
("32.87533", "71.57118", "দাউদ খেলা", "PK", "এশিয়া/করাচি"),
("25.80565", "68.49143", "ভীত শাহ", "PK", "এশিয়া/করাচি"),
("29.38242", "70.91106", "আলিপুর", "PK", "এশিয়া/করাচি"),
("51.14942", "15.00835", "জগরজেলেক", "PL", "ইউরোপ/ওয়ারশ"),
("54.58048", "16.86194", "উসকা", "PL", "ইউরোপ/ওয়ারশ"),
("50.5107", "18.30056", "স্ট্রজেলস ওপোলস্কি", "PL", "ইউরোপ/ওয়ারশ"),
("54.60528", "18.34717", "রেদা", "PL", "ইউরোপ/ওয়ারশ"),
("50.20528", "19.27498", "জাওর্জনো", "PL", "ইউরোপ/ওয়ারশ"),
("50.86079", "17.4674", "ব্রজেগ", "PL", "ইউরোপ/ওয়ারশ"),
("18.42745", "-67.15407", "আগুয়াডিলা", "PR", "আমেরিকা/পুয়ের্তো_রিকো"),
("18.03496", "-66.8499", "ইয়াউকো", "PR", "আমেরিকা/পুয়ের্তো_রিকো"),
("31.78336", "35.23388", "পূর্ব জেরুজালেম", "PS", "এশিয়া/হেব্রন"),
("38.72706", "-9.24671", "কার্নাক্সাইড", "PT", "ইউরোপ/লিসবন"),
("37.08819", "-8.2503", "আলবুফেরা", "PT", "ইউরোপ/লিসবন"),
("41.20485", "-8.33147", "পারদেস", "PT", "ইউরোপ/লিসবন"),
("41.1053", "-7.32097", "কাস্টোয়াস", "PT", "ইউরোপ/লিসবন"),
("37.74615", "-25.66689", "পোন্তা দেলগাদা", "PT", "আটলান্টিক/অ্যাজোরস"),
("-20.88231", "55.4504", "সেন্ট-ডেনিস", "RE", "ভারতীয়/পুনর্মিলন"),
("44.43579", "26.01649", "সেক্টর 6", "RO", "ইউরোপ/বুখারেস্ট"),
("44.22639", "22.53083", "নেগোটিন", "RS", "ইউরোপ/বেলগ্রেড"),
("44.97639", "19.61222", "স্রেমস্কা মিত্রোভিকা", "RS", "ইউরোপ/বেলগ্রেড"),
("53.53395", "33.72798", "ঝুকভকা", "RU", "ইউরোপ/মস্কো"),
("46.7055", "38.2739", "ইয়েস্ক", "RU", "ইউরোপ/মস্কো"),
("44.98901", "38.94324", "ইয়াবলোনোভস্কি", "RU", "ইউরোপ/মস্কো"),
("56.03361", "35.96944", "ভোলোকোলামস্ক", "RU", "ইউরোপ/মস্কো"),
("57.97472", "33.2525", "ভালদিন", "RU", "ইউরোপ/মস্কো"),
("56.85836", "35.90057", "Tver", "RU", "ইউরোপ/মস্কো"),
("55.62047", "37.49338", "টাইপলি স্ট্যান", "RU", "ইউরোপ/মস্কো"),
("54.90083", "38.07083", "স্টুপিনো", "RU", "ইউরোপ/মস্কো"),
("55.63711", "37.38115", "সোল্টসেভো", "RU", "ইউরোপ/মস্কো"),
("59.80917", "30.38167", "শুশারী", "RU", "ইউরোপ/মস্কো"),
("64.5635", "39.8302", "সেভেরোডভিনস্ক", "RU", "ইউরোপ/মস্কো"),
("51.78771", "56.36091", "সর্কতাশ", "RU", "এশিয়া/ইয়েকাটেরিনবার্গ"),
("53.95278", "32.86389", "রোসলাভল", "RU", "ইউরোপ/মস্কো"),
("51.40944", "46.04833", "প্রিভোলজস্কি", "RU", "ইউরোপ/সারাতোভ"),
("61.78491", "34.34691", "পেট্রোজাভোডস্ক", "RU", "ইউরোপ/মস্কো"),
("53.37596", "51.3452", "Otradnyy", "RU", "ইউরোপ/সামারা"),
("54.48147", "53.47103", "অক্টিয়াব্রএস্কি", "RU", "এশিয়া/ইয়েকাটেরিনবার্গ"),
("43.96222", "43.63417", "নোভোপাভলভস্ক", "RU", "ইউরোপ/মস্কো"),
("53.53041", "43.67663", "নিঝনি লোমভ", "RU", "ইউরোপ/মস্কো"),
("55.38752", "36.73307", "নারো-ফমিনস্ক", "RU", "ইউরোপ/মস্কো"),
("50.06", "43.2379", "মিখাইলোভকা", "RU", "ইউরোপ/ভলগোগ্রাদ"),
("55.64776", "38.02486", "মালাখোভকা", "RU", "ইউরোপ/মস্কো"),
("55.85", "37.56667", "লিখবরী", "RU", "ইউরোপ/মস্কো"),
("51.4781", "57.3552", "কুভান্ডিক", "RU", "এশিয়া/ইয়েকাটেরিনবার্গ"),
("44.92934", "37.99117", "ক্রিমস্ক", "RU", "ইউরোপ/মস্কো"),
("54.03876", "43.91385", "কোভিলকিনো", "RU", "ইউরোপ/মস্কো"),
("60.02427", "30.28491", "কলোম্যাগি", "RU", "ইউরোপ/মস্কো"),
("53.93361", "37.92792", "কিরেয়েভস্ক", "RU", "ইউরোপ/মস্কো"),
("54.84444", "38.16694", "কাশিরা", "RU", "ইউরোপ/মস্কো"),
("58.7002", "59.4839", "কাচকানার", "RU", "এশিয়া/ইয়েকাটেরিনবার্গ"),
("43.35071", "46.10925", "গুডারমেস", "RU", "ইউরোপ/মস্কো"),
("57.30185", "39.85331", "গ্যাভ্রিলভ-ইয়াম", "RU", "ইউরোপ/মস্কো"),
("53.59782", "34.33825", "ডায়াটএকোভো", "RU", "ইউরোপ/মস্কো"),
("58.1908", "40.17171", "দানিলভ", "RU", "ইউরোপ/মস্কো"),
("42.819", "47.1192", "বুইনাকস্ক", "RU", "ইউরোপ/মস্কো"),
("53.77166", "38.12408", "বোগোরোডিটস্ক", "RU", "ইউরোপ/মস্কো"),
("54.39304", "53.26023", "বাভলি", "RU", "ইউরোপ/মস্কো"),
("55.39485", "43.83992", "আরজামাস", "RU", "ইউরোপ/মস্কো"),
("54.8421", "46.5813", "আলাটির", "RU", "ইউরোপ/মস্কো"),
("58.63667", "59.80222", "লেসনয়", "RU", "এশিয়া/ইয়েকাটেরিনবার্গ"),
("55.8736", "85.4265", "ইয়াশকিনো", "RU", "এশিয়া/নোভোকুজনেটস্ক"),
("58.04254", "65.27258", "তাভদা", "RU", "এশিয়া/ইয়েকাটেরিনবার্গ"),
("55.54028", "89.20083", "শারিপোভো", "RU", "এশিয়া/ক্রাসনোয়ারস্ক"),
("53.30972", "83.62389", "নোভোসিলিকাটনি", "RU", "এশিয়া/বার্নউল"),
("58.23583", "92.48278", "লেসোসিবিরস্ক", "RU", "এশিয়া/ক্রাসনোয়ারস্ক"),
("56.11281", "69.49015", "ইশিম", "RU", "এশিয়া/ইয়েকাটেরিনবার্গ"),
("56.9083", "60.8019", "বেরিওজভস্কি", "RU", "এশিয়া/ইয়েকাটেরিনবার্গ"),
("55.75556", "60.70278", "ওজারস্ক", "RU", "এশিয়া/ইয়েকাটেরিনবার্গ"),
("51.82721", "107.60627", "উলান-উদে", "RU", "এশিয়া/ইরকুটস্ক"),
("45.47885", "133.42825", "লেসোজাভোডস্ক", "RU", "এশিয়া/ভ্লাদিভোস্টক"),
("65.93381", "111.4834", "আয়খাল", "RU", "এশিয়া/ইয়াকুটস্ক"),
("53.14657", "140.72287", "নিকোলায়েভস্ক-অন-আমুরে", "RU", "এশিয়া/ভ্লাদিভোস্টক"),
("60.97944", "76.92421", "ইজলুচিনস্ক", "RU", "এশিয়া/ইয়েকাটেরিনবার্গ"),
("-1.9487", "30.4347", "রওয়ামাগানা", "RW", "আফ্রিকা/কিগালি"),
("27.0174", "49.62251", "আল জুবাইল", "SA", "এশিয়া/রিয়াদ"),
("11.8659", "34.3869", "আর রুসেরিস", "SD", "আফ্রিকা/খার্তুম"),
("61.72744", "17.10558", "হুডিক্সভাল", "SE", "ইউরোপ/স্টকহোম"),
("59.33333", "18.28333", "বু", "SE", "ইউরোপ/স্টকহোম"),
("48.8449", "17.22635", "স্কালিকা", "SK", "ইউরোপ/ব্র্যাটিস্লাভা"),
("48.43174", "17.8031", "হলোহোভেক", "SK", "ইউরোপ/ব্রাটিস্লাভা"),
("8.48714", "-13.2356", "ফ্রিটাউন", "SL", "আফ্রিকা/ফ্রিটাউন"),
("-0.35817", "42.54536", "কিসমায়ো", "SO", "আফ্রিকা/মোগাদিশু"),
("9.89206", "43.38531", "বাকি", "SO", "আফ্রিকা/মোগাদিশু"),
("13.73417", "-89.71472", "সোনজাকেট", "SV", "আমেরিকা/এল_সালভাদর"),
("13.70167", "-89.10944", "ইলোপাঙ্গো", "SV", "আমেরিকা/এল_সালভাদর"),
("34.5624", "38.28402", "তাদমুর", "SY", "এশিয়া/দামাস্কাস"),
("35.95664", "36.7138", "বিন্নিশ", "SY", "এশিয়া/দামাস্কাস"),
("12.18441", "18.69303", "মঙ্গো", "TD", "আফ্রিকা/এনজামেনা"),
("15.46063", "99.89166", "থাপ থান", "TH", "এশিয়া/ব্যাংকক"),
("8.43333", "99.96667", "নাখোঁ সি থামমারাত", "TH", "এশিয়া/ব্যাংকক"),
("13.51825", "99.95469", "ড্যামনোয়েন সাদুয়াক", "TH", "এশিয়া/ব্যাংকক"),
("15.79408", "104.1451", "ইয়াসোথন", "TH", "এশিয়া/ব্যাংকক"),
("6.25947", "102.05461", "তক বাই", "TH", "এশিয়া/ব্যাংকক"),
("16.0567", "103.65309", "রই এট", "TH", "এশিয়া/ব্যাংকক"),
("13.44581", "101.18445", "ফানাত নিখোম", "TH", "এশিয়া/ব্যাংকক"),
("13.8196", "100.04427", "নাখোঁ পথম", "TH", "এশিয়া/ব্যাংকক"),
("14.64056", "104.64992", "কাঁথারলক", "TH", "এশিয়া/ব্যাংকক"),
("15.58552", "102.42587", "বুয়া ইয়াই", "TH", "এশিয়া/ব্যাংকক"),
("14.37395", "100.48528", "ব্যাং ব্যান", "TH", "এশিয়া/ব্যাংকক"),
("38.55632", "69.01354", "ওয়াহদাত", "TJ", "এশিয়া/দুশানবে"),
("-8.99167", "125.21972", "মালিয়ানা", "TL", "এশিয়া/দিলি"),
("36.08497", "9.37082", "সিলিয়ানা", "TN", "আফ্রিকা/তিউনিস"),
("35.72917", "10.58082", "মসকেন", "TN", "আফ্রিকা/তিউনিস"),
("36.46917", "10.78222", "বেনী খিয়ার", "TN", "আফ্রিকা/তিউনিস"),
("37.16911", "10.03478", "এল আলিয়া", "TN", "আফ্রিকা/তিউনিস"),
("38.13708", "41.00817", "সিলভান", "TR", "ইউরোপ/ইস্তাম্বুল"),
("39.22493", "42.85693", "প্যাটনোস", "TR", "ইউরোপ/ইস্তাম্বুল"),
("37.31309", "40.74357", "মর্দিন", "TR", "ইউরোপ/ইস্তাম্বুল"),
("37.58105", "29.26639", "সেরিনহিসার", "TR", "ইউরোপ/ইস্তাম্বুল"),
("37.05944", "37.3825", "গাজিয়ানটেপ", "TR", "ইউরোপ/ইস্তাম্বুল"),
("39.59611", "27.02444", "এডরেমিট", "TR", "ইউরোপ/ইস্তাম্বুল"),
("39.12074", "27.18052", "বারগামা", "TR", "ইউরোপ/ইস্তাম্বুল"),
("38.37255", "34.02537", "অক্ষরে", "TR", "ইউরোপ/ইস্তাম্বুল"),
("40.98894", "28.67582", "ইয়াকুপলু", "TR", "ইউরোপ/ইস্তাম্বুল"),
("40.1675", "34.37389", "সুঙ্গুরলু", "TR", "ইউরোপ/ইস্তাম্বুল"),
("40.37528", "28.88222", "মুদান্যা", "TR", "ইউরোপ/ইস্তাম্বুল"),
("10.66668", "-61.51889", "স্পেনের বন্দর", "TT", "আমেরিকা/পোর্ট_অফ_স্পেন"),
("23.5654", "119.58627", "মাগং", "TW", "এশিয়া/তাইপেই"),
("-2.68333", "33", "উসাগর", "TZ", "আফ্রিকা/দার_এস_সালাম"),
("-4.06667", "37.73333", "একই", "TZ", "আফ্রিকা/দার_এস_সালাম"),
("-6.25", "38.66667", "এমভোমেরো", "TZ", "আফ্রিকা/দার_এস_সালাম"),
("-4.83", "29.65806", "মওয়ান্ডিগা", "TZ", "আফ্রিকা/দার_এস_সালাম"),
("-6.8", "39.25", "মাগোমেনি", "TZ", "আফ্রিকা/দার_এস_সালাম"),
("-7.60361", "37.00438", "কিডোদি", "TZ", "আফ্রিকা/দার_এস_সালাম"),
("-7.76667", "35.7", "ইরিঙ্গা", "TZ", "আফ্রিকা/দার_এস_সালাম"),
("-5.41667", "38.01667", "চনিকা", "TZ", "আফ্রিকা/দার_এস_সালাম"),
("-10.33333", "39.28333", "নিয়াগাও", "TZ", "আফ্রিকা/দার_এস_সালাম"),
("49.07866", "30.96755", "জেভেনিহোরোদকা", "UA", "ইউরোপ/কিয়েভ"),
("47.56494", "31.33078", "ভোজনেসেনস্ক", "UA", "ইউরোপ/কিয়েভ"),
("49.41029", "38.15035", "স্বাতভ", "UA", "ইউরোপ/জাপোরোজি"),
("50.18545", "27.06365", "শেপেটিভকা", "UA", "ইউরোপ/কিয়েভ"),
("47.48444", "36.25361", "পলোহী", "UA", "ইউরোপ/জাপোরোজি"),
("46.75451", "33.34864", "নোভা কাখোভকা", "UA", "ইউরোপ/কিয়েভ"),
("50.75932", "25.34244", "লুটস্ক", "UA", "ইউরোপ/কিয়েভ"),
("49.65186", "26.97253", "ক্র্যাসিলিভ", "UA", "ইউরোপ/কিয়েভ"),
("46.65581", "32.6178", "খেরসন", "UA", "ইউরোপ/কিয়েভ"),
("51.67822", "33.9162", "হলুখিভ", "UA", "ইউরোপ/কিয়েভ"),
("45.99194", "29.41824", "আর্টসিজ", "UA", "ইউরোপ/কিয়েভ"),
("2.41669", "30.98551", "পইধা", "UG", "আফ্রিকা/কাম্পালা"),
("3.27833", "32.88667", "কিটগাম", "UG", "আফ্রিকা/কাম্পালা"),
("3.02013", "30.91105", "আরুয়া", "UG", "আফ্রিকা/কাম্পালা"),
("33.45122", "-86.99666", "হুইটাউন", "US", "আমেরিকা/শিকাগো"),
("33.44872", "-86.78777", "ভেস্তাভিয়া পাহাড়", "US", "আমেরিকা/শিকাগো"),
("35.25064", "-91.73625", "সার্সি", "US", "আমেরিকা/শিকাগো"),
("26.68451", "-80.66756", "বেলে গ্লেড", "US", "আমেরিকা/নিউইয়র্ক"),
("28.54944", "-81.77285", "ক্লারমন্ট", "US", "আমেরিকা/নিউইয়র্ক"),
("28.90054", "-81.26367", "ডেল্টোনা", "US", "আমেরিকা/নিউইয়র্ক"),
("29.65163", "-82.32483", "গেইনসভিল", "US", "আমেরিকা/নিউইয়র্ক"),
("25.67927", "-80.31727", "কেন্ডাল", "US", "আমেরিকা/নিউইয়র্ক"),
("28.15112", "-82.46148", "লুটজ", "US", "আমেরিকা/নিউইয়র্ক"),
("26.2173", "-80.22588", "উত্তর লডারডেল", "US", "আমেরিকা/নিউইয়র্ক"),
("30.17746", "-81.38758", "পাম ভ্যালি", "US", "আমেরিকা/নিউইয়র্ক"),
("26.91756", "-82.07842", "পান্তা গোর্দা দ্বীপপুঞ্জ", "US", "আমেরিকা/নিউইয়র্ক"),
("27.71809", "-82.35176", "সান সিটি সেন্টার", "US", "আমেরিকা/নিউইয়র্ক"),
("27.09978", "-82.45426", "ভেনিস", "US", "আমেরিকা/নিউইয়র্ক"),
("34.06635", "-84.67837", "অ্যাকওয়ার্থ", "US", "আমেরিকা/নিউইয়র্ক"),
("32.54044", "-82.90375", "ডাবলিন", "US", "আমেরিকা/নিউইয়র্ক"),
("33.08014", "-83.2321", "মিলজেভিল", "US", "আমেরিকা/নিউইয়র্ক"),
("33.54428", "-84.23381", "স্টকব্রিজ", "US", "আমেরিকা/নিউইয়র্ক"),
("38.58894", "-89.99038", "ফেয়ারভিউ হাইটস", "US", "আমেরিকা/শিকাগো"),
("39.78504", "-85.76942", "গ্রিনফিল্ড", "US", "আমেরিকা/ইন্ডিয়ানা/ইন্ডিয়ানাপোলিস"),
("38.06084", "-97.92977", "হাচিনসন", "US", "আমেরিকা/শিকাগো"),
("39.08367", "-84.50855", "কভিংটন", "US", "আমেরিকা/নিউইয়র্ক"),
("36.61033", "-88.31476", "মারে", "US", "আমেরিকা/শিকাগো"),
("29.84576", "-90.10674", "এস্টেল", "US", "আমেরিকা/শিকাগো"),
("32.52515", "-93.75018", "শ্রেভপোর্ট", "US", "আমেরিকা/শিকাগো"),
("38.96372", "-76.99081", "চিলুম", "US", "আমেরিকা/নিউইয়র্ক"),
("38.70734", "-77.02303", "ফোর্ট ওয়াশিংটন", "US", "আমেরিকা/নিউইয়র্ক"),
("39.33427", "-76.43941", "মধ্য নদী", "US", "আমেরিকা/নিউইয়র্ক"),
("39.32011", "-76.51552", "রোজডেল", "US", "আমেরিকা/নিউইয়র্ক"),
("39.32288", "-76.72803", "উডলন", "US", "আমেরিকা/নিউইয়র্ক"),
("39.09112", "-94.41551", "স্বাধীনতা", "US", "আমেরিকা/শিকাগো"),
("37.95143", "-91.77127", "রোল্লা", "US", "আমেরিকা/শিকাগো"),
("33.41012", "-91.06177", "গ্রিনভিল", "US", "আমেরিকা/শিকাগো"),
("34.25807", "-88.70464", "টুপেলো", "US", "আমেরিকা/শিকাগো"),
("35.05266", "-78.87836", "ফয়েটভিল", "US", "আমেরিকা/নিউইয়র্ক"),
("34.25628", "-78.04471", "লেল্যান্ড", "US", "আমেরিকা/নিউইয়র্ক"),
("35.88264", "-80.08199", "থমাসভিল", "US", "আমেরিকা/নিউইয়র্ক"),
("39.71734", "-74.96933", "সিকলারভিল", "US", "আমেরিকা/নিউইয়র্ক"),
("39.43534", "-84.20299", "লেবানন", "US", "আমেরিকা/নিউইয়র্ক"),
("34.77453", "-96.67834", "আডা", "US", "আমেরিকা/শিকাগো"),
("35.74788", "-95.36969", "মুস্কোজি", "US", "আমেরিকা/শিকাগো"),
("39.96097", "-75.60804", "ওয়েস্ট চেস্টার", "US", "আমেরিকা/নিউইয়র্ক"),
("33.98154", "-81.23621", "লেক্সিংটন", "US", "আমেরিকা/নিউইয়র্ক"),
("36.02506", "-86.77917", "ব্রেন্টউড এস্টেটস", "US", "আমেরিকা/শিকাগো"),
("35.61452", "-88.81395", "জ্যাকসন", "US", "আমেরিকা/শিকাগো"),
("32.44874", "-99.73314", "অ্যাবিলিন", "US", "আমেরিকা/শিকাগো"),
("30.16688", "-96.39774", "ব্রেনহাম", "US", "আমেরিকা/শিকাগো"),
("31.12406", "-97.90308", "কপারাস কোভ", "US", "আমেরিকা/শিকাগো"),
("29.53885", "-95.44744", "ফ্রেসনো", "US", "আমেরিকা/শিকাগো"),
("30.5427", "-97.54667", "হুট্টো", "US", "আমেরিকা/শিকাগো"),
("32.5007", "-94.74049", "দূর দৃষ্টি", "US", "আমেরিকা/শিকাগো"),
("31.76212", "-95.63079", "ফিলিস্তিন", "US", "আমেরিকা/শিকাগো"),
("26.18924", "-98.15529", "সান জুয়ান", "US", "আমেরিকা/শিকাগো"),
("32.35126", "-95.30106", "টাইলার", "US", "আমেরিকা/শিকাগো"),
("37.52487", "-77.55777", "বন এয়ার", "US", "আমেরিকা/নিউইয়র্ক"),
("38.91817", "-78.19444", "ফ্রন্ট রয়্যাল", "US", "আমেরিকা/নিউইয়র্ক"),
("37.60876", "-77.37331", "মেকানিক্সভিল", "US", "আমেরিকা/নিউইয়র্ক"),
("39.00622", "-77.4286", "স্টার্লিং", "US", "আমেরিকা/নিউইয়র্ক"),
("39.45621", "-77.96389", "মার্টিনসবার্গ", "US", "আমেরিকা/নিউইয়র্ক"),
("41.27621", "-72.86843", "ইস্ট হ্যাভেন", "US", "আমেরিকা/নিউইয়র্ক"),
("41.14676", "-73.49484", "নতুন কেনান", "US", "আমেরিকা/নিউইয়র্ক"),
("41.55815", "-73.0515", "ওয়াটারবেরি", "US", "আমেরিকা/নিউইয়র্ক"),
("41.6764", "-91.58045", "কোরালভিল", "US", "আমেরিকা/শিকাগো"),
("41.57721", "-93.71133", "ওয়েস্ট ডেস মইনেস", "US", "আমেরিকা/শিকাগো"),
("41.15376", "-87.88754", "বারবোনাইস", "US", "আমেরিকা/শিকাগো"),
("42.24113", "-88.3162", "ক্রিস্টাল লেক", "US", "আমেরিকা/শিকাগো"),
("41.72059", "-87.70172", "এভারগ্রিন পার্ক", "US", "আমেরিকা/শিকাগো"),
("42.16808", "-88.42814", "হান্টলি", "US", "আমেরিকা/শিকাগো"),
("41.8542", "-87.66561", "লোয়ার ওয়েস্ট সাইড", "US", "আমেরিকা/শিকাগো"),
("41.80753", "-87.65644", "নতুন শহর", "US", "আমেরিকা/শিকাগো"),
("40.56754", "-89.64066", "পেকিন", "US", "আমেরিকা/শিকাগো"),
("41.84364", "-87.71255", "সাউথ লন্ডেল", "US", "আমেরিকা/শিকাগো"),
("41.85059", "-87.882", "ওয়েস্টচেস্টার", "US", "আমেরিকা/শিকাগো"),
("41.75338", "-86.11084", "গ্রেঞ্জার", "US", "আমেরিকা/ইন্ডিয়ানা/ইন্ডিয়ানাপোলিস"),
("41.47892", "-87.45476", "শেরেরভিল", "US", "আমেরিকা/শিকাগো"),
("42.35843", "-71.05977", "বোস্টন", "US", "আমেরিকা/নিউইয়র্ক"),
("42.58342", "-71.8023", "ফিচবার্গ", "US", "আমেরিকা/নিউইয়র্ক"),
("42.4251", "-71.06616", "মালডেন", "US", "আমেরিকা/নিউইয়র্ক"),
("42.52787", "-70.92866", "পিবডি", "US", "আমেরিকা/নিউইয়র্ক"),
("41.9001", "-71.08977", "টনটন", "US", "আমেরিকা/নিউইয়র্ক"),
("43.91452", "-69.96533", "ব্রান্সউইক", "US", "আমেরিকা/নিউইয়র্ক"),
("42.30865", "-83.48216", "ক্যান্টন", "US", "আমেরিকা/ডেট্রয়েট"),
("46.09273", "-88.64235", "লোহা নদী", "US", "আমেরিকা/মেনোমিনি"),
("42.97086", "-82.42491", "পোর্ট হুরন", "US", "আমেরিকা/ডেট্রয়েট"),
("42.7392", "-84.62081", "ওয়েভারলি", "US", "আমেরিকা/ডেট্রয়েট"),
("45.0408", "-93.263", "কলাম্বিয়া হাইটস", "US", "আমেরিকা/শিকাগো"),
("45.16024", "-93.08883", "লিনো লেকস", "US", "আমেরিকা/শিকাগো"),
("44.73941", "-93.12577", "রোজমাউন্ট", "US", "আমেরিকা/শিকাগো"),
("47.92526", "-97.03285", "গ্র্যান্ড ফর্কস", "US", "আমেরিকা/শিকাগো"),
("42.93369", "-72.27814", "কিনে", "US", "আমেরিকা/নিউইয়র্ক"),
("40.94065", "-73.99681", "ডুমন্ট", "US", "আমেরিকা/নিউইয়র্ক"),
("40.72816", "-74.07764", "জার্সি সিটি", "US", "আমেরিকা/নিউইয়র্ক"),
("40.82232", "-74.15987", "নাটলি", "US", "আমেরিকা/নিউইয়র্ক"),
("40.65538", "-74.38987", "স্কচ সমভূমি", "US", "আমেরিকা/নিউইয়র্ক"),
("40.5576", "-74.28459", "উডব্রিজ", "US", "আমেরিকা/নিউইয়র্ক"),
("40.57788", "-73.95958", "ব্রাইটন বিচ", "US", "আমেরিকা/নিউইয়র্ক"),
("40.67705", "-73.89125", "সাইপ্রেস হিলস", "US", "আমেরিকা/নিউইয়র্ক"),
("40.60538", "-73.75513", "ফার রকওয়ে", "US", "আমেরিকা/নিউইয়র্ক"),
("40.72371", "-73.95097", "গ্রিনপয়েন্ট", "US", "আমেরিকা/নিউইয়র্ক"),
("40.64621", "-73.97069", "কেন্সিংটন", "US", "আমেরিকা/নিউইয়র্ক"),
("40.68066", "-73.47429", "মাসাপেকা", "US", "আমেরিকা/নিউইয়র্ক"),
("41.50343", "-74.01042", "নিউবার্গ", "US", "আমেরিকা/নিউইয়র্ক"),
("40.63316", "-74.13653", "পোর্ট রিচমন্ড", "US", "আমেরিকা/নিউইয়র্ক"),
("41.0051", "-73.78458", "স্কারসডেল", "US", "আমেরিকা/নিউইয়র্ক"),
("43.1009", "-75.23266", "ইউটিকা", "US", "আমেরিকা/নিউইয়র্ক"),
("40.93121", "-73.89875", "ইয়ঙ্কার্স", "US", "আমেরিকা/নিউইয়র্ক"),
("41.55838", "-81.56929", "কলিনউড", "US", "আমেরিকা/নিউইয়র্ক"),
("41.48199", "-81.79819", "লেকউড", "US", "আমেরিকা/নিউইয়র্ক"),
("41.24255", "-82.61573", "নরওয়াক", "US", "আমেরিকা/নিউইয়র্ক"),
("41.66394", "-83.55521", "টোলেডো", "US", "আমেরিকা/নিউইয়র্ক"),
("40.2737", "-76.88442", "হ্যারিসবার্গ", "US", "আমেরিকা/নিউইয়র্ক"),
("40.24537", "-75.64963", "পটসটাউন", "US", "আমেরিকা/নিউইয়র্ক"),
("41.54566", "-71.29144", "মিডলটাউন", "US", "আমেরিকা/নিউইয়র্ক"),
("43.61062", "-72.97261", "রাটল্যান্ড", "US", "আমেরিকা/নিউইয়র্ক"),
("44.27804", "-88.27205", "কাউকাউনা", "US", "আমেরিকা/শিকাগো"),
("42.55308", "-87.93341", "প্লিজেন্ট প্রেইরি", "US", "আমেরিকা/শিকাগো"),
("41.16704", "-73.20483", "ব্রিজপোর্ট", "US", "আমেরিকা/নিউইয়র্ক"),
("33.35283", "-111.78903", "গিলবার্ট", "US", "আমেরিকা/ফিনিক্স"),
("33.50921", "-111.89903", "স্কটসডেল", "US", "আমেরিকা/ফিনিক্স"),
("38.17492", "-122.2608", "আমেরিকান ক্যানিয়ন", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("33.92946", "-116.97725", "বিউমন্ট", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("34.21639", "-119.0376", "ক্যামারিলো", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("34.09668", "-117.71978", "ক্লেরমন্ট", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("38.54491", "-121.74052", "ডেভিস", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("33.03699", "-117.29198", "এনসিনিটাস", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("34.14251", "-118.25508", "গ্লেনডেল", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("33.7207", "-116.21677", "ইন্দিও", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("33.52253", "-117.70755", "লাগুনা নিগুয়েল", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("34.63915", "-120.45794", "লোমপোক", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("32.9156", "-117.14392", "মীরা মেসা", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("33.93113", "-117.54866", "নরকো", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("33.72255", "-116.37697", "পাম মরুভূমি", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("36.06523", "-119.01677", "পোর্টারভিল", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("37.73604", "-120.93549", "নদীর তীর", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("34.09611", "-118.10583", "সান গ্যাব্রিয়েল", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("34.95303", "-120.43572", "সান্তা মারিয়া", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("33.95015", "-118.03917", "সাউথ হুইটিয়ার", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("33.76446", "-117.79394", "উত্তর তুস্টিন", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("36.91023", "-121.75689", "ওয়াটসনভিল", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("39.72943", "-104.83192", "অরোরা", "US", "আমেরিকা/ডেনভার"),
("39.57582", "-105.11221", "কেন ক্যারিল", "US", "আমেরিকা/ডেনভার"),
("32.42067", "-104.22884", "কার্লসব্যাড", "US", "আমেরিকা/ডেনভার"),
("36.20829", "-115.98391", "পাহারাম্প", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("31.84568", "-102.36764", "ওডেসা", "US", "আমেরিকা/শিকাগো"),
("40.58654", "-122.39168", "রেডিং", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("43.54072", "-116.56346", "নাম্পা", "US", "আমেরিকা/বোইস"),
("45.49428", "-122.86705", "আলোহা", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("44.99012", "-123.02621", "কেইজার", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("45.53929", "-122.38731", "ট্রাউটডেল", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("40.65995", "-111.99633", "কার্নস", "US", "আমেরিকা/ডেনভার"),
("40.34912", "-111.90466", "সারাতোগা স্প্রিংস", "US", "আমেরিকা/ডেনভার"),
("47.76232", "-122.2054", "বোথেল", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("47.38093", "-122.23484", "কেন্ট", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("47.64995", "-117.23991", "সুযোগ", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("46.32374", "-120.00865", "রৌদ্রজ্জল দিক", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("20.88953", "-156.47432", "কাহুলুই", "US", "প্যাসিফিক/হনোলুলু"),
("40.81", "-73.9625", "মর্নিংসাইড হাইটস", "US", "আমেরিকা/নিউইয়র্ক"),
("43.16547", "-77.70066", "গেটস-উত্তর গেটস", "US", "আমেরিকা/নিউইয়র্ক"),
("47.4943", "-122.24092", "ব্রাইন মাওর-স্কাইওয়ে", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("47.80527", "-122.24064", "বোথেল ওয়েস্ট", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("37.71715", "-122.40433", "ভিজিটাসিয়ন ভ্যালি", "US", "আমেরিকা/লস_এঞ্জেলেস"),
("-33.38056", "-56.52361", "ডুরাজনো", "UY", "আমেরিকা/মন্টেভিডিও"),
("41.29444", "69.67639", "পার্কেন্ট", "UZ", "এশিয়া/তাসখন্দ"),
("40.11583", "67.84222", "জিজাক্স", "UZ", "এশিয়া/সমরকন্দ"),
("40.78206", "72.34424", "অ্যান্ডিজন", "UZ", "এশিয়া/তাসখন্দ"),
("9.91861", "-68.30472", "টিনাকুইলো", "VE", "আমেরিকা/কারাকাস"),
("10.22677", "-67.33122", "লা ভিক্টোরিয়া", "VE", "আমেরিকা/কারাকাস"),
("8.35122", "-62.64102", "সিউদাদ গায়ানা", "VE", "আমেরিকা/কারাকাস"),
("8.62261", "-70.20749", "বারিনাস", "VE", "আমেরিকা/কারাকাস"),
("10.29085", "105.75635", "সা ডিসেম্বর", "VN", "এশিয়া/হো_চি_মিন"),
("-17.73648", "168.31366", "পোর্ট-ভিলা", "VU", "প্যাসিফিক/ইফেট"),
("42.62833", "20.89389", "গ্লোগোভাক", "XK", "ইউরোপ/বেলগ্রেড"),
("14.53767", "46.83187", "আতক", "YE", "এশিয়া/এডেন"),
("-27.76952", "30.79165", "ভাইহাইড", "ZA", "আফ্রিকা/জোহানেসবার্গ"),
("-26.93366", "29.24152", "স্ট্যান্ডারটন", "ZA", "আফ্রিকা/জোহানেসবার্গ"),
("-24.19436", "29.00974", "মোকোপানে", "ZA", "আফ্রিকা/জোহানেসবার্গ"),
)
def local_latlng(self, country_code: str = "BD", coords_only: bool = False) -> Optional[Tuple[str, ...]]:
    """Return a random place record, defaulting the country to Bangladesh ("BD").

    Overrides the base provider's ``local_latlng`` only to change the default
    ``country_code`` from the upstream default to ``"BD"``; all other behavior
    is delegated to the parent implementation.

    Args:
        country_code: Two-letter ISO country code to filter places by.
        coords_only: If True, presumably return only the (latitude, longitude)
            pair — semantics come from the parent provider; verify upstream.

    Returns:
        A tuple describing the chosen place, or ``None`` if the parent finds
        no match for the given country code.
    """
    # Zero-argument super() is required here: the original
    # ``super(self.__class__, self)`` re-resolves the class from the *runtime*
    # instance, so any subclass inheriting this method would recurse forever.
    return super().local_latlng(country_code=country_code, coords_only=coords_only)
| Provider |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/black/cases/preview_long_strings__regression.py | {
"start": 16182,
"end": 22664
} | class ____:
async def foo(self):
msg = ""
for candidate in CANDIDATES:
msg += (
"**{candidate.object_type} {candidate.rev}**"
" - {candidate.description}\n"
)
temp_msg = (
f"{f'{humanize_number(pos)}.': <{pound_len+2}} "
f"{balance: <{bal_len + 5}} "
f"<<{author.display_name}>>\n"
)
assert str(suffix_arr) == (
"['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', "
"'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', "
"'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
)
assert str(suffix_arr) != (
"['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', "
"'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', "
"'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
)
assert str(suffix_arr) <= (
"['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', "
"'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', "
"'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
)
assert str(suffix_arr) >= (
"['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', "
"'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', "
"'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
)
assert str(suffix_arr) < (
"['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', "
"'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', "
"'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
)
assert str(suffix_arr) > (
"['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', "
"'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', "
"'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
)
assert str(suffix_arr) in "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', 'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', 'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
assert str(suffix_arr) not in "['$', 'angaroo$', 'angrykangaroo$', 'aroo$', 'garoo$', 'grykangaroo$', 'kangaroo$', 'ngaroo$', 'ngrykangaroo$', 'o$', 'oo$', 'roo$', 'rykangaroo$', 'ykangaroo$']"
message = (
f"1. Go to Google Developers Console and log in with your Google account."
"(https://console.developers.google.com/)"
"2. You should be prompted to create a new project (name does not matter)."
"3. Click on Enable APIs and Services at the top."
"4. In the list of APIs choose or search for YouTube Data API v3 and "
"click on it. Choose Enable."
"5. Click on Credentials on the left navigation bar."
"6. Click on Create Credential at the top."
'7. At the top click the link for "API key".'
"8. No application restrictions are needed. Click Create at the bottom."
"9. You now have a key to add to `{prefix}set api youtube api_key`"
)
message = (
f"1. Go to Google Developers Console and log in with your Google account."
"(https://console.developers.google.com/)"
"2. You should be prompted to create a new project (name does not matter)."
f"3. Click on Enable APIs and Services at the top."
"4. In the list of APIs choose or search for YouTube Data API v3 and "
"click on it. Choose Enable."
f"5. Click on Credentials on the left navigation bar."
"6. Click on Create Credential at the top."
'7. At the top click the link for "API key".'
"8. No application restrictions are needed. Click Create at the bottom."
"9. You now have a key to add to `{prefix}set api youtube api_key`"
)
message = (
f"1. Go to Google Developers Console and log in with your Google account."
"(https://console.developers.google.com/)"
"2. You should be prompted to create a new project (name does not matter)."
f"3. Click on Enable APIs and Services at the top."
"4. In the list of APIs choose or search for YouTube Data API v3 and "
"click on it. Choose Enable."
f"5. Click on Credentials on the left navigation bar."
"6. Click on Create Credential at the top."
'7. At the top click the link for "API key".'
"8. No application restrictions are needed. Click Create at the bottom."
f"9. You now have a key to add to `{prefix}set api youtube api_key`"
)
# It shouldn't matter if the string prefixes are capitalized.
temp_msg = (
F"{F'{humanize_number(pos)}.': <{pound_len+2}} "
F"{balance: <{bal_len + 5}} "
F"<<{author.display_name}>>\n"
)
fstring = (
F"We have to remember to escape {braces}."
" Like {these}."
F" But not {this}."
)
welcome_to_programming = R"hello," R" world!"
fstring = F"f-strings definitely make things more {difficult} than they need to be for {{black}}. But boy they sure are handy. The problem is that some lines will need to have the 'f' whereas others do not. This {line}, for example, needs one."
x = F"This is a long string which contains an f-expr that should not split {{{[i for i in range(5)]}}}."
x = (
"\N{BLACK RIGHT-POINTING TRIANGLE WITH DOUBLE VERTICAL BAR}\N{VARIATION SELECTOR-16}"
)
xxxxxx_xxx_xxxx_xx_xxxxx_xxxxxxxx_xxxxxxxx_xxxxxxxxxx_xxxx_xxxx_xxxxx = xxxx.xxxxxx.xxxxxxxxx.xxxxxxxxxxxxxxxxxxxx(
xx_xxxxxx={
"x3_xxxxxxxx": "xxx3_xxxxx_xxxxxxxx_xxxxxxxx_xxxxxxxxxx_xxxxxxxx_xxxxxx_xxxxxxx",
},
)
# Regression test for https://github.com/psf/black/issues/3117.
some_dict = {
"something_something":
r"Lorem ipsum dolor sit amet, an sed convenire eloquentiam \t"
r"signiferumque, duo ea vocibus consetetur scriptorem. Facer \t",
}
# Regression test for https://github.com/psf/black/issues/3459.
xxxx(
empty_str_as_first_split=''
f'xxxxxxx {xxxxxxxxxx} xxx xxxxxxxxxx xxxxx xxx xxx xx '
'xxxxx xxxxxxxxx xxxxxxx, xxx xxxxxxxxxxx xxx xxxxx. '
f'xxxxxxxxxxxxx xxxx xx xxxxxxxxxx. xxxxx: {x.xxx}',
empty_u_str_as_first_split=u''
f'xxxxxxx {xxxxxxxxxx} xxx xxxxxxxxxx xxxxx xxx xxx xx '
'xxxxx xxxxxxxxx xxxxxxx, xxx xxxxxxxxxxx xxx xxxxx. '
f'xxxxxxxxxxxxx xxxx xx xxxxxxxxxx. xxxxx: {x.xxx}',
)
# Regression test for https://github.com/psf/black/issues/3455.
a_dict = {
"/this/is/a/very/very/very/very/very/very/very/very/very/very/long/key/without/spaces":
# And there is a comment before the value
("item1", "item2", "item3"),
}
# Regression test for https://github.com/psf/black/issues/3506.
# Regressed again by https://github.com/psf/black/pull/4498
s = (
"With single quote: ' "
f" {my_dict['foo']}"
' With double quote: " '
f' {my_dict["bar"]}'
)
s = f'Lorem Ipsum is simply dummy text of the printing and typesetting industry:\'{my_dict["foo"]}\''
| X |
python | marshmallow-code__marshmallow | tests/test_registry.py | {
"start": 4626,
"end": 4724
} | class ____:
def __init__(self, _id, bs=None):
self.id = _id
self.bs = bs or []
| C |
python | davidhalter__jedi | jedi/inference/value/iterable.py | {
"start": 8063,
"end": 8442
} | class ____(_BaseComprehension, Sequence):
array_type = 'list'
def py__simple_getitem__(self, index):
if isinstance(index, slice):
return ValueSet([self])
all_types = list(self.py__iter__())
with reraise_getitem_errors(IndexError, TypeError):
lazy_value = all_types[index]
return lazy_value.infer()
| ListComprehension |
python | wandb__wandb | wandb/automations/events.py | {
"start": 7840,
"end": 8106
} | class ____(_BaseEventInput):
filter: Annotated[
JsonEncoded[MongoLikeFilter],
AfterValidator(wrap_mutation_event_filter),
] = And()
"""Additional conditions(s), if any, that are required for this event to trigger."""
| _BaseMutationEventInput |
python | zarr-developers__zarr-python | src/zarr/codecs/numcodecs/_codecs.py | {
"start": 11813,
"end": 11886
} | class ____(_NumcodecsArrayBytesCodec, codec_name="pcodec"):
pass
| PCodec |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_scatter02.py | {
"start": 315,
"end": 1558
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_scatter02.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart(
{"type": "scatter", "subtype": "straight_with_markers"}
)
chart.axis_ids = [54514816, 45705856]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$B$1:$B$5",
}
)
chart.add_series(
{
"categories": "=Sheet1!$A$1:$A$5",
"values": "=Sheet1!$C$1:$C$5",
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | vyperlang__vyper | vyper/venom/parser.py | {
"start": 2764,
"end": 2865
} | class ____:
def __init__(self, children: list) -> None:
self.children = children
| _TypedItem |
python | celery__celery | t/unit/utils/test_threads.py | {
"start": 1778,
"end": 2427
} | class ____:
def test_init(self):
x = LocalManager()
assert x.locals == []
assert x.ident_func
def ident():
return 1
loc = Local()
x = LocalManager([loc], ident_func=ident)
assert x.locals == [loc]
x = LocalManager(loc, ident_func=ident)
assert x.locals == [loc]
assert x.ident_func is ident
assert x.locals[0].__ident_func__ is ident
assert x.get_ident() == 1
with patch('celery.utils.threads.release_local') as release:
x.cleanup()
release.assert_called_with(loc)
assert repr(x)
| test_LocalManager |
python | Lightning-AI__lightning | src/lightning/fabric/strategies/launchers/subprocess_script.py | {
"start": 8170,
"end": 10052
} | class ____(threading.Thread):
def __init__(self, main_pid: int, child_processes: list[subprocess.Popen], sleep_period: int = 5) -> None:
super().__init__(daemon=True, name="child-process-observer") # thread stops if the main process exits
self._main_pid = main_pid
self._child_processes = child_processes
self._sleep_period = sleep_period
# Note: SIGTERM is not aggressive enough to terminate processes hanging in collectives
self._termination_signal = signal.SIGTERM if sys.platform == "win32" else signal.SIGKILL
self._finished = False
@override
def run(self) -> None:
while not self._finished:
time.sleep(self._sleep_period)
self._finished = self._run()
def _run(self) -> bool:
"""Runs once over all child processes to check whether they are still running."""
for proc in self._child_processes:
proc.poll()
return_codes = [proc.returncode for proc in self._child_processes]
if all(return_code == 0 for return_code in return_codes):
return True
for i, proc in enumerate(self._child_processes):
if proc.returncode:
message = rank_prefixed_message(
f"Child process with PID {proc.pid} terminated with code {proc.returncode}."
f" Forcefully terminating all other processes to avoid zombies 🧟",
rank=(i + 1),
)
_logger.info(message)
self._terminate_all()
return True
return False
def _terminate_all(self) -> None:
"""Terminates the main process and all its children."""
for p in self._child_processes:
p.send_signal(self._termination_signal)
os.kill(self._main_pid, self._termination_signal)
| _ChildProcessObserver |
python | getsentry__sentry | tests/acceptance/test_organization_switch.py | {
"start": 440,
"end": 3791
} | class ____(AcceptanceTestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.primary_projects = [
self.create_project(organization=self.organization, teams=[self.team], name=name)
for name in ["Bengal", "Sumatra", "Siberian"]
]
self.secondary_organization = self.create_organization(owner=self.user, name="Banana Duck")
self.secondary_team = self.create_team(
organization=self.secondary_organization, name="Second", members=[self.user]
)
self.secondary_projects = [
self.create_project(
organization=self.secondary_organization, teams=[self.secondary_team], name=name
)
for name in ["Gone Goose", "Peaceful Platypus"]
]
self.login_as(self.user)
def test_organization_switches(self) -> None:
def navigate_to_issues_page(org_slug: str) -> None:
issues_url = OrganizationSwitchTest.url_creator("issues", org_slug)
self.browser.get(issues_url)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
@TimedRetryPolicy.wrap(timeout=20, exceptions=(TimeoutException,))
def open_project_selector() -> None:
self.browser.click(selector='[data-test-id="page-filter-project-selector"]')
def get_project_elements_from_project_selector_dropdown() -> list[WebElement]:
selector = '[data-test-id="menu-list-item-label"]'
self.browser.wait_until(selector)
return self.browser.find_elements(by=By.CSS_SELECTOR, value=selector)
transition_urls = [
OrganizationSwitchTest.url_creator(page, self.organization.slug)
for page in ["issues", "releases", "discover", "user-feedback"]
]
with (
self.settings(SENTRY_SINGLE_ORGANIZATION=False),
self.feature("organizations:discover"),
):
for transition_url in transition_urls:
navigate_to_issues_page(self.organization.slug)
open_project_selector()
primary_projects_elements = get_project_elements_from_project_selector_dropdown()
OrganizationSwitchTest.expect_projects_element_text_to_match_projects_slug(
primary_projects_elements, self.primary_projects
)
self.browser.get(transition_url)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
navigate_to_issues_page(self.secondary_organization.slug)
open_project_selector()
secondary_projects_elements = get_project_elements_from_project_selector_dropdown()
OrganizationSwitchTest.expect_projects_element_text_to_match_projects_slug(
secondary_projects_elements, self.secondary_projects
)
@staticmethod
def expect_projects_element_text_to_match_projects_slug(
elements: list[WebElement], projects: list[Project]
) -> None:
assert len(elements) == len(projects)
assert {e.text for e in elements} == {p.slug for p in projects}
@staticmethod
def url_creator(page_path: str, org_slug: str) -> str:
return f"organizations/{org_slug}/{page_path}/"
| OrganizationSwitchTest |
python | apache__airflow | airflow-core/src/airflow/utils/log/file_task_handler.py | {
"start": 5827,
"end": 15611
} | class ____(str, Enum):
"""
Type of service from which we retrieve logs.
:meta private:
"""
TRIGGER = "trigger"
WORKER = "worker"
def _set_task_deferred_context_var():
"""
Tell task log handler that task exited with deferral.
This exists for the sole purpose of telling elasticsearch handler not to
emit end_of_log mark after task deferral.
Depending on how the task is run, we may need to set this in task command or in local task job.
Kubernetes executor requires the local task job invocation; local executor requires the task
command invocation.
:meta private:
"""
logger = logging.getLogger()
with suppress(StopIteration):
h = next(h for h in logger.handlers if hasattr(h, "ctx_task_deferred"))
h.ctx_task_deferred = True
def _fetch_logs_from_service(url: str, log_relative_path: str) -> Response:
# Import occurs in function scope for perf. Ref: https://github.com/apache/airflow/pull/21438
import requests
from airflow.api_fastapi.auth.tokens import JWTGenerator, get_signing_key
timeout = conf.getint("api", "log_fetch_timeout_sec", fallback=None)
generator = JWTGenerator(
secret_key=get_signing_key("api", "secret_key"),
# Since we are using a secret key, we need to be explicit about the algorithm here too
algorithm="HS512",
# We must set an empty private key here as otherwise it can be automatically loaded by JWTGenerator
# and secret_key and private_key cannot be set together
private_key=None, # type: ignore[arg-type]
issuer=None,
valid_for=conf.getint("webserver", "log_request_clock_grace", fallback=30),
audience="task-instance-logs",
)
response = requests.get(
url,
timeout=timeout,
headers={"Authorization": generator.generate({"filename": log_relative_path})},
stream=True,
)
response.encoding = "utf-8"
return response
_parse_timestamp = conf.getimport("logging", "interleave_timestamp_parser", fallback=None)
if not _parse_timestamp:
def _parse_timestamp(line: str):
timestamp_str, _ = line.split(" ", 1)
return pendulum.parse(timestamp_str.strip("[]"))
def _stream_lines_by_chunk(
log_io: IO[str],
) -> RawLogStream:
"""
Stream lines from a file-like IO object.
:param log_io: A file-like IO object to read from.
:return: A generator that yields individual lines within the specified range.
"""
# Skip processing if file is already closed
if log_io.closed:
return
# Seek to beginning if possible
if log_io.seekable():
try:
log_io.seek(0)
except Exception as e:
logger.error("Error seeking in log stream: %s", e)
return
buffer = ""
while True:
# Check if file is already closed
if log_io.closed:
break
try:
chunk = log_io.read(CHUNK_SIZE)
except Exception as e:
logger.error("Error reading log stream: %s", e)
break
if not chunk:
break
buffer += chunk
*lines, buffer = buffer.split("\n")
yield from lines
if buffer:
yield from buffer.split("\n")
def _log_stream_to_parsed_log_stream(
log_stream: RawLogStream,
) -> ParsedLogStream:
"""
Turn a str log stream into a generator of parsed log lines.
:param log_stream: The stream to parse.
:return: A generator of parsed log lines.
"""
from airflow._shared.timezones.timezone import coerce_datetime
timestamp = None
next_timestamp = None
idx = 0
for line in log_stream:
if line:
try:
log = StructuredLogMessage.model_validate_json(line)
except ValidationError:
with suppress(Exception):
# If we can't parse the timestamp, don't attach one to the row
if isinstance(line, str):
next_timestamp = _parse_timestamp(line)
log = StructuredLogMessage(event=str(line), timestamp=next_timestamp)
if log.timestamp:
log.timestamp = coerce_datetime(log.timestamp)
timestamp = log.timestamp
yield timestamp, idx, log
idx += 1
def _create_sort_key(timestamp: datetime | None, line_num: int) -> int:
"""
Create a sort key for log record, to be used in K-way merge.
:param timestamp: timestamp of the log line
:param line_num: line number of the log line
:return: a integer as sort key to avoid overhead of memory usage
"""
return int((timestamp or DEFAULT_SORT_DATETIME).timestamp() * 1000) * SORT_KEY_OFFSET + line_num
def _is_sort_key_with_default_timestamp(sort_key: int) -> bool:
"""
Check if the sort key was generated with the DEFAULT_SORT_TIMESTAMP.
This is used to identify log records that don't have timestamp.
:param sort_key: The sort key to check
:return: True if the sort key was generated with DEFAULT_SORT_TIMESTAMP, False otherwise
"""
# Extract the timestamp part from the sort key (remove the line number part)
timestamp_part = sort_key // SORT_KEY_OFFSET
return timestamp_part == DEFAULT_SORT_TIMESTAMP
def _add_log_from_parsed_log_streams_to_heap(
heap: list[tuple[int, StructuredLogMessage]],
parsed_log_streams: dict[int, ParsedLogStream],
) -> None:
"""
Add one log record from each parsed log stream to the heap, and will remove empty log stream from the dict after iterating.
:param heap: heap to store log records
:param parsed_log_streams: dict of parsed log streams
"""
# We intend to initialize the list lazily, as in most cases we don't need to remove any log streams.
# This reduces memory overhead, since this function is called repeatedly until all log streams are empty.
log_stream_to_remove: list[int] | None = None
for idx, log_stream in parsed_log_streams.items():
record: ParsedLog | None = next(log_stream, None)
if record is None:
if log_stream_to_remove is None:
log_stream_to_remove = []
log_stream_to_remove.append(idx)
continue
timestamp, line_num, line = record
# take int as sort key to avoid overhead of memory usage
heapq.heappush(heap, (_create_sort_key(timestamp, line_num), line))
# remove empty log stream from the dict
if log_stream_to_remove is not None:
for idx in log_stream_to_remove:
del parsed_log_streams[idx]
def _flush_logs_out_of_heap(
heap: list[tuple[int, StructuredLogMessage]],
flush_size: int,
last_log_container: list[StructuredLogMessage | None],
) -> Generator[StructuredLogMessage, None, None]:
"""
Flush logs out of the heap, deduplicating them based on the last log.
:param heap: heap to flush logs from
:param flush_size: number of logs to flush
:param last_log_container: a container to store the last log, to avoid duplicate logs
:return: a generator that yields deduplicated logs
"""
last_log = last_log_container[0]
for _ in range(flush_size):
sort_key, line = heapq.heappop(heap)
if line != last_log or _is_sort_key_with_default_timestamp(sort_key): # dedupe
yield line
last_log = line
# update the last log container with the last log
last_log_container[0] = last_log
def _interleave_logs(*log_streams: RawLogStream) -> StructuredLogStream:
"""
Merge parsed log streams using K-way merge.
By yielding HALF_CHUNK_SIZE records when heap size exceeds CHUNK_SIZE, we can reduce the chance of messing up the global order.
Since there are multiple log streams, we can't guarantee that the records are in global order.
e.g.
log_stream1: ----------
log_stream2: ----
log_stream3: --------
The first record of log_stream3 is later than the fourth record of log_stream1 !
:param parsed_log_streams: parsed log streams
:return: interleaved log stream
"""
# don't need to push whole tuple into heap, which increases too much overhead
# push only sort_key and line into heap
heap: list[tuple[int, StructuredLogMessage]] = []
# to allow removing empty streams while iterating, also turn the str stream into parsed log stream
parsed_log_streams: dict[int, ParsedLogStream] = {
idx: _log_stream_to_parsed_log_stream(log_stream) for idx, log_stream in enumerate(log_streams)
}
# keep adding records from logs until all logs are empty
last_log_container: list[StructuredLogMessage | None] = [None]
while parsed_log_streams:
_add_log_from_parsed_log_streams_to_heap(heap, parsed_log_streams)
# yield HALF_HEAP_DUMP_SIZE records when heap size exceeds HEAP_DUMP_SIZE
if len(heap) >= HEAP_DUMP_SIZE:
yield from _flush_logs_out_of_heap(heap, HALF_HEAP_DUMP_SIZE, last_log_container)
# yield remaining records
yield from _flush_logs_out_of_heap(heap, len(heap), last_log_container)
# free memory
del heap
del parsed_log_streams
def _is_logs_stream_like(log) -> bool:
"""Check if the logs are stream-like."""
return isinstance(log, (chain, GeneratorType))
def _get_compatible_log_stream(
log_messages: LogMessages,
) -> RawLogStream:
"""
Convert legacy log message blobs into a generator that yields log lines.
:param log_messages: List of legacy log message strings.
:return: A generator that yields interleaved log lines.
"""
yield from chain.from_iterable(
_stream_lines_by_chunk(io.StringIO(log_message)) for log_message in log_messages
)
| LogType |
python | celery__celery | t/unit/app/test_log.py | {
"start": 2205,
"end": 3973
} | class ____:
@patch('celery.utils.log.safe_str')
@patch('logging.Formatter.formatException')
def test_formatException_not_string(self, fe, safe_str):
x = ColorFormatter()
value = KeyError()
fe.return_value = value
assert x.formatException(value) is value
fe.assert_called()
safe_str.assert_not_called()
@patch('logging.Formatter.formatException')
@patch('celery.utils.log.safe_str')
def test_formatException_bytes(self, safe_str, fe):
x = ColorFormatter()
fe.return_value = b'HELLO'
try:
raise Exception()
except Exception:
assert x.formatException(sys.exc_info())
@patch('logging.Formatter.format')
def test_format_object(self, _format):
x = ColorFormatter()
x.use_color = True
record = Mock()
record.levelname = 'ERROR'
record.msg = object()
assert x.format(record)
@patch('celery.utils.log.safe_str')
def test_format_raises(self, safe_str):
x = ColorFormatter()
def on_safe_str(s):
try:
raise ValueError('foo')
finally:
safe_str.side_effect = None
safe_str.side_effect = on_safe_str
class Record:
levelname = 'ERROR'
msg = 'HELLO'
exc_info = 1
exc_text = 'error text'
stack_info = None
def __str__(self):
return on_safe_str('')
def getMessage(self):
return self.msg
record = Record()
safe_str.return_value = record
msg = x.format(record)
assert '<Unrepresentable' in msg
assert safe_str.call_count == 1
| test_ColorFormatter |
python | numpy__numpy | tools/swig/test/testTensor.py | {
"start": 11639,
"end": 11943
} | class ____(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "schar"
self.typeCode = "b"
self.result = int(self.result)
######################################################################
| scharTestCase |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_lease_candidate_list.py | {
"start": 383,
"end": 7110
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1beta1LeaseCandidate]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1beta1LeaseCandidateList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1beta1LeaseCandidateList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1LeaseCandidateList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1LeaseCandidateList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1LeaseCandidateList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1beta1LeaseCandidateList. # noqa: E501
items is a list of schema objects. # noqa: E501
:return: The items of this V1beta1LeaseCandidateList. # noqa: E501
:rtype: list[V1beta1LeaseCandidate]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1beta1LeaseCandidateList.
items is a list of schema objects. # noqa: E501
:param items: The items of this V1beta1LeaseCandidateList. # noqa: E501
:type: list[V1beta1LeaseCandidate]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1beta1LeaseCandidateList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1LeaseCandidateList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1LeaseCandidateList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1LeaseCandidateList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1LeaseCandidateList. # noqa: E501
:return: The metadata of this V1beta1LeaseCandidateList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1LeaseCandidateList.
:param metadata: The metadata of this V1beta1LeaseCandidateList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1LeaseCandidateList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1LeaseCandidateList):
return True
return self.to_dict() != other.to_dict()
| V1beta1LeaseCandidateList |
python | scipy__scipy | scipy/stats/tests/test_distributions.py | {
"start": 183458,
"end": 183885
} | class ____:
"""gh-10300"""
def test_nan_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.nan])
assert_raises(ValueError, stats.uniform.fit, x)
def test_inf_raises_error(self):
# see gh-issue 10300
x = np.array([1.6483, 2.7169, 2.4667, 1.1791, 3.5433, np.inf])
assert_raises(ValueError, stats.uniform.fit, x)
| TestUniform |
python | huggingface__transformers | src/transformers/models/sam_hq/modeling_sam_hq.py | {
"start": 35970,
"end": 46276
} | class ____(nn.Module):
def __init__(self, config: SamHQMaskDecoderConfig):
super().__init__()
self.hidden_size = config.hidden_size
self.num_multimask_outputs = config.num_multimask_outputs
self.num_mask_tokens = config.num_multimask_outputs + 1
self.iou_token = nn.Embedding(1, self.hidden_size)
self.mask_tokens = nn.Embedding(self.num_mask_tokens, self.hidden_size)
self.transformer = SamHQTwoWayTransformer(config)
self.upscale_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2)
self.upscale_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2)
self.upscale_layer_norm = SamHQLayerNorm(self.hidden_size // 4, data_format="channels_first")
self.activation = nn.GELU()
mlps_list = []
for _ in range(self.num_mask_tokens):
mlps_list += [SamHQFeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)]
self.output_hypernetworks_mlps = nn.ModuleList(mlps_list)
self.iou_prediction_head = SamHQFeedForward(
self.hidden_size, config.iou_head_hidden_dim, self.num_mask_tokens, config.iou_head_depth
)
self.hq_token = nn.Embedding(1, self.hidden_size)
self.hq_mask_mlp = SamHQFeedForward(self.hidden_size, self.hidden_size, self.hidden_size // 8, 3)
self.num_mask_tokens = self.num_mask_tokens + 1
# Compress ViT features
self.compress_vit_conv1 = nn.ConvTranspose2d(config.vit_dim, self.hidden_size, kernel_size=2, stride=2)
self.compress_vit_norm = SamHQLayerNorm(self.hidden_size, data_format="channels_first")
self.compress_vit_conv2 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 8, kernel_size=2, stride=2)
# Embedding encoder
self.encoder_conv1 = nn.ConvTranspose2d(self.hidden_size, self.hidden_size // 4, kernel_size=2, stride=2)
self.encoder_norm = SamHQLayerNorm(self.hidden_size // 4, data_format="channels_first")
self.encoder_conv2 = nn.ConvTranspose2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=2, stride=2)
# Embedding mask feature
self.mask_conv1 = nn.Conv2d(self.hidden_size // 8, self.hidden_size // 4, kernel_size=3, stride=1, padding=1)
self.mask_norm = SamHQLayerNorm(self.hidden_size // 4, data_format="channels_first")
self.mask_conv2 = nn.Conv2d(self.hidden_size // 4, self.hidden_size // 8, kernel_size=3, stride=1, padding=1)
def forward(
self,
image_embeddings: torch.Tensor,
image_positional_embeddings: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
hq_token_only: bool,
intermediate_embeddings: Optional[list[torch.Tensor]] = None,
attention_similarity: Optional[torch.Tensor] = None,
target_embedding: Optional[torch.Tensor] = None,
) -> SamHQMMaskDecoderOutputs:
"""
Predict high-quality masks given image and prompt embeddings.
Args:
image_embeddings (`torch.Tensor`):
The embeddings from the image encoder.
image_positional_embedding (`torch.Tensor`):
Positional encoding with the shape of image_embeddings.
sparse_prompt_embeddings (`torch.Tensor`):
The embeddings of the points and boxes.
dense_prompt_embeddings (`torch.Tensor`):
The embeddings of the mask inputs.
multimask_output (bool):
Whether to return multiple masks or a single mask.
hq_token_only (bool):
Whether to use only the high-quality token output or combine with SAM output.
intermediate_embeddings (`torch.Tensor`):
Intermediate embeddings from the vision encoder for feature fusion.
attention_similarity (`torch.Tensor`, *optional*):
Optional tensor for attention similarity computation.
target_embedding (`torch.Tensor`, *optional*):
Optional target embedding for transformer processing.
Returns:
`Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]`: A tuple of tensors containing:
- A tensor of shape `(batch_size, num_prompts, num_masks, height, width)` containing the output masks.
- A tensor of shape `(batch_size, num_prompts, num_masks)` containing the iou predictions for each mask.
- (Optional) A tuple containing attention tensors if output_attentions is True.
"""
batch_size, num_channels, height, width = image_embeddings.shape
point_batch_size = sparse_prompt_embeddings.shape[1] if sparse_prompt_embeddings is not None else 1
has_intermediate = intermediate_embeddings is not None and len(intermediate_embeddings) > 0
if has_intermediate:
vit_features = intermediate_embeddings[0].permute(0, 3, 1, 2).contiguous()
embed_encode = self.encoder_conv1(image_embeddings)
embed_encode = self.activation(self.encoder_norm(embed_encode))
embed_encode = self.encoder_conv2(embed_encode)
if has_intermediate:
compressed_vit_features = self.compress_vit_conv1(vit_features)
compressed_vit_features = self.activation(self.compress_vit_norm(compressed_vit_features))
compressed_vit_features = self.compress_vit_conv2(compressed_vit_features)
hq_features = embed_encode + compressed_vit_features
else:
hq_features = embed_encode
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight, self.hq_token.weight], dim=0)
output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1)
if sparse_prompt_embeddings is not None:
tokens = torch.cat([output_tokens, sparse_prompt_embeddings], dim=2)
else:
tokens = output_tokens
point_embeddings = tokens.to(self.iou_token.weight.dtype)
image_embeddings = image_embeddings + dense_prompt_embeddings
image_embeddings = image_embeddings.repeat_interleave(point_batch_size, 0)
image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0)
point_embedding, iou_token_out = self.transformer(
point_embeddings=point_embeddings,
image_embeddings=image_embeddings,
image_positional_embeddings=image_positional_embeddings,
attention_similarity=attention_similarity,
target_embedding=target_embedding,
)
iou_token_out = point_embedding[:, :, 0, :]
mask_tokens_out = point_embedding[:, :, 1 : (1 + self.num_mask_tokens), :]
image_embeddings = image_embeddings.transpose(2, 3).reshape(
batch_size * point_batch_size, num_channels, height, width
)
upscaled_embedding = self.upscale_conv1(image_embeddings)
upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding))
upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding))
upscaled_embedding_hq = self.mask_conv1(upscaled_embedding)
upscaled_embedding_hq = self.activation(self.mask_norm(upscaled_embedding_hq))
upscaled_embedding_hq = self.mask_conv2(upscaled_embedding_hq)
if hq_features.shape[0] == 1:
hq_features = hq_features.repeat(batch_size * point_batch_size, 1, 1, 1)
elif hq_features.shape[0] == batch_size and batch_size * point_batch_size != batch_size:
hq_features = hq_features.repeat_interleave(point_batch_size, 0)
upscaled_embedding_hq = upscaled_embedding_hq + hq_features
hyper_in_list = []
for mask_token_index in range(self.num_mask_tokens):
if mask_token_index < self.num_mask_tokens - 1:
current_mlp = self.output_hypernetworks_mlps[mask_token_index]
else:
current_mlp = self.hq_mask_mlp
hyper_in_list += [current_mlp(mask_tokens_out[:, :, mask_token_index, :])]
hyper_in = torch.stack(hyper_in_list, dim=2)
_, num_channels, height, width = upscaled_embedding.shape
upscaled_embedding = upscaled_embedding.reshape(batch_size, point_batch_size, num_channels, height * width)
upscaled_embedding_hq = upscaled_embedding_hq.reshape(
batch_size, point_batch_size, num_channels, height * width
)
masks_sam = (hyper_in[:, :, : self.num_mask_tokens - 1] @ upscaled_embedding).reshape(
batch_size, point_batch_size, -1, height, width
)
masks_hq = (hyper_in[:, :, self.num_mask_tokens - 1 :] @ upscaled_embedding_hq).reshape(
batch_size, point_batch_size, -1, height, width
)
masks = torch.cat([masks_sam, masks_hq], dim=2)
iou_pred = self.iou_prediction_head(iou_token_out)
if multimask_output:
mask_slice = slice(1, self.num_mask_tokens - 1)
iou_pred = iou_pred[:, :, mask_slice]
# Sort the IoU scores in descending order and get indices
iou_pred_sorted, sort_indices = torch.sort(iou_pred, dim=2, descending=True)
# Reorder the masks according to sorted scores
masks_sam = masks[:, :, mask_slice, :, :]
masks_sam = torch.gather(
masks_sam,
2,
sort_indices[..., None, None].expand(-1, -1, -1, masks_sam.shape[3], masks_sam.shape[4]),
)
# Update iou_pred with sorted scores
iou_pred = iou_pred_sorted
else:
mask_slice = slice(0, 1)
iou_pred = iou_pred[:, :, mask_slice]
masks_sam = masks[:, :, mask_slice, :, :]
masks_hq = masks[:, :, slice(self.num_mask_tokens - 1, self.num_mask_tokens), :, :]
if hq_token_only:
masks = masks_hq
else:
masks = masks_sam + masks_hq
return masks, iou_pred
@auto_docstring(
custom_intro="""
The vision model from SamHQ without any head or projection on top.
"""
)
| SamHQMaskDecoder |
python | anthropics__anthropic-sdk-python | src/anthropic/types/raw_message_delta_event.py | {
"start": 320,
"end": 432
} | class ____(BaseModel):
stop_reason: Optional[StopReason] = None
stop_sequence: Optional[str] = None
| Delta |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 119734,
"end": 119905
} | class ____(AsyncTestCase):
def test_listen(self):
app = Application([])
server = app.listen(0, address="127.0.0.1")
server.stop()
| ApplicationTest |
python | sympy__sympy | sympy/stats/stochastic_process_types.py | {
"start": 84659,
"end": 86402
} | class ____(CountingProcess):
"""
The Wiener process is a real valued continuous-time stochastic process.
In physics it is used to study Brownian motion and it is often also called
Brownian motion due to its historical connection with physical process of the
same name originally observed by Scottish botanist Robert Brown.
Parameters
==========
sym : Symbol/str
Examples
========
>>> from sympy.stats import WienerProcess, P, E
>>> from sympy import symbols, Contains, Interval
>>> X = WienerProcess("X")
>>> X.state_space
Reals
>>> t1, t2 = symbols('t1 t2', positive=True)
>>> P(X(t1) < 7).simplify()
erf(7*sqrt(2)/(2*sqrt(t1)))/2 + 1/2
>>> P((X(t1) > 2) | (X(t1) < 4), Contains(t1, Interval.Ropen(2, 4))).simplify()
-erf(1)/2 + erf(2)/2 + 1
>>> E(X(t1))
0
>>> E(X(t1) + 2*X(t2), Contains(t1, Interval.Lopen(0, 1))
... & Contains(t2, Interval.Lopen(1, 2)))
0
References
==========
.. [1] https://www.probabilitycourse.com/chapter11/11_4_0_brownian_motion_wiener_process.php
.. [2] https://en.wikipedia.org/wiki/Wiener_process
"""
def __new__(cls, sym):
sym = _symbol_converter(sym)
return Basic.__new__(cls, sym)
@property
def state_space(self):
return S.Reals
def distribution(self, key):
if isinstance(key, RandomIndexedSymbol):
self._deprecation_warn_distribution()
return NormalDistribution(0, sqrt(key.key))
return NormalDistribution(0, sqrt(key))
def density(self, x):
return exp(-x**2/(2*x.key)) / (sqrt(2*pi)*sqrt(x.key))
def simple_rv(self, rv):
return Normal(rv.name, 0, sqrt(rv.key))
| WienerProcess |
python | google__pytype | pytype/errors/error_types.py | {
"start": 3832,
"end": 4087
} | class ____(InvalidParameters):
"""E.g. an arg "x" is passed to a function as both a posarg and a kwarg."""
def __init__(self, sig, passed_args, ctx, duplicate):
super().__init__(sig, passed_args, ctx)
self.duplicate = duplicate
| DuplicateKeyword |
python | astropy__astropy | astropy/modeling/spline.py | {
"start": 17205,
"end": 18161
} | class ____(abc.ABC):
"""
Base Spline Fitter.
"""
def __init__(self):
self.fit_info = {"resid": None, "spline": None}
def _set_fit_info(self, spline):
self.fit_info["resid"] = spline.get_residual()
self.fit_info["spline"] = spline
@abc.abstractmethod
def _fit_method(self, model, x, y, **kwargs):
raise NotImplementedError("This has not been implemented for _SplineFitter.")
def __call__(self, model, x, y, z=None, **kwargs):
model_copy = model.copy()
if isinstance(model_copy, Spline1D):
if z is not None:
raise ValueError("1D model can only have 2 data points.")
spline = self._fit_method(model_copy, x, y, **kwargs)
else:
raise ModelDefinitionError(
"Only spline models are compatible with this fitter."
)
self._set_fit_info(spline)
return model_copy
| _SplineFitter |
python | walkccc__LeetCode | solutions/3173. Bitwise OR of Adjacent Elements/3173.py | {
"start": 0,
"end": 123
} | class ____:
def orArray(self, nums: list[int]) -> list[int]:
return [a | b for a, b in itertools.pairwise(nums)]
| Solution |
python | ansible__ansible | test/units/inventory/test_host.py | {
"start": 2314,
"end": 2670
} | class ____(TestHost):
ansible_port = 8822
def setUp(self):
self.hostA = Host(name='a', port=self.ansible_port)
self.hostB = Host(name='b', port=self.ansible_port)
def test_get_vars_ansible_port(self):
host_vars = self.hostA.get_vars()
self.assertEqual(host_vars['ansible_port'], self.ansible_port)
| TestHostWithPort |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-pathway/llama_index/readers/pathway/base.py | {
"start": 362,
"end": 3476
} | class ____:
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
url: Optional[str] = None,
):
"""
A client you can use to query :py:class:`VectorStoreServer`.
Please provide either the `url`, or `host` and `port`.
Args:
- host: host on which `:py:class:`VectorStoreServer` listens
- port: port on which `:py:class:`VectorStoreServer` listens
- url: url at which `:py:class:`VectorStoreServer` listens
"""
err = "Either (`host` and `port`) or `url` must be provided, but not both."
if url is not None:
if host or port:
raise ValueError(err)
self.url = url
else:
if host is None:
raise ValueError(err)
port = port or 80
self.url = f"http://{host}:{port}"
def query(
self, query: str, k: int = 3, metadata_filter: Optional[str] = None
) -> List[dict]:
"""
Perform a query to the vector store and fetch results.
Args:
- query:
- k: number of documents to be returned
- metadata_filter: optional string representing the metadata filtering query
in the JMESPath format. The search will happen only for documents
satisfying this filtering.
"""
data = {"query": query, "k": k}
if metadata_filter is not None:
data["metadata_filter"] = metadata_filter
url = self.url + "/v1/retrieve"
response = requests.post(
url,
data=json.dumps(data),
headers={"Content-Type": "application/json"},
timeout=3,
)
return response.json()
# Make an alias
__call__ = query
def get_vectorstore_statistics(self) -> dict:
"""Fetch basic statistics about the vector store."""
url = self.url + "/v1/statistics"
response = requests.post(
url,
json={},
headers={"Content-Type": "application/json"},
)
return response.json()
def get_input_files(
self,
metadata_filter: Optional[str] = None,
filepath_globpattern: Optional[str] = None,
) -> list:
"""
Fetch information on documents in the vector store.
Args:
metadata_filter: optional string representing the metadata filtering query
in the JMESPath format. The search will happen only for documents
satisfying this filtering.
filepath_globpattern: optional glob pattern specifying which documents
will be searched for this query.
"""
url = self.url + "/v1/inputs"
response = requests.post(
url,
json={
"metadata_filter": metadata_filter,
"filepath_globpattern": filepath_globpattern,
},
headers={"Content-Type": "application/json"},
)
return response.json()
| _VectorStoreClient |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_ordered_dict.py | {
"start": 40620,
"end": 40983
} | class ____(mapping_tests.BasicTestMappingProtocol):
@classmethod
def setUpClass(cls):
class MyOrderedDict(c_coll.OrderedDict):
pass
cls.type2test = MyOrderedDict
super().setUpClass()
def test_popitem(self):
d = self._empty_mapping()
self.assertRaises(KeyError, d.popitem)
| CPythonSubclassMappingTests |
python | numpy__numpy | benchmarks/benchmarks/bench_ufunc.py | {
"start": 2798,
"end": 3232
} | class ____(Benchmark):
def setup(self):
rng = np.random.default_rng(1)
self.vals = rng.random(10_000_000, dtype=np.float64)
self.idx = rng.integers(1000, size=10_000_000).astype(np.intp)
self.res = np.zeros(1000, dtype=self.vals.dtype)
def time_sum_at(self):
np.add.at(self.res, self.idx, self.vals)
def time_maximum_at(self):
np.maximum.at(self.res, self.idx, self.vals)
| At |
python | sphinx-doc__sphinx | sphinx/domains/javascript.py | {
"start": 10636,
"end": 10924
} | class ____(JSCallable):
"""Like a callable but with a different prefix."""
allow_nesting = True
def get_display_prefix(self) -> list[Node]:
return [
addnodes.desc_sig_keyword('class', 'class'),
addnodes.desc_sig_space(),
]
| JSConstructor |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_to_numpy.py | {
"start": 143,
"end": 2782
} | class ____:
def test_to_numpy(self):
df = DataFrame({"A": [1, 2], "B": [3, 4.5]})
expected = np.array([[1, 3], [2, 4.5]])
result = df.to_numpy()
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_dtype(self):
df = DataFrame({"A": [1, 2], "B": [3, 4.5]})
expected = np.array([[1, 3], [2, 4]], dtype="int64")
result = df.to_numpy(dtype="int64")
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_copy(self):
arr = np.random.default_rng(2).standard_normal((4, 3))
df = DataFrame(arr)
assert df.values.base is not arr
assert df.to_numpy(copy=False).base is df.values.base
assert df.to_numpy(copy=True).base is not arr
# we still don't want a copy when na_value=np.nan is passed,
# and that can be respected because we are already numpy-float
assert df.to_numpy(copy=False).base is df.values.base
@pytest.mark.filterwarnings(
"ignore:invalid value encountered in cast:RuntimeWarning"
)
def test_to_numpy_mixed_dtype_to_str(self):
# https://github.com/pandas-dev/pandas/issues/35455
df = DataFrame([[Timestamp("2020-01-01 00:00:00"), 100.0]])
result = df.to_numpy(dtype=str)
expected = np.array([["2020-01-01 00:00:00", "100.0"]], dtype=str)
tm.assert_numpy_array_equal(result, expected)
def test_to_numpy_datetime_with_na(self):
# GH #53115
dti = date_range("2016-01-01", periods=3, unit="ns")
df = DataFrame(dti)
df.iloc[0, 0] = NaT
expected = np.array([[np.nan], [1.45169280e18], [1.45177920e18]])
result = df.to_numpy(float, na_value=np.nan)
tm.assert_numpy_array_equal(result, expected)
df = DataFrame(
{
"a": [
Timestamp("1970-01-01").as_unit("s"),
Timestamp("1970-01-02").as_unit("s"),
NaT,
],
"b": [
Timestamp("1970-01-01").as_unit("s"),
np.nan,
Timestamp("1970-01-02").as_unit("s"),
],
"c": [
1,
np.nan,
2,
],
}
)
expected = np.array(
[
[0.00e00, 0.00e00, 1.00e00],
[8.64e04, np.nan, np.nan],
[np.nan, 8.64e04, 2.00e00],
]
)
result = df.to_numpy(float, na_value=np.nan)
tm.assert_numpy_array_equal(result, expected)
| TestToNumpy |
python | pytest-dev__pytest-xdist | testing/test_remote.py | {
"start": 568,
"end": 791
} | class ____:
def __init__(self, eventcall: tuple[str, dict[str, Any]]) -> None:
self.name, self.kwargs = eventcall
def __str__(self) -> str:
return f"<EventCall {self.name}(**{self.kwargs})>"
| EventCall |
python | openai__openai-python | src/openai/types/chat/chat_completion_content_part_image_param.py | {
"start": 247,
"end": 614
} | class ____(TypedDict, total=False):
url: Required[str]
"""Either a URL of the image or the base64 encoded image data."""
detail: Literal["auto", "low", "high"]
"""Specifies the detail level of the image.
Learn more in the
[Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
"""
| ImageURL |
python | allegroai__clearml | clearml/backend_api/services/v2_13/auth.py | {
"start": 4315,
"end": 4966
} | class ____(Request):
"""
Creates a new set of credentials for the authenticated user.
New key/secret is returned.
Note: Secret will never be returned in any other API call.
If a secret is lost or compromised, the key should be revoked
and a new set of credentials can be created.
"""
_service = "auth"
_action = "create_credentials"
_version = "2.13"
_schema = {
"additionalProperties": False,
"definitions": {},
"properties": {},
"type": "object",
}
| CreateCredentialsRequest |
python | nedbat__coveragepy | tests/test_debug.py | {
"start": 9578,
"end": 11044
} | class ____(CoverageTest):
"""Tests that we can direct debug output where we want."""
def setUp(self) -> None:
super().setUp()
# DebugOutputFile aggressively tries to start just one output file. We
# need to manually force it to make a new one.
DebugOutputFile._del_singleton_data()
def debug_sys(self) -> None:
"""Run just enough coverage to get full debug=sys output."""
cov = coverage.Coverage(debug=["sys"])
cov.start()
cov.stop()
def test_stderr_default(self) -> None:
self.debug_sys()
out, err = self.stdouterr()
assert "" == out
assert_good_debug_sys(err)
def test_envvar(self) -> None:
self.set_environ("COVERAGE_DEBUG_FILE", "debug.out")
self.debug_sys()
assert ("", "") == self.stdouterr()
with open("debug.out", encoding="utf-8") as f:
assert_good_debug_sys(f.read())
def test_config_file(self) -> None:
self.make_file(".coveragerc", "[run]\ndebug_file = lotsa_info.txt")
self.debug_sys()
assert ("", "") == self.stdouterr()
with open("lotsa_info.txt", encoding="utf-8") as f:
assert_good_debug_sys(f.read())
def test_stdout_alias(self) -> None:
self.set_environ("COVERAGE_DEBUG_FILE", "stdout")
self.debug_sys()
out, err = self.stdouterr()
assert "" == err
assert_good_debug_sys(out)
| DebugOutputTest |
python | sympy__sympy | sympy/functions/special/bessel.py | {
"start": 58071,
"end": 63011
} | class ____(AiryBase):
r"""
The derivative $\operatorname{Bi}^\prime$ of the Airy function of the first
kind.
Explanation
===========
The Airy function $\operatorname{Bi}^\prime(z)$ is defined to be the
function
.. math::
\operatorname{Bi}^\prime(z) := \frac{\mathrm{d} \operatorname{Bi}(z)}{\mathrm{d} z}.
Examples
========
Create an Airy function object:
>>> from sympy import airybiprime
>>> from sympy.abc import z
>>> airybiprime(z)
airybiprime(z)
Several special values are known:
>>> airybiprime(0)
3**(1/6)/gamma(1/3)
>>> from sympy import oo
>>> airybiprime(oo)
oo
>>> airybiprime(-oo)
0
The Airy function obeys the mirror symmetry:
>>> from sympy import conjugate
>>> conjugate(airybiprime(z))
airybiprime(conjugate(z))
Differentiation with respect to $z$ is supported:
>>> from sympy import diff
>>> diff(airybiprime(z), z)
z*airybi(z)
>>> diff(airybiprime(z), z, 2)
z*airybiprime(z) + airybi(z)
Series expansion is also supported:
>>> from sympy import series
>>> series(airybiprime(z), z, 0, 3)
3**(1/6)/gamma(1/3) + 3**(5/6)*z**2/(6*gamma(2/3)) + O(z**3)
We can numerically evaluate the Airy function to arbitrary precision
on the whole complex plane:
>>> airybiprime(-2).evalf(50)
0.27879516692116952268509756941098324140300059345163
Rewrite $\operatorname{Bi}^\prime(z)$ in terms of hypergeometric functions:
>>> from sympy import hyper
>>> airybiprime(z).rewrite(hyper)
3**(5/6)*z**2*hyper((), (5/3,), z**3/9)/(6*gamma(2/3)) + 3**(1/6)*hyper((), (1/3,), z**3/9)/gamma(1/3)
See Also
========
airyai: Airy function of the first kind.
airybi: Airy function of the second kind.
airyaiprime: Derivative of the Airy function of the first kind.
References
==========
.. [1] https://en.wikipedia.org/wiki/Airy_function
.. [2] https://dlmf.nist.gov/9
.. [3] https://encyclopediaofmath.org/wiki/Airy_functions
.. [4] https://mathworld.wolfram.com/AiryFunctions.html
"""
nargs = 1
unbranched = True
@classmethod
def eval(cls, arg):
if arg.is_Number:
if arg is S.NaN:
return S.NaN
elif arg is S.Infinity:
return S.Infinity
elif arg is S.NegativeInfinity:
return S.Zero
elif arg.is_zero:
return 3**Rational(1, 6) / gamma(Rational(1, 3))
if arg.is_zero:
return 3**Rational(1, 6) / gamma(Rational(1, 3))
def fdiff(self, argindex=1):
if argindex == 1:
return self.args[0]*airybi(self.args[0])
else:
raise ArgumentIndexError(self, argindex)
def _eval_evalf(self, prec):
z = self.args[0]._to_mpmath(prec)
with workprec(prec):
res = mp.airybi(z, derivative=1)
return Expr._from_mpmath(res, prec)
def _eval_rewrite_as_besselj(self, z, **kwargs):
tt = Rational(2, 3)
a = tt * Pow(-z, Rational(3, 2))
if re(z).is_negative:
return -z/sqrt(3) * (besselj(-tt, a) + besselj(tt, a))
def _eval_rewrite_as_besseli(self, z, **kwargs):
ot = Rational(1, 3)
tt = Rational(2, 3)
a = tt * Pow(z, Rational(3, 2))
if re(z).is_positive:
return z/sqrt(3) * (besseli(-tt, a) + besseli(tt, a))
else:
a = Pow(z, Rational(3, 2))
b = Pow(a, tt)
c = Pow(a, -tt)
return sqrt(ot) * (b*besseli(-tt, tt*a) + z**2*c*besseli(tt, tt*a))
def _eval_rewrite_as_hyper(self, z, **kwargs):
pf1 = z**2 / (2*root(3, 6)*gamma(Rational(2, 3)))
pf2 = root(3, 6) / gamma(Rational(1, 3))
return pf1 * hyper([], [Rational(5, 3)], z**3/9) + pf2 * hyper([], [Rational(1, 3)], z**3/9)
def _eval_expand_func(self, **hints):
arg = self.args[0]
symbs = arg.free_symbols
if len(symbs) == 1:
z = symbs.pop()
c = Wild("c", exclude=[z])
d = Wild("d", exclude=[z])
m = Wild("m", exclude=[z])
n = Wild("n", exclude=[z])
M = arg.match(c*(d*z**n)**m)
if M is not None:
m = M[m]
# The transformation is in principle
# given by 03.08.16.0001.01 but note
# that there is an error in this formula.
# https://functions.wolfram.com/Bessel-TypeFunctions/AiryBiPrime/16/01/01/0001/
if (3*m).is_integer:
c = M[c]
d = M[d]
n = M[n]
pf = (d**m * z**(n*m)) / (d * z**n)**m
newarg = c * d**m * z**(n*m)
return S.Half * (sqrt(3)*(pf - S.One)*airyaiprime(newarg) + (pf + S.One)*airybiprime(newarg))
| airybiprime |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/tracing_compilation_test.py | {
"start": 3642,
"end": 52123
} | class ____(test.TestCase, parameterized.TestCase):
@test_util.run_in_graph_and_eager_modes
def testBackwardNoneGradient(self):
model = variables.Variable(1.0, name='model')
count = variables.Variable(0)
@compiled_fn
def forward_pass(value):
count.assign_add(1)
residuals = value - model
loss = 0.5 * math_ops.reduce_mean(math_ops.pow(residuals, 2))
# Note: count is an integer, so its doutput will be None
return loss, count
def reduce_fn(x):
if context.executing_eagerly():
with backprop.GradientTape() as t:
loss, count = forward_pass(x)
return t.gradient(loss, model), count
loss, count = forward_pass(x)
grad_only = gradients_impl.gradients(loss, model)
return grad_only, count
g, _ = reduce_fn(constant_op.constant([7.0]))
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(nest.flatten(self.evaluate(g)), [-6.0])
def testExternalControlDependency(self):
with ops.Graph().as_default(), self.test_session():
v = variables.Variable(1.0)
v.initializer.run()
op = v.assign_add(1.0)
@compiled_fn
def f():
with ops.control_dependencies([op]):
return 1.0
self.evaluate(f())
self.assertAllEqual(self.evaluate(v), 2.0)
def testInputShapeFunctionRelaxation(self):
unknown_dim = [False]
function_cache = function_cache_lib.FunctionCache()
@compiled_fn(reduce_retracing=True, function_cache=function_cache)
def func(a):
if a._shape_tuple()[0] is None:
unknown_dim[0] = True
return a + 1
func(constant_op.constant([]))
self.assertFalse(unknown_dim[0])
self.assertLen(function_cache, 1)
func(constant_op.constant([1.0]))
self.assertTrue(unknown_dim[0])
self.assertLen(function_cache, 2)
func(constant_op.constant([1.0, 2.0]))
self.assertTrue(unknown_dim[0])
def testNestedInputShapeFunctionRelaxation(self):
unknown_dim = [False]
function_cache = function_cache_lib.FunctionCache()
@compiled_fn(reduce_retracing=True, function_cache=function_cache)
def func(a_, b_=None):
del a_ # Only used to check which cache is used.
self.assertEqual(b_[0]._shape_tuple(), ())
if b_[1]._shape_tuple()[0] is None:
unknown_dim[0] = True
return b_[0] + 1
a = 'hi'
b0 = constant_op.constant(1.0)
func(a, b_=[b0, constant_op.constant([])])
self.assertFalse(unknown_dim[0])
self.assertLen(function_cache, 1)
func(a, b_=[b0, constant_op.constant([1.0])])
self.assertTrue(unknown_dim[0])
self.assertLen(function_cache, 2)
func(a, b_=[b0, constant_op.constant([1.0, 1.0])])
self.assertTrue(unknown_dim[0])
self.assertLen(function_cache, 2)
unknown_dim[0] = False
# Now do the same except with a new a which is not a tensor; this should
# change the cache key.
a = 'bye'
func(a, b_=[b0, constant_op.constant([])])
self.assertFalse(unknown_dim[0])
self.assertLen(function_cache, 3)
# We relax the type traced previously.
func(a, b_=[b0, constant_op.constant([1.0])])
self.assertTrue(unknown_dim[0])
self.assertLen(function_cache, 4)
@test_util.run_v2_only
def testGraphEagerIsolation(self):
def f_py():
self.v = variables.Variable(1.0)
return self.v.read_value()
f = lambda: tracing_compilation.call_function( # pylint: disable=g-long-lambda
tracing_options=tracing_compilation.TracingOptions(f_py, 'f')
)
self.assertAllEqual(f(), 1.0)
with ops.Graph().as_default():
self.assertEqual(f().shape, ())
@test_util.run_v2_only
def testCompilationNumpyArraysConvertedToTensors(self):
def f(x):
self.assertIsInstance(x, tensor_lib.Tensor)
return x
x = random_ops.random_uniform([2, 2]).numpy()
function_cache = function_cache_lib.FunctionCache()
defined = compiled_fn(f, function_cache=function_cache)
defined(x)
self.assertLen(function_cache, 1)
x = random_ops.random_uniform([2, 2]).numpy()
defined(x)
# A NumPy array with different values but the same shape and dtype
# shouldn't trigger another function definition.
self.assertLen(function_cache, 1)
np_ones = numpy.ones([], numpy.float32)
np_zeros = numpy.zeros([], numpy.float32)
tf_ones = array_ops.ones([])
tf_zeros = array_ops.zeros([])
# Test that the numpy array is properly an argument to the graph function.
self.assertEqual(1.0, defined(np_ones).numpy())
self.assertLen(function_cache, 2)
self.assertEqual(0.0, defined(np_zeros).numpy())
self.assertEqual(1.0, defined(tf_ones).numpy())
self.assertEqual(0.0, defined(tf_zeros).numpy())
self.assertLen(function_cache, 2)
# Test that mutable inputs are supported.
mutable = numpy.ones([], numpy.float32)
self.assertEqual(1.0, defined(mutable).numpy())
mutable.fill(0)
self.assertEqual(0.0, defined(mutable).numpy())
class MyNdarray(numpy.ndarray):
pass
# Test that the subclasses of ndarray are converted too.
self.assertEqual(1.0, defined(np_ones.view(MyNdarray)).numpy())
self.assertEqual(0.0, defined(np_zeros.view(MyNdarray)).numpy())
# We should not have triggered any re-tracing of the python function.
self.assertLen(function_cache, 2)
@test_util.run_v2_only
def testNumpyDtypeInputSupported(self):
@compiled_fn
def f(x, dtype):
return constant_op.constant(dtype(x))
self.assertEqual(f(1, numpy.float32).numpy(), numpy.float32(1))
self.assertEqual(f(2, numpy.float32).numpy(), numpy.float32(2))
self.assertEqual(f(1, numpy.int32).numpy(), numpy.int32(1))
self.assertEqual(f(2, numpy.int32).numpy(), numpy.int32(2))
@test_util.run_v2_only
def testCompilationNumpyArraysConvertedToTensorsInKwargs(self):
def f(**kwargs):
x = kwargs.pop('x')
self.assertIsInstance(x, tensor_lib.Tensor)
return x
x = random_ops.random_uniform([2, 2]).numpy()
function_cache = function_cache_lib.FunctionCache()
defined = compiled_fn(f, function_cache=function_cache)
defined(x=x)
self.assertLen(function_cache, 1)
x = random_ops.random_uniform([2, 2]).numpy()
defined(x=x)
# A NumPy array with different values but the same shape and dtype
# shouldn't trigger another function definition.
self.assertLen(function_cache, 1)
# Test that the numpy array is properly an argument to the graph function.
self.assertEqual(1.0, defined(x=numpy.ones([])).numpy())
self.assertEqual(0.0, defined(x=numpy.zeros([])).numpy())
self.assertEqual(1.0, defined(x=array_ops.ones([])).numpy())
self.assertEqual(0.0, defined(x=array_ops.zeros([])).numpy())
@test_util.run_v2_only
def testFuncListAttr(self):
@compiled_fn
def test_function(val):
def fn1():
return array_ops.ones([10])
fn2 = lambda: array_ops.ones([10]) * 2
def fn3(x=3):
return array_ops.ones([10]) * x
fn4 = functools.partial(fn3, x=4)
fn5 = functools.partial(fn3, 5)
return gen_functional_ops.case(
val,
[],
[dtypes.float32],
[
compiled_fn(f).get_concrete_function()
for f in (fn1, fn2, fn3, fn4, fn5)
],
)
ones = array_ops.ones([10])
self.assertAllEqual([ones], test_function(0))
self.assertAllEqual([ones * 2], test_function(1))
self.assertAllEqual([ones * 3], test_function(2))
self.assertAllEqual([ones * 4], test_function(3))
self.assertAllEqual([ones * 5], test_function(4))
self.assertAllEqual([ones * 5], test_function(22)) # default branch
@test_util.enable_control_flow_v2
def testVariableInLoopInFunction(self):
def test_function_py():
def loop_test(_):
return False
def loop_body(_):
return variable_scope.get_variable('a', shape=())
return while_loop.while_loop(loop_test, loop_body, [0.0])
test_function = tracing_compilation.trace_function(
tracing_options=tracing_compilation.TracingOptions(
test_function_py, 'test_function'
)
)
self.assertEqual(test_function().shape, [])
@test_util.run_in_graph_and_eager_modes
def testCompilationForcesResourceVariables(self):
def variable_creator():
self.v = variables.Variable(0.0)
return self.v.read_value()
defined = tracing_compilation.trace_function(
tracing_options=tracing_compilation.TracingOptions(
variable_creator, 'variable_creator'
)
)
defined() # Create the variable.
self.assertIsInstance(self.v, resource_variable_ops.ResourceVariable)
@test_util.run_gpu_only
@test_util.run_in_graph_and_eager_modes
def testFunctionWithResourcesOnDifferentDevices(self):
with ops.device('/cpu:0'):
v_cpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
with ops.device('/gpu:0'):
v_gpu = resource_variable_ops.ResourceVariable([0.0, 1.0, 2.0])
def sum_gather():
cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu, [1, 2]))
gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
return cpu_result, gpu_result
defined = compiled_fn(sum_gather)
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
expected = self.evaluate(sum_gather())
self.assertAllEqual(expected, self.evaluate(defined()))
@test_util.assert_no_new_pyobjects_executing_eagerly()
def testCallOptionsMemory(self):
@compiled_fn
def model(x):
return x + constant_op.constant(1.0)
# This happens with a lot of option toggles, e.g. soft device placement
context.context().function_call_options = None
model(constant_op.constant(2.0))
@test_util.run_in_graph_and_eager_modes
def testVariablesPlacedOnOutsideDevice(self):
class _Obj(object):
def __init__(self):
self.v = None
@compiled_fn
def f(self):
if self.v is None:
self.v = variables.Variable(1.0)
return self.v + 1.0
has_device = _Obj()
with ops.device('cpu:0'):
has_device.f()
self.assertIn('CPU', has_device.v.device)
def testCacheObjectHashCollisions(self):
  """Distinct objects that share a hash value must not share a trace."""

  class Foo:

    def __hash__(self):
      # Deliberately collide: every instance hashes the same.
      return 42

  def make_id_tensor(foo):
    return constant_op.constant([id(foo)])

  cache = function_cache_lib.FunctionCache()
  traced = compiled_fn(make_id_tensor, function_cache=cache)

  first = Foo()
  traced(first)
  self.assertLen(cache, 1)

  # A second, distinct instance with the same hash must get its own entry.
  second = Foo()
  traced(second)
  self.assertLen(cache, 2)
def testCacheTensorDtypeCollision(self):
  """Tensors that differ only in dtype get separate cache entries."""

  def double(t):
    return t + t

  cache = function_cache_lib.FunctionCache()
  traced = compiled_fn(double, function_cache=cache)

  # Same value and shape, different dtypes: each dtype adds one entry.
  for expected_size, dtype in ((1, dtypes.complex64), (2, dtypes.complex128)):
    traced(constant_op.constant([[1.0]], dtype=dtype))
    self.assertLen(cache, expected_size)
def testCacheTensorShapeCollision(self):
  """Tensors that differ only in shape get separate cache entries."""

  def double(t):
    return t + t

  cache = function_cache_lib.FunctionCache()
  traced = compiled_fn(double, function_cache=cache)

  # Same dtype, different ranks/shapes: each shape adds one entry.
  for expected_size, value in ((1, [[1.0]]), (2, [1.0])):
    traced(constant_op.constant(value, dtype=dtypes.complex64))
    self.assertLen(cache, expected_size)
def testCacheTensorShapeDtypeCollision(self):
  """Tensors differing in both shape and dtype get separate cache entries."""

  def double(t):
    return t + t

  cache = function_cache_lib.FunctionCache()
  traced = compiled_fn(double, function_cache=cache)

  for expected_size, (value, dtype) in (
      (1, ([[1.0]], dtypes.complex64)),
      (2, ([1.0], dtypes.complex128)),
  ):
    traced(constant_op.constant(value, dtype=dtype))
    self.assertLen(cache, expected_size)
def testCacheTensorUnknownShapesCollisionRelaxedShapes(self):
  """With reduce_retracing, differing shapes relax instead of retracing."""

  def func(t):
    return t + t

  with context.graph_mode(), self.cached_session():
    function_cache = function_cache_lib.FunctionCache()
    defined = compiled_fn(
        func, reduce_retracing=True, function_cache=function_cache
    )

    p = array_ops.placeholder(dtype=dtypes.float32, shape=[])
    defined(p)
    self.assertLen(function_cache, 1)

    # Scalar vs. rank-1 cannot be relaxed into one signature.
    p = array_ops.placeholder(dtype=dtypes.float32, shape=[1])
    defined(p)
    self.assertLen(function_cache, 2)

    p = array_ops.placeholder(dtype=dtypes.float32, shape=[2])
    defined(p)
    # Gradual shape relaxation is performed; and the common shape between
    # [1] and [2] is one containing unknown dimensions.
    self.assertLen(function_cache, 2)

    t = constant_op.constant([1.0, 1.0, 1.0], dtype=dtypes.float32)
    defined(t)
    # Shape (3,) matches the relaxed shape TensorShape([None])
    self.assertLen(function_cache, 2)
def testPythonFunctionWithDefaultArgs(self):
  """Default-argument values participate in the trace cache key."""

  def func(foo, bar=1, baz=2):
    del foo
    del bar
    del baz
    return

  function_cache = function_cache_lib.FunctionCache()
  defined = compiled_fn(func, function_cache=function_cache)
  defined(0, baz=20)
  self.assertLen(function_cache, 1)

  defined(1)  # bar=1, baz=2
  self.assertLen(function_cache, 2)

  # This matches the previous call.
  defined(foo=1)
  self.assertLen(function_cache, 2)

  defined(1, 2, 3)
  self.assertLen(function_cache, 3)

  # This matches the previous call.
  defined(1, bar=2, baz=3)
  self.assertLen(function_cache, 3)

  # This matches the previous call.
  defined(1, baz=3, bar=2)
  self.assertLen(function_cache, 3)
@test_util.run_v2_only
def testFunctoolsPartialUnwrappedCorrectly(self):
  """Tracing a functools.partial matches calling the partial directly."""

  def full_function(a, b, c=3):
    return a, b, c

  bound = functools.partial(full_function, 1, c=4)
  expected = bound(2)

  traced = compiled_fn(bound)
  results = traced(2)
  # Every traced output must agree with the plain Python call.
  for got, want in zip(results, expected):
    self.assertEqual(got.numpy(), want)
def testInputSignatureWithMatchingInputs(self):
def foo(a):
self.assertEqual(a.shape, (2,))
return a
function_cache = function_cache_lib.FunctionCache()
signature = [tensor_lib.TensorSpec(shape=(2,), dtype=dtypes.float32)]
defined = compiled_fn(
foo, input_signature=signature, function_cache=function_cache
)
a = array_ops.ones([2])
self.assertAllEqual(a, defined(a))
self.assertLen(function_cache, 1)
self.assertAllEqual(a, defined.get_concrete_function()(a))
self.assertAllEqual(a, defined.get_concrete_function(a)(a))
self.assertAllEqual(
a,
defined.get_concrete_function(
tensor_lib.TensorSpec((2,), dtype=dtypes.float32)
)(a),
)
self.assertLen(function_cache, 1)
def bar(a):
self.assertEqual(a._shape_tuple(), (2, None))
return a
signature = [tensor_lib.TensorSpec((2, None), dtypes.float32)]
defined = compiled_fn(bar, input_signature=signature)
a = array_ops.ones([2, 1])
out = defined(a)
self.assertLen(function_cache, 1)
self.assertAllEqual(out, a)
# Changing the second dimension shouldn't create a new function.
b = array_ops.ones([2, 3])
out = defined(b)
self.assertLen(function_cache, 1)
self.assertAllEqual(out, b)
def testInputSignatureWithDictInPositionalArgs(self):
  """A dict passed positionally is distinct from the same value as a kwarg."""
  function_cache = function_cache_lib.FunctionCache()

  @compiled_fn(function_cache=function_cache)
  def f(*_args, **_kwargs):
    return None

  f(1, x=2)
  self.assertLen(function_cache, 1)
  # Identical call: no retrace.
  f(1, x=2)
  self.assertLen(function_cache, 1)
  # Same values, but the dict arrives positionally: new cache entry.
  f(1, {'x': 2})
  self.assertLen(function_cache, 2)
def testInputSignatureWithCompatibleInputs(self):
rank2_spec = tensor_lib.TensorSpec(
shape=(None, None), dtype=dtypes.float32
)
@compiled_fn(input_signature=[rank2_spec])
def func(a):
self.assertEqual([None, None], a.shape.as_list())
return array_ops.shape(a)
self.assertAllEqual([3, 1], func([[0], [1.0], [1]]))
self.assertAllEqual([2, 2], func(numpy.array([[1, 1], [2, 2]])))
with self.assertRaises(TypeError):
func([0.0, 1.0, 2.0]) # Wrong shape.
with self.assertRaises(TypeError):
func([['wrong dtype']])
@test_util.run_v2_only
def testNestedInputSignatures(self):
def expected_foo(a, b):
return [a, b]
function_cache = function_cache_lib.FunctionCache()
@compiled_fn(
input_signature=[
[tensor_lib.TensorSpec((2, None), dtypes.float32)] * 2,
tensor_lib.TensorSpec((1,), dtypes.float32),
],
function_cache=function_cache,
)
def foo(a, b):
self.assertEqual(a[0]._shape_tuple(), (2, None))
self.assertEqual(a[1]._shape_tuple(), (2, None))
self.assertEqual(b._shape_tuple(), (1,))
return [a, b]
a = array_ops.ones([2, 1])
b = array_ops.ones([1])
expected = expected_foo([a, a], b)
out = foo([a, a], b)
self.assertLen(function_cache, 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], a)
self.assertAllEqual(out[1], b)
# Changing the unspecified dimensions shouldn't create a new function.
a = array_ops.ones([2, 3])
b = array_ops.ones([2, 5])
c = array_ops.ones([1])
expected = expected_foo([a, b], c)
out = foo([a, b], c)
self.assertLen(function_cache, 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], b)
self.assertAllEqual(out[1], c)
# Passing compatible inputs should work.
a = a.numpy().tolist()
b = b.numpy().tolist()
c = c.numpy().tolist()
out = foo([a, b], c)
self.assertLen(function_cache, 1)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out[0][0], a)
self.assertAllEqual(out[0][1], b)
self.assertAllEqual(out[1], c)
@test_util.run_v2_only
def testNestedInputSignaturesWithDict(self):
def expected_bar(a):
return a
@compiled_fn(
input_signature=[{
'a': tensor_lib.TensorSpec((2, None), dtypes.float32),
'b': tensor_lib.TensorSpec((2, None), dtypes.float32),
'c': tensor_lib.TensorSpec((1,), dtypes.float32),
}]
)
def bar(a):
self.assertEqual(a['a']._shape_tuple(), (2, None))
self.assertEqual(a['b']._shape_tuple(), (2, None))
self.assertEqual(a['c']._shape_tuple(), (1,))
return a
a = array_ops.ones([2, 3])
b = array_ops.ones([1])
inputs = {'a': a, 'b': a, 'c': b}
expected = expected_bar(inputs)
out = bar(inputs)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out['a'], expected['a'])
self.assertAllEqual(out['b'], expected['b'])
self.assertAllEqual(out['c'], expected['c'])
# Passing compatible inputs should work.
a = a.numpy().tolist()
b = b.numpy().tolist()
inputs = {'a': a, 'b': a, 'c': b}
out = bar(inputs)
nest.assert_same_structure(out, expected)
self.assertAllEqual(out['a'], expected['a'])
self.assertAllEqual(out['b'], expected['b'])
self.assertAllEqual(out['c'], expected['c'])
def testInputSignatureMustBeSequenceOfTensorSpecs(self):
  """A dict-valued input_signature is rejected with a TypeError."""

  def noop(a, b):
    del a
    del b

  # Signatures must be either lists or tuples on their outermost levels.
  bad_signature = {'t1': tensor_lib.TensorSpec([], dtypes.float32)}
  expected_message = 'input_signature must be either a tuple or a list.*'
  with self.assertRaisesRegex(TypeError, expected_message):
    compiled_fn(noop, input_signature=bad_signature)
def testInputsIncompatibleWithNestedSignatureRaisesError(self):
  """Nested inputs must match the signature's structure exactly."""

  def pair_up(a, b):
    return [a, b]

  signature = [
      [tensor_lib.TensorSpec((1,), dtypes.float32)] * 2,
      [tensor_lib.TensorSpec((1,), dtypes.float32)] * 2,
  ]
  defined = compiled_fn(pair_up, input_signature=signature)
  one = array_ops.ones([1])

  # Too many or too few entries in either nested list is a TypeError.
  for first, second in (([one, one, one], [one]), ([one], [one, one, one])):
    with self.assertRaises(TypeError):
      defined(first, second)

  # A structure matching the signature is accepted.
  defined([one, one], [one, one])
@test_util.run_v2_only
def testUnderspecifiedInputSignature(self):
@compiled_fn(
input_signature=[
tensor_lib.TensorSpec([], dtypes.float32),
]
)
def foo(a, training=True):
if training:
return a
else:
return -1.0 * a
x = constant_op.constant(1.0)
with self.assertRaises(ValueError):
foo(x, training=False)
self.assertAllEqual(x.numpy(), foo(x).numpy())
@test_util.run_v2_only
def testInputSignatureWithPartialFunction(self):
def full_function(a, b, c=3.0):
return a, b, c
partial = functools.partial(full_function, 1, c=4)
a, b, c = partial(2.0)
signature = [tensor_lib.TensorSpec([], dtypes.float32)]
defined = compiled_fn(partial, input_signature=signature)
x = constant_op.constant(2.0)
func_a, func_b, func_c = defined(x)
self.assertEqual(func_a.numpy(), a)
self.assertEqual(func_b.numpy(), b)
self.assertEqual(func_c.numpy(), c)
@test_util.run_v2_only
def testInputSignatureWithKeywordPositionalArgs(self):
function_cache = function_cache_lib.FunctionCache()
@compiled_fn(
input_signature=[
tensor_lib.TensorSpec([], dtypes.float32),
tensor_lib.TensorSpec([], dtypes.int64),
],
function_cache=function_cache,
)
def foo(flt, integer):
return flt, integer
flt = constant_op.constant(1.0)
integer = constant_op.constant(2, dtypes.int64)
out1, out2 = foo(flt, integer)
self.assertLen(function_cache, 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt=flt, integer=integer)
self.assertLen(function_cache, 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(integer=integer, flt=flt)
self.assertLen(function_cache, 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
out1, out2 = foo(flt, integer=integer)
self.assertLen(function_cache, 1)
self.assertEqual(out1.numpy(), 1.0)
self.assertEqual(out2.numpy(), 2)
@test_util.run_v2_only
def testInputSignatureWithKeywordArgs(self):
def foo(a, b, **kwargs):
del kwargs
return a, b
x = compiled_fn(
foo,
input_signature=[
tensor_lib.TensorSpec([], dtypes.float32),
tensor_lib.TensorSpec([], dtypes.int32),
],
).get_concrete_function()
result = x(constant_op.constant(5.0), constant_op.constant(5))
self.assertAllEqual(result, [5.0, 5])
def testInputSignatureWithCompositeTensors(self):
def f(rt):
self.assertEqual(rt.values.shape.as_list(), [None])
self.assertEqual(rt.row_splits.shape.as_list(), [4])
return rt
signature = [
ragged_tensor.RaggedTensorSpec(shape=[3, None], dtype=dtypes.int32)
]
function_cache = function_cache_lib.FunctionCache()
defined = compiled_fn(
f, input_signature=signature, function_cache=function_cache
)
rt1 = ragged_factory_ops.constant([[1], [], [2, 3, 4]])
out1 = defined(rt1)
self.assertLen(function_cache, 1)
self.assertAllEqual(out1.values, rt1.values)
self.assertAllEqual(out1.row_splits, rt1.row_splits)
# Changing the row lengths shouldn't create a new function.
rt2 = ragged_factory_ops.constant([[1, 2], [3, 4], [5]])
out2 = defined(rt2)
self.assertLen(function_cache, 1)
self.assertAllEqual(out2.values, rt2.values)
self.assertAllEqual(out2.row_splits, rt2.row_splits)
# Different number of rows
rt3 = ragged_factory_ops.constant([[1, 2], [3, 4], [5], [6]])
with self.assertRaises(TypeError):
defined(rt3)
# Different dtype
rt4 = ragged_factory_ops.constant([[1.0, 2.0], [], [3.0]])
with self.assertRaises(TypeError):
defined(rt4)
# Different rank
rt5 = ragged_factory_ops.constant([[[1]], [[2]], [[3]]])
with self.assertRaises(ValueError):
defined(rt5)
@test_util.run_v2_only
def testInputSignatureWithKeywordOnlyArgs(self):
def f(a, b, c=3, *, d=4):
self.assertIsInstance(a, tensor_lib.Tensor)
self.assertIsInstance(b, tensor_lib.Tensor)
self.assertIsInstance(c, int)
self.assertIsInstance(d, (int, tensor_lib.Tensor))
return a + b + c + d
signature = [
tensor_lib.TensorSpec(shape=[], dtype=dtypes.int32),
tensor_lib.TensorSpec(shape=[], dtype=dtypes.int32),
]
defined = compiled_fn(f, input_signature=signature)
self.assertEqual(defined(1, 2).numpy(), 10)
defined = compiled_fn(functools.partial(f, c=4), input_signature=signature)
self.assertEqual(defined(1, 2).numpy(), 11)
defined = compiled_fn(functools.partial(f, d=5), input_signature=signature)
self.assertEqual(defined(1, 2).numpy(), 11)
defined = compiled_fn(
functools.partial(f, d=array_ops.constant(5)), input_signature=signature
)
self.assertEqual(defined(1, 2).numpy(), 11)
mod = module.Module()
save(mod, '/tmp/kwonlyf', defined.get_concrete_function(*signature))
loaded = load('/tmp/kwonlyf')
result = loaded.signatures['serving_default'](
a=array_ops.constant(1),
b=array_ops.constant(2),
d=array_ops.constant(5),
)
self.assertEqual(result['output_0'].numpy(), 11)
def testInputSignatureWithKeywordOnlyArgsNoDefaults(self):
signature = [
tensor_lib.TensorSpec(shape=[], dtype=dtypes.int32),
tensor_lib.TensorSpec(shape=[], dtype=dtypes.int32),
]
def test_func(a, *, b):
return a + b
with self.assertRaisesRegex(
TypeError,
(
'Since input_signature is defined, keyword-only parameter `b` must'
' have a default value'
),
):
compiled_fn(test_func, input_signature=signature)
test_func_lambda = lambda a, *, b: a + b
with self.assertRaisesRegex(
TypeError,
(
'Since input_signature is defined, keyword-only parameter `b` must'
' have a default value'
),
):
compiled_fn(test_func_lambda, input_signature=signature)
def testTensorKeywordArguments(self):
def foo(a, b):
del a
return b
function_cache = function_cache_lib.FunctionCache()
defined = compiled_fn(foo, function_cache=function_cache)
a = constant_op.constant(2.0)
b = constant_op.constant([1.0, 2.0])
one = defined(a, b)
self.assertLen(function_cache, 1)
two = defined(a=a, b=b)
self.assertLen(function_cache, 1)
three = defined(b=b, a=a)
self.assertLen(function_cache, 1)
four = defined(a, b=b)
self.assertLen(function_cache, 1)
# The next call corresponds to a new input signature, hence
# we expect another function to be defined.
five = defined(b, a)
self.assertLen(function_cache, 2)
six = defined(a=b, b=a)
self.assertLen(function_cache, 2)
seven = defined(b=a, a=b)
self.assertLen(function_cache, 2)
self.assertAllEqual(one, [1.0, 2.0])
self.assertAllEqual(two, [1.0, 2.0])
self.assertAllEqual(three, [1.0, 2.0])
self.assertAllEqual(four, [1.0, 2.0])
self.assertAllEqual(five, 2.0)
self.assertAllEqual(six, 2.0)
self.assertAllEqual(seven, 2.0)
def testFunctionWithInvalidAttribute(self):
def add(x, y):
return math_ops.add(x, y)
with self.assertRaisesRegex(
ValueError,
'Tracing compilation does not support `experimental_1` as an'
' attribute.',
):
tracing_compilation.trace_function(
(1, 2),
tracing_options=tracing_compilation.TracingOptions(
add, 'add', attributes={'experimental_1': 'value1'}
),
)
def testRegisterFunction(self):
@compiled_fn(name='add', function_cache=function_cache_lib.FunctionCache())
def add(x, y):
return math_ops.add(x, y)
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = compiled_fn(
matmul, name='matmul', function_cache=function_cache_lib.FunctionCache()
)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
concrete_func_matmul = defun_matmul.get_concrete_function(t, t)
concrete_func_matmul.add_to_graph()
concrete_func_matmul.add_gradient_functions_to_graph()
concrete_func_add = add.get_concrete_function(t, t)
concrete_func_add.add_to_graph()
concrete_func_add.add_gradient_functions_to_graph()
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 6)
# two sets of functions, each of them are (inference, forward, backward)
functions = list(graph._functions.values())
captured_function_names = [
f.cached_definition.signature.name for f in functions
]
expected_func_name_regex = [
'.*inference.*matmul.*',
'.*forward.*matmul.*',
'.*inference.*backward.*matmul.*',
'.*inference.*add.*',
'.*forward.*add.*',
'.*inference.*backward.*add.*',
]
for i in range(len(functions)):
self.assertRegex(
captured_function_names[i], expected_func_name_regex[i]
)
# Check the forward and backward function has the correct attributes.
self.assertEqual(
functions[1].cached_definition.attr['backward_function_name'].s,
functions[2].name,
)
self.assertEqual(
functions[2].cached_definition.attr['forward_function_name'].s,
functions[1].name,
)
self.assertEqual(
functions[4].cached_definition.attr['backward_function_name'].s,
functions[5].name,
)
self.assertEqual(
functions[5].cached_definition.attr['forward_function_name'].s,
functions[4].name,
)
sq = defun_matmul(t, t)
double = add(t, t)
self.assertAllEqual(sq.eval().reshape(-1), [7, 10, 15, 22])
self.assertAllEqual(double.eval().reshape(-1), [2, 4, 6, 8])
# Make sure the pre registered function is used, and no other function
# is added.
self.assertLen(graph._functions, 6)
functions = list(graph._functions.values())
for i in range(len(functions)):
self.assertEqual(
captured_function_names[i],
functions[i].cached_definition.signature.name,
)
@test_util.run_v2_only
def testRegisterConcreteFunction(self):
@compiled_fn(
name='py_add', function_cache=function_cache_lib.FunctionCache()
)
def py_add(x, y):
return math_ops.add(x, y)
py_add(array_ops.ones([]), array_ops.ones([]))
add = py_add.get_concrete_function(
tensor_lib.TensorSpec(None, dtypes.float32),
tensor_lib.TensorSpec(None, dtypes.float32),
)
@compiled_fn(
name='py_composite', function_cache=function_cache_lib.FunctionCache()
)
def py_composite(x, y):
return x, add(x, y)
py_composite(array_ops.ones([]), array_ops.ones([]))
composite = py_composite.get_concrete_function(
tensor_lib.TensorSpec(None, dtypes.float32),
tensor_lib.TensorSpec(None, dtypes.float32),
)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
composite.add_to_graph()
composite.add_gradient_functions_to_graph()
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 6)
# two sets of functions, each of them are (inference, forward, backward)
functions = list(graph._functions.values())
captured_function_names = [
f.cached_definition.signature.name for f in functions
]
expected_func_name_regex = [
'.*inference.*py_composite.*',
'.*inference.*py_add.*',
'.*forward.*py_composite.*',
'.*forward.*py_add.*',
'.*inference.*backward.*py_composite.*',
'.*inference.*backward.*py_add.*',
]
for expected, found in zip(
expected_func_name_regex, captured_function_names
):
self.assertRegex(found, expected)
composite_t, composite_double = composite(t, t)
double = add(t, t)
self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(double))
self.assertAllEqual([[2, 4], [6, 8]], self.evaluate(composite_double))
self.assertAllEqual([[1, 2], [3, 4]], self.evaluate(composite_t))
# Make sure the pre registered function is used, and no other function
# is added.
self.assertLen(graph._functions, 6)
def testEagerCaptures(self):
with context.eager_mode():
large_tensor = array_ops.ones(shape=(256,))
self.assertGreater(256, capture_container._EAGER_CONST_THRESHOLD)
small_tensor = array_ops.ones(shape=(4,))
self.assertLessEqual(4, capture_container._EAGER_CONST_THRESHOLD)
v = resource_variable_ops.ResourceVariable(0.0)
for captured, op_type in [
(large_tensor, 'Placeholder'),
(small_tensor, 'Const'),
(v, 'Placeholder'),
]:
@compiled_fn
def test_fn():
return captured + 1 # pylint: disable=cell-var-from-loop
g = test_fn.get_concrete_function().graph
internal_captures = g.internal_captures
self.assertLen(internal_captures, 1)
self.assertEqual(internal_captures[0].op.type, op_type)
def testRegisterFunctionWithInputSignature(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = compiled_fn(
matmul,
input_signature=[
tensor_lib.TensorSpec(shape=(2, 2), dtype=dtypes.float32),
tensor_lib.TensorSpec(shape=(2, 2), dtype=dtypes.float32),
],
function_cache=function_cache_lib.FunctionCache(),
)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
concrete_func = defun_matmul.get_concrete_function(t, t)
concrete_func.add_to_graph()
concrete_func.add_gradient_functions_to_graph()
graph = ops.get_default_graph()
# pylint: disable=protected-access
self.assertLen(graph._functions, 3)
# Test register function with cache, note inputs are ignored.
concrete_func = defun_matmul.get_concrete_function()
concrete_func.add_to_graph()
concrete_func.add_gradient_functions_to_graph()
graph = ops.get_default_graph()
self.assertLen(graph._functions, 3)
def testRegisterFunctionWithCache(self):
def matmul(x, y):
return math_ops.matmul(x, y)
defun_matmul = compiled_fn(
matmul, function_cache=function_cache_lib.FunctionCache()
)
with context.graph_mode(), self.cached_session():
with ops.get_default_graph().as_default():
t = constant_op.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = constant_op.constant([[2.0, 3.0], [4.0, 5.0]])
concrete_func_t = defun_matmul.get_concrete_function(t, t)
concrete_func_t.add_to_graph()
concrete_func_t.add_gradient_functions_to_graph()
concrete_func_t2 = defun_matmul.get_concrete_function(t2, t2)
concrete_func_t2.add_to_graph()
concrete_func_t2.add_gradient_functions_to_graph()
graph = ops.get_default_graph()
# Only one function is registered since the input param are in same type
# pylint: disable=protected-access
self.assertLen(graph._functions, 3)
@test_util.run_v2_only
def testCallingFunctionWithDifferentVariables(self):
@compiled_fn
def foo(v):
v.assign_add(1.0)
return v.read_value()
v = resource_variable_ops.ResourceVariable(0.0)
graph_function = foo.get_concrete_function(v)
self.assertLen(graph_function.inputs, 1)
self.assertEmpty(graph_function.captured_inputs)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(v)), 2.0)
w = resource_variable_ops.ResourceVariable(0.0)
@compiled_fn
def bar(v):
del v
return constant_op.constant(1.0)
graph_function = bar.get_concrete_function(v)
self.assertEqual(float(graph_function(v)), 1.0)
self.assertEqual(float(graph_function(w)), 1.0)
def testCallingFunctionWithNonTensorsFails(self):
  """A concrete function rejects arguments that are not tensors."""

  @compiled_fn
  def identity(x):
    return x

  concrete = identity.get_concrete_function(constant_op.constant(1.0))
  with self.assertRaises((TypeError, ValueError)):
    concrete('Not a Tensor.')
@parameterized.parameters([
(
compiled_fn(
attributes={
'api_implements': 'random_boost',
'api_preferred_device': 'CPU',
}
),
compiled_fn(
attributes={
'api_implements': 'random_boost',
'api_preferred_device': 'GPU',
}
),
),
(
compiled_fn(
attributes={
'api_implements': 'random_boost',
'api_preferred_device': 'CPU',
}
),
compiled_fn(
attributes={
'api_implements': 'random_boost',
'api_preferred_device': 'GPU',
}
),
),
])
@test_util.run_v2_only
def testSwapImplementationWithGrapplerPlugin(
self, cpu_decorator, gpu_decorator
):
# Set the min_graph_nodes to -1 since the graph in this test is too small,
# and will be ignored by grappler if don't set this.
rewrites = rewriter_config_pb2.RewriterConfig()
rewrites.implementation_selector = rewriter_config_pb2.RewriterConfig.ON
rewrites.min_graph_nodes = -1
graph_options = config_pb2.GraphOptions(
rewrite_options=rewrites, build_cost_model=1
)
config_proto = config_pb2.ConfigProto(graph_options=graph_options)
with context.graph_mode(), self.cached_session(
config=config_proto, graph=ops.Graph(), use_gpu=True
):
@cpu_decorator
def cpu_boost(x):
return math_ops.add(x, 2.0)
@gpu_decorator
def gpu_boost(x):
return math_ops.add(x, 4.0)
x = constant_op.constant(1.0)
concrete_func = cpu_boost.get_concrete_function(x)
concrete_func.add_to_graph()
concrete_func.add_gradient_functions_to_graph()
y = gpu_boost(x)
y_value = self.evaluate(y)
if test.is_gpu_available():
self.assertEqual(y_value, 5.0)
else:
# Grappler fallback to use the CPU impl even called with GPU function.
self.assertEqual(y_value, 3.0)
@test_util.disable_tfrt(
"b/174712583: TFRT doesn't support behavior "
'equivalent to implementation_selector for function'
)
def testSwapImplementationInEager(self):
if not context.executing_eagerly():
self.skipTest('eager only')
# testSharedRendezvous sets the disable_meta_optimizer flag to True
# if that subtest runs before this one, then having that set to True
# will cause this subtest to fail. To avoid that scenario, explicitly
# set the disable_meta_optimizer flag to false here
context.context().set_optimizer_experimental_options({
'min_graph_nodes': -1,
'implementation_selector': True,
'disable_meta_optimizer': False,
})
@compiled_fn(
attributes={
'api_implements': 'foo',
'api_preferred_device': 'CPU',
}
)
def on_cpu(x):
return x + 2
@compiled_fn(
attributes={
'api_implements': 'foo',
'api_preferred_device': 'GPU',
}
)
def on_gpu(x):
return x + 4
@compiled_fn
def run_on_cpu(t):
concrete_func = on_cpu.get_concrete_function(t)
concrete_func.add_to_graph()
concrete_func.add_gradient_functions_to_graph()
with ops.device('CPU:0'):
return on_gpu(t)
# Expect to run the on_cpu branch, regardless whether gpu is available.
self.assertEqual(run_on_cpu(constant_op.constant(1)).numpy(), 3)
def testCompilationFunctionSeparateGraphs(self):
with context.graph_mode():
add_cache = function_cache_lib.FunctionCache()
@compiled_fn(function_cache=add_cache)
def add(x):
return x + 5
maybe_add_cache = function_cache_lib.FunctionCache()
@compiled_fn(function_cache=maybe_add_cache)
def maybe_add(x, should_add):
if should_add:
return add(x)
else:
return x
with ops.Graph().as_default():
x = constant_op.constant(11)
maybe_add(x, True)
self.assertLen(maybe_add_cache, 1)
self.assertLen(add_cache, 1)
maybe_add(x, False)
self.assertLen(maybe_add_cache, 2)
self.assertLen(add_cache, 1)
with ops.Graph().as_default():
x = constant_op.constant(11)
maybe_add(x, True)
self.assertLen(maybe_add_cache, 3)
self.assertLen(add_cache, 2)
def testCacheKeyOverlappingShapes(self):
  """Shapes with swapped dimensions produce distinct cache keys."""
  function_cache = function_cache_lib.FunctionCache()

  @compiled_fn(function_cache=function_cache)
  def defined(t):
    return t

  defined(array_ops.zeros([12, 1]))
  self.assertLen(function_cache, 1)
  # [1, 21] must not collide with [12, 1].
  defined(array_ops.zeros([1, 21]))
  self.assertLen(function_cache, 2)

  function_cache = function_cache_lib.FunctionCache()

  # Same property must hold when tracing via get_concrete_function and
  # when the traced function itself calls another compiled function.
  @compiled_fn(function_cache=function_cache)
  def defined_again(t):
    return defined(t)

  defined_again.get_concrete_function(array_ops.zeros([12, 1]))
  self.assertLen(function_cache, 1)
  defined_again.get_concrete_function(array_ops.zeros([1, 21]))
  self.assertLen(function_cache, 2)
def testCacheTensorSpecIdenticalToTensor(self):
  """A TensorSpec and a matching Tensor resolve to the same concrete fn."""

  @compiled_fn(function_cache=function_cache_lib.FunctionCache())
  def identity(t):
    return t

  tensor = array_ops.zeros([2, 2])
  spec = tensor_lib.TensorSpec.from_tensor(tensor)
  from_spec = identity.get_concrete_function(spec)
  from_tensor = identity.get_concrete_function(tensor)
  self.assertIs(from_spec, from_tensor)
def testCacheKeyNestedLists(self):
  """Different nesting structures of the same tensors retrace."""
  cache = function_cache_lib.FunctionCache()

  @compiled_fn(function_cache=cache)
  def passthrough(l):
    return l

  x, y, z = (
      constant_op.constant(1.0),
      constant_op.constant(2.0),
      constant_op.constant(3.0),
  )
  passthrough([[x], y, z])
  self.assertLen(cache, 1)

  # Same leaves, different structure: new cache entry.
  passthrough([[x, y], z])
  self.assertLen(cache, 2)
def testCacheKeyAttrsClass(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class TestClass:
a = attr.ib()
b = attr.ib()
function_cache = function_cache_lib.FunctionCache()
@compiled_fn(function_cache=function_cache)
def defined(l):
return l
defined(
TestClass(
constant_op.constant(1.0),
[constant_op.constant(2.0), constant_op.constant(3.0)],
)
)
self.assertLen(function_cache, 1)
defined(
TestClass(
constant_op.constant(1.0),
[constant_op.constant(2.0), constant_op.constant(3.0)],
)
)
self.assertLen(function_cache, 1)
defined(
TestClass(
[constant_op.constant(1.0), constant_op.constant(2.0)],
constant_op.constant(3.0),
)
)
self.assertLen(function_cache, 2)
def testDistinctVariablesNoRetracing(self):
  """Three distinct variables reuse one trace regardless of argument order."""
  function_cache = function_cache_lib.FunctionCache()

  @compiled_fn(function_cache=function_cache)
  def defined(a, b, c):
    return a + b + c

  x = resource_variable_ops.ResourceVariable(0.0)
  y = resource_variable_ops.ResourceVariable(0.0)
  z = resource_variable_ops.ResourceVariable(0.0)

  # We generate cache keys based on unique combinations of resource ids.
  defined(x, y, z)
  self.assertLen(function_cache, 1)

  # Re-arranging arguments should not cause cache miss
  # because the three inputs are still distinct
  defined(z, y, x)
  self.assertLen(function_cache, 1)
def testRetracingOnDifferentVaribleCombinationPatterns(self):
  """Aliasing patterns among variable arguments drive retracing."""
  function_cache = function_cache_lib.FunctionCache()

  @compiled_fn(function_cache=function_cache)
  def defined(a, b, c):
    return a + b + c

  x = resource_variable_ops.ResourceVariable(0.0)
  y = resource_variable_ops.ResourceVariable(0.0)
  z = resource_variable_ops.ResourceVariable(0.0)

  defined(x, y, z)
  self.assertLen(function_cache, 1)

  # Retracing because the first two arguments are the same
  defined(x, x, z)
  self.assertLen(function_cache, 2)

  # Replacing x with y does not cause cache miss
  # because the combination stays the same as (x, x, z)
  defined(y, y, z)
  self.assertLen(function_cache, 2)

  # A different combination pattern causes cache miss
  defined(z, y, y)
  self.assertLen(function_cache, 3)
  defined(z, y, y)
  self.assertLen(function_cache, 3)
@test_util.run_v2_only
def testDeepcopyVariableNoRetracing(self):
  """Substituting a deep copy of a variable does not trigger a retrace."""
  function_cache = function_cache_lib.FunctionCache()

  @compiled_fn(function_cache=function_cache)
  def defined(a, b, c):
    return a + b + c

  x = resource_variable_ops.ResourceVariable(0.0)
  y = resource_variable_ops.ResourceVariable(0.0)
  z = resource_variable_ops.ResourceVariable(0.0)
  defined(x, y, z)
  self.assertLen(function_cache, 1)

  # The copy has a new resource id, but the aliasing pattern of the three
  # arguments is unchanged, so the existing trace is reused.
  x_copy = copy.deepcopy(x)
  defined(x_copy, y, z)
  self.assertLen(function_cache, 1)
@test_util.disable_tfrt('b/173429686')
@test_util.run_v2_only
def testExecutorType(self):
  """Function calls honor the executor type set on the context."""

  @compiled_fn
  def add_five(x):
    return x + 5

  self.assertEqual(
      5, add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy()
  )

  # An unknown executor type surfaces as NotFoundError at call time.
  with self.assertRaisesRegex(errors.NotFoundError, 'NON_EXISTENT_EXECUTOR'):
    with context.function_executor_type('NON_EXISTENT_EXECUTOR'):
      add_five(constant_op.constant(0, dtype=dtypes.int32))

  # '', 'DEFAULT' and None all select the default executor.
  for executor_type in ('', 'DEFAULT', None):
    with context.function_executor_type(executor_type):
      self.assertAllEqual(
          5, add_five(constant_op.constant(0, dtype=dtypes.int32)).numpy()
      )
@test_util.assert_no_garbage_created
def testReferenceCycles(self):
  """A compiled function is collectible once the last reference is dropped."""
  fn = compiled_fn(lambda x: 2.0 * x)

  fn(constant_op.constant(4.0))
  weak_fn = weakref.ref(fn)
  del fn
  # Tests that the weak reference we made to the function is now dead, which
  # means the object has been deleted. This should be true as long as the
  # function itself is not involved in a reference cycle.
  self.assertIs(None, weak_fn())
@test_util.run_in_graph_and_eager_modes
def testShapeCaching(self):
@compiled_fn
def func(x):
return array_ops.shape(x)
@compiled_fn(
input_signature=[tensor_lib.TensorSpec([None, None], dtypes.float32)]
)
def calls_func(x):
return func(x)
self.assertAllEqual([1, 1], self.evaluate(func(array_ops.zeros([1, 1]))))
self.assertAllEqual([2, 2], self.evaluate(func(array_ops.zeros([2, 2]))))
self.assertAllEqual(
[3, 3], self.evaluate(calls_func(array_ops.zeros([3, 3])))
)
def testLimitedRetracing(self):
  """Repeated calls with a fixed set of shapes trace a bounded number of times."""
  trace_count = [0]  # Mutable cell so the traced closure can count traces.
  function_cache = function_cache_lib.FunctionCache()

  @compiled_fn(function_cache=function_cache)
  def func(x):
    trace_count[0] += 1
    return x

  # Six distinct input shapes, cycled 50 times each.
  for _ in range(50):
    func(constant_op.constant(3.0))
    func(constant_op.constant(4.0))
    func(constant_op.constant([[1.0, 2.0]]))
    func(constant_op.constant([[]]))
    func(constant_op.constant([[3.0, 4.0], [5.0, 6.0]]))
    func(constant_op.constant([[3.0, 4.0], [5.0, 6.0], [7.0, 8.0]]))
  # Tracing more than twice per input doesn't make sense.
  self.assertLess(trace_count[0], 13)
| TracingCompilationTest |
python | django__django | tests/get_or_create/models.py | {
"start": 713,
"end": 1108
} | class ____(models.Model):
name = models.CharField(max_length=255)
tags = models.ManyToManyField(Tag)
@property
def capitalized_name_property(self):
return self.name
@capitalized_name_property.setter
def capitalized_name_property(self, val):
self.name = val.capitalize()
@property
def name_in_all_caps(self):
return self.name.upper()
| Thing |
python | davidhalter__jedi | jedi/inference/filters.py | {
"start": 2271,
"end": 4428
} | class ____(AbstractFilter):
name_class = TreeNameDefinition
def __init__(self, parent_context, node_context=None):
if node_context is None:
node_context = parent_context
self._node_context = node_context
self._parser_scope = node_context.tree_node
module_context = node_context.get_root_context()
# It is quite hacky that we have to use that. This is for caching
# certain things with a WeakKeyDictionary. However, parso intentionally
# uses slots (to save memory) and therefore we end up with having to
# have a weak reference to the object that caches the tree.
#
# Previously we have tried to solve this by using a weak reference onto
# used_names. However that also does not work, because it has a
# reference from the module, which itself is referenced by any node
# through parents.
path = module_context.py__file__()
if path is None:
# If the path is None, there is no guarantee that parso caches it.
self._parso_cache_node = None
else:
self._parso_cache_node = get_parso_cache_node(
module_context.inference_state.latest_grammar
if module_context.is_stub() else module_context.inference_state.grammar,
path
)
self._used_names = module_context.tree_node.get_used_names()
self.parent_context = parent_context
def get(self, name):
return self._convert_names(self._filter(
_get_definition_names(self._parso_cache_node, self._used_names, name),
))
def _convert_names(self, names):
return [self.name_class(self.parent_context, name) for name in names]
def values(self):
return self._convert_names(
name
for name_key in self._used_names
for name in self._filter(
_get_definition_names(self._parso_cache_node, self._used_names, name_key),
)
)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.parent_context)
| _AbstractUsedNamesFilter |
python | google__pytype | pytype/pytd/serialize_ast.py | {
"start": 770,
"end": 1718
} | class ____(visitors.Visitor):
"""Visitor to undo module aliases in late types.
Since late types are loaded out of context, they need to contain the original
names of modules, not whatever they've been aliased to in the current module.
"""
def __init__(self):
super().__init__()
self._module_aliases = {}
def EnterTypeDeclUnit(self, node):
for alias in node.aliases:
if isinstance(alias.type, pytd.Module):
name = alias.name.removeprefix(f"{node.name}.")
self._module_aliases[name] = alias.type.module_name
def VisitLateType(self, node):
if "." not in node.name:
return node
prefix, suffix = node.name.rsplit(".", 1)
while prefix:
if prefix in self._module_aliases:
return node.Replace(name=self._module_aliases[prefix] + "." + suffix)
prefix, _, remainder = prefix.rpartition(".")
suffix = f"{remainder}.{suffix}"
return node
| UndoModuleAliasesVisitor |
python | huggingface__transformers | src/transformers/models/starcoder2/modeling_starcoder2.py | {
"start": 6662,
"end": 10047
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: Starcoder2Config, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.use_bias)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.use_bias)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.use_bias)
self.rotary_fn = apply_rotary_pos_emb
self.residual_dropout = config.residual_dropout
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=getattr(self.config, "sliding_window", None), # diff with Llama
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
attn_output = nn.functional.dropout(
attn_output, p=self.residual_dropout, training=self.training
) # diff with Llama
return attn_output, attn_weights
| Starcoder2Attention |
python | aio-libs__aiohttp | aiohttp/client_reqrep.py | {
"start": 2536,
"end": 3008
} | class ____(_RequestInfo):
def __new__(
cls,
url: URL,
method: str,
headers: "CIMultiDictProxy[str]",
real_url: URL | _SENTINEL = sentinel,
) -> "RequestInfo":
"""Create a new RequestInfo instance.
For backwards compatibility, the real_url parameter is optional.
"""
return tuple.__new__(
cls, (url, method, headers, url if real_url is sentinel else real_url)
)
| RequestInfo |
python | huggingface__transformers | src/transformers/models/qwen2_audio/modeling_qwen2_audio.py | {
"start": 17455,
"end": 17937
} | class ____(nn.Module):
def __init__(self, config: Qwen2AudioConfig):
super().__init__()
self.linear = nn.Linear(config.audio_config.d_model, config.text_config.hidden_size, bias=True)
def forward(self, audio_features):
hidden_states = self.linear(audio_features)
return hidden_states
@auto_docstring(
custom_intro="""
The QWEN2AUDIO model which consists of a audio backbone and a language model.
"""
)
| Qwen2AudioMultiModalProjector |
python | numba__numba | numba/core/withcontexts.py | {
"start": 2540,
"end": 3980
} | class ____(WithContext):
"""A simple context-manager that tells the compiler to lift the body of the
with-block as another function.
"""
def mutate_with_body(
self,
func_ir,
blocks,
blk_start,
blk_end,
body_blocks,
dispatcher_factory,
extra,
):
assert extra is None
vlt = func_ir.variable_lifetime
inputs, outputs = find_region_inout_vars(
blocks=blocks,
livemap=vlt.livemap,
callfrom=blk_start,
returnto=blk_end,
body_block_ids=set(body_blocks),
)
lifted_blks = {k: blocks[k] for k in body_blocks}
_mutate_with_block_callee(
lifted_blks, blk_start, blk_end, inputs, outputs
)
# XXX: transform body-blocks to return the output variables
lifted_ir = func_ir.derive(
blocks=lifted_blks,
arg_names=tuple(inputs),
arg_count=len(inputs),
force_non_generator=True,
)
dispatcher = dispatcher_factory(lifted_ir)
newblk = _mutate_with_block_caller(
dispatcher,
blocks,
blk_start,
blk_end,
inputs,
outputs,
)
blocks[blk_start] = newblk
_clear_blocks(blocks, body_blocks)
return dispatcher
call_context = _CallContextType()
| _CallContextType |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/hooks/test_kubernetes.py | {
"start": 33532,
"end": 34226
} | class ____:
@pytest.mark.parametrize(
"conn_uri",
(
"kubernetes://?kube_config_path=/tmp/&kube_config=[1,2,3]",
"kubernetes://?kube_config_path=/tmp/&in_cluster=[1,2,3]",
"kubernetes://?kube_config=/tmp/&in_cluster=[1,2,3]",
),
)
def test_should_raise_exception_on_invalid_configuration(self, conn_uri):
kubernetes_hook = KubernetesHook()
with (
mock.patch.dict("os.environ", AIRFLOW_CONN_KUBERNETES_DEFAULT=conn_uri),
pytest.raises(AirflowException, match="Invalid connection configuration"),
):
kubernetes_hook.get_conn()
| TestKubernetesHookIncorrectConfiguration |
python | kamyu104__LeetCode-Solutions | Python/score-of-parentheses.py | {
"start": 443,
"end": 789
} | class ____(object):
def scoreOfParentheses(self, S):
"""
:type S: str
:rtype: int
"""
stack = [0]
for c in S:
if c == '(':
stack.append(0)
else:
last = stack.pop()
stack[-1] += max(1, 2*last)
return stack[0]
| Solution2 |
python | scipy__scipy | scipy/signal/tests/test_signaltools.py | {
"start": 101878,
"end": 105653
} | class ____:
# Tests that don't depend on dtype
@skip_xp_backends(np_only=True)
def test_invalid_shapes(self, xp):
# By "invalid," we mean that no one
# array has dimensions that are all at
# least as large as the corresponding
# dimensions of the other array. This
# setup should throw a ValueError.
a = np.arange(1, 7).reshape((2, 3))
b = np.arange(-6, 0).reshape((3, 2))
assert_raises(ValueError, correlate, *(a, b), **{'mode': 'valid'})
assert_raises(ValueError, correlate, *(b, a), **{'mode': 'valid'})
@skip_xp_backends(np_only=True)
def test_invalid_params(self, xp):
a = [3, 4, 5]
b = [1, 2, 3]
assert_raises(ValueError, correlate, a, b, mode='spam')
assert_raises(ValueError, correlate, a, b, mode='eggs', method='fft')
assert_raises(ValueError, correlate, a, b, mode='ham', method='direct')
assert_raises(ValueError, correlate, a, b, mode='full', method='bacon')
assert_raises(ValueError, correlate, a, b, mode='same', method='bacon')
@skip_xp_backends(np_only=True)
def test_mismatched_dims(self, xp):
# Input arrays should have the same number of dimensions
assert_raises(ValueError, correlate, [1], 2, method='direct')
assert_raises(ValueError, correlate, 1, [2], method='direct')
assert_raises(ValueError, correlate, [1], 2, method='fft')
assert_raises(ValueError, correlate, 1, [2], method='fft')
assert_raises(ValueError, correlate, [1], [[2]])
assert_raises(ValueError, correlate, [3], 2)
@skip_xp_backends(cpu_only=True, exceptions=['cupy'])
@skip_xp_backends("jax.numpy", reason="dtype differs")
def test_numpy_fastpath(self, xp):
a = xp.asarray([1, 2, 3])
b = xp.asarray([4, 5])
xp_assert_close(correlate(a, b, mode='same'), xp.asarray([5, 14, 23]))
a = xp.asarray([1, 2, 3])
b = xp.asarray([4, 5, 6])
xp_assert_close(correlate(a, b, mode='same'), xp.asarray([17, 32, 23]))
xp_assert_close(correlate(a, b, mode='full'), xp.asarray([6, 17, 32, 23, 12]))
xp_assert_close(correlate(a, b, mode='valid'), xp.asarray([32]))
@make_xp_test_case(correlation_lags)
@pytest.mark.parametrize("mode", ["valid", "same", "full"])
@pytest.mark.parametrize("behind", [True, False])
@pytest.mark.parametrize("input_size", [100, 101, 1000, 1001,
pytest.param(10000, marks=[pytest.mark.slow]),
pytest.param(10001, marks=[pytest.mark.slow])]
)
def test_correlation_lags(mode, behind, input_size, xp):
# generate random data
rng = np.random.RandomState(0)
in1 = rng.standard_normal(input_size)
offset = int(input_size/10)
# generate offset version of array to correlate with
if behind:
# y is behind x
in2 = np.concatenate([rng.standard_normal(offset), in1])
expected = -offset
else:
# y is ahead of x
in2 = in1[offset:]
expected = offset
# cross correlate, returning lag information
correlation = correlate(in1, in2, mode=mode)
lags = correlation_lags(in1.size, in2.size, mode=mode)
# identify the peak
lag_index = np.argmax(correlation)
# Check as expected
xp_assert_equal(lags[lag_index], expected)
# Correlation and lags shape should match
assert lags.shape == correlation.shape
@make_xp_test_case(correlation_lags)
def test_correlation_lags_invalid_mode(xp):
with pytest.raises(ValueError, match="Mode asdfgh is invalid"):
correlation_lags(100, 100, mode="asdfgh")
@make_xp_test_case(correlate)
@pytest.mark.parametrize('dt_name', ['complex64', 'complex128'])
| TestCorrelate |
python | pypa__hatch | tests/backend/metadata/test_core.py | {
"start": 40712,
"end": 41907
} | class ____:
def test_dynamic(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"scripts": 9000, "dynamic": ["scripts"]}})
with pytest.raises(
ValueError,
match="Metadata field `scripts` cannot be both statically defined and listed in field `project.dynamic`",
):
_ = metadata.core.scripts
def test_not_table(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"scripts": 10}})
with pytest.raises(TypeError, match="Field `project.scripts` must be a table"):
_ = metadata.core.scripts
def test_entry_not_string(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"scripts": {"foo": 7}}})
with pytest.raises(TypeError, match="Object reference `foo` of field `project.scripts` must be a string"):
_ = metadata.core.scripts
def test_correct(self, isolation):
metadata = ProjectMetadata(str(isolation), None, {"project": {"scripts": {"foo": "bar", "bar": "baz"}}})
assert metadata.core.scripts == metadata.core.scripts == {"bar": "baz", "foo": "bar"}
| TestScripts |
python | realpython__materials | python-textual/static_and_label.py | {
"start": 72,
"end": 961
} | class ____(App):
def compose(self):
self.static = Static(
"I am a [bold red]Static[/bold red] widget!",
)
yield self.static
self.label = Label(
"I am a [yellow italic]Label[/yellow italic] widget!",
)
yield self.label
def on_mount(self):
# Styling the static
self.static.styles.background = "blue"
self.static.styles.border = ("solid", "white")
self.static.styles.text_align = "center"
self.static.styles.padding = (1, 1)
self.static.styles.margin = (4, 4)
# Styling the label
self.label.styles.background = "darkgreen"
self.label.styles.border = ("double", "red")
self.label.styles.padding = (1, 1)
self.label.styles.margin = (2, 4)
if __name__ == "__main__":
app = StaticAndLabelApp()
app.run()
| StaticAndLabelApp |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/reader.py | {
"start": 1203,
"end": 2433
} | class ____(YAMLError):
def __init__(self, name, position, character, encoding, reason):
# type: (Any, Any, Any, Any, Any) -> None
self.name = name
self.character = character
self.position = position
self.encoding = encoding
self.reason = reason
def __str__(self):
# type: () -> Any
if isinstance(self.character, bytes):
return _F(
"'{self_encoding!s}' codec can't decode byte #x{ord_self_character:02x}: "
'{self_reason!s}\n'
' in "{self_name!s}", position {self_position:d}',
self_encoding=self.encoding,
ord_self_character=ord(self.character),
self_reason=self.reason,
self_name=self.name,
self_position=self.position,
)
else:
return _F(
'unacceptable character #x{self_character:04x}: {self_reason!s}\n'
' in "{self_name!s}", position {self_position:d}',
self_character=self.character,
self_reason=self.reason,
self_name=self.name,
self_position=self.position,
)
| ReaderError |
python | facebook__pyre-check | client/command_arguments.py | {
"start": 6837,
"end": 7017
} | class ____:
output: str = TEXT
no_start: bool = False
start_arguments: StartArguments = field(default_factory=StartArguments)
@dataclass(frozen=True)
| IncrementalArguments |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_organization_detector_workflow_details.py | {
"start": 1999,
"end": 4855
} | class ____(OrganizationDetectorWorkflowAPITestCase):
method = "delete"
@mock.patch(
"sentry.workflow_engine.endpoints.organization_detector_workflow_details.create_audit_entry"
)
def test_simple(self, mock_audit: mock.MagicMock) -> None:
with outbox_runner():
self.get_success_response(
self.organization.slug,
self.detector_workflow.id,
)
# verify it was deleted
assert not DetectorWorkflow.objects.filter(id=self.detector_workflow.id).exists()
# verify audit log
mock_audit.assert_called_once_with(
request=mock.ANY,
organization=self.organization,
target_object=self.detector_workflow.id,
event=audit_log.get_event_id("DETECTOR_WORKFLOW_REMOVE"),
data=self.detector_workflow.get_audit_log_data(),
)
def test_does_not_exist(self) -> None:
with outbox_runner():
self.get_error_response(self.organization.slug, 50000, status_code=404)
# verify it wasn't deleted
assert not RegionScheduledDeletion.objects.filter(
model_name="DetectorWorkflow",
object_id=self.detector_workflow.id,
).exists()
def test_member_can_disconnect_user_detectors(self) -> None:
self.organization.update_option("sentry:alerts_member_write", True)
self.organization.flags.allow_joinleave = False
self.organization.save()
self.login_as(user=self.member_user)
detector = self.create_detector(
project=self.create_project(organization=self.organization),
created_by_id=self.user.id,
)
detector_workflow = self.create_detector_workflow(
detector=detector,
workflow=self.workflow,
)
with outbox_runner():
self.get_success_response(
self.organization.slug,
detector_workflow.id,
)
def test_member_cannot_disconnect_detectors_when_alerts_member_write_disabled(self) -> None:
self.organization.update_option("sentry:alerts_member_write", False)
self.organization.flags.allow_joinleave = True
self.organization.save()
self.login_as(user=self.member_user)
detector = self.create_detector(
project=self.create_project(organization=self.organization, teams=[self.team]),
created_by_id=self.user.id,
)
detector_workflow = self.create_detector_workflow(
detector=detector,
workflow=self.workflow,
)
with outbox_runner():
self.get_error_response(
self.organization.slug,
detector_workflow.id,
status_code=403,
)
| OrganizationDetectorWorkflowDetailsDeleteTest |
python | pydantic__pydantic | .github/actions/people/people.py | {
"start": 3612,
"end": 3711
} | class ____(BaseModel):
"""Container for comment nodes."""
nodes: list[CommentsNode]
| Comments |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/data_table_tabs.py | {
"start": 134,
"end": 908
} | class ____(App):
"""Dashboard"""
BINDINGS = [
("d", "toggle_dark", "Toggle dark mode"),
Binding("ctrl+q", "app.quit", "Quit", show=True),
]
TITLE = "Dashboard"
CSS = """
DataTable {
height: auto;
}
"""
def compose(self) -> ComposeResult:
"""Create child widgets for the app."""
with TabbedContent("Workflows"):
yield DataTable(id="table")
def on_mount(self) -> None:
table = self.query_one(DataTable)
table.add_columns("Id", "Description", "Status", "Result Id")
for row in [(1, 2, 3, 4), ("a", "b", "c", "d"), ("fee", "fy", "fo", "fum")]:
table.add_row(key=row[0], *row)
if __name__ == "__main__":
app = Dashboard()
app.run()
| Dashboard |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/src/connectors_qa/checks/documentation/documentation.py | {
"start": 23587,
"end": 23710
} | class ____(CheckSection):
header = "For Airbyte Cloud:"
expected_section_index = 1
| CheckForAirbyteCloudSectionContent |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/declarative/extensions.py | {
"start": 971,
"end": 3971
} | class ____:
"""A helper class for 'concrete' declarative mappings.
:class:`.ConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
``__declare_last__()`` function, which is essentially
a hook for the :meth:`.MapperEvents.after_configured` event.
:class:`.ConcreteBase` produces a mapped
table for the class itself. Compare to :class:`.AbstractConcreteBase`,
which does not.
Example::
from sqlalchemy.ext.declarative import ConcreteBase
class Employee(ConcreteBase, Base):
__tablename__ = "employee"
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
__mapper_args__ = {
"polymorphic_identity": "employee",
"concrete": True,
}
class Manager(Employee):
__tablename__ = "manager"
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
"polymorphic_identity": "manager",
"concrete": True,
}
The name of the discriminator column used by :func:`.polymorphic_union`
defaults to the name ``type``. To suit the use case of a mapping where an
actual column in a mapped table is already named ``type``, the
discriminator name can be configured by setting the
``_concrete_discriminator_name`` attribute::
class Employee(ConcreteBase, Base):
_concrete_discriminator_name = "_concrete_discriminator"
.. versionchanged:: 1.4.2 The ``_concrete_discriminator_name`` attribute
need only be placed on the basemost class to take correct effect for
all subclasses. An explicit error message is now raised if the
mapped column names conflict with the discriminator name, whereas
in the 1.3.x series there would be some warnings and then a non-useful
query would be generated.
.. seealso::
:class:`.AbstractConcreteBase`
:ref:`concrete_inheritance`
"""
@classmethod
def _create_polymorphic_union(cls, mappers, discriminator_name):
return polymorphic_union(
OrderedDict(
(mp.polymorphic_identity, mp.local_table) for mp in mappers
),
discriminator_name,
"pjoin",
)
@classmethod
def __declare_first__(cls):
m = cls.__mapper__
if m.with_polymorphic:
return
discriminator_name = (
getattr(cls, "_concrete_discriminator_name", None) or "type"
)
mappers = list(m.self_and_descendants)
pjoin = cls._create_polymorphic_union(mappers, discriminator_name)
m._set_with_polymorphic(("*", pjoin))
m._set_polymorphic_on(pjoin.c[discriminator_name])
| ConcreteBase |
python | jazzband__django-simple-history | simple_history/exceptions.py | {
"start": 409,
"end": 504
} | class ____(Exception):
"""Manager does not belong to model"""
pass
| AlternativeManagerError |
python | mozilla__bleach | bleach/_vendor/parse.py | {
"start": 11461,
"end": 11602
} | class ____(_SplitResultBase, _NetlocResultMixinBytes):
__slots__ = ()
def geturl(self):
return urlunsplit(self)
| SplitResultBytes |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 19488,
"end": 19840
} | class ____(PrefectBaseModel):
"""Filter by text search across log content."""
query: str = Field(
description="Text search query string",
examples=[
"error",
"error -debug",
'"connection timeout"',
"+required -excluded",
],
max_length=200,
)
| LogFilterTextSearch |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/schema.py | {
"start": 140482,
"end": 140594
} | class ____(Protocol):
def __call__(self, context: ExecutionContext) -> Any: ...
| _CallableColumnDefaultProtocol |
python | spyder-ide__spyder | spyder/plugins/history/widgets.py | {
"start": 1191,
"end": 1352
} | class ____:
Main = 'main_section'
# --- Widgets
# ----------------------------------------------------------------------------
| HistoryWidgetOptionsMenuSections |
python | scrapy__scrapy | tests/test_crawler.py | {
"start": 24194,
"end": 24581
} | class ____(TestCrawlerRunnerHasSpider):
@staticmethod
def _runner():
return AsyncCrawlerRunner(get_reactor_settings())
@staticmethod
def _crawl(runner, spider):
return deferred_from_coro(runner.crawl(spider))
def test_crawler_runner_asyncio_enabled_true(self):
pytest.skip("This test is only for CrawlerRunner")
| TestAsyncCrawlerRunnerHasSpider |
python | apache__thrift | lib/py/src/transport/TTwisted.py | {
"start": 9098,
"end": 9668
} | class ____(ClientFactory):
protocol = ThriftClientProtocol
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self.client_class = client_class
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
def buildProtocol(self, addr):
p = self.protocol(self.client_class, self.iprot_factory,
self.oprot_factory)
p.factory = self
return p
| ThriftClientFactory |
python | Netflix__metaflow | metaflow/plugins/cards/card_server.py | {
"start": 5502,
"end": 11772
} | class ____(BaseHTTPRequestHandler):
card_options: CardServerOptions = None
run_watcher: RunWatcher = None
def do_GET(self):
try:
_, path = self.path.split("/", 1)
try:
prefix, suffix = path.split("/", 1)
except:
prefix = path
suffix = None
except:
prefix = None
if prefix in self.ROUTES:
self.ROUTES[prefix](self, suffix)
else:
self._response(open(VIEWER_PATH).read().encode("utf-8"))
def get_runinfo(self, suffix):
run_id_changed = self.card_options.refresh_run()
if run_id_changed:
self.log_message(
"RunID changed in the background to %s"
% self.card_options.run_object.pathspec
)
_ClickLogger(
"RunID changed in the background to %s"
% self.card_options.run_object.pathspec,
fg="blue",
)
if self.card_options.run_object is None:
self._response(
{"status": "No Run Found", "flow": self.card_options.flow_name},
code=404,
is_json=True,
)
return
task_card_generator = cards_for_run(
self.card_options.flow_datastore,
self.card_options.run_object,
self.card_options.only_running,
max_cards=self.card_options.max_cards,
)
flow_name = self.card_options.run_object.parent.id
run_id = self.card_options.run_object.id
cards = []
for pathspec, card in task_card_generator:
step, task = pathspec.split("/")[-2:]
_task = self.card_options.run_object[step][task]
task_finished = True if _task.finished else False
cards.append(
dict(
task=pathspec,
label="%s/%s %s" % (step, task, card.hash),
card_object=dict(
hash=card.hash,
type=card.type,
path=card.path,
id=card.id,
),
finished=task_finished,
card="%s/%s" % (pathspec, card.hash),
)
)
resp = {
"status": "ok",
"flow": flow_name,
"run_id": run_id,
"cards": cards,
"poll_interval": self.card_options.poll_interval,
}
self._response(resp, is_json=True)
def get_card(self, suffix):
_suffix = urlparse(self.path).path
_, flow, run_id, step, task_id, card_hash = _suffix.strip("/").split("/")
pathspec = "/".join([flow, run_id, step, task_id])
cards = list(
cards_for_task(
self.card_options.flow_datastore, pathspec, card_hash=card_hash
)
)
if len(cards) == 0:
self._response({"status": "Card Not Found"}, code=404)
return
selected_card = cards[0]
self._response(selected_card.get().encode("utf-8"))
def get_data(self, suffix):
_suffix = urlparse(self.path).path
_, flow, run_id, step, task_id, card_hash = _suffix.strip("/").split("/")
pathspec = "/".join([flow, run_id, step, task_id])
cards = list(
cards_for_task(
self.card_options.flow_datastore, pathspec, card_hash=card_hash
)
)
if len(cards) == 0:
self._response(
{
"status": "Card Not Found",
},
is_json=True,
code=404,
)
return
status = "ok"
try:
task_object = self.card_options.run_object[step][task_id]
except KeyError:
return self._response(
{"status": "Task Not Found", "is_complete": False},
is_json=True,
code=404,
)
is_complete = task_object.finished
selected_card = cards[0]
card_data = selected_card.get_data()
if card_data is not None:
self.log_message(
"Task Success: %s, Task Finished: %s"
% (task_object.successful, is_complete)
)
if not task_object.successful and is_complete:
status = "Task Failed"
self._response(
{"status": status, "payload": card_data, "is_complete": is_complete},
is_json=True,
)
else:
self._response(
{"status": "ok", "is_complete": is_complete},
is_json=True,
code=404,
)
def _response(self, body, is_json=False, code=200):
self.send_response(code)
mime = "application/json" if is_json else "text/html"
self.send_header("Content-type", mime)
self.end_headers()
if is_json:
self.wfile.write(json.dumps(body).encode("utf-8"))
else:
self.wfile.write(body)
ROUTES = {"runinfo": get_runinfo, "card": get_card, "data": get_data}
def _is_debug_mode():
debug_flag = os.environ.get("METAFLOW_DEBUG_CARD_SERVER")
if debug_flag is None:
return False
return debug_flag.lower() in ["true", "1"]
def create_card_server(card_options: CardServerOptions, port, ctx_obj):
CardViewerRoutes.card_options = card_options
global _ClickLogger
_ClickLogger = ctx_obj.echo
if card_options.follow_new_runs:
CardViewerRoutes.run_watcher = RunWatcher(
card_options.flow_name, card_options.child_conn
)
CardViewerRoutes.run_watcher.start()
server_addr = ("", port)
ctx_obj.echo(
"Starting card server on port %d " % (port),
fg="green",
bold=True,
)
# Disable logging if not in debug mode
if not _is_debug_mode():
CardViewerRoutes.log_request = lambda *args, **kwargs: None
CardViewerRoutes.log_message = lambda *args, **kwargs: None
server = ThreadingHTTPServer(server_addr, CardViewerRoutes)
server.serve_forever()
| CardViewerRoutes |
python | huggingface__transformers | src/transformers/integrations/integration_utils.py | {
"start": 49564,
"end": 50452
} | class ____(TrainerCallback):
"""
A [`TrainerCallback`] that sends the logs to [AzureML](https://pypi.org/project/azureml-sdk/).
"""
def __init__(self, azureml_run=None):
if not is_azureml_available():
raise RuntimeError("AzureMLCallback requires azureml to be installed. Run `pip install azureml-sdk`.")
self.azureml_run = azureml_run
def on_init_end(self, args, state, control, **kwargs):
from azureml.core.run import Run
if self.azureml_run is None and state.is_world_process_zero:
self.azureml_run = Run.get_context()
def on_log(self, args, state, control, logs=None, **kwargs):
if self.azureml_run and state.is_world_process_zero:
for k, v in logs.items():
if isinstance(v, (int, float)):
self.azureml_run.log(k, v, description=k)
| AzureMLCallback |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 1345,
"end": 1436
} | class ____(BaseModel):
x: int
model_config = ConfigDict(frozen=True)
| NoMutationModel |
python | nedbat__coveragepy | coverage/plugin_support.py | {
"start": 4419,
"end": 5263
} | class ____:
"""A Debug writer, but with labels for prepending to the messages."""
def __init__(self, label: str, debug: TDebugCtl, prev_labels: Iterable[str] = ()):
self.labels = list(prev_labels) + [label]
self.debug = debug
def add_label(self, label: str) -> LabelledDebug:
"""Add a label to the writer, and return a new `LabelledDebug`."""
return LabelledDebug(label, self.debug, self.labels)
def message_prefix(self) -> str:
"""The prefix to use on messages, combining the labels."""
prefixes = self.labels + [""]
return ":\n".join(" " * i + label for i, label in enumerate(prefixes))
def write(self, message: str) -> None:
"""Write `message`, but with the labels prepended."""
self.debug.write(f"{self.message_prefix()}{message}")
| LabelledDebug |
python | django-import-export__django-import-export | tests/core/forms.py | {
"start": 282,
"end": 407
} | class ____(AuthorFormMixin, ImportForm):
"""Customized ImportForm, with author field required"""
pass
| CustomImportForm |
python | pikepdf__pikepdf | tests/test_object.py | {
"start": 8860,
"end": 10316
} | class ____:
def check(self, a, b):
assert a == b, "invalid test case"
assert hash(a) == hash(b), "hash violation"
def test_unequal_but_similar(self):
assert Name('/Foo') != String('/Foo')
def test_numbers(self):
self.check(Object.parse(b'1.0'), 1)
self.check(Object.parse(b'42'), 42)
def test_bool_comparison(self):
self.check(Object.parse(b'0.0'), False)
self.check(True, 1)
def test_string(self):
utf16 = b'\xfe\xff' + 'hello'.encode('utf-16be')
self.check(String(utf16), String('hello'))
def test_name(self):
self.check(Name.This, Name('/This'))
def test_operator(self):
self.check(Operator('q'), Operator('q'))
def test_array_not_hashable(self):
with pytest.raises(TypeError):
{Array([3]): None} # pylint: disable=expression-not-assigned
def test_not_constructible():
with pytest.raises(TypeError, match="constructor"):
Object()
def test_operator_inline(resources):
with pikepdf.open(resources / 'image-mono-inline.pdf') as pdf:
instructions = parse_content_stream(pdf.pages[0], operators='BI ID EI')
assert len(instructions) == 1
_operands, operator = instructions[0]
assert operator == pikepdf.Operator("INLINE IMAGE")
def test_utf16_error():
with pytest.raises((UnicodeEncodeError, RuntimeError)):
str(encode('\ud801'))
| TestHashViolation |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocolExplicit1.py | {
"start": 477,
"end": 531
} | class ____(Protocol2, Protocol):
cm11: int
| Protocol3 |
python | keon__algorithms | tests/test_backtrack.py | {
"start": 7993,
"end": 8434
} | class ____(unittest.TestCase):
def test_letter_combinations(self):
digit1 = "23"
answer1 = ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"]
self.assertEqual(sorted(letter_combinations(digit1)), sorted(answer1))
digit2 = "34"
answer2 = ['dg', 'dh', 'di', 'eg', 'eh', 'ei', 'fg', 'fh', 'fi']
self.assertEqual(sorted(letter_combinations(digit2)), sorted(answer2))
| TestLetterCombinations |
python | facebook__pyre-check | tools/generate_taint_models/generator_specifications.py | {
"start": 2110,
"end": 2559
} | class ____(NamedTuple):
def __hash__(self) -> int:
parameter_type = self.parameter_type
parameter_name = self.parameter_name
return hash(
(
parameter_type and tuple(sorted(parameter_type)),
parameter_name and tuple(sorted(parameter_name)),
)
)
parameter_type: Optional[Set[str]] = None
parameter_name: Optional[Set[str]] = None
| WhitelistSpecification |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 61361,
"end": 61632
} | class ____(BiffRecord):
"""
This record represents a cell that contains an IEEE-754 floating-point value.
"""
_REC_ID = 0x0203
def __init__(self, row, col, xf_index, number):
self._rec_data = pack('<3Hd', row, col, xf_index, number)
| NumberRecord |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/hdf5.py | {
"start": 807,
"end": 4874
} | class ____(pg.PlotCurveItem):
def __init__(self, *args, **kwds):
self.hdf5 = None
self.limit = 10000 # maximum number of samples to be plotted
pg.PlotCurveItem.__init__(self, *args, **kwds)
def setHDF5(self, data):
self.hdf5 = data
self.updateHDF5Plot()
def viewRangeChanged(self):
self.updateHDF5Plot()
def updateHDF5Plot(self):
if self.hdf5 is None:
self.setData([])
return
vb = self.getViewBox()
if vb is None:
return # no ViewBox yet
# Determine what data range must be read from HDF5
range_ = vb.viewRange()[0]
start = max(0,int(range_[0])-1)
stop = min(len(self.hdf5), int(range_[1]+2))
# Decide by how much we should downsample
ds = int((stop-start) / self.limit) + 1
if ds == 1:
# Small enough to display with no intervention.
visible = self.hdf5[start:stop]
scale = 1
else:
# Here convert data into a down-sampled array suitable for visualizing.
# Must do this piecewise to limit memory usage.
samples = 1 + ((stop-start) // ds)
visible = np.zeros(samples*2, dtype=self.hdf5.dtype)
sourcePtr = start
targetPtr = 0
# read data in chunks of ~1M samples
chunkSize = (1000000//ds) * ds
while sourcePtr < stop-1:
chunk = self.hdf5[sourcePtr:min(stop,sourcePtr+chunkSize)]
sourcePtr += len(chunk)
# reshape chunk to be integral multiple of ds
chunk = chunk[:(len(chunk)//ds) * ds].reshape(len(chunk)//ds, ds)
# compute max and min
chunkMax = chunk.max(axis=1)
chunkMin = chunk.min(axis=1)
# interleave min and max into plot data to preserve envelope shape
visible[targetPtr:targetPtr+chunk.shape[0]*2:2] = chunkMin
visible[1+targetPtr:1+targetPtr+chunk.shape[0]*2:2] = chunkMax
targetPtr += chunk.shape[0]*2
visible = visible[:targetPtr]
scale = ds * 0.5
self.setData(visible) # update the plot
self.setPos(start, 0) # shift to match starting index
self.resetTransform()
self.scale(scale, 1) # scale to match downsampling
def createFile(finalSize=2000000000):
"""Create a large HDF5 data file for testing.
Data consists of 1M random samples tiled through the end of the array.
"""
chunk = np.random.normal(size=1000000).astype(np.float32)
f = h5py.File('test.hdf5', 'w')
f.create_dataset('data', data=chunk, chunks=True, maxshape=(None,))
data = f['data']
nChunks = finalSize // (chunk.size * chunk.itemsize)
with pg.ProgressDialog("Generating test.hdf5...", 0, nChunks) as dlg:
for i in range(nChunks):
newshape = [data.shape[0] + chunk.shape[0]]
data.resize(newshape)
data[-chunk.shape[0]:] = chunk
dlg += 1
if dlg.wasCanceled():
f.close()
os.remove('test.hdf5')
sys.exit()
dlg += 1
f.close()
if len(sys.argv) > 1:
fileName = sys.argv[1]
else:
fileName = 'test.hdf5'
if not os.path.isfile(fileName):
size, ok = QtWidgets.QInputDialog.getDouble(None, "Create HDF5 Dataset?", "This demo requires a large HDF5 array. To generate a file, enter the array size (in GB) and press OK.", 2.0)
if not ok:
sys.exit(0)
else:
createFile(int(size*1e9))
#raise Exception("No suitable HDF5 file found. Use createFile() to generate an example file.")
f = h5py.File(fileName, 'r')
curve = HDF5Plot()
curve.setHDF5(f['data'])
plt.addItem(curve)
if __name__ == '__main__':
pg.exec()
| HDF5Plot |
python | allegroai__clearml | clearml/automation/optimization.py | {
"start": 53171,
"end": 109300
} | class ____(object):
"""
Hyperparameter search controller. Clones the base experiment, changes arguments and tries to maximize/minimize
the defined objective.
"""
_tag = "optimization"
def __init__(
self,
base_task_id: str,
hyper_parameters: Sequence[Parameter],
objective_metric_title: Union[str, Sequence[str]],
objective_metric_series: Union[str, Sequence[str]],
objective_metric_sign: Union[str, Sequence[str]] = "min",
optimizer_class: Union[SearchStrategy, type(SearchStrategy)] = RandomSearch,
max_number_of_concurrent_tasks: int = 10,
execution_queue: str = "default",
optimization_time_limit: Optional[float] = None,
compute_time_limit: Optional[float] = None,
auto_connect_task: Union[bool, Task] = True,
always_create_task: bool = False,
spawn_project: Optional[str] = None,
save_top_k_tasks_only: Optional[int] = None,
**optimizer_kwargs: Any
) -> ():
"""
Create a new hyperparameter controller. The newly created object will launch and monitor the new experiments.
:param str base_task_id: The Task ID to be used as template experiment to optimize.
:param list hyper_parameters: The list of Parameter objects to optimize over.
:param objective_metric_title: The Objective metric title(s) to maximize / minimize
(for example, ``validation``, ``["validation", "loss"]``). If ``objective_metric_title`` is a sequence
(used to optimize multiple objectives at the same time), then ``objective_metric_series`` and
``objective_metric_sign`` have to be sequences of the same length. Each title will be matched
with the respective series and sign
:param Union[str, Sequence[str]] objective_metric_series: The Objective metric series to maximize / minimize
(for example, ``loss_series``, ``["validation_series", "loss_series"]``).
:param Union[str, Sequence[str]] objective_metric_sign: The objectives to maximize / minimize.
The values are:
- ``min`` - Minimize the last reported value for the specified title/series scalar.
- ``max`` - Maximize the last reported value for the specified title/series scalar.
- ``min_global`` - Minimize the min value of *all* reported values for the specific title/series scalar.
- ``max_global`` - Maximize the max value of *all* reported values for the specific title/series scalar.
:param class.SearchStrategy optimizer_class: The SearchStrategy optimizer to use for the hyperparameter search
:param int max_number_of_concurrent_tasks: The maximum number of concurrent Tasks (experiments) running at the
same time.
:param str execution_queue: The execution queue to use for launching Tasks (experiments).
:param float optimization_time_limit: The maximum time (minutes) for the entire optimization process. The
default is ``None``, indicating no time limit.
:param float compute_time_limit: The maximum compute time in minutes. When time limit is exceeded,
all jobs aborted. (Optional)
:param bool auto_connect_task: Store optimization arguments and configuration in the Task.
The values are:
- ``True`` - The optimization argument and configuration will be stored in the Task. All arguments will
be under the hyperparameter section ``opt``, and the optimization hyper_parameters space will be
stored in the Task configuration object section.
- ``False`` - Do not store with Task.
- ``Task`` - A specific Task object to connect the optimization process with.
:param bool always_create_task: Always create a new Task.
The values are:
- ``True`` - No current Task initialized. Create a new task named ``optimization`` in the ``base_task_id``
project.
- ``False`` - Use the :py:meth:`task.Task.current_task` (if exists) to report statistics.
:param str spawn_project: If project name is specified, create all optimization Jobs (Tasks) in the
specified project instead of the original base_task_id project.
:param int save_top_k_tasks_only: If specified and above 0, keep only the top_k performing Tasks,
and archive the rest of the created Tasks. Default: -1 keep everything, nothing will be archived.
:param ** optimizer_kwargs: Arguments passed directly to the optimizer constructor.
Example:
.. code-block:: py
:linenos:
:caption: Example
from clearml import Task
from clearml.automation import UniformParameterRange, DiscreteParameterRange
from clearml.automation import GridSearch, RandomSearch, HyperParameterOptimizer
task = Task.init('examples', 'HyperParameterOptimizer example')
an_optimizer = HyperParameterOptimizer(
base_task_id='fa30fa45d95d4927b87c323b5b04dc44',
hyper_parameters=[
UniformParameterRange('lr', min_value=0.01, max_value=0.3, step_size=0.05),
DiscreteParameterRange('network', values=['ResNet18', 'ResNet50', 'ResNet101']),
],
objective_metric_title='title',
objective_metric_series='series',
objective_metric_sign='min',
max_number_of_concurrent_tasks=5,
optimizer_class=RandomSearch,
execution_queue='workers', time_limit_per_job=120, pool_period_min=0.2)
# This will automatically create and print the optimizer new task id
# for later use. if a Task was already created, it will use it.
an_optimizer.set_time_limit(in_minutes=10.)
an_optimizer.start()
# we can create a pooling loop if we like
while not an_optimizer.reached_time_limit():
top_exp = an_optimizer.get_top_experiments(top_k=3)
print(top_exp)
# wait until optimization completed or timed-out
an_optimizer.wait()
# make sure we stop all jobs
an_optimizer.stop()
"""
if type(objective_metric_title) is not type(objective_metric_series) or type(
objective_metric_title
) is not type(objective_metric_sign):
raise TypeError(
"objective_metric_series, objective_metric_title and objective_metric_sign have to be of the same type"
" (strings if doing single objective optimization and lists of the same length"
" if doing multi-objective optimization)"
)
if isinstance(objective_metric_title, str):
objective_metric_series = [objective_metric_series]
objective_metric_title = [objective_metric_title]
objective_metric_sign = [objective_metric_sign]
if len(objective_metric_series) != len(objective_metric_title) or len(objective_metric_series) != len(
objective_metric_sign
):
raise ValueError(
"Can not use multiple objective optimization when objective_metric_series, objective_metric_title"
" or objective_metric_sign do not have the same length"
)
# create a new Task, if we do not have one already
self._task = auto_connect_task if isinstance(auto_connect_task, Task) else Task.current_task()
self._readonly_task = isinstance(auto_connect_task, Task) and str(self._task.status) not in (
"created",
"in_progress",
)
if not self._task and always_create_task:
base_task = Task.get_task(task_id=base_task_id)
self._task = Task.init(
project_name=base_task.get_project_name(),
task_name="Optimizing: {}".format(base_task.name),
task_type=Task.TaskTypes.optimizer,
)
opts = dict(
base_task_id=base_task_id,
objective_metric_title=objective_metric_title,
objective_metric_series=objective_metric_series,
objective_metric_sign=objective_metric_sign,
max_number_of_concurrent_tasks=max_number_of_concurrent_tasks,
execution_queue=execution_queue,
optimization_time_limit=optimization_time_limit,
compute_time_limit=compute_time_limit,
optimizer_kwargs=optimizer_kwargs,
)
# make sure all the created tasks are our children, as we are creating them
if self._task and not self._readonly_task:
self._task.add_tags([self._tag])
if auto_connect_task:
optimizer_class, hyper_parameters, opts = self._connect_args(
optimizer_class=optimizer_class, hyper_param_configuration=hyper_parameters, **opts
)
self.base_task_id = opts["base_task_id"]
self.hyper_parameters = hyper_parameters
self.max_number_of_concurrent_tasks = opts["max_number_of_concurrent_tasks"]
self.execution_queue = opts["execution_queue"]
self._objective_metric = MultiObjective(
title=opts["objective_metric_title"],
series=opts["objective_metric_series"],
order=["min" if sign_ in ("min", "min_global") else "max" for sign_ in opts["objective_metric_sign"]],
extremum=[sign_.endswith("_global") for sign_ in opts["objective_metric_sign"]],
)
optuna_error_message = (
"Multi parameter optimization is only supported via Optuna. Please install Optuna via"
+ " `pip install optuna and set the `optimizer_class` to `clearml.automation.optuna.OptimizerOptuna`"
)
try:
if self._objective_metric.len != 1:
from .optuna import OptimizerOptuna
if optimizer_class != OptimizerOptuna:
raise ValueError(optuna_error_message)
except Exception:
raise ValueError(optuna_error_message)
# if optimizer_class is an instance, use it as is.
if not isinstance(optimizer_class, type):
self.optimizer = optimizer_class
else:
self.optimizer = optimizer_class(
base_task_id=opts["base_task_id"],
hyper_parameters=hyper_parameters,
objective_metric=self._objective_metric,
execution_queue=opts["execution_queue"],
num_concurrent_workers=opts["max_number_of_concurrent_tasks"],
compute_time_limit=opts["compute_time_limit"],
**opts.get("optimizer_kwargs", {})
)
self.optimizer.set_optimizer_task(self._task)
self.optimization_timeout = None
self.optimization_start_time = None
self._thread = None
self._stop_event = None
self._report_period_min = 5.0
self._thread_reporter = None
self._experiment_completed_cb = None
self._save_top_k_tasks_only = max(0, save_top_k_tasks_only or 0)
self.optimizer.set_job_default_parent(self._task.id if self._task else None, project_name=spawn_project or None)
self.set_time_limit(in_minutes=opts["optimization_time_limit"])
def get_num_active_experiments(self) -> int:
"""
Return the number of current active experiments.
:return: The number of active experiments.
"""
if not self.optimizer:
return 0
return len(self.optimizer.get_running_jobs())
def get_active_experiments(self) -> Sequence[Task]:
"""
Return a list of Tasks of the current active experiments.
:return: A list of Task objects, representing the current active experiments.
"""
if not self.optimizer:
return []
return [j.task for j in self.optimizer.get_running_jobs()]
def start_locally(
self,
job_complete_callback: Optional[Callable[[str, float, int, dict, str], None]] = None,
) -> bool:
"""
Start the HyperParameterOptimizer controller completely locally. Both the optimizer task
and all spawned substasks are run on the local machine using the current environment.
If the calling process is stopped, then the controller stops as well.
:param Callable job_complete_callback: Callback function, called when a job is completed.
.. code-block:: py
def job_complete_callback(
job_id, # type: str
objective_value, # type: float
objective_iteration, # type: int
job_parameters, # type: dict
top_performance_job_id # type: str
):
pass
:return: True, if the controller started. False, if the controller did not start.
"""
self.optimizer.set_job_class(LocalClearmlJob)
return self.start(job_complete_callback=job_complete_callback)
def start(
self,
job_complete_callback: Optional[Callable[[str, float, int, dict, str], None]] = None,
) -> bool:
"""
Start the HyperParameterOptimizer controller. If the calling process is stopped, then the controller stops
as well.
:param Callable job_complete_callback: Callback function, called when a job is completed.
.. code-block:: py
def job_complete_callback(
job_id, # type: str
objective_value, # type: float
objective_iteration, # type: int
job_parameters, # type: dict
top_performance_job_id # type: str
):
pass
:return: True, if the controller started. False, if the controller did not start.
"""
if not self.optimizer:
return False
if self._thread:
return True
self.optimization_start_time = time()
self._experiment_completed_cb = job_complete_callback
self._stop_event = Event()
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
self._thread_reporter = Thread(target=self._report_daemon)
self._thread_reporter.daemon = True
self._thread_reporter.start()
return True
def stop(
self,
timeout: Optional[float] = None,
wait_for_reporter: Optional[bool] = True,
) -> ():
"""
Stop the HyperParameterOptimizer controller and the optimization thread.
:param float timeout: Wait timeout for the optimization thread to exit (minutes).
The default is ``None``, indicating do not wait to terminate immediately.
:param wait_for_reporter: Wait for reporter to flush data.
"""
if not self._thread or not self._stop_event or not self.optimizer:
if self._thread_reporter and wait_for_reporter:
self._thread_reporter.join()
return
_thread = self._thread
self._stop_event.set()
self.optimizer.stop()
# wait for optimizer thread
if timeout is not None:
_thread.join(timeout=timeout * 60.0)
# stop all running tasks:
for j in self.optimizer.get_running_jobs():
j.abort()
# clear thread
self._thread = None
if wait_for_reporter:
# wait for reporter to flush
self._thread_reporter.join()
def is_active(self) -> bool:
"""
Is the optimization procedure active (still running)
The values are:
- ``True`` - The optimization procedure is active (still running).
- ``False`` - The optimization procedure is not active (not still running).
.. note::
If the daemon thread has not yet started, ``is_active`` returns ``True``.
:return: A boolean indicating whether the optimization procedure is active (still running) or stopped.
"""
return self._stop_event is None or self._thread is not None
def is_running(self) -> bool:
"""
Is the optimization controller is running
The values are:
- ``True`` - The optimization procedure is running.
- ``False`` - The optimization procedure is running.
:return: A boolean indicating whether the optimization procedure is active (still running) or stopped.
"""
return self._thread is not None
def wait(self, timeout: Optional[float] = None) -> bool:
"""
Wait for the optimizer to finish.
.. note::
This method does not stop the optimizer. Call :meth:`stop` to terminate the optimizer.
:param float timeout: The timeout to wait for the optimization to complete (minutes).
If ``None``, then wait until we reached the timeout, or optimization completed.
:return: True, if the optimization finished. False, if the optimization timed out.
"""
if not self.is_running():
return True
if timeout is not None:
timeout *= 60.0
else:
timeout = (
max(0, self.optimization_timeout - self.optimization_start_time) if self.optimization_timeout else None
)
_thread = self._thread
_thread.join(timeout=timeout)
if _thread.is_alive():
return False
return True
def set_time_limit(
self,
in_minutes: Optional[float] = None,
specific_time: Optional[datetime] = None,
) -> ():
"""
Set a time limit for the HyperParameterOptimizer controller. If we reached the time limit, stop the optimization
process. If ``specific_time`` is provided, use it; otherwise, use the ``in_minutes``.
:param float in_minutes: The maximum processing time from current time (minutes).
:param datetime specific_time: The specific date/time limit.
"""
if specific_time:
self.optimization_timeout = specific_time.timestamp()
else:
self.optimization_timeout = (float(in_minutes) * 60.0) + time() if in_minutes else None
def get_time_limit(self) -> datetime:
"""
Return the controller optimization time limit.
:return: The absolute datetime limit of the controller optimization process.
"""
return datetime.fromtimestamp(self.optimization_timeout)
def elapsed(self) -> float:
"""
Return minutes elapsed from controller stating time stamp.
:return: The minutes from controller start time. A negative value means the process has not started yet.
"""
if self.optimization_start_time is None:
return -1.0
return (time() - self.optimization_start_time) / 60.0
def reached_time_limit(self) -> bool:
"""
Did the optimizer reach the time limit
The values are:
- ``True`` - The time limit passed.
- ``False`` - The time limit did not pass.
This method returns immediately, it does not wait for the optimizer.
:return: True, if optimizer is running and we passed the time limit, otherwise returns False.
"""
if self.optimization_start_time is None:
return False
if not self.is_running():
return False
return time() > self.optimization_timeout
def get_top_experiments(self, top_k: int) -> Sequence[Task]:
"""
Return a list of Tasks of the top performing experiments, based on the controller ``Objective`` object.
:param int top_k: The number of Tasks (experiments) to return.
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
if not self.optimizer:
return []
return self.optimizer.get_top_experiments(top_k=top_k)
def get_top_experiments_details(
self,
top_k: int,
all_metrics: bool = False,
all_hyper_parameters: bool = False,
only_completed: bool = False,
) -> Sequence[Union[str, dict]]:
"""
Return a list of dictionaries of the top performing experiments.
Example: ``[{'task_id': Task-ID, 'metrics': scalar-metric-dict, 'hyper_parameters': Hyper-Parameters},]``
Order is based on the controller ``Objective`` object.
:param int top_k: The number of Tasks (experiments) to return.
:param all_metrics: Default False, only return the objective metric on the metrics dictionary.
If True, return all scalar metrics of the experiment
:param all_hyper_parameters: Default False. If True, return all the hyperparameters from all the sections.
:param only_completed: return only completed Tasks. Default False.
:return: A list of dictionaries ``({task_id: '', hyper_parameters: {}, metrics: {}})``, ordered by performance,
where index 0 is the best performing Task.
Example w/ all_metrics=False:
.. code-block:: py
[
{
task_id: '0593b76dc7234c65a13a301f731958fa',
hyper_parameters: {'General/lr': '0.03', 'General/batch_size': '32'},
metrics: {
'accuracy per class/cat': {
'metric': 'accuracy per class',
'variant': 'cat',
'value': 0.119,
'min_value': 0.119,
'max_value': 0.782
},
}
},
]
Example w/ all_metrics=True:
.. code-block:: py
[
{
task_id: '0593b76dc7234c65a13a301f731958fa',
hyper_parameters: {'General/lr': '0.03', 'General/batch_size': '32'},
metrics: {
'accuracy per class/cat': {
'metric': 'accuracy per class',
'variant': 'cat',
'value': 0.119,
'min_value': 0.119,
'max_value': 0.782
},
'accuracy per class/deer': {
'metric': 'accuracy per class',
'variant': 'deer',
'value': 0.219,
'min_value': 0.219,
'max_value': 0.282
},
}
},
]
"""
if not self.optimizer:
return []
return self.optimizer.get_top_experiments_details(
top_k=top_k,
all_metrics=all_metrics,
all_hyper_parameters=all_hyper_parameters,
only_completed=only_completed,
)
def get_optimizer(self) -> SearchStrategy:
"""
Return the currently used optimizer object.
:return: The SearchStrategy object used.
"""
return self.optimizer
def set_default_job_class(self, job_class: ClearmlJob) -> ():
"""
Set the Job class to use when the optimizer spawns new Jobs.
:param ClearmlJob job_class: The Job Class type.
"""
self.optimizer.set_job_class(job_class)
def set_report_period(self, report_period_minutes: float) -> ():
"""
Set reporting period for the accumulated objective report (minutes). This report is sent on the Optimizer Task,
and collects the Objective metric from all running jobs.
:param float report_period_minutes: The reporting period (minutes). The default is once every 10 minutes.
"""
self._report_period_min = float(report_period_minutes)
@classmethod
def get_optimizer_top_experiments(
cls,
objective_metric_title: Union[str, List[str]],
objective_metric_series: Union[str, List[str]],
objective_metric_sign: Union[str, List[str]],
optimizer_task_id: str,
top_k: int,
) -> Sequence[Task]:
"""
Return a list of Tasks of the top performing experiments
for a specific HyperParameter Optimization session (i.e. Task ID), based on the title/series objective.
:param str objective_metric_title: The Objective metric title to maximize / minimize (for example,
``validation``).
:param str objective_metric_series: The Objective metric series to maximize / minimize (for example, ``loss``).
:param str objective_metric_sign: The objective to maximize / minimize.
The values are:
- ``min`` - Minimize the last reported value for the specified title/series scalar.
- ``max`` - Maximize the last reported value for the specified title/series scalar.
- ``min_global`` - Minimize the min value of *all* reported values for the specific title/series scalar.
- ``max_global`` - Maximize the max value of *all* reported values for the specific title/series scalar.
:param str optimizer_task_id: Parent optimizer Task ID
:param top_k: The number of Tasks (experiments) to return.
:return: A list of Task objects, ordered by performance, where index 0 is the best performing Task.
"""
objective = Objective(
title=objective_metric_title,
series=objective_metric_series,
order=objective_metric_sign,
)
return objective.get_top_tasks(top_k=top_k, optimizer_task_id=optimizer_task_id)
@property
def objective_metric(self) -> Union[Objective, MultiObjective]:
if self._objective_metric.len == 1:
return self._objective_metric.objectives[0]
return self._objective_metric
def _connect_args(
self, optimizer_class: SearchStrategy = None, hyper_param_configuration: dict = None, **kwargs: Any
) -> (SearchStrategy, list, dict):
if not self._task or self._readonly_task:
logger.warning(
"Auto Connect turned on but no Task was found, "
"hyper-parameter optimization argument logging disabled"
)
return optimizer_class, hyper_param_configuration, kwargs
configuration_dict = {"parameter_optimization_space": [c.to_dict() for c in hyper_param_configuration]}
self._task.connect_configuration(configuration_dict)
# this is the conversion back magic:
configuration_dict = {
"parameter_optimization_space": [
Parameter.from_dict(c) for c in configuration_dict["parameter_optimization_space"]
]
}
complex_optimizer_kwargs = None
if "optimizer_kwargs" in kwargs:
# do not store complex optimizer kwargs:
optimizer_kwargs = kwargs.pop("optimizer_kwargs", {})
complex_optimizer_kwargs = {
k: v
for k, v in optimizer_kwargs.items()
if not isinstance(
v,
six.string_types + six.integer_types + (six.text_type, float, list, tuple, dict, type(None)),
)
}
kwargs["optimizer_kwargs"] = {
k: v for k, v in optimizer_kwargs.items() if k not in complex_optimizer_kwargs
}
# skip non basic types:
arguments = {"opt": kwargs}
if not isinstance(optimizer_class, type):
logger.warning("Auto Connect optimizer_class disabled, {} is already instantiated".format(optimizer_class))
self._task.connect(arguments)
else:
arguments["opt"]["optimizer_class"] = (
str(optimizer_class).split(".")[-1][:-2] if not isinstance(optimizer_class, str) else optimizer_class
)
self._task.connect(arguments)
# this is the conversion back magic:
original_class = optimizer_class
optimizer_class = arguments["opt"].pop("optimizer_class", None)
if optimizer_class == "RandomSearch":
optimizer_class = RandomSearch
elif optimizer_class == "GridSearch":
optimizer_class = GridSearch
elif optimizer_class == "OptimizerBOHB":
from .hpbandster import OptimizerBOHB
optimizer_class = OptimizerBOHB
elif optimizer_class == "OptimizerOptuna":
from .optuna import OptimizerOptuna
optimizer_class = OptimizerOptuna
else:
logger.warning(
"Could not resolve optimizer_class {} reverting to original class {}".format(
optimizer_class, original_class
)
)
optimizer_class = original_class
if complex_optimizer_kwargs:
if "optimizer_kwargs" not in arguments["opt"]:
arguments["opt"]["optimizer_kwargs"] = complex_optimizer_kwargs
else:
arguments["opt"]["optimizer_kwargs"].update(complex_optimizer_kwargs)
return (
optimizer_class,
configuration_dict["parameter_optimization_space"],
arguments["opt"],
)
def _daemon(self) -> ():
"""
Implement the main pooling thread, calling loop every ``self.pool_period_minutes`` minutes.
"""
self.optimizer.start()
self._thread = None
def _report_daemon(self) -> ():
title_series = self._objective_metric.get_objective_metric()
title = ["{}/{}".format(ts[0], ts[1]) for ts in title_series]
counter = 0
completed_jobs = dict()
task_logger = None
cur_completed_jobs = set()
cur_task = self._task or Task.current_task()
if cur_task and self.optimizer:
# noinspection PyProtectedMember
child_tasks = self.optimizer._get_child_tasks(parent_task_id=cur_task.id, status=["completed", "stopped"])
hyper_parameters = [h.name for h in self.hyper_parameters]
for task in child_tasks:
params = {k: v for k, v in task.get_parameters().items() if k in hyper_parameters}
params["status"] = str(task.status)
# noinspection PyProtectedMember
iteration_value = task.get_last_iteration()
objective = self._objective_metric.get_objective(task)
completed_jobs[task.id] = (
objective if objective is not None else ([-1] * self._objective_metric.len),
[iteration_value] * self._objective_metric.len
if iteration_value is not None
else ([-1] * self._objective_metric.len),
params,
)
while self._thread is not None:
timeout = self.optimization_timeout - time() if self.optimization_timeout else 0.0
if timeout >= 0:
timeout = min(
self._report_period_min * 60.0,
timeout if timeout else self._report_period_min * 60.0,
)
# make sure that we have the first report fired before we actually go to sleep, wait for 15 sec.
if counter <= 0:
timeout = 15
print("Progress report #{} completed, sleeping for {} minutes".format(counter, timeout / 60.0))
if self._stop_event.wait(timeout=timeout):
# wait for one last report
timeout = -1
counter += 1
# get task to report on.
cur_task = self._task or Task.current_task()
if cur_task and not self._readonly_task:
task_logger = cur_task.get_logger()
# do some reporting
self._report_remaining_budget(task_logger, counter)
if (
self.optimizer.budget.compute_time.used
and self.optimizer.budget.compute_time.limit
and self.optimizer.budget.compute_time.used >= self.optimizer.budget.compute_time.limit
):
logger.warning(
"Optimizer task reached compute time limit (used {:.2f} out of {:.2f})".format(
self.optimizer.budget.compute_time.limit,
self.optimizer.compute_time.used,
)
)
timeout = -1
self._report_resources(task_logger, counter)
# collect a summary of all the jobs and their final objective values
cur_completed_jobs = set(self.optimizer.get_created_jobs_ids().keys()) - {
j.task_id() for j in self.optimizer.get_running_jobs()
}
self._report_completed_status(completed_jobs, cur_completed_jobs, task_logger, title)
self._report_completed_tasks_best_results(set(completed_jobs.keys()), task_logger, title, counter)
self._auto_archive_low_performance_tasks(completed_jobs)
# if we should leave, stop everything now.
if timeout < 0:
# we should leave
self.stop(wait_for_reporter=False)
return
if task_logger and counter and not self._readonly_task:
counter += 1
self._report_remaining_budget(task_logger, counter)
self._report_resources(task_logger, counter)
self._report_completed_status(completed_jobs, cur_completed_jobs, task_logger, title, force=True)
self._report_completed_tasks_best_results(set(completed_jobs.keys()), task_logger, title, counter)
self._auto_archive_low_performance_tasks(completed_jobs)
def _report_completed_status(
    self,
    completed_jobs: Mapping[str, Tuple[Union[List[float], float], Union[List[int], int], dict]],
    cur_completed_jobs: Set[str],
    task_logger: Logger,
    title: Union[str, List[str]],
    force: bool = False,
) -> None:
    """
    Report the state of all completed optimization jobs to the controller task.

    Updates (in place) the ``completed_jobs`` mapping of
    ``job_id -> (objective values, iterations, parameters-with-status dict)``,
    fires the experiment-completed callback for newly finished jobs, and emits:
    a scatter plot of objective vs. job index, a sortable summary table with
    task links, and a parallel-coordinates plot.

    :param completed_jobs: Mutable mapping of job id to (objective, iteration, params).
        Mutated in place as new results arrive.
    :param cur_completed_jobs: Set of job ids currently known to be completed;
        reporting is skipped when it matches ``completed_jobs`` (unless ``force``).
    :param task_logger: Logger of the controller task used for all plots/tables.
    :param title: Objective title, or a list of titles for multi-objective runs.
    :param force: If True, always re-report and upload the summary table artifact.
    """
    job_ids_sorted_by_objective = self.__sort_jobs_by_objective(completed_jobs)
    # best_experiment = (normalized objective values, job_id); seeded with -inf so any
    # real result replaces it.
    best_experiment = (
        (
            self._objective_metric.get_normalized_objective(job_ids_sorted_by_objective[0]),
            job_ids_sorted_by_objective[0],
        )
        if job_ids_sorted_by_objective
        else ([float("-inf")], None)
    )
    if force or cur_completed_jobs != set(completed_jobs.keys()):
        pairs = []   # (job index, objective value) points for the scatter plot
        labels = []  # hover label per point: the job's parameter dict, braces stripped
        created_jobs = copy(self.optimizer.get_created_jobs_ids())
        created_jobs_tasks = self.optimizer.get_created_jobs_tasks()
        id_status = {j_id: j_run.status() for j_id, j_run in created_jobs_tasks.items()}
        for i, (job_id, params) in enumerate(created_jobs.items()):
            value = self._objective_metric.get_objective(job_id)
            if job_id in completed_jobs:
                # Known job: refresh its entry if the objective value changed,
                # otherwise only refresh its status field.
                if value != completed_jobs[job_id][0]:
                    iteration_value = self._objective_metric.get_current_raw_objective(job_id)
                    if iteration_value:
                        # each raw objective entry is (iteration, value); -1 marks "no report yet"
                        iteration = [it_[0] if it_ else -1 for it_ in iteration_value]
                    else:
                        iteration = [-1]
                    completed_jobs[job_id] = (
                        value,
                        iteration,
                        copy(dict(status=id_status.get(job_id), **params)),
                    )
                elif completed_jobs.get(job_id):
                    completed_jobs[job_id] = (
                        completed_jobs[job_id][0],
                        completed_jobs[job_id][1],
                        copy(dict(status=id_status.get(job_id), **params)),
                    )
                pairs.append((i, completed_jobs[job_id][0]))
                # str(dict)[1:-1] strips the surrounding '{' '}' for a compact label
                labels.append(str(completed_jobs[job_id][2])[1:-1])
            elif value is not None and all(v is not None for v in value):
                # Newly completed job (has a full objective vector): record it.
                pairs.append((i, value))
                labels.append(str(params)[1:-1])
                iteration_value = self._objective_metric.get_current_raw_objective(job_id)
                if iteration_value:
                    iteration = [it_[0] if it_ else -1 for it_ in iteration_value]
                else:
                    iteration = [-1]
                completed_jobs[job_id] = (
                    value,
                    iteration,
                    copy(dict(status=id_status.get(job_id), **params)),
                )
                # callback new experiment completed
                if self._experiment_completed_cb:
                    normalized_value = self._objective_metric.get_normalized_objective(job_id)
                    if (
                        self._objective_metric.len == 1
                        and normalized_value is not None
                        and normalized_value[0] > best_experiment[0][0]
                    ):
                        # single objective: plain greater-than comparison
                        best_experiment = normalized_value, job_id
                    elif (
                        self._objective_metric.len != 1
                        and normalized_value is not None
                        and all(n is not None for n in normalized_value)
                        and (
                            best_experiment[0] == float("-inf")
                            or MultiObjective._dominates(normalized_value, best_experiment[0])
                        )
                    ):  # noqa
                        # multi-objective: Pareto domination decides the new best
                        best_experiment = normalized_value, job_id
                    c = completed_jobs[job_id]
                    self._experiment_completed_cb(job_id, c[0], c[1], c[2], best_experiment[1])
        if pairs:
            print("Updating job performance summary plot/table")
            if isinstance(title, list):
                # multi-objective: one scatter series per objective title
                for i, title_ in enumerate(title):
                    # update scatter plot
                    task_logger.report_scatter2d(
                        title="Optimization Objective",
                        series=title_,
                        scatter=[(p[0], p[1][i]) for p in pairs],
                        iteration=0,
                        labels=labels,
                        mode="markers",
                        xaxis="job #",
                        yaxis="objective",
                    )
            else:
                task_logger.report_scatter2d(
                    title="Optimization Objective",
                    series=title,
                    scatter=pairs,
                    iteration=0,
                    labels=labels,
                    mode="markers",
                    xaxis="job #",
                    yaxis="objective",
                )
            # update summary table
            job_ids_sorted_by_objective = self.__sort_jobs_by_objective(completed_jobs)
            # sort the columns except for 'objective', 'iteration'
            columns = list(sorted(set([c for k, v in completed_jobs.items() for c in v[2].keys()])))
            # concat_iterations: True when every job reported all its objectives at the
            # same iteration, allowing a single shared "iteration" column.
            concat_iterations = True
            if self._objective_metric.len == 1:
                # add the index column (task id) and the first two columns 'objective', 'iteration' then the rest
                table_values = [["task id", "objective", "iteration"] + columns]
                table_values += [
                    (
                        [job, completed_jobs[job][0][0], completed_jobs[job][1][0]]
                        + [completed_jobs[job][2].get(c, "") for c in columns]
                    )
                    for job in job_ids_sorted_by_objective
                ]
            else:
                # multi-objective table: header built incrementally, then rows appended
                table_values = ["task id"]
                for job in job_ids_sorted_by_objective:
                    if not all(iter_ == completed_jobs[job][1][0] for iter_ in completed_jobs[job][1]):
                        concat_iterations = False
                        break
                if concat_iterations:
                    # one value column per objective plus a single shared iteration column
                    for objective in self._objective_metric.objectives:
                        table_values.append(objective.title + "/" + objective.series)
                    table_values.append("iteration")
                    table_values = [table_values + columns]
                    for job in job_ids_sorted_by_objective:
                        entry = [job]
                        for val in completed_jobs[job][0]:
                            entry += [val]
                        entry += [completed_jobs[job][1][0]]
                        entry += [completed_jobs[job][2].get(c, "") for c in columns]
                        table_values.append(entry)
                else:
                    # per-objective value AND iteration columns, interleaved
                    for objective in self._objective_metric.objectives:
                        table_values.append(objective.title + "/" + objective.series)
                        table_values.append("iteration " + objective.title + "/" + objective.series)
                    table_values = [table_values + columns]
                    for job in job_ids_sorted_by_objective:
                        entry = [job]
                        for val, iter_ in zip(completed_jobs[job][0], completed_jobs[job][1]):
                            entry += [val, iter_]
                        entry += [completed_jobs[job][2].get(c, "") for c in columns]
                        table_values.append(entry)
            # create links for task id in the table: derive a URL template from this
            # controller task's own web page by substituting project/task placeholders
            task_link_template = (
                self._task.get_output_log_web_page()
                .replace("/{}/".format(self._task.project), "/{project}/")
                .replace("/{}/".format(self._task.id), "/{task}/")
            )
            # create links for task id in the table
            table_values_with_links = deepcopy(table_values)
            for i in range(1, len(table_values_with_links)):
                task_id = table_values_with_links[i][0]
                project_id = created_jobs_tasks[task_id].task.project if task_id in created_jobs_tasks else "*"
                table_values_with_links[i][0] = '<a href="{}"> {} </a>'.format(
                    task_link_template.format(project=project_id, task=task_id), task_id
                )
            task_logger.report_table(
                "summary",
                "job",
                0,
                table_plot=table_values_with_links,
                extra_layout={
                    "title": "objective: {}".format(title if not isinstance(title, list) else ", ".join(title))
                },
            )
            # Build parallel Coordinates: convert to columns, and reorder accordingly
            if len(table_values) > 1:
                # transpose rows -> columns; col[0] is the header, col[1:] the values
                table_values_columns = [[row[i] for row in table_values] for i in range(len(table_values[0]))]
                if self._objective_metric.len == 1:
                    # truncate task ids to 6 chars, drop the iteration column, and move
                    # the objective column last (it drives the color scale)
                    table_values_columns = (
                        [[table_values_columns[0][0]] + [c[:6] + "..." for c in table_values_columns[0][1:]]]
                        + table_values_columns[2:-1]
                        + [[title] + table_values_columns[1][1:]]
                    )
                else:
                    if not concat_iterations:
                        # keep only the objective-value columns (odd indices), dropping
                        # the interleaved per-objective iteration columns and 'status'
                        new_table_values_columns = []
                        handled = []
                        for i in range(1, 2 * len(self._objective_metric.objectives), 2):
                            handled.append(i)
                            new_table_values_columns.append(table_values_columns[i])
                        prefix = []
                        for i in range(len(table_values_columns)):
                            if i in handled or table_values_columns[i][0] == "status":
                                continue
                            prefix.append(table_values_columns[i])
                        table_values_columns = prefix + new_table_values_columns
                    else:
                        # move the objective columns to the end, after the parameter columns
                        table_values_columns = (
                            [table_values_columns[0]]
                            + table_values_columns[len(self._objective_metric.objectives) + 1 : -1]
                            + table_values_columns[1 : len(self._objective_metric.objectives) + 1]
                        )
                    for i in range(len(table_values_columns[0]) - 1):
                        table_values_columns[0][i + 1] = table_values_columns[0][i + 1][:6] + "..."
                pcc_dims = []
                for col in table_values_columns:
                    # test if all values are numbers:
                    try:
                        # try to cast all values to float
                        values = [float(v) for v in col[1:]]
                        d = dict(label=col[0], values=values)
                    except (ValueError, TypeError):
                        # categorical column: map each tick label to an integer position
                        values = list(range(len(col[1:])))
                        ticks = col[1:]
                        unique_ticks = list(set(ticks))
                        d = dict(label=col[0], values=values, tickvals=values, ticktext=ticks)
                        if len(ticks) != len(unique_ticks):  # Mapping duplicate ticktext
                            ticktext = {key: i for i, key in enumerate(unique_ticks)}
                            d["values"] = [ticktext[tick] for tick in ticks]
                            d["tickvals"] = list(range(len(ticktext)))
                            d["ticktext"] = list(sorted(ticktext, key=ticktext.get))
                    pcc_dims.append(d)
                # report parallel coordinates
                plotly_pcc = dict(
                    data=[
                        dict(
                            type="parcoords",
                            line=dict(
                                # NOTE(review): the trailing comma makes this a 1-tuple,
                                # which is always truthy -- presumably a plain bool was
                                # intended; confirm against the plotly parcoords schema.
                                reversescale=(
                                    self._objective_metric.len == 1 and self._objective_metric.objectives[0].sign >= 0,
                                ),
                                colorscale="Viridis",
                                color=table_values_columns[-1][1:],
                            ),
                            dimensions=pcc_dims,
                        )
                    ],
                    layout={},
                )
                task_logger.report_plotly(
                    title="Parallel Coordinates",
                    series="",
                    iteration=0,
                    figure=plotly_pcc,
                )
            # upload summary as artifact
            if force:
                task = self._task or Task.current_task()
                if task:
                    task.upload_artifact(name="summary", artifact_object={"table": table_values})
def _report_remaining_budget(self, task_logger: Logger, counter: int) -> None:
    """
    Report the remaining optimization budget (jobs/iterations/compute-time) as
    percentage scalars on the controller task.

    :param task_logger: Logger of the controller task.
    :param counter: Report iteration index (x-axis of the scalar series).
    """
    # noinspection PyBroadException
    try:
        budget = self.optimizer.budget.to_dict()
    except Exception:
        # best-effort: a malformed/unavailable budget simply reports nothing
        budget = {}
    # report remaining budget
    for budget_part, value in budget.items():
        # value["used"] is the consumed fraction in [0, 1]; convert to remaining %
        task_logger.report_scalar(
            title="remaining budget",
            series="{} %".format(budget_part),
            iteration=counter,
            value=round(100 - value["used"] * 100.0, ndigits=1),
        )
    if self.optimization_timeout and self.optimization_start_time:
        # remaining wall-clock budget as a percentage
        # NOTE(review): this assumes optimization_timeout is an absolute deadline
        # timestamp (same epoch as time()), not a duration -- confirm with the setter.
        task_logger.report_scalar(
            title="remaining budget",
            series="time %",
            iteration=counter,
            value=round(
                100
                - (
                    100.0
                    * (time() - self.optimization_start_time)
                    / (self.optimization_timeout - self.optimization_start_time)
                ),
                ndigits=1,
            ),
        )
def _report_completed_tasks_best_results(
    self,
    completed_jobs: Set[str],
    task_logger: Logger,
    title: Union[str, List[str]],
    counter: int,
) -> None:
    """
    Report, per objective, the best (max or min, depending on the objective sign)
    and the most recently reported objective values across all completed jobs.

    :param completed_jobs: Ids of the completed jobs to summarize.
    :param task_logger: Logger of the controller task.
    :param title: Objective title, or a list of titles for multi-objective runs.
    :param counter: Report iteration index (x-axis of the scalar series).
    """
    if not completed_jobs:
        return
    objectives = self._objective_metric.objectives
    # normalize to a list so single- and multi-objective runs share one code path
    if not isinstance(title, list):
        title = [title]
    for objective, title_ in zip(objectives, title):
        # positive sign -> maximize, otherwise minimize
        value_func, series_name = (max, "max") if objective.get_objective_sign() > 0 else (min, "min")
        latest_completed, obj_values = self._get_latest_completed_task_value(
            completed_jobs, series_name, objective.title, objective.series
        )
        if latest_completed:
            val = value_func(obj_values)
            task_logger.report_scalar(title=title_, series=series_name, iteration=counter, value=val)
            task_logger.report_scalar(
                title=title_,
                series="last reported",
                iteration=counter,
                value=latest_completed,
            )
def _report_resources(self, task_logger: Logger, iteration: int) -> None:
    """Report resource usage: active queue workers and running/pending task counts."""
    self._report_active_workers(task_logger, iteration)
    self._report_tasks_status(task_logger, iteration)
def _report_active_workers(self, task_logger: Logger, iteration: int) -> None:
    """
    Query the backend for all workers and report how many are listening on
    this optimizer's execution queue.

    :param task_logger: Logger of the controller task.
    :param iteration: Report iteration index (x-axis of the scalar series).
    """
    res = self.__get_session().send(workers_service.GetAllRequest())
    response = res.wait()
    if response.ok():
        all_workers = response
        # count workers that have our execution queue among their assigned queues
        queue_workers = len(
            [
                worker.get("id")
                for worker in all_workers.response_data.get("workers")
                for q in worker.get("queues")
                if q.get("name") == self.execution_queue
            ]
        )
        task_logger.report_scalar(
            title="resources",
            series="queue workers",
            iteration=iteration,
            value=queue_workers,
        )
def _report_tasks_status(self, task_logger: Logger, iteration: int) -> ():
    """
    Report how many of the optimizer's current jobs are actually running versus
    still pending, as two scalar series under the "resources" title.

    :param task_logger: Logger of the controller task.
    :param iteration: Report iteration index (x-axis of the scalar series).
    """
    current_jobs = list(self.optimizer.get_running_jobs())
    running_count = sum(1 for job in current_jobs if job.is_running())
    pending_count = len(current_jobs) - running_count
    for series, val in (("running tasks", running_count), ("pending tasks", pending_count)):
        task_logger.report_scalar(title="resources", series=series, iteration=iteration, value=val)
def _get_latest_completed_task_value(
    self,
    cur_completed_jobs: Set[str],
    series_name: str,
    title: str,
    series: str,
) -> Tuple[Optional[float], List[float]]:
    """
    Query the backend for each completed job and collect its last-reported
    objective values.

    :param cur_completed_jobs: Ids of jobs believed to be completed.
    :param series_name: "max" to collect per-job maxima, anything else for minima.
    :param title: Objective metric title.
    :param series: Objective metric series (variant).
    :return: Tuple of (last value of the most recently completed task or None,
        list of per-job max/min objective values).
    """
    completed_value = None
    latest_completed = None  # completion timestamp of the newest completed task seen so far
    obj_values = []
    cur_task = self._task or Task.current_task()
    for j in cur_completed_jobs:
        res = cur_task.send(tasks_service.GetByIdRequest(task=j))
        response = res.wait()
        # skip jobs the backend does not report as fully completed
        if not response.ok() or response.response_data["task"].get("status") != Task.TaskStatusEnum.completed:
            continue
        # 'completed' is an ISO timestamp; drop the timezone offset before parsing
        completed_time = datetime_from_isoformat(response.response_data["task"]["completed"].partition("+")[0])
        completed_time = completed_time.timestamp()
        completed_values = self._get_last_value(response, title, series)
        obj_values.append(completed_values["max_value"] if series_name == "max" else completed_values["min_value"])
        # remember the last value of whichever task finished most recently
        if not latest_completed or completed_time > latest_completed:
            latest_completed = completed_time
            completed_value = completed_values["value"]
    return completed_value, obj_values
def _get_last_value(self, response: Any, title: str, series: str) -> Any:
    """
    Extract the cached last-metric entry for (title, series) from a task-info
    backend response.

    :param response: Backend response carrying ``response_data["task"]``.
    :param title: Metric title as reported by the task.
    :param series: Metric series (variant) as reported by the task.
    :return: The ``last_metrics`` entry (dict with value/min_value/max_value).
    """
    # ClearmlJob converts the human-readable title/series into the hashed keys
    # used by the backend's last_metrics store; the other tuple members are unused.
    _metrics, metric_key, variant_key, _values = ClearmlJob.get_metric_req_params(title, series)
    return response.response_data["task"]["last_metrics"][metric_key][variant_key]
def _auto_archive_low_performance_tasks(
    self, completed_jobs: Mapping[str, Tuple[List[float], List[int], dict]]
) -> None:
    """
    Keep only the top-K performing completed tasks visible: archive everything
    ranked below ``self._save_top_k_tasks_only`` and un-archive any top-K task
    that was previously archived.

    :param completed_jobs: Mapping of job id to (objective, iteration, params),
        used for ranking and for log messages.
    """
    # feature disabled when top-K is not a positive number
    if self._save_top_k_tasks_only <= 0:
        return
    # sort based on performance
    job_ids_sorted_by_objective = self.__sort_jobs_by_objective(completed_jobs)
    # query system_tags only
    res = self.__get_session().send(
        tasks_service.GetAllRequest(
            id=job_ids_sorted_by_objective,
            status=["completed", "stopped"],
            only_fields=["id", "system_tags"],
        )
    )
    response = res.wait()
    if not response.ok():
        # best-effort: skip archiving on a failed backend query
        return
    tasks_system_tags_lookup = {
        task.get("id"): task.get("system_tags") for task in response.response_data.get("tasks")
    }
    for i, task_id in enumerate(job_ids_sorted_by_objective):
        system_tags = tasks_system_tags_lookup.get(task_id, [])
        if i < self._save_top_k_tasks_only and Task.archived_tag in system_tags:
            print(
                "Restoring from archive Task id={} (#{} objective={})".format(
                    task_id, i, completed_jobs[task_id][0]
                )
            )
            # top_k task and is archived, remove archive tag
            system_tags = list(set(system_tags) - {Task.archived_tag})
            res = self.__get_session().send(
                tasks_service.EditRequest(task=task_id, system_tags=system_tags, force=True)
            )
            res.wait()
        elif i >= self._save_top_k_tasks_only and Task.archived_tag not in system_tags:
            print("Archiving Task id={} (#{} objective={})".format(task_id, i, completed_jobs[task_id][0]))
            # Not in top_k task and not archived, add archive tag
            system_tags = list(set(system_tags) | {Task.archived_tag})
            res = self.__get_session().send(
                tasks_service.EditRequest(task=task_id, system_tags=system_tags, force=True)
            )
            res.wait()
def __get_session(self) -> Session:
    """
    Return the API session used for backend requests: the session of the
    controller task when one exists, otherwise the process-wide default session.
    """
    owner_task = self._task or Task.current_task()
    if owner_task:
        return owner_task.default_session
    # no task available -- fall back to the global default session
    # noinspection PyProtectedMember
    return Task._get_default_session()
def __sort_jobs_by_objective(self, completed_jobs: Mapping[str, Tuple[List[float], List[int], dict]]) -> List[str]:
    """
    Return the completed job ids ordered best-first according to the objective.

    Single-objective runs sort by the objective value (descending when the
    objective sign is non-negative, i.e. maximization); multi-objective runs
    delegate to the metric's Pareto-domination ordering.

    :param completed_jobs: Mapping of job id to (objective, iteration, params).
    :return: Job ids sorted best-first (empty list when there are no jobs).
    """
    if not completed_jobs:
        return []
    if self._objective_metric.len == 1:
        descending = bool(self._objective_metric.objectives[0].sign >= 0)
        return sorted(completed_jobs.keys(), key=lambda job_id: completed_jobs[job_id][0], reverse=descending)
    # multi-objective: order by Pareto domination
    # noinspection PyProtectedMember
    return self._objective_metric._sort_jobs_by_domination(completed_jobs)
| HyperParameterOptimizer |
python | pypa__pipenv | pipenv/exceptions.py | {
"start": 7746,
"end": 7893
} | class ____(PipenvException):
def __init__(self, message=None, **kwargs):
PipenvException.__init__(self, message, **kwargs)
| SetupException |
python | conda__conda | conda/common/configuration.py | {
"start": 41018,
"end": 43693
} | class ____(Parameter):
"""Parameter type for a Configuration class that holds an object with Parameter fields."""
_type = object
def __init__(self, element_type, default=ConfigurationObject(), validation=None):
"""
Args:
element_type (object): The object type with parameter fields held in ObjectParameter.
default (Sequence): default value, empty tuple if not given.
"""
self._element_type = element_type
super().__init__(default, validation)
def get_all_matches(self, name, names, instance):
# it also config settings like `proxy_servers: ~`
matches, exceptions = super().get_all_matches(name, names, instance)
matches = tuple(m for m in matches if m._raw_value is not None)
return matches, exceptions
def load(self, name, match):
value = match.value(self._element_type)
if value is None:
return ObjectLoadedParameter(
name,
None,
self._element_type,
match.keyflag(),
None,
validation=self._validation,
)
if not isinstance(value, (Mapping, ConfigurationObject)):
raise InvalidTypeError(
name, value, match.source, value.__class__.__name__, self._type.__name__
)
# for a default object, extract out the instance variables
if isinstance(value, ConfigurationObject):
value = vars(value)
object_parameter_attrs = {
attr_name: parameter_type
for attr_name, parameter_type in vars(self._element_type).items()
if isinstance(parameter_type, Parameter)
}
# recursively load object fields
loaded_attrs = {}
for attr_name, parameter_type in object_parameter_attrs.items():
if raw_child_value := value.get(attr_name):
loaded_child_value = parameter_type.load(name, raw_child_value)
else:
loaded_child_value = parameter_type.default
loaded_attrs[attr_name] = loaded_child_value
# copy object and replace Parameter with LoadedParameter fields
object_copy = copy.deepcopy(self._element_type)
for attr_name, loaded_child_parameter in loaded_attrs.items():
object_copy.__setattr__(attr_name, loaded_child_parameter)
return ObjectLoadedParameter(
name,
object_copy,
self._element_type,
match.keyflag(),
match.valueflags(self._element_type),
validation=self._validation,
)
| ObjectParameter |
python | pytorch__pytorch | test/jit/test_module_containers.py | {
"start": 449,
"end": 25791
} | class ____(JitTestCase):
def test_sequential_intermediary_types(self):
class A(torch.nn.Module):
def forward(self, x):
return x + 3
class B(torch.nn.Module):
def forward(self, x):
return {"1": x}
class C(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.foo = torch.nn.Sequential(A(), B())
def forward(self, x):
return self.foo(x)
self.checkModule(C(), (torch.tensor(1),))
def test_moduledict(self):
class Inner(torch.nn.Module):
def forward(self, x):
return x + 10
class Inner2(torch.nn.Module):
def forward(self, x):
return x * 2
class Inner3(torch.nn.Module):
def forward(self, x):
return (x - 4) * 3
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
modules = OrderedDict(
[
("one", Inner()),
("two", Inner2()),
("three", Inner3()),
]
)
self.moduledict = nn.ModuleDict(modules)
def forward(self, x, skip_name):
# type: (Tensor, str)
names = torch.jit.annotate(List[str], [])
values = []
for name in self.moduledict:
names.append(name)
for name, mod in self.moduledict.items():
if name != skip_name:
names.append(name)
x = mod(x)
values.append(x)
for mod in self.moduledict.values():
x = mod(x)
values.append(x)
for key in self.moduledict:
names.append(key)
return x, names
class M2(M):
def forward(self, x, skip_name):
# type: (Tensor, str)
names = torch.jit.annotate(List[str], [])
values = []
x2 = x
iter = 0
for name in self.moduledict:
names.append(name)
for i, (name, mod) in enumerate(self.moduledict.items()):
iter += i
if name != skip_name:
names.append(name)
x = mod(x)
values.append(x)
for i, mod in enumerate(self.moduledict.values()):
iter += i
x = mod(x)
values.append(x)
for i, key in enumerate(self.moduledict.keys()):
iter += i
names.append(key)
for mod, mod in zip(self.moduledict.values(), self.moduledict.values()):
iter += i
x2 = mod(mod(x2))
return x, x2, names, iter
for name in ["", "one", "two", "three"]:
inp = torch.tensor(1)
self.checkModule(M(), (inp, name))
self.checkModule(M2(), (inp, name))
def test_custom_container_forward(self):
class Inner(torch.nn.Module):
def forward(self, x):
return x + 10
class CustomSequential(nn.Sequential):
def __init__(self) -> None:
super().__init__(nn.ReLU(), Inner())
def forward(self, x):
x = x + 3
for mod in self:
x = mod(x)
return x - 5
self.checkModule(CustomSequential(), (torch.tensor(0.5),))
class CustomModuleList(nn.ModuleList):
def __init__(self) -> None:
super().__init__([nn.ReLU(), Inner()])
def forward(self, x):
x = x + 3
for mod in self:
x = mod(x)
return x - 5
self.checkModule(CustomModuleList(), (torch.tensor(0.5),))
class CustomModuleDict(nn.ModuleDict):
def __init__(self) -> None:
super().__init__(
OrderedDict(
[
("one", Inner()),
("two", nn.ReLU()),
("three", Inner()),
]
)
)
def forward(self, x):
x = x + 3
names = torch.jit.annotate(List[str], [])
for name, mod in self.items():
x = mod(x)
names.append(name)
return names, x - 5
self.checkModule(CustomModuleDict(), (torch.tensor(0.5),))
def test_script_module_list_sequential(self):
class M(torch.jit.ScriptModule):
def __init__(self, mod_list):
super().__init__()
self.mods = mod_list
@torch.jit.script_method
def forward(self, v):
for m in self.mods:
v = m(v)
return v
with torch.jit.optimized_execution(False):
m = M(nn.Sequential(nn.ReLU()))
self.assertExportImportModule(m, (torch.randn(2, 2),))
def test_script_modulelist_index(self):
class Sub(torch.nn.Module):
def __init__(self, i):
super().__init__()
self.i = i
def forward(self, thing):
return thing - self.i
class M(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mods = nn.ModuleList([Sub(i) for i in range(10)])
def forward(self, v):
v = self.mods[4].forward(v)
v = self.mods[-1].forward(v)
v = self.mods[-9].forward(v)
return v
x = torch.tensor(1)
self.checkModule(M(), (x,))
class MForward(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mods = nn.ModuleList([Sub(i) for i in range(10)])
def forward(self, v):
v = self.mods[4](v)
v = self.mods[-1](v)
v = self.mods[-9](v)
return v
self.checkModule(MForward(), (torch.tensor(1),))
class M2(M):
def forward(self, v):
return self.mods[-11].forward(v)
with self.assertRaisesRegexWithHighlight(
Exception, "Index -11 out of range", "self.mods[-11]"
):
torch.jit.script(M2())
class M3(M):
def forward(self, v):
i = 3
return self.mods[i].forward(v)
with self.assertRaisesRegexWithHighlight(
Exception, "Enumeration is supported", "self.mods[i]"
):
torch.jit.script(M3())
class M4(M):
def forward(self, v):
i = 3
return self.mods[i].forward(v)
with self.assertRaisesRegex(Exception, "will fail because i is not a literal"):
torch.jit.script(M4())
def test_module_interface_special_methods(self):
class CustomModuleInterface(torch.nn.Module):
pass
class CustomModuleList(CustomModuleInterface, torch.nn.ModuleList):
def __init__(self, modules=None):
CustomModuleInterface.__init__(self)
torch.nn.ModuleList.__init__(self, modules)
class CustomSequential(CustomModuleInterface, torch.nn.Sequential):
def __init__(self, modules=None):
CustomModuleInterface.__init__(self)
torch.nn.Sequential.__init__(self, modules)
class CustomModuleDict(CustomModuleInterface, torch.nn.ModuleDict):
def __init__(self, modules=None):
CustomModuleInterface.__init__(self)
torch.nn.ModuleDict.__init__(self, modules)
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
# work around aliasing issue for 'is' operator by scripting ReLU up front
self.submod = torch.jit.script(torch.nn.ReLU())
self.modulelist = CustomModuleList([self.submod])
self.sequential = CustomSequential(self.submod)
self.moduledict = CustomModuleDict({"submod": self.submod})
def forward(self, inputs):
assert self.modulelist[0] is self.submod, (
"__getitem__ failing for ModuleList"
)
assert len(self.modulelist) == 1, "__len__ failing for ModuleList"
for module in self.modulelist:
assert module is self.submod, "__iter__ failing for ModuleList"
assert self.sequential[0] is self.submod, (
"__getitem__ failing for Sequential"
)
assert len(self.sequential) == 1, "__len__ failing for Sequential"
for module in self.sequential:
assert module is self.submod, "__iter__ failing for Sequential"
assert self.moduledict["submod"] is self.submod, (
"__getitem__ failing for ModuleDict"
)
assert len(self.moduledict) == 1, "__len__ failing for ModuleDict"
# note: unable to index moduledict with a string variable currently
i = 0
for _ in self.moduledict:
i += 1
assert i == len(self.moduledict), "iteration failing for ModuleDict"
assert "submod" in self.moduledict, "__contains__ fails for ModuleDict"
for key in self.moduledict:
assert key == "submod", "keys() fails for ModuleDict"
for item in self.moduledict.items():
assert item[0] == "submod", "items() fails for ModuleDict"
assert item[1] is self.submod, "items() fails for ModuleDict"
for value in self.moduledict.values():
assert value is self.submod, "values() fails for ModuleDict"
return inputs
m = MyModule()
self.checkModule(m, [torch.randn(2, 2)])
def test_special_method_with_override(self):
class CustomModuleInterface(torch.nn.Module):
pass
class CustomModuleList(CustomModuleInterface, torch.nn.ModuleList):
def __init__(self, modules=None):
CustomModuleInterface.__init__(self)
torch.nn.ModuleList.__init__(self, modules)
def __len__(self):
# this is arbitrary, just to check that the overridden py __len__ from
# CustomModuleList takes precedence over the automatically generated
# __len__ added by the jit compiler
return 2
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
# work around aliasing issue for 'is' operator by scripting ReLU up front
self.submod = torch.jit.script(torch.nn.ReLU())
self.modulelist = CustomModuleList([self.submod])
def forward(self, inputs):
assert len(self.modulelist) == 2, "__len__ failing for ModuleList"
return inputs
m = MyModule()
self.checkModule(m, [torch.randn(2, 2)])
torch.jit.script(m)
def test_moduledict_getitem(self):
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.relu = torch.jit.script(torch.nn.ReLU())
self.tanh = torch.jit.script(torch.nn.Tanh())
self.moduledict = torch.nn.ModuleDict(
{"relu": self.relu, "tanh": self.tanh}
)
def forward(self, input):
assert self.moduledict["relu"] is self.relu
assert self.moduledict["tanh"] is self.tanh
return input
m = MyModule()
self.checkModule(m, [torch.randn(2, 2)])
def test_moduledict_keyerror(self):
class BadModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.moduledict = torch.nn.ModuleDict({"foo": None, "bar": None})
def forward(self, input):
assert self.moduledict["blah"] == "blah", "this is a keyerror"
with self.assertRaisesRegexWithHighlight(
RuntimeError, "Key Error, blah", 'self.moduledict["blah"'
):
b = BadModule()
torch.jit.script(b)
class AnotherBadModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.moduledict = torch.nn.ModuleDict({"foo": None, "bar": None})
def forward(self, input):
idx = "blah"
assert self.moduledict[idx] == "blah", "this is a string literal error"
with self.assertRaisesRegexWithHighlight(
RuntimeError,
"Unable to extract string literal index. "
"ModuleDict indexing is only supported with string literals. "
"For example, 'i = \"a\"; self.layers\\[i\\]\\(x\\)' will fail "
"because i is not a literal.",
"self.moduledict[idx]",
):
b = AnotherBadModule()
torch.jit.script(b)
def test_normal_list_attribute_with_modules_error(self):
"""
Test that an attempt to script a module with a regular list attribute
containing other modules fails with a relevant error message.
"""
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = [torch.nn.ReLU(), torch.nn.ReLU()]
def forward(self):
return len(self.a)
error_msg = "Could not infer type of list element: Cannot infer concrete type of torch.nn.Module"
with self.assertRaisesRegexWithHighlight(RuntimeError, error_msg, "self.a"):
torch.jit.script(Mod())
def test_empty_dict_override_contains(self):
class CustomModuleInterface(torch.nn.Module):
pass
class CustomModuleDict(CustomModuleInterface, torch.nn.ModuleDict):
def __init__(self, modules=None):
CustomModuleInterface.__init__(self)
torch.nn.ModuleDict.__init__(self, modules)
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
# work around aliasing issue for 'is' operator by scripting ReLU up front
self.submod = torch.jit.script(torch.nn.ReLU())
self.moduledict = CustomModuleDict()
def forward(self, inputs):
assert "submod" not in self.moduledict, (
"__contains__ fails for ModuleDict"
)
return inputs
m = MyModule()
self.checkModule(m, [torch.randn(2, 2)])
def test_typed_module_dict(self):
"""
Test that a type annotation can be provided for a ModuleDict that allows
non-static indexing.
"""
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
pass
class ImplementsInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
if isinstance(inp, torch.Tensor):
return torch.max(inp, dim=0)
return inp
class DoesNotImplementInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.max(inp, dim=0)
# Test annotation of submodule.
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.d = torch.nn.ModuleDict({"module": ImplementsInterface()})
def forward(self, x: torch.Tensor, key: str) -> Any:
value: ModuleInterface = self.d[key]
return value.forward(x)
m = Mod()
self.checkModule(m, (torch.randn(2, 2), "module"))
# Test annotation of self.
class ModDict(torch.nn.ModuleDict):
def __init__(self) -> None:
super().__init__({"module": ImplementsInterface()})
def forward(self, x: torch.Tensor, key: str) -> Any:
submodule: ModuleInterface = self[key]
return submodule.forward(x)
m = ModDict()
self.checkModule(m, (torch.randn(2, 2), "module"))
# Test error message thrown when annotated attribute does not comply with the
# annotation.
class ModWithWrongAnnotation(torch.nn.ModuleDict):
def __init__(self) -> None:
super().__init__()
self.d = torch.nn.ModuleDict({"module": DoesNotImplementInterface()})
def forward(self, x: torch.Tensor, key: str) -> Any:
submodule: ModuleInterface = self.d[key]
return submodule.forward(x)
with self.assertRaisesRegexWithHighlight(
RuntimeError, r"Attribute module is not of annotated type", "self.d[key]"
):
torch.jit.script(ModWithWrongAnnotation())
def test_typed_module_list(self):
"""
Test that a type annotation can be provided for a ModuleList that allows
non-static indexing.
"""
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
pass
class ImplementsInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
if isinstance(inp, torch.Tensor):
return torch.max(inp, dim=0)
return inp
class DoesNotImplementInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.max(inp, dim=0)
# Test annotation of submodule.
class Mod(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.l = torch.nn.ModuleList([ImplementsInterface()])
def forward(self, x: torch.Tensor, idx: int) -> Any:
value: ModuleInterface = self.l[idx]
return value.forward(x)
m = Mod()
self.checkModule(m, (torch.randn(2, 2), 0))
# Test annotation of self.
class ModList(torch.nn.ModuleList):
def __init__(self) -> None:
super().__init__([ImplementsInterface()])
def forward(self, x: torch.Tensor, idx: int) -> Any:
submodule: ModuleInterface = self[idx]
return submodule.forward(x)
m = ModList()
self.checkModule(m, (torch.randn(2, 2), 0))
# Test error message thrown when annotated attribute does not comply with the
# annotation.
class ModWithWrongAnnotation(torch.nn.ModuleList):
def __init__(self) -> None:
super().__init__()
self.l = torch.nn.ModuleList([DoesNotImplementInterface()])
def forward(self, x: torch.Tensor, idx: int) -> Any:
submodule: ModuleInterface = self.l[idx]
return submodule.forward(x)
with self.assertRaisesRegexWithHighlight(
RuntimeError, r"Attribute 0 is not of annotated type", "self.l[idx]"
):
torch.jit.script(ModWithWrongAnnotation())
def test_module_properties(self):
class ModuleWithProperties(torch.nn.Module):
__jit_unused_properties__ = ["ignored_attr"]
def __init__(self, a: int):
super().__init__()
self.a = a
def forward(self, a: int, b: int):
self.attr = a + b
return self.attr
@property
def attr(self):
return self.a
@property
def ignored_attr(self):
return sum([self.a])
@torch.jit.unused
@property
def ignored_attr_2(self):
return sum([self.a])
@ignored_attr_2.setter
def ignored_attr_2(self, value):
self.a = sum([self.a])
@attr.setter
def attr(self, a: int):
if a > 0:
self.a = a
else:
self.a = 0
class ModuleWithNoSetter(torch.nn.Module):
def __init__(self, a: int):
super().__init__()
self.a = a
def forward(self, a: int, b: int):
self.attr + a + b
@property
def attr(self):
return self.a + 1
self.checkModule(
ModuleWithProperties(5),
(
5,
6,
),
)
self.checkModule(
ModuleWithProperties(5),
(
-5,
-6,
),
)
self.checkModule(
ModuleWithNoSetter(5),
(
5,
6,
),
)
self.checkModule(
ModuleWithNoSetter(5),
(
-5,
-6,
),
)
mod = ModuleWithProperties(3)
scripted_mod = torch.jit.script(mod)
with self.assertRaisesRegex(AttributeError, "has no attribute"):
scripted_mod.ignored_attr
def test_module_inplace_construct(self):
class M(nn.Module):
def __init__(self, start: int):
super().__init__()
self.linear = nn.Linear(3, 3)
self.attribute = start
self.parameter = nn.Parameter(torch.tensor(3, dtype=torch.float))
def method(self) -> int:
return self.attribute
@torch.jit.unused
def unused_method(self):
return self.attribute + self.attribute
def forward(self, x):
return self.linear(self.linear(x))
class N(nn.Module):
def __init__(self) -> None:
super().__init__()
self.linear = nn.Linear(4, 4)
@torch.jit.ignore
def ignored_method(self, x):
return x
def forward(self, x):
return self.linear(x)
m = torch.jit.script(M(3))
n = torch.jit.script(N())
n._reconstruct(m._c)
inp = torch.rand((3))
# Check that both modules produce the same output.
with torch.no_grad():
m_out = m(inp)
n_out = n(inp)
self.assertEqual(m_out, n_out)
# Check that ignored method is still intact.
self.assertEqual(inp, n.ignored_method(inp))
def test_parameterlist_script_getitem(self):
class MyModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.module_list = nn.ModuleList([nn.Linear(1, 1) for _ in range(10)])
self.parameter_list = nn.ParameterList(
[nn.Parameter(torch.zeros(1)) for _ in range(10)]
)
def forward(self, x):
self.module_list[0]
self.parameter_list[0]
return x
self.checkModule(MyModule(), (torch.zeros(1)))
def test_parameterlist_script_iter(self):
class MyModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.module_list = nn.ModuleList([nn.Linear(1, 1) for _ in range(10)])
self.parameter_list = nn.ParameterList(
[nn.Parameter(torch.zeros(1)) for _ in range(10)]
)
def forward(self, x):
r = x
for i, p in enumerate(self.parameter_list):
r = r + p + i
return r
self.checkModule(MyModule(), (torch.zeros(1),))
def test_parameterdict_script_getitem(self):
class MyModule(nn.Module):
def __init__(self) -> None:
super().__init__()
self.parameter_dict = nn.ParameterDict(
{k: nn.Parameter(torch.zeros(1)) for k in ["a", "b", "c"]}
)
def forward(self, x):
return (
self.parameter_dict["a"] * x
+ self.parameter_dict["b"] * self.parameter_dict["c"]
)
self.checkModule(MyModule(), (torch.ones(1),))
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestModuleContainers |
python | cython__cython | Demos/benchmarks/bm_dataclasses.py | {
"start": 203,
"end": 2976
} | class ____(object):
x: float
y: float
z: float
def __init__(self, i):
self.x = x = sin(i)
self.y = cos(i) * 3
self.z = (x * x) / 2
def normalize(self):
x = self.x
y = self.y
z = self.z
norm = sqrt(x * x + y * y + z * z)
self.x /= norm
self.y /= norm
self.z /= norm
def maximize(self, other):
self.x = self.x if self.x > other.x else other.x
self.y = self.y if self.y > other.y else other.y
self.z = self.z if self.z > other.z else other.z
return self
def benchmark_create(n: cython.Py_ssize_t) -> list[Point]:
points = [None] * n
for i in range(n):
points[i] = Point(i)
return points
def benchmark_float(points: list[Point]):
for p in points:
p.normalize()
next = points[0]
for p in points[1:]:
next = next.maximize(p)
return next
def benchmark_repr(points: list[Point]):
for p in points:
repr(p)
def benchmark_compare(points: list[Point]):
all_results: bint = False
result: bint
for p1, p2 in pairwise(points):
result = False
result |= p1 == p2
result |= p1 != p2
result |= p1 < p2
result |= p1 > p2
result |= p1 <= p2
result |= p1 >= p2
all_results |= result
return all_results
def time_benchmarks(n, timer=time.perf_counter):
t0 = timer()
points = benchmark_create(n)
t1 = timer()
benchmark_float(points)
t2 = timer()
benchmark_repr(points)
t3 = timer()
benchmark_compare(points)
t4 = timer()
return {
'create': t1 - t0,
'float': t2 - t1,
'repr': t3 - t2,
'compare': t4 - t3,
}
POINTS = 10_000
def run_benchmark(repeat=True, scale=10):
from util import repeat_to_accuracy, scale_subbenchmarks
timings = time_benchmarks(POINTS)
scales = scale_subbenchmarks(timings, scale)
def timeit(func, arg, scale, timer):
i: cython.long
t0 = timer()
for i in range(scale):
func(arg)
t1 = timer()
return t1 - t0
collected_timings = defaultdict(list)
points = benchmark_create(POINTS)
collected_timings['create'] = repeat_to_accuracy(
timeit, benchmark_create, POINTS, scale=scales['create'], repeat=repeat)[0]
for name, bench_func in [
('float', benchmark_float),
('repr', benchmark_repr),
('compare', benchmark_compare),
]:
collected_timings[name] = repeat_to_accuracy(
timeit, bench_func, points, scale=scales[name], repeat=repeat)[0]
for name, timings in collected_timings.items():
print(f"bm_dataclasses[{name}]: {timings}")
| Point |
python | django__django | tests/postgres_tests/test_operations.py | {
"start": 25242,
"end": 28033
} | class ____(OperationTestBase):
app_label = "test_add_constraint_not_valid"
def test_non_check_constraint_not_supported(self):
constraint = UniqueConstraint(fields=["pink"], name="pony_pink_uniq")
msg = "AddConstraintNotValid.constraint must be a check constraint."
with self.assertRaisesMessage(TypeError, msg):
AddConstraintNotValid(model_name="pony", constraint=constraint)
def test_add(self):
table_name = f"{self.app_label}_pony"
constraint_name = "pony_pink_gte_check"
constraint = CheckConstraint(condition=Q(pink__gte=4), name=constraint_name)
operation = AddConstraintNotValid("Pony", constraint=constraint)
project_state, new_state = self.make_test_state(self.app_label, operation)
self.assertEqual(
operation.describe(),
f"Create not valid constraint {constraint_name} on model Pony",
)
self.assertEqual(
operation.formatted_description(),
f"+ Create not valid constraint {constraint_name} on model Pony",
)
self.assertEqual(
operation.migration_name_fragment,
f"pony_{constraint_name}_not_valid",
)
self.assertEqual(
len(new_state.models[self.app_label, "pony"].options["constraints"]),
1,
)
self.assertConstraintNotExists(table_name, constraint_name)
Pony = new_state.apps.get_model(self.app_label, "Pony")
self.assertEqual(len(Pony._meta.constraints), 1)
Pony.objects.create(pink=2, weight=1.0)
# Add constraint.
with connection.schema_editor(atomic=True) as editor:
operation.database_forwards(
self.app_label, editor, project_state, new_state
)
msg = f'check constraint "{constraint_name}"'
with self.assertRaisesMessage(IntegrityError, msg), transaction.atomic():
Pony.objects.create(pink=3, weight=1.0)
self.assertConstraintExists(table_name, constraint_name)
# Reversal.
with connection.schema_editor(atomic=True) as editor:
operation.database_backwards(
self.app_label, editor, project_state, new_state
)
self.assertConstraintNotExists(table_name, constraint_name)
Pony.objects.create(pink=3, weight=1.0)
# Deconstruction.
name, args, kwargs = operation.deconstruct()
self.assertEqual(name, "AddConstraintNotValid")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"model_name": "Pony", "constraint": constraint})
@unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL specific tests.")
@modify_settings(INSTALLED_APPS={"append": "migrations"})
| AddConstraintNotValidTests |
python | numba__numba | numba/tests/test_support.py | {
"start": 276,
"end": 13232
} | class ____(TestCase):
"""
Tests for TestCase.assertPreciseEqual().
"""
int_types = [int]
np_float_types = [np.float32, np.float64]
float_types = [float] + np_float_types
np_complex_types = [np.complex64, np.complex128]
complex_types = [complex] + np_complex_types
bool_types = [bool, np.bool_]
def eq(self, left, right, **kwargs):
def assert_succeed(left, right):
self.assertPreciseEqual(left, right, **kwargs)
self.assertPreciseEqual(right, left, **kwargs)
assert_succeed(left, right)
assert_succeed((left, left), (right, right))
assert_succeed([left, left], [right, right])
def ne(self, left, right, **kwargs):
def assert_fail(left, right):
try:
self.assertPreciseEqual(left, right, **kwargs)
except AssertionError:
pass
else:
self.fail("%s and %s unexpectedly considered equal" % (left, right))
assert_fail(left, right)
assert_fail(right, left)
assert_fail((left, left), (right, right))
assert_fail((right, right), (left, left))
assert_fail([left, left], [right, right])
assert_fail([right, right], [left, left])
def test_types(self):
# assertPreciseEqual() should test for type compatibility
# int-like, float-like, complex-like are not compatible
for i, f, c in itertools.product(self.int_types, self.float_types,
self.complex_types):
self.ne(i(1), f(1))
self.ne(f(1), c(1))
self.ne(i(1), c(1))
# int and long are compatible between each other
for u, v in itertools.product(self.int_types, self.int_types):
self.eq(u(1), v(1))
# int and bool are not compatible between each other
for u, v in itertools.product(self.int_types, self.bool_types):
self.ne(u(1), v(1))
# NumPy float types are not compatible between each other
for u, v in itertools.product(self.np_float_types, self.np_float_types):
if u is v:
self.eq(u(1), v(1))
else:
self.ne(u(1), v(1))
# NumPy complex types are not compatible between each other
for u, v in itertools.product(self.np_complex_types, self.np_complex_types):
if u is v:
self.eq(u(1), v(1))
else:
self.ne(u(1), v(1))
def test_int_values(self):
for tp in self.int_types:
for prec in ['exact', 'single', 'double']:
self.eq(tp(0), tp(0), prec=prec)
self.ne(tp(0), tp(1), prec=prec)
self.ne(tp(-1), tp(1), prec=prec)
self.ne(tp(2**80), tp(1+2**80), prec=prec)
def test_bool_values(self):
for tpa, tpb in itertools.product(self.bool_types, self.bool_types):
self.eq(tpa(True), tpb(True))
self.eq(tpa(False), tpb(False))
self.ne(tpa(True), tpb(False))
def test_abs_tol_parse(self):
# check invalid values in abs_tol kwarg raises
with self.assertRaises(ValueError):
self.eq(np.float64(1e-17), np.float64(1e-17), abs_tol="invalid")
with self.assertRaises(ValueError):
self.eq(np.float64(1), np.float64(2), abs_tol=int(7))
def test_float_values(self):
for tp in self.float_types:
for prec in ['exact', 'single', 'double']:
self.eq(tp(1.5), tp(1.5), prec=prec)
# Signed zeros
self.eq(tp(0.0), tp(0.0), prec=prec)
self.eq(tp(-0.0), tp(-0.0), prec=prec)
self.ne(tp(0.0), tp(-0.0), prec=prec)
self.eq(tp(0.0), tp(-0.0), prec=prec, ignore_sign_on_zero=True)
# Infinities
self.eq(tp(INF), tp(INF), prec=prec)
self.ne(tp(INF), tp(1e38), prec=prec)
self.eq(tp(-INF), tp(-INF), prec=prec)
self.ne(tp(INF), tp(-INF), prec=prec)
# NaNs
self.eq(tp(NAN), tp(NAN), prec=prec)
self.ne(tp(NAN), tp(0), prec=prec)
self.ne(tp(NAN), tp(INF), prec=prec)
self.ne(tp(NAN), tp(-INF), prec=prec)
def test_float64_values(self):
for tp in [float, np.float64]:
self.ne(tp(1.0 + DBL_EPSILON), tp(1.0))
def test_float32_values(self):
tp = np.float32
self.ne(tp(1.0 + FLT_EPSILON), tp(1.0))
def test_float64_values_inexact(self):
for tp in [float, np.float64]:
for scale in [1.0, -2**3, 2**-4, -2**-20]:
a = scale * 1.0
b = scale * (1.0 + DBL_EPSILON)
c = scale * (1.0 + DBL_EPSILON * 2)
d = scale * (1.0 + DBL_EPSILON * 4)
self.ne(tp(a), tp(b))
self.ne(tp(a), tp(b), prec='exact')
self.eq(tp(a), tp(b), prec='double')
self.eq(tp(a), tp(b), prec='double', ulps=1)
self.ne(tp(a), tp(c), prec='double')
self.eq(tp(a), tp(c), prec='double', ulps=2)
self.ne(tp(a), tp(d), prec='double', ulps=2)
self.eq(tp(a), tp(c), prec='double', ulps=3)
self.eq(tp(a), tp(d), prec='double', ulps=3)
# test absolute tolerance based on eps
self.eq(tp(1e-16), tp(3e-16), prec='double', abs_tol="eps")
self.ne(tp(1e-16), tp(4e-16), prec='double', abs_tol="eps")
# test absolute tolerance based on value
self.eq(tp(1e-17), tp(1e-18), prec='double', abs_tol=1e-17)
self.ne(tp(1e-17), tp(3e-17), prec='double', abs_tol=1e-17)
def test_float32_values_inexact(self):
tp = np.float32
for scale in [1.0, -2**3, 2**-4, -2**-20]:
# About the choice of 0.9: there seem to be issues when
# converting
a = scale * 1.0
b = scale * (1.0 + FLT_EPSILON)
c = scale * (1.0 + FLT_EPSILON * 2)
d = scale * (1.0 + FLT_EPSILON * 4)
self.ne(tp(a), tp(b))
self.ne(tp(a), tp(b), prec='exact')
self.ne(tp(a), tp(b), prec='double')
self.eq(tp(a), tp(b), prec='single')
self.ne(tp(a), tp(c), prec='single')
self.eq(tp(a), tp(c), prec='single', ulps=2)
self.ne(tp(a), tp(d), prec='single', ulps=2)
self.eq(tp(a), tp(c), prec='single', ulps=3)
self.eq(tp(a), tp(d), prec='single', ulps=3)
# test absolute tolerance based on eps
self.eq(tp(1e-7), tp(2e-7), prec='single', abs_tol="eps")
self.ne(tp(1e-7), tp(3e-7), prec='single', abs_tol="eps")
# test absolute tolerance based on value
self.eq(tp(1e-7), tp(1e-8), prec='single', abs_tol=1e-7)
self.ne(tp(1e-7), tp(3e-7), prec='single', abs_tol=1e-7)
def test_complex_values(self):
# Complex literals with signed zeros are confusing, better use
# the explicit constructor.
c_pp, c_pn, c_np, c_nn = [complex(0.0, 0.0), complex(0.0, -0.0),
complex(-0.0, 0.0), complex(-0.0, -0.0)]
for tp in self.complex_types:
for prec in ['exact', 'single', 'double']:
self.eq(tp(1 + 2j), tp(1 + 2j), prec=prec)
self.ne(tp(1 + 1j), tp(1 + 2j), prec=prec)
self.ne(tp(2 + 2j), tp(1 + 2j), prec=prec)
# Signed zeros
self.eq(tp(c_pp), tp(c_pp), prec=prec)
self.eq(tp(c_np), tp(c_np), prec=prec)
self.eq(tp(c_nn), tp(c_nn), prec=prec)
self.ne(tp(c_pp), tp(c_pn), prec=prec)
self.ne(tp(c_pn), tp(c_nn), prec=prec)
# Infinities
self.eq(tp(complex(INF, INF)), tp(complex(INF, INF)), prec=prec)
self.eq(tp(complex(INF, -INF)), tp(complex(INF, -INF)), prec=prec)
self.eq(tp(complex(-INF, -INF)), tp(complex(-INF, -INF)), prec=prec)
self.ne(tp(complex(INF, INF)), tp(complex(INF, -INF)), prec=prec)
self.ne(tp(complex(INF, INF)), tp(complex(-INF, INF)), prec=prec)
self.eq(tp(complex(INF, 0)), tp(complex(INF, 0)), prec=prec)
# NaNs
self.eq(tp(complex(NAN, 0)), tp(complex(NAN, 0)), prec=prec)
self.eq(tp(complex(0, NAN)), tp(complex(0, NAN)), prec=prec)
self.eq(tp(complex(NAN, NAN)), tp(complex(NAN, NAN)), prec=prec)
self.eq(tp(complex(INF, NAN)), tp(complex(INF, NAN)), prec=prec)
self.eq(tp(complex(NAN, -INF)), tp(complex(NAN, -INF)), prec=prec)
# FIXME
#self.ne(tp(complex(NAN, INF)), tp(complex(NAN, -INF)))
#self.ne(tp(complex(NAN, 0)), tp(complex(NAN, 1)))
#self.ne(tp(complex(INF, NAN)), tp(complex(-INF, NAN)))
#self.ne(tp(complex(0, NAN)), tp(complex(1, NAN)))
#self.ne(tp(complex(NAN, 0)), tp(complex(0, NAN)))
# XXX should work with other precisions as well?
self.ne(tp(complex(INF, 0)), tp(complex(INF, 1)), prec='exact')
def test_complex128_values_inexact(self):
for tp in [complex, np.complex128]:
for scale in [1.0, -2**3, 2**-4, -2**-20]:
a = scale * 1.0
b = scale * (1.0 + DBL_EPSILON)
c = scale * (1.0 + DBL_EPSILON * 2)
aa = tp(complex(a, a))
ab = tp(complex(a, b))
bb = tp(complex(b, b))
self.ne(tp(aa), tp(ab))
self.eq(tp(aa), tp(ab), prec='double')
self.eq(tp(ab), tp(bb), prec='double')
self.eq(tp(aa), tp(bb), prec='double')
ac = tp(complex(a, c))
cc = tp(complex(c, c))
self.ne(tp(aa), tp(ac), prec='double')
self.ne(tp(ac), tp(cc), prec='double')
self.eq(tp(aa), tp(ac), prec='double', ulps=2)
self.eq(tp(ac), tp(cc), prec='double', ulps=2)
self.eq(tp(aa), tp(cc), prec='double', ulps=2)
self.eq(tp(aa), tp(cc), prec='single')
def test_complex64_values_inexact(self):
tp = np.complex64
for scale in [1.0, -2**3, 2**-4, -2**-20]:
a = scale * 1.0
b = scale * (1.0 + FLT_EPSILON)
c = scale * (1.0 + FLT_EPSILON * 2)
aa = tp(complex(a, a))
ab = tp(complex(a, b))
bb = tp(complex(b, b))
self.ne(tp(aa), tp(ab))
self.ne(tp(aa), tp(ab), prec='double')
self.eq(tp(aa), tp(ab), prec='single')
self.eq(tp(ab), tp(bb), prec='single')
self.eq(tp(aa), tp(bb), prec='single')
ac = tp(complex(a, c))
cc = tp(complex(c, c))
self.ne(tp(aa), tp(ac), prec='single')
self.ne(tp(ac), tp(cc), prec='single')
self.eq(tp(aa), tp(ac), prec='single', ulps=2)
self.eq(tp(ac), tp(cc), prec='single', ulps=2)
self.eq(tp(aa), tp(cc), prec='single', ulps=2)
def test_enums(self):
values = [Color.red, Color.green, Color.blue, Shake.mint,
Shape.circle, Shape.square, Planet.EARTH, Planet.MERCURY]
for val in values:
self.eq(val, val)
self.ne(val, val.value)
for a, b in itertools.combinations(values, 2):
self.ne(a, b)
def test_arrays(self):
a = np.arange(1, 7, dtype=np.int16).reshape((2, 3))
b = a.copy()
self.eq(a, b)
# Different values
self.ne(a, b + 1)
self.ne(a, b[:-1])
self.ne(a, b.T)
# Different dtypes
self.ne(a, b.astype(np.int32))
# Different layout
self.ne(a, b.T.copy().T)
# Different ndim
self.ne(a, b.flatten())
# Different writeability
b.flags.writeable = False
self.ne(a, b)
# Precision
a = np.arange(1, 3, dtype=np.float64)
b = a * (1.0 + DBL_EPSILON)
c = a * (1.0 + DBL_EPSILON * 2)
self.ne(a, b)
self.eq(a, b, prec='double')
self.ne(a, c, prec='double')
def test_npdatetime(self):
a = np.datetime64('1900', 'Y')
b = np.datetime64('1900', 'Y')
c = np.datetime64('1900-01-01', 'D')
d = np.datetime64('1901', 'Y')
self.eq(a, b)
# Different unit
self.ne(a, c)
# Different value
self.ne(a, d)
def test_nptimedelta(self):
a = np.timedelta64(1, 'h')
b = np.timedelta64(1, 'h')
c = np.timedelta64(60, 'm')
d = np.timedelta64(2, 'h')
self.eq(a, b)
# Different unit
self.ne(a, c)
# Different value
self.ne(a, d)
| TestAssertPreciseEqual |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 12856,
"end": 12971
} | class ____(OpcodeWithArg):
_FLAGS = HAS_JREL | HAS_ARGUMENT | STORE_JUMP | PUSHES_BLOCK
__slots__ = ()
| SETUP_LOOP |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.