language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/megatron_bert/modeling_megatron_bert.py | {
"start": 11155,
"end": 11837
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Based on transformers.models.bert.modeling_bert.BertOutput. Moved LayerNorm to MegatronBertLayer below.
| MegatronBertIntermediate |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/big_button.py | {
"start": 80,
"end": 338
} | class ____(App):
CSS = """
Button {
height: 9;
}
"""
def compose(self) -> ComposeResult:
yield Button("Hello")
yield Button("Hello\nWorld !!")
if __name__ == "__main__":
app = ButtonApp()
app.run()
| ButtonApp |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 93787,
"end": 97072
} | class ____(rv_continuous):
r"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is:
.. math::
f(x, c) = (1 + c x)^{-1 - 1/c}
defined for :math:`x \ge 0` if :math:`c \ge 0`, and for
:math:`0 \le x \le -1/c` if :math:`c < 0`.
`genpareto` takes ``c`` as a shape parameter for :math:`c`.
For :math:`c=0`, `genpareto` reduces to the exponential
distribution, `expon`:
.. math::
f(x, 0) = \exp(-x)
For :math:`c=-1`, `genpareto` is uniform on ``[0, 1]``:
.. math::
f(x, -1) = 1
%(after_notes)s
%(example)s
"""
def _argcheck(self, c):
return np.isfinite(c)
def _shape_info(self):
return [_ShapeInfo("c", False, (-np.inf, np.inf), (False, False))]
def _get_support(self, c):
c = np.asarray(c)
a = np.broadcast_arrays(self.a, c)[0].copy()
b = xpx.apply_where(c < 0, c, lambda c: -1. / c,
fill_value=np.inf)
return a, b
def _pdf(self, x, c):
# genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
return np.exp(self._logpdf(x, c))
def _logpdf(self, x, c):
return xpx.apply_where((x == x) & (c != 0), (x, c),
lambda x, c: -sc.xlog1py(c + 1., c*x) / c,
fill_value=-x)
def _cdf(self, x, c):
return -sc.inv_boxcox1p(-x, -c)
def _sf(self, x, c):
return sc.inv_boxcox(-x, -c)
def _logsf(self, x, c):
return xpx.apply_where((x == x) & (c != 0), (x, c),
lambda x, c: -sc.log1p(c*x) / c,
fill_value=-x)
def _ppf(self, q, c):
return -sc.boxcox1p(-q, -c)
def _isf(self, q, c):
return -sc.boxcox(q, -c)
def _stats(self, c, moments='mv'):
m, v, s, k = None, None, None, None
if 'm' in moments:
m = xpx.apply_where(c < 1, c,
lambda xi: 1 / (1 - xi),
fill_value=np.inf)
if 'v' in moments:
v = xpx.apply_where(c < 1/2, c,
lambda xi: 1 / (1 - xi)**2 / (1 - 2 * xi),
fill_value=np.nan)
if 's' in moments:
s = xpx.apply_where(
c < 1/3, c,
lambda xi: 2 * (1 + xi) * np.sqrt(1 - 2*xi) / (1 - 3*xi),
fill_value=np.nan)
if 'k' in moments:
k = xpx.apply_where(
c < 1/4, c,
lambda xi: 3 * (1 - 2*xi) * (2*xi**2 + xi + 3)
/ (1 - 3*xi) / (1 - 4*xi) - 3,
fill_value=np.nan)
return m, v, s, k
def _munp(self, n, c):
def __munp(c):
val = 0.0
k = np.arange(0, n + 1)
for ki, cnk in zip(k, sc.comb(n, k)):
val = val + cnk * (-1) ** ki / (1.0 - c * ki)
return np.where(c * n < 1, val * (-1.0 / c) ** n, np.inf)
return xpx.apply_where(c != 0, c, __munp, fill_value=sc.gamma(n + 1))
def _entropy(self, c):
return 1. + c
genpareto = genpareto_gen(a=0.0, name='genpareto')
| genpareto_gen |
python | encode__django-rest-framework | tests/test_permissions.py | {
"start": 19642,
"end": 19736
} | class ____(PermissionInstanceView):
permission_classes = (BasicObjectPerm,)
| DeniedObjectView |
python | pypa__warehouse | tests/unit/utils/test_paginate.py | {
"start": 1872,
"end": 2238
} | class ____(FakeQuery):
def __init__(self, fake, options=None, suggestion=None):
super().__init__(fake)
self.options = options
self.suggestion = suggestion
def execute(self):
data = self.fake[self.range]
total = len(self.fake)
return FakeSuggestResult(data, total, self.options, self.suggestion)
| FakeSuggestQuery |
python | pytorch__pytorch | torch/_dynamo/functional_export.py | {
"start": 18562,
"end": 33526
} | class ____:
graph_module: torch.fx.GraphModule
in_spec: TreeSpec
in_shuffle_graph: torch.fx.GraphModule
num_flat_args: int
out_spec: TreeSpec
out_shuffle_graph: torch.fx.GraphModule
root: Optional[torch.nn.Module] = None
def pytreeify(
out: CaptureOutput, mod: Any, args: tuple[Any, ...], kwargs: dict[str, Any]
) -> PyTreeifyOutput:
"""
Given a dynamo capture output, return a callable graph module that
contain the following information:
1. input/output pytree spec
2. input/output shuffle functions
Input shuffle functions are the converters taking pytree falttened inputs
and reorder them to the calling convention of dynamo raw graph module.
Output shuffle functions are the converters taking the outputs of the
dynamo raw graph module and convert them to the pytree format.
This function will replay any side effects that happened during the bytecode,
so it is important to check against side effects before calling this function.
"""
assert out.backend_input is not None
backend_input = out.backend_input
root = None
if isinstance(mod, torch.nn.Module):
args = (mod,) + args
root = mod
elif inspect.ismethod(mod):
args = (mod.__self__,) + args
root = mod.__self__
flat_real_args, in_spec = pytree.tree_flatten((args, kwargs))
torch._dynamo.eval_frame.check_user_input_output(
flat_real_args[1 if root else 0 :], UserErrorType.INVALID_INPUT
)
f_globals = out.graph_capture_output.f_globals
class Yield(Exception):
pass
class InShuffle(torch.nn.Module):
def __init__(self):
super().__init__()
self.mod = mod
self.num_inputs = len(flat_real_args)
self.gm_inputs = None
def forward(self, *flat_proxy_args):
args, kwargs = pytree.tree_unflatten(
[flat_proxy_args[i] for i in range(self.num_inputs)], in_spec
)
def backend_dummy(*example_inputs):
self.gm_inputs = example_inputs
raise Yield
try:
out.forward_callable(
compiled_fn=backend_dummy, extra_globals=f_globals
)(*args, **kwargs)
except Yield:
assert self.gm_inputs is not None
return self.gm_inputs
raise RuntimeError
fake_mode = torch._dynamo.utils.detect_fake_mode(flat_real_args)
if fake_mode and fake_mode.shape_env is None:
fake_mode.shape_env = ShapeEnv()
in_shuffle_graph = make_fx(
InShuffle(), tracing_mode="symbolic", proxy_module_inputs=True
)(*flat_real_args)
_normalize_shuffle_graph(in_shuffle_graph)
output_node = next(iter(reversed(backend_input.graph_module.graph.nodes)))
class OutShuffle(torch.nn.Module):
def __init__(self):
super().__init__()
self.num_inputs = len(flat_real_args)
self.num_outputs = len(output_node.args[0])
self.out_spec: Optional[TreeSpec] = None
def forward(self, *flat_proxy_args):
args, kwargs = pytree.tree_unflatten(
[flat_proxy_args[i] for i in range(self.num_inputs)], in_spec
)
def backend_dummy(*example_inputs):
return [
flat_proxy_args[self.num_inputs + i]
for i in range(self.num_outputs)
]
results = out.forward_callable(
compiled_fn=backend_dummy, extra_globals=f_globals
)(*args, **kwargs)
ret, self.out_spec = pytree.tree_flatten(results)
return ret
out_shuffle = OutShuffle()
flat_out_shuffle_args = [
*flat_real_args,
*pytree.tree_map_only(
torch.fx.Node,
lambda x: fake_mode.from_tensor(x.meta["example_value"])
if fake_mode
else x.meta["example_value"],
output_node.args[0],
),
]
fake_mode = torch._dynamo.utils.detect_fake_mode(flat_out_shuffle_args)
if fake_mode and fake_mode.shape_env is None:
fake_mode.shape_env = ShapeEnv()
with enable_python_dispatcher():
out_shuffle_graph = make_fx(
out_shuffle, tracing_mode="real", proxy_module_inputs=True
)(*flat_out_shuffle_args)
_normalize_shuffle_graph(out_shuffle_graph)
assert out_shuffle.out_spec is not None
return PyTreeifyOutput(
backend_input.graph_module,
in_spec,
in_shuffle_graph,
len(flat_real_args),
out_shuffle.out_spec,
out_shuffle_graph,
root=root, # type: ignore[arg-type]
)
def normalize_graph_module(gm):
for node in gm.graph.nodes:
if node.op == "placeholder":
node.meta["val"] = node.meta["example_value"]
def dynamo_graph_capture_for_export(
mod: Callable[..., Any],
constraints: Optional[list[Constraint]] = None,
) -> Callable[..., Any]:
def inner(*args: Any, **kwargs: Any) -> Any:
assert not torch._dynamo.config.install_free_tensors
with (
torch._dynamo.config.patch(side_effect_replay_policy="warn"),
get_metrics_context(),
dynamo_timed("fullgraph_capture"),
):
out = fullgraph_capture(
mod,
args,
kwargs,
constraints=constraints,
)
# TODO filter out side effects.
pyt = pytreeify(out, mod, args, kwargs)
graph_module = pyt.graph_module
tree_leaf_names = [
graph_module.graph._graph_namespace.create_name(f"_tree_leaf_{i}", None)
for i in range(pyt.num_flat_args)
]
graph_module.graph._codegen = _ExportCodeGen(
_PyTreeInfo(
# TODO we should be able to use the names from dynamo graph directly.
argument_names(inspect.signature(mod), args, kwargs),
pyt.in_spec,
pyt.out_spec,
),
pyt.in_shuffle_graph,
pyt.out_shuffle_graph,
tree_leaf_names,
graph_module if isinstance(pyt.root, torch.nn.Module) else pyt.root,
) # type: ignore[attr-defined]
normalize_graph_module(graph_module)
if pyt.root is not None:
graph_module._parameters = pyt.root._parameters.copy()
graph_module._buffers = pyt.root._buffers.copy()
assert all(not hasattr(graph_module, m) for m in pyt.root._modules)
graph_module._modules.update(pyt.root._modules)
graph_module._non_persistent_buffers_set = (
pyt.root._non_persistent_buffers_set.copy()
)
if sys.version_info >= (3, 14):
import annotationlib # added in 3.14
annotations = annotationlib.get_annotations(torch.nn.Module)
else:
annotations = getattr(torch.nn.Module, "__annotations__", None)
for name, value in pyt.root.__dict__.items():
if annotations and name not in annotations:
graph_module.__dict__[name] = value
graph_module._in_spec = pyt.in_spec
graph_module._out_spec = pyt.out_spec
assert not hasattr(graph_module, "_in_shuffle_graph")
assert not hasattr(graph_module, "_out_shuffle_graph")
graph_module._in_shuffle_graph = pyt.in_shuffle_graph
graph_module._out_shuffle_graph = pyt.out_shuffle_graph
delattr(graph_module, "_param_name_to_source")
graph_module.recompile()
graph_module.meta["module_call_specs"] = (
out.graph_capture_output.output_graph.export_metadata.module_call_spec
)
assert out.backend_input is not None
graph_module.meta["fake_mode"] = out.backend_input.fake_mode # type: ignore[attr-defined]
graph_module.meta["fake_mode"].allow_non_fake_inputs = True
tracing_context = TracingContext(graph_module.meta["fake_mode"])
tracing_context.tensor_to_context = out.backend_input.tensor_to_context # type: ignore[attr-defined]
graph_module.meta["tracing_context"] = tracing_context
return graph_module
return inner
def _dynamo_graph_capture_for_export(
mod: Callable[..., Any],
*,
constraints: Optional[list[Constraint]] = None,
dynamic_shapes: Optional[Union[dict[str, Any], tuple[Any], list[Any]]] = None,
) -> Callable[..., torch.fx.GraphModule]:
"""
Improved dynamo graph capture using transformer approach with proper fake tensor handling.
This function creates a capture instance that handles:
1. PyTree flattening/unflattening with proper input ordering
2. Dynamo graph capture with export-specific context
3. FX graph transformation for export compatibility
4. Proper fake tensor metadata preservation
5. Dynamic dimension constraint handling
Notable improvements over manual approach:
- Uses FX Transformer for cleaner graph manipulation
- Properly handles fake tensor metadata and dynamic dimensions
- Preserves all necessary metadata for export
- More robust error handling and edge case management
TODO:
1. Are we actually gonna run the bytecode?
2. Need to attach guards
"""
_dynamic_shapes = dynamic_shapes
_constraints = constraints
def inner(*args: Any, **kwargs: Any) -> torch.fx.GraphModule:
# This sets the is_exporting flag when building guards.
with _compiling_state_context():
flat_inputs, in_spec = pytree.tree_flatten((args, kwargs))
check_user_input_output(flat_inputs, UserErrorType.INVALID_INPUT)
module_to_trace = ModuleToTrace(mod, in_spec)
orig_callable = mod.forward if isinstance(mod, torch.nn.Module) else mod
constraints: Optional[list[Constraint]] = _constraints
dynamic_shapes: Optional[Union[dict[str, Any], tuple[Any], list[Any]]] = (
_dynamic_shapes
)
from . import reset # type: ignore[attr-defined]
reset()
dynamo_config_ctx = torch._dynamo.config.patch(
specialize_int=True,
specialize_float=True,
assume_static_by_default=True,
automatic_dynamic_shapes=False,
capture_dynamic_output_shape_ops=True,
capture_scalar_outputs=True,
constant_fold_autograd_profiler_enabled=True,
log_graph_in_out_metadata=True,
# install_free_tensors ensures that params and buffers are still
# added as graph attributes, and makes Dynamo emits graphs that
# follow export pytree-able input requirements In future, if we
# fully rely on bytecode for the runtime, we can turn this flag
# off.
install_free_tensors=torch._dynamo.config.install_free_tensors_for_export,
)
with (
get_metrics_context(),
dynamo_timed("fullgraph_capture"),
dynamo_config_ctx,
):
out = fullgraph_capture(
module_to_trace,
tuple(flat_inputs),
constraints=_constraints,
_is_export_deprecated_do_not_use=True,
)
assert out.graph_capture_output.output_graph is not None
example_inputs: list[Any] = []
if out.backend_input is not None:
graph = out.backend_input.graph_module
fake_mode = out.backend_input.fake_mode
example_inputs = out.backend_input.example_inputs
else:
graph = torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph())
graph.graph.output(None)
graph.recompile()
fake_mode = None
_suggest_or_raise_constraint_violation(
module_to_trace,
orig_callable,
fake_mode,
out,
args,
kwargs,
dynamic_shapes,
)
# Extract export metadata from the new location
export_metadata = out.graph_capture_output.output_graph.export_metadata
graph_inputs = export_metadata.graph_input_idx_to_local_source
graph_output_map = export_metadata.output_return_type
out_spec = export_metadata.out_spec
module_call_spec = export_metadata.module_call_spec
# Compute dynamic dimensions for each input based on constraints
flat_args_dynamic_dims = [
{
c.dim
for c in (constraints or ())
if (
c.t_id == id(x)
and not isinstance(c, _RelaxedConstraint)
and c.constraint_range.vr.lower != c.constraint_range.vr.upper
)
}
for x in flat_inputs
]
# Create input order mapping from dynamo's internal order to user order
graph_input_order: dict[int, int] = {}
for inp in graph_inputs:
source = graph_inputs[inp]
assert isinstance(source, torch._dynamo.source.GetItemSource)
graph_input_order[source.index] = len(graph_input_order)
for real_idx, graph_idx in graph_input_order.items():
flat_inputs[real_idx] = example_inputs[graph_idx]
# Use FX transformer to rebuild the graph cleanly
transformed_graph = DynamoGraphTransformer(
graph,
flat_inputs,
flat_args_dynamic_dims,
graph_input_order,
graph_output_map,
fake_mode,
).transform()
# Set up PyTree codegen for proper input/output handling
transformed_graph.graph._codegen = _PyTreeCodeGen(
_PyTreeInfo(
argument_names(inspect.signature(orig_callable), args, kwargs), # type: ignore[attr-defined, arg-type]
in_spec,
out_spec,
)
)
transformed_graph.recompile()
clean_nn_module_stack_and_source_fn(
transformed_graph, torch._dynamo.config.inline_inbuilt_nn_modules
)
clean_export_root(transformed_graph)
transformed_graph.meta["module_call_specs"] = module_call_spec
transformed_graph.meta["fake_mode"] = fake_mode
return transformed_graph
return inner
| PyTreeifyOutput |
python | tensorflow__tensorflow | tensorflow/python/ops/control_flow_ops.py | {
"start": 17900,
"end": 24660
} | class ____(metaclass=abc.ABCMeta):
"""The base class for control flow context.
The usage pattern is a sequence of (Enter, Exit) followed by a final
ExitResult.
We maintain the following state for control flow contexts during graph
construction:
1. graph has _control_flow_context: the current context used to
construct new nodes. Changed by ctxt.Enter() and ctxt.Exit()
2. op has _control_flow_context: the context to which the op belongs.
Set at the time the op is created. Immutable.
3. A ControlFlowContext has _outer_context: the context in which this
context is created. Set at the time a context is created. Immutable.
4. A ControlFlowContext has _context_stack.
Pushed and popped by ctxt.Enter() and ctxt.Exit()
"""
def __init__(self, values_def=None, import_scope=None):
self._nested_contexts = []
self._outer_context = ops.get_default_graph()._get_control_flow_context()
if self._outer_context:
self._outer_context._nested_contexts.append(self) # pylint: disable=protected-access
self._context_stack = []
if values_def:
self._init_values_from_proto(values_def, import_scope=import_scope)
else:
# The names of tensors that have been already seen in this context.
self._values = set()
# The keys are the names of tensors referenced by but external to this
# context. Each value is the Tensor that should be used by this context to
# access the key value (e.g. a switch output guarding a cond input value).
self._external_values = {}
def _init_values_from_proto(self, values_def, import_scope=None):
"""Initializes values and external_values from `ValuesDef` protocol buffer.
Args:
values_def: `ValuesDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(values_def, control_flow_pb2.ValuesDef)
self._values = set(
ops.prepend_name_scope(value, import_scope)
for value in values_def.values)
g = ops.get_default_graph()
self._external_values = {}
for k, v in values_def.external_values.items():
k = ops.prepend_name_scope(k, import_scope)
self._external_values[k] = g.as_graph_element(
ops.prepend_name_scope(v, import_scope))
op_names = set([
op.split(":")[0]
for op in self._values - set(self._external_values.keys())
])
for op in op_names:
# pylint: disable=protected-access
g.as_graph_element(op)._set_control_flow_context(self)
# pylint: enable=protected-access
@property
def name(self):
return self._name
@property
def outer_context(self):
"""Return the context containing this context."""
return self._outer_context
@property
def grad_state(self):
raise NotImplementedError("Abstract method")
@property
def back_prop(self):
raise NotImplementedError("Abstract method")
@abc.abstractmethod
def to_control_flow_context_def(self, context_def, export_scope=None):
"""Serializes this into `context_def`.
Args:
context_def: a `ControlFlowContextDef` protocol buffer.
export_scope: Optional `string`. Name scope to remove.
"""
raise NotImplementedError("Abstract method")
def _to_values_def(self, export_scope=None):
"""Converts the values to a `ValuesDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `ValuesDef` protocol buffer.
"""
values_def = control_flow_pb2.ValuesDef()
values_def.values.extend(
[ops.strip_name_scope(v, export_scope) for v in sorted(self._values)])
for k, v in self._external_values.items():
k = ops.strip_name_scope(k, export_scope)
values_def.external_values[k] = ops.strip_name_scope(v.name, export_scope)
return values_def
def AddName(self, name):
self._values.add(name)
# pylint: disable=protected-access
def Enter(self):
"""Enter this control flow context."""
graph = ops.get_default_graph()
self._context_stack.append(graph._get_control_flow_context())
graph._set_control_flow_context(self)
def Exit(self):
"""Exit this control flow context."""
graph = ops.get_default_graph()
last_context = self._context_stack.pop()
graph._set_control_flow_context(last_context)
def EnterGradientColocation(self, op: ops.Operation, gradient_uid):
"""Start building a gradient colocated with an op."""
if self._outer_context:
self._outer_context.EnterGradientColocation(op, gradient_uid)
def ExitGradientColocation(self, op: ops.Operation, gradient_uid):
"""Start building a gradient colocated with an op."""
if self._outer_context:
self._outer_context.ExitGradientColocation(op, gradient_uid)
def ExitResult(self, result):
"""Make a list of tensors available in the outer context."""
if self._outer_context:
def fn(x):
self._outer_context.AddName(x.name)
return x
nest.map_structure(fn, result, expand_composites=True)
def GetWhileContext(self):
"""Return the while context containing this context."""
if self._outer_context:
return self._outer_context.GetWhileContext()
return None
def _RemoveExternalControlEdges(self, op: ops.Operation):
"""Remove any external control dependency on this op."""
while_ctxt = self.GetWhileContext()
# A control input of `op` is internal if it is in the same while
# loop context as the enclosing while loop context of self.
if while_ctxt is None:
internal_control_inputs, external_control_inputs = op.control_inputs, []
else:
internal_control_inputs, external_control_inputs = [], []
for x in op.control_inputs:
ctxt = util.GetOutputContext(x)
if ctxt is not None and ctxt.GetWhileContext() == while_ctxt:
internal_control_inputs.append(x)
else:
external_control_inputs.append(x)
if len(internal_control_inputs) != len(op.control_inputs):
# TODO(mdan): perhaps there should be a replace_control_inputs()
op._remove_all_control_inputs()
op._add_control_inputs(internal_control_inputs)
return internal_control_inputs, external_control_inputs
# pylint: enable=protected-access
def AddInnerOp(self, op: ops.Operation):
"""Notifies a scope about an operator added to an inner scope."""
if self._outer_context:
self._outer_context.AddInnerOp(op)
def GetControlPivot(self):
"""Returns the pivot node for this context, or None."""
return None
def IsWhileContext(self):
return False
def IsCondContext(self):
return False
def IsXLAContext(self):
return False
def __str__(self):
return self.name
| ControlFlowContext |
python | huggingface__transformers | src/transformers/models/depth_pro/modeling_depth_pro.py | {
"start": 15658,
"end": 17542
} | class ____(nn.Module):
def __init__(self, config: DepthProConfig):
super().__init__()
self.config = config
self.intermediate_hook_ids = config.intermediate_hook_ids
self.intermediate_feature_dims = config.intermediate_feature_dims
self.scaled_images_ratios = config.scaled_images_ratios
self.scaled_images_overlap_ratios = config.scaled_images_overlap_ratios
self.scaled_images_feature_dims = config.scaled_images_feature_dims
self.merge_padding_value = config.merge_padding_value
self.n_scaled_images = len(self.scaled_images_ratios)
self.n_intermediate_hooks = len(self.intermediate_hook_ids)
self.patch_encoder = DepthProPatchEncoder(config)
self.image_encoder = DepthProImageEncoder(config)
def forward(
self,
pixel_values: torch.Tensor,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
) -> Union[tuple, DepthProOutput]:
batch_size, num_channels, height, width = pixel_values.shape
patch_features = self.patch_encoder(
pixel_values,
)
image_encodings = self.image_encoder(
pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
image_features = image_encodings[1] # index 1 contains features
features = [image_features, *patch_features]
if not return_dict:
return (image_encodings[0], features) + image_encodings[2:]
return DepthProOutput(
last_hidden_state=image_encodings.last_hidden_state,
features=features,
hidden_states=image_encodings.hidden_states,
attentions=image_encodings.attentions,
)
| DepthProEncoder |
python | django-extensions__django-extensions | tests/testapp_with_appconfig/apps.py | {
"start": 36,
"end": 123
} | class ____(AppConfig):
name = "tests.testapp_with_appconfig"
| TestappWithAppConfigConfig |
python | arrow-py__arrow | arrow/locales.py | {
"start": 10715,
"end": 12190
} | class ____(Locale):
names = ["it", "it-it"]
past = "{0} fa"
future = "tra {0}"
and_word = "e"
timeframes = {
"now": "adesso",
"second": "un secondo",
"seconds": "{0} qualche secondo",
"minute": "un minuto",
"minutes": "{0} minuti",
"hour": "un'ora",
"hours": "{0} ore",
"day": "un giorno",
"days": "{0} giorni",
"week": "una settimana",
"weeks": "{0} settimane",
"month": "un mese",
"months": "{0} mesi",
"year": "un anno",
"years": "{0} anni",
}
month_names = [
"",
"gennaio",
"febbraio",
"marzo",
"aprile",
"maggio",
"giugno",
"luglio",
"agosto",
"settembre",
"ottobre",
"novembre",
"dicembre",
]
month_abbreviations = [
"",
"gen",
"feb",
"mar",
"apr",
"mag",
"giu",
"lug",
"ago",
"set",
"ott",
"nov",
"dic",
]
day_names = [
"",
"lunedì",
"martedì",
"mercoledì",
"giovedì",
"venerdì",
"sabato",
"domenica",
]
day_abbreviations = ["", "lun", "mar", "mer", "gio", "ven", "sab", "dom"]
ordinal_day_re = r"((?P<value>[1-3]?[0-9](?=[ºª]))[ºª])"
def _ordinal_number(self, n: int) -> str:
return f"{n}º"
| ItalianLocale |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/baked.py | {
"start": 1256,
"end": 10083
} | class ____:
"""A builder object for :class:`.query.Query` objects."""
__slots__ = "steps", "_bakery", "_cache_key", "_spoiled"
def __init__(self, bakery, initial_fn, args=()):
self._cache_key = ()
self._update_cache_key(initial_fn, args)
self.steps = [initial_fn]
self._spoiled = False
self._bakery = bakery
@classmethod
def bakery(cls, size=200, _size_alert=None):
"""Construct a new bakery.
:return: an instance of :class:`.Bakery`
"""
return Bakery(cls, util.LRUCache(size, size_alert=_size_alert))
def _clone(self):
b1 = BakedQuery.__new__(BakedQuery)
b1._cache_key = self._cache_key
b1.steps = list(self.steps)
b1._bakery = self._bakery
b1._spoiled = self._spoiled
return b1
def _update_cache_key(self, fn, args=()):
self._cache_key += (fn.__code__,) + args
def __iadd__(self, other):
if isinstance(other, tuple):
self.add_criteria(*other)
else:
self.add_criteria(other)
return self
def __add__(self, other):
if isinstance(other, tuple):
return self.with_criteria(*other)
else:
return self.with_criteria(other)
def add_criteria(self, fn, *args):
"""Add a criteria function to this :class:`.BakedQuery`.
This is equivalent to using the ``+=`` operator to
modify a :class:`.BakedQuery` in-place.
"""
self._update_cache_key(fn, args)
self.steps.append(fn)
return self
def with_criteria(self, fn, *args):
"""Add a criteria function to a :class:`.BakedQuery` cloned from this
one.
This is equivalent to using the ``+`` operator to
produce a new :class:`.BakedQuery` with modifications.
"""
return self._clone().add_criteria(fn, *args)
def for_session(self, session):
"""Return a :class:`_baked.Result` object for this
:class:`.BakedQuery`.
This is equivalent to calling the :class:`.BakedQuery` as a
Python callable, e.g. ``result = my_baked_query(session)``.
"""
return Result(self, session)
def __call__(self, session):
return self.for_session(session)
def spoil(self, full=False):
"""Cancel any query caching that will occur on this BakedQuery object.
The BakedQuery can continue to be used normally, however additional
creational functions will not be cached; they will be called
on every invocation.
This is to support the case where a particular step in constructing
a baked query disqualifies the query from being cacheable, such
as a variant that relies upon some uncacheable value.
:param full: if False, only functions added to this
:class:`.BakedQuery` object subsequent to the spoil step will be
non-cached; the state of the :class:`.BakedQuery` up until
this point will be pulled from the cache. If True, then the
entire :class:`_query.Query` object is built from scratch each
time, with all creational functions being called on each
invocation.
"""
if not full and not self._spoiled:
_spoil_point = self._clone()
_spoil_point._cache_key += ("_query_only",)
self.steps = [_spoil_point._retrieve_baked_query]
self._spoiled = True
return self
def _effective_key(self, session):
"""Return the key that actually goes into the cache dictionary for
this :class:`.BakedQuery`, taking into account the given
:class:`.Session`.
This basically means we also will include the session's query_class,
as the actual :class:`_query.Query` object is part of what's cached
and needs to match the type of :class:`_query.Query` that a later
session will want to use.
"""
return self._cache_key + (session._query_cls,)
def _with_lazyload_options(self, options, effective_path, cache_path=None):
"""Cloning version of _add_lazyload_options."""
q = self._clone()
q._add_lazyload_options(options, effective_path, cache_path=cache_path)
return q
def _add_lazyload_options(self, options, effective_path, cache_path=None):
"""Used by per-state lazy loaders to add options to the
"lazy load" query from a parent query.
Creates a cache key based on given load path and query options;
if a repeatable cache key cannot be generated, the query is
"spoiled" so that it won't use caching.
"""
key = ()
if not cache_path:
cache_path = effective_path
for opt in options:
if opt._is_legacy_option or opt._is_compile_state:
ck = opt._generate_cache_key()
if ck is None:
self.spoil(full=True)
else:
assert not ck[1], (
"loader options with variable bound parameters "
"not supported with baked queries. Please "
"use new-style select() statements for cached "
"ORM queries."
)
key += ck[0]
self.add_criteria(
lambda q: q._with_current_path(effective_path).options(*options),
cache_path.path,
key,
)
def _retrieve_baked_query(self, session):
query = self._bakery.get(self._effective_key(session), None)
if query is None:
query = self._as_query(session)
self._bakery[self._effective_key(session)] = query.with_session(
None
)
return query.with_session(session)
def _bake(self, session):
query = self._as_query(session)
query.session = None
# in 1.4, this is where before_compile() event is
# invoked
statement = query._statement_20()
# if the query is not safe to cache, we still do everything as though
# we did cache it, since the receiver of _bake() assumes subqueryload
# context was set up, etc.
#
# note also we want to cache the statement itself because this
# allows the statement itself to hold onto its cache key that is
# used by the Connection, which in itself is more expensive to
# generate than what BakedQuery was able to provide in 1.3 and prior
if statement._compile_options._bake_ok:
self._bakery[self._effective_key(session)] = (
query,
statement,
)
return query, statement
def to_query(self, query_or_session):
"""Return the :class:`_query.Query` object for use as a subquery.
This method should be used within the lambda callable being used
to generate a step of an enclosing :class:`.BakedQuery`. The
parameter should normally be the :class:`_query.Query` object that
is passed to the lambda::
sub_bq = self.bakery(lambda s: s.query(User.name))
sub_bq += lambda q: q.filter(User.id == Address.user_id).correlate(Address)
main_bq = self.bakery(lambda s: s.query(Address))
main_bq += lambda q: q.filter(sub_bq.to_query(q).exists())
In the case where the subquery is used in the first callable against
a :class:`.Session`, the :class:`.Session` is also accepted::
sub_bq = self.bakery(lambda s: s.query(User.name))
sub_bq += lambda q: q.filter(User.id == Address.user_id).correlate(Address)
main_bq = self.bakery(
lambda s: s.query(Address.id, sub_bq.to_query(q).scalar_subquery())
)
:param query_or_session: a :class:`_query.Query` object or a class
:class:`.Session` object, that is assumed to be within the context
of an enclosing :class:`.BakedQuery` callable.
""" # noqa: E501
if isinstance(query_or_session, Session):
session = query_or_session
elif isinstance(query_or_session, Query):
session = query_or_session.session
if session is None:
raise sa_exc.ArgumentError(
"Given Query needs to be associated with a Session"
)
else:
raise TypeError(
"Query or Session object expected, got %r."
% type(query_or_session)
)
return self._as_query(session)
def _as_query(self, session):
query = self.steps[0](session)
for step in self.steps[1:]:
query = step(query)
return query
| BakedQuery |
python | huggingface__transformers | src/transformers/models/gpt_oss/modeling_gpt_oss.py | {
"start": 32729,
"end": 32992
} | class ____(GenericForTokenClassification, GptOssPreTrainedModel):
pass
__all__ = [
"GptOssForCausalLM",
"GptOssForSequenceClassification",
"GptOssForTokenClassification",
"GptOssModel",
"GptOssPreTrainedModel",
]
| GptOssForTokenClassification |
python | django__django | django/db/models/expressions.py | {
"start": 43588,
"end": 43727
} | class ____(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return "*", []
| Star |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/declarative_automation_tests/scenario_utils/scenario_state.py | {
"start": 3608,
"end": 9642
} | class ____:
"""A construct for declaring and modifying a desired Definitions object."""
asset_specs: Sequence[Union[dg.AssetSpec, MultiAssetSpec]]
check_specs: Sequence[dg.AssetCheckSpec] = field(default_factory=list)
current_time: datetime.datetime = field(default_factory=lambda: get_current_datetime())
sensors: Sequence[dg.SensorDefinition] = field(default_factory=list)
additional_repo_specs: Sequence["ScenarioSpec"] = field(default_factory=list)
def with_sensors(self, sensors: Sequence[dg.SensorDefinition]) -> "ScenarioSpec":
return dataclasses.replace(self, sensors=sensors)
@property
def assets(self) -> Sequence[dg.AssetsDefinition]:
def compute_fn(context: AssetExecutionContext) -> None:
fail_keys = {
AssetKey.from_coercible(s)
for s in json.loads(context.run.tags.get(FAIL_TAG) or "[]")
}
for asset_key in context.selected_asset_keys:
if asset_key in fail_keys:
raise Exception("Asset failed")
assets = []
for spec in self.asset_specs:
if isinstance(spec, MultiAssetSpec):
@dg.multi_asset(**spec._asdict())
def _multi_asset(context: AssetExecutionContext):
return compute_fn(context)
assets.append(_multi_asset)
else:
execution_type_str = spec.metadata.get(SYSTEM_METADATA_KEY_ASSET_EXECUTION_TYPE)
execution_type = (
AssetExecutionType[execution_type_str] if execution_type_str else None
)
# create an observable_source_asset or regular asset depending on the execution type
if execution_type == AssetExecutionType.OBSERVATION:
@dg.op
def noop(): ...
osa = dg.AssetsDefinition(
specs=[spec],
execution_type=execution_type,
keys_by_output_name={"result": spec.key},
node_def=noop,
)
assets.append(osa)
else:
# strip out the relevant parameters from the spec
params = {
"key",
"deps",
"group_name",
"code_version",
"automation_condition",
"legacy_freshness_policy",
"partitions_def",
"metadata",
}
assets.append(
dg.asset(
compute_fn=compute_fn,
**{k: v for k, v in spec._asdict().items() if k in params},
)
)
for check_spec in self.check_specs:
@dg.asset_check( # pyright: ignore[reportArgumentType]
asset=check_spec.asset_key,
name=check_spec.key.name,
blocking=check_spec.blocking,
)
def _check(): ...
assets.append(_check)
return assets
@property
def defs(self) -> dg.Definitions:
return dg.Definitions(assets=self.assets, sensors=self.sensors)
@property
def asset_graph(self) -> AssetGraph:
return AssetGraph.from_assets(self.assets)
def with_additional_repositories(
self,
scenario_specs: Sequence["ScenarioSpec"],
) -> "ScenarioSpec":
return dataclasses.replace(
self, additional_repo_specs=[*self.additional_repo_specs, *scenario_specs]
)
def with_current_time(self, time: Union[str, datetime.datetime]) -> "ScenarioSpec":
if isinstance(time, str):
time = parse_time_string(time)
return dataclasses.replace(self, current_time=time)
def with_current_time_advanced(self, **kwargs) -> "ScenarioSpec":
# hacky support for adding years
if "years" in kwargs:
kwargs["days"] = kwargs.get("days", 0) + 365 * kwargs.pop("years")
return dataclasses.replace(
self, current_time=self.current_time + datetime.timedelta(**kwargs)
)
def with_asset_properties(
self, keys: Optional[Iterable[CoercibleToAssetKey]] = None, **kwargs
) -> "ScenarioSpec":
"""Convenience method to update the properties of one or more assets in the scenario state."""
new_asset_specs = []
if "auto_materialize_policy" in kwargs:
policy = kwargs.pop("auto_materialize_policy")
kwargs["automation_condition"] = policy.to_automation_condition() if policy else None
for spec in self.asset_specs:
if isinstance(spec, MultiAssetSpec):
partitions_def = kwargs.get("partitions_def", spec.partitions_def)
new_multi_specs = [
s._replace(**{k: v for k, v in kwargs.items() if k != "partitions_def"})
if keys is None or s.key in keys
else s
for s in spec.specs
]
new_asset_specs.append(
spec._replace(partitions_def=partitions_def, specs=new_multi_specs)
)
else:
if keys is None or spec.key in {AssetKey.from_coercible(key) for key in keys}:
new_asset_specs.append(spec._replace(**kwargs))
else:
new_asset_specs.append(spec)
return dataclasses.replace(self, asset_specs=new_asset_specs)
def with_all_eager(self, max_materializations_per_minute: int = 1) -> "ScenarioSpec":
return self.with_asset_properties(
auto_materialize_policy=AutoMaterializePolicy.eager(
max_materializations_per_minute=max_materializations_per_minute,
)
)
@dataclass(frozen=True)
| ScenarioSpec |
python | pypa__warehouse | tests/unit/manage/test_views.py | {
"start": 75986,
"end": 99699
} | class ____:
def test_default_response(self, monkeypatch):
create_macaroon_obj = pretend.stub()
create_macaroon_cls = pretend.call_recorder(
lambda *a, **kw: create_macaroon_obj
)
monkeypatch.setattr(views, "CreateMacaroonForm", create_macaroon_cls)
delete_macaroon_obj = pretend.stub()
delete_macaroon_cls = pretend.call_recorder(
lambda *a, **kw: delete_macaroon_obj
)
monkeypatch.setattr(views, "DeleteMacaroonForm", delete_macaroon_cls)
project_names = [pretend.stub()]
monkeypatch.setattr(
views.ProvisionMacaroonViews, "project_names", project_names
)
request = pretend.stub(
user=pretend.stub(id=pretend.stub(), username=pretend.stub()),
find_service=lambda interface, **kw: {
IMacaroonService: pretend.stub(),
IUserService: pretend.stub(),
}[interface],
)
view = views.ProvisionMacaroonViews(request)
assert view.default_response == {
"project_names": project_names,
"create_macaroon_form": create_macaroon_obj,
"delete_macaroon_form": delete_macaroon_obj,
}
def test_project_names(self, db_request):
user = UserFactory.create()
another_user = UserFactory.create()
db_request.user = user
db_request.find_service = lambda *a, **kw: pretend.stub()
# A project with a sole owner that is the user
with_sole_owner = ProjectFactory.create(name="foo")
RoleFactory.create(user=user, project=with_sole_owner, role_name="Owner")
RoleFactory.create(
user=another_user, project=with_sole_owner, role_name="Maintainer"
)
# A project with multiple owners, including the user
with_multiple_owners = ProjectFactory.create(name="bar")
RoleFactory.create(user=user, project=with_multiple_owners, role_name="Owner")
RoleFactory.create(
user=another_user, project=with_multiple_owners, role_name="Owner"
)
# A project with a sole owner that is not the user
not_an_owner = ProjectFactory.create(name="baz")
RoleFactory.create(user=user, project=not_an_owner, role_name="Maintainer")
RoleFactory.create(user=another_user, project=not_an_owner, role_name="Owner")
# A project that the user is neither owner nor maintainer of
neither_owner_nor_maintainer = ProjectFactory.create(name="quux")
RoleFactory.create(
user=another_user, project=neither_owner_nor_maintainer, role_name="Owner"
)
view = views.ProvisionMacaroonViews(db_request)
assert set(view.project_names) == {"foo", "bar", "baz"}
def test_manage_macaroons(self, monkeypatch):
request = pretend.stub(
find_service=lambda *a, **kw: pretend.stub(),
params=pretend.stub(get=lambda s: pretend.stub()),
)
default_response = {"default": "response"}
monkeypatch.setattr(
views.ProvisionMacaroonViews, "default_response", default_response
)
view = views.ProvisionMacaroonViews(request)
result = view.manage_macaroons()
assert result == default_response
def test_create_macaroon_not_allowed(self, pyramid_request):
pyramid_request.route_path = pretend.call_recorder(lambda x: "/foo/bar")
pyramid_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
pyramid_request.user = pretend.stub(has_primary_verified_email=False)
pyramid_request.find_service = lambda interface, **kw: pretend.stub()
view = views.ProvisionMacaroonViews(pyramid_request)
result = view.create_macaroon()
assert pyramid_request.route_path.calls == [pretend.call("manage.account")]
assert pyramid_request.session.flash.calls == [
pretend.call("Verify your email to create an API token.", queue="error")
]
assert isinstance(result, HTTPSeeOther)
assert result.location == "/foo/bar"
def test_create_macaroon_invalid_form(self, monkeypatch):
macaroon_service = pretend.stub(
create_macaroon=pretend.call_recorder(lambda *a, **kw: pretend.stub())
)
request = pretend.stub(
POST={},
user=pretend.stub(id=pretend.stub(), has_primary_verified_email=True),
find_service=lambda interface, **kw: {
IMacaroonService: macaroon_service,
IUserService: pretend.stub(),
}[interface],
)
create_macaroon_obj = pretend.stub(validate=lambda: False)
create_macaroon_cls = pretend.call_recorder(
lambda *a, **kw: create_macaroon_obj
)
monkeypatch.setattr(views, "CreateMacaroonForm", create_macaroon_cls)
project_names = [pretend.stub()]
monkeypatch.setattr(
views.ProvisionMacaroonViews, "project_names", project_names
)
default_response = {"default": "response"}
monkeypatch.setattr(
views.ProvisionMacaroonViews, "default_response", default_response
)
view = views.ProvisionMacaroonViews(request)
result = view.create_macaroon()
assert result == {
**default_response,
"create_macaroon_form": create_macaroon_obj,
}
assert macaroon_service.create_macaroon.calls == []
@pytest.mark.parametrize("has_2fa", [True, False])
def test_create_macaroon(self, monkeypatch, has_2fa):
macaroon = pretend.stub()
macaroon_service = pretend.stub(
create_macaroon=pretend.call_recorder(
lambda *a, **kw: ("not a real raw macaroon", macaroon)
)
)
user_service = pretend.stub()
request = pretend.stub(
POST={},
domain=pretend.stub(),
user=pretend.stub(
id="a user id",
has_primary_verified_email=True,
record_event=pretend.call_recorder(lambda *a, **kw: None),
has_two_factor=has_2fa,
),
find_service=lambda interface, **kw: {
IMacaroonService: macaroon_service,
IUserService: user_service,
}[interface],
remote_addr="0.0.0.0",
)
create_macaroon_obj = pretend.stub(
validate=lambda: True,
description=pretend.stub(data=pretend.stub()),
validated_scope="user",
)
create_macaroon_cls = pretend.call_recorder(
lambda *a, **kw: create_macaroon_obj
)
monkeypatch.setattr(views, "CreateMacaroonForm", create_macaroon_cls)
project_names = [pretend.stub()]
monkeypatch.setattr(
views.ProvisionMacaroonViews, "project_names", project_names
)
default_response = {"default": "response"}
monkeypatch.setattr(
views.ProvisionMacaroonViews, "default_response", default_response
)
view = views.ProvisionMacaroonViews(request)
result = view.create_macaroon()
assert macaroon_service.create_macaroon.calls == [
pretend.call(
location=request.domain,
user_id=request.user.id,
description=create_macaroon_obj.description.data,
scopes=[
caveats.RequestUser(user_id="a user id"),
],
additional={"made_with_2fa": has_2fa},
)
]
assert result == {
**default_response,
"serialized_macaroon": "not a real raw macaroon",
"macaroon": macaroon,
"create_macaroon_form": create_macaroon_obj,
}
assert request.user.record_event.calls == [
pretend.call(
tag=EventTag.Account.APITokenAdded,
request=request,
additional={
"description": create_macaroon_obj.description.data,
"caveats": [
{
"permissions": create_macaroon_obj.validated_scope,
"version": 1,
}
],
},
)
]
def test_create_macaroon_records_events_for_each_project(self, monkeypatch):
macaroon = pretend.stub()
macaroon_service = pretend.stub(
create_macaroon=pretend.call_recorder(
lambda *a, **kw: ("not a real raw macaroon", macaroon)
)
)
record_project_event = pretend.call_recorder(lambda *a, **kw: None)
user_service = pretend.stub()
request = pretend.stub(
POST={},
domain=pretend.stub(),
user=pretend.stub(
id=pretend.stub(),
has_primary_verified_email=True,
username=pretend.stub(),
has_two_factor=False,
projects=[
pretend.stub(
id=uuid.uuid4(),
normalized_name="foo",
record_event=record_project_event,
),
pretend.stub(
id=uuid.uuid4(),
normalized_name="bar",
record_event=record_project_event,
),
],
record_event=pretend.call_recorder(lambda *a, **kw: None),
),
find_service=lambda interface, **kw: {
IMacaroonService: macaroon_service,
IUserService: user_service,
}[interface],
remote_addr="0.0.0.0",
)
create_macaroon_obj = pretend.stub(
validate=lambda: True,
description=pretend.stub(data=pretend.stub()),
validated_scope={"projects": ["foo", "bar"]},
)
create_macaroon_cls = pretend.call_recorder(
lambda *a, **kw: create_macaroon_obj
)
monkeypatch.setattr(views, "CreateMacaroonForm", create_macaroon_cls)
project_names = [pretend.stub()]
monkeypatch.setattr(
views.ProvisionMacaroonViews, "project_names", project_names
)
default_response = {"default": "response"}
monkeypatch.setattr(
views.ProvisionMacaroonViews, "default_response", default_response
)
view = views.ProvisionMacaroonViews(request)
result = view.create_macaroon()
assert macaroon_service.create_macaroon.calls == [
pretend.call(
location=request.domain,
user_id=request.user.id,
description=create_macaroon_obj.description.data,
scopes=[
caveats.ProjectName(normalized_names=["foo", "bar"]),
caveats.ProjectID(
project_ids=[str(p.id) for p in request.user.projects]
),
],
additional={"made_with_2fa": False},
)
]
assert result == {
**default_response,
"serialized_macaroon": "not a real raw macaroon",
"macaroon": macaroon,
"create_macaroon_form": create_macaroon_obj,
}
assert request.user.record_event.calls == [
pretend.call(
tag=EventTag.Account.APITokenAdded,
request=request,
additional={
"description": create_macaroon_obj.description.data,
"caveats": [
{
"permissions": create_macaroon_obj.validated_scope,
"version": 1,
},
{"project_ids": [str(p.id) for p in request.user.projects]},
],
},
)
]
assert record_project_event.calls == [
pretend.call(
tag=EventTag.Project.APITokenAdded,
request=request,
additional={
"description": create_macaroon_obj.description.data,
"user": request.user.username,
},
),
pretend.call(
tag=EventTag.Project.APITokenAdded,
request=request,
additional={
"description": create_macaroon_obj.description.data,
"user": request.user.username,
},
),
]
def test_delete_macaroon_invalid_form(self, monkeypatch, pyramid_request):
macaroon_service = pretend.stub(
delete_macaroon=pretend.call_recorder(lambda id: pretend.stub())
)
pyramid_request.POST = {
"confirm_password": "password",
"macaroon_id": "macaroon_id",
}
pyramid_request.route_path = pretend.call_recorder(lambda x: pretend.stub())
pyramid_request.find_service = lambda interface, **kw: {
IMacaroonService: macaroon_service,
IUserService: pretend.stub(),
}[interface]
pyramid_request.referer = "/fake/safe/route"
pyramid_request.host = None
pyramid_request.user = pretend.stub(username=pretend.stub())
pyramid_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
delete_macaroon_obj = pretend.stub(validate=lambda: False)
delete_macaroon_cls = pretend.call_recorder(
lambda *a, **kw: delete_macaroon_obj
)
monkeypatch.setattr(views, "DeleteMacaroonForm", delete_macaroon_cls)
view = views.ProvisionMacaroonViews(pyramid_request)
result = view.delete_macaroon()
assert pyramid_request.route_path.calls == []
assert isinstance(result, HTTPSeeOther)
assert result.location == "/fake/safe/route"
assert macaroon_service.delete_macaroon.calls == []
assert pyramid_request.session.flash.calls == [
pretend.call("Invalid credentials. Try again", queue="error")
]
def test_delete_macaroon_dangerous_redirect(self, monkeypatch, pyramid_request):
macaroon_service = pretend.stub(
delete_macaroon=pretend.call_recorder(lambda id: pretend.stub())
)
pyramid_request.POST = {
"confirm_password": "password",
"macaroon_id": "macaroon_id",
}
pyramid_request.route_path = pretend.call_recorder(lambda x: "/safe/route")
pyramid_request.find_service = lambda interface, **kw: {
IMacaroonService: macaroon_service,
IUserService: pretend.stub(),
}[interface]
pyramid_request.referer = "http://google.com/"
pyramid_request.host = None
pyramid_request.user = pretend.stub(username=pretend.stub())
pyramid_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
delete_macaroon_obj = pretend.stub(validate=lambda: False)
delete_macaroon_cls = pretend.call_recorder(
lambda *a, **kw: delete_macaroon_obj
)
monkeypatch.setattr(views, "DeleteMacaroonForm", delete_macaroon_cls)
view = views.ProvisionMacaroonViews(pyramid_request)
result = view.delete_macaroon()
assert pyramid_request.route_path.calls == [pretend.call("manage.account")]
assert isinstance(result, HTTPSeeOther)
assert result.location == "/safe/route"
assert macaroon_service.delete_macaroon.calls == []
def test_delete_macaroon(self, monkeypatch, pyramid_request):
macaroon = pretend.stub(description="fake macaroon", permissions_caveat="user")
macaroon_service = pretend.stub(
delete_macaroon=pretend.call_recorder(lambda id: pretend.stub()),
find_macaroon=pretend.call_recorder(lambda id: macaroon),
)
user_service = pretend.stub()
pyramid_request.POST = {
"confirm_password": "password",
"macaroon_id": "macaroon_id",
}
pyramid_request.route_path = pretend.call_recorder(lambda x: pretend.stub())
pyramid_request.find_service = lambda interface, **kw: {
IMacaroonService: macaroon_service,
IUserService: user_service,
}[interface]
pyramid_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
pyramid_request.referer = "/fake/safe/route"
pyramid_request.host = None
pyramid_request.user = pretend.stub(
id=pretend.stub(),
username=pretend.stub(),
record_event=pretend.call_recorder(lambda *a, **kw: None),
)
delete_macaroon_obj = pretend.stub(
validate=lambda: True, macaroon_id=pretend.stub(data=pretend.stub())
)
delete_macaroon_cls = pretend.call_recorder(
lambda *a, **kw: delete_macaroon_obj
)
monkeypatch.setattr(views, "DeleteMacaroonForm", delete_macaroon_cls)
view = views.ProvisionMacaroonViews(pyramid_request)
result = view.delete_macaroon()
assert pyramid_request.route_path.calls == []
assert isinstance(result, HTTPSeeOther)
assert result.location == "/fake/safe/route"
assert macaroon_service.delete_macaroon.calls == [
pretend.call(delete_macaroon_obj.macaroon_id.data)
]
assert macaroon_service.find_macaroon.calls == [
pretend.call(delete_macaroon_obj.macaroon_id.data)
]
assert pyramid_request.session.flash.calls == [
pretend.call("Deleted API token 'fake macaroon'.", queue="success")
]
assert pyramid_request.user.record_event.calls == [
pretend.call(
tag=EventTag.Account.APITokenRemoved,
request=pyramid_request,
additional={"macaroon_id": delete_macaroon_obj.macaroon_id.data},
)
]
def test_delete_macaroon_when_non_existent(self, monkeypatch, pyramid_request):
user_service = pretend.stub()
macaroon_service = pretend.stub(
delete_macaroon=pretend.call_recorder(lambda id: pretend.stub()),
find_macaroon=pretend.call_recorder(lambda id: None),
)
delete_macaroon_obj = pretend.stub(
validate=lambda: True, macaroon_id=pretend.stub(data=pretend.stub())
)
delete_macaroon_cls = pretend.call_recorder(
lambda *a, **kw: delete_macaroon_obj
)
monkeypatch.setattr(views, "DeleteMacaroonForm", delete_macaroon_cls)
pyramid_request.POST = {
"confirm_password": "password",
"macaroon_id": "macaroon_id",
}
pyramid_request.find_service = lambda interface, **kw: {
IMacaroonService: macaroon_service,
IUserService: user_service,
}[interface]
pyramid_request.route_path = pretend.call_recorder(lambda x: "/manage/account/")
pyramid_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
pyramid_request.user = pretend.stub(
id=pretend.stub(),
username=pretend.stub(),
record_event=pretend.call_recorder(lambda *a, **kw: None),
)
view = views.ProvisionMacaroonViews(pyramid_request)
result = view.delete_macaroon()
assert pyramid_request.route_path.calls == [pretend.call("manage.account")]
assert isinstance(result, HTTPSeeOther)
assert macaroon_service.find_macaroon.calls == [
pretend.call(delete_macaroon_obj.macaroon_id.data)
]
assert pyramid_request.session.flash.calls == [
pretend.call("API Token does not exist.", queue="warning")
]
def test_delete_macaroon_records_events_for_each_project(
self, monkeypatch, pyramid_request
):
macaroon = pretend.stub(
description="fake macaroon",
permissions_caveat={"projects": ["foo", "bar"]},
)
macaroon_service = pretend.stub(
delete_macaroon=pretend.call_recorder(lambda id: pretend.stub()),
find_macaroon=pretend.call_recorder(lambda id: macaroon),
)
record_project_event = pretend.call_recorder(lambda *a, **kw: None)
user_service = pretend.stub()
pyramid_request.POST = {
"confirm_password": pretend.stub(),
"macaroon_id": pretend.stub(),
}
pyramid_request.route_path = pretend.call_recorder(lambda x: pretend.stub())
pyramid_request.find_service = lambda interface, **kw: {
IMacaroonService: macaroon_service,
IUserService: user_service,
}[interface]
pyramid_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
pyramid_request.referer = "/fake/safe/route"
pyramid_request.host = None
pyramid_request.user = pretend.stub(
id=pretend.stub(),
username=pretend.stub(),
projects=[
pretend.stub(normalized_name="foo", record_event=record_project_event),
pretend.stub(normalized_name="bar", record_event=record_project_event),
],
record_event=pretend.call_recorder(lambda *a, **kw: None),
)
delete_macaroon_obj = pretend.stub(
validate=lambda: True, macaroon_id=pretend.stub(data=pretend.stub())
)
delete_macaroon_cls = pretend.call_recorder(
lambda *a, **kw: delete_macaroon_obj
)
monkeypatch.setattr(views, "DeleteMacaroonForm", delete_macaroon_cls)
view = views.ProvisionMacaroonViews(pyramid_request)
result = view.delete_macaroon()
assert pyramid_request.route_path.calls == []
assert isinstance(result, HTTPSeeOther)
assert result.location == "/fake/safe/route"
assert macaroon_service.delete_macaroon.calls == [
pretend.call(delete_macaroon_obj.macaroon_id.data)
]
assert macaroon_service.find_macaroon.calls == [
pretend.call(delete_macaroon_obj.macaroon_id.data)
]
assert pyramid_request.session.flash.calls == [
pretend.call("Deleted API token 'fake macaroon'.", queue="success")
]
assert pyramid_request.user.record_event.calls == [
pretend.call(
request=pyramid_request,
tag=EventTag.Account.APITokenRemoved,
additional={"macaroon_id": delete_macaroon_obj.macaroon_id.data},
)
]
assert record_project_event.calls == [
pretend.call(
tag=EventTag.Project.APITokenRemoved,
request=pyramid_request,
additional={
"description": "fake macaroon",
"user": pyramid_request.user.username,
},
),
pretend.call(
tag=EventTag.Project.APITokenRemoved,
request=pyramid_request,
additional={
"description": "fake macaroon",
"user": pyramid_request.user.username,
},
),
]
| TestProvisionMacaroonViews |
python | gevent__gevent | src/gevent/pool.py | {
"start": 25020,
"end": 25634
} | class ____(object):
__slots__ = ['callback']
def __init__(self, callback):
self.callback = callback
def __call__(self, source):
if source.successful():
self.callback(source.value)
def __hash__(self):
return hash(self.callback)
def __eq__(self, other):
return self.callback == getattr(other, 'callback', other)
def __str__(self):
return str(self.callback)
def __repr__(self):
return repr(self.callback)
def __getattr__(self, item):
assert item != 'callback'
return getattr(self.callback, item)
| pass_value |
python | great-expectations__great_expectations | tests/integration/fluent/test_snowflake_datasource.py | {
"start": 319,
"end": 2380
} | class ____:
@parameterize_batch_for_data_sources(
data_source_configs=[
SnowflakeDatasourceTestConfig(table_name=TEST_TABLE_NAME.lower()),
],
data=pd.DataFrame({"test_column": [1, 2, 3]}),
)
def test_lower(self, batch_for_datasource):
"""Test Snowflake with lower case table name"""
_run_checkpoint_test(batch_for_datasource, "snowflake")
@parameterize_batch_for_data_sources(
data_source_configs=[
SnowflakeDatasourceTestConfig(table_name=f'"{TEST_TABLE_NAME.lower()}"'),
],
data=pd.DataFrame({"test_column": [1, 2, 3]}),
)
def test_quoted_lower(self, batch_for_datasource):
"""Test Snowflake with quoted lower case table name"""
_run_checkpoint_test(batch_for_datasource, "snowflake")
@parameterize_batch_for_data_sources(
data_source_configs=[
SnowflakeDatasourceTestConfig(table_name=TEST_TABLE_NAME.upper()),
],
data=pd.DataFrame({"test_column": [1, 2, 3]}),
)
def test_upper(self, batch_for_datasource):
"""Test Snowflake with upper case table name"""
_run_checkpoint_test(batch_for_datasource, "snowflake")
@parameterize_batch_for_data_sources(
data_source_configs=[
SnowflakeDatasourceTestConfig(table_name=f'"{TEST_TABLE_NAME.upper()}"'),
],
data=pd.DataFrame({"test_column": [1, 2, 3]}),
)
def test_quoted_upper(self, batch_for_datasource):
"""Test Snowflake with quoted upper case table name"""
_run_checkpoint_test(batch_for_datasource, "snowflake")
@parameterize_batch_for_data_sources(
data_source_configs=[
SnowflakeDatasourceTestConfig(table_name=f'"{TEST_TABLE_NAME.title()}"'),
],
data=pd.DataFrame({"test_column": [1, 2, 3]}),
)
def test_quoted_title(self, batch_for_datasource):
"""Test Snowflake with quoted title case table name"""
_run_checkpoint_test(batch_for_datasource, "snowflake")
| TestSnowflakeTableIdentifiers |
python | huggingface__transformers | src/transformers/models/nanochat/configuration_nanochat.py | {
"start": 731,
"end": 7625
} | class ____(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`NanoChatModel`]. It is used to instantiate a
NanoChat model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [karpathy/nanochat-d32](https://huggingface.co/karpathy/nanochat-d32).
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50304):
Vocabulary size of the NanoChat model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`NanoChatModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 8192):
Dimension of the MLP representations. If `None`, it will be computed based on the model architecture.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 6):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with.
hidden_act (`str` or `function`, *optional*, defaults to `"relu2"`):
The non-linear activation function (function or string) in the decoder.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionaty should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
final_logit_softcapping (`float`, *optional*, defaults to 15.0):
scaling factor when applying tanh softcapping on the logits.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, and value projection layers during self-attention.
bos_token_id (`int`, *optional*, defaults to 0):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 1):
End of stream token id.
pad_token_id (`int`, *optional*, defaults to 1):
Padding token id.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie weight embeddings
```python
>>> from transformers import NanoChatModel, NanoChatConfig
>>> # Initializing a NanoChat style configuration
>>> configuration = NanoChatConfig()
>>> # Initializing a model from the NanoChat style configuration
>>> model = NanoChatModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "nanochat"
keys_to_ignore_at_inference = ["past_key_values"]
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise_rep",
"layers.*.self_attn.k_proj": "colwise_rep",
"layers.*.self_attn.v_proj": "colwise_rep",
"layers.*.self_attn.o_proj": "rowwise_rep",
"layers.*.mlp.fc1": "colwise",
"layers.*.mlp.fc2": "rowwise",
}
def __init__(
self,
vocab_size: int = 50304,
hidden_size: int = 768,
intermediate_size: int | None = 8192,
num_hidden_layers: int = 12,
num_attention_heads: int = 6,
num_key_value_heads: int | None = None,
max_position_embeddings: int = 2048,
hidden_act: str = "relu2",
attention_dropout: float = 0.0,
rms_norm_eps: float = 1e-6,
initializer_range: float = 0.02,
rope_parameters: RopeParameters | dict | None = None,
use_cache: bool = True,
final_logit_softcapping: float | None = 15.0,
attention_bias: bool = False,
bos_token_id: int = 0,
eos_token_id: int = 1,
pad_token_id: int = 1,
tie_word_embeddings: bool = False,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.max_position_embeddings = max_position_embeddings
self.hidden_act = hidden_act
self.attention_dropout = attention_dropout
self.rms_norm_eps = rms_norm_eps
self.initializer_range = initializer_range
self.use_cache = use_cache
self.final_logit_softcapping = final_logit_softcapping
self.attention_bias = attention_bias
self.rope_parameters = rope_parameters
super().__init__(
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
pad_token_id=pad_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["NanoChatConfig"]
| NanoChatConfig |
python | apache__airflow | dev/breeze/src/airflow_breeze/prepare_providers/provider_documentation.py | {
"start": 5736,
"end": 53566
} | class ____(Exception):
"""Raised when user decided to quit."""
TYPE_OF_CHANGE_DESCRIPTION = {
TypeOfChange.DOCUMENTATION: "Documentation only changes - no version change needed, "
"only documentation needs to be updated",
TypeOfChange.BUGFIX: "Bugfix changes only - bump in PATCHLEVEL version needed",
TypeOfChange.FEATURE: "Feature changes - bump in MINOR version needed",
TypeOfChange.BREAKING_CHANGE: "Breaking changes - bump in MAJOR version needed",
TypeOfChange.MISC: "Miscellaneous changes - bump in PATCHLEVEL version needed",
TypeOfChange.MIN_AIRFLOW_VERSION_BUMP: "Airflow version bump change - bump in MINOR version needed",
}
def classification_result(provider_id, changed_files):
    """Classify the files of a change for one provider.

    Returns ``"documentation"`` when only docs (optionally mixed with
    tests/examples) changed, ``"test_or_example_only"`` when only tests or
    example DAGs changed, and ``"other"`` in every remaining case (including
    when no file belongs to the provider at all).
    """
    provider_key = provider_id.replace(".", "/")
    relevant = [path for path in changed_files if provider_key in path]
    if not relevant:
        return "other"

    def _doc_file(path):
        # Provider docs live under providers/<name>/docs/ and are .rst files.
        return re.match(r"^providers/.+/docs/", path) and path.endswith(".rst")

    def _aux_file(path):
        # Tests or example DAGs — neither affects released runtime code.
        return re.match(r"^providers/.+/tests/", path) or re.match(
            r"^providers/.+/src/airflow/providers/.+/example_dags/", path
        )

    doc_flags = [_doc_file(path) for path in relevant]
    aux_flags = [_aux_file(path) for path in relevant]
    if all(doc_flags):
        return "documentation"
    if all(aux_flags):
        return "test_or_example_only"
    code_present = any(not (doc or aux) for doc, aux in zip(doc_flags, aux_flags))
    if not code_present and (any(doc_flags) or any(aux_flags)):
        return "documentation"
    return "other"
def classify_provider_pr_files(provider_id: str, commit_hash: str) -> str:
    """
    Classify a provider commit based on changed files.

    - Returns 'documentation' if any provider doc files are present.
    - Returns 'test_or_example_only' if only test/example DAGs changed.
    - Returns 'other' otherwise.

    :param provider_id: provider package id (dots are treated as path separators)
    :param commit_hash: hash of the commit to inspect
    """
    try:
        # List files touched by this single commit (diff against its first parent).
        result = run_command(
            ["git", "diff", "--name-only", f"{commit_hash}^", commit_hash],
            cwd=AIRFLOW_ROOT_PATH,
            capture_output=True,
            text=True,
            check=True,
        )
        changed_files = result.stdout.strip().splitlines()
    except subprocess.CalledProcessError:
        # safe to return other here — e.g. the commit has no parent or is unknown;
        # "other" forces manual classification rather than a wrong automatic one.
        return "other"
    return classification_result(provider_id, changed_files)
def _get_git_log_command(
folder_paths: list[Path] | None = None, from_commit: str | None = None, to_commit: str | None = None
) -> list[str]:
"""Get git command to run for the current repo from the current folder.
The current directory should always be the package folder.
:param folder_paths: list of folder paths to check for changes
:param from_commit: if present - base commit from which to start the log from
:param to_commit: if present - final commit which should be the start of the log
:return: git command to run
"""
git_cmd = [
"git",
"log",
"--pretty=format:%H %h %cd %s",
"--date=short",
]
if from_commit and to_commit:
git_cmd.append(f"{from_commit}...{to_commit}")
elif from_commit:
git_cmd.append(from_commit)
elif to_commit:
raise ValueError("It makes no sense to specify to_commit without from_commit.")
folders = [folder_path.as_posix() for folder_path in folder_paths] if folder_paths else ["."]
git_cmd.extend(["--", *folders])
return git_cmd
def _get_change_from_line(line: str, version: str) -> Change:
    """Parse a single ``git log --pretty=format:"%H %h %cd %s"`` line into a Change.

    The line has the form ``FULL_HASH SHORT_HASH DATE SUBJECT`` where only the
    subject may contain spaces — hence ``maxsplit=3``.

    :param line: one line of git log output
    :param version: provider version the change belongs to
    :return: parsed Change; ``pr`` is the PR number extracted from the subject, if any
    """
    split_line = line.split(" ", maxsplit=3)
    message = split_line[3]
    pr = None
    pr_match = PR_PATTERN.match(message)
    if pr_match:
        pr = pr_match.group(1)
    return Change(
        full_hash=split_line[0],
        short_hash=split_line[1],
        date=split_line[2],
        version=version,
        message=message,
        # Single backticks break RST rendering; "&#39;" is the HTML entity for an
        # apostrophe that sometimes leaks into commit subjects (was "&39;" — a typo
        # that never matched the actual entity).
        message_without_backticks=message.replace("`", "'").replace("&#39;", "'"),
        pr=pr,
    )
def _convert_git_changes_to_table(
    version: str, changes: str, base_url: str, markdown: bool = True
) -> tuple[str, list[Change]]:
    """
    Converts list of changes from its string form to markdown/RST table and array of change information

    The changes are in the form of multiple lines where each line consists of:
    FULL_COMMIT_HASH SHORT_COMMIT_HASH COMMIT_DATE COMMIT_SUBJECT

    The subject can contain spaces but one of the preceding values can, so we can make split
    3 times on spaces to break it up.
    :param version: Version from which the changes are
    :param changes: list of changes in a form of multiple-line string
    :param base_url: base url for the commit URL
    :param markdown: if True, Markdown format is used else rst
    :return: formatted table + list of changes (starting from the latest)
    """
    from tabulate import tabulate

    lines = changes.splitlines()
    headers = ["Commit", "Committed", "Subject"]
    table_data = []
    changes_list: list[Change] = []
    for line in lines:
        if line == "":
            continue
        change = _get_change_from_line(line, version)
        # Each row: linked short hash, commit date, escaped subject.
        table_data.append(
            (
                f"[{change.short_hash}]({base_url}{change.full_hash})"
                if markdown
                else f"`{change.short_hash} <{base_url}{change.full_hash}>`__",
                change.date,
                f"`{change.message_without_backticks}`"
                if markdown
                else f"``{change.message_without_backticks}``",
            )
        )
        changes_list.append(change)
    header = ""
    if not table_data:
        return header, []
    table = tabulate(
        table_data,
        headers=headers,
        tablefmt="pipe" if markdown else "rst",
        colalign=("left", "center", "left"),
    )
    if not markdown:
        # RST output gets a version section header (underlined with dots) and
        # the date of the most recent change on top of the table.
        header += f"\n\n{version}\n" + "." * len(version) + "\n\n"
        release_date = table_data[0][1]
        header += f"Latest change: {release_date}\n\n"
    return header + table, changes_list
def _print_changes_table(changes_table):
    """Print the RST changes table to the console with syntax highlighting."""
    syntax = Syntax(changes_table, "rst", theme="ansi_dark")
    get_console().print(syntax)
def _get_all_changes_for_package(
    provider_id: str,
    base_branch: str,
    reapply_templates_only: bool,
    only_min_version_update: bool,
) -> tuple[bool, list[list[Change]], str]:
    """Retrieves all changes for the package.

    :param provider_id: provider package id
    :param base_branch: base branch to check changes in apache remote for changes
    :param reapply_templates_only: whether to only reapply templates without bumping the version
    :param only_min_version_update: whether we only update the minimum Airflow version (quieter output)
    :return tuple of:
        bool (whether to proceed with update)
        list of lists of changes for all past versions (might be empty)
        the same list converted to string RST table
    """
    provider_details = get_provider_details(provider_id)
    current_version = provider_details.versions[0]
    current_tag_no_suffix = get_version_tag(current_version, provider_id)
    if get_verbose():
        get_console().print(f"[info]Checking if tag '{current_tag_no_suffix}' exist.")
    # returncode 0 means the tag exists, i.e. the current version was already released.
    result = run_command(
        ["git", "rev-parse", current_tag_no_suffix],
        cwd=AIRFLOW_ROOT_PATH,
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        check=False,
    )
    # Include old provider locations so history survives repo restructuring.
    providers_folder_paths_for_git_commit_retrieval = [
        provider_details.root_provider_path,
        *provider_details.possible_old_provider_paths,
    ]
    if not reapply_templates_only and result.returncode == 0:
        if get_verbose():
            get_console().print(f"[info]The tag {current_tag_no_suffix} exists.")
        # The tag already exists
        result = run_command(
            _get_git_log_command(
                providers_folder_paths_for_git_commit_retrieval,
                f"{HTTPS_REMOTE}/{base_branch}",
                current_tag_no_suffix,
            ),
            cwd=AIRFLOW_ROOT_PATH,
            capture_output=True,
            text=True,
            check=True,
        )
        changes = result.stdout.strip()
        if changes:
            provider_details = get_provider_details(provider_id)
            doc_only_change_file = (
                provider_details.root_provider_path / "docs" / ".latest-doc-only-change.txt"
            )
            if doc_only_change_file.exists():
                # A previous run recorded the hash of the last doc-only change;
                # use it to detect whether anything of substance happened since.
                last_doc_only_hash = doc_only_change_file.read_text().strip()
                try:
                    result = run_command(
                        _get_git_log_command(
                            providers_folder_paths_for_git_commit_retrieval,
                            f"{HTTPS_REMOTE}/{base_branch}",
                            last_doc_only_hash,
                        ),
                        cwd=AIRFLOW_ROOT_PATH,
                        capture_output=True,
                        text=True,
                        check=True,
                    )
                    changes_since_last_doc_only_check = result.stdout.strip()
                    if not changes_since_last_doc_only_check:
                        get_console().print(
                            "\n[warning]The provider has doc-only changes since the last release. Skipping[/]"
                        )
                        raise PrepareReleaseDocsChangesOnlyException()
                    if len(changes.splitlines()) > len(changes_since_last_doc_only_check.splitlines()):
                        # if doc-only was released after previous release - use it as starting point
                        # but if before - stay with the releases from last tag.
                        changes = changes_since_last_doc_only_check
                except subprocess.CalledProcessError:
                    # ignore when the commit mentioned as last doc-only change is obsolete
                    pass
            if not only_min_version_update:
                get_console().print(
                    f"[warning]The provider {provider_id} has {len(changes.splitlines())} "
                    f"changes since last release[/]"
                )
                get_console().print(f"\n[info]Provider: {provider_id}[/]\n")
            changes_table, array_of_changes = _convert_git_changes_to_table(
                f"NEXT VERSION AFTER + {provider_details.versions[0]}",
                changes,
                base_url="https://github.com/apache/airflow/commit/",
                markdown=False,
            )
            if not only_min_version_update:
                _print_changes_table(changes_table)
            # proceed=False: the caller must first classify changes / bump the version.
            return False, [array_of_changes], changes_table
        if not only_min_version_update:
            get_console().print(f"[info]No changes for {provider_id}")
        return False, [], ""
    if len(provider_details.versions) == 1:
        get_console().print(
            f"[info]The provider '{provider_id}' has never been released but it is ready to release!\n"
        )
    else:
        get_console().print(f"[info]New version of the '{provider_id}' package is ready to be released!\n")
    # Walk version history newest-to-oldest, collecting changes between consecutive tags.
    next_version_tag = f"{HTTPS_REMOTE}/{base_branch}"
    changes_table = ""
    current_version = provider_details.versions[0]
    list_of_list_of_changes: list[list[Change]] = []
    for version in provider_details.versions[1:]:
        version_tag = get_version_tag(version, provider_id)
        result = run_command(
            _get_git_log_command(
                providers_folder_paths_for_git_commit_retrieval, next_version_tag, version_tag
            ),
            cwd=AIRFLOW_ROOT_PATH,
            capture_output=True,
            text=True,
            check=True,
        )
        changes = result.stdout.strip()
        changes_table_for_version, array_of_changes_for_version = _convert_git_changes_to_table(
            current_version, changes, base_url="https://github.com/apache/airflow/commit/", markdown=False
        )
        changes_table += changes_table_for_version
        list_of_list_of_changes.append(array_of_changes_for_version)
        next_version_tag = version_tag
        current_version = version
    # Finally everything older than the oldest tag.
    result = run_command(
        _get_git_log_command(providers_folder_paths_for_git_commit_retrieval, next_version_tag),
        cwd=provider_details.root_provider_path,
        capture_output=True,
        text=True,
        check=True,
    )
    changes = result.stdout.strip()
    changes_table_for_version, array_of_changes_for_version = _convert_git_changes_to_table(
        current_version, changes, base_url="https://github.com/apache/airflow/commit/", markdown=False
    )
    changes_table += changes_table_for_version
    return True, list_of_list_of_changes, changes_table
def _ask_the_user_for_the_type_of_changes(non_interactive: bool) -> TypeOfChange:
    """Ask user to specify type of changes (case-insensitive).

    :param non_interactive: when True, pick a random answer (used on CI to
        exercise all code paths without a terminal)
    :return: Type of change.
    """
    # have to do that while waiting for Python 3.11+ StrEnum [*TypeOfChange] :(
    type_of_changes_array = [t.value for t in TypeOfChange]
    if non_interactive:
        # Simulate all possible non-terminal answers - this is useful for running on CI where we want to
        # Test all possibilities.
        return TypeOfChange(random.choice(type_of_changes_array))
    display_answers = "/".join(type_of_changes_array) + "/q"
    while True:
        get_console().print(
            "[warning]Type of change (d)ocumentation, (b)ugfix, (f)eature, (x)breaking "
            f"change, (m)isc, (s)kip, airflow_min_(v)ersion_bump (q)uit [{display_answers}]?[/] ",
            end="",
        )
        try:
            given_answer = input("").lower()
        except KeyboardInterrupt:
            # Ctrl-C counts as quitting, same as answering "q".
            raise PrepareReleaseDocsUserQuitException()
        if given_answer == "q":
            raise PrepareReleaseDocsUserQuitException()
        if given_answer in type_of_changes_array:
            return TypeOfChange(given_answer)
        get_console().print(
            f"[warning] Wrong answer given: '{given_answer}'. Should be one of {display_answers}"
        )
def _mark_latest_changes_as_documentation_only(
    provider_id: str, list_of_list_of_latest_changes: list[list[Change]]
):
    """Record the newest change hash as doc-only and abort documentation generation.

    Writes the hash into ``docs/.latest-doc-only-change.txt`` so the next run of
    ``_get_all_changes_for_package`` can skip the provider if nothing of
    substance landed afterwards. Always raises
    PrepareReleaseDocsChangesOnlyException.
    """
    latest_change = list_of_list_of_latest_changes[0][0]
    provider_details = get_provider_details(provider_id=provider_id)
    get_console().print(
        f"[special]Marking last change: {latest_change.short_hash} and all above "
        f"changes since the last release as doc-only changes!"
    )
    latest_doc_onl_change_file = provider_details.root_provider_path / "docs" / ".latest-doc-only-change.txt"
    latest_doc_onl_change_file.write_text(latest_change.full_hash + "\n")
    raise PrepareReleaseDocsChangesOnlyException()
# Indices into a semver release tuple (MAJOR.MINOR.PATCHLEVEL) used by bump_version.
VERSION_MAJOR_INDEX = 0
VERSION_MINOR_INDEX = 1
VERSION_PATCHLEVEL_INDEX = 2
def bump_version(v: Version, index: int) -> Version:
    """Return *v* with the release component at *index* incremented.

    All components lower than *index* are reset to zero; pre-release and dev
    markers of the original version are carried over unchanged.
    """
    parts = list(v.release)
    parts[index] += 1
    # Zero out every component below the bumped one (no-op for a patch bump).
    for lower_index in range(index + 1, VERSION_PATCHLEVEL_INDEX + 1):
        parts[lower_index] = 0
    # Preserve pre-release and dev suffixes, e.g. "rc1" and ".dev0".
    suffix = ""
    if v.pre:
        suffix += f"{v.pre[0]}{v.pre[1]}"
    if v.dev is not None:
        suffix += f".dev{v.dev}"
    return parse(
        f"{parts[VERSION_MAJOR_INDEX]}.{parts[VERSION_MINOR_INDEX]}.{parts[VERSION_PATCHLEVEL_INDEX]}{suffix}"
    )
def _update_version_in_provider_yaml(
    provider_id: str, type_of_change: TypeOfChange, min_airflow_version_bump: bool = False
) -> tuple[bool, bool, str]:
    """
    Updates provider version based on the type of change selected by the user

    :param type_of_change: type of change selected
    :param provider_id: provider package
    :param min_airflow_version_bump: if set, ensure that the version bump is at least feature version.
    :return: tuple of three values: (with_breaking_change, maybe_with_new_features, original_text)
    """
    provider_details = get_provider_details(provider_id)
    version = provider_details.versions[0]
    v = parse(version)
    with_breaking_changes = False
    maybe_with_new_features = False
    if type_of_change == TypeOfChange.BREAKING_CHANGE:
        v = bump_version(v, VERSION_MAJOR_INDEX)
        with_breaking_changes = True
        # we do not know, but breaking changes may also contain new features
        maybe_with_new_features = True
    elif type_of_change == TypeOfChange.FEATURE:
        v = bump_version(v, VERSION_MINOR_INDEX)
        maybe_with_new_features = True
    elif type_of_change == TypeOfChange.BUGFIX:
        v = bump_version(v, VERSION_PATCHLEVEL_INDEX)
    elif type_of_change == TypeOfChange.MISC:
        v = bump_version(v, VERSION_PATCHLEVEL_INDEX)
        if min_airflow_version_bump:
            # A min-Airflow-version bump is a feature-level change, so the MINOR
            # component must move even for an otherwise MISC change.
            v = bump_version(v, VERSION_MINOR_INDEX)
    provider_yaml_path = get_provider_yaml(provider_id)
    original_provider_yaml_content = provider_yaml_path.read_text()
    # Insert the new version as the first entry of the "versions:" list.
    # NOTE: the list item must be indented with two spaces to align with the
    # existing entries in provider.yaml (a single space would break the YAML
    # sequence indentation).
    updated_provider_yaml_content = re.sub(
        r"^versions:", f"versions:\n  - {v}", original_provider_yaml_content, count=1, flags=re.MULTILINE
    )
    provider_yaml_path.write_text(updated_provider_yaml_content)
    get_console().print(f"[special]Bumped version to {v}\n")
    return with_breaking_changes, maybe_with_new_features, original_provider_yaml_content
def _update_source_date_epoch_in_provider_yaml(
    provider_id: str,
) -> None:
    """
    Updates source date epoch in provider yaml that then can be used to generate reproducible packages.

    :param provider_id: provider package
    """
    provider_yaml_path = get_provider_yaml(provider_id)
    original_text = provider_yaml_path.read_text()
    # Current UNIX timestamp becomes the SOURCE_DATE_EPOCH for reproducible builds.
    source_date_epoch = int(time())
    new_text = re.sub(
        r"source-date-epoch: [0-9]*", f"source-date-epoch: {source_date_epoch}", original_text, 1
    )
    provider_yaml_path.write_text(new_text)
    # Cached provider metadata is now stale — reload it from the modified file.
    refresh_provider_metadata_from_yaml_file(provider_yaml_path)
    get_console().print(f"[special]Updated source-date-epoch to {source_date_epoch}\n")
def _verify_changelog_exists(package: str) -> Path:
    """Return the provider's changelog path, exiting with instructions if it is missing.

    :param package: provider package id
    :return: path to the existing ``docs/changelog.rst``
    """
    provider_details = get_provider_details(package)
    changelog_path = Path(provider_details.root_provider_path) / "docs" / "changelog.rst"
    if not os.path.isfile(changelog_path):
        # Print a ready-to-copy initial changelog so the user can create the file.
        get_console().print(f"\n[error]ERROR: Missing {changelog_path}[/]\n")
        get_console().print("[info]Please add the file with initial content:")
        get_console().print("----- START COPYING AFTER THIS LINE ------- ")
        import jinja2

        processed_changelog = jinja2.Template(INITIAL_CHANGELOG_CONTENT, autoescape=True).render(
            package_name=provider_details.pypi_package_name,
        )
        syntax = Syntax(
            processed_changelog,
            "rst",
            theme="ansi_dark",
        )
        get_console().print(syntax)
        get_console().print("----- END COPYING BEFORE THIS LINE ------- ")
        sys.exit(1)
    return changelog_path
def _get_additional_distribution_info(provider_distribution_path: Path) -> str:
"""Returns additional info for the package.
:param provider_distribution_path: path for the package
:return: additional information for the path (empty string if missing)
"""
additional_info_file_path = provider_distribution_path / "ADDITIONAL_INFO.md"
if additional_info_file_path.is_file():
additional_info = additional_info_file_path.read_text()
additional_info_lines = additional_info.splitlines(keepends=True)
result = ""
skip_comment = True
for line in additional_info_lines:
if line.startswith(" -->"):
skip_comment = False
elif not skip_comment:
result += line
return result
return ""
def replace_content(file_path: Path, old_text: str, new_text: str, provider_id: str):
    """Write *new_text* to *file_path* (if changed) and show a colored diff.

    :param file_path: target file to overwrite
    :param old_text: previous content of the file ("" when the file was absent)
    :param new_text: content to write
    :param provider_id: provider id, used only for console messages
    """
    if new_text != old_text:
        # Keep a temporary copy of the old content so we can diff after writing.
        _, temp_file_path = tempfile.mkstemp()
        try:
            if file_path.is_file():
                copyfile(file_path, temp_file_path)
            file_path.write_text(new_text)
            get_console().print(f"\n[info]Generated {file_path} file for the {provider_id} provider\n")
            if old_text != "":
                run_command(["diff", "--color=always", temp_file_path, file_path.as_posix()], check=False)
        finally:
            os.unlink(temp_file_path)
def _update_file(
    context: dict[str, Any],
    template_name: str,
    extension: str,
    file_name: str,
    provider_id: str,
    target_path: Path,
    regenerate_missing_docs: bool,
) -> None:
    """Render one documentation file from a template, then validate the result.

    Validation: the index.rst must link to the generated file, no single
    backticks may remain (they break RST), and the file must pass rst linting.

    :param context: jinja context for the template
    :param template_name: name of the template to render
    :param extension: extension of the rendered file
    :param file_name: name of the generated file inside *target_path*
    :param provider_id: provider id (used for messages)
    :param target_path: documentation folder of the provider
    :param regenerate_missing_docs: when True, only generate files that do not exist yet
    :raises PrepareReleaseDocsErrorOccurredException: on missing index or validation failure
    """
    target_file_path = target_path / file_name
    if regenerate_missing_docs and target_file_path.exists():
        if get_verbose():
            get_console().print(
                f"[warnings]The {target_file_path} exists - not regenerating it "
                f"for the provider {provider_id}[/]"
            )
        return
    new_text = render_template(
        template_name=template_name, context=context, extension=extension, keep_trailing_newline=True
    )
    target_file_path = target_path / file_name
    old_text = ""
    if target_file_path.is_file():
        old_text = target_file_path.read_text()
    replace_content(target_file_path, old_text, new_text, provider_id)
    index_path = target_path / "index.rst"
    if not index_path.exists():
        get_console().print(f"[error]ERROR! The index must exist for the provider docs: {index_path}")
        raise PrepareReleaseDocsErrorOccurredException()

    # The toctree entry references the file by its stem, e.g. "<commits>".
    expected_link_in_index = f"<{file_name.split('.')[0]}>"
    if expected_link_in_index not in index_path.read_text():
        get_console().print(
            f"\n[error]ERROR! The {index_path} must contain "
            f"link to the generated documentation:[/]\n\n"
            f"[warning]{expected_link_in_index}[/]\n\n"
            f"[info]Please make sure to add it to {index_path}.\n"
        )

    get_console().print(f"[info]Checking for backticks correctly generated in: {target_file_path}")
    match = BACKTICKS_CHECK.search(target_file_path.read_text())
    if match:
        get_console().print(
            f"\n[error]ERROR: Single backticks (`) found in {target_file_path}:[/]\n\n"
            f"[warning]{match.group(0)}[/]\n\n"
            f"[info]Please fix them by replacing with double backticks (``).[/]\n"
        )
        raise PrepareReleaseDocsErrorOccurredException()

    get_console().print(f"Linting: {target_file_path}")
    import restructuredtext_lint

    errors = restructuredtext_lint.lint_file(target_file_path.as_posix())
    real_errors = False
    if errors:
        for error in errors:
            # Skip known issue: linter with doc role similar to https://github.com/OCA/pylint-odoo/issues/38
            if (
                'No role entry for "doc"' in error.message
                or 'Unknown interpreted text role "doc"' in error.message
            ):
                continue
            if "airflow-providers-commits" in error.message:
                continue
            real_errors = True
            get_console().print(f"* [red] {error.message}")
        if real_errors:
            get_console().print(f"\n[red] Errors found in {target_file_path}")
            raise PrepareReleaseDocsErrorOccurredException()

    get_console().print(f"[success]Generated {target_file_path} for {provider_id} is OK[/]")
    return
def _update_commits_rst(
    context: dict[str, Any],
    provider_id: str,
    target_path: Path,
    regenerate_missing_docs: bool,
) -> None:
    """Generate (and validate) the provider's ``commits.rst`` from its template."""
    _update_file(
        context=context,
        template_name="PROVIDER_COMMITS",
        extension=".rst",
        file_name="commits.rst",
        provider_id=provider_id,
        target_path=target_path,
        regenerate_missing_docs=regenerate_missing_docs,
    )
def update_release_notes(
    provider_id: str,
    reapply_templates_only: bool,
    base_branch: str,
    regenerate_missing_docs: bool,
    non_interactive: bool,
    only_min_version_update: bool,
) -> tuple[bool, bool, bool]:
    """Updates generated files.

    This includes the readme, changes, and provider.yaml files.

    :param provider_id: id of the package
    :param reapply_templates_only: regenerate already released documentation only - without updating versions
    :param base_branch: base branch to check changes in apache remote for changes
    :param regenerate_missing_docs: whether to regenerate missing docs
    :param non_interactive: run in non-interactive mode (useful for CI)
    :param only_min_version_update: whether to only update min version
    :return: tuple of three bools: (with_breaking_change, maybe_with_new_features, with_min_airflow_version_bump)
    """
    proceed, list_of_list_of_changes, changes_as_table = _get_all_changes_for_package(
        provider_id=provider_id,
        base_branch=base_branch,
        reapply_templates_only=reapply_templates_only,
        only_min_version_update=only_min_version_update,
    )
    with_breaking_changes = False
    maybe_with_new_features = False
    original_provider_yaml_content: str | None = None
    marked_for_release = False
    with_min_airflow_version_bump = False
    if not reapply_templates_only:
        if proceed:
            # Current version was never tagged — confirm releasing it as-is.
            if non_interactive:
                answer = Answer.YES
            else:
                provider_details = get_provider_details(provider_id)
                current_release_version = provider_details.versions[0]
                answer = user_confirm(
                    f"Provider {provider_id} with "
                    f"version: {current_release_version} marked for release. Proceed?"
                )
            marked_for_release = answer == Answer.YES
            if answer == Answer.NO:
                get_console().print(f"\n[warning]Skipping provider: {provider_id} on user request![/]\n")
                raise PrepareReleaseDocsUserSkippedException()
            if answer == Answer.QUIT:
                raise PrepareReleaseDocsUserQuitException()
        elif not list_of_list_of_changes:
            get_console().print(
                f"\n[warning]Provider: {provider_id} - skipping documentation generation. No changes![/]\n"
            )
            raise PrepareReleaseDocsNoChangesException()
        else:
            # There are changes since the last tag: classify each one, then bump
            # the version according to the most impactful classification.
            answer = user_confirm(f"Does the provider: {provider_id} have any changes apart from 'doc-only'?")
            if answer == Answer.NO:
                _mark_latest_changes_as_documentation_only(provider_id, list_of_list_of_changes)
                return with_breaking_changes, maybe_with_new_features, False

            change_table_len = len(list_of_list_of_changes[0])
            table_iter = 0
            type_of_current_package_changes: list[TypeOfChange] = []
            while table_iter < change_table_len:
                get_console().print()
                formatted_message = format_message_for_classification(
                    list_of_list_of_changes[0][table_iter].message_without_backticks
                )
                change = list_of_list_of_changes[0][table_iter]
                # Try automatic classification from the commit's file list first.
                classification = classify_provider_pr_files(provider_id, change.full_hash)
                if classification == "documentation":
                    get_console().print(
                        f"[green]Automatically classifying change as DOCUMENTATION since it contains only doc changes:[/]\n"
                        f"[blue]{formatted_message}[/]"
                    )
                    type_of_change = TypeOfChange.DOCUMENTATION
                elif classification == "test_or_example_only":
                    get_console().print(
                        f"[green]Automatically classifying change as SKIPPED since it only contains test/example changes:[/]\n"
                        f"[blue]{formatted_message}[/]"
                    )
                    type_of_change = TypeOfChange.SKIP
                else:
                    get_console().print(
                        f"[green]Define the type of change for "
                        f"`{formatted_message}`"
                        f" by referring to the above table[/]"
                    )
                    type_of_change = _ask_the_user_for_the_type_of_changes(non_interactive=non_interactive)
                if type_of_change == TypeOfChange.MIN_AIRFLOW_VERSION_BUMP:
                    with_min_airflow_version_bump = True

                # Remember the classification per commit so the changelog
                # generation can group commits by type later.
                change_hash = list_of_list_of_changes[0][table_iter].short_hash
                SHORT_HASH_TO_TYPE_DICT[change_hash] = type_of_change
                type_of_current_package_changes.append(type_of_change)
                table_iter += 1
                print()
            most_impactful = get_most_impactful_change(type_of_current_package_changes)
            get_console().print(
                f"[info]The version will be bumped because of {most_impactful} kind of change"
            )
            type_of_change = most_impactful
            if type_of_change == TypeOfChange.SKIP:
                raise PrepareReleaseDocsUserSkippedException()
            get_console().print(
                f"[info]Provider {provider_id} has been classified as:[/]\n\n"
                f"[special]{TYPE_OF_CHANGE_DESCRIPTION[type_of_change]}"
            )
            get_console().print()
            bump = False
            if type_of_change == TypeOfChange.MIN_AIRFLOW_VERSION_BUMP:
                bump = True
                # A min-version bump is recorded as MISC but forces a MINOR bump.
                type_of_change = TypeOfChange.MISC
            if type_of_change in [
                TypeOfChange.BUGFIX,
                TypeOfChange.FEATURE,
                TypeOfChange.BREAKING_CHANGE,
                TypeOfChange.MISC,
            ]:
                with_breaking_changes, maybe_with_new_features, original_provider_yaml_content = (
                    _update_version_in_provider_yaml(
                        provider_id=provider_id, type_of_change=type_of_change, min_airflow_version_bump=bump
                    )
                )
                if not reapply_templates_only:
                    _update_source_date_epoch_in_provider_yaml(provider_id)
                # Re-read changes: the new version changes the section headers.
                proceed, list_of_list_of_changes, changes_as_table = _get_all_changes_for_package(
                    provider_id=provider_id,
                    base_branch=base_branch,
                    reapply_templates_only=reapply_templates_only,
                    only_min_version_update=only_min_version_update,
                )
    else:
        if not reapply_templates_only:
            _update_source_date_epoch_in_provider_yaml(provider_id)
    provider_details = get_provider_details(provider_id)
    current_release_version = provider_details.versions[0]
    if (not non_interactive) and (not marked_for_release):
        answer = user_confirm(
            f"Do you want to leave the version for {provider_id} with version: "
            f"{current_release_version} as is for the release?"
        )
    else:
        answer = Answer.YES
    provider_yaml_path = get_provider_yaml(provider_id)
    if answer == Answer.NO:
        # User rejected the computed version: restore provider.yaml and ask
        # for a manual classification instead.
        if original_provider_yaml_content is not None:
            # Restore original content of the provider.yaml
            provider_yaml_path.write_text(original_provider_yaml_content)
            clear_cache_for_provider_metadata(provider_yaml_path=provider_yaml_path)
        type_of_change = _ask_the_user_for_the_type_of_changes(non_interactive=False)
        if type_of_change == TypeOfChange.SKIP:
            raise PrepareReleaseDocsUserSkippedException()
        get_console().print(
            f"[info]Provider {provider_id} has been classified as:[/]\n\n"
            f"[special]{TYPE_OF_CHANGE_DESCRIPTION[type_of_change]}"
        )
        get_console().print()
        if type_of_change == TypeOfChange.DOCUMENTATION:
            _mark_latest_changes_as_documentation_only(provider_id, list_of_list_of_changes)
        elif type_of_change in [
            TypeOfChange.BUGFIX,
            TypeOfChange.FEATURE,
            TypeOfChange.BREAKING_CHANGE,
            TypeOfChange.MISC,
        ]:
            # NOTE(review): MIN_AIRFLOW_VERSION_BUMP can never be true inside
            # this elif branch (it is excluded by the list above) — the bump
            # flag appears to always stay False here; confirm intent upstream.
            bump = False
            if type_of_change == TypeOfChange.MIN_AIRFLOW_VERSION_BUMP:
                bump = True
                type_of_change = TypeOfChange.MISC
            with_breaking_changes, maybe_with_new_features, _ = _update_version_in_provider_yaml(
                provider_id=provider_id,
                type_of_change=type_of_change,
                min_airflow_version_bump=bump,
            )
            if not reapply_templates_only:
                _update_source_date_epoch_in_provider_yaml(provider_id)
            proceed, list_of_list_of_changes, changes_as_table = _get_all_changes_for_package(
                provider_id=provider_id,
                base_branch=base_branch,
                reapply_templates_only=reapply_templates_only,
                only_min_version_update=only_min_version_update,
            )
    else:
        get_console().print(
            f"[info] Proceeding with provider: {provider_id} version as {current_release_version}"
        )
    provider_details = get_provider_details(provider_id)
    _verify_changelog_exists(provider_details.provider_id)
    jinja_context = get_provider_documentation_jinja_context(
        provider_id=provider_id,
        with_breaking_changes=with_breaking_changes,
        maybe_with_new_features=maybe_with_new_features,
    )
    jinja_context["DETAILED_CHANGES_RST"] = changes_as_table
    jinja_context["DETAILED_CHANGES_PRESENT"] = bool(changes_as_table)
    _update_commits_rst(
        jinja_context,
        provider_id,
        provider_details.documentation_provider_distribution_path,
        regenerate_missing_docs,
    )
    return with_breaking_changes, maybe_with_new_features, with_min_airflow_version_bump
def _find_insertion_index_for_version(content: list[str], version: str) -> tuple[int, bool]:
"""Finds insertion index for the specified version from the .rst changelog content.
:param content: changelog split into separate lines
:param version: version to look for
:return: A 2-tuple. The first item indicates the insertion index, while the
second is a boolean indicating whether to append (False) or insert (True)
to the changelog.
"""
changelog_found = False
skip_next_line = False
index = 0
for index, line in enumerate(content):
if not changelog_found and line.strip() == version:
changelog_found = True
skip_next_line = True
elif not skip_next_line and line and all(char == "." for char in line):
return index - 2, changelog_found
else:
skip_next_line = False
return index, changelog_found
def _get_changes_classified(
    changes: list[Change], with_breaking_changes: bool, maybe_with_new_features: bool
) -> ClassifiedChanges:
    """
    Pre-classifies changes based on their type_of_change attribute derived based on release manager's call.

    The classification is based on the decision made by the release manager when classifying the release.
    If we switch to semantic commits, this process could be automated. This list is still supposed to be
    manually reviewed and re-classified by the release manager if needed.

    :param changes: list of changes to be classified
    :param with_breaking_changes: whether to include breaking changes in the classification
    :param maybe_with_new_features: whether to include new features in the classification
    :return: ClassifiedChanges object containing changes classified into fixes, features, breaking changes,
        misc.
    """
    classified_changes = ClassifiedChanges()
    for change in changes:
        # Look up the per-commit classification recorded during the interactive run.
        type_of_change = None
        if change.short_hash in SHORT_HASH_TO_TYPE_DICT:
            type_of_change = SHORT_HASH_TO_TYPE_DICT[change.short_hash]

        if type_of_change == TypeOfChange.BUGFIX:
            classified_changes.fixes.append(change)
        elif type_of_change == TypeOfChange.MISC or type_of_change == TypeOfChange.MIN_AIRFLOW_VERSION_BUMP:
            classified_changes.misc.append(change)
        elif type_of_change == TypeOfChange.FEATURE and maybe_with_new_features:
            classified_changes.features.append(change)
        elif type_of_change == TypeOfChange.BREAKING_CHANGE and with_breaking_changes:
            classified_changes.breaking_changes.append(change)
        elif type_of_change == TypeOfChange.DOCUMENTATION:
            classified_changes.docs.append(change)
        else:
            # Unclassified commits (or features/breaking changes suppressed by the
            # flags above) end up in "other".
            classified_changes.other.append(change)
    return classified_changes
def _generate_new_changelog(
    package_id: str,
    provider_details: ProviderPackageDetails,
    changes: list[list[Change]],
    context: dict[str, Any],
    with_breaking_changes: bool,
    maybe_with_new_features: bool,
    with_min_airflow_version_bump: bool = False,
):
    """Insert or append the latest version's entries into the provider changelog.

    When the latest version already has a changelog section, only changes with
    not-yet-mentioned PRs are appended to it; otherwise a fresh section for the
    version is generated above the existing content. Prints a diff of the edit.
    """
    latest_version = provider_details.versions[0]
    current_changelog = provider_details.changelog_path.read_text()
    current_changelog_lines = current_changelog.splitlines()
    insertion_index, append = _find_insertion_index_for_version(current_changelog_lines, latest_version)
    new_context = deepcopy(context)
    if append:
        # Section for the latest version exists — only add changes whose PR
        # number is not mentioned in the changelog yet.
        if not changes:
            get_console().print(
                f"[success]The provider {package_id} changelog for `{latest_version}` "
                "has first release. Not updating the changelog.[/]"
            )
            return
        new_changes = [
            change for change in changes[0] if change.pr and "(#" + change.pr + ")" not in current_changelog
        ]
        if not new_changes:
            get_console().print(
                f"[success]The provider {package_id} changelog for `{latest_version}` "
                "has no new changes. Not updating the changelog.[/]"
            )
            return
        new_context["new_changes"] = new_changes
        generated_new_changelog = render_template(
            template_name="UPDATE_CHANGELOG", context=new_context, extension=".rst"
        )
    else:
        if changes:
            classified_changes = _get_changes_classified(
                changes[0],
                with_breaking_changes=with_breaking_changes,
                maybe_with_new_features=maybe_with_new_features,
            )
        else:
            # change log exist but without version 1.0.0 entry
            classified_changes = None
        new_context.update(
            {
                "version": latest_version,
                "version_header": "." * len(latest_version),
                "classified_changes": classified_changes,
                "min_airflow_version_bump": with_min_airflow_version_bump,
            }
        )
        generated_new_changelog = render_template(
            template_name="CHANGELOG", context=new_context, extension=".rst"
        )
    # Splice the rendered fragment into the changelog at the insertion point.
    new_changelog_lines = current_changelog_lines[0:insertion_index]
    new_changelog_lines.extend(generated_new_changelog.splitlines())
    new_changelog_lines.extend(current_changelog_lines[insertion_index:])
    diff = "\n".join(difflib.context_diff(current_changelog_lines, new_changelog_lines, n=5))
    syntax = Syntax(diff, "diff")
    get_console().print(syntax)
    if not append:
        get_console().print(
            f"[success]The provider {package_id} changelog for `{latest_version}` "
            "version is missing. Generating fresh changelog.[/]"
        )
    else:
        get_console().print(
            f"[success]Appending the provider {package_id} changelog for `{latest_version}` version.[/]"
        )
    provider_details.changelog_path.write_text("\n".join(new_changelog_lines) + "\n")
def _update_index_rst(
    context: dict[str, Any],
    provider_id: str,
    target_path: Path,
):
    """Regenerate everything below the auto-generated marker in the provider index.rst.

    Hand-written content above AUTOMATICALLY_GENERATED_MARKER is preserved;
    everything from the marker on is replaced with the rendered template.
    """
    index_update = render_template(
        template_name="PROVIDER_INDEX", context=context, extension=".rst", keep_trailing_newline=True
    )
    index_file_path = target_path / "index.rst"
    old_text = ""
    if index_file_path.is_file():
        old_text = index_file_path.read_text()
    new_text = deepcopy(old_text)
    lines = old_text.splitlines(keepends=False)
    for index, line in enumerate(lines):
        if AUTOMATICALLY_GENERATED_MARKER in line:
            # Keep user content before the marker, then re-emit marker + template.
            new_text = "\n".join(lines[:index])
            new_text += "\n" + AUTOMATICALLY_GENERATED_CONTENT + "\n"
            new_text += index_update
    replace_content(index_file_path, old_text, new_text, provider_id)
def get_provider_documentation_jinja_context(
provider_id: str, with_breaking_changes: bool, maybe_with_new_features: bool
) -> dict[str, Any]:
provider_details = get_provider_details(provider_id)
jinja_context = get_provider_jinja_context(
provider_id=provider_id,
current_release_version=provider_details.versions[0],
version_suffix="",
)
jinja_context["WITH_BREAKING_CHANGES"] = with_breaking_changes
jinja_context["MAYBE_WITH_NEW_FEATURES"] = maybe_with_new_features
jinja_context["ADDITIONAL_INFO"] = (
_get_additional_distribution_info(provider_distribution_path=provider_details.root_provider_path),
)
return jinja_context
def update_changelog(
package_id: str,
base_branch: str,
reapply_templates_only: bool,
with_breaking_changes: bool,
maybe_with_new_features: bool,
only_min_version_update: bool,
with_min_airflow_version_bump: bool,
):
"""Internal update changelog method.
:param package_id: package id
:param base_branch: base branch to check changes in apache remote for changes
:param reapply_templates_only: only reapply templates, no changelog generation
:param with_breaking_changes: whether there are any breaking changes
:param maybe_with_new_features: whether there are any new features
:param only_min_version_update: whether to only update the min version
:param with_min_airflow_version_bump: whether there is a min airflow version bump anywhere
"""
provider_details = get_provider_details(package_id)
jinja_context = get_provider_documentation_jinja_context(
provider_id=package_id,
with_breaking_changes=with_breaking_changes,
maybe_with_new_features=maybe_with_new_features,
)
proceed, changes, _ = _get_all_changes_for_package(
provider_id=package_id,
base_branch=base_branch,
reapply_templates_only=reapply_templates_only,
only_min_version_update=only_min_version_update,
)
if not proceed:
if not only_min_version_update:
get_console().print(
f"[warning]The provider {package_id} is not being released. Skipping the package.[/]"
)
raise PrepareReleaseDocsNoChangesException()
if reapply_templates_only:
get_console().print("[info]Only reapply templates, no changelog update[/]")
else:
_generate_new_changelog(
package_id=package_id,
provider_details=provider_details,
changes=changes,
context=jinja_context,
with_breaking_changes=with_breaking_changes,
maybe_with_new_features=maybe_with_new_features,
with_min_airflow_version_bump=with_min_airflow_version_bump,
)
get_console().print(f"\n[info]Update index.rst for {package_id}\n")
_update_index_rst(jinja_context, package_id, provider_details.documentation_provider_distribution_path)
def _generate_get_provider_info_py(context: dict[str, Any], provider_details: ProviderPackageDetails):
get_provider_info_content = black_format(
render_template(
template_name="get_provider_info",
context=context,
extension=".py",
autoescape=False,
keep_trailing_newline=True,
)
)
get_provider_info_path = provider_details.base_provider_package_path / "get_provider_info.py"
get_provider_info_path.write_text(get_provider_info_content)
get_console().print(
f"[info]Generated {get_provider_info_path} for the {provider_details.provider_id} provider\n"
)
def _generate_docs_conf(context: dict[str, Any], provider_details: ProviderPackageDetails):
docs_conf_content = render_template(
template_name="conf",
context=context,
extension=".py",
keep_trailing_newline=True,
)
docs_conf_path = provider_details.root_provider_path / "docs" / "conf.py"
docs_conf_path.write_text(docs_conf_content)
get_console().print(f"[info]Generated {docs_conf_path} for the {provider_details.provider_id} provider\n")
def _generate_readme_rst(context: dict[str, Any], provider_details: ProviderPackageDetails):
get_provider_readme_content = render_template(
template_name="PROVIDER_README",
context=context,
extension=".rst",
keep_trailing_newline=True,
)
get_provider_readme_path = provider_details.root_provider_path / "README.rst"
get_provider_readme_path.write_text(get_provider_readme_content)
get_console().print(
f"[info]Generated {get_provider_readme_path} for the {provider_details.provider_id} provider\n"
)
def _generate_build_files_for_provider(
context: dict[str, Any],
provider_details: ProviderPackageDetails,
skip_readme: bool,
):
init_py_content = black_format(
render_template(
template_name="PROVIDER__INIT__PY",
context=context,
extension=".py",
keep_trailing_newline=True,
)
)
init_py_path = provider_details.base_provider_package_path / "__init__.py"
init_py_path.write_text(init_py_content)
if not skip_readme:
_generate_readme_rst(context, provider_details)
_generate_docs_conf(context, provider_details)
regenerate_pyproject_toml(context, provider_details, version_suffix=None)
_generate_get_provider_info_py(context, provider_details)
shutil.copy(
BREEZE_SOURCES_PATH / "airflow_breeze" / "templates" / "PROVIDER_LICENSE.txt",
provider_details.root_provider_path / "LICENSE",
)
def _replace_min_airflow_version_in_provider_yaml(
context: dict[str, Any],
provider_yaml_path: Path,
):
provider_yaml_txt = provider_yaml_path.read_text()
provider_yaml_txt = re.sub(
r" {2}- apache-airflow>=.*",
f" - apache-airflow>={context['MIN_AIRFLOW_VERSION']}",
provider_yaml_txt,
)
provider_yaml_path.write_text(provider_yaml_txt)
refresh_provider_metadata_from_yaml_file(provider_yaml_path)
def update_min_airflow_version_and_build_files(
provider_id: str, with_breaking_changes: bool, maybe_with_new_features: bool, skip_readme: bool
):
"""Updates min airflow version in provider yaml and __init__.py
:param provider_id: provider package id
:param with_breaking_changes: whether there are any breaking changes
:param maybe_with_new_features: whether there are any new features
:param skip_readme: skip updating readme: skip_readme
:return:
"""
provider_details = get_provider_details(provider_id)
if provider_details.removed:
return
jinja_context = get_provider_documentation_jinja_context(
provider_id=provider_id,
with_breaking_changes=with_breaking_changes,
maybe_with_new_features=maybe_with_new_features,
)
_generate_build_files_for_provider(
context=jinja_context,
provider_details=provider_details,
skip_readme=skip_readme,
)
_replace_min_airflow_version_in_provider_yaml(
context=jinja_context, provider_yaml_path=provider_details.provider_yaml_path
)
| PrepareReleaseDocsUserQuitException |
python | openai__openai-python | src/openai/resources/chat/completions/completions.py | {
"start": 160117,
"end": 161026
} | class ____:
def __init__(self, completions: Completions) -> None:
self._completions = completions
self.parse = _legacy_response.to_raw_response_wrapper(
completions.parse,
)
self.create = _legacy_response.to_raw_response_wrapper(
completions.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
completions.retrieve,
)
self.update = _legacy_response.to_raw_response_wrapper(
completions.update,
)
self.list = _legacy_response.to_raw_response_wrapper(
completions.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
completions.delete,
)
@cached_property
def messages(self) -> MessagesWithRawResponse:
return MessagesWithRawResponse(self._completions.messages)
| CompletionsWithRawResponse |
python | python__mypy | mypy/erasetype.py | {
"start": 8581,
"end": 10768
} | class ____(TypeTranslator):
"""Removes the Literal[...] type that may be associated with any
Instance types."""
def visit_instance(self, t: Instance) -> Type:
if not t.last_known_value and not t.args:
return t
return t.copy_modified(args=[a.accept(self) for a in t.args], last_known_value=None)
def visit_type_alias_type(self, t: TypeAliasType) -> Type:
# Type aliases can't contain literal values, because they are
# always constructed as explicit types.
return t
def visit_union_type(self, t: UnionType) -> Type:
new = cast(UnionType, super().visit_union_type(t))
# Erasure can result in many duplicate items; merge them.
# Call make_simplified_union only on lists of instance types
# that all have the same fullname, to avoid simplifying too
# much.
instances = [item for item in new.items if isinstance(get_proper_type(item), Instance)]
# Avoid merge in simple cases such as optional types.
if len(instances) > 1:
instances_by_name: dict[str, list[Instance]] = {}
p_new_items = get_proper_types(new.items)
for p_item in p_new_items:
if isinstance(p_item, Instance) and not p_item.args:
instances_by_name.setdefault(p_item.type.fullname, []).append(p_item)
merged: list[Type] = []
for item in new.items:
orig_item = item
item = get_proper_type(item)
if isinstance(item, Instance) and not item.args:
types = instances_by_name.get(item.type.fullname)
if types is not None:
if len(types) == 1:
merged.append(item)
else:
from mypy.typeops import make_simplified_union
merged.append(make_simplified_union(types))
del instances_by_name[item.type.fullname]
else:
merged.append(orig_item)
return UnionType.make_union(merged)
return new
| LastKnownValueEraser |
python | pypa__pipenv | pipenv/patched/pip/_internal/network/auth.py | {
"start": 7453,
"end": 20899
} | class ____(AuthBase):
def __init__(
self,
prompting: bool = True,
index_urls: Optional[List[str]] = None,
keyring_provider: str = "auto",
) -> None:
self.prompting = prompting
self.index_urls = index_urls
self.keyring_provider = keyring_provider # type: ignore[assignment]
self.passwords: Dict[str, AuthInfo] = {}
# When the user is prompted to enter credentials and keyring is
# available, we will offer to save them. If the user accepts,
# this value is set to the credentials they entered. After the
# request authenticates, the caller should call
# ``save_credentials`` to save these.
self._credentials_to_save: Optional[Credentials] = None
@property
def keyring_provider(self) -> KeyRingBaseProvider:
return get_keyring_provider(self._keyring_provider)
@keyring_provider.setter
def keyring_provider(self, provider: str) -> None:
# The free function get_keyring_provider has been decorated with
# functools.cache. If an exception occurs in get_keyring_auth that
# cache will be cleared and keyring disabled, take that into account
# if you want to remove this indirection.
self._keyring_provider = provider
@property
def use_keyring(self) -> bool:
# We won't use keyring when --no-input is passed unless
# a specific provider is requested because it might require
# user interaction
return self.prompting or self._keyring_provider not in ["auto", "disabled"]
def _get_keyring_auth(
self,
url: Optional[str],
username: Optional[str],
) -> Optional[AuthInfo]:
"""Return the tuple auth for a given url from keyring."""
# Do nothing if no url was provided
if not url:
return None
try:
return self.keyring_provider.get_auth_info(url, username)
except Exception as exc:
# Log the full exception (with stacktrace) at debug, so it'll only
# show up when running in verbose mode.
logger.debug("Keyring is skipped due to an exception", exc_info=True)
# Always log a shortened version of the exception.
logger.warning(
"Keyring is skipped due to an exception: %s",
str(exc),
)
global KEYRING_DISABLED
KEYRING_DISABLED = True
get_keyring_provider.cache_clear()
return None
def _get_index_url(self, url: str) -> Optional[str]:
"""Return the original index URL matching the requested URL.
Cached or dynamically generated credentials may work against
the original index URL rather than just the netloc.
The provided url should have had its username and password
removed already. If the original index url had credentials then
they will be included in the return value.
Returns None if no matching index was found, or if --no-index
was specified by the user.
"""
if not url or not self.index_urls:
return None
url = remove_auth_from_url(url).rstrip("/") + "/"
parsed_url = urllib.parse.urlsplit(url)
candidates = []
for index in self.index_urls:
index = index.rstrip("/") + "/"
parsed_index = urllib.parse.urlsplit(remove_auth_from_url(index))
if parsed_url == parsed_index:
return index
if parsed_url.netloc != parsed_index.netloc:
continue
candidate = urllib.parse.urlsplit(index)
candidates.append(candidate)
if not candidates:
return None
candidates.sort(
reverse=True,
key=lambda candidate: commonprefix(
[
parsed_url.path,
candidate.path,
]
).rfind("/"),
)
return urllib.parse.urlunsplit(candidates[0])
def _get_new_credentials(
self,
original_url: str,
*,
allow_netrc: bool = True,
allow_keyring: bool = False,
) -> AuthInfo:
"""Find and return credentials for the specified URL."""
# Split the credentials and netloc from the url.
url, netloc, url_user_password = split_auth_netloc_from_url(
original_url,
)
# Start with the credentials embedded in the url
username, password = url_user_password
if username is not None and password is not None:
logger.debug("Found credentials in url for %s", netloc)
return url_user_password
# Find a matching index url for this request
index_url = self._get_index_url(url)
if index_url:
# Split the credentials from the url.
index_info = split_auth_netloc_from_url(index_url)
if index_info:
index_url, _, index_url_user_password = index_info
logger.debug("Found index url %s", index_url)
# If an index URL was found, try its embedded credentials
if index_url and index_url_user_password[0] is not None:
username, password = index_url_user_password
if username is not None and password is not None:
logger.debug("Found credentials in index url for %s", netloc)
return index_url_user_password
# Get creds from netrc if we still don't have them
if allow_netrc:
netrc_auth = get_netrc_auth(original_url)
if netrc_auth:
logger.debug("Found credentials in netrc for %s", netloc)
return netrc_auth
# If we don't have a password and keyring is available, use it.
if allow_keyring:
# The index url is more specific than the netloc, so try it first
# fmt: off
kr_auth = (
self._get_keyring_auth(index_url, username) or
self._get_keyring_auth(netloc, username)
)
# fmt: on
if kr_auth:
logger.debug("Found credentials in keyring for %s", netloc)
return kr_auth
return username, password
def _get_url_and_credentials(
self, original_url: str
) -> Tuple[str, Optional[str], Optional[str]]:
"""Return the credentials to use for the provided URL.
If allowed, netrc and keyring may be used to obtain the
correct credentials.
Returns (url_without_credentials, username, password). Note
that even if the original URL contains credentials, this
function may return a different username and password.
"""
url, netloc, _ = split_auth_netloc_from_url(original_url)
# Try to get credentials from original url
username, password = self._get_new_credentials(original_url)
# If credentials not found, use any stored credentials for this netloc.
# Do this if either the username or the password is missing.
# This accounts for the situation in which the user has specified
# the username in the index url, but the password comes from keyring.
if (username is None or password is None) and netloc in self.passwords:
un, pw = self.passwords[netloc]
# It is possible that the cached credentials are for a different username,
# in which case the cache should be ignored.
if username is None or username == un:
username, password = un, pw
if username is not None or password is not None:
# Convert the username and password if they're None, so that
# this netloc will show up as "cached" in the conditional above.
# Further, HTTPBasicAuth doesn't accept None, so it makes sense to
# cache the value that is going to be used.
username = username or ""
password = password or ""
# Store any acquired credentials.
self.passwords[netloc] = (username, password)
assert (
# Credentials were found
(username is not None and password is not None)
# Credentials were not found
or (username is None and password is None)
), f"Could not load credentials from url: {original_url}"
return url, username, password
def __call__(self, req: Request) -> Request:
# Get credentials for this request
url, username, password = self._get_url_and_credentials(req.url)
# Set the url of the request to the url without any credentials
req.url = url
if username is not None and password is not None:
# Send the basic auth with this request
req = HTTPBasicAuth(username, password)(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
# Factored out to allow for easy patching in tests
def _prompt_for_password(
self, netloc: str
) -> Tuple[Optional[str], Optional[str], bool]:
username = ask_input(f"User for {netloc}: ") if self.prompting else None
if not username:
return None, None, False
if self.use_keyring:
auth = self._get_keyring_auth(netloc, username)
if auth and auth[0] is not None and auth[1] is not None:
return auth[0], auth[1], False
password = ask_password("Password: ")
return username, password, True
# Factored out to allow for easy patching in tests
def _should_save_password_to_keyring(self) -> bool:
if (
not self.prompting
or not self.use_keyring
or not self.keyring_provider.has_keyring
):
return False
return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y"
def handle_401(self, resp: Response, **kwargs: Any) -> Response:
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
username, password = None, None
# Query the keyring for credentials:
if self.use_keyring:
username, password = self._get_new_credentials(
resp.url,
allow_netrc=False,
allow_keyring=True,
)
# We are not able to prompt the user so simply return the response
if not self.prompting and not username and not password:
return resp
parsed = urllib.parse.urlparse(resp.url)
# Prompt the user for a new username and password
save = False
if not username and not password:
username, password, save = self._prompt_for_password(parsed.netloc)
# Store the new username and password to use for future requests
self._credentials_to_save = None
if username is not None and password is not None:
self.passwords[parsed.netloc] = (username, password)
# Prompt to save the password to keyring
if save and self._should_save_password_to_keyring():
self._credentials_to_save = Credentials(
url=parsed.netloc,
username=username,
password=password,
)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
# The result of the assignment isn't used, it's just needed to consume
# the content.
_ = resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
req.register_hook("response", self.warn_on_401)
# On successful request, save the credentials that were used to
# keyring. (Note that if the user responded "no" above, this member
# is not set and nothing will be saved.)
if self._credentials_to_save:
req.register_hook("response", self.save_credentials)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def warn_on_401(self, resp: Response, **kwargs: Any) -> None:
"""Response callback to warn about incorrect credentials."""
if resp.status_code == 401:
logger.warning(
"401 Error, Credentials not correct for %s",
resp.request.url,
)
def save_credentials(self, resp: Response, **kwargs: Any) -> None:
"""Response callback to save credentials on success."""
assert (
self.keyring_provider.has_keyring
), "should never reach here without keyring"
creds = self._credentials_to_save
self._credentials_to_save = None
if creds and resp.status_code < 400:
try:
logger.info("Saving credentials to keyring")
self.keyring_provider.save_auth_info(
creds.url, creds.username, creds.password
)
except Exception:
logger.exception("Failed to save credentials")
| MultiDomainBasicAuth |
python | numba__numba | numba/tests/cache_usecases.py | {
"start": 2445,
"end": 3566
} | class ____(TestCase):
"""
Tests for functionality of this module's functions.
Note this does not define any "test_*" method, instead check_module()
should be called by hand.
"""
def check_module(self, mod):
self.assertPreciseEqual(mod.add_usecase(2, 3), 6)
self.assertPreciseEqual(mod.add_objmode_usecase(2, 3), 6)
self.assertPreciseEqual(mod.outer_uncached(3, 2), 2)
self.assertPreciseEqual(mod.outer(3, 2), 2)
packed_rec = mod.record_return(mod.packed_arr, 1)
self.assertPreciseEqual(tuple(packed_rec), (2, 43.5))
aligned_rec = mod.record_return(mod.aligned_arr, 1)
self.assertPreciseEqual(tuple(aligned_rec), (2, 43.5))
@jit(cache=True)
def first_class_function_mul(x):
return x * x
@jit(cache=True)
def first_class_function_add(x):
return x + x
@jit(cache=True)
def first_class_function_usecase(f, x):
return f(x)
def self_test():
mod = sys.modules[__name__]
_TestModule().check_module(mod)
@jit(parallel=True, cache=True, nopython=True)
def parfor_usecase(ary):
return ary * ary + ary
| _TestModule |
python | xlwings__xlwings | xlwings/_xlmac.py | {
"start": 40787,
"end": 42622
} | class ____(base_classes.Font):
def __init__(self, parent, xl):
# xl can be font or font_object
self.parent = parent
self.xl = xl
@property
def api(self):
return self.xl
@property
def bold(self):
return self.xl.bold.get()
@bold.setter
def bold(self, value):
self.xl.bold.set(value)
@property
def italic(self):
return self.xl.italic.get()
@italic.setter
def italic(self, value):
self.xl.italic.set(value)
@property
def size(self):
return self.xl.font_size.get()
@size.setter
def size(self, value):
self.xl.font_size.set(value)
@property
def color(self):
if isinstance(self.parent, Range):
return tuple(self.xl.color.get())
elif isinstance(self.parent, Shape):
return tuple(self.xl.font_color.get())
@color.setter
def color(self, color_or_rgb):
if isinstance(color_or_rgb, str):
color_or_rgb = utils.hex_to_rgb(color_or_rgb)
if self.xl is not None:
if isinstance(self.parent, (Range, Characters)):
obj = self.xl.color
elif isinstance(self.parent, Shape):
obj = self.xl.font_color
if isinstance(color_or_rgb, int):
obj.set(int_to_rgb(color_or_rgb))
else:
obj.set(color_or_rgb)
@property
def name(self):
if isinstance(self.parent, Range):
return self.xl.name.get()
elif isinstance(self.parent, Shape):
return self.xl.font_name.get()
@name.setter
def name(self, value):
if isinstance(self.parent, Range):
self.xl.name.set(value)
elif isinstance(self.parent, Shape):
self.xl.font_name.set(value)
| Font |
python | django__django | tests/middleware_exceptions/tests.py | {
"start": 13186,
"end": 15707
} | class ____(SimpleTestCase):
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.AsyncTemplateResponseMiddleware",
]
)
async def test_process_template_response(self):
response = await self.async_client.get(
"/middleware_exceptions/template_response/"
)
self.assertEqual(
response.content,
b"template_response OK\nAsyncTemplateResponseMiddleware",
)
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.AsyncNoTemplateResponseMiddleware",
]
)
async def test_process_template_response_returns_none(self):
msg = (
"AsyncNoTemplateResponseMiddleware.process_template_response "
"didn't return an HttpResponse object. It returned None instead."
)
with self.assertRaisesMessage(ValueError, msg):
await self.async_client.get("/middleware_exceptions/template_response/")
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.AsyncProcessExceptionMiddleware",
]
)
async def test_exception_in_render_passed_to_process_exception(self):
response = await self.async_client.get(
"/middleware_exceptions/exception_in_render/"
)
self.assertEqual(response.content, b"Exception caught")
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.AsyncProcessExceptionMiddleware",
]
)
async def test_exception_in_async_render_passed_to_process_exception(self):
response = await self.async_client.get(
"/middleware_exceptions/async_exception_in_render/"
)
self.assertEqual(response.content, b"Exception caught")
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.AsyncProcessExceptionMiddleware",
]
)
async def test_view_exception_handled_by_process_exception(self):
response = await self.async_client.get("/middleware_exceptions/error/")
self.assertEqual(response.content, b"Exception caught")
@override_settings(
MIDDLEWARE=[
"middleware_exceptions.middleware.AsyncProcessViewMiddleware",
]
)
async def test_process_view_return_response(self):
response = await self.async_client.get("/middleware_exceptions/view/")
self.assertEqual(response.content, b"Processed view normal_view")
| AsyncMiddlewareTests |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_base_classes_3.py | {
"start": 313,
"end": 367
} | class ____(BaseModel[int]):
x: collections.Awaitable
| E |
python | django__django | tests/serializers/models/natural.py | {
"start": 1117,
"end": 1498
} | class ____(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=100, unique=True)
class Manager(models.Manager):
def get_by_natural_key(self, name):
return self.get(name=name)
objects = Manager()
def natural_key(self):
return (self.name,)
| NaturalPKWithDefault |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 25806,
"end": 26997
} | class ____(MeanMetricWrapper):
"""Calculates how often predictions equal labels.
This metric creates two local variables, `total` and `count` that are used to
compute the frequency with which `y_pred` matches `y_true`. This frequency is
ultimately returned as `binary accuracy`: an idempotent operation that simply
divides `total` by `count`.
If `sample_weight` is `None`, weights default to 1.
Use `sample_weight` of 0 to mask values.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
Standalone usage:
>>> m = tf.keras.metrics.Accuracy()
>>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
>>> m.result().numpy()
0.75
>>> m.reset_state()
>>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
... sample_weight=[1, 1, 0, 0])
>>> m.result().numpy()
0.5
Usage with `compile()` API:
```python
model.compile(optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.Accuracy()])
```
"""
def __init__(self, name='accuracy', dtype=None):
super(Accuracy, self).__init__(accuracy, name, dtype=dtype)
| Accuracy |
python | weaviate__weaviate-python-client | weaviate/collections/grpc/shared.py | {
"start": 28504,
"end": 29548
} | class ____:
@staticmethod
def parse_single_or_multi_vec(vector: PrimitiveVectorType) -> _Packing:
if _is_2d_vector(vector):
return _Packing(
bytes_=_Pack.multi(vector),
type_=base_pb2.Vectors.VECTOR_TYPE_MULTI_FP32,
)
elif _is_1d_vector(vector):
return _Packing(
bytes_=_Pack.single(vector),
type_=base_pb2.Vectors.VECTOR_TYPE_SINGLE_FP32,
)
else:
raise WeaviateInvalidInputError(f"Invalid vectors: {vector}")
@staticmethod
def single(vector: OneDimensionalVectorType) -> bytes:
vector_list = _get_vector_v4(vector)
return struct.pack("{}f".format(len(vector_list)), *vector_list)
@staticmethod
def multi(vector: TwoDimensionalVectorType) -> bytes:
vector_list = [item for sublist in vector for item in sublist]
return struct.pack("<H", len(vector[0])) + struct.pack(
"{}f".format(len(vector_list)), *vector_list
)
| _Pack |
python | sympy__sympy | sympy/codegen/fnodes.py | {
"start": 6361,
"end": 6835
} | class ____(Token):
""" Represents an array constructor.
Examples
========
>>> from sympy import fcode
>>> from sympy.codegen.fnodes import ArrayConstructor
>>> ac = ArrayConstructor([1, 2, 3])
>>> fcode(ac, standard=95, source_format='free')
'(/1, 2, 3/)'
>>> fcode(ac, standard=2003, source_format='free')
'[1, 2, 3]'
"""
__slots__ = _fields = ('elements',)
_construct_elements = staticmethod(_mk_Tuple)
| ArrayConstructor |
python | pytorch__pytorch | test/test_autocast.py | {
"start": 13594,
"end": 14757
} | class ____(TestCase):
def test_autocast_fast_dtype(self):
gpu_fast_dtype = torch.get_autocast_dtype(device_type="cuda")
cpu_fast_dtype = torch.get_autocast_dtype(device_type="cpu")
self.assertEqual(gpu_fast_dtype, torch.half)
self.assertEqual(cpu_fast_dtype, torch.bfloat16)
def test_invalid_device(self):
dev = "not a real device"
msg = f"Invalid device string: '{dev}'"
with self.assertRaisesRegex(RuntimeError, msg):
with torch.autocast(device_type=dev):
_ = torch.tensor(1)
with self.assertRaisesRegex(RuntimeError, msg):
assert torch.amp.is_autocast_available(device_type=dev)
def test_non_string_device(self):
"""Test that `autocast` throws a ValueError when provided a `torch.device` object for `device_type` instead of a string"""
dev = torch.device("cpu")
msg = f"Expected `device_type` of type `str`, got: `{type(dev)}`"
with self.assertRaisesRegex(expected_exception=ValueError, expected_regex=msg):
torch.autocast(device_type=dev)
if __name__ == "__main__":
run_tests()
| TestTorchAutocast |
python | dagster-io__dagster | python_modules/libraries/dagster-gcp/dagster_gcp/gcs/io_manager.py | {
"start": 796,
"end": 2907
} | class ____(UPathIOManager):
def __init__(self, bucket: str, client: Optional[Any] = None, prefix: str = "dagster"):
self.bucket = check.str_param(bucket, "bucket")
self.client = client or storage.Client()
self.bucket_obj = self.client.bucket(bucket)
check.invariant(self.bucket_obj.exists())
self.prefix = check.str_param(prefix, "prefix")
super().__init__(base_path=UPath(self.prefix))
def unlink(self, path: UPath) -> None:
key = path.as_posix()
if self.bucket_obj.blob(key).exists():
self.bucket_obj.blob(key).delete()
def path_exists(self, path: UPath) -> bool:
key = path.as_posix()
blobs = self.client.list_blobs(self.bucket, prefix=key)
return len(list(blobs)) > 0
def get_op_output_relative_path(self, context: Union[InputContext, OutputContext]) -> UPath:
parts = context.get_identifier()
run_id = parts[0]
output_parts = parts[1:]
return UPath("storage", run_id, "files", *output_parts)
def get_loading_input_log_message(self, path: UPath) -> str:
return f"Loading GCS object from: {self._uri_for_path(path)}"
def get_writing_output_log_message(self, path: UPath) -> str:
return f"Writing GCS object at: {self._uri_for_path(path)}"
def _uri_for_path(self, path: UPath) -> str:
return f"gs://{self.bucket}/{path.as_posix()}"
def make_directory(self, path: UPath) -> None:
# It is not necessary to create directories in GCP
return None
def load_from_path(self, context: InputContext, path: UPath) -> Any:
bytes_obj = self.bucket_obj.blob(path.as_posix()).download_as_bytes()
return pickle.loads(bytes_obj)
def dump_to_path(self, context: OutputContext, obj: Any, path: UPath) -> None:
pickled_obj = pickle.dumps(obj, PICKLE_PROTOCOL)
backoff(
self.bucket_obj.blob(path.as_posix()).upload_from_string,
args=[pickled_obj],
retry_on=(TooManyRequests, Forbidden, ServiceUnavailable),
)
| PickledObjectGCSIOManager |
python | jmcnamara__XlsxWriter | xlsxwriter/test/color/test_color01.py | {
"start": 282,
"end": 2147
} | class ____(unittest.TestCase):
"""
Test cases for the Color class.
"""
def test_color_rgb_from_string(self):
"""Test creating a Color instance from a hex string."""
color = Color("#FF5733")
self.assertEqual(color._rgb_value, 0xFF5733)
self.assertEqual(color._type, ColorTypes.RGB)
self.assertFalse(color._is_automatic)
def test_color_rgb_from_int(self):
"""Test creating a Color instance from an integer RGB value."""
color = Color(0x00FF00)
self.assertEqual(color._rgb_value, 0x00FF00)
self.assertEqual(color._type, ColorTypes.RGB)
self.assertFalse(color._is_automatic)
def test_color_theme(self):
"""Test creating a Color instance from a theme color tuple."""
color = Color((2, 3))
self.assertEqual(color._theme_color, (2, 3))
self.assertEqual(color._type, ColorTypes.THEME)
self.assertFalse(color._is_automatic)
def test_color_invalid_string(self):
"""Test creating a Color instance with an invalid string."""
with self.assertRaises(ValueError):
Color("invalid")
def test_color_invalid_int(self):
"""Test creating a Color instance with an out-of-range integer."""
with self.assertRaises(ValueError):
Color(0xFFFFFF + 1)
def test_color_invalid_theme(self):
"""Test creating a Color instance with an invalid theme tuple."""
with self.assertRaises(ValueError):
Color((10, 2)) # Invalid theme color
with self.assertRaises(ValueError):
Color((2, 6)) # Invalid theme shade
def test_is_automatic_property(self):
"""Test setting and getting the is_automatic property."""
color = Color("#000000")
color._is_automatic = True
self.assertTrue(color._is_automatic)
| TestColor |
python | huggingface__transformers | src/transformers/trainer_callback.py | {
"start": 8580,
"end": 10340
} | class ____:
"""
A class for objects that include the ability to have its state
be saved during `Trainer._save_checkpoint` and loaded back in during
`Trainer._load_from_checkpoint`.
These must implement a `state` function that gets called during the respective
Trainer function call. It should only include parameters and attributes needed to
recreate the state at a particular time, to avoid utilizing pickle/maintain standard
file IO writing.
Example:
```python
class EarlyStoppingCallback(TrainerCallback, ExportableState):
def __init__(self, early_stopping_patience: int = 1, early_stopping_threshold: Optional[float] = 0.0):
self.early_stopping_patience = early_stopping_patience
self.early_stopping_threshold = early_stopping_threshold
# early_stopping_patience_counter denotes the number of times validation metrics failed to improve.
self.early_stopping_patience_counter = 0
def state(self) -> dict:
return {
"args": {
"early_stopping_patience": self.early_stopping_patience,
"early_stopping_threshold": self.early_stopping_threshold,
},
"attributes": {
"early_stopping_patience_counter": self.early_stopping_patience_counter,
}
}
```"""
def state(self) -> dict:
raise NotImplementedError("You must implement a `state` function to utilize this class.")
@classmethod
def from_state(cls, state):
instance = cls(**state["args"])
for k, v in state["attributes"].items():
setattr(instance, k, v)
return instance
@dataclass
| ExportableState |
python | kamyu104__LeetCode-Solutions | Python/find-the-occurrence-of-first-almost-equal-substring.py | {
"start": 50,
"end": 910
} | class ____(object):
def minStartingIndex(self, s, pattern):
"""
:type s: str
:type pattern: str
:rtype: int
"""
K = 1
# Template: https://cp-algorithms.com/string/z-function.html
def z_function(s): # Time: O(n), Space: O(n)
z = [0]*len(s)
l, r = 0, 0
for i in xrange(1, len(z)):
if i <= r:
z[i] = min(r-i+1, z[i-l])
while i+z[i] < len(z) and s[z[i]] == s[i+z[i]]:
z[i] += 1
if i+z[i]-1 > r:
l, r = i, i+z[i]-1
return z
z1 = z_function(pattern+s)
z2 = z_function(pattern[::-1]+s[::-1])
return next((i for i in xrange(len(s)-len(pattern)+1) if z1[len(pattern)+i]+K+z2[len(s)-i] >= len(pattern)), -1)
| Solution |
python | numpy__numpy | numpy/polynomial/tests/test_symbol.py | {
"start": 3735,
"end": 5372
} | class ____:
"""
Test other methods for manipulating/creating polynomial objects.
"""
p = poly.Polynomial([1, 2, 3, 0], symbol='z')
def test_copy(self):
other = self.p.copy()
assert_equal(other.symbol, 'z')
def test_trim(self):
other = self.p.trim()
assert_equal(other.symbol, 'z')
def test_truncate(self):
other = self.p.truncate(2)
assert_equal(other.symbol, 'z')
@pytest.mark.parametrize('kwarg', (
{'domain': [-10, 10]},
{'window': [-10, 10]},
{'kind': poly.Chebyshev},
))
def test_convert(self, kwarg):
other = self.p.convert(**kwarg)
assert_equal(other.symbol, 'z')
def test_integ(self):
other = self.p.integ()
assert_equal(other.symbol, 'z')
def test_deriv(self):
other = self.p.deriv()
assert_equal(other.symbol, 'z')
def test_composition():
p = poly.Polynomial([3, 2, 1], symbol="t")
q = poly.Polynomial([5, 1, 0, -1], symbol="λ_1")
r = p(q)
assert r.symbol == "λ_1"
#
# Class methods that result in new polynomial class instances
#
def test_fit():
x, y = (range(10),) * 2
p = poly.Polynomial.fit(x, y, deg=1, symbol='z')
assert_equal(p.symbol, 'z')
def test_froomroots():
roots = [-2, 2]
p = poly.Polynomial.fromroots(roots, symbol='z')
assert_equal(p.symbol, 'z')
def test_identity():
p = poly.Polynomial.identity(domain=[-1, 1], window=[5, 20], symbol='z')
assert_equal(p.symbol, 'z')
def test_basis():
p = poly.Polynomial.basis(3, symbol='z')
assert_equal(p.symbol, 'z')
| TestExtraMethods |
python | apache__airflow | providers/fab/tests/unit/fab/auth_manager/api_fastapi/services/test_roles.py | {
"start": 2183,
"end": 14147
} | class ____:
def setup_method(self):
self.body_ok = types.SimpleNamespace(
name="roleA",
permissions=[
types.SimpleNamespace(
action=types.SimpleNamespace(name="can_read"),
resource=types.SimpleNamespace(name="DAG"),
)
],
)
self.body_bad_action = types.SimpleNamespace(
name="roleB",
permissions=[
types.SimpleNamespace(
action=types.SimpleNamespace(name="no_such_action"),
resource=types.SimpleNamespace(name="DAG"),
)
],
)
self.body_bad_resource = types.SimpleNamespace(
name="roleC",
permissions=[
types.SimpleNamespace(
action=types.SimpleNamespace(name="can_read"),
resource=types.SimpleNamespace(name="NOPE"),
)
],
)
# POST /roles
def test_create_role_success(self, get_fab_auth_manager, fab_auth_manager, security_manager):
security_manager.find_role.side_effect = [
None,
_make_role_obj("roleA", [("can_read", "DAG")]),
]
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
out = FABAuthManagerRoles.create_role(self.body_ok)
assert out.name == "roleA"
assert out.permissions
assert out.permissions[0].action.name == "can_read"
assert out.permissions[0].resource.name == "DAG"
security_manager.bulk_sync_roles.assert_called_once_with(
[{"role": "roleA", "perms": [("can_read", "DAG")]}]
)
def test_create_role_conflict(self, get_fab_auth_manager, fab_auth_manager, security_manager):
security_manager.find_role.return_value = object()
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
with pytest.raises(HTTPException) as ex:
FABAuthManagerRoles.create_role(self.body_ok)
assert ex.value.status_code == 409
def test_create_role_action_not_found(self, get_fab_auth_manager, fab_auth_manager, security_manager):
security_manager.find_role.return_value = None
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
with pytest.raises(HTTPException) as ex:
FABAuthManagerRoles.create_role(self.body_bad_action)
assert ex.value.status_code == 400
assert "action" in ex.value.detail
def test_create_role_resource_not_found(self, get_fab_auth_manager, fab_auth_manager, security_manager):
security_manager.find_role.return_value = None
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
with pytest.raises(HTTPException) as ex:
FABAuthManagerRoles.create_role(self.body_bad_resource)
assert ex.value.status_code == 400
assert "resource" in ex.value.detail
def test_create_role_unexpected_no_created(
self, get_fab_auth_manager, fab_auth_manager, security_manager
):
security_manager.find_role.side_effect = [None, None]
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
with pytest.raises(HTTPException) as ex:
FABAuthManagerRoles.create_role(self.body_ok)
assert ex.value.status_code == 500
# GET /roles
@patch("airflow.providers.fab.auth_manager.api_fastapi.services.roles.build_ordering")
def test_get_roles_happy_path(self, build_ordering, get_fab_auth_manager):
role1 = _make_role_obj("viewer", [("can_read", "DAG")])
role2 = _make_role_obj("admin", [("can_read", "DAG")])
fake_roles_result = _FakeScalarRoles([role1, role2])
session = MagicMock()
session.scalars.side_effect = [
_FakeScalarCount(2),
fake_roles_result,
]
fab_auth_manager = MagicMock()
fab_auth_manager.security_manager = MagicMock(session=session)
get_fab_auth_manager.return_value = fab_auth_manager
build_ordering.return_value = column("name").desc()
out = FABAuthManagerRoles.get_roles(order_by="-name", limit=5, offset=3)
assert out.total_entries == 2
assert [r.name for r in out.roles] == ["viewer", "admin"]
assert fake_roles_result._unique_called is True
build_ordering.assert_called_once()
args, kwargs = build_ordering.call_args
assert args[0] == "-name"
assert set(kwargs["allowed"].keys()) == {"name", "role_id"}
assert session.scalars.call_count == 2
@patch("airflow.providers.fab.auth_manager.api_fastapi.services.roles.build_ordering")
def test_get_roles_invalid_order_by_bubbles_400(self, build_ordering, get_fab_auth_manager):
session = MagicMock()
fab_auth_manager = MagicMock()
fab_auth_manager.security_manager = MagicMock(session=session)
get_fab_auth_manager.return_value = fab_auth_manager
build_ordering.side_effect = HTTPException(status_code=400, detail="disallowed")
with pytest.raises(HTTPException) as ex:
FABAuthManagerRoles.get_roles(order_by="nope", limit=10, offset=0)
assert ex.value.status_code == 400
# DELETE /roles/{name}
def test_delete_role_success(self, get_fab_auth_manager, fab_auth_manager, security_manager):
security_manager.find_role.return_value = _make_role_obj("roleA", [])
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
FABAuthManagerRoles.delete_role(name="roleA")
security_manager.delete_role.assert_called_once()
def test_delete_role_not_found(self, get_fab_auth_manager, fab_auth_manager, security_manager):
security_manager.find_role.return_value = None
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
with pytest.raises(HTTPException) as ex:
FABAuthManagerRoles.delete_role(name="roleA")
assert ex.value.status_code == 404
# GET /roles/{name}
def test_get_role_success(self, get_fab_auth_manager, fab_auth_manager, security_manager):
security_manager.find_role.return_value = _make_role_obj("roleA", [("can_read", "DAG")])
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
out = FABAuthManagerRoles.get_role(name="roleA")
assert out.name == "roleA"
assert out.permissions
assert out.permissions[0].action.name == "can_read"
assert out.permissions[0].resource.name == "DAG"
def test_get_role_not_found(self, get_fab_auth_manager, fab_auth_manager, security_manager):
security_manager.find_role.return_value = None
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
with pytest.raises(HTTPException) as ex:
FABAuthManagerRoles.get_role(name="roleA")
assert ex.value.status_code == 404
# PATCH /roles/{name}
def test_patch_role_success(self, get_fab_auth_manager, fab_auth_manager, security_manager):
role = _make_role_obj("viewer", [("can_read", "DAG")])
security_manager.find_role.return_value = role
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
body = types.SimpleNamespace(
name="viewer",
permissions=[
types.SimpleNamespace(
action=types.SimpleNamespace(name="can_edit"),
resource=types.SimpleNamespace(name="DAG"),
)
],
)
out = FABAuthManagerRoles.patch_role(body=body, name="viewer")
assert out.name == "viewer"
assert out.permissions
assert out.permissions[0].action.name == "can_edit"
assert out.permissions[0].resource.name == "DAG"
def test_patch_role_rename_success(self, get_fab_auth_manager, fab_auth_manager, security_manager):
role = _make_role_obj("viewer", [("can_edit", "DAG")])
security_manager.find_role.return_value = role
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
body = types.SimpleNamespace(
name="editor",
permissions=[
types.SimpleNamespace(
action=types.SimpleNamespace(name="can_edit"),
resource=types.SimpleNamespace(name="DAG"),
)
],
)
out = FABAuthManagerRoles.patch_role(body=body, name="viewer")
assert out.name == "editor"
assert out.permissions
assert out.permissions[0].action.name == "can_edit"
assert out.permissions[0].resource.name == "DAG"
def test_patch_role_with_update_mask(self, get_fab_auth_manager, fab_auth_manager, security_manager):
role = _make_role_obj("viewer", [("can_read", "DAG")])
security_manager.find_role.return_value = role
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
body = types.SimpleNamespace(
name="viewer1",
permissions=[
types.SimpleNamespace(
action=types.SimpleNamespace(name="can_edit"),
resource=types.SimpleNamespace(name="DAG"),
)
],
)
out = FABAuthManagerRoles.patch_role(
body=body,
name="viewer",
update_mask=["actions"],
)
assert out.name == "viewer"
assert out.permissions
assert out.permissions[0].action.name == "can_edit"
assert out.permissions[0].resource.name == "DAG"
def test_patch_role_rename_with_update_mask(
self, get_fab_auth_manager, fab_auth_manager, security_manager
):
role = _make_role_obj("viewer", [("can_read", "DAG")])
security_manager.find_role.return_value = role
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
body = types.SimpleNamespace(
name="viewer1",
permissions=[
types.SimpleNamespace(
action=types.SimpleNamespace(name="can_edit"),
resource=types.SimpleNamespace(name="DAG"),
)
],
)
out = FABAuthManagerRoles.patch_role(
body=body,
name="viewer",
update_mask=["name"],
)
assert out.name == "viewer1"
assert out.permissions
assert out.permissions[0].action.name == "can_read"
assert out.permissions[0].resource.name == "DAG"
def test_patch_role_not_found(self, get_fab_auth_manager, fab_auth_manager, security_manager):
security_manager.find_role.return_value = None
fab_auth_manager.security_manager = security_manager
get_fab_auth_manager.return_value = fab_auth_manager
body = types.SimpleNamespace(
name="viewer",
permissions=[
types.SimpleNamespace(
action=types.SimpleNamespace(name="can_edit"),
resource=types.SimpleNamespace(name="DAG"),
)
],
)
with pytest.raises(HTTPException) as ex:
FABAuthManagerRoles.patch_role(body=body, name="viewer")
assert ex.value.status_code == 404
| TestRolesService |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 118124,
"end": 119190
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
existing_cluster_id: Optional[str] = Field(
None,
description=(
"If existing_cluster_id, the ID of an existing cluster that is used for all"
" runs of this job. When running jobs on an existing cluster, you may need"
" to manually restart the cluster if it stops responding. We suggest"
" running jobs on new clusters for greater reliability."
),
examples=["0923-164208-meows279"],
)
libraries: Optional[List[Library]] = Field(
None,
description=(
"An optional list of libraries to be installed on the cluster that executes"
" the job. The default value is an empty list."
),
)
new_cluster: Optional[NewCluster] = Field(
None,
description=(
"If new_cluster, a description of a cluster that is created for each run."
),
)
| ClusterSpec |
python | tox-dev__tox | src/tox/tox_env/register.py | {
"start": 293,
"end": 2640
} | class ____:
"""tox environment registry."""
def __init__(self) -> None:
self._run_envs: dict[str, type[RunToxEnv]] = {}
self._package_envs: dict[str, type[PackageToxEnv]] = {}
self._default_run_env: str = ""
def _register_tox_env_types(self, manager: Plugin) -> None:
manager.tox_register_tox_env(register=self)
def add_run_env(self, of_type: type[RunToxEnv]) -> None:
"""
Define a new run tox environment type.
:param of_type: the new run environment type
"""
self._run_envs[of_type.id()] = of_type
def add_package_env(self, of_type: type[PackageToxEnv]) -> None:
"""
Define a new packaging tox environment type.
:param of_type: the new packaging environment type
"""
self._package_envs[of_type.id()] = of_type
@property
def env_runners(self) -> Iterable[str]:
""":returns: run environment types currently defined"""
return self._run_envs.keys()
@property
def default_env_runner(self) -> str:
""":returns: the default run environment type"""
if not self._default_run_env and self._run_envs:
self._default_run_env = next(iter(self._run_envs.keys()))
return self._default_run_env
@default_env_runner.setter
def default_env_runner(self, value: str) -> None:
"""
Change the default run environment type.
:param value: the new run environment type by name
"""
if value not in self._run_envs:
msg = "run env must be registered before setting it as default"
raise ValueError(msg)
self._default_run_env = value
def runner(self, name: str) -> type[RunToxEnv]:
"""
Lookup a run tox environment type by name.
:param name: the name of the runner type
:return: the type of the runner type
"""
return self._run_envs[name]
def package(self, name: str) -> type[PackageToxEnv]:
"""
Lookup a packaging tox environment type by name.
:param name: the name of the packaging type
:return: the type of the packaging type
"""
return self._package_envs[name]
REGISTER = ToxEnvRegister() #: the tox register
__all__ = (
"REGISTER",
"ToxEnvRegister",
)
| ToxEnvRegister |
python | jina-ai__jina | jina/logging/logger.py | {
"start": 2449,
"end": 3031
} | class ____(logging.handlers.SysLogHandler):
"""
Override the priority_map :class:`SysLogHandler`.
.. warning::
This messages at DEBUG and INFO are therefore not stored by ASL, (ASL = Apple System Log)
which in turn means they can't be printed by syslog after the fact. You can confirm it via :command:`syslog` or
:command:`tail -f /var/log/system.log`.
"""
priority_map = {
'DEBUG': 'debug',
'INFO': 'info',
'WARNING': 'warning',
'ERROR': 'error',
'CRITICAL': 'critical',
}
| SysLogHandlerWrapper |
python | PrefectHQ__prefect | src/prefect/server/concurrency/lease_storage/memory.py | {
"start": 405,
"end": 4046
} | class ____(_ConcurrencyLeaseStorage):
"""
A singleton concurrency lease storage implementation that stores leases in memory.
"""
_instance: "ConcurrencyLeaseStorage | None" = None
_initialized: bool = False
def __new__(cls) -> "ConcurrencyLeaseStorage":
if cls._instance is None:
cls._instance = super().__new__(cls)
return cls._instance
def __init__(self):
if self.__class__._initialized:
return
self.leases: dict[UUID, ResourceLease[ConcurrencyLimitLeaseMetadata]] = {}
self.expirations: dict[UUID, datetime] = {}
self.__class__._initialized = True
async def create_lease(
self,
resource_ids: list[UUID],
ttl: timedelta,
metadata: ConcurrencyLimitLeaseMetadata | None = None,
) -> ResourceLease[ConcurrencyLimitLeaseMetadata]:
expiration = datetime.now(timezone.utc) + ttl
lease = ResourceLease(
resource_ids=resource_ids, metadata=metadata, expiration=expiration
)
self.leases[lease.id] = lease
self.expirations[lease.id] = expiration
return lease
async def read_lease(
self, lease_id: UUID
) -> ResourceLease[ConcurrencyLimitLeaseMetadata] | None:
return self.leases.get(lease_id)
async def renew_lease(self, lease_id: UUID, ttl: timedelta) -> bool:
"""
Atomically renew a concurrency lease by updating its expiration.
Checks if the lease exists before updating the expiration index,
preventing orphaned index entries.
Args:
lease_id: The ID of the lease to renew
ttl: The new time-to-live duration
Returns:
True if the lease was renewed, False if it didn't exist
"""
if lease_id not in self.leases:
# Clean up any orphaned expiration entry
self.expirations.pop(lease_id, None)
return False
self.expirations[lease_id] = datetime.now(timezone.utc) + ttl
return True
async def revoke_lease(self, lease_id: UUID) -> None:
self.leases.pop(lease_id, None)
self.expirations.pop(lease_id, None)
async def read_active_lease_ids(
self, limit: int = 100, offset: int = 0
) -> list[UUID]:
now = datetime.now(timezone.utc)
active_leases = [
lease_id
for lease_id, expiration in self.expirations.items()
if expiration > now
]
return active_leases[offset : offset + limit]
async def read_expired_lease_ids(self, limit: int = 100) -> list[UUID]:
now = datetime.now(timezone.utc)
expired_leases = [
lease_id
for lease_id, expiration in self.expirations.items()
if expiration < now
]
return expired_leases[:limit]
async def list_holders_for_limit(
self, limit_id: UUID
) -> list[tuple[UUID, ConcurrencyLeaseHolder]]:
"""List all holders for a given concurrency limit."""
now = datetime.now(timezone.utc)
holders_with_leases: list[tuple[UUID, ConcurrencyLeaseHolder]] = []
for lease_id, lease in self.leases.items():
# Check if lease is active and for the specified limit
if (
limit_id in lease.resource_ids
and self.expirations.get(lease_id, now) > now
and lease.metadata
and lease.metadata.holder
):
holders_with_leases.append((lease.id, lease.metadata.holder))
return holders_with_leases
| ConcurrencyLeaseStorage |
python | walkccc__LeetCode | solutions/273. Integer to English Words/273.py | {
"start": 0,
"end": 1129
} | class ____:
def numberToWords(self, num: int) -> str:
if num == 0:
return 'Zero'
belowTwenty = ['', 'One', 'Two', 'Three',
'Four', 'Five', 'Six', 'Seven',
'Eight', 'Nine', 'Ten', 'Eleven',
'Twelve', 'Thirteen', 'Fourteen', 'Fifteen',
'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen']
tens = ['', 'Ten', 'Twenty', 'Thirty', 'Forty',
'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety']
def helper(num: int) -> str:
if num < 20:
s = belowTwenty[num]
elif num < 100:
s = tens[num // 10] + ' ' + belowTwenty[num % 10]
elif num < 1000:
s = helper(num // 100) + ' Hundred ' + helper(num % 100)
elif num < 1000000:
s = helper(num // 1000) + ' Thousand ' + helper(num % 1000)
elif num < 1000000000:
s = helper(num // 1000000) + ' Million ' + helper(num % 1000000)
else:
s = helper(num // 1000000000) + ' Billion ' + helper(num % 1000000000)
return s.strip()
return helper(num)
| Solution |
python | huggingface__transformers | src/transformers/models/dinov3_vit/modular_dinov3_vit.py | {
"start": 11770,
"end": 11822
} | class ____(Dinov2DropPath):
pass
| DINOv3ViTDropPath |
python | tensorflow__tensorflow | tensorflow/python/lib/io/tf_record_test.py | {
"start": 12875,
"end": 20293
} | class ____(TFCompressionTestCase):
"""TFRecordIterator test"""
def setUp(self):
super(TFRecordIteratorTest, self).setUp()
self._num_records = 7
def testIterator(self):
"""test Iterator"""
records = [self._Record(0, i) for i in range(self._num_records)]
options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
fn = self._WriteRecordsToFile(records, "compressed_records", options)
reader = tf_record.tf_record_iterator(fn, options)
for expected in records:
record = next(reader)
self.assertEqual(expected, record)
with self.assertRaises(StopIteration):
record = next(reader)
def testWriteZlibRead(self):
"""Verify compression with TFRecordWriter is zlib library compatible."""
original = [b"foo", b"bar"]
options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
fn = self._WriteRecordsToFile(original, "write_zlib_read.tfrecord.z",
options)
zfn = self._ZlibDecompressFile(fn, "write_zlib_read.tfrecord")
actual = list(tf_record.tf_record_iterator(zfn))
self.assertEqual(actual, original)
def testWriteZlibReadLarge(self):
"""Verify compression for large records is zlib library compatible."""
# Make it large (about 5MB)
original = [_TEXT * 10240]
options = tf_record.TFRecordOptions(TFRecordCompressionType.ZLIB)
fn = self._WriteRecordsToFile(original, "write_zlib_read_large.tfrecord.z",
options)
zfn = self._ZlibDecompressFile(fn, "write_zlib_read_large.tfrecord")
actual = list(tf_record.tf_record_iterator(zfn))
self.assertEqual(actual, original)
def testWriteGzipRead(self):
original = [b"foo", b"bar"]
options = tf_record.TFRecordOptions(TFRecordCompressionType.GZIP)
fn = self._WriteRecordsToFile(original, "write_gzip_read.tfrecord.gz",
options)
gzfn = self._GzipDecompressFile(fn, "write_gzip_read.tfrecord")
actual = list(tf_record.tf_record_iterator(gzfn))
self.assertEqual(actual, original)
def testReadGrowingFile_preservesReadOffset(self):
"""Verify that tf_record_iterator preserves read offset even after EOF.
When a file is iterated to EOF, the iterator should raise StopIteration but
not actually close the reader. Then if later new data is appended, the
iterator should start returning that new data on the next call to next(),
preserving the read offset. This behavior is required by TensorBoard.
"""
# Start the file with a good record.
fn = os.path.join(self.get_temp_dir(), "file.tfrecord")
with tf_record.TFRecordWriter(fn) as writer:
writer.write(b"one")
writer.write(b"two")
writer.flush()
iterator = tf_record.tf_record_iterator(fn)
self.assertEqual(b"one", next(iterator))
self.assertEqual(b"two", next(iterator))
# Iterating at EOF results in StopIteration repeatedly.
with self.assertRaises(StopIteration):
next(iterator)
with self.assertRaises(StopIteration):
next(iterator)
# Retrying after adding a new record successfully returns the new record,
# preserving the prior read offset.
writer.write(b"three")
writer.flush()
self.assertEqual(b"three", next(iterator))
with self.assertRaises(StopIteration):
next(iterator)
def testReadTruncatedFile_preservesReadOffset(self):
"""Verify that tf_record_iterator throws an exception on bad TFRecords.
When a truncated record is completed, the iterator should return that new
record on the next attempt at iteration, preserving the read offset. This
behavior is required by TensorBoard.
"""
# Write out a record and read it back it to get the raw bytes.
fn = os.path.join(self.get_temp_dir(), "temp_file")
with tf_record.TFRecordWriter(fn) as writer:
writer.write(b"truncated")
with open(fn, "rb") as f:
record_bytes = f.read()
# Start the file with a good record.
fn_truncated = os.path.join(self.get_temp_dir(), "truncated_file")
with tf_record.TFRecordWriter(fn_truncated) as writer:
writer.write(b"good")
with open(fn_truncated, "ab", buffering=0) as f:
# Cause truncation by omitting the last byte from the record.
f.write(record_bytes[:-1])
iterator = tf_record.tf_record_iterator(fn_truncated)
# Good record appears first.
self.assertEqual(b"good", next(iterator))
# Truncated record repeatedly causes DataLossError upon iteration.
with self.assertRaises(errors_impl.DataLossError):
next(iterator)
with self.assertRaises(errors_impl.DataLossError):
next(iterator)
# Retrying after completing the record successfully returns the rest of
# the file contents, preserving the prior read offset.
f.write(record_bytes[-1:])
self.assertEqual(b"truncated", next(iterator))
with self.assertRaises(StopIteration):
next(iterator)
def testReadReplacedFile_preservesReadOffset_afterReopen(self):
"""Verify that tf_record_iterator allows reopening at the same read offset.
In some cases, data will be logically "appended" to a file by replacing the
entire file with a new version that includes the additional data. For
example, this can happen with certain GCS implementations (since GCS has no
true append operation), or when using rsync without the `--inplace` option
to transfer snapshots of a growing file. Since the iterator retains a handle
to a stale version of the file, it won't return any of the new data.
To force this to happen, callers can check for a replaced file (e.g. via a
stat call that reflects an increased file size) and opt to close and reopen
the iterator. When iteration is next attempted, this should result in
reading from the newly opened file, while preserving the read offset. This
behavior is required by TensorBoard.
"""
def write_records_to_file(filename, records):
writer = tf_record.TFRecordWriter(filename)
for record in records:
writer.write(record)
writer.close()
fn = os.path.join(self.get_temp_dir(), "orig_file")
write_records_to_file(fn, [b"one", b"two"])
iterator = tf_record.tf_record_iterator(fn)
self.assertEqual(b"one", next(iterator))
self.assertEqual(b"two", next(iterator))
# Iterating at EOF results in StopIteration repeatedly.
with self.assertRaises(StopIteration):
next(iterator)
with self.assertRaises(StopIteration):
next(iterator)
# Add a new record to the end of the file by overwriting it.
fn2 = os.path.join(self.get_temp_dir(), "new_file")
write_records_to_file(fn2, [b"one", b"two", b"three"])
# Windows disallows replacing files while in use, so close iterator early.
if os.name == "nt":
iterator.close()
os.replace(fn2, fn)
# Iterating at EOF still results in StopIteration; new data is not shown.
with self.assertRaises(StopIteration):
next(iterator)
with self.assertRaises(StopIteration):
next(iterator)
# Retrying after close and reopen successfully returns the new record,
# preserving the prior read offset.
iterator.close()
iterator.reopen()
self.assertEqual(b"three", next(iterator))
with self.assertRaises(StopIteration):
next(iterator)
| TFRecordIteratorTest |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 102012,
"end": 102128
} | class ____:
xlA1 = 1 # from enum XlReferenceStyle
xlR1C1 = -4150 # from enum XlReferenceStyle
| ReferenceStyle |
python | scikit-learn__scikit-learn | sklearn/preprocessing/_data.py | {
"start": 40277,
"end": 51643
} | class ____(OneToOneFeatureMixin, TransformerMixin, BaseEstimator):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
`MaxAbsScaler` doesn't reduce the effect of outliers; it only linearly
scales them down. For an example visualization, refer to :ref:`Compare
MaxAbsScaler with other scalers <plot_all_scaling_max_abs_scaler_section>`.
.. versionadded:: 0.17
Parameters
----------
copy : bool, default=True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
clip : bool, default=False
Set to True to clip transformed values of held-out data to [-1, 1].
Since this parameter will clip values, `inverse_transform` may not
be able to restore the original data.
.. note::
Setting `clip=True` does not prevent feature drift (a distribution
shift between training and test data). The transformed values are clipped
to the [-1, 1] range, which helps avoid unintended behavior in models
sensitive to out-of-range inputs (e.g. linear models). Use with care,
as clipping can distort the distribution of test data.
Attributes
----------
scale_ : ndarray of shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray of shape (n_features,)
Per feature maximum absolute value.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See Also
--------
maxabs_scale : Equivalent function without the estimator API.
Notes
-----
NaNs are treated as missing values: disregarded in fit, and maintained in
transform.
Examples
--------
>>> from sklearn.preprocessing import MaxAbsScaler
>>> X = [[ 1., -1., 2.],
... [ 2., 0., 0.],
... [ 0., 1., -1.]]
>>> transformer = MaxAbsScaler().fit(X)
>>> transformer
MaxAbsScaler()
>>> transformer.transform(X)
array([[ 0.5, -1. , 1. ],
[ 1. , 0. , 0. ],
[ 0. , 1. , -0.5]])
"""
_parameter_constraints: dict = {
"copy": ["boolean"],
"clip": ["boolean"],
}
def __init__(self, *, copy=True, clip=False):
self.copy = copy
self.clip = clip
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
# Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, "scale_"):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
@_fit_context(prefer_skip_nested_validation=True)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when :meth:`fit` is not feasible due to very large number of
`n_samples` or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : None
Ignored.
Returns
-------
self : object
Fitted scaler.
"""
xp, _ = get_namespace(X)
first_pass = not hasattr(self, "n_samples_seen_")
X = validate_data(
self,
X,
reset=first_pass,
accept_sparse=("csr", "csc"),
dtype=_array_api.supported_float_dtypes(xp),
ensure_all_finite="allow-nan",
)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0, ignore_nan=True)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = _array_api._nanmax(xp.abs(X), axis=0, xp=xp)
if first_pass:
self.n_samples_seen_ = X.shape[0]
else:
max_abs = xp.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs, copy=True)
return self
def transform(self, X):
"""Scale the data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data that should be scaled.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
xp, _ = get_namespace(X)
X = validate_data(
self,
X,
accept_sparse=("csr", "csc"),
copy=self.copy,
reset=False,
dtype=_array_api.supported_float_dtypes(xp),
force_writeable=True,
ensure_all_finite="allow-nan",
)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
if self.clip:
np.clip(X.data, -1.0, 1.0, out=X.data)
else:
X /= self.scale_
if self.clip:
device_ = device(X)
X = _modify_in_place_if_numpy(
xp,
xp.clip,
X,
xp.asarray(-1.0, dtype=X.dtype, device=device_),
xp.asarray(1.0, dtype=X.dtype, device=device_),
out=X,
)
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data that should be transformed back.
Returns
-------
X_original : {ndarray, sparse matrix} of shape (n_samples, n_features)
Transformed array.
"""
check_is_fitted(self)
xp, _ = get_namespace(X)
X = check_array(
X,
accept_sparse=("csr", "csc"),
copy=self.copy,
dtype=_array_api.supported_float_dtypes(xp),
force_writeable=True,
ensure_all_finite="allow-nan",
)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.allow_nan = True
tags.input_tags.sparse = True
return tags
@validate_params(
{
"X": ["array-like", "sparse matrix"],
"axis": [Options(Integral, {0, 1})],
},
prefer_skip_nested_validation=False,
)
def maxabs_scale(X, *, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The data.
axis : {0, 1}, default=0
Axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : bool, default=True
If False, try to avoid a copy and scale in place.
This is not guaranteed to always work in place; e.g. if the data is
a numpy array with an int dtype, a copy will be returned even with
copy=False.
Returns
-------
X_tr : {ndarray, sparse matrix} of shape (n_samples, n_features)
The transformed data.
.. warning:: Risk of data leak
Do not use :func:`~sklearn.preprocessing.maxabs_scale` unless you know
what you are doing. A common mistake is to apply it to the entire data
*before* splitting into training and test sets. This will bias the
model evaluation because information would have leaked from the test
set to the training set.
In general, we recommend using
:class:`~sklearn.preprocessing.MaxAbsScaler` within a
:ref:`Pipeline <pipeline>` in order to prevent most risks of data
leaking: `pipe = make_pipeline(MaxAbsScaler(), LogisticRegression())`.
See Also
--------
MaxAbsScaler : Performs scaling to the [-1, 1] range using
the Transformer API (e.g. as part of a preprocessing
:class:`~sklearn.pipeline.Pipeline`).
Notes
-----
NaNs are treated as missing values: disregarded to compute the statistics,
and maintained during the data transformation.
For a comparison of the different scalers, transformers, and normalizers,
see: :ref:`sphx_glr_auto_examples_preprocessing_plot_all_scaling.py`.
Examples
--------
>>> from sklearn.preprocessing import maxabs_scale
>>> X = [[-2, 1, 2], [-1, 0, 1]]
>>> maxabs_scale(X, axis=0) # scale each column independently
array([[-1. , 1. , 1. ],
[-0.5, 0. , 0.5]])
>>> maxabs_scale(X, axis=1) # scale each row independently
array([[-1. , 0.5, 1. ],
[-1. , 0. , 1. ]])
"""
# Unlike the scaler object, this function allows 1d input.
# If copy is required, it will be done inside the scaler object.
X = check_array(
X,
accept_sparse=("csr", "csc"),
copy=False,
ensure_2d=False,
dtype=FLOAT_DTYPES,
ensure_all_finite="allow-nan",
)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
| MaxAbsScaler |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_merge_range05.py | {
"start": 315,
"end": 892
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("merge_range05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
cell_format = workbook.add_format({"align": "center"})
worksheet.merge_range(1, 1, 1, 3, 123, cell_format)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | ansible__ansible | test/lib/ansible_test/_util/controller/sanity/validate-modules/validate_modules/utils.py | {
"start": 5971,
"end": 7023
} | class ____(AnsibleModule):
"""AnsibleModule that does not actually load params. This is used to get access to the
methods within AnsibleModule without having to fake a bunch of data
"""
def _load_params(self):
self.params = {'_ansible_selinux_special_fs': [], '_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False, '_ansible_check_mode': False}
def parse_isodate(v, allow_date):
if allow_date:
if isinstance(v, datetime.date):
return v
msg = 'Expected ISO 8601 date string (YYYY-MM-DD) or YAML date'
else:
msg = 'Expected ISO 8601 date string (YYYY-MM-DD)'
if not isinstance(v, str):
raise ValueError(msg)
# From Python 3.7 in, there is datetime.date.fromisoformat(). For older versions,
# we have to do things manually.
if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', v):
raise ValueError(msg)
try:
return datetime.datetime.strptime(v, '%Y-%m-%d').date()
except ValueError:
raise ValueError(msg)
| NoArgsAnsibleModule |
python | PrefectHQ__prefect | tests/server/schemas/test_schedules.py | {
"start": 31898,
"end": 50687
} | class ____:
@pytest.mark.parametrize(
"start_date",
[
datetime(2018, 1, 1, tzinfo=ZoneInfo("UTC")),
datetime(2021, 2, 2, tzinfo=ZoneInfo("UTC")),
datetime(2025, 3, 3, tzinfo=ZoneInfo("UTC")),
],
)
async def test_daily_with_start_date(self, start_date):
s = RRuleSchedule.from_rrule(rrule.rrule(freq=rrule.DAILY, dtstart=start_date))
dates = await s.get_dates(5, start=start_date)
assert dates == [start_date + timedelta(days=i) for i in range(5)]
@pytest.mark.parametrize(
"start_date",
[
datetime(2018, 1, 1, tzinfo=ZoneInfo("UTC")),
datetime(2021, 2, 2, tzinfo=ZoneInfo("UTC")),
datetime(2025, 3, 3, tzinfo=ZoneInfo("UTC")),
],
)
async def test_daily_with_end_date(self, start_date):
s = RRuleSchedule.from_rrule(rrule.rrule(freq=rrule.DAILY, dtstart=start_date))
dates = await s.get_dates(
5, start=start_date, end=start_date + timedelta(days=2, hours=-1)
)
assert dates == [start_date + timedelta(days=i) for i in range(2)]
async def test_rrule_returns_nothing_before_dtstart(self):
s = RRuleSchedule.from_rrule(
rrule.rrule(
freq=rrule.DAILY, dtstart=datetime(2030, 1, 1, tzinfo=ZoneInfo("UTC"))
)
)
dates = await s.get_dates(5, start=datetime(2030, 1, 1, tzinfo=ZoneInfo("UTC")))
assert dates == [
datetime(2030, 1, 1, tzinfo=ZoneInfo("UTC")) + timedelta(days=i)
for i in range(5)
]
async def test_rrule_validates_rrule_str(self):
# generic validation error
with pytest.raises(ValidationError):
RRuleSchedule(rrule="bad rrule string")
# generic validation error
with pytest.raises(ValidationError):
RRuleSchedule(rrule="FREQ=DAILYBAD")
# informative error when possible
with pytest.raises(ValidationError):
RRuleSchedule(rrule="FREQ=DAILYBAD")
async def test_rrule_max_rrule_len(self):
start = datetime(2000, 1, 1, tzinfo=ZoneInfo("UTC"))
s = "RDATE:" + ",".join(
[
(start + timedelta(days=i)).strftime("%Y%m%d") + "T000000Z"
for i in range(365 * 3)
]
)
assert len(s) > MAX_RRULE_LENGTH
with pytest.raises(ValidationError):
RRuleSchedule(rrule=s)
async def test_rrule_schedule_handles_complex_rrulesets(self):
s = RRuleSchedule(
rrule=(
"DTSTART:19970902T090000\n"
"RRULE:FREQ=YEARLY;COUNT=2;BYDAY=TU\n"
"RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TH\n"
)
)
dates_from_1900 = await s.get_dates(
5, start=datetime(1900, 1, 1, tzinfo=ZoneInfo("UTC"))
)
dates_from_2000 = await s.get_dates(
5, start=datetime(2000, 1, 1, tzinfo=ZoneInfo("UTC"))
)
assert len(dates_from_1900) == 3
assert len(dates_from_2000) == 0
async def test_rrule_schedule_preserves_and_localizes_rrules(self):
timezone = "America/New_York"
s = RRuleSchedule(
rrule=(
"DTSTART:19970902T090000\n"
"rrule:FREQ=YEARLY;COUNT=2;BYDAY=TU\n"
"RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TH\n"
),
timezone=timezone,
)
expected_tzinfo = dateutil.tz.gettz(timezone)
converted_rruleset = s.to_rrule()
assert len(converted_rruleset._rrule) == 2
assert converted_rruleset._rrule[0]._dtstart.tzinfo == expected_tzinfo
async def test_rrule_schedule_preserves_and_localizes_exrules(self):
timezone = "America/New_York"
s = RRuleSchedule(
rrule=(
"DTSTART:19970902T090000\n"
"EXRULE:FREQ=YEARLY;COUNT=2;BYDAY=TU\n"
"RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TH\n"
),
timezone=timezone,
)
expected_tzinfo = dateutil.tz.gettz(timezone)
converted_rruleset = s.to_rrule()
assert len(converted_rruleset._rrule) == 1
assert len(converted_rruleset._exrule) == 1
assert converted_rruleset._exrule[0]._dtstart.tzinfo == expected_tzinfo
async def test_rrule_schedule_preserves_and_localizes_rdates(self):
timezone = "America/New_York"
s = RRuleSchedule(
rrule="RDATE:20221012T134000Z,20221012T230000Z,20221013T120000Z,20221014T120000Z,20221015T120000Z",
timezone=timezone,
)
expected_tzinfo = dateutil.tz.gettz(timezone)
converted_rruleset = s.to_rrule()
assert len(converted_rruleset._rdate) == 5
assert len(converted_rruleset._exdate) == 0
assert all(rd.tzinfo == expected_tzinfo for rd in converted_rruleset._rdate)
async def test_rrule_schedule_preserves_and_localizes_exdates(self):
timezone = "America/New_York"
s = RRuleSchedule(
rrule="EXDATE:20221012T134000Z,20221012T230000Z,20221013T120000Z,20221014T120000Z,20221015T120000Z",
timezone=timezone,
)
expected_tzinfo = dateutil.tz.gettz(timezone)
converted_rruleset = s.to_rrule()
assert len(converted_rruleset._rdate) == 0
assert len(converted_rruleset._exdate) == 5
assert all(rd.tzinfo == expected_tzinfo for rd in converted_rruleset._exdate)
async def test_serialization_preserves_rrules_rdates_exrules_exdates(self):
dt_nyc = datetime(2018, 1, 11, 4, tzinfo=ZoneInfo("America/New_York"))
last_leap_year = datetime(2020, 2, 29, tzinfo=ZoneInfo("America/New_York"))
next_leap_year = datetime(2024, 2, 29, tzinfo=ZoneInfo("America/New_York"))
rrset = rrule.rruleset(cache=True)
rrset.rrule(rrule.rrule(rrule.HOURLY, count=10, dtstart=dt_nyc))
rrset.exrule(rrule.rrule(rrule.DAILY, count=10, dtstart=dt_nyc))
rrset.rdate(last_leap_year)
rrset.exdate(next_leap_year)
expected_tzinfo = dateutil.tz.gettz("America/New_York")
serialized_schedule = RRuleSchedule.from_rrule(rrset)
roundtrip_rruleset = serialized_schedule.to_rrule()
# assert string serialization preserves all rruleset components
assert len(roundtrip_rruleset._rrule) == 1
assert len(roundtrip_rruleset._exrule) == 1
assert len(roundtrip_rruleset._rdate) == 1
assert len(roundtrip_rruleset._exdate) == 1
# assert rruleset localizes all rruleset components
assert roundtrip_rruleset._rrule[0]._dtstart.tzinfo == expected_tzinfo
assert roundtrip_rruleset._exrule[0]._dtstart.tzinfo == expected_tzinfo
assert roundtrip_rruleset._rdate[0].tzinfo == expected_tzinfo
assert roundtrip_rruleset._exdate[0].tzinfo == expected_tzinfo
@pytest.mark.xfail(
reason="we currently cannot roundtrip RRuleSchedule objects for all timezones"
)
async def test_rrule_schedule_handles_rruleset_roundtrips(self):
s1 = RRuleSchedule(
rrule=(
"DTSTART:19970902T090000\n"
"RRULE:FREQ=YEARLY;COUNT=2;BYDAY=TU\n"
"RRULE:FREQ=YEARLY;COUNT=1;BYDAY=TH\n"
)
)
s2 = RRuleSchedule.from_rrule(s1.to_rrule())
s1_dates = await s1.get_dates(
5, start=datetime(1900, 1, 1, tzinfo=ZoneInfo("UTC"))
)
s2_dates = await s2.get_dates(
5, start=datetime(1900, 1, 1, tzinfo=ZoneInfo("UTC"))
)
assert s1_dates == s2_dates
async def test_rrule_schedule_rejects_rrulesets_with_many_dtstart_timezones(self):
dt_nyc = datetime(2018, 1, 11, 4, tzinfo=ZoneInfo("America/New_York"))
dt_chicago = datetime(2018, 1, 11, 3, tzinfo=ZoneInfo("America/Chicago"))
rrset = rrule.rruleset(cache=True)
rrset.rrule(rrule.rrule(rrule.HOURLY, count=10, dtstart=dt_nyc))
rrset.rrule(rrule.rrule(rrule.HOURLY, count=10, dtstart=dt_chicago))
with pytest.raises(ValueError, match="too many dtstart timezones"):
RRuleSchedule.from_rrule(rrset)
async def test_rrule_schedule_rejects_rrulesets_with_many_dtstarts(self):
dt_1 = datetime(2018, 1, 11, 4, tzinfo=ZoneInfo("America/New_York"))
dt_2 = datetime(2018, 2, 11, 4, tzinfo=ZoneInfo("America/New_York"))
rrset = rrule.rruleset(cache=True)
rrset.rrule(rrule.rrule(rrule.HOURLY, count=10, dtstart=dt_1))
rrset.rrule(rrule.rrule(rrule.HOURLY, count=10, dtstart=dt_2))
with pytest.raises(ValueError, match="too many dtstarts"):
RRuleSchedule.from_rrule(rrset)
@pytest.mark.xfail(
reason="we currently cannot roundtrip RRuleSchedule objects for all timezones"
)
async def test_rrule_schedule_handles_rrule_roundtrips(self):
dt = datetime(2018, 3, 11, 4, tzinfo=ZoneInfo("Europe/Berlin"))
base_rule = rrule.rrule(rrule.HOURLY, dtstart=dt)
s1 = RRuleSchedule.from_rrule(base_rule)
s2 = RRuleSchedule.from_rrule(s1.to_rrule())
assert s1.timezone == "CET"
assert s2.timezone == "CET"
base_dates = list(base_rule.xafter(datetime(1900, 1, 1), count=5))
s1_dates = await s1.get_dates(
5, start=datetime(1900, 1, 1, tzinfo=ZoneInfo("Europe/Berlin"))
)
s2_dates = await s2.get_dates(
5, start=datetime(1900, 1, 1, tzinfo=ZoneInfo("Europe/Berlin"))
)
assert base_dates == s1_dates == s2_dates
async def test_rrule_from_str(self):
# create a schedule from an RRule object
s1 = RRuleSchedule.from_rrule(
rrule.rrule(
freq=rrule.DAILY,
count=5,
dtstart=datetime.now(ZoneInfo("UTC")) + timedelta(hours=1),
)
)
assert isinstance(s1.rrule, str)
assert s1.rrule.endswith("RRULE:FREQ=DAILY;COUNT=5")
# create a schedule from the equivalent RRule string
s2 = RRuleSchedule(rrule=s1.rrule)
dts1 = await s1.get_dates(n=10)
dts2 = await s2.get_dates(n=10)
assert dts1 == dts2
assert len(dts1) == 5
async def test_rrule_validates_rrule_obj(self):
with pytest.raises(ValueError, match="(Invalid RRule object)"):
RRuleSchedule.from_rrule("bad rrule")
@pytest.mark.parametrize(
"rrule_obj,rrule_str,expected_dts",
[
# Every third year (INTERVAL) on the first Tuesday (BYDAY) after a Monday (BYMONTHDAY) in October.
(
rrule.rrule(
rrule.YEARLY,
dt,
interval=3,
bymonth=10,
byweekday=rrule.TU,
bymonthday=(2, 3, 4, 5, 6, 7, 8),
),
"DTSTART:20200101T000000\nRRULE:FREQ=YEARLY;INTERVAL=3;BYMONTH=10;BYMONTHDAY=2,3,4,5,6,7,8;BYDAY=TU",
[
datetime(2020, 10, 6, 0, 0, tzinfo=ZoneInfo("UTC")),
datetime(2023, 10, 3, 0, 0, tzinfo=ZoneInfo("UTC")),
datetime(2026, 10, 6, 0, 0, tzinfo=ZoneInfo("UTC")),
],
),
# every minute
(
rrule.rrule(rrule.MINUTELY, dt),
"DTSTART:20200101T000000\nRRULE:FREQ=MINUTELY",
[
dt + timedelta(minutes=0),
dt + timedelta(minutes=1),
dt + timedelta(minutes=2),
],
),
# last weekday of every other month
(
rrule.rrule(
rrule.MONTHLY,
dt,
interval=2,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH, rrule.FR),
bysetpos=-1,
),
"DTSTART:20200101T000000\nRRULE:FREQ=MONTHLY;INTERVAL=2;BYSETPOS=-1;BYDAY=MO,TU,WE,TH,FR",
[
datetime(2020, 1, 31, tzinfo=ZoneInfo("UTC")),
datetime(2020, 3, 31, tzinfo=ZoneInfo("UTC")),
datetime(2020, 5, 29, tzinfo=ZoneInfo("UTC")),
],
),
# Every weekday (BYDAY) for the next 8 weekdays (COUNT).
(
rrule.rrule(
rrule.DAILY,
dt,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH, rrule.FR),
count=8,
),
"DTSTART:20200101T000000\nRRULE:FREQ=DAILY;COUNT=8;BYDAY=MO,TU,WE,TH,FR",
[
datetime(2020, 1, 1, tzinfo=ZoneInfo("UTC")),
datetime(2020, 1, 2, tzinfo=ZoneInfo("UTC")),
datetime(2020, 1, 3, tzinfo=ZoneInfo("UTC")),
],
),
# Every three weeks on Sunday until 9/23/2021
(
rrule.rrule(
rrule.WEEKLY,
dt,
byweekday=rrule.SU,
interval=3,
until=datetime(2021, 9, 23, tzinfo=ZoneInfo("UTC")),
),
"DTSTART:20200101T000000\nRRULE:FREQ=WEEKLY;INTERVAL=3;UNTIL=20210923T000000;BYDAY=SU",
[
datetime(2020, 1, 5, tzinfo=ZoneInfo("UTC")),
datetime(2020, 1, 26, tzinfo=ZoneInfo("UTC")),
datetime(2020, 2, 16, tzinfo=ZoneInfo("UTC")),
],
),
# every week at 9:13:54
(
rrule.rrule(rrule.WEEKLY, dt, byhour=9, byminute=13, bysecond=54),
"DTSTART:20200101T000000\nRRULE:FREQ=WEEKLY;BYHOUR=9;BYMINUTE=13;BYSECOND=54",
[
datetime(2020, 1, 1, 9, 13, 54, tzinfo=ZoneInfo("UTC")),
datetime(2020, 1, 8, 9, 13, 54, tzinfo=ZoneInfo("UTC")),
datetime(2020, 1, 15, 9, 13, 54, tzinfo=ZoneInfo("UTC")),
],
),
# every year on the 7th and 16th week, on the first weekday
(
rrule.rrule(rrule.YEARLY, dt, byweekno=(7, 16), byweekday=rrule.WE),
"DTSTART:20200101T000000\nRRULE:FREQ=YEARLY;BYWEEKNO=7,16;BYDAY=WE",
[
datetime(2020, 2, 12, tzinfo=ZoneInfo("UTC")),
datetime(2020, 4, 15, tzinfo=ZoneInfo("UTC")),
datetime(2021, 2, 17, tzinfo=ZoneInfo("UTC")),
],
),
],
)
async def test_rrule(self, rrule_obj, rrule_str, expected_dts):
s = RRuleSchedule.from_rrule(rrule_obj)
assert s.model_dump()["rrule"] == rrule_str
dates = await s.get_dates(n=3, start=dt)
assert dates == expected_dts
async def test_rrule_with_count(self):
# Every weekday (BYDAY) for the next 8 weekdays (COUNT).
s = RRuleSchedule.from_rrule(
rrule.rrule(
rrule.DAILY,
dt,
byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH, rrule.FR),
count=8,
)
)
assert (
s.model_dump()["rrule"]
== "DTSTART:20200101T000000\nRRULE:FREQ=DAILY;COUNT=8;BYDAY=MO,TU,WE,TH,FR"
)
dates = await s.get_dates(n=100, start=dt)
assert dates == [
dt + timedelta(days=0),
dt + timedelta(days=1),
dt + timedelta(days=2),
dt + timedelta(days=5),
dt + timedelta(days=6),
dt + timedelta(days=7),
dt + timedelta(days=8),
dt + timedelta(days=9),
]
@pytest.fixture
async def weekly_on_friday() -> RRuleSchedule:
return RRuleSchedule(rrule="FREQ=WEEKLY;INTERVAL=1;BYDAY=FR", timezone="UTC")
async def test_unanchored_rrule_schedules_are_idempotent(
weekly_on_friday: RRuleSchedule,
):
"""Regression test for an issue discovered in Prefect Cloud, where a schedule with
an RRULE that didn't anchor to a specific time was being rescheduled every time the
scheduler loop picked it up. This is because when a user does not provide a DTSTART
in their rule, then the current time is assumed to be the DTSTART.
This test confirms the behavior when a user does _not_ provide a DTSTART.
"""
start = datetime(2023, 6, 8, tzinfo=ZoneInfo("UTC"))
end = start + timedelta(days=21)
assert start.weekday() == 3
first_set = await weekly_on_friday.get_dates(
n=3,
start=start,
end=end,
)
# Sleep long enough that a full second definitely ticks over, because the RRULE
# precision is only to the second.
await asyncio.sleep(1.1)
second_set = await weekly_on_friday.get_dates(
n=3,
start=start,
end=end,
)
assert first_set == second_set
assert [dt.date() for dt in first_set] == [
date(2023, 6, 9),
date(2023, 6, 16),
date(2023, 6, 23),
]
for date_obj in first_set:
assert date_obj.weekday() == 4
@pytest.fixture
async def weekly_at_1pm_fridays() -> RRuleSchedule:
return RRuleSchedule(
rrule="DTSTART:20230608T130000\nFREQ=WEEKLY;INTERVAL=1;BYDAY=FR",
timezone="UTC",
)
async def test_rrule_schedules_can_have_embedded_anchors(
weekly_at_1pm_fridays: RRuleSchedule,
):
"""Regression test for an issue discovered in Prefect Cloud, where a schedule with
an RRULE that didn't anchor to a specific time was being rescheduled every time the
scheduler loop picked it up. This is because when a user does not provide a DTSTART
in their rule, then the current time is assumed to be the DTSTART.
This case confirms that if a user provides an alternative DTSTART it will be
respected.
"""
start = datetime(2023, 6, 8, tzinfo=ZoneInfo("UTC"))
end = start + timedelta(days=21)
assert start.weekday() == 3
first_set = await weekly_at_1pm_fridays.get_dates(
n=3,
start=start,
end=end,
)
# Sleep long enough that a full second definitely ticks over, because the RRULE
# precision is only to the second.
await asyncio.sleep(1.1)
second_set = await weekly_at_1pm_fridays.get_dates(
n=3,
start=start,
end=end,
)
assert first_set == second_set
assert first_set == [
datetime(2023, 6, 9, 13, tzinfo=ZoneInfo("UTC")),
datetime(2023, 6, 16, 13, tzinfo=ZoneInfo("UTC")),
datetime(2023, 6, 23, 13, tzinfo=ZoneInfo("UTC")),
]
for date_obj in first_set:
assert date_obj.weekday() == 4
| TestRRuleSchedule |
python | ansible__ansible | lib/ansible/_internal/_templating/_lazy_containers.py | {
"start": 2844,
"end": 3575
} | class ____(Sentinel):
"""Sentinel used to indicate a requested key was not found."""
# There are several operations performed by lazy containers, with some variation between types.
#
# Columns: D=dict, L=list, T=tuple
# Cells: l=lazy (upon access), n=non-lazy (__init__/__new__)
#
# D L T Feature Description
# - - - ----------- ---------------------------------------------------------------
# l l n propagation when container items which are containers become lazy instances
# l l n transform when transforms are applied to container items
# l l n templating when templating is performed on container items
# l l l access when access calls are performed on container items
| _NoKeySentinel |
python | django__django | tests/model_regress/tests.py | {
"start": 9472,
"end": 10103
} | class ____(TestCase):
def test_fields_cache_reset_on_copy(self):
department1 = Department.objects.create(id=1, name="department1")
department2 = Department.objects.create(id=2, name="department2")
worker1 = Worker.objects.create(name="worker", department=department1)
worker2 = copy.copy(worker1)
self.assertEqual(worker2.department, department1)
# Changing related fields doesn't mutate the base object.
worker2.department = department2
self.assertEqual(worker2.department, department2)
self.assertEqual(worker1.department, department1)
| ModelFieldsCacheTest |
python | getsentry__sentry | src/sentry/issues/grouptype.py | {
"start": 15961,
"end": 16321
} | class ____(GroupType):
type_id = 1013
slug = "performance_db_main_thread"
description = "DB on Main Thread"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.MOBILE.value
noise_config = NoiseConfig()
default_priority = PriorityLevel.LOW
released = True
@dataclass(frozen=True)
| PerformanceDBMainThreadGroupType |
python | huggingface__transformers | src/transformers/models/gemma3/configuration_gemma3.py | {
"start": 1442,
"end": 12314
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Gemma3TextModel`]. It is used to instantiate an Gemma3Text
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Gemma3Text-7B.
e.g. [google/gemma3_text-7b](https://huggingface.co/google/gemma3_text-7b)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 262208):
Vocabulary size of the Gemma3Text model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`Gemma3TextModel`]
hidden_size (`int`, *optional*, defaults to 2304):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 9216):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 26):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 4):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
head_dim (`int`, *optional*, defaults to 256):
The attention head dimension.
hidden_activation (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
max_position_embeddings (`int`, *optional*, defaults to 131072):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
eos_token_id (`int`, *optional*, defaults to 1):
End of stream token id.
bos_token_id (`int`, *optional*, defaults to 2):
Beginning of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether to tie weight embeddings
attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
query_pre_attn_scalar (`float`, *optional*, defaults to 256):
Scaling factor used on the attention scores
sliding_window (`int`, *optional*, defaults to 4096):
In Gemma3Text, every other layer uses sliding window attention. This is the size of the sliding window.
layer_types (`list`, *optional*):
Attention pattern for each layer.
final_logit_softcapping (`float`, *optional*):
Scaling factor when applying tanh softcapping on the logits.
attn_logit_softcapping (`float`, *optional*):
Scaling factor when applying tanh softcapping on the attention scores.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
use_bidirectional_attention (`bool`, *optional*, defaults to `False`):
If True, the model will attend to all text tokens instead of using a causal mask. This does not change
behavior for vision tokens.
```python
>>> from transformers import Gemma3TextModel, Gemma3TextConfig
>>> # Initializing a Gemma3Text gemma3_text-7b style configuration
>>> configuration = Gemma3TextConfig()
>>> # Initializing a model from the gemma3_text-7b style configuration
>>> model = Gemma3TextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "gemma3_text"
keys_to_ignore_at_inference = ["past_key_values"]
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
default_theta = {"global": 1_000_000.0, "local": 10_000.0}
def __init__(
self,
vocab_size: Optional[int] = 262_208,
hidden_size: Optional[int] = 2304,
intermediate_size: Optional[int] = 9216,
num_hidden_layers: Optional[int] = 26,
num_attention_heads: Optional[int] = 8,
num_key_value_heads: Optional[int] = 4,
head_dim: Optional[int] = 256,
hidden_activation: Optional[str] = "gelu_pytorch_tanh",
max_position_embeddings: Optional[int] = 131_072,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[int] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = 0,
eos_token_id: Optional[int] = 1,
bos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[bool] = True,
attention_bias: Optional[bool] = False,
attention_dropout: Optional[float] = 0.0,
query_pre_attn_scalar: Optional[int] = 256,
sliding_window: Optional[int] = 4096,
layer_types: Optional[list[str]] = None,
final_logit_softcapping: Optional[float] = None,
attn_logit_softcapping: Optional[float] = None,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
use_bidirectional_attention: Optional[bool] = False,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.head_dim = head_dim
self.num_key_value_heads = num_key_value_heads
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
self.hidden_activation = hidden_activation
self.query_pre_attn_scalar = query_pre_attn_scalar
self.sliding_window = sliding_window
self.final_logit_softcapping = final_logit_softcapping
self.attn_logit_softcapping = attn_logit_softcapping
self.layer_types = layer_types
self.use_bidirectional_attention = use_bidirectional_attention
if use_bidirectional_attention:
self.sliding_window = (self.sliding_window // 2) + 1 # due to fa we set exclusive bounds
# BC -> the pattern used to be a simple int, and it's still present in configs on the Hub
self._sliding_window_pattern = kwargs.get("sliding_window_pattern", 6)
if self.layer_types is None:
self.layer_types = [
"sliding_attention" if bool((i + 1) % self._sliding_window_pattern) else "full_attention"
for i in range(self.num_hidden_layers)
]
layer_type_validation(self.layer_types, self.num_hidden_layers)
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
def convert_rope_params_to_dict(self, ignore_keys_at_rope_validation=None, **kwargs):
rope_scaling = kwargs.pop("rope_scaling", None)
# Try to set `rope_scaling` if available, otherwise use `rope_parameters`. If we find `rope_parameters`
# as arg in the inputs, we can safely assume that it is in the new format. New naming used -> new format
default_rope_params = {
"sliding_attention": {"rope_type": "default"},
"full_attention": {"rope_type": "default"},
}
self.rope_parameters = self.rope_parameters if self.rope_parameters is not None else default_rope_params
if rope_scaling is not None:
self.rope_parameters["full_attention"].update(rope_scaling)
self.rope_parameters["full_attention"].setdefault(
"rope_theta", kwargs.pop("rope_theta", self.default_theta["global"])
)
self.rope_parameters["sliding_attention"].setdefault(
"rope_theta", kwargs.pop("rope_local_base_freq", self.default_theta["local"])
)
# Standardize and validate the correctness of rotary position embeddings parameters
self.standardize_rope_params()
self.validate_rope(ignore_keys=ignore_keys_at_rope_validation)
return kwargs
| Gemma3TextConfig |
python | PyCQA__pylint | doc/data/messages/a/abstract-class-instantiated/good.py | {
"start": 13,
"end": 101
} | class ____(abc.ABC):
@abc.abstractmethod
def make_sound(self):
pass
| Animal |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/data_structures/lookup_ops_test.py | {
"start": 164118,
"end": 165947
} | class ____(test.Benchmark):
def _create_table(self):
return lookup_ops.MutableHashTable(dtypes.int64, dtypes.float32, 0.0)
def benchmark_single_repeated_scalar_insert_scalar(self):
table = self._create_table()
value = variables.Variable(1.0)
insert = table.insert(0, value)
size = table.size()
with session.Session() as sess:
sess.run(value.initializer)
self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=10000)
assert sess.run(size) == 1
def benchmark_many_repeated_scalar_insert_scalar(self):
table = self._create_table()
c = dataset_ops.make_one_shot_iterator(counter.Counter()).get_next()
value = variables.Variable(1.0)
insert = table.insert(c, value)
size = table.size()
with session.Session() as sess:
sess.run(value.initializer)
self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=10000)
assert sess.run(size) >= 10000
def benchmark_single_repeated_batch_32_insert_scalar(self):
table = self._create_table()
value = variables.Variable([1.0] * 32)
insert = table.insert(list(range(32)), value)
size = table.size()
with session.Session() as sess:
sess.run(value.initializer)
self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=1000)
assert sess.run(size) == 32
def benchmark_many_repeated_batch_32_insert_scalar(self):
table = self._create_table()
c = dataset_ops.make_one_shot_iterator(counter.Counter()).get_next()
value = variables.Variable([1.0] * 32)
insert = table.insert(32 * c + list(range(32)), value)
size = table.size()
with session.Session() as sess:
sess.run(value.initializer)
self.run_op_benchmark(sess, insert, burn_iters=10, min_iters=1000)
assert sess.run(size) >= 1000 * 32
| MutableHashTableBenchmark |
python | lazyprogrammer__machine_learning_examples | rl2/atari/dqn_theano.py | {
"start": 7138,
"end": 14414
} | class ____:
def __init__(self, K, conv_layer_sizes, hidden_layer_sizes):
self.K = K
# inputs and targets
X = T.ftensor4('X')
G = T.fvector('G')
actions = T.ivector('actions')
# create the graph
self.conv_layers = []
num_input_filters = 4 # number of filters / color channels
current_size = IM_SIZE
for num_output_filters, filtersz, stride in conv_layer_sizes:
### not using this currently, it didn't make a difference ###
# cut = None
# if filtersz % 2 == 0: # if even
# cut = (current_size + stride - 1) // stride
layer = ConvLayer(num_input_filters, num_output_filters, filtersz, stride)
current_size = (current_size + stride - 1) // stride
# print("current_size:", current_size)
self.conv_layers.append(layer)
num_input_filters = num_output_filters
# get conv output size
Z = X / 255.0
for layer in self.conv_layers:
Z = layer.forward(Z)
conv_out = Z.flatten(ndim=2)
conv_out_op = theano.function(inputs=[X], outputs=conv_out, allow_input_downcast=True)
test = conv_out_op(np.random.randn(1, 4, IM_SIZE, IM_SIZE))
flattened_ouput_size = test.shape[1]
# build fully connected layers
self.layers = []
M1 = flattened_ouput_size
print("flattened_ouput_size:", flattened_ouput_size)
for M2 in hidden_layer_sizes:
layer = HiddenLayer(M1, M2)
self.layers.append(layer)
M1 = M2
# final layer
layer = HiddenLayer(M1, K, lambda x: x)
self.layers.append(layer)
# collect params for copy
self.params = []
for layer in (self.conv_layers + self.layers):
self.params += layer.params
# calculate final output and cost
Z = conv_out
for layer in self.layers:
Z = layer.forward(Z)
Y_hat = Z
selected_action_values = Y_hat[T.arange(actions.shape[0]), actions]
cost = T.mean((G - selected_action_values)**2)
# create train function
updates = adam(cost, self.params)
# compile functions
self.train_op = theano.function(
inputs=[X, G, actions],
outputs=cost,
updates=updates,
allow_input_downcast=True
)
self.predict_op = theano.function(
inputs=[X],
outputs=Y_hat,
allow_input_downcast=True
)
def copy_from(self, other):
my_params = self.params
other_params = other.params
for p, q in zip(my_params, other_params):
actual = q.get_value()
p.set_value(actual)
def predict(self, X):
return self.predict_op(X)
def update(self, states, actions, targets):
return self.train_op(states, targets, actions)
def sample_action(self, x, eps):
if np.random.random() < eps:
return np.random.choice(self.K)
else:
return np.argmax(self.predict([x])[0])
def learn(model, target_model, experience_replay_buffer, gamma, batch_size):
# Sample experiences
states, actions, rewards, next_states, dones = experience_replay_buffer.get_minibatch()
# Calculate targets
next_Qs = target_model.predict(next_states)
next_Q = np.amax(next_Qs, axis=1)
targets = rewards + np.invert(dones).astype(np.float32) * gamma * next_Q
# Update model
loss = model.update(states, actions, targets)
return loss
def play_one(
env,
total_t,
experience_replay_buffer,
model,
target_model,
gamma,
batch_size,
epsilon,
epsilon_change,
epsilon_min):
t0 = datetime.now()
# Reset the environment
obs = env.reset()
obs_small = downsample_image(obs)
state = np.stack([obs_small] * 4, axis=0)
loss = None
total_time_training = 0
num_steps_in_episode = 0
episode_reward = 0
done = False
while not done:
# Update target network
if total_t % TARGET_UPDATE_PERIOD == 0:
target_model.copy_from(model)
print("Copied model parameters to target network. total_t = %s, period = %s" % (total_t, TARGET_UPDATE_PERIOD))
# Take action
action = model.sample_action(state, epsilon)
obs, reward, done, _ = env.step(action)
obs_small = downsample_image(obs)
next_state = np.append(state[1:], np.expand_dims(obs_small, 0), axis=0)
episode_reward += reward
# Save the latest experience
experience_replay_buffer.add_experience(action, obs_small, reward, done)
# Train the model, keep track of time
t0_2 = datetime.now()
loss = learn(model, target_model, experience_replay_buffer, gamma, batch_size)
dt = datetime.now() - t0_2
total_time_training += dt.total_seconds()
num_steps_in_episode += 1
state = next_state
total_t += 1
epsilon = max(epsilon - epsilon_change, epsilon_min)
return total_t, episode_reward, (datetime.now() - t0), num_steps_in_episode, total_time_training/num_steps_in_episode, epsilon
def smooth(x):
# last 100
n = len(x)
y = np.zeros(n)
for i in range(n):
start = max(0, i - 99)
y[i] = float(x[start:(i+1)].sum()) / (i - start + 1)
return y
if __name__ == '__main__':
# hyperparams and initialize stuff
conv_layer_sizes = [(32, 8, 4), (64, 4, 2), (64, 3, 1)]
hidden_layer_sizes = [512]
gamma = 0.99
batch_sz = 32
num_episodes = 5000
total_t = 0
experience_replay_buffer = ReplayMemory()
episode_rewards = np.zeros(num_episodes)
step_counts = np.zeros(num_episodes)
# epsilon
# decays linearly until 0.1
epsilon = 1.0
epsilon_min = 0.1
epsilon_change = (epsilon - epsilon_min) / 500000
# Create environment
env = gym.envs.make("Breakout-v0")
# Create models
model = DQN(
K=K,
conv_layer_sizes=conv_layer_sizes,
hidden_layer_sizes=hidden_layer_sizes,
)
target_model = DQN(
K=K,
conv_layer_sizes=conv_layer_sizes,
hidden_layer_sizes=hidden_layer_sizes,
)
print("Populating experience replay buffer...")
obs = env.reset()
obs_small = downsample_image(obs)
for i in range(MIN_EXPERIENCES):
action = np.random.choice(K)
obs, reward, done, _ = env.step(action)
obs_small = downsample_image(obs)
experience_replay_buffer.add_experience(action, obs_small, reward, done)
if done:
obs = env.reset()
# Play a number of episodes and learn!
t0 = datetime.now()
for i in range(num_episodes):
total_t, episode_reward, duration, num_steps_in_episode, time_per_step, epsilon = play_one(
env,
total_t,
experience_replay_buffer,
model,
target_model,
gamma,
batch_sz,
epsilon,
epsilon_change,
epsilon_min,
)
episode_rewards[i] = episode_reward
step_counts[i] = num_steps_in_episode
last_100_avg = episode_rewards[max(0, i - 100):i + 1].mean()
last_100_avg_steps = step_counts[max(0, i - 100):i + 1].mean()
print("Episode:", i,
"Duration:", duration,
"Num steps:", num_steps_in_episode,
"Reward:", episode_reward,
"Training time per step:", "%.3f" % time_per_step,
"Avg Reward (Last 100):", "%.3f" % last_100_avg,
"Avg Steps (Last 100):", "%.1f" % last_100_avg_steps,
"Epsilon:", "%.3f" % epsilon
)
sys.stdout.flush()
print("Total duration:", datetime.now() - t0)
# Plot the smoothed returns
y = smooth(episode_rewards)
plt.plot(episode_rewards, label='orig')
plt.plot(y, label='smoothed')
plt.legend()
plt.show()
| DQN |
python | getsentry__sentry | src/sentry/api/serializers/models/rule.py | {
"start": 1853,
"end": 2173
} | class ____(TypedDict, total=False):
owner: str | None
createdBy: RuleCreatedBy | None
environment: str | None
lastTriggered: str | None
snoozeCreatedBy: str | None
snoozeForEveryone: bool | None
disableReason: str
disableDate: str
errors: list[_ErrorDict]
| RuleSerializerResponseOptional |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_s3.py | {
"start": 16946,
"end": 17729
} | class ____:
def test_execute(self):
operator = S3ListPrefixesOperator(
task_id="test-s3-list-prefixes-operator", bucket=BUCKET_NAME, prefix="test/", delimiter="/"
)
operator.hook = mock.MagicMock()
operator.hook.list_prefixes.return_value = ["test/"]
subfolders = operator.execute(None)
operator.hook.list_prefixes.assert_called_once_with(
bucket_name=BUCKET_NAME, prefix="test/", delimiter="/"
)
assert subfolders == ["test/"]
def test_template_fields(self):
operator = S3ListPrefixesOperator(
task_id="test-s3-list-prefixes-operator", bucket=BUCKET_NAME, prefix="test/", delimiter="/"
)
validate_template_fields(operator)
| TestS3ListPrefixesOperator |
python | django__django | tests/queries/test_query.py | {
"start": 8847,
"end": 9069
} | class ____(SimpleTestCase):
def test_repr(self):
self.assertEqual(
repr(JoinPromoter(AND, 3, True)),
"JoinPromoter(connector='AND', num_children=3, negated=True)",
)
| JoinPromoterTest |
python | huggingface__transformers | src/transformers/models/auto/modeling_auto.py | {
"start": 83686,
"end": 83975
} | class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
AutoModelForSeq2SeqLM = auto_class_update(
AutoModelForSeq2SeqLM,
head_doc="sequence-to-sequence language modeling",
checkpoint_for_example="google-t5/t5-base",
)
| AutoModelForSeq2SeqLM |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_dialect.py | {
"start": 30103,
"end": 33962
} | class ____:
__only_on__ = "postgresql+psycopg2"
__backend__ = True
run_create_tables = "each"
run_deletes = None
options = None
@config.fixture()
def connection(self):
opts = dict(self.options)
opts["use_reaper"] = False
eng = engines.testing_engine(options=opts)
conn = eng.connect()
trans = conn.begin()
yield conn
if trans.is_active:
trans.rollback()
conn.close()
eng.dispose()
@classmethod
def define_tables(cls, metadata):
Table(
"data",
metadata,
Column("id", Integer, primary_key=True),
Column("x", String),
Column("y", String),
Column("z", Integer, server_default="5"),
)
Table(
"Unitéble2",
metadata,
Column("méil", Integer, primary_key=True),
Column("\u6e2c\u8a66", Integer),
)
def test_insert_unicode_keys(self, connection):
table = self.tables["Unitéble2"]
stmt = table.insert()
connection.execute(
stmt,
[
{"méil": 1, "\u6e2c\u8a66": 1},
{"méil": 2, "\u6e2c\u8a66": 2},
{"méil": 3, "\u6e2c\u8a66": 3},
],
)
eq_(connection.execute(table.select()).all(), [(1, 1), (2, 2), (3, 3)])
def test_update_fallback(self, connection):
from psycopg2 import extras
batch_page_size = connection.dialect.executemany_batch_page_size
meth = extras.execute_batch
stmt = "UPDATE data SET y=%(yval)s WHERE data.x = %(xval)s"
expected_kwargs = {"page_size": batch_page_size}
with mock.patch.object(
extras, meth.__name__, side_effect=meth
) as mock_exec:
connection.execute(
self.tables.data.update()
.where(self.tables.data.c.x == bindparam("xval"))
.values(y=bindparam("yval")),
[
{"xval": "x1", "yval": "y5"},
{"xval": "x3", "yval": "y6"},
],
)
if (
connection.dialect.executemany_mode
is EXECUTEMANY_VALUES_PLUS_BATCH
):
eq_(
mock_exec.mock_calls,
[
mock.call(
mock.ANY,
stmt,
[
{"xval": "x1", "yval": "y5"},
{"xval": "x3", "yval": "y6"},
],
**expected_kwargs,
)
],
)
else:
eq_(mock_exec.mock_calls, [])
def test_not_sane_rowcount(self, connection):
if (
connection.dialect.executemany_mode
is EXECUTEMANY_VALUES_PLUS_BATCH
):
assert not connection.dialect.supports_sane_multi_rowcount
else:
assert connection.dialect.supports_sane_multi_rowcount
def test_update(self, connection):
connection.execute(
self.tables.data.insert(),
[
{"x": "x1", "y": "y1"},
{"x": "x2", "y": "y2"},
{"x": "x3", "y": "y3"},
],
)
connection.execute(
self.tables.data.update()
.where(self.tables.data.c.x == bindparam("xval"))
.values(y=bindparam("yval")),
[{"xval": "x1", "yval": "y5"}, {"xval": "x3", "yval": "y6"}],
)
eq_(
connection.execute(
select(self.tables.data).order_by(self.tables.data.c.id)
).fetchall(),
[(1, "x1", "y5", 5), (2, "x2", "y2", 5), (3, "x3", "y6", 5)],
)
| ExecuteManyMode |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/assets/graph/asset_graph_differ.py | {
"start": 424,
"end": 859
} | class ____(Enum):
"""What change an asset has undergone between two deployments. Used
in distinguishing asset definition changes in branch deployment and
in subsequent other deployments.
"""
NEW = "NEW"
CODE_VERSION = "CODE_VERSION"
DEPENDENCIES = "DEPENDENCIES"
PARTITIONS_DEFINITION = "PARTITIONS_DEFINITION"
TAGS = "TAGS"
METADATA = "METADATA"
REMOVED = "REMOVED"
| AssetDefinitionChangeType |
python | streamlit__streamlit | lib/streamlit/errors.py | {
"start": 6289,
"end": 6641
} | class ____(LocalizableStreamlitException):
"""Exception raised when an invalid key is specified."""
def __init__(self, key: str) -> None:
super().__init__(
'We only accept the keys: `"Get help"`, `"Report a bug"`, and `"About"` (`"{key}"` is not a valid key.)',
key=key,
)
| StreamlitInvalidMenuItemKeyError |
python | tornadoweb__tornado | tornado/websocket.py | {
"start": 23028,
"end": 26055
} | class ____(abc.ABC):
"""Base class for WebSocket protocol versions."""
def __init__(self, handler: "_WebSocketDelegate") -> None:
self.handler = handler
self.stream = None # type: Optional[IOStream]
self.client_terminated = False
self.server_terminated = False
def _run_callback(
self, callback: Callable, *args: Any, **kwargs: Any
) -> "Optional[Future[Any]]":
"""Runs the given callback with exception handling.
If the callback is a coroutine, returns its Future. On error, aborts the
websocket connection and returns None.
"""
try:
result = callback(*args, **kwargs)
except Exception:
self.handler.log_exception(*sys.exc_info())
self._abort()
return None
else:
if result is not None:
result = gen.convert_yielded(result)
assert self.stream is not None
self.stream.io_loop.add_future(result, lambda f: f.result())
return result
def on_connection_close(self) -> None:
self._abort()
def _abort(self) -> None:
"""Instantly aborts the WebSocket connection by closing the socket"""
self.client_terminated = True
self.server_terminated = True
if self.stream is not None:
self.stream.close() # forcibly tear down the connection
self.close() # let the subclass cleanup
@abc.abstractmethod
def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> None:
raise NotImplementedError()
@abc.abstractmethod
def is_closing(self) -> bool:
raise NotImplementedError()
@abc.abstractmethod
async def accept_connection(self, handler: WebSocketHandler) -> None:
raise NotImplementedError()
@abc.abstractmethod
def write_message(
self, message: Union[str, bytes, Dict[str, Any]], binary: bool = False
) -> "Future[None]":
raise NotImplementedError()
@property
@abc.abstractmethod
def selected_subprotocol(self) -> Optional[str]:
raise NotImplementedError()
@abc.abstractmethod
def write_ping(self, data: bytes) -> None:
raise NotImplementedError()
# The entry points below are used by WebSocketClientConnection,
# which was introduced after we only supported a single version of
# WebSocketProtocol. The WebSocketProtocol/WebSocketProtocol13
# boundary is currently pretty ad-hoc.
@abc.abstractmethod
def _process_server_headers(
self, key: Union[str, bytes], headers: httputil.HTTPHeaders
) -> None:
raise NotImplementedError()
@abc.abstractmethod
def start_pinging(self) -> None:
raise NotImplementedError()
@abc.abstractmethod
async def _receive_frame_loop(self) -> None:
raise NotImplementedError()
@abc.abstractmethod
def set_nodelay(self, x: bool) -> None:
raise NotImplementedError()
| WebSocketProtocol |
python | psf__black | tests/data/cases/dummy_implementations.py | {
"start": 3322,
"end": 3384
} | class ____:
def f(self):
# Comment
...
| ClassH |
python | scikit-learn__scikit-learn | sklearn/decomposition/_dict_learning.py | {
"start": 34537,
"end": 38398
} | class ____(ClassNamePrefixFeaturesOutMixin, TransformerMixin):
"""Base class from SparseCoder and DictionaryLearning algorithms."""
def __init__(
self,
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha,
split_sign,
n_jobs,
positive_code,
transform_max_iter,
):
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.transform_max_iter = transform_max_iter
self.split_sign = split_sign
self.n_jobs = n_jobs
self.positive_code = positive_code
def _transform(self, X, dictionary):
"""Private method allowing to accommodate both DictionaryLearning and
SparseCoder."""
X = validate_data(self, X, reset=False)
if hasattr(self, "alpha") and self.transform_alpha is None:
transform_alpha = self.alpha
else:
transform_alpha = self.transform_alpha
code = sparse_encode(
X,
dictionary,
algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=transform_alpha,
max_iter=self.transform_max_iter,
n_jobs=self.n_jobs,
positive=self.positive_code,
)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
def transform(self, X):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
"""
check_is_fitted(self)
return self._transform(X, self.components_)
def _inverse_transform(self, code, dictionary):
"""Private method allowing to accommodate both DictionaryLearning and
SparseCoder."""
code = check_array(code)
# compute number of expected features in code
expected_n_components = dictionary.shape[0]
if self.split_sign:
expected_n_components += expected_n_components
if not code.shape[1] == expected_n_components:
raise ValueError(
"The number of components in the code is different from the "
"number of components in the dictionary."
f"Expected {expected_n_components}, got {code.shape[1]}."
)
if self.split_sign:
n_samples, n_features = code.shape
n_features //= 2
code = code[:, :n_features] - code[:, n_features:]
return code @ dictionary
def inverse_transform(self, X):
"""Transform data back to its original space.
Parameters
----------
X : array-like of shape (n_samples, n_components)
Data to be transformed back. Must have the same number of
components as the data used to train the model.
Returns
-------
X_original : ndarray of shape (n_samples, n_features)
Transformed data.
"""
check_is_fitted(self)
return self._inverse_transform(X, self.components_)
| _BaseSparseCoding |
python | spack__spack | lib/spack/spack/repo.py | {
"start": 80742,
"end": 81165
} | class ____(RepoError):
"""Raised when a package's class constructor fails."""
def __init__(self, name, exc_type, exc_obj, exc_tb):
super().__init__(
"Class constructor failed for package '%s'." % name,
"\nCaused by:\n"
+ ("%s: %s\n" % (exc_type.__name__, exc_obj))
+ "".join(traceback.format_tb(exc_tb)),
)
self.name = name
| FailedConstructorError |
python | Netflix__metaflow | test/core/tests/large_mflog.py | {
"start": 67,
"end": 4857
} | class ____(MetaflowTest):
"""
Test that we can capture a large amount of log messages with
accurate timings
"""
PRIORITY = 2
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
HEADER = """
NUM_FOREACH = 32
NUM_LINES = 5000
"""
@steps(0, ["foreach-split-small"], required=True)
def split(self):
self.arr = range(NUM_FOREACH)
import random
import string
self.random_log_prefix = "".join(
[random.choice(string.ascii_lowercase) for _ in range(5)]
)
@steps(0, ["foreach-inner-small"], required=True)
def inner(self):
ISOFORMAT = "%Y-%m-%dT%H:%M:%S.%f"
from datetime import datetime
from metaflow import current
import sys
self.log_step = current.step_name
task_id = current.task_id
for i in range(NUM_LINES):
now = datetime.utcnow().strftime(ISOFORMAT)
print("%s %s stdout %d %s" % (self.random_log_prefix, task_id, i, now))
sys.stderr.write(
"%s %s stderr %d %s\n" % (self.random_log_prefix, task_id, i, now)
)
@steps(0, ["foreach-join-small"], required=True)
def join(self, inputs):
self.log_step = inputs[0].log_step
self.random_log_prefix = inputs[0].random_log_prefix
@steps(1, ["all"])
def step_all(self):
pass
@steps(0, ["end"])
def step_end(self):
self.num_foreach = NUM_FOREACH
self.num_lines = NUM_LINES
def check_results(self, flow, checker):
from itertools import groupby
from datetime import datetime
ISOFORMAT = "%Y-%m-%dT%H:%M:%S.%f"
_val = lambda n: list(checker.artifact_dict("end", n).values())[0][n]
step_name = _val("log_step")
num_foreach = _val("num_foreach")
num_lines = _val("num_lines")
random_log_prefix = _val("random_log_prefix")
run = checker.get_run()
for stream in ("stdout", "stderr"):
log = checker.get_log(step_name, stream)
# ignore event_logger noise and Batch/Lambda noise by only looking at
# log lines with the random prefix (generated by the very first step)
lines = [
line.split()
for line in log.splitlines()
if line.startswith(random_log_prefix)
]
assert_equals(len(lines), num_foreach * num_lines)
for task_id, task_lines_iter in groupby(lines, lambda x: x[1]):
task_lines = list(task_lines_iter)
assert_equals(len(task_lines), num_lines)
for i, (_, _, stream_type, idx, tstamp) in enumerate(task_lines):
# test that loglines originate from the correct stream
# and are properly ordered
assert_equals(stream_type, stream)
assert_equals(int(idx), i)
if run is not None:
for task in run[step_name]:
# test task.loglines
task_lines = [
(tstamp, msg)
for tstamp, msg in task.loglines(stream)
if msg.startswith(random_log_prefix)
]
assert_equals(len(task_lines), num_lines)
for i, (mf_tstamp, msg) in enumerate(task_lines):
_, task_id, stream_type, idx, tstamp_str = msg.split()
assert_equals(task_id, task.id)
assert_equals(stream_type, stream)
assert_equals(int(idx), i)
# May 13, 2021 - Muting this test for now since the
# GitHub CI runner is constrained on resources causing
# this test to flake. TODO: Make this check less flaky.
# tstamp = datetime.strptime(tstamp_str, ISOFORMAT)
# delta = mf_tstamp - tstamp
# # TODO challenge: optimize local runtime so that
# # delta.seconds can be made smaller, e.g. 5 secs
# # enable this line to see a distribution of deltas:
# # print("DELTA", delta.seconds)
# if delta.days > 0 or delta.seconds > 60:
# raise Exception("Time delta too high. "\
# "Mflog %s, user %s"\
# % (mf_tstamp, tstamp))
| LargeMflogTest |
python | sqlalchemy__sqlalchemy | test/sql/test_sequences.py | {
"start": 13558,
"end": 19666
} | class ____(fixtures.TestBase, testing.AssertsCompiledSQL):
__requires__ = ("sequences",)
__sparse_driver_backend__ = True
@testing.combinations(
(Sequence("foo_seq"),),
(Sequence("foo_seq", start=8),),
(Sequence("foo_seq", increment=5),),
)
def test_start_increment(self, seq):
seq = normalize_sequence(config, seq)
seq.create(testing.db)
try:
with testing.db.connect() as conn:
values = [conn.scalar(seq) for i in range(3)]
start = seq.start or testing.db.dialect.default_sequence_base
inc = seq.increment or 1
eq_(values, list(range(start, start + inc * 3, inc)))
finally:
seq.drop(testing.db)
def _has_sequence(self, connection, name):
return testing.db.dialect.has_sequence(connection, name)
def test_nextval_unsupported(self):
"""test next_value() used on non-sequence platform
raises NotImplementedError."""
s = normalize_sequence(config, Sequence("my_seq"))
d = sqlite.dialect()
assert_raises_message(
NotImplementedError,
"Dialect 'sqlite' does not support sequence increments.",
s.next_value().compile,
dialect=d,
)
def test_checkfirst_sequence(self, connection):
s = normalize_sequence(config, Sequence("my_sequence"))
s.create(connection, checkfirst=False)
assert self._has_sequence(connection, "my_sequence")
s.create(connection, checkfirst=True)
s.drop(connection, checkfirst=False)
assert not self._has_sequence(connection, "my_sequence")
s.drop(connection, checkfirst=True)
def test_checkfirst_metadata(self, connection):
m = MetaData()
Sequence("my_sequence", metadata=m)
m.create_all(connection, checkfirst=False)
assert self._has_sequence(connection, "my_sequence")
m.create_all(connection, checkfirst=True)
m.drop_all(connection, checkfirst=False)
assert not self._has_sequence(connection, "my_sequence")
m.drop_all(connection, checkfirst=True)
def test_checkfirst_table(self, connection):
m = MetaData()
s = normalize_sequence(config, Sequence("my_sequence"))
t = Table("t", m, Column("c", Integer, s, primary_key=True))
t.create(connection, checkfirst=False)
assert self._has_sequence(connection, "my_sequence")
t.create(connection, checkfirst=True)
t.drop(connection, checkfirst=False)
assert not self._has_sequence(connection, "my_sequence")
t.drop(connection, checkfirst=True)
@testing.provide_metadata
def test_table_overrides_metadata_create(self, connection):
metadata = self.metadata
normalize_sequence(config, Sequence("s1", metadata=metadata))
s2 = normalize_sequence(config, Sequence("s2", metadata=metadata))
s3 = normalize_sequence(config, Sequence("s3"))
t = Table("t", metadata, Column("c", Integer, s3, primary_key=True))
assert s3.metadata is metadata
t.create(connection, checkfirst=True)
s3.drop(connection)
# 't' is created, and 's3' won't be
# re-created since it's linked to 't'.
# 's1' and 's2' are, however.
metadata.create_all(connection)
assert self._has_sequence(connection, "s1")
assert self._has_sequence(connection, "s2")
assert not self._has_sequence(connection, "s3")
s2.drop(connection)
assert self._has_sequence(connection, "s1")
assert not self._has_sequence(connection, "s2")
metadata.drop_all(connection)
assert not self._has_sequence(connection, "s1")
assert not self._has_sequence(connection, "s2")
@testing.requires.insert_returning
@testing.requires.supports_sequence_for_autoincrement_column
@testing.provide_metadata
def test_freestanding_sequence_via_autoinc(self, connection):
t = Table(
"some_table",
self.metadata,
Column(
"id",
Integer,
autoincrement=True,
primary_key=True,
default=normalize_sequence(
config, Sequence("my_sequence", metadata=self.metadata)
).next_value(),
),
)
self.metadata.create_all(connection)
result = connection.execute(t.insert())
eq_(result.inserted_primary_key, (1,))
@testing.requires.sequences_as_server_defaults
@testing.provide_metadata
def test_shared_sequence(self, connection):
# test case for #6071
common_seq = normalize_sequence(
config, Sequence("common_sequence", metadata=self.metadata)
)
Table(
"table_1",
self.metadata,
Column(
"id",
Integer,
common_seq,
server_default=common_seq.next_value(),
primary_key=True,
),
)
Table(
"table_2",
self.metadata,
Column(
"id",
Integer,
common_seq,
server_default=common_seq.next_value(),
primary_key=True,
),
)
self.metadata.create_all(connection)
is_true(self._has_sequence(connection, "common_sequence"))
is_true(testing.db.dialect.has_table(connection, "table_1"))
is_true(testing.db.dialect.has_table(connection, "table_2"))
self.metadata.drop_all(connection)
is_false(self._has_sequence(connection, "common_sequence"))
is_false(testing.db.dialect.has_table(connection, "table_1"))
is_false(testing.db.dialect.has_table(connection, "table_2"))
def test_next_value_type(self):
seq = normalize_sequence(
config, Sequence("my_sequence", data_type=BigInteger)
)
assert isinstance(seq.next_value().type, BigInteger)
| SequenceTest |
python | great-expectations__great_expectations | great_expectations/expectations/sql_tokens_and_types.py | {
"start": 1998,
"end": 2651
} | class ____(str, Enum):
ARRAYTYPE = "ARRAY"
BINARYTYPE = "BINARY"
BOOLEAN = "BOOLEAN"
BYTE = "BYTE"
TINYINT = "TINYINT"
DATE = "DATE"
DECIMAL = "DECIMAL"
DEC = "DEC"
NUMERIC = "NUMERIC"
INTERVAL = "INTERVAL"
DAY = "DAY"
YEAR = "YEAR"
MONTH = "MONTH"
HOUR = "HOUR"
SECOND = "SECOND"
MINUTE = "MINUTE"
DOUBLE = "DOUBLE"
FLOAT = "FLOAT"
REAL = "REAL"
INTEGER = "INTEGER"
INT = "INT"
LONG = "LONG"
BIGINT = "BIGINT"
MAP = "MAP"
SHORT = "SHORT"
SMALLINT = "SMALLINT"
STRING = "STRING"
STRUCT = "STRUCT"
TIMESTAMP = "TIMESTAMP"
| ValidSparkSqlTypes |
python | kubernetes-client__python | kubernetes/client/models/v1_self_subject_rules_review_spec.py | {
"start": 383,
"end": 3626
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'namespace': 'str'
}
attribute_map = {
'namespace': 'namespace'
}
def __init__(self, namespace=None, local_vars_configuration=None): # noqa: E501
"""V1SelfSubjectRulesReviewSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._namespace = None
self.discriminator = None
if namespace is not None:
self.namespace = namespace
@property
def namespace(self):
"""Gets the namespace of this V1SelfSubjectRulesReviewSpec. # noqa: E501
Namespace to evaluate rules for. Required. # noqa: E501
:return: The namespace of this V1SelfSubjectRulesReviewSpec. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1SelfSubjectRulesReviewSpec.
Namespace to evaluate rules for. Required. # noqa: E501
:param namespace: The namespace of this V1SelfSubjectRulesReviewSpec. # noqa: E501
:type: str
"""
self._namespace = namespace
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1SelfSubjectRulesReviewSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1SelfSubjectRulesReviewSpec):
return True
return self.to_dict() != other.to_dict()
| V1SelfSubjectRulesReviewSpec |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/via_type_of.py | {
"start": 1036,
"end": 1798
} | class ____:
x: Dict[str, int] = {}
y: List[str] = []
z: Annotated[float, "test2"] = 0.0
def test2_alarm1():
# always-via-type:Dict[str, int]
c = Test2_C(**_test_source())
_test_sink(c.x)
def test2_alarm2():
# always-via-type:List[str]
c = Test2_C(**_test_source())
_test_sink(c.y)
def test2_alarm3():
# always-via-type:float
c = Test2_C(**_test_source())
_test_sink(c.z)
def test2_alarm4(foo):
# via-type:Dict[str, int], via-type:List[str], via-type:float
c = Test2_C(**_test_source())
foo = c.x
if 1:
foo = c.y
elif 2:
foo = c.z
_test_sink(foo)
def test2_alarm5_via_constructor():
taint: str = _test_source()
_test_sink(Test2_C(x={}, y=[], z=taint))
| Test2_C |
python | getsentry__sentry | src/sentry/notifications/validators.py | {
"start": 1716,
"end": 2503
} | class ____(
UserNotificationSettingsOptionsDetailsSerializer
):
providers = serializers.ListField(child=serializers.CharField())
def validate_providers(self, value):
for provider in value:
if provider not in PERSONAL_NOTIFICATION_PROVIDERS:
raise serializers.ValidationError("Invalid provider")
return value
def validate_scope_type(self, value):
# for now, we limit the scopes for provider settings
if value not in [
NotificationScopeEnum.USER.value,
NotificationScopeEnum.TEAM.value,
NotificationScopeEnum.ORGANIZATION.value,
]:
raise serializers.ValidationError("Invalid scope type")
return value
| UserNotificationSettingsProvidersDetailsSerializer |
python | getsentry__sentry | tests/sentry/integrations/msteams/notifications/test_regression.py | {
"start": 602,
"end": 2154
} | class ____(MSTeamsActivityNotificationTest):
def test_regression(self, mock_send_card: MagicMock) -> None:
"""
Test that the card for MS Teams notification is generated correctly when an issue regresses.
"""
notification = RegressionActivityNotification(
Activity(
project=self.project,
group=self.group,
user_id=self.user.id,
type=ActivityType.SET_REGRESSION,
data={},
)
)
with self.tasks():
notification.send()
mock_send_card.assert_called_once()
args, kwargs = mock_send_card.call_args
assert args[0] == "some_conversation_id"
body = args[1]["body"]
assert 4 == len(body)
assert "Issue marked as regression" == body[0]["text"]
assert (
f"[{self.group.title}](http://testserver/organizations/{self.organization.slug}/issues/{self.group.id}/?referrer=regression\\_activity-msteams&notification\\_uuid="
in body[1]["text"]
)
notification_uuid = self.get_notification_uuid(body[3]["columns"][1]["items"][0]["text"])
assert (
f"{self.project.slug} | [Notification Settings](http://testserver/settings/account/notifications/workflow/?referrer=regression\\_activity-msteams-user&notification\\_uuid={notification_uuid}&organizationId={self.organization.id})"
== body[3]["columns"][1]["items"][0]["text"]
)
| MSTeamsRegressionNotificationTest |
python | numba__numba | numba/core/ir.py | {
"start": 26753,
"end": 27067
} | class ____(Terminator):
"""
Unconditional branch.
"""
def __init__(self, target, loc):
assert isinstance(loc, Loc)
self.target = target
self.loc = loc
def __str__(self):
return 'jump %s' % self.target
def get_targets(self):
return [self.target]
| Jump |
python | openai__openai-python | src/openai/types/realtime/realtime_mcp_tool_call.py | {
"start": 666,
"end": 1325
} | class ____(BaseModel):
id: str
"""The unique ID of the tool call."""
arguments: str
"""A JSON string of the arguments passed to the tool."""
name: str
"""The name of the tool that was run."""
server_label: str
"""The label of the MCP server running the tool."""
type: Literal["mcp_call"]
"""The type of the item. Always `mcp_call`."""
approval_request_id: Optional[str] = None
"""The ID of an associated approval request, if any."""
error: Optional[Error] = None
"""The error from the tool call, if any."""
output: Optional[str] = None
"""The output from the tool call."""
| RealtimeMcpToolCall |
python | django__django | django/views/generic/dates.py | {
"start": 2094,
"end": 3768
} | class ____:
"""Mixin for views manipulating month-based data."""
month_format = "%b"
month = None
def get_month_format(self):
"""
Get a month format string in strptime syntax to be used to parse the
month from url variables.
"""
return self.month_format
def get_month(self):
"""Return the month for which this view should display data."""
month = self.month
if month is None:
try:
month = self.kwargs["month"]
except KeyError:
try:
month = self.request.GET["month"]
except KeyError:
raise Http404(_("No month specified"))
return month
def get_next_month(self, date):
"""Get the next valid month."""
return _get_next_prev(self, date, is_previous=False, period="month")
def get_previous_month(self, date):
"""Get the previous valid month."""
return _get_next_prev(self, date, is_previous=True, period="month")
def _get_next_month(self, date):
"""
Return the start date of the next interval.
The interval is defined by start date <= item date < next start date.
"""
if date.month == 12:
try:
return date.replace(year=date.year + 1, month=1, day=1)
except ValueError:
raise Http404(_("Date out of range"))
else:
return date.replace(month=date.month + 1, day=1)
def _get_current_month(self, date):
"""Return the start date of the previous interval."""
return date.replace(day=1)
| MonthMixin |
python | pyqtgraph__pyqtgraph | pyqtgraph/configfile.py | {
"start": 657,
"end": 5978
} | class ____(Exception):
def __init__(self, message, lineNum, line, fileName=None):
self.lineNum = lineNum
self.line = line
self.message = message
self.fileName = fileName
Exception.__init__(self, message)
def __str__(self):
if self.fileName is None:
msg = f"Error parsing string at line {self.lineNum:d}:\n"
else:
msg = f"Error parsing config file '{self.fileName}' at line {self.lineNum:d}:\n"
msg += f"{self.line}\n{Exception.__str__(self)}"
return msg
def writeConfigFile(data, fname):
s = genString(data)
with open(fname, 'wt') as fd:
fd.write(s)
def readConfigFile(fname, **scope):
global GLOBAL_PATH
if GLOBAL_PATH is not None:
fname2 = os.path.join(GLOBAL_PATH, fname)
if os.path.exists(fname2):
fname = fname2
GLOBAL_PATH = os.path.dirname(os.path.abspath(fname))
local = {
**scope,
**units.allUnits,
'OrderedDict': OrderedDict,
'readConfigFile': readConfigFile,
'Point': Point,
'QtCore': QtCore,
'ColorMap': ColorMap,
'datetime': datetime,
# Needed for reconstructing numpy arrays
'array': numpy.array,
}
for dtype in ['int8', 'uint8',
'int16', 'uint16', 'float16',
'int32', 'uint32', 'float32',
'int64', 'uint64', 'float64']:
local[dtype] = getattr(numpy, dtype)
try:
with open(fname, "rt") as fd:
s = fd.read()
s = s.replace("\r\n", "\n")
s = s.replace("\r", "\n")
data = parseString(s, **local)[1]
except ParseError:
sys.exc_info()[1].fileName = fname
raise
except:
print(f"Error while reading config file {fname}:")
raise
return data
def appendConfigFile(data, fname):
s = genString(data)
with open(fname, 'at') as fd:
fd.write(s)
def genString(data, indent=''):
s = ''
for k in data:
sk = str(k)
if not sk:
print(data)
raise ValueError('blank dict keys not allowed (see data above)')
if sk[0] == ' ' or ':' in sk:
print(data)
raise ValueError(
f'dict keys must not contain ":" or start with spaces [offending key is "{sk}"]'
)
if isinstance(data[k], dict):
s += f"{indent}{sk}:\n"
s += genString(data[k], f'{indent} ')
else:
line = repr(data[k]).replace("\n", "\\\n")
s += f"{indent}{sk}: {line}\n"
return s
def parseString(lines, start=0, **scope):
data = OrderedDict()
if isinstance(lines, str):
lines = lines.replace("\\\n", "")
lines = lines.split('\n')
indent = None
ln = start - 1
l = ''
try:
while True:
ln += 1
if ln >= len(lines):
break
l = lines[ln]
## Skip blank lines or lines starting with #
if not _line_is_real(l):
continue
## Measure line indentation, make sure it is correct for this level
lineInd = measureIndent(l)
if indent is None:
indent = lineInd
if lineInd < indent:
ln -= 1
break
if lineInd > indent:
raise ParseError(f'Indentation is incorrect. Expected {indent:d}, got {lineInd:d}', ln + 1, l)
if ':' not in l:
raise ParseError('Missing colon', ln + 1, l)
k, _, v = l.partition(':')
k = k.strip()
v = v.strip()
## set up local variables to use for eval
if len(k) < 1:
raise ParseError('Missing name preceding colon', ln + 1, l)
if k[0] == '(' and k[-1] == ')': # If the key looks like a tuple, try evaluating it.
with contextlib.suppress(Exception): # If tuple conversion fails, keep the string
k1 = eval(k, scope)
if type(k1) is tuple:
k = k1
if _line_is_real(v): # eval the value
try:
val = eval(v, scope)
except Exception as ex:
raise ParseError(
f"Error evaluating expression '{v}': [{ex.__class__.__name__}: {ex}]", ln + 1, l
) from ex
else:
next_real_ln = next((i for i in range(ln + 1, len(lines)) if _line_is_real(lines[i])), len(lines))
if ln + 1 >= len(lines) or measureIndent(lines[next_real_ln]) <= indent:
val = {}
else:
ln, val = parseString(lines, start=ln + 1, **scope)
if k in data:
raise ParseError(f'Duplicate key: {k}', ln + 1, l)
data[k] = val
except ParseError:
raise
except Exception as ex:
raise ParseError(f"{ex.__class__.__name__}: {ex}", ln + 1, l) from ex
return ln, data
def _line_is_real(line):
return not re.match(r'\s*#', line) and re.search(r'\S', line)
def measureIndent(s):
n = 0
while n < len(s) and s[n] == ' ':
n += 1
return n
| ParseError |
python | ray-project__ray | python/ray/serve/tests/test_autoscaling_policy.py | {
"start": 57024,
"end": 70825
} | class ____:
@pytest.fixture
def serve_instance_with_two_signal(self, serve_instance):
client = serve_instance
signal_a = SignalActor.options(name="signal_A").remote()
signal_b = SignalActor.options(name="signal_B").remote()
yield client, signal_a, signal_b
# Delete signal actors so there is no conflict between tests
ray.kill(signal_a)
ray.kill(signal_b)
def verify_scaling_decisions(self, signal_A, signal_B):
hA = serve.get_deployment_handle("A", app_name=SERVE_DEFAULT_APP_NAME)
hB = serve.get_deployment_handle("B", app_name=SERVE_DEFAULT_APP_NAME)
# ---- Deployment A ----
ray.get(signal_A.send.remote(clear=True))
results = [hA.remote() for _ in range(40)]
wait_for_condition(lambda: ray.get(signal_A.cur_num_waiters.remote()) == 40)
wait_for_condition(check_num_replicas_eq, name="A", target=2)
ray.get(signal_A.send.remote(clear=True))
assert all(result.result(timeout_s=10) for result in results)
results = [hA.remote() for _ in range(70)]
wait_for_condition(lambda: ray.get(signal_A.cur_num_waiters.remote()) == 70)
wait_for_condition(check_num_replicas_eq, name="A", target=4)
ray.get(signal_A.send.remote())
assert all(result.result(timeout_s=10) for result in results)
# ---- Deployment B ----
ray.get(signal_B.send.remote(clear=True))
results = [hB.remote() for _ in range(50)]
wait_for_condition(lambda: ray.get(signal_B.cur_num_waiters.remote()) == 50)
wait_for_condition(check_num_replicas_eq, name="B", target=3)
ray.get(signal_B.send.remote(clear=True))
assert all(result.result(timeout_s=10) for result in results)
results = [hB.remote() for _ in range(120)]
wait_for_condition(lambda: ray.get(signal_B.cur_num_waiters.remote()) == 120)
wait_for_condition(check_num_replicas_eq, name="B", target=5)
ray.get(signal_B.send.remote())
assert all(result.result(timeout_s=10) for result in results)
@pytest.mark.parametrize(
"policy",
[
{
"policy_function": "ray.serve.tests.test_autoscaling_policy.app_level_custom_autoscaling_policy"
},
AutoscalingPolicy(
policy_function="ray.serve.tests.test_autoscaling_policy.app_level_custom_autoscaling_policy"
),
AutoscalingPolicy(policy_function=app_level_custom_autoscaling_policy),
],
)
def test_application_autoscaling_policy(
self, serve_instance_with_two_signal, policy
):
client, signal_A, signal_B = serve_instance_with_two_signal
config_template = {
"import_path": "ray.serve.tests.test_config_files.get_multi_deployment_signal_app.app",
"autoscaling_policy": policy,
"deployments": [
{
"name": "A",
"max_ongoing_requests": 1000,
"autoscaling_config": {
"min_replicas": 1,
"max_replicas": 10,
"metrics_interval_s": 0.1,
"upscale_delay_s": 0.1,
"downscale_delay_s": 0.5,
"look_back_period_s": 1,
},
"graceful_shutdown_timeout_s": 0.1,
},
{
"name": "B",
"max_ongoing_requests": 1000,
"autoscaling_config": {
"min_replicas": 1,
"max_replicas": 10,
"metrics_interval_s": 0.1,
"upscale_delay_s": 0.1,
"downscale_delay_s": 0.5,
"look_back_period_s": 1,
},
"graceful_shutdown_timeout_s": 0.1,
},
],
}
print(time.ctime(), "Deploying application with deployments A and B.")
client.deploy_apps(
ServeDeploySchema.parse_obj({"applications": [config_template]})
)
wait_for_condition(check_running, timeout=15)
print(time.ctime(), "Application is RUNNING.")
self.verify_scaling_decisions(signal_A, signal_B)
def test_autoscaling_policy_switchback(self, serve_instance_with_two_signal):
client, signal_A, signal_B = serve_instance_with_two_signal
config_template = {
"import_path": "ray.serve.tests.test_config_files.get_multi_deployment_signal_app.app",
"deployments": [
{
"name": "A",
"max_ongoing_requests": 1000,
"autoscaling_config": {
"min_replicas": 1,
"max_replicas": 10,
"metrics_interval_s": 0.1,
"upscale_delay_s": 0.1,
"downscale_delay_s": 0.5,
"look_back_period_s": 1,
"policy": {
"policy_function": "ray.serve.tests.test_autoscaling_policy.custom_autoscaling_policy"
},
},
"graceful_shutdown_timeout_s": 0.1,
},
],
}
client.deploy_apps(
ServeDeploySchema.parse_obj({"applications": [config_template]})
)
wait_for_condition(check_running, timeout=15)
hA = serve.get_deployment_handle("A", app_name=SERVE_DEFAULT_APP_NAME)
results = [hA.remote() for _ in range(60)]
wait_for_condition(lambda: ray.get(signal_A.cur_num_waiters.remote()) == 60)
wait_for_condition(check_num_replicas_eq, name="A", target=3)
ray.get(signal_A.send.remote())
assert all(result.result(timeout_s=10) for result in results)
ray.get(signal_A.send.remote(clear=True))
# Switch to app-level policy
config_template = {
"import_path": "ray.serve.tests.test_config_files.get_multi_deployment_signal_app.app",
"autoscaling_policy": {
"policy_function": "ray.serve.tests.test_autoscaling_policy.app_level_custom_autoscaling_policy"
},
"deployments": [
{
"name": "A",
"max_ongoing_requests": 1000,
"autoscaling_config": {
"min_replicas": 1,
"max_replicas": 10,
"metrics_interval_s": 0.1,
"upscale_delay_s": 0.1,
"downscale_delay_s": 0.5,
"look_back_period_s": 1,
},
"graceful_shutdown_timeout_s": 0.1,
},
{
"name": "B",
"max_ongoing_requests": 1000,
"autoscaling_config": {
"min_replicas": 1,
"max_replicas": 10,
"metrics_interval_s": 0.1,
"upscale_delay_s": 0.1,
"downscale_delay_s": 0.5,
"look_back_period_s": 1,
},
"graceful_shutdown_timeout_s": 0.1,
},
],
}
client.deploy_apps(
ServeDeploySchema.parse_obj({"applications": [config_template]})
)
wait_for_condition(check_running, timeout=15)
hA = serve.get_deployment_handle("A", app_name=SERVE_DEFAULT_APP_NAME)
results = [hA.remote() for _ in range(120)]
wait_for_condition(lambda: ray.get(signal_A.cur_num_waiters.remote()) == 120)
wait_for_condition(check_num_replicas_eq, name="A", target=4)
ray.get(signal_A.send.remote())
assert all(result.result(timeout_s=10) for result in results)
ray.get(signal_A.send.remote(clear=True))
hB = serve.get_deployment_handle("B", app_name=SERVE_DEFAULT_APP_NAME)
results = [hB.remote() for _ in range(120)]
wait_for_condition(lambda: ray.get(signal_B.cur_num_waiters.remote()) == 120)
wait_for_condition(check_num_replicas_eq, name="B", target=5)
ray.get(signal_B.send.remote())
assert all(result.result(timeout_s=10) for result in results)
ray.get(signal_B.send.remote(clear=True))
# switch back to deployment-level policy
config_template = {
"import_path": "ray.serve.tests.test_config_files.get_multi_deployment_signal_app.app",
"deployments": [
{
"name": "A",
"max_ongoing_requests": 1000,
"autoscaling_config": {
"min_replicas": 1,
"max_replicas": 10,
"metrics_interval_s": 0.1,
"upscale_delay_s": 0.1,
"downscale_delay_s": 0.5,
"look_back_period_s": 1,
"policy": {
"policy_function": "ray.serve.tests.test_autoscaling_policy.custom_autoscaling_policy"
},
},
"graceful_shutdown_timeout_s": 0.1,
},
],
}
print(time.ctime(), "Deploying application with deployments A and B.")
client.deploy_apps(
ServeDeploySchema.parse_obj({"applications": [config_template]})
)
wait_for_condition(check_running, timeout=15)
hA = serve.get_deployment_handle("A", app_name=SERVE_DEFAULT_APP_NAME)
results = [hA.remote() for _ in range(120)]
wait_for_condition(lambda: ray.get(signal_A.cur_num_waiters.remote()) == 120)
wait_for_condition(check_num_replicas_eq, name="A", target=3)
ray.get(signal_A.send.remote())
assert all(result.result(timeout_s=10) for result in results)
def test_autoscaling_policy_enable_disable(self, serve_instance_with_two_signal):
client, signal_A, _ = serve_instance_with_two_signal
config_template = {
"import_path": "ray.serve.tests.test_config_files.get_multi_deployment_signal_app.app",
"deployments": [
{
"name": "A",
"max_ongoing_requests": 1000,
"num_replicas": 1,
},
],
}
client.deploy_apps(
ServeDeploySchema.parse_obj({"applications": [config_template]})
)
wait_for_condition(check_running, timeout=15)
hA = serve.get_deployment_handle("A", app_name=SERVE_DEFAULT_APP_NAME)
results = [hA.remote() for _ in range(120)]
wait_for_condition(lambda: ray.get(signal_A.cur_num_waiters.remote()) == 120)
wait_for_condition(check_num_replicas_eq, name="A", target=1)
ray.get(signal_A.send.remote(clear=True))
assert all(result.result(timeout_s=10) for result in results)
config_template = {
"import_path": "ray.serve.tests.test_config_files.get_multi_deployment_signal_app.app",
"autoscaling_policy": {
"policy_function": "ray.serve.tests.test_autoscaling_policy.app_level_custom_autoscaling_policy"
},
"deployments": [
{
"name": "A",
"max_ongoing_requests": 1000,
"num_replicas": "auto",
"autoscaling_config": {
"min_replicas": 1,
"max_replicas": 10,
"metrics_interval_s": 0.1,
"upscale_delay_s": 0.1,
"downscale_delay_s": 0.5,
"look_back_period_s": 1,
},
},
],
}
client.deploy_apps(
ServeDeploySchema.parse_obj({"applications": [config_template]})
)
wait_for_condition(check_running, timeout=15)
hA = serve.get_deployment_handle("A", app_name=SERVE_DEFAULT_APP_NAME)
results = [hA.remote() for _ in range(120)]
wait_for_condition(lambda: ray.get(signal_A.cur_num_waiters.remote()) == 120)
wait_for_condition(check_num_replicas_eq, name="A", target=4)
ray.get(signal_A.send.remote(clear=True))
assert all(result.result(timeout_s=10) for result in results)
# turn off app-level autoscaling policy
config_template = {
"import_path": "ray.serve.tests.test_config_files.get_multi_deployment_signal_app.app",
"deployments": [
{
"name": "A",
"max_ongoing_requests": 1000,
"num_replicas": 1,
},
],
}
client.deploy_apps(
ServeDeploySchema.parse_obj({"applications": [config_template]})
)
wait_for_condition(check_running, timeout=15)
wait_for_condition(check_num_replicas_eq, name="A", target=1)
hA = serve.get_deployment_handle("A", app_name=SERVE_DEFAULT_APP_NAME)
results = [hA.remote() for _ in range(120)]
wait_for_condition(lambda: ray.get(signal_A.cur_num_waiters.remote()) == 120)
wait_for_condition(check_num_replicas_eq, name="A", target=1)
ray.get(signal_A.send.remote(clear=True))
assert all(result.result(timeout_s=10) for result in results)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestAppLevelAutoscalingPolicy |
python | pyca__cryptography | tests/hazmat/primitives/test_rsa.py | {
"start": 54350,
"end": 57805
} | class ____:
test_rsa_pkcs1v15_verify_sha1 = pytest.mark.supported(
only_if=lambda backend: (
backend.signature_hash_supported(hashes.SHA1())
and backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA1 and PKCS1v1.5.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA1(),
lambda params, hash_alg: padding.PKCS1v15(),
)
)
test_rsa_pkcs1v15_verify_sha224 = pytest.mark.supported(
only_if=lambda backend: (
backend.signature_hash_supported(hashes.SHA224())
and backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA224 and PKCS1v1.5.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA224(),
lambda params, hash_alg: padding.PKCS1v15(),
)
)
test_rsa_pkcs1v15_verify_sha256 = pytest.mark.supported(
only_if=lambda backend: (
backend.signature_hash_supported(hashes.SHA256())
and backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA256 and PKCS1v1.5.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA256(),
lambda params, hash_alg: padding.PKCS1v15(),
)
)
test_rsa_pkcs1v15_verify_sha384 = pytest.mark.supported(
only_if=lambda backend: (
backend.signature_hash_supported(hashes.SHA384())
and backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA384 and PKCS1v1.5.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA384(),
lambda params, hash_alg: padding.PKCS1v15(),
)
)
test_rsa_pkcs1v15_verify_sha512 = pytest.mark.supported(
only_if=lambda backend: (
backend.signature_hash_supported(hashes.SHA512())
and backend.rsa_padding_supported(padding.PKCS1v15())
),
skip_message="Does not support SHA512 and PKCS1v1.5.",
)(
generate_rsa_verification_test(
load_rsa_nist_vectors,
os.path.join("asymmetric", "RSA", "FIPS_186-2"),
[
"SigGen15_186-2.rsp",
"SigGen15_186-3.rsp",
"SigVer15_186-3.rsp",
],
hashes.SHA512(),
lambda params, hash_alg: padding.PKCS1v15(),
)
)
| TestRSAPKCS1Verification |
python | numba__numba | numba/cuda/tests/cudapy/test_datetime.py | {
"start": 192,
"end": 3508
} | class ____(CUDATestCase):
def test_basic_datetime_kernel(self):
@cuda.jit
def foo(start, end, delta):
for i in range(cuda.grid(1), delta.size, cuda.gridsize(1)):
delta[i] = end[i] - start[i]
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr2 = arr1 + np.random.randint(0, 10000, arr1.size)
delta = np.zeros_like(arr1, dtype='timedelta64[D]')
foo[1, 32](arr1, arr2, delta)
self.assertPreciseEqual(delta, arr2 - arr1)
def test_scalar_datetime_kernel(self):
@cuda.jit
def foo(dates, target, delta, matches, outdelta):
for i in range(cuda.grid(1), matches.size, cuda.gridsize(1)):
matches[i] = dates[i] == target
outdelta[i] = dates[i] - delta
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
target = arr1[5] # datetime
delta = arr1[6] - arr1[5] # timedelta
matches = np.zeros_like(arr1, dtype=np.bool_)
outdelta = np.zeros_like(arr1, dtype='datetime64[D]')
foo[1, 32](arr1, target, delta, matches, outdelta)
where = matches.nonzero()
self.assertEqual(list(where), [5])
self.assertPreciseEqual(outdelta, arr1 - delta)
@skip_on_cudasim('ufunc API unsupported in the simulator')
def test_ufunc(self):
datetime_t = from_dtype(np.dtype('datetime64[D]'))
@vectorize([(datetime_t, datetime_t)], target='cuda')
def timediff(start, end):
return end - start
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr2 = arr1 + np.random.randint(0, 10000, arr1.size)
delta = timediff(arr1, arr2)
self.assertPreciseEqual(delta, arr2 - arr1)
@skip_on_cudasim('ufunc API unsupported in the simulator')
def test_gufunc(self):
datetime_t = from_dtype(np.dtype('datetime64[D]'))
timedelta_t = from_dtype(np.dtype('timedelta64[D]'))
@guvectorize([(datetime_t, datetime_t, timedelta_t[:])], '(),()->()',
target='cuda')
def timediff(start, end, out):
out[0] = end - start
arr1 = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr2 = arr1 + np.random.randint(0, 10000, arr1.size)
delta = timediff(arr1, arr2)
self.assertPreciseEqual(delta, arr2 - arr1)
@skip_on_cudasim('no .copy_to_host() in the simulator')
def test_datetime_view_as_int64(self):
arr = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
darr = cuda.to_device(arr)
viewed = darr.view(np.int64)
self.assertPreciseEqual(arr.view(np.int64), viewed.copy_to_host())
self.assertEqual(viewed.gpu_data, darr.gpu_data)
@skip_on_cudasim('no .copy_to_host() in the simulator')
def test_timedelta_view_as_int64(self):
arr = np.arange('2005-02', '2006-02', dtype='datetime64[D]')
arr = arr - (arr - 1)
self.assertEqual(arr.dtype, np.dtype('timedelta64[D]'))
darr = cuda.to_device(arr)
viewed = darr.view(np.int64)
self.assertPreciseEqual(arr.view(np.int64), viewed.copy_to_host())
self.assertEqual(viewed.gpu_data, darr.gpu_data)
if __name__ == '__main__':
unittest.main()
| TestCudaDateTime |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-azurecosmosmongo/tests/test_azurecosmosmongo.py | {
"start": 2219,
"end": 4432
} | class ____:
@classmethod
def setup_class(cls) -> None:
# insure the test collection is empty
assert collection.count_documents({}) == 0 # type: ignore[index]
@classmethod
def teardown_class(cls) -> None:
# delete all the documents in the collection
collection.delete_many({}) # type: ignore[index]
@pytest.fixture(autouse=True)
def setup(self) -> None:
# delete all the documents in the collection
collection.delete_many({}) # type: ignore[index]
def test_add_and_delete(self) -> None:
vector_store = AzureCosmosDBMongoDBVectorSearch(
mongodb_client=test_client, # type: ignore
db_name=DB_NAME,
collection_name=COLLECTION_NAME,
index_name=INDEX_NAME,
cosmos_search_kwargs={"dimensions": 3},
)
sleep(1) # waits for azure cosmosdb mongodb to update
vector_store.add(
[
TextNode(
text="test node text",
id_="test node id",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id="test doc id")
},
embedding=[0.5, 0.5, 0.5],
)
]
)
assert collection.count_documents({}) == 1
vector_store.delete("test doc id")
assert collection.count_documents({}) == 0
def test_query(self, node_embeddings: List[TextNode]) -> None:
vector_store = AzureCosmosDBMongoDBVectorSearch(
mongodb_client=test_client, # type: ignore
db_name=DB_NAME,
collection_name=COLLECTION_NAME,
index_name=INDEX_NAME,
cosmos_search_kwargs={"dimensions": 3},
)
vector_store.add(node_embeddings) # type: ignore
sleep(1) # wait for azure cosmodb mongodb to update the index
res = vector_store.query(
VectorStoreQuery(query_embedding=[1.0, 0.0, 0.0], similarity_top_k=1)
)
print("res:\n", res)
sleep(5)
assert res.nodes
assert res.nodes[0].get_content() == "lorem ipsum"
| TestAzureMongovCoreVectorSearch |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/TargetItem.py | {
"start": 351,
"end": 11973
} | class ____(UIGraphicsItem):
"""Draws a draggable target symbol (circle plus crosshair).
The size of TargetItem will remain fixed on screen even as the view is zoomed.
Includes an optional text label.
"""
sigPositionChanged = QtCore.Signal(object)
sigPositionChangeFinished = QtCore.Signal(object)
def __init__(
self,
pos=None,
size=10,
symbol="crosshair",
pen=None,
hoverPen=None,
brush=None,
hoverBrush=None,
movable=True,
label=None,
labelOpts=None,
):
r"""
Parameters
----------
pos : list, tuple, QPointF, QPoint, Optional
Initial position of the symbol. Default is (0, 0)
size : int
Size of the symbol in pixels. Default is 10.
pen : QPen, tuple, list or str
Pen to use when drawing line. Can be any arguments that are valid
for :func:`~pyqtgraph.mkPen`. Default pen is transparent yellow.
brush : QBrush, tuple, list, or str
Defines the brush that fill the symbol. Can be any arguments that
is valid for :func:`~pyqtgraph.mkBrush`. Default is transparent
blue.
movable : bool
If True, the symbol can be dragged to a new position by the user.
hoverPen : QPen, tuple, list, or str
Pen to use when drawing symbol when hovering over it. Can be any
arguments that are valid for :func:`~pyqtgraph.mkPen`. Default pen
is red.
hoverBrush : QBrush, tuple, list or str
Brush to use to fill the symbol when hovering over it. Can be any
arguments that is valid for :func:`~pyqtgraph.mkBrush`. Default is
transparent blue.
symbol : QPainterPath or str
QPainterPath to use for drawing the target, should be centered at
``(0, 0)`` with ``max(width, height) == 1.0``. Alternatively a string
which can be any symbol accepted by
:func:`~pyqtgraph.ScatterPlotItem.setSymbol`
label : bool, str or callable, optional
Text to be displayed in a label attached to the symbol, or None to
show no label (default is None). May optionally include formatting
strings to display the symbol value, or a callable that accepts x
and y as inputs. If True, the label is ``x = {: >.3n}\ny = {: >.3n}``
False or None will result in no text being displayed
labelOpts : dict
A dict of keyword arguments to use when constructing the text
label. See :class:`TargetLabel` and :class:`~pyqtgraph.TextItem`
"""
super().__init__()
self.movable = movable
self.moving = False
self._label = None
self.mouseHovering = False
if pen is None:
pen = (255, 255, 0)
self.setPen(pen)
if hoverPen is None:
hoverPen = (255, 0, 255)
self.setHoverPen(hoverPen)
if brush is None:
brush = (0, 0, 255, 50)
self.setBrush(brush)
if hoverBrush is None:
hoverBrush = (0, 255, 255, 100)
self.setHoverBrush(hoverBrush)
self.currentPen = self.pen
self.currentBrush = self.brush
self._shape = None
self._pos = Point(0, 0)
if pos is None:
pos = Point(0, 0)
self.setPos(pos)
self._path = None
self.setSymbol(symbol)
self.scale = size
self.setLabel(label, labelOpts)
def setSymbol(self, symbol):
"""Method to set the TargetItem symbol, during or after creation
Parameters
----------
symbol : QPainterPath or str
QPainterPath to use for drawing the target, should be centered at
``(0, 0)`` with ``max(width, height) == 1.0``. Alternatively a string
which can be any symbol accepted by
:func:`~pyqtgraph.ScatterPlotItem.setSymbol`
Raises
------
KeyError
If ``symbol`` string is unknown
TypeError
If unknown type is is provided as ``symbol``
"""
if isinstance(symbol, str):
try:
path = Symbols[symbol]
except KeyError:
raise KeyError(f"Symbol name '{symbol}' not found in available Symbols")
elif isinstance(symbol, QtGui.QPainterPath):
path = symbol
else:
raise TypeError("Unknown type provided as symbol")
self.setPath(path)
def setPos(self, *args):
"""Method to set the position to ``(x, y)`` within the plot view
Parameters
----------
args : tuple or list or QtCore.QPointF or QtCore.QPoint or Point or float
Two float values or a container that specifies ``(x, y)`` position where the
TargetItem should be placed
Raises
------
TypeError
If args cannot be used to instantiate a Point
"""
try:
newPos = Point(*args)
except TypeError:
raise
except Exception:
raise TypeError(f"Could not make Point from arguments: {args!r}")
if self._pos != newPos:
self._pos = newPos
super().setPos(self._pos)
self.sigPositionChanged.emit(self)
def setBrush(self, *args, **kwargs):
    """Set the brush that fills the symbol. Allowable arguments are any
    that are valid for :func:`~pyqtgraph.mkBrush`.
    """
    new_brush = fn.mkBrush(*args, **kwargs)
    self.brush = new_brush
    if not self.mouseHovering:
        # Only take effect immediately when the hover brush is inactive.
        self.currentBrush = new_brush
    self.update()
def setHoverBrush(self, *args, **kwargs):
    """Set the brush that fills the symbol when hovering over it.
    Allowable arguments are any that are valid for
    :func:`~pyqtgraph.mkBrush`.
    """
    new_brush = fn.mkBrush(*args, **kwargs)
    self.hoverBrush = new_brush
    if self.mouseHovering:
        # Hover is active right now, so apply the new brush immediately.
        self.currentBrush = new_brush
    self.update()
def setPen(self, *args, **kwargs):
    """Set the pen for drawing the symbol. Allowable arguments are any
    that are valid for :func:`~pyqtgraph.mkPen`."""
    new_pen = fn.mkPen(*args, **kwargs)
    self.pen = new_pen
    if not self.mouseHovering:
        # Only take effect immediately when the hover pen is inactive.
        self.currentPen = new_pen
    self.update()
def setHoverPen(self, *args, **kwargs):
    """Set the pen for drawing the symbol when hovering over it.
    Allowable arguments are any that are valid for
    :func:`~pyqtgraph.mkPen`."""
    new_pen = fn.mkPen(*args, **kwargs)
    self.hoverPen = new_pen
    if self.mouseHovering:
        # Hover is active right now, so apply the new pen immediately.
        self.currentPen = new_pen
    self.update()
def boundingRect(self):
    # Bounds follow the device-transform-aware shape() so the item's
    # reported geometry matches what paint() actually draws.
    return self.shape().boundingRect()
def paint(self, p, *_):
    # Draw the symbol path using the pen/brush pair selected by the
    # current hover state (see setMouseHover).
    p.setPen(self.currentPen)
    p.setBrush(self.currentBrush)
    p.drawPath(self.shape())
def setPath(self, path):
    """Store *path* as the symbol outline and invalidate the cached
    device-space shape so it is rebuilt on the next shape() call."""
    if path == self._path:
        return None
    self._path = path
    self._shape = None
    return None
def shape(self):
    # Lazily (re)build the device-space shape; the cache is invalidated
    # by setPath() and viewTransformChanged().
    if self._shape is None:
        s = self.generateShape()
        if s is None:
            # No device transform available yet; serve the raw path
            # without caching so a later call can retry.
            return self._path
        self._shape = s
        # beware--this can cause the view to adjust
        # which would immediately invalidate the shape.
        self.prepareGeometryChange()
    return self._shape
def generateShape(self):
    # Map the unit-sized symbol path into view coordinates so it is drawn
    # at a fixed on-screen size (self.scale) regardless of view zoom, and
    # rotation-corrected for the current device transform.
    dt = self.deviceTransform_()
    if dt is None:
        # NOTE(review): on the no-transform path self._shape is set to the
        # raw path even though None is returned, so a later shape() call
        # serves the un-scaled path from cache -- confirm this is intended.
        self._shape = self._path
        return None
    # Direction of the view's x-axis in device (pixel) space.
    v = dt.map(QtCore.QPointF(1, 0)) - dt.map(QtCore.QPointF(0, 0))
    dti = fn.invertQTransform(dt)
    devPos = dt.map(QtCore.QPointF(0, 0))
    tr = QtGui.QTransform()
    tr.translate(devPos.x(), devPos.y())
    # Counter-rotate so the symbol stays upright on screen.
    va = atan2(v.y(), v.x())
    tr.rotateRadians(va)
    tr.scale(self.scale, self.scale)
    # Build in device space, then map back into item coordinates.
    return dti.map(tr.map(self._path))
def mouseDragEvent(self, ev):
    """Drag the target with the left mouse button when movable."""
    if not self.movable:
        return
    if ev.button() != QtCore.Qt.MouseButton.LeftButton:
        return
    ev.accept()
    if ev.isStart():
        # Remember the offset between item and press position so the
        # symbol does not jump under the cursor when the drag begins.
        self.symbolOffset = self.pos() - self.mapToView(ev.buttonDownPos())
        self.moving = True
    if not self.moving:
        return
    self.setPos(self.symbolOffset + self.mapToView(ev.pos()))
    if ev.isFinish():
        self.moving = False
        self.sigPositionChangeFinished.emit(self)
def mouseClickEvent(self, ev):
    """A right-click during an active drag cancels further movement."""
    if not (self.moving and ev.button() == QtCore.Qt.MouseButton.RightButton):
        return
    ev.accept()
    self.moving = False
    self.sigPositionChanged.emit(self)
    self.sigPositionChangeFinished.emit(self)
def setMouseHover(self, hover):
    """Record the hover state and swap in the matching pen/brush pair."""
    if self.mouseHovering is hover:
        return  # state unchanged; nothing to repaint
    self.mouseHovering = hover
    if hover:
        self.currentBrush, self.currentPen = self.hoverBrush, self.hoverPen
    else:
        self.currentBrush, self.currentPen = self.brush, self.pen
    self.update()
def hoverEvent(self, ev):
    """Highlight the target while a movable item is hovered."""
    # Evaluation order matters: acceptDrags() has a side effect, so it is
    # only reached for movable items on a non-exit event (as before).
    hovered = (
        self.movable
        and not ev.isExit()
        and ev.acceptDrags(QtCore.Qt.MouseButton.LeftButton)
    )
    self.setMouseHover(hovered)
def viewTransformChanged(self):
    # The cached device-space shape depends on the view transform, so it
    # must be discarded and rebuilt after any view change.
    GraphicsObject.viewTransformChanged(self)
    self._shape = None # invalidate shape, recompute later if requested.
    self.update()
def pos(self):
    """Provides the current position of the TargetItem

    Returns
    -------
    Point
        pg.Point of the current position of the TargetItem
    """
    # Returns the view-space Point cached by setPos() (overriding the
    # base-class pos(), whose setter is invoked via super().setPos()).
    return self._pos
def label(self):
    """Provides the TargetLabel if it exists

    Returns
    -------
    TargetLabel or None
        If a TargetLabel exists for this TargetItem, return that, otherwise
        return None
    """
    # The label is created/destroyed exclusively by setLabel().
    return self._label
def setLabel(self, text=None, labelOpts=None):
    """Enable, replace, or disable the TargetLabel displaying text
    next to this item.

    Parameters
    ----------
    text : Callable or str, optional
        Details how to format the text, by default None
        If None (or any falsy value), do not show any text next to the
        TargetItem.
        If True, a default coordinate format is used.
        If Callable, then the label will display the result of ``text(x, y)``
        If a formatted string, then the output of ``text.format(x, y)`` will
        be displayed
        If a non-formatted string, then the text label will display ``text``
    labelOpts : dict, optional
        These arguments are passed on to :class:`~pyqtgraph.TextItem`
    """
    if not text:
        if self._label is not None and self._label.scene() is not None:
            # remove the label if it's already added
            self._label.scene().removeItem(self._label)
        self._label = None
    else:
        # provide default text if text is True
        if text is True:
            # convert to default value or empty string
            text = "x = {: .3n}\ny = {: .3n}"
        labelOpts = {} if labelOpts is None else labelOpts
        if self._label is not None:
            scene = self._label.scene()
            # BUGFIX: guard against a label that was never added to a
            # scene -- scene() is None then, and calling removeItem on it
            # raised AttributeError (the removal branch above already
            # performs this check).
            if scene is not None:
                scene.removeItem(self._label)
        self._label = TargetLabel(self, text=text, **labelOpts)
| TargetItem |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_getnewargs/invalid_getnewargs_ex_returned.py | {
"start": 1627,
"end": 1843
class ____:
    """ __getnewargs_ex__ returns tuple with wrong type for first arg """
    # pylint functional-test fixture: the first tuple element should be a
    # tuple of positional args, but a dict is returned, so the checker is
    # expected to emit the message named in the bracketed marker below.
    def __getnewargs_ex__(self): # [invalid-getnewargs-ex-returned]
        return (dict(x="y"), dict(x="y"))
| ThirdBadGetNewArgsEx |
python | oauthlib__oauthlib | tests/oauth2/rfc8628/endpoints/test_error_responses.py | {
"start": 297,
"end": 3701
class ____(TestCase):
    """Error-response tests for the RFC 8628 device-authorization endpoint.

    Each test drives ``DeviceApplicationServer`` (built over a mocked
    ``RequestValidator``) and asserts that invalid requests raise the
    expected oauthlib error on both the validation and response paths.
    """

    def set_client(self, request):
        # Validator hook: attach a mock client so downstream validation
        # that inspects request.client can proceed.
        request.client = mock.MagicMock()
        request.client.client_id = "mocked"
        return True

    def build_request(self, uri="https://example.com/device_authorize", client_id="foo"):
        # Build a form-encoded POST request; client_id=None/"" yields an
        # empty body so the "missing client_id" path can be exercised.
        body = ""
        if client_id:
            body = f"client_id={client_id}"
        return Request(
            uri,
            http_method="POST",
            body=body,
            headers={"Content-Type": "application/x-www-form-urlencoded"},
        )

    def assert_request_raises(self, error, request):
        """Test that the request fails similarly on the validation and response endpoint."""
        self.assertRaises(
            error,
            self.device.validate_device_authorization_request,
            request,
        )
        self.assertRaises(
            error,
            self.device.create_device_authorization_response,
            uri=request.uri,
            http_method=request.http_method,
            body=request.body,
            headers=request.headers,
        )

    def setUp(self):
        # Fresh mocked validator + device server for every test method.
        self.validator = mock.MagicMock(spec=RequestValidator)
        self.validator.get_default_redirect_uri.return_value = None
        self.validator.get_code_challenge.return_value = None
        self.device = DeviceApplicationServer(self.validator, "https://example.com/verify")

    def test_missing_client_id(self):
        # Device code grant
        request = self.build_request(client_id=None)
        self.assert_request_raises(errors.MissingClientIdError, request)

    def test_empty_client_id(self):
        # Device code grant: "client_id=" with no value counts as missing.
        self.assertRaises(
            errors.MissingClientIdError,
            self.device.create_device_authorization_response,
            "https://i.l/",
            "POST",
            "client_id=",
            {"Content-Type": "application/x-www-form-urlencoded"},
        )

    def test_invalid_client_id(self):
        request = self.build_request(client_id="foo")
        # Device code grant: validator rejects the client_id.
        self.validator.validate_client_id.return_value = False
        self.assert_request_raises(errors.InvalidClientIdError, request)

    def test_duplicate_client_id(self):
        request = self.build_request()
        request.body = "client_id=foo&client_id=bar"
        # Device code grant: duplicated parameters are a fatal request error.
        self.validator.validate_client_id.return_value = False
        self.assert_request_raises(errors.InvalidRequestFatalError, request)

    def test_unauthenticated_confidential_client(self):
        # Confidential client: authenticate_client() is consulted and fails.
        self.validator.client_authentication_required.return_value = True
        self.validator.authenticate_client.return_value = False
        request = self.build_request()
        self.assert_request_raises(errors.InvalidClientError, request)

    def test_unauthenticated_public_client(self):
        # Public client: authenticate_client_id() is consulted and fails.
        self.validator.client_authentication_required.return_value = False
        self.validator.authenticate_client_id.return_value = False
        request = self.build_request()
        self.assert_request_raises(errors.InvalidClientError, request)

    def test_duplicate_scope_parameter(self):
        request = self.build_request()
        request.body = "client_id=foo&scope=foo&scope=bar"
        # Duplicated scope parameters are also a fatal request error.
        self.validator.validate_client_id.return_value = False
        self.assert_request_raises(errors.InvalidRequestFatalError, request)
| ErrorResponseTest |
python | bokeh__bokeh | src/bokeh/models/widgets/pickers.py | {
"start": 8484,
"end": 8838
class ____(BaseDatePicker):
    """ Calendar-based picker of date ranges. """

    # explicit __init__ to support Init signatures
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)

    # (start, end) date pair; None until a range has been picked.
    value = Nullable(Tuple(Date, Date), default=None, help="""
    The initial or picked date range.
    """)
| DateRangePicker |
python | walkccc__LeetCode | solutions/2147. Number of Ways to Divide a Long Corridor/2147.py | {
"start": 0,
"end": 386
class ____:
    def numberOfWays(self, corridor: str) -> int:
        """Count the ways to divide the corridor so that every section
        contains exactly two seats ('S'); the result is taken modulo
        10**9 + 7. Returns 0 when the seat count is zero or odd.
        """
        MOD = 1_000_000_007
        ways = 1
        lastSeat = -1
        seen = 0
        for idx, ch in enumerate(corridor):
            if ch != 'S':
                continue
            seen += 1
            if seen > 2 and seen % 2 == 1:
                # A divider can go in any of the (idx - lastSeat) gaps
                # between the previous seat pair and this new one.
                ways = ways * (idx - lastSeat) % MOD
            lastSeat = idx
        return ways if seen > 1 and seen % 2 == 0 else 0
| Solution |
python | sympy__sympy | sympy/codegen/ast.py | {
"start": 28786,
"end": 29881
class ____(Token):
    """ Subclass of Token, carrying the attribute 'attrs' (Tuple)

    Examples
    ========

    >>> from sympy.codegen.ast import Node, value_const, pointer_const
    >>> n1 = Node([value_const])
    >>> n1.attr_params('value_const') # get the parameters of attribute (by name)
    ()
    >>> from sympy.codegen.fnodes import dimension
    >>> n2 = Node([value_const, dimension(5, 3)])
    >>> n2.attr_params(value_const) # get the parameters of attribute (by Attribute instance)
    ()
    >>> n2.attr_params('dimension') # get the parameters of attribute (by name)
    (5, 3)
    >>> n2.attr_params(pointer_const) is None
    True

    """

    __slots__: tuple[str, ...] = ('attrs',)
    _fields = __slots__

    defaults: dict[str, Any] = {'attrs': Tuple()}

    _construct_attrs = staticmethod(_mk_Tuple)

    def attr_params(self, looking_for):
        """ Returns the parameters of the Attribute with name ``looking_for`` in self.attrs """
        wanted = str(looking_for)
        # First attribute whose name matches wins; None when absent.
        return next(
            (attr.parameters for attr in self.attrs if str(attr.name) == wanted),
            None,
        )
| Node |
python | django-haystack__django-haystack | haystack/exceptions.py | {
"start": 795,
"end": 916
class ____(HaystackError):
    """Raised when incorrect arguments have been provided for spatial."""
    # Part of the Haystack exception hierarchy (rooted at HaystackError);
    # presumably raised by spatial-search helpers -- confirm at call sites.
    pass
| SpatialError |
python | gevent__gevent | src/gevent/_fileobjectcommon.py | {
"start": 20276,
"end": 24359
class ____(FileObjectBase):
    """
    FileObjectThread()

    A file-like object wrapping another file-like object, performing all blocking
    operations on that object in a background thread.

    .. caution::
        Attempting to change the threadpool or lock of an existing FileObjectThread
        has undefined consequences.

    .. versionchanged:: 1.1b1
       The file object is closed using the threadpool. Note that whether or
       not this action is synchronous or asynchronous is not documented.
    """

    def __init__(self, *args, **kwargs):
        """
        :keyword bool lock: If True (the default) then all operations will
           be performed one-by-one. Note that this does not guarantee that, if using
           this file object from multiple threads/greenlets, operations will be performed
           in any particular order, only that no two operations will be attempted at the
           same time. You can also pass your own :class:`gevent.lock.Semaphore` to synchronize
           file operations with an external resource.
        :keyword bool closefd: If True (the default) then when this object is closed,
           the underlying object is closed as well. If *fobj* is a path, then
           *closefd* must be True.
        """
        lock = kwargs.pop('lock', True)
        threadpool = kwargs.pop('threadpool', None)
        descriptor = OpenDescriptor(*args, **kwargs)

        self.threadpool = threadpool or get_hub().threadpool
        self.lock = lock
        if self.lock is True:
            self.lock = Semaphore()
        elif not self.lock:
            # A falsy lock disables synchronization; DummySemaphore is a no-op.
            self.lock = DummySemaphore()
        if not hasattr(self.lock, '__enter__'):
            raise TypeError('Expected a Semaphore or boolean, got %r' % type(self.lock))
        # One-element holder lets wrapped methods observe closure without a
        # reference cycle back through self (see _wrap_method).
        self.__io_holder = [descriptor.opened()] # signal for _wrap_method
        FileObjectBase.__init__(self, descriptor)

    def _do_close(self, fobj, closefd):
        # Flag closure first so racing wrapped calls raise FileObjectClosed
        # instead of dispatching to the threadpool.
        self.__io_holder[0] = None # for _wrap_method
        try:
            with self.lock:
                self.threadpool.apply(fobj.flush)
        finally:
            if closefd:
                # Note that we're not taking the lock; older code
                # did fobj.close() without going through the threadpool at all,
                # so acquiring the lock could potentially introduce deadlocks
                # that weren't present before. Avoiding the lock doesn't make
                # the existing race condition any worse.
                # We wrap the close in an exception handler and re-raise directly
                # to avoid the (common, expected) IOError from being logged by the pool
                def close(_fobj=fobj):
                    try:
                        _fobj.close()
                    except: # pylint:disable=bare-except
                        # pylint:disable-next=return-in-finally
                        return sys.exc_info()
                    finally:
                        # Drop the reference promptly inside the worker thread.
                        _fobj = None
                del fobj
                exc_info = self.threadpool.apply(close)
                del close
                if exc_info:
                    # Re-raise the close error in the calling greenlet.
                    reraise(*exc_info)

    def _do_delegate_methods(self):
        FileObjectBase._do_delegate_methods(self)
        # Re-arm the holder with the (possibly re-opened) io object.
        self.__io_holder[0] = self._io

    def _extra_repr(self):
        return ' threadpool=%r' % (self.threadpool,)

    def _wrap_method(self, method):
        # NOTE: We are careful to avoid introducing a refcycle
        # within self. Our wrapper cannot refer to self.
        io_holder = self.__io_holder
        lock = self.lock
        threadpool = self.threadpool

        @functools.wraps(method)
        def thread_method(*args, **kwargs):
            if io_holder[0] is None:
                # This is different than FileObjectPosix, etc,
                # because we want to save the expensive trip through
                # the threadpool.
                raise FileObjectClosed
            with lock:
                return threadpool.apply(method, args, kwargs)

        return thread_method
| FileObjectThread |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_base_aws.py | {
"start": 15534,
"end": 46372
} | class ____:
@mock_aws
def test_get_client_type_set_in_class_attribute(self):
client = boto3.client("emr", region_name="us-east-1")
if client.list_clusters()["Clusters"]:
raise ValueError("AWS not properly mocked")
hook = AwsBaseHook(aws_conn_id="aws_default", client_type="emr")
client_from_hook = hook.get_client_type()
assert client_from_hook.list_clusters()["Clusters"] == []
@mock_aws
def test_get_resource_type_set_in_class_attribute(self):
hook = AwsBaseHook(aws_conn_id="aws_default", resource_type="dynamodb")
resource_from_hook = hook.get_resource_type()
# this table needs to be created in production
table = resource_from_hook.create_table(
TableName="test_airflow",
KeySchema=[
{"AttributeName": "id", "KeyType": "HASH"},
],
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
)
table.meta.client.get_waiter("table_exists").wait(TableName="test_airflow")
assert table.item_count == 0
@mock_aws
def test_get_session_returns_a_boto3_session(self):
hook = AwsBaseHook(aws_conn_id="aws_default", resource_type="dynamodb")
session_from_hook = hook.get_session()
resource_from_session = session_from_hook.resource("dynamodb")
table = resource_from_session.create_table(
TableName="test_airflow",
KeySchema=[
{"AttributeName": "id", "KeyType": "HASH"},
],
AttributeDefinitions=[{"AttributeName": "id", "AttributeType": "S"}],
ProvisionedThroughput={"ReadCapacityUnits": 10, "WriteCapacityUnits": 10},
)
table.meta.client.get_waiter("table_exists").wait(TableName="test_airflow")
assert table.item_count == 0
@pytest.mark.parametrize(
"hook_params",
[
pytest.param({"client_type": "s3"}, id="client-type"),
pytest.param({"resource_type": "dynamodb"}, id="resource-type"),
],
)
def test_user_agent_extra_update(self, hook_params):
"""
We are only looking for the keys appended by the AwsBaseHook. A user_agent string
is a number of key/value pairs such as: `BOTO3/1.25.4 AIRFLOW/2.5.0.DEV0 AMPP/6.0.0`.
"""
client_meta = AwsBaseHook(aws_conn_id=None, client_type="s3").conn_client_meta
expected_user_agent_tag_keys = ["Airflow", "AmPP", "Caller", "DagRunKey"]
result_user_agent_tags = client_meta.config.user_agent.split(" ")
result_user_agent_tag_keys = [tag.split("/")[0].lower() for tag in result_user_agent_tags]
for key in expected_user_agent_tag_keys:
assert key.lower() in result_user_agent_tag_keys
@staticmethod
def fetch_tags() -> dict[str, str]:
"""Helper method which creates an AwsBaseHook and returns the user agent string split into a dict."""
user_agent_string = AwsBaseHook(client_type="s3").get_client_type().meta.config.user_agent
# Split the list of {Key}/{Value} into a dict
return dict(tag.split("/") for tag in user_agent_string.split(" "))
@pytest.mark.parametrize("found_classes", [["RandomOperator"], ["BaseSensorOperator", "TestSensor"]])
@mock.patch.object(AwsBaseHook, "_find_operator_class_name")
def test_user_agent_caller_target_function_found(self, mock_class_name, found_classes):
mock_class_name.side_effect = found_classes
user_agent_tags = self.fetch_tags()
assert mock_class_name.call_count == len(found_classes)
assert user_agent_tags["Caller"] == found_classes[-1]
@mock.patch.object(AwsEcsExecutor, "_load_run_kwargs")
def test_user_agent_caller_target_executor_found(self, mock_load_run_kwargs, sdk_connection_not_found):
with conf_vars(
{
("aws_ecs_executor", "cluster"): "foo",
("aws_ecs_executor", "region_name"): "us-east-1",
("aws_ecs_executor", "container_name"): "bar",
("aws_ecs_executor", "conn_id"): "fish",
}
):
executor = AwsEcsExecutor()
user_agent_dict = dict(tag.split("/") for tag in executor.ecs.meta.config.user_agent.split(" "))
assert user_agent_dict["Caller"] == "AwsEcsExecutor"
def test_user_agent_caller_target_function_not_found(self):
default_caller_name = "Unknown"
user_agent_tags = self.fetch_tags()
assert user_agent_tags["Caller"] == default_caller_name
@pytest.mark.db_test
@pytest.mark.parametrize(
("env_var", "expected_version"), [({"AIRFLOW_CTX_DAG_ID": "banana"}, 5), [{}, None]]
)
@mock.patch.object(AwsBaseHook, "_get_caller", return_value="Test")
def test_user_agent_dag_run_key_is_hashed_correctly(
self, _, env_var, expected_version, mock_supervisor_comms
):
if AIRFLOW_V_3_0_PLUS:
from airflow.sdk.execution_time.comms import ConnectionResult
mock_supervisor_comms.send.return_value = ConnectionResult(
conn_id="aws_default",
conn_type="aws",
)
with mock.patch.dict(os.environ, env_var, clear=True):
dag_run_key = self.fetch_tags()["DagRunKey"]
assert UUID(dag_run_key).version == expected_version
@pytest.mark.parametrize(
"sts_endpoint",
[
pytest.param(None, id="not-set"),
pytest.param("https://foo.bar/spam/egg", id="custom"),
],
)
@mock.patch.object(AwsBaseHook, "get_connection")
@mock_aws
def test_assume_role(self, mock_get_connection, sts_endpoint):
aws_conn_id = "aws/test"
role_arn = "arn:aws:iam::123456:role/role_arn"
slugified_role_session_name = "airflow_aws-test"
fake_conn_extra = {"role_arn": role_arn, "endpoint_url": "https://example.org"}
if sts_endpoint:
fake_conn_extra["service_config"] = {"sts": {"endpoint_url": sts_endpoint}}
mock_get_connection.return_value = Connection(conn_id=aws_conn_id, extra=fake_conn_extra)
def mock_assume_role(**kwargs):
assert kwargs["RoleArn"] == role_arn
# The role session name gets invalid characters removed/replaced with hyphens
# (e.g. / is replaced with -)
assert kwargs["RoleSessionName"] == slugified_role_session_name
sts_response = {
"ResponseMetadata": {"HTTPStatusCode": 200},
"Credentials": {
"Expiration": datetime.now(),
"AccessKeyId": 1,
"SecretAccessKey": 1,
"SessionToken": 1,
},
}
return sts_response
with mock.patch(
"airflow.providers.amazon.aws.hooks.base_aws.BaseSessionFactory._create_basic_session",
spec=boto3.session.Session,
) as mocked_basic_session:
mocked_basic_session.return_value.region_name = "us-east-2"
mock_client = mocked_basic_session.return_value.client
mock_client.return_value.assume_role.side_effect = mock_assume_role
AwsBaseHook(aws_conn_id=aws_conn_id, client_type="s3").get_client_type()
mocked_basic_session.assert_has_calls(
[
mock.call().client("sts", config=mock.ANY, endpoint_url=sts_endpoint),
mock.call()
.client()
.assume_role(
RoleArn=role_arn,
RoleSessionName=slugified_role_session_name,
),
]
)
def test_get_credentials_from_gcp_credentials(self):
mock_connection = Connection(
extra=json.dumps(
{
"role_arn": "arn:aws:iam::123456:role/role_arn",
"assume_role_method": "assume_role_with_web_identity",
"assume_role_with_web_identity_federation": "google",
"assume_role_with_web_identity_federation_audience": "aws-federation.airflow.apache.org",
}
)
)
mock_connection.conn_type = "aws"
# Store original __import__
orig_import = __import__
mock_id_token_credentials = mock.Mock()
def import_mock(name, *args):
if name == "airflow.providers.google.common.utils.id_token_credentials":
return mock_id_token_credentials
return orig_import(name, *args)
with (
mock.patch("builtins.__import__", side_effect=import_mock),
mock.patch.dict("os.environ", AIRFLOW_CONN_AWS_DEFAULT=mock_connection.get_uri()),
mock.patch("airflow.providers.amazon.aws.hooks.base_aws.boto3") as mock_boto3,
mock.patch("airflow.providers.amazon.aws.hooks.base_aws.botocore") as mock_botocore,
mock.patch("airflow.providers.amazon.aws.hooks.base_aws.botocore.session") as mock_session,
):
hook = AwsBaseHook(aws_conn_id="aws_default", client_type="airflow_test")
credentials_from_hook = hook.get_credentials()
mock_get_credentials = mock_boto3.session.Session.return_value.get_credentials
assert (
mock_get_credentials.return_value.get_frozen_credentials.return_value == credentials_from_hook
)
mock_boto3.assert_has_calls(
[
mock.call.session.Session(),
mock.call.session.Session()._session.__bool__(),
mock.call.session.Session(botocore_session=mock_session.get_session.return_value),
mock.call.session.Session().get_credentials(),
mock.call.session.Session().get_credentials().get_frozen_credentials(),
]
)
mock_fetcher = mock_botocore.credentials.AssumeRoleWithWebIdentityCredentialFetcher
mock_botocore.assert_has_calls(
[
mock.call.credentials.AssumeRoleWithWebIdentityCredentialFetcher(
client_creator=mock_boto3.session.Session.return_value._session.create_client,
extra_args={},
role_arn="arn:aws:iam::123456:role/role_arn",
web_identity_token_loader=mock.ANY,
),
mock.call.credentials.DeferredRefreshableCredentials(
method="assume-role-with-web-identity",
refresh_using=mock_fetcher.return_value.fetch_credentials,
time_fetcher=mock.ANY,
),
]
)
mock_session.assert_has_calls(
[
mock.call.get_session(),
mock.call.get_session().set_config_variable(
"region", mock_boto3.session.Session.return_value.region_name
),
]
)
mock_id_token_credentials.assert_has_calls(
[mock.call.get_default_id_token_credentials(target_audience="aws-federation.airflow.apache.org")]
)
@mock.patch(
"airflow.providers.amazon.aws.hooks.base_aws.botocore.credentials.AssumeRoleWithWebIdentityCredentialFetcher"
)
@mock.patch("airflow.providers.amazon.aws.hooks.base_aws.botocore.session.Session")
def test_get_credentials_from_token_file(self, mock_session, mock_credentials_fetcher):
with mock.patch.object(
AwsBaseHook,
"get_connection",
return_value=Connection(
conn_id="aws_default",
conn_type="aws",
extra=json.dumps(
{
"role_arn": "arn:aws:iam::123456:role/role_arn",
"assume_role_method": "assume_role_with_web_identity",
"assume_role_with_web_identity_token_file": "/my-token-path",
"assume_role_with_web_identity_federation": "file",
}
),
),
):
mock_open_ = mock_open(read_data="TOKEN")
with mock.patch(
"airflow.providers.amazon.aws.hooks.base_aws.botocore.utils.FileWebIdentityTokenLoader.__init__.__defaults__",
new=(mock_open_,),
):
AwsBaseHook(aws_conn_id="aws_default", client_type="airflow_test").get_session()
_, mock_creds_fetcher_kwargs = mock_credentials_fetcher.call_args
assert isinstance(
mock_creds_fetcher_kwargs["web_identity_token_loader"], FileWebIdentityTokenLoader
)
assert mock_creds_fetcher_kwargs["web_identity_token_loader"]() == "TOKEN"
assert mock_open_.call_args.args[0] == "/my-token-path"
@pytest.mark.parametrize(
"sts_endpoint",
[
pytest.param(None, id="not-set"),
pytest.param("https://foo.bar/spam/egg", id="custom"),
],
)
@mock.patch.object(AwsBaseHook, "get_connection")
@mock_aws
def test_assume_role_with_saml(self, mock_get_connection, sts_endpoint):
idp_url = "https://my-idp.local.corp"
principal_arn = "principal_arn_1234567890"
role_arn = "arn:aws:iam::123456:role/role_arn"
xpath = "1234"
duration_seconds = 901
fake_conn_extra = {
"role_arn": role_arn,
"assume_role_method": "assume_role_with_saml",
"assume_role_with_saml": {
"principal_arn": principal_arn,
"idp_url": idp_url,
"idp_auth_method": "http_spegno_auth",
"mutual_authentication": "REQUIRED",
"saml_response_xpath": xpath,
"log_idp_response": True,
},
"assume_role_kwargs": {"DurationSeconds": duration_seconds},
"endpoint_url": "https://example.org",
}
if sts_endpoint:
fake_conn_extra["service_config"] = {"sts": {"endpoint_url": sts_endpoint}}
mock_get_connection.return_value = Connection(conn_id=MOCK_AWS_CONN_ID, extra=fake_conn_extra)
encoded_saml_assertion = b64encode(SAML_ASSERTION.encode("utf-8")).decode("utf-8")
# Store original __import__
orig_import = __import__
mock_requests_gssapi = mock.Mock()
mock_auth = mock_requests_gssapi.HTTPSPNEGOAuth()
mock_lxml = mock.Mock()
mock_xpath = mock_lxml.etree.fromstring.return_value.xpath
mock_xpath.return_value = encoded_saml_assertion
def import_mock(name, *args, **kwargs):
if name == "requests_gssapi":
return mock_requests_gssapi
if name == "lxml":
return mock_lxml
return orig_import(name, *args, **kwargs)
def mock_assume_role_with_saml(**kwargs):
assert kwargs["RoleArn"] == role_arn
assert kwargs["PrincipalArn"] == principal_arn
assert kwargs["SAMLAssertion"] == encoded_saml_assertion
assert kwargs["DurationSeconds"] == duration_seconds
sts_response = {
"ResponseMetadata": {"HTTPStatusCode": 200},
"Credentials": {
"Expiration": datetime.now(),
"AccessKeyId": 1,
"SecretAccessKey": 1,
"SessionToken": 1,
},
}
return sts_response
with (
mock.patch("builtins.__import__", side_effect=import_mock),
mock.patch("airflow.providers.amazon.aws.hooks.base_aws.requests.Session.get") as mock_get,
mock.patch(
"airflow.providers.amazon.aws.hooks.base_aws.BaseSessionFactory._create_basic_session",
spec=boto3.session.Session,
) as mocked_basic_session,
):
mocked_basic_session.return_value.region_name = "us-east-2"
mock_client = mocked_basic_session.return_value.client
mock_client.return_value.assume_role_with_saml.side_effect = mock_assume_role_with_saml
AwsBaseHook(aws_conn_id="aws_default", client_type="s3").get_client_type()
mock_get.assert_called_once_with(idp_url, auth=mock_auth)
mock_xpath.assert_called_once_with(xpath)
mocked_basic_session.assert_has_calls = [
mock.call().client("sts", config=mock.ANY, endpoint_url=sts_endpoint),
mock.call()
.client()
.assume_role_with_saml(
DurationSeconds=duration_seconds,
PrincipalArn=principal_arn,
RoleArn=role_arn,
SAMLAssertion=encoded_saml_assertion,
),
]
@mock_aws
def test_expand_role(self):
conn = boto3.client("iam", region_name="us-east-1")
conn.create_role(RoleName="test-role", AssumeRolePolicyDocument="some policy")
hook = AwsBaseHook(aws_conn_id="aws_default", client_type="airflow_test")
arn = hook.expand_role("test-role")
expect_arn = conn.get_role(RoleName="test-role").get("Role").get("Arn")
assert arn == expect_arn
def test_use_default_boto3_behaviour_without_conn_id(self):
for conn_id in (None, ""):
hook = AwsBaseHook(aws_conn_id=conn_id, client_type="s3")
# should cause no exception
hook.get_client_type("s3")
@mock.patch.object(AwsBaseHook, "get_connection")
@mock_aws
def test_refreshable_credentials(self, mock_get_connection):
role_arn = "arn:aws:iam::123456:role/role_arn"
conn_id = "F5"
mock_connection = Connection(conn_id=conn_id, extra='{"role_arn":"' + role_arn + '"}')
mock_get_connection.return_value = mock_connection
hook = AwsBaseHook(aws_conn_id="aws_default", client_type="sts")
expire_on_calls = []
def mock_refresh_credentials():
expiry_datetime = datetime.now(timezone.utc)
expire_on_call = expire_on_calls.pop()
if expire_on_call:
expiry_datetime -= timedelta(minutes=1000)
else:
expiry_datetime += timedelta(minutes=1000)
credentials = {
"access_key": "1",
"secret_key": "2",
"token": "3",
"expiry_time": expiry_datetime.isoformat(),
}
return credentials
# Test with credentials that have not expired
expire_on_calls = [False]
with mock.patch(
"airflow.providers.amazon.aws.hooks.base_aws.BaseSessionFactory._refresh_credentials"
) as mock_refresh:
mock_refresh.side_effect = mock_refresh_credentials
client = hook.get_client_type()
assert mock_refresh.call_count == 1
client.get_caller_identity()
assert mock_refresh.call_count == 1
client.get_caller_identity()
assert mock_refresh.call_count == 1
assert len(expire_on_calls) == 0
# Test with credentials that have expired
expire_on_calls = [False, True]
with mock.patch(
"airflow.providers.amazon.aws.hooks.base_aws.BaseSessionFactory._refresh_credentials"
) as mock_refresh:
mock_refresh.side_effect = mock_refresh_credentials
client = hook.get_client_type("sts")
client.get_caller_identity()
assert mock_refresh.call_count == 2
client.get_caller_identity()
assert mock_refresh.call_count == 2
assert len(expire_on_calls) == 0
@mock_aws
@pytest.mark.parametrize("conn_type", ["client", "resource"])
@pytest.mark.parametrize(
("connection_uri", "region_name", "env_region", "expected_region_name"),
[
("aws://?region_name=eu-west-1", None, "", "eu-west-1"),
("aws://?region_name=eu-west-1", "cn-north-1", "", "cn-north-1"),
("aws://?region_name=eu-west-1", None, "us-east-2", "eu-west-1"),
("aws://?region_name=eu-west-1", "cn-north-1", "us-gov-east-1", "cn-north-1"),
("aws://?", "cn-north-1", "us-gov-east-1", "cn-north-1"),
("aws://?", None, "us-gov-east-1", "us-gov-east-1"),
],
)
def test_connection_region_name(
self, conn_type, connection_uri, region_name, env_region, expected_region_name
):
with mock.patch.dict(
"os.environ", AIRFLOW_CONN_TEST_CONN=connection_uri, AWS_DEFAULT_REGION=env_region
):
if conn_type == "client":
hook = AwsBaseHook(aws_conn_id="test_conn", region_name=region_name, client_type="dynamodb")
elif conn_type == "resource":
hook = AwsBaseHook(aws_conn_id="test_conn", region_name=region_name, resource_type="dynamodb")
else:
raise ValueError(f"Unsupported conn_type={conn_type!r}")
assert hook.conn_region_name == expected_region_name
@mock_aws
@pytest.mark.parametrize("conn_type", ["client", "resource"])
@pytest.mark.parametrize(
("connection_uri", "expected_partition"),
[
("aws://?region_name=eu-west-1", "aws"),
("aws://?region_name=cn-north-1", "aws-cn"),
("aws://?region_name=us-gov-east-1", "aws-us-gov"),
],
)
def test_connection_aws_partition(self, conn_type, connection_uri, expected_partition):
with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=connection_uri):
if conn_type == "client":
hook = AwsBaseHook(aws_conn_id="test_conn", client_type="dynamodb")
elif conn_type == "resource":
hook = AwsBaseHook(aws_conn_id="test_conn", resource_type="dynamodb")
else:
raise ValueError(f"Unsupported conn_type={conn_type!r}")
assert hook.conn_partition == expected_partition
@mock_aws
def test_service_name(self):
client_hook = AwsBaseHook(aws_conn_id=None, client_type="dynamodb")
resource_hook = AwsBaseHook(aws_conn_id=None, resource_type="dynamodb")
# Should not raise any error here
invalid_hook = AwsBaseHook(aws_conn_id=None, client_type="dynamodb", resource_type="dynamodb")
assert client_hook.service_name == "dynamodb"
assert resource_hook.service_name == "dynamodb"
with pytest.raises(ValueError, match="Either client_type=.* or resource_type=.* must be provided"):
invalid_hook.service_name
with pytest.raises(LookupError, match="Requested `resource_type`, but `client_type` was set instead"):
client_hook._resolve_service_name(is_resource_type=True)
with pytest.raises(LookupError, match="Requested `client_type`, but `resource_type` was set instead"):
resource_hook._resolve_service_name(is_resource_type=False)
@pytest.mark.parametrize(
("client_type", "resource_type"),
[
("s3", "dynamodb"),
(None, None),
("", ""),
],
)
def test_connection_client_resource_types_check(self, client_type, resource_type):
# Should not raise any error during Hook initialisation.
hook = AwsBaseHook(aws_conn_id=None, client_type=client_type, resource_type=resource_type)
with pytest.raises(ValueError, match="Either client_type=.* or resource_type=.* must be provided"):
hook.get_conn()
@mock_aws
def test_hook_connection_test(self):
hook = AwsBaseHook(client_type="s3")
result, message = hook.test_connection()
assert result
assert hook.client_type == "s3" # Same client_type which defined during initialisation
@mock.patch("boto3.session.Session")
def test_hook_connection_test_failed(self, mock_boto3_session):
"""Test ``test_connection`` failure."""
hook = AwsBaseHook(client_type="ec2")
# Tests that STS API return non 200 code. Under normal circumstances this is hardly possible.
response_metadata = {"HTTPStatusCode": 500, "reason": "Test Failure"}
mock_sts_client = mock.MagicMock()
mock_sts_client.return_value.get_caller_identity.return_value = {
"ResponseMetadata": response_metadata
}
mock_boto3_session.return_value.client = mock_sts_client
result, message = hook.test_connection()
assert not result
assert message == json.dumps(response_metadata)
mock_sts_client.assert_called_once_with(service_name="sts", endpoint_url=None)
def mock_error():
raise ConnectionError("Test Error")
# Something bad happen during boto3.session.Session creation (e.g. wrong credentials or conn error)
mock_boto3_session.reset_mock()
mock_boto3_session.side_effect = mock_error
result, message = hook.test_connection()
assert not result
assert message == "'ConnectionError' error occurred while testing connection: Test Error"
assert hook.client_type == "ec2"
@pytest.mark.parametrize(
("sts_service_endpoint_url", "result_url"),
[
pytest.param(None, None, id="not-set"),
pytest.param("https://sts.service:1234", "https://sts.service:1234", id="sts-service-endpoint"),
],
)
@mock.patch("boto3.session.Session")
def test_hook_connection_endpoint_url_valid(
self, mock_boto3_session, sts_service_endpoint_url, result_url, monkeypatch
):
"""Test if test_endpoint_url is valid in test connection"""
mock_sts_client = mock.MagicMock()
mock_boto3_session.return_value.client = mock_sts_client
warn_context = nullcontext()
fake_extra = {"endpoint_url": "https://test.conn:777/should/ignore/global/endpoint/url"}
if sts_service_endpoint_url:
fake_extra["service_config"] = {"sts": {"endpoint_url": sts_service_endpoint_url}}
monkeypatch.setenv(
f"AIRFLOW_CONN_{MOCK_AWS_CONN_ID.upper()}", json.dumps({"conn_type": "aws", "extra": fake_extra})
)
hook = AwsBaseHook(aws_conn_id=MOCK_AWS_CONN_ID, client_type="eks")
with warn_context:
hook.test_connection()
mock_sts_client.assert_called_once_with(service_name="sts", endpoint_url=result_url)
@mock.patch.dict(os.environ, {f"AIRFLOW_CONN_{MOCK_AWS_CONN_ID.upper()}": "aws://"})
def test_conn_config_conn_id_exists(self):
"""Test retrieve connection config if aws_conn_id exists."""
hook = AwsBaseHook(aws_conn_id=MOCK_AWS_CONN_ID)
conn_config_exist = hook.conn_config
assert conn_config_exist is hook.conn_config, "Expected cached Connection Config"
assert isinstance(conn_config_exist, AwsConnectionWrapper)
assert conn_config_exist
@pytest.mark.parametrize("aws_conn_id", ["", None], ids=["empty", "None"])
def test_conn_config_conn_id_empty(self, aws_conn_id):
"""Test retrieve connection config if aws_conn_id empty or None."""
conn_config_empty = AwsBaseHook(aws_conn_id=aws_conn_id).conn_config
assert isinstance(conn_config_empty, AwsConnectionWrapper)
assert not conn_config_empty
@mock.patch("airflow.providers.amazon.aws.hooks.base_aws.SessionFactory")
@pytest.mark.parametrize("hook_region_name", [None, "eu-west-1"])
@pytest.mark.parametrize(
"hook_botocore_config",
[
pytest.param(None, id="empty-botocore-config"),
pytest.param(Config(s3={"us_east_1_regional_endpoint": "regional"}), id="botocore-config"),
pytest.param({"s3": {"us_east_1_regional_endpoint": "regional"}}, id="botocore-config-as-dict"),
],
)
@pytest.mark.parametrize("method_region_name", [None, "cn-north-1"])
def test_get_session(
self, mock_session_factory, hook_region_name, hook_botocore_config, method_region_name
):
"""Test get boto3 Session by hook."""
mock_session_factory_instance = mock_session_factory.return_value
mock_session_factory_instance.create_session.return_value = MOCK_BOTO3_SESSION
hook = AwsBaseHook(aws_conn_id=None, region_name=hook_region_name, config=hook_botocore_config)
session = hook.get_session(region_name=method_region_name)
mock_session_factory.assert_called_once_with(
conn=hook.conn_config,
region_name=method_region_name,
config=mock.ANY,
)
assert mock_session_factory_instance.create_session.assert_called_once
assert session == MOCK_BOTO3_SESSION
@pytest.mark.parametrize("verify", [None, "path/to/cert/hook-bundle.pem", False])
@pytest.mark.parametrize("conn_verify", [None, "path/to/cert/conn-bundle.pem", False])
def test_resolve_verify(self, verify, conn_verify):
mock_conn = Connection(
conn_id="test_conn",
conn_type="aws",
extra={"verify": conn_verify} if conn_verify is not None else {},
)
with mock.patch.dict("os.environ", AIRFLOW_CONN_TEST_CONN=mock_conn.get_uri()):
hook = AwsBaseHook(aws_conn_id="test_conn", verify=verify)
expected = verify if verify is not None else conn_verify
assert hook.verify == expected
@mock.patch("airflow.providers.amazon.aws.hooks.base_aws.AwsGenericHook.get_session")
@mock.patch("airflow.providers.amazon.aws.hooks.base_aws.mask_secret")
@pytest.mark.parametrize("token", [None, "mock-aws-session-token"])
@pytest.mark.parametrize("secret_key", ["mock-aws-secret-access-key"])
@pytest.mark.parametrize("access_key", ["mock-aws-access-key-id"])
def test_get_credentials_mask_secrets(
self, mock_mask_secret, mock_boto3_session, access_key, secret_key, token
):
expected_credentials = ReadOnlyCredentials(access_key=access_key, secret_key=secret_key, token=token)
mock_credentials = mock.MagicMock(return_value=expected_credentials)
mock_boto3_session.return_value.get_credentials.return_value.get_frozen_credentials = mock_credentials
expected_calls = [mock.call(secret_key)]
if token:
expected_calls.append(mock.call(token))
hook = AwsBaseHook(aws_conn_id=None)
credentials = hook.get_credentials()
assert mock_mask_secret.mock_calls == expected_calls
assert credentials == expected_credentials
@mock_aws
def test_account_id(self):
assert AwsBaseHook(aws_conn_id=None).account_id == DEFAULT_ACCOUNT_ID
| TestAwsBaseHook |
python | dask__dask | dask/dataframe/dask_expr/_str_accessor.py | {
"start": 3717,
"end": 4405
} | class ____(Reduction):
_parameters = ["frame", "sep", "na_rep"]
@property
def chunk_kwargs(self):
return {"sep": self.sep, "na_rep": self.na_rep}
@property
def combine_kwargs(self):
return self.chunk_kwargs
@property
def aggregate_kwargs(self):
return self.chunk_kwargs
@staticmethod
def reduction_chunk(ser, *args, **kwargs):
return ser.str.cat(*args, **kwargs)
@staticmethod
def reduction_combine(ser, *args, **kwargs):
return Cat.reduction_chunk(ser, *args, **kwargs)
@staticmethod
def reduction_aggregate(ser, *args, **kwargs):
return Cat.reduction_chunk(ser, *args, **kwargs)
| Cat |
python | tensorflow__tensorflow | tensorflow/lite/python/util_test.py | {
"start": 17732,
"end": 21046
} | class ____(
test_util.TensorFlowTestCase, parameterized.TestCase
):
def _generate_int8_f32io_concat_residual_tflite(self, number_of_inputs=3):
dtype = float
class ConcatNResidual(tf.keras.layers.Layer):
"""A simple concat and residual Keras Model."""
def __init__(self, number_of_inputs=3, **kwargs):
super().__init__(**kwargs)
self.number_of_inputs = number_of_inputs
self.conv = tf.keras.layers.Conv2D(2, (2, 2), padding="same")
self.mins = [-0.01 * (i + 1) for i in range(self.number_of_inputs)]
self.maxs = [0.01 * (i + 1) for i in range(self.number_of_inputs)]
def call(self, inputs):
xs = [
tf.quantization.fake_quant_with_min_max_args(
inputs[i], self.mins[i], self.maxs[i]
)
for i in range(self.number_of_inputs)
]
x = tf.keras.backend.concatenate(xs, 1)
x = x[:, : inputs[-1].shape[1]]
x = x + xs[-1]
x = tf.quantization.fake_quant_with_min_max_args(x, -2.242, 2.242)
return x
inputs = [
tf.keras.layers.Input(shape=(2, 2, 2), batch_size=1, dtype=dtype)
for _ in range(number_of_inputs)
]
outputs = ConcatNResidual(number_of_inputs)(inputs)
model = tf.keras.Model(inputs, outputs)
converter = lite.TFLiteConverterV2.from_keras_model(model)
converter.optimizations = [lite.Optimize.DEFAULT]
tflite_model = converter.convert()
return tflite_model
def _verify_tensor_connections(self, flatbuffer_model):
"""Verify that all the tensors have input and output ops except the tensors have buffer data."""
tflite_subgraph = flatbuffer_model.subgraphs[0]
tensors = tflite_subgraph.tensors
buffers = flatbuffer_model.buffers
tensors_used_as_inputs = set()
tensors_used_as_outputs = set()
for op in tflite_subgraph.operators:
tensors_used_as_inputs.update(
idx for idx in op.inputs if buffers[tensors[idx].buffer].data is None
)
tensors_used_as_outputs.update(idx for idx in op.outputs)
tensors_used_as_inputs.update(idx for idx in tflite_subgraph.outputs)
tensors_used_as_outputs.update(idx for idx in tflite_subgraph.inputs)
self.assertEqual(tensors_used_as_inputs, tensors_used_as_outputs)
@parameterized.named_parameters([
("_IntOnly_Float32InputOutput", tf.float32),
("_IntOnly_INT8InputOutput", tf.int8),
("_IntOnly_UINT8InputOutput", tf.uint8),
])
def test(self, inference_input_output_type):
"""Make sure modifying IO types removes tensors correctly."""
srqed_int8_f32io_model = self._generate_int8_f32io_concat_residual_tflite()
if inference_input_output_type != tf.float32:
target_model = util.modify_model_io_type(
srqed_int8_f32io_model,
inference_input_output_type,
inference_input_output_type,
)
else:
target_model = srqed_int8_f32io_model
tflite_path = os.path.join(self.get_temp_dir(), "concat_residual.tflite")
with tf.io.gfile.GFile(tflite_path, "wb") as writer:
writer.write(target_model)
flatbuffer_model = _read_model(tflite_path)
self._verify_tensor_connections(flatbuffer_model)
if __name__ == "__main__":
test.main()
| UtilModifyIntegerQuantizedConcatResidualModelIOTypeTest |
python | falconry__falcon | tests/asgi/test_middleware_asgi.py | {
"start": 134,
"end": 256
} | class ____:
async def process_resource(self, req, resp, resource, params):
pass
| MiddlewareIncompatibleWithWSGI_B |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 596543,
"end": 596878
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("RepositoryInvitation", graphql_name="node")
| RepositoryInvitationEdge |
python | vyperlang__vyper | vyper/venom/memory_location.py | {
"start": 3388,
"end": 4754
} | class ____(MemoryLocation):
op: IRAbstractMemLoc
segment: MemoryLocationSegment
def is_empty(self):
return self.segment.is_empty()
@property
def is_offset_fixed(self) -> bool:
return True
@property
def is_size_fixed(self) -> bool:
return True
@property
def is_fixed(self) -> bool:
return True
@property
def is_volatile(self) -> bool:
return self.segment.is_volatile
def mk_volatile(self) -> MemoryLocationAbstract:
return dc.replace(self, segment=self.segment.mk_volatile())
@staticmethod
def may_overlap_abstract(loc1: MemoryLocationAbstract, loc2: MemoryLocationAbstract) -> bool:
if loc1.op._id == loc2.op._id:
return MemoryLocationSegment.may_overlap_concrete(loc1.segment, loc2.segment)
else:
return False
def completely_contains(self, other: MemoryLocation) -> bool:
if other == MemoryLocation.UNDEFINED:
return False
if not isinstance(other, MemoryLocationAbstract):
return False
if self.op.size is None:
return False
if other.is_empty():
return True
if self.op._id == other.op._id:
return self.segment.completely_contains(other.segment)
return False
@dataclass(frozen=True)
| MemoryLocationAbstract |
python | huggingface__transformers | examples/modular-transformers/modeling_dummy_bert.py | {
"start": 16359,
"end": 19127
} | class ____(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = DummyBertAttention(config, is_causal=config.is_decoder, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = DummyBertAttention(
config,
is_causal=False,
layer_idx=layer_idx,
is_cross_attention=True,
)
self.intermediate = DummyBertIntermediate(config)
self.output = DummyBertOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_value: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
self_attention_output, _ = self.attention(
hidden_states,
attention_mask,
past_key_value=past_key_value,
cache_position=cache_position,
**kwargs,
)
attention_output = self_attention_output
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
" by setting `config.add_cross_attention=True`"
)
cross_attention_output, _ = self.crossattention(
self_attention_output,
None, # attention_mask
encoder_hidden_states,
encoder_attention_mask,
past_key_value=past_key_value,
**kwargs,
)
attention_output = cross_attention_output
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
return layer_output
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
| DummyBertLayer |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/pipelines/config.py | {
"start": 8901,
"end": 9383
} | class ____(namedtuple("_EvaluationError", "stack reason message error_data")):
def __new__(cls, stack, reason, message, error_data):
return super().__new__(
cls,
check.inst_param(stack, "stack", GrapheneEvaluationStack),
check.inst_param(reason, "reason", GrapheneEvaluationErrorReason),
check.str_param(message, "message"),
check.inst_param(error_data, "error_data", ERROR_DATA_TYPES),
)
| EvaluationError |
python | huggingface__transformers | src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | {
"start": 104422,
"end": 106134
} | class ____(nn.Module):
def __init__(self, embed_dim, hidden_dim, kernel_size, var_pred_dropout):
super().__init__()
self.conv1 = nn.Conv1d(
embed_dim,
hidden_dim,
kernel_size=kernel_size,
padding="same",
)
self.activation_function = nn.ReLU()
self.ln1 = nn.LayerNorm(hidden_dim)
self.dropout_module = nn.Dropout(p=var_pred_dropout)
self.conv2 = nn.Conv1d(
hidden_dim,
hidden_dim,
kernel_size=kernel_size,
padding="same",
)
self.ln2 = nn.LayerNorm(hidden_dim)
self.proj = nn.Linear(hidden_dim, 1)
def forward(self, hidden_states: Tensor, padding_mask: Optional[Tensor] = None) -> Tensor:
# Input: B x T x C; Output: B x T
if padding_mask is not None:
hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
hidden_states = self.conv1(hidden_states.transpose(1, 2))
hidden_states = self.activation_function(hidden_states).transpose(1, 2)
hidden_states = self.dropout_module(self.ln1(hidden_states))
if padding_mask is not None:
hidden_states = hidden_states.masked_fill(~padding_mask.bool().unsqueeze(-1), 0.0)
hidden_states = self.conv2(hidden_states.transpose(1, 2))
hidden_states = self.activation_function(hidden_states).transpose(1, 2)
hidden_states = self.dropout_module(self.ln2(hidden_states))
return self.proj(hidden_states).squeeze(dim=2)
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4THifiGan with SeamlessM4T->SeamlessM4Tv2
| SeamlessM4Tv2VariancePredictor |
python | kamyu104__LeetCode-Solutions | Python/find-missing-observations.py | {
"start": 29,
"end": 516
} | class ____(object):
def missingRolls(self, rolls, mean, n):
"""
:type rolls: List[int]
:type mean: int
:type n: int
:rtype: List[int]
"""
MAX_V = 6
MIN_V = 1
total = sum(rolls)
missing = mean*(n+len(rolls))-total
if missing < MIN_V*n or missing > MAX_V*n:
return []
q, r = divmod(missing, n)
return [q+int(i < r) for i in xrange(n)]
# Time: O(n)
# Space: O(1)
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.