language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | src/sentry/utils/snuba_rpc.py | {
"start": 2496,
"end": 12165
} | class ____(Protocol):
def SerializeToString(self, deterministic: bool = ...) -> bytes: ...
@property
def meta(
self,
) -> (
sentry_protos.snuba.v1alpha.request_common_pb2.RequestMeta
| sentry_protos.snuba.v1.request_common_pb2.RequestMeta
): ...
def table_rpc(requests: list[TraceItemTableRequest]) -> list[TraceItemTableResponse]:
return _make_rpc_requests(table_requests=requests).table_response
def timeseries_rpc(requests: list[TimeSeriesRequest]) -> list[TimeSeriesResponse]:
return _make_rpc_requests(timeseries_requests=requests).timeseries_response
def get_trace_rpc(request: GetTraceRequest) -> GetTraceResponse:
resp = _make_rpc_request("EndpointGetTrace", "v1", referrer=request.meta.referrer, req=request)
response = GetTraceResponse()
response.ParseFromString(resp.data)
return response
@sentry_sdk.trace
def _make_rpc_requests(
table_requests: list[TraceItemTableRequest] | None = None,
timeseries_requests: list[TimeSeriesRequest] | None = None,
) -> MultiRpcResponse:
"""Given lists of requests batch and run them together"""
# Throw the two lists together, _make_rpc_requests will just run them all
table_requests = [] if table_requests is None else table_requests
timeseries_requests = [] if timeseries_requests is None else timeseries_requests
requests = table_requests + timeseries_requests
endpoint_names = [
"EndpointTraceItemTable" if isinstance(req, TraceItemTableRequest) else "EndpointTimeSeries"
for req in requests
]
referrers = [req.meta.referrer for req in requests]
assert (
len(referrers) == len(requests) == len(endpoint_names)
), "Length of Referrers must match length of requests for making requests"
if referrers:
sentry_sdk.set_tag("query.referrer", referrers[0])
# Sets the thread parameters once so we're not doing it in the map repeatedly
partial_request = partial(
_make_rpc_request,
thread_isolation_scope=sentry_sdk.get_isolation_scope(),
thread_current_scope=sentry_sdk.get_current_scope(),
)
with ThreadPoolExecutor(thread_name_prefix=__name__, max_workers=10) as query_thread_pool:
response = [
result
for result in query_thread_pool.map(
partial_request,
endpoint_names,
# Currently assuming everything is v1
["v1"] * len(referrers),
referrers,
requests,
)
]
# Split the results back up, the thread pool will return them back in order so we can use the type in the
# requests list to determine which request goes where
timeseries_results = []
table_results = []
for request, item in zip(requests, response):
if isinstance(request, TraceItemTableRequest):
table_response = TraceItemTableResponse()
table_response.ParseFromString(item.data)
table_results.append(table_response)
elif isinstance(request, TimeSeriesRequest):
timeseries_response = TimeSeriesResponse()
timeseries_response.ParseFromString(item.data)
timeseries_results.append(timeseries_response)
return MultiRpcResponse(table_results, timeseries_results)
def attribute_names_rpc(req: TraceItemAttributeNamesRequest) -> TraceItemAttributeNamesResponse:
resp = _make_rpc_request("EndpointTraceItemAttributeNames", "v1", req.meta.referrer, req)
response = TraceItemAttributeNamesResponse()
response.ParseFromString(resp.data)
return response
def attribute_values_rpc(req: TraceItemAttributeValuesRequest) -> TraceItemAttributeValuesResponse:
resp = _make_rpc_request("AttributeValuesRequest", "v1", req.meta.referrer, req)
response = TraceItemAttributeValuesResponse()
response.ParseFromString(resp.data)
return response
def get_traces_rpc(req: GetTracesRequest) -> GetTracesResponse:
resp = _make_rpc_request("EndpointGetTraces", "v1", req.meta.referrer, req)
response = GetTracesResponse()
response.ParseFromString(resp.data)
return response
def trace_item_stats_rpc(req: TraceItemStatsRequest) -> TraceItemStatsResponse:
resp = _make_rpc_request("EndpointTraceItemStats", "v1", req.meta.referrer, req)
response = TraceItemStatsResponse()
response.ParseFromString(resp.data)
return response
def trace_item_details_rpc(req: TraceItemDetailsRequest) -> TraceItemDetailsResponse:
"""
An RPC which requests all of the details about a specific trace item.
For example, you might say "give me all of the attributes for the log with id 1234" or
"give me all of the attributes for the span with id 12345 and trace_id 34567"
"""
resp = _make_rpc_request("EndpointTraceItemDetails", "v1", req.meta.referrer, req)
response = TraceItemDetailsResponse()
response.ParseFromString(resp.data)
return response
def delete_trace_items_rpc(req: DeleteTraceItemsRequest) -> DeleteTraceItemsResponse:
"""
An RPC which deletes trace items matching the filters specified in the request.
Used for deleting EAP trace items (e.g. occurrences).
"""
resp = _make_rpc_request("EndpointDeleteTraceItems", "v1", req.meta.referrer, req)
response = DeleteTraceItemsResponse()
response.ParseFromString(resp.data)
return response
def rpc(
req: SnubaRPCRequest,
resp_type: type[RPCResponseType],
) -> RPCResponseType:
"""
You want to call a snuba RPC. Here's how you do it:
start_time_proto = ProtobufTimestamp()
start_time_proto.FromDatetime(start)
end_time_proto = ProtobufTimestamp()
end_time_proto.FromDatetime(end)
aggregate_req = AggregateBucketRequest(
meta=RequestMeta(
organization_id=organization.id,
cogs_category="events_analytics_platform",
referrer=referrer,
project_ids=[project.id for project in projects],
start_timestamp=start_time_proto,
end_timestamp=end_time_proto,
trace_item_type=TraceItemType.TRACE_ITEM_TYPE_SPAN,
),
aggregate=AggregateBucketRequest.FUNCTION_SUM,
filter=TraceItemFilter(
comparison_filter=ComparisonFilter(
key=AttributeKey(name="op", type=AttributeKey.Type.TYPE_STRING),
value=AttributeValue(val_str="ai.run"),
)
),
granularity_secs=60,
key=AttributeKey(
name="duration", type=AttributeKey.TYPE_FLOAT
),
attribute_key_transform_context=AttributeKeyTransformContext(),
)
aggregate_resp = snuba.rpc(aggregate_req, AggregateBucketResponse)
"""
cls = req.__class__
endpoint_name = cls.__name__
class_version = cls.__module__.split(".", 3)[2]
http_resp = _make_rpc_request(endpoint_name, class_version, req.meta.referrer, req)
resp = resp_type()
resp.ParseFromString(http_resp.data)
return resp
@sentry_sdk.trace
def _make_rpc_request(
endpoint_name: str,
class_version: str,
referrer: str | None,
req: SnubaRPCRequest | CreateSubscriptionRequest,
thread_isolation_scope: sentry_sdk.Scope | None = None,
thread_current_scope: sentry_sdk.Scope | None = None,
) -> BaseHTTPResponse:
thread_isolation_scope = (
sentry_sdk.get_isolation_scope()
if thread_isolation_scope is None
else thread_isolation_scope
)
thread_current_scope = (
sentry_sdk.get_current_scope() if thread_current_scope is None else thread_current_scope
)
if SNUBA_INFO:
from google.protobuf.json_format import MessageToJson
log_snuba_info(f"{referrer}.body:\n{MessageToJson(req)}") # type: ignore[arg-type]
with sentry_sdk.scope.use_isolation_scope(thread_isolation_scope):
with sentry_sdk.scope.use_scope(thread_current_scope):
with sentry_sdk.start_span(op="snuba_rpc.run", name=req.__class__.__name__) as span:
if referrer:
span.set_tag("snuba.referrer", referrer)
span.set_data("snuba.query", req)
try:
http_resp = _snuba_pool.urlopen(
"POST",
f"/rpc/{endpoint_name}/{class_version}",
body=req.SerializeToString(),
headers=(
{
"referer": referrer,
}
if referrer
else {}
),
)
except urllib3.exceptions.HTTPError as err:
raise SnubaRPCError(err)
span.set_tag("timeout", "False")
if http_resp.status != 200 and http_resp.status != 202:
error = ErrorProto()
error.ParseFromString(http_resp.data)
if SNUBA_INFO:
log_snuba_info(f"{referrer}.error:\n{error}")
if http_resp.status == 404:
raise NotFound() from SnubaRPCError(error)
raise SnubaRPCError(error)
return http_resp
def create_subscription(req: CreateSubscriptionRequest) -> CreateSubscriptionResponse:
cls = req.__class__
endpoint_name = cls.__name__
class_version = cls.__module__.split(".", 3)[2]
http_resp = _make_rpc_request(endpoint_name, class_version, None, req)
resp = CreateSubscriptionResponse()
resp.ParseFromString(http_resp.data)
return resp
| SnubaRPCRequest |
python | django-import-export__django-import-export | tests/core/tests/test_widgets.py | {
"start": 3760,
"end": 5943
} | class ____(TestCase, RowDeprecationTestMixin):
def setUp(self):
self.date = date(2012, 8, 13)
self.widget = widgets.DateWidget("%d.%m.%Y")
def test_render(self):
self.assertEqual(self.widget.render(self.date), "13.08.2012")
def test_render_derived_date(self):
derived_date = CustomDate(2012, 8, 13)
self.assertEqual(self.widget.render(derived_date), "13.08.2012")
def test_render_none(self):
self.assertEqual(self.widget.render(None), "")
def test_render_invalid_type(self):
self.assertEqual(self.widget.render(int(1)), "")
def test_render_coerce_to_string_is_False(self):
self.widget = widgets.DateWidget(coerce_to_string=False)
self.assertEqual(self.date, self.widget.render(self.date))
def test_render_datetime_safe(self):
"""datetime_safe is supposed to be used to support dates older than 1000"""
self.date = date(10, 8, 2)
self.assertEqual(self.widget.render(self.date), "02.08.0010")
def test_clean(self):
self.assertEqual(self.widget.clean("13.08.2012"), self.date)
def test_clean_returns_None_for_empty_value(self):
self.assertIsNone(self.widget.clean(None))
def test_clean_returns_date_when_date_passed(self):
self.assertEqual(self.date, self.widget.clean(self.date))
@patch("import_export.widgets.logger")
def test_clean_raises_ValueError(self, mock_logger):
self.widget = widgets.DateWidget("x")
with self.assertRaisesRegex(
ValueError, "Value could not be parsed using defined formats."
):
self.widget.clean("2021-05-01")
mock_logger.debug.assert_called_with(
"time data '2021-05-01' does not match format 'x'"
)
@override_settings(USE_TZ=True)
def test_use_tz(self):
self.assertEqual(self.widget.render(self.date), "13.08.2012")
self.assertEqual(self.widget.clean("13.08.2012"), self.date)
@override_settings(DATE_INPUT_FORMATS=None)
def test_default_format(self):
self.widget = widgets.DateWidget()
self.assertEqual(("%Y-%m-%d",), self.widget.formats)
| DateWidgetTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_format19.py | {
"start": 315,
"end": 1563
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_format19.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart(
{
"type": "column",
"subtype": "stacked",
}
)
chart.axis_ids = [56127488, 57455360]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series(
{
"values": "=Sheet1!$C$1:$C$5",
"data_labels": {"value": 1, "position": "inside_base"},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | doocs__leetcode | solution/1300-1399/1385.Find the Distance Value Between Two Arrays/Solution.py | {
"start": 0,
"end": 273
} | class ____:
def findTheDistanceValue(self, arr1: List[int], arr2: List[int], d: int) -> int:
arr2.sort()
ans = 0
for x in arr1:
i = bisect_left(arr2, x - d)
ans += i == len(arr2) or arr2[i] > x + d
return ans
| Solution |
python | pytorch__pytorch | torch/distributed/_functional_collectives.py | {
"start": 32116,
"end": 46275
} | class ____(torch.autograd.Function):
"""
_FromTorchTensor allows autograd to propagate from a normal Tensor to an
AsyncCollectiveTensor.
"""
@staticmethod
def forward( # type: ignore[override]
ctx, # pyre-ignore[2]: Parameter must be annotated.
input: torch.Tensor,
) -> torch.Tensor:
return _maybe_wrap_tensor(input)
@staticmethod
def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor: # type: ignore[override]
return grad_output
def _are_we_tracing() -> bool:
if is_torchdynamo_compiling():
return True
# If fake mode is turned on, we are almost definitely compiling/tracing.
if torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.FAKE) is not None:
return True
# See Note [enable_python_dispatcher in dynamo]
if torch._C._dispatch_tls_is_dispatch_key_included(
torch._C.DispatchKey.PythonDispatcher
):
return True
return get_proxy_mode() is not None
def _maybe_wrap_tensor(self) -> torch.Tensor:
if _are_we_tracing():
return wait_tensor(self)
res = AsyncCollectiveTensor(self)
return cast(torch.Tensor, res)
@contextlib.contextmanager
def allow_inflight_collective_as_graph_input_ctx(value: bool = True):
"""
Context manager to temporarily set whether inflight collectives are allowed as torch.compile graph inputs.
Common use case is when the collective is issued in eager (with `async_op=True`) but waited in compiled region:
```
def all_reduce_eager(x):
y = x * x
req = dist.all_reduce(y, op=dist.ReduceOp.SUM, async_op=True)
return y
@torch.compile(fullgraph=True)
def all_reduce_wait_compiled(y):
torch.ops.c10d_functional.wait_tensor(y)
return y * y
x = torch.ones(1280, 1280, device="cuda") + self.rank
# the context manager ensures that `wait_tensor(y)` will wait on the correct work object
with allow_inflight_collective_as_graph_input_ctx():
y = all_reduce_eager(x)
z = all_reduce_wait_compiled(y)
```
With this context manager, when a collective is called, under the hood the work object of the collective
will be registered in the work registry, and the wait_tensor() in compiled region called on
the output tensor of the collective will wait on the correct work object.
"""
previous = torch._C._distributed_c10d._allow_inflight_collective_as_graph_input()
try:
torch._C._distributed_c10d._set_allow_inflight_collective_as_graph_input(value)
yield
finally:
torch._C._distributed_c10d._set_allow_inflight_collective_as_graph_input(
previous
)
def _make_all_gather_out_tensor(input, group_size):
out_size = list(input.size())
if len(out_size) == 0:
out_size.append(group_size)
else:
out_size[0] *= group_size
out_tensor = input.new_empty(out_size)
return out_tensor
def _all_gather_into_tensor_coalesced_meta(self, tag, rankset, group_size):
return [_make_all_gather_out_tensor(t, group_size) for t in self]
# We now register meta kernels to deal with tracing
def _broadcast_meta(self, *args):
return torch.empty_like(self)
def _all_reduce_meta(self, *args):
return torch.empty_like(self)
def _wait_tensor_meta(self, *args):
return torch.empty_like(self)
def _all_gather_into_tensor_meta(shard, tag, rankset, group_size):
return _make_all_gather_out_tensor(shard, group_size)
def _reduce_scatter_tensor_meta(input, reduce_op, tag, rankset, group_size):
out_size = list(input.size())
out_size[0] //= group_size
return input.new_empty(out_size)
def _all_reduce_coalesced_meta(self, *args):
return [torch.empty_like(t) for t in self]
def _all_reduce__meta(inp, *args):
return inp
def _broadcast__meta(inp, *args):
return inp
def _all_reduce_coalesced__meta(inputs, *args):
return inputs
def _reduce_scatter_tensor_coalesced_meta(inputs, reduceOp, tag, rankset, group_size):
def mk_out_tensor(input):
out_size = list(input.size())
out_size[0] //= group_size
out_tensor = input.new_empty(out_size)
return out_tensor
return [mk_out_tensor(t) for t in inputs]
# NB: We often say all_to_all has dynamic output size, but this is not
# technically true: instead, what typically happens is you manually
# communicate the output_split_sizes ahead of time (which is dynamic),
# but then you pass those sizes explicitly, and the all to all itself
# isn't dynamic, it just follows the specified output splits
def _all_to_all_single_meta(
input, output_split_sizes, input_split_sizes, *args, **kwargs
):
if output_split_sizes is None:
return input.new_empty(input.size())
else:
for s in output_split_sizes:
torch._check(s >= 0)
out_size = list(input.size())
out_size[0] = sum(output_split_sizes)
return input.new_empty(out_size)
def _all_gather_into_tensor_out_native_meta(input, group_size, group_name, *, out):
return _make_all_gather_out_tensor(input, group_size)
def _all_gather_into_tensor_native_meta(input, group_size, group_name):
return _make_all_gather_out_tensor(input, group_size)
def _all_gather_into_tensor_coalesced_native_meta(inputs, group_size, group_name):
return [
_all_gather_into_tensor_native_meta(input, group_size, group_name)
for input in inputs
]
def _reduce_scatter_tensor_native_meta(inp, reduce_op, group_size, group_name):
shape = list(inp.size())
shape[0] //= group_size
return inp.new_empty(shape)
def _reduce_scatter_tensor_coalesced_native_meta(
inputs, reduce_op, group_size, group_name
):
return [
_reduce_scatter_tensor_native_meta(inp, reduce_op, group_size, group_name)
for inp in inputs
]
# Library MUST be defined at module scope or it doesn't work
lib_impl = torch.library.Library("_c10d_functional", "IMPL")
lib_impl.impl("all_reduce", _all_reduce_meta, "Meta")
lib_impl.impl("all_reduce_", _all_reduce__meta, "Meta")
lib_impl.impl("all_reduce_coalesced", _all_reduce_coalesced_meta, "Meta")
lib_impl.impl("all_reduce_coalesced_", _all_reduce_coalesced__meta, "Meta")
lib_impl.impl("wait_tensor", _wait_tensor_meta, "Meta")
lib_impl.impl(
"all_gather_into_tensor_out", _all_gather_into_tensor_out_native_meta, "Meta"
)
lib_impl.impl("all_gather_into_tensor", _all_gather_into_tensor_native_meta, "Meta")
lib_impl.impl(
"all_gather_into_tensor_coalesced",
_all_gather_into_tensor_coalesced_native_meta,
"Meta",
)
lib_impl.impl("reduce_scatter_tensor", _reduce_scatter_tensor_native_meta, "Meta")
lib_impl.impl(
"reduce_scatter_tensor_coalesced",
_reduce_scatter_tensor_coalesced_native_meta,
"Meta",
)
lib_impl.impl("all_to_all_single", _all_to_all_single_meta, "Meta")
lib_impl.impl("broadcast", _broadcast_meta, "Meta")
lib_impl.impl("broadcast_", _broadcast__meta, "Meta")
# mark these ops has side effect so that they won't be removed by DCE
torch.fx.node.has_side_effect(torch.ops._c10d_functional.wait_tensor.default) # type: ignore[has-type]
torch.fx.node.has_side_effect(torch.ops._c10d_functional.wait_tensor) # type: ignore[has-type]
# Register legacy ops for backward compatibility
# TODO(yifu): remove these in functional collective beta release
legacy_lib = torch.library.Library("c10d_functional", "DEF")
legacy_lib_impl = torch.library.Library("c10d_functional", "IMPL")
ops_defs = [
"broadcast(Tensor self, int src, str tag, int[] ranks, int group_size) -> Tensor",
"all_reduce(Tensor self, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor",
"all_reduce_coalesced(Tensor[] self, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor[]",
"wait_tensor(Tensor self) -> Tensor",
"all_gather_into_tensor(Tensor shard, str tag, int[] ranks, int group_size) -> Tensor",
"all_gather_into_tensor_coalesced(Tensor[] input, str tag, int[] ranks, int group_size) -> Tensor[]",
"reduce_scatter_tensor(Tensor input, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor",
"reduce_scatter_tensor_coalesced(Tensor[] inputs, str reduceOp, str tag, int[] ranks, int group_size) -> Tensor[]",
"all_to_all_single(Tensor input, SymInt[]? output_split_sizes, SymInt[]? input_split_sizes, str tag, int[] ranks, int group_size) -> Tensor", # noqa: B950
]
my_module = sys.modules[__name__]
for op_def in ops_defs:
op_name = op_def[0 : op_def.index("(")]
backend_impl = getattr(fun_col_impl, f"_{op_name}")
legacy_lib.define(op_def, tags=torch.Tag.pt2_compliant_tag)
legacy_lib_impl.impl(op_name, backend_impl, "CompositeImplicitAutograd")
"""
Dynamo Remappings allow seamless translation from non-functional collectives of supportable form into
functional collective calls followed by inplace copy ops, allowing them to be traced into a functional graph.
We implement this by writing a decomposition and teaching dynamo how to associate it to a corresponding op via
the mapping dict below.
These schemas intentionally match torch.distributed.distributed_c10d.* ops that we are trying to remap from
"""
def all_gather_tensor_inplace(
output_tensor: torch.Tensor,
input_tensor: torch.Tensor,
group=None, # TODO add a type,
async_op: bool = False,
tag: str = "",
gather_dim: int = 0,
):
if async_op:
raise AssertionError(
"Can't remap async version of inplace op to functional collective"
)
group = group or dist.group.WORLD
if group is None:
raise AssertionError("group cannot be None")
return output_tensor.copy_(all_gather_tensor(input_tensor, gather_dim, group, tag))
def reduce_scatter_tensor_inplace(
output: torch.Tensor,
input: torch.Tensor,
op: str = "sum", # TODO type is actually c10d ReduceOp. is this ok?
group=None, # TODO add a type
async_op: bool = False,
scatter_dim: int = 0,
tag: str = "",
):
if async_op:
raise AssertionError(
"Can't remap async version of inplace op to functional collective"
)
group = group or dist.group.WORLD
if group is None:
raise AssertionError("group cannot be None")
return output.copy_(reduce_scatter_tensor(input, op, scatter_dim, group, tag))
REDUCE_OP_TO_STR = {
dist.ReduceOp.SUM: "sum",
dist.ReduceOp.AVG: "avg",
dist.ReduceOp.PRODUCT: "product",
dist.ReduceOp.MIN: "min",
dist.ReduceOp.MAX: "max",
dist.ReduceOp.BAND: "band",
dist.ReduceOp.BOR: "bor",
dist.ReduceOp.BXOR: "bxor",
}
def all_reduce_inplace(
tensor: torch.Tensor,
op: str = "sum",
group=None,
async_op: bool = False,
tag: str = "",
):
if async_op:
raise AssertionError(
"Can't remap async version of inplace op to functional collective"
)
group = group or dist.group.WORLD
if group is None:
raise AssertionError("group cannot be None")
return tensor.copy_(all_reduce(tensor, op, group, tag))
def all_to_all_inplace(
output: torch.Tensor,
input: torch.Tensor,
output_split_sizes=None,
input_split_sizes=None,
group=None,
async_op=False,
tag: str = "",
):
if async_op:
raise AssertionError(
"Can't remap async version of inplace op to functional collective"
)
group = group or dist.group.WORLD
if group is None:
raise AssertionError("group cannot be None")
return output.copy_(
all_to_all_single(
input,
output_split_sizes,
input_split_sizes,
group,
tag,
)
)
def all_gather_inplace(
tensor_list: list[torch.Tensor],
tensor: torch.Tensor,
group=None,
async_op=False,
tag: str = "",
):
if async_op:
raise AssertionError(
"Can't remap async version of inplace op to functional collective"
)
if tensor.dim() != 0 and not all(t.size(0) == tensor.size(0) for t in tensor_list):
raise AssertionError("Remapping variable size all_gather is not yet supported")
group = group or dist.group.WORLD
if group is None:
raise AssertionError("group cannot be None")
output = all_gather_tensor(tensor, 0, group, tag)
# Use aten.slice instead of aten.split because the latter causes
# tensor.shape(0) to be unnecessarily baked in when it's a SymInt.
output_splits = []
offset = 0
for t in tensor_list:
is_scalar = t.dim() == 0
t_offset = 1 if is_scalar else t.size(0)
# pyrefly: ignore [unsupported-operation]
out = output[offset] if is_scalar else output[offset : offset + t_offset]
output_splits.append(out)
# pyrefly: ignore [unsupported-operation]
offset += t_offset
for dst, src in zip(tensor_list, output_splits):
dst.copy_(src)
return tensor_list
from torch.distributed.distributed_c10d import ( # pyrefly: ignore # deprecated; pyrefly: ignore [deprecated]
_all_gather_base as legacy_all_gather_base,
_reduce_scatter_base as legacy_reduce_scatter_base,
all_gather as legacy_all_gather,
all_gather_into_tensor as legacy_allgather,
all_reduce as legacy_allreduce,
all_to_all_single as legacy_all_to_all_single,
reduce_scatter_tensor as legacy_reducescatter,
)
# This dict should contain sets of functions that dynamo is allowed to remap.
# Functions in this set should accept the same args/kwargs 1:1 as their mapping.
traceable_collective_remaps = {
legacy_allgather: all_gather_tensor_inplace, # type: ignore[has-type]
legacy_reducescatter: reduce_scatter_tensor_inplace, # type: ignore[has-type]
legacy_allreduce: all_reduce_inplace, # type: ignore[has-type]
legacy_all_to_all_single: all_to_all_inplace, # type: ignore[has-type]
legacy_all_gather: all_gather_inplace, # type: ignore[has-type]
legacy_reduce_scatter_base: reduce_scatter_tensor_inplace, # type: ignore[has-type]
legacy_all_gather_base: all_gather_tensor_inplace, # type: ignore[has-type]
}
| _FromTorchTensor |
python | joke2k__faker | tests/providers/test_currency.py | {
"start": 17878,
"end": 18413
} | class ____(TestCurrencyProvider):
"""Test uk_UA currency provider."""
@classmethod
def setup_class(cls):
from faker.providers.currency.uk_UA import Provider as UkUaCurrencyProvider
cls.provider = UkUaCurrencyProvider
cls.currencies = cls.provider.currencies
cls.cryptocurrencies = cls.provider.cryptocurrencies
cls.currency_codes, cls.currency_names = tuple(zip(*cls.currencies))
cls.cryptocurrency_codes, cls.cryptocurrency_names = tuple(zip(*cls.cryptocurrencies))
| TestUkUa |
python | ray-project__ray | python/ray/train/v2/_internal/state/schema.py | {
"start": 4508,
"end": 4735
} | class ____(BaseModel):
"""GPU usage statistics for a process."""
pid: int = Field(description="The process ID.")
gpuMemoryUsage: int = Field(description="The GPU memory usage in bytes.")
@DeveloperAPI
| ProcessGPUUsage |
python | kubernetes-client__python | kubernetes/client/models/v1beta1_volume_attributes_class.py | {
"start": 383,
"end": 9693
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'driver_name': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'parameters': 'dict(str, str)'
}
attribute_map = {
'api_version': 'apiVersion',
'driver_name': 'driverName',
'kind': 'kind',
'metadata': 'metadata',
'parameters': 'parameters'
}
def __init__(self, api_version=None, driver_name=None, kind=None, metadata=None, parameters=None, local_vars_configuration=None): # noqa: E501
"""V1beta1VolumeAttributesClass - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._driver_name = None
self._kind = None
self._metadata = None
self._parameters = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.driver_name = driver_name
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if parameters is not None:
self.parameters = parameters
@property
def api_version(self):
"""Gets the api_version of this V1beta1VolumeAttributesClass. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1beta1VolumeAttributesClass. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1beta1VolumeAttributesClass.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1beta1VolumeAttributesClass. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def driver_name(self):
"""Gets the driver_name of this V1beta1VolumeAttributesClass. # noqa: E501
Name of the CSI driver This field is immutable. # noqa: E501
:return: The driver_name of this V1beta1VolumeAttributesClass. # noqa: E501
:rtype: str
"""
return self._driver_name
@driver_name.setter
def driver_name(self, driver_name):
"""Sets the driver_name of this V1beta1VolumeAttributesClass.
Name of the CSI driver This field is immutable. # noqa: E501
:param driver_name: The driver_name of this V1beta1VolumeAttributesClass. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and driver_name is None: # noqa: E501
raise ValueError("Invalid value for `driver_name`, must not be `None`") # noqa: E501
self._driver_name = driver_name
@property
def kind(self):
"""Gets the kind of this V1beta1VolumeAttributesClass. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1beta1VolumeAttributesClass. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1beta1VolumeAttributesClass.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1beta1VolumeAttributesClass. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1beta1VolumeAttributesClass. # noqa: E501
:return: The metadata of this V1beta1VolumeAttributesClass. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1beta1VolumeAttributesClass.
:param metadata: The metadata of this V1beta1VolumeAttributesClass. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def parameters(self):
"""Gets the parameters of this V1beta1VolumeAttributesClass. # noqa: E501
parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass. This field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field. # noqa: E501
:return: The parameters of this V1beta1VolumeAttributesClass. # noqa: E501
:rtype: dict(str, str)
"""
return self._parameters
@parameters.setter
def parameters(self, parameters):
"""Sets the parameters of this V1beta1VolumeAttributesClass.
parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass. This field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field. # noqa: E501
:param parameters: The parameters of this V1beta1VolumeAttributesClass. # noqa: E501
:type: dict(str, str)
"""
self._parameters = parameters
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1VolumeAttributesClass):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1VolumeAttributesClass):
return True
return self.to_dict() != other.to_dict()
| V1beta1VolumeAttributesClass |
python | scipy__scipy | scipy/stats/tests/test_generation/reference_distributions.py | {
"start": 14851,
"end": 15120
} | class ____(ReferenceDistribution):
def __init(self, *, df):
super().__init__(df=df)
def _pdf(self, x, df):
return (mp.gamma((df + mp.one)/2)/(mp.sqrt(df * mp.pi) * mp.gamma(df/2))
* (mp.one + x*x/df)**(-(df + mp.one)/2))
| StudentT |
python | django-crispy-forms__django-crispy-forms | crispy_forms/layout.py | {
"start": 28923,
"end": 29428
} | class ____:
"""
Layout object. It can contain pure HTML and it has access to the whole
context of the page where the form is being rendered.
Examples::
HTML("{% if saved %}Data saved{% endif %}")
HTML('<input type="hidden" name="{{ step_field }}" value="{{ step0 }}" />')
"""
def __init__(self, html):
self.html = html
def render(self, form, context, template_pack=TEMPLATE_PACK, **kwargs):
return Template(str(self.html)).render(context)
| HTML |
python | spyder-ide__spyder | spyder/api/widgets/mixins.py | {
"start": 13929,
"end": 23367
} | class ____:
"""
Provide methods to create, add and get actions in a unified way.
This mixin uses a custom action object.
"""
def _update_action_state(self, action_name, value):
"""
This allows to update the state of a togglable action without emitting
signals.
This is useful when the global application configuration changes and
we need to propagate the current state of an action based on those
changes
"""
self.blockSignals(True)
try:
self.get_action(action_name).setChecked(value)
except SpyderAPIError:
pass
self.blockSignals(False)
# Comment: The word `context` is used for two different concepts.
# On one side it refers to a Qt widget shortcut context and on the
# other it refers to a section of the configuration (or the widget
# name where it is applied).
def create_action(self, name, text, icon=None, icon_text='', tip=None,
toggled=None, triggered=None, data=None,
shortcut=None, shortcut_context=None,
context=Qt.WidgetWithChildrenShortcut, initial=None,
register_shortcut=False, section=None, option=None,
parent=None, register_action=True, overwrite=False,
context_name=None, menurole=None):
"""
name: str
unique identifiable name for the action
text: str
Localized text for the action
icon: QIcon,
Icon for the action when applied to menu or toolbutton.
icon_text: str
Icon for text in toolbars. If True, this will also disable
the tooltip on this toolbutton if part of a toolbar.
tip: str
Tooltip to define for action on menu or toolbar.
toggled: Optional[Union[Callable, bool]]
If True, then the action modifies the configuration option on the
section specified. Otherwise, it should be a callable to use
when toggling this action. If None, then the action does not
behave like a checkbox.
triggered: callable
The callable to use when triggering this action.
data: Any
Data to be set on the action.
shortcut_context: str
Set the `str` context of the shortcut.
context: Qt.ShortcutContext
Set the context for the shortcut.
initial: object
Sets the initial state of a togglable action. This does not emit
the toggled signal.
section: Optional[str]
Name of the configuration section whose option is going to be
modified. If None, and `option` is not None, then it defaults to
the class `CONF_SECTION` attribute.
option: ConfigurationKey
Name of the configuration option whose value is reflected and
affected by the action.
register_shortcut: bool, optional
If True, main window will expose the shortcut in Preferences.
The default value is `False`.
parent: QWidget (None)
Define the parent of the widget. Use `self` if not provided.
register_action: bool, optional
If True, the action will be registered and searchable.
The default value is `True`.
overwrite: bool, optional
If True, in case of action overwriting no warning will be shown.
The default value is `False`
context_name: Optional[str]
Name of the context that holds the action in case of registration.
The combination of `name` and `context_name` is unique so trying
to register an action with the same `name` and `context_name` will
cause a warning unless `overwrite` is set to `True`.
menurole: QAction.MenuRole, optional
Menu role for the action (it only has effect on macOS).
Notes
-----
There is no need to set shortcuts right now. We only create actions
with this (and similar methods) and these are then exposed as possible
shortcuts on plugin registration in the main window with the
register_shortcut argument.
If icon_text is True, this will also disable the tooltip.
If a shortcut is found in the default config then it is assigned,
otherwise it's left blank for the user to define one for it.
"""
if triggered is None and toggled is None:
raise SpyderAPIError(
'Action must provide the toggled or triggered parameters!'
)
if parent is None:
parent = self
if toggled and not callable(toggled):
toggled = lambda value: None
if toggled is not None:
if section is None and option is not None:
section = self.CONF_SECTION
action = create_action(
parent,
text=text,
icon=icon,
tip=tip,
toggled=toggled,
triggered=triggered,
data=data,
context=context,
section=section,
shortcut=shortcut,
option=option,
id_=name,
plugin=self.PLUGIN_NAME,
context_name=(
self.CONTEXT_NAME if context_name is None else context_name),
register_action=register_action,
overwrite=overwrite,
menurole=menurole
)
action.name = name
if icon_text:
action.setIconText(icon_text)
action.text_beside_icon = bool(icon_text)
action.shortcut_context = shortcut_context
action.register_shortcut = register_shortcut
action.tip = tip
if initial is not None:
if toggled:
action.setChecked(initial)
elif triggered:
raise SpyderAPIError(
'Initial values can only apply to togglable actions!')
else:
if toggled:
if section is not None and option is not None:
value = CONF.get(section, option)
action.setChecked(value)
return action
def get_action(self, name: str, context: Optional[str] = None,
plugin: Optional[str] = None) -> Any:
"""
Return an action by name, context and plugin.
Parameters
----------
name: str
Name of the action to retrieve.
context: Optional[str]
Widget or context identifier under which the action was stored.
If None, then `CONTEXT_NAME` is used instead
plugin: Optional[str]
Name of the plugin where the action was defined. If None, then
`PLUGIN_NAME` is used.
Returns
-------
action: SpyderAction
The corresponding action stored under the given `name`, `context`
and `plugin`.
Raises
------
KeyError
If either of `name`, `context` or `plugin` keys do not exist in the
action registry.
"""
plugin = self.PLUGIN_NAME if plugin is None else plugin
context = self.CONTEXT_NAME if context is None else context
return ACTION_REGISTRY.get_reference(name, plugin, context)
def get_actions(self, context: Optional[str] = None,
plugin: Optional[str] = None) -> dict:
"""
Return all actions defined by a context on a given plugin.
Parameters
----------
context: Optional[str]
Widget or context identifier under which the actions were stored.
If None, then `CONTEXT_NAME` is used instead
plugin: Optional[str]
Name of the plugin where the actions were defined. If None, then
`PLUGIN_NAME` is used.
Returns
-------
actions: Dict[str, SpyderAction]
A dictionary that maps string keys to their corresponding actions.
Notes
-----
1. Actions should be created once. Creating new actions on menu popup
is *highly* discouraged.
2. Actions can be created directly on a PluginMainWidget or
PluginMainContainer subclass. Child widgets can also create
actions, but they need to subclass SpyderWidgetMixin.
3. PluginMainWidget or PluginMainContainer will collect any actions
defined in subwidgets (if defined) and expose them in the
get_actions method at the plugin level.
4. Any action created this way is now exposed as a possible shortcut
automatically without manual shortcut registration.
If an option is found in the config system then it is assigned,
otherwise it's left with an empty shortcut.
5. There is no need to override this method.
"""
plugin = self.PLUGIN_NAME if plugin is None else plugin
context = self.CONTEXT_NAME if context is None else context
return ACTION_REGISTRY.get_references(plugin, context)
def update_actions(self, options):
"""
Update the state of exposed actions.
Exposed actions are actions created by the `create_action` method.
"""
raise NotImplementedError('')
| SpyderActionMixin |
python | walkccc__LeetCode | solutions/2204. Distance to a Cycle in Undirected Graph/2204.py | {
"start": 0,
"end": 1432
} | class ____:
def distanceToCycle(self, n: int, edges: list[list[int]]) -> list[int]:
ans = [0] * n
graph = [[] for _ in range(n)]
for u, v in edges:
graph[u].append(v)
graph[v].append(u)
NO_RANK = -2
# The minRank that u can reach with forward edges
def getRank(u: int, currRank: int, rank: list[int]) -> int:
if rank[u] != NO_RANK: # The rank is already determined
return rank[u]
rank[u] = currRank
minRank = currRank
for v in graph[u]:
# Visited or parent (that's why NO_RANK = -2 instead of -1)
if rank[v] == len(rank) or rank[v] == currRank - 1:
continue
nextRank = getRank(v, currRank + 1, rank)
# NextRank should > currRank if there's no cycle
if nextRank <= currRank:
cycle.append(v)
minRank = min(minRank, nextRank)
rank[u] = len(rank) # Mark as visited.
return minRank
# rank[i] := the minimum node that node i can reach with forward edges
# Initialize with NO_RANK = -2 to indicate not visited.
cycle = []
getRank(0, 0, [NO_RANK] * n)
q = collections.deque(cycle)
seen = set(cycle)
step = 1
while q:
for _ in range(len(q)):
u = q.popleft()
for v in graph[u]:
if v in seen:
continue
q.append(v)
seen.add(v)
ans[v] = step
step += 1
return ans
| Solution |
python | wandb__wandb | wandb/vendor/pygments/lexers/dylan.py | {
"start": 8300,
"end": 8915
} | class ____(RegexLexer):
"""
For Dylan LID (Library Interchange Definition) files.
.. versionadded:: 1.6
"""
name = 'DylanLID'
aliases = ['dylan-lid', 'lid']
filenames = ['*.lid', '*.hdp']
mimetypes = ['text/x-dylan-lid']
flags = re.IGNORECASE
tokens = {
'root': [
# Whitespace
(r'\s+', Text),
# single line comment
(r'//.*?\n', Comment.Single),
# lid header
(r'(.*?)(:)([ \t]*)(.*(?:\n[ \t].+)*)',
bygroups(Name.Attribute, Operator, Text, String)),
]
}
| DylanLidLexer |
python | marshmallow-code__apispec | src/apispec/ext/marshmallow/__init__.py | {
"start": 3578,
"end": 8997
} | class ____(BasePlugin):
"""APISpec plugin for translating marshmallow schemas to OpenAPI/JSONSchema format.
:param callable schema_name_resolver: Callable to generate the schema definition name.
Receives the `Schema` class and returns the name to be used in refs within
the generated spec. When working with circular referencing this function
must must not return `None` for schemas in a circular reference chain.
Example: ::
from apispec.ext.marshmallow.common import resolve_schema_cls
def schema_name_resolver(schema):
schema_cls = resolve_schema_cls(schema)
return schema_cls.__name__
"""
Converter = OpenAPIConverter
Resolver = SchemaResolver
def __init__(
self,
schema_name_resolver: typing.Callable[[type[Schema]], str] | None = None,
) -> None:
super().__init__()
self.schema_name_resolver = schema_name_resolver or resolver
self.spec: APISpec | None = None
self.openapi_version: Version | None = None
self.converter: OpenAPIConverter | None = None
self.resolver: SchemaResolver | None = None
def init_spec(self, spec: APISpec) -> None:
super().init_spec(spec)
self.spec = spec
self.openapi_version = spec.openapi_version
self.converter = self.Converter(
openapi_version=spec.openapi_version,
schema_name_resolver=self.schema_name_resolver,
spec=spec,
)
self.resolver = self.Resolver(
openapi_version=spec.openapi_version, converter=self.converter
)
def map_to_openapi_type(self, field_cls, *args):
"""Set mapping for custom field class.
:param type field_cls: Field class to set mapping for.
``*args`` can be:
- a pair of the form ``(type, format)``
- a core marshmallow field type (in which case we reuse that type's mapping)
Examples: ::
# Override Integer mapping
class Int32(Integer):
# ...
ma_plugin.map_to_openapi_type(Int32, 'string', 'int32')
# Map to ('integer', None) like Integer
class IntegerLike(Integer):
# ...
ma_plugin.map_to_openapi_type(IntegerLike, Integer)
"""
assert self.converter is not None, "init_spec has not yet been called"
return self.converter.map_to_openapi_type(field_cls, *args)
def schema_helper(self, name, _, schema=None, **kwargs):
"""Definition helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` to provide OpenAPI
metadata.
:param type|Schema schema: A marshmallow Schema class or instance.
"""
if schema is None:
return None
schema_instance = resolve_schema_instance(schema)
schema_key = make_schema_key(schema_instance)
self.warn_if_schema_already_in_spec(schema_key)
assert self.converter is not None, "init_spec has not yet been called"
self.converter.refs[schema_key] = name
json_schema = self.converter.schema2jsonschema(schema_instance)
return json_schema
def parameter_helper(self, parameter, **kwargs):
"""Parameter component helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` in parameter definition.
:param dict parameter: parameter fields. May contain a marshmallow
Schema class or instance.
"""
assert self.resolver is not None, "init_spec has not yet been called"
self.resolver.resolve_schema(parameter)
return parameter
def response_helper(self, response, **kwargs):
"""Response component helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` in response definition.
:param dict parameter: response fields. May contain a marshmallow
Schema class or instance.
"""
assert self.resolver is not None, "init_spec has not yet been called"
self.resolver.resolve_response(response)
return response
def header_helper(self, header: dict, **kwargs: typing.Any):
"""Header component helper that allows using a marshmallow
:class:`Schema <marshmallow.Schema>` in header definition.
:param dict header: header fields. May contain a marshmallow
Schema class or instance.
"""
assert self.resolver # needed for mypy
self.resolver.resolve_schema(header)
return header
def operation_helper(
self,
path: str | None = None,
operations: dict | None = None,
**kwargs: typing.Any,
) -> None:
assert self.resolver # needed for mypy
self.resolver.resolve_operations(operations)
def warn_if_schema_already_in_spec(self, schema_key: tuple) -> None:
"""Method to warn the user if the schema has already been added to the
spec.
"""
assert self.converter # needed for mypy
if schema_key in self.converter.refs:
warnings.warn(
f"{schema_key[0]} has already been added to the spec. Adding it twice may "
"cause references to not resolve properly.",
UserWarning,
stacklevel=2,
)
| MarshmallowPlugin |
python | python__mypy | mypy/types.py | {
"start": 16361,
"end": 16962
} | class ____(Type):
"""Only used by find_isinstance_check() etc."""
__slots__ = ("type_guard",)
def __init__(self, type_guard: Type) -> None:
super().__init__(line=type_guard.line, column=type_guard.column)
self.type_guard = type_guard
def __repr__(self) -> str:
return f"TypeGuard({self.type_guard})"
# This may hide some real bugs, but it is convenient for various "synthetic"
# visitors, similar to RequiredType and ReadOnlyType below.
def accept(self, visitor: TypeVisitor[T]) -> T:
return self.type_guard.accept(visitor)
| TypeGuardedType |
python | huggingface__transformers | src/transformers/models/dpr/tokenization_dpr.py | {
"start": 15041,
"end": 15767
} | class ____(CustomDPRReaderTokenizerMixin, BertTokenizer):
r"""
Construct a DPRReader tokenizer.
[`DPRReaderTokenizer`] is almost identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
splitting and wordpiece. The difference is that is has three inputs strings: question, titles and texts that are
combined to be fed to the [`DPRReader`] model.
Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
__all__ = ["DPRContextEncoderTokenizer", "DPRQuestionEncoderTokenizer", "DPRReaderOutput", "DPRReaderTokenizer"]
| DPRReaderTokenizer |
python | walkccc__LeetCode | solutions/1240. Tiling a Rectangle with the Fewest Squares/1240.py | {
"start": 0,
"end": 827
} | class ____:
def tilingRectangle(self, n: int, m: int) -> int:
@functools.lru_cache(None)
def dp(heights: int) -> int:
minHeight = min(heights)
if minHeight == n: # All filled.
return 0
ans = m * n
heightsList = list(heights)
start = heightsList.index(minHeight)
# Try to put square of different size that doesn't exceed the width/height.
for sz in range(1, min(m - start + 1, n - minHeight + 1)):
# heights[start..start + sz) must has the same height.
if heights[start + sz - 1] != minHeight:
break
# Put a square of size `sz` to cover heights[start..start + sz).
heightslist[start:start + sz] = [minHeight + sz] * sz
ans = min(ans, dp(tuple(heightsList)))
return 1 + ans
return dp(tuple([0] * m))
| Solution |
python | weaviate__weaviate-python-client | weaviate/collections/filters.py | {
"start": 526,
"end": 6019
} | class ____:
@overload
@staticmethod
def convert(weav_filter: Literal[None]) -> None: ...
@overload
@staticmethod
def convert(weav_filter: _Filters) -> base_pb2.Filters: ...
@staticmethod
def convert(weav_filter: Optional[_Filters]) -> Optional[base_pb2.Filters]:
if weav_filter is None:
return None
elif isinstance(weav_filter, _FilterValue):
return _FilterToGRPC.__value_filter(weav_filter)
else:
return _FilterToGRPC.__and_or_not_filter(weav_filter)
@staticmethod
def __value_filter(weav_filter: _FilterValue) -> base_pb2.Filters:
return base_pb2.Filters(
operator=weav_filter.operator._to_grpc(),
value_text=_FilterToGRPC.__filter_to_text(weav_filter.value),
value_int=weav_filter.value if isinstance(weav_filter.value, int) else None,
value_boolean=weav_filter.value if isinstance(weav_filter.value, bool) else None, # type: ignore
value_number=(weav_filter.value if isinstance(weav_filter.value, float) else None),
value_int_array=_FilterToGRPC.__filter_to_int_list(weav_filter.value),
value_number_array=_FilterToGRPC.__filter_to_float_list(weav_filter.value),
value_text_array=_FilterToGRPC.__filter_to_text_list(weav_filter.value),
value_boolean_array=_FilterToGRPC.__filter_to_bool_list(weav_filter.value),
value_geo=_FilterToGRPC.__filter_to_geo(weav_filter.value),
target=_FilterToGRPC.__to_target(weav_filter.target),
)
@staticmethod
def __to_target(target: _FilterTargets) -> base_pb2.FilterTarget:
if isinstance(target, str):
return base_pb2.FilterTarget(property=target)
elif isinstance(target, _CountRef):
return base_pb2.FilterTarget(count=base_pb2.FilterReferenceCount(on=target.link_on))
elif isinstance(target, _SingleTargetRef):
assert target.target is not None
return base_pb2.FilterTarget(
single_target=base_pb2.FilterReferenceSingleTarget(
on=target.link_on, target=_FilterToGRPC.__to_target(target.target)
)
)
else:
assert isinstance(target, _MultiTargetRef)
assert target.target is not None
return base_pb2.FilterTarget(
multi_target=base_pb2.FilterReferenceMultiTarget(
on=target.link_on,
target=_FilterToGRPC.__to_target(target.target),
target_collection=target.target_collection,
)
)
@staticmethod
def __filter_to_geo(value: FilterValues) -> Optional[base_pb2.GeoCoordinatesFilter]:
if not (isinstance(value, _GeoCoordinateFilter)):
return None
return base_pb2.GeoCoordinatesFilter(
latitude=value.latitude, longitude=value.longitude, distance=value.distance
)
@staticmethod
def __filter_to_text(value: FilterValues) -> Optional[str]:
if not (
isinstance(value, TIME) or isinstance(value, str) or isinstance(value, uuid_lib.UUID)
):
return None
if isinstance(value, str):
return value
if isinstance(value, uuid_lib.UUID):
return str(value)
return _datetime_to_string(value)
@staticmethod
def __filter_to_text_list(value: FilterValues) -> Optional[base_pb2.TextArray]:
if not isinstance(value, list) or not (
isinstance(value[0], TIME)
or isinstance(value[0], str)
or isinstance(value[0], uuid_lib.UUID)
):
return None
if isinstance(value[0], str):
value_list = value
elif isinstance(value[0], uuid_lib.UUID):
value_list = [str(uid) for uid in value]
else:
dates = cast(List[TIME], value)
value_list = [_datetime_to_string(date) for date in dates]
return base_pb2.TextArray(values=cast(List[str], value_list))
@staticmethod
def __filter_to_bool_list(value: FilterValues) -> Optional[base_pb2.BooleanArray]:
if not isinstance(value, list) or not isinstance(value[0], bool):
return None
return base_pb2.BooleanArray(values=cast(List[bool], value))
@staticmethod
def __filter_to_float_list(value: FilterValues) -> Optional[base_pb2.NumberArray]:
if not isinstance(value, list) or not isinstance(value[0], float):
return None
return base_pb2.NumberArray(values=cast(List[float], value))
@staticmethod
def __filter_to_int_list(value: FilterValues) -> Optional[base_pb2.IntArray]:
if not isinstance(value, list) or not isinstance(value[0], int):
return None
return base_pb2.IntArray(values=cast(List[int], value))
@staticmethod
def __and_or_not_filter(weav_filter: _Filters) -> Optional[base_pb2.Filters]:
assert (
isinstance(weav_filter, _FilterAnd)
or isinstance(weav_filter, _FilterOr)
or isinstance(weav_filter, _FilterNot)
)
return base_pb2.Filters(
operator=weav_filter.operator._to_grpc(),
filters=[
filter_
for single_filter in weav_filter.filters
if (filter_ := _FilterToGRPC.convert(single_filter)) is not None
],
)
| _FilterToGRPC |
python | redis__redis-py | redis/asyncio/multidb/healthcheck.py | {
"start": 1988,
"end": 2859
} | class ____(AbstractHealthCheckPolicy):
"""
Policy that returns True if all health check probes are successful.
"""
def __init__(self, health_check_probes: int, health_check_delay: float):
super().__init__(health_check_probes, health_check_delay)
async def execute(self, health_checks: List[HealthCheck], database) -> bool:
for health_check in health_checks:
for attempt in range(self.health_check_probes):
try:
if not await health_check.check_health(database):
return False
except Exception as e:
raise UnhealthyDatabaseException("Unhealthy database", database, e)
if attempt < self.health_check_probes - 1:
await asyncio.sleep(self._health_check_delay)
return True
| HealthyAllPolicy |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/auto_materialize_rule_impls.py | {
"start": 8875,
"end": 13316
} | class ____(
NamedTuple(
"_AutoMaterializeAssetPartitionsFilter",
[("latest_run_required_tags", Optional[Mapping[str, str]])],
)
):
"""A filter that can be applied to an asset partition, during auto-materialize evaluation, and
returns a boolean for whether it passes.
Args:
latest_run_required_tags (Optional[Sequence[str]]): `passes` returns
True if the run responsible for the latest materialization of the asset partition
has all of these tags.
"""
@property
def description(self) -> str:
return f"latest run includes required tags: {self.latest_run_required_tags}"
def passes(
self,
context: "AutomationContext",
asset_partitions: Iterable[AssetKeyPartitionKey],
) -> Iterable[AssetKeyPartitionKey]:
if self.latest_run_required_tags is None:
return asset_partitions
will_update_asset_partitions: set[AssetKeyPartitionKey] = set()
storage_ids_to_fetch_by_key: dict[AssetKey, list[int]] = defaultdict(list)
for asset_partition in asset_partitions:
if context.legacy_context.will_update_asset_partition(asset_partition):
will_update_asset_partitions.add(asset_partition)
else:
latest_storage_id = context.legacy_context.instance_queryer.get_latest_materialization_or_observation_storage_id(
asset_partition=asset_partition
)
if latest_storage_id is not None:
storage_ids_to_fetch_by_key[asset_partition.asset_key].append(latest_storage_id)
asset_partitions_by_latest_run_id: dict[str, set[AssetKeyPartitionKey]] = defaultdict(set)
step = int(os.getenv("DAGSTER_ASSET_DAEMON_RUN_TAGS_EVENT_FETCH_LIMIT", "1000"))
for asset_key, storage_ids_to_fetch in storage_ids_to_fetch_by_key.items():
for i in range(0, len(storage_ids_to_fetch), step):
storage_ids = storage_ids_to_fetch[i : i + step]
fetch_records = (
context.legacy_context.instance_queryer.instance.fetch_observations
if context.legacy_context.asset_graph.get(asset_key).is_observable
else context.legacy_context.instance_queryer.instance.fetch_materializations
)
for record in fetch_records(
records_filter=AssetRecordsFilter(
asset_key=asset_key,
storage_ids=storage_ids,
),
limit=step,
).records:
asset_partitions_by_latest_run_id[record.run_id].add(
AssetKeyPartitionKey(asset_key, record.partition_key)
)
run_ids_with_required_tags = set()
if len(asset_partitions_by_latest_run_id) > 0:
run_step = int(os.getenv("DAGSTER_ASSET_DAEMON_RUN_TAGS_RUN_FETCH_LIMIT", "1000"))
required_tag_items = self.latest_run_required_tags.items()
run_ids_to_fetch = list(asset_partitions_by_latest_run_id.keys())
for i in range(0, len(run_ids_to_fetch), run_step):
run_ids = run_ids_to_fetch[i : i + run_step]
runs = context.legacy_context.instance_queryer.instance.get_runs(
filters=RunsFilter(run_ids=run_ids)
)
run_ids_with_required_tags.update(
{run.run_id for run in runs if required_tag_items <= run.tags.items()}
)
updated_partitions_with_required_tags = {
asset_partition
for run_id, run_id_asset_partitions in asset_partitions_by_latest_run_id.items()
if run_id in run_ids_with_required_tags
for asset_partition in run_id_asset_partitions
}
if (
self.latest_run_required_tags.items()
<= {
AUTO_MATERIALIZE_TAG: "true",
**context.legacy_context.auto_materialize_run_tags,
}.items()
):
return will_update_asset_partitions | updated_partitions_with_required_tags
else:
return updated_partitions_with_required_tags
def __hash__(self):
return hash(frozenset((self.latest_run_required_tags or {}).items()))
@whitelist_for_serdes
| AutoMaterializeAssetPartitionsFilter |
python | pypa__hatch | src/hatch/cli/terminal.py | {
"start": 3982,
"end": 13075
} | class ____:
def __init__(self, *, verbosity: int, enable_color: bool | None, interactive: bool | None):
# Force consistent output for test assertions
self.testing = "HATCH_SELF_TESTING" in os.environ
self.verbosity = verbosity
self.console = Console(
force_terminal=enable_color,
force_interactive=interactive,
no_color=enable_color is False,
markup=False,
emoji=False,
highlight=False,
legacy_windows=False if self.testing else None,
)
# Set defaults so we can pretty print before loading user config
self._style_level_success: Style | str = "bold cyan"
self._style_level_error: Style | str = "bold red"
self._style_level_warning: Style | str = "bold yellow"
self._style_level_waiting: Style | str = "bold magenta"
# Default is simply bold rather than bold white for shells that have been configured with a white background
self._style_level_info: Style | str = "bold"
self._style_level_debug: Style | str = "bold"
# Chosen as the default since it's compatible everywhere and looks nice
self._style_spinner = "simpleDotsScrolling"
@cached_property
def kv_separator(self) -> Text:
return self.style_warning("->")
def style_success(self, text: str) -> Text:
return Text(text, style=self._style_level_success)
def style_error(self, text: str) -> Text:
return Text(text, style=self._style_level_error)
def style_warning(self, text: str) -> Text:
return Text(text, style=self._style_level_warning)
def style_waiting(self, text: str) -> Text:
return Text(text, style=self._style_level_waiting)
def style_info(self, text: str) -> Text:
return Text(text, style=self._style_level_info)
def style_debug(self, text: str) -> Text:
return Text(text, style=self._style_level_debug)
def initialize_styles(self, styles: dict): # no cov
from rich.errors import StyleSyntaxError
from rich.spinner import Spinner
# Lazily display errors so that they use the correct style
errors = []
for option, style in styles.items():
attribute = f"_style_level_{option}"
default_level = getattr(self, attribute, None)
if default_level:
try:
parsed_style = Style.parse(style)
except StyleSyntaxError as e: # no cov
errors.append(f"Invalid style definition for `{option}`, defaulting to `{default_level}`: {e}")
parsed_style = Style.parse(default_level)
setattr(self, attribute, parsed_style)
elif option == "spinner":
try:
Spinner(style)
except KeyError as e:
errors.append(
f"Invalid style definition for `{option}`, defaulting to `{self._style_spinner}`: {e.args[0]}"
)
else:
self._style_spinner = style
else:
setattr(self, f"_style_{option}", style)
return errors
def display(self, text="", **kwargs):
self.console.print(text, style=self._style_level_info, overflow="ignore", no_wrap=True, crop=False, **kwargs)
def display_critical(self, text="", **kwargs):
self.console.stderr = True
try:
self.console.print(
text, style=self._style_level_error, overflow="ignore", no_wrap=True, crop=False, **kwargs
)
finally:
self.console.stderr = False
def display_error(self, text="", *, stderr=True, indent=None, link=None, **kwargs):
if self.verbosity < -2: # noqa: PLR2004
return
self._output(text, self._style_level_error, stderr=stderr, indent=indent, link=link, **kwargs)
def display_warning(self, text="", *, stderr=True, indent=None, link=None, **kwargs):
if self.verbosity < -1:
return
self._output(text, self._style_level_warning, stderr=stderr, indent=indent, link=link, **kwargs)
def display_info(self, text="", *, stderr=True, indent=None, link=None, **kwargs):
if self.verbosity < 0:
return
self._output(text, self._style_level_info, stderr=stderr, indent=indent, link=link, **kwargs)
def display_success(self, text="", *, stderr=True, indent=None, link=None, **kwargs):
if self.verbosity < 0:
return
self._output(text, self._style_level_success, stderr=stderr, indent=indent, link=link, **kwargs)
def display_waiting(self, text="", *, stderr=True, indent=None, link=None, **kwargs):
if self.verbosity < 0:
return
self._output(text, self._style_level_waiting, stderr=stderr, indent=indent, link=link, **kwargs)
def display_debug(self, text="", level=1, *, stderr=True, indent=None, link=None, **kwargs):
if not 1 <= level <= 3: # noqa: PLR2004
error_message = "Debug output can only have verbosity levels between 1 and 3 (inclusive)"
raise ValueError(error_message)
if self.verbosity < level:
return
self._output(text, self._style_level_debug, stderr=stderr, indent=indent, link=link, **kwargs)
def display_mini_header(self, text, *, stderr=False, indent=None, link=None):
if self.verbosity < 0:
return
self.display_info("[", stderr=stderr, indent=indent, end="")
self.display_success(text, stderr=stderr, link=link, end="")
self.display_info("]", stderr=stderr)
def display_header(self, title=""):
self.console.rule(Text(title, self._style_level_success))
def display_syntax(self, *args, **kwargs):
from rich.syntax import Syntax
kwargs.setdefault("background_color", "default" if self.testing else None)
self.output(Syntax(*args, **kwargs))
def display_markdown(self, text, **kwargs): # no cov
from rich.markdown import Markdown
self.output(Markdown(text), **kwargs)
def display_pair(self, key, value):
self.output(self.style_success(key), self.kv_separator, value)
def display_table(self, title, columns, *, show_lines=False, column_options=None, force_ascii=False, num_rows=0):
from rich.table import Table
if column_options is None:
column_options = {}
table_options = {}
if force_ascii:
from rich.box import ASCII_DOUBLE_HEAD
table_options["box"] = ASCII_DOUBLE_HEAD
table_options["safe_box"] = True
table = Table(title=title, show_lines=show_lines, title_style="", **table_options)
columns = dict(columns)
for column_title, indices in list(columns.items()):
if indices:
table.add_column(column_title, style="bold", **column_options.get(column_title, {}))
else:
columns.pop(column_title)
if not columns:
return
for i in range(num_rows or max(map(max, columns.values())) + 1):
row = [indices.get(i, "") for indices in columns.values()]
if any(row):
table.add_row(*row)
self.output(table)
@cached_property
def status(self) -> BorrowedStatus:
return BorrowedStatus(
self.console,
is_interactive=self.console.is_interactive,
verbosity=self.verbosity,
spinner_style=self._style_spinner,
waiting_style=self._style_level_waiting,
success_style=self._style_level_success,
initializer=lambda: setattr(self.platform, "displaying_status", True), # type: ignore[attr-defined]
finalizer=lambda: setattr(self.platform, "displaying_status", False), # type: ignore[attr-defined]
)
def status_if(self, *args, condition: bool, **kwargs) -> TerminalStatus:
return self.status(*args, **kwargs) if condition else NullStatus()
def _output(self, text="", style=None, *, stderr=False, indent=None, link=None, **kwargs):
if indent:
text = indent_text(text, indent)
if link:
style = style.update_link(self.platform.format_file_uri(link))
self.output(text, stderr=stderr, style=style, **kwargs)
def output(self, *args, stderr=False, **kwargs):
kwargs.setdefault("overflow", "ignore")
kwargs.setdefault("no_wrap", True)
kwargs.setdefault("crop", False)
if not stderr:
self.console.print(*args, **kwargs)
else:
self.console.stderr = True
try:
self.console.print(*args, **kwargs)
finally:
self.console.stderr = False
@staticmethod
def prompt(text, **kwargs):
return click.prompt(text, **kwargs)
@staticmethod
def confirm(text, **kwargs):
return click.confirm(text, **kwargs)
| Terminal |
python | astropy__astropy | astropy/visualization/wcsaxes/ticks.py | {
"start": 240,
"end": 6582
} | class ____(Line2D):
"""
Ticks are derived from Line2D, and note that ticks themselves
are markers. Thus, you should use set_mec, set_mew, etc.
To change the tick size (length), you need to use
set_ticksize. To change the direction of the ticks (ticks are
in opposite direction of ticklabels by default), use
set_tick_out(False).
Note that Matplotlib's defaults dictionary :data:`~matplotlib.rcParams`
contains default settings (color, size, width) of the form `xtick.*` and
`ytick.*`. In a WCS projection, there may not be a clear relationship
between axes of the projection and 'x' or 'y' axes. For this reason,
we read defaults from `xtick.*`. The following settings affect the
default appearance of ticks:
* `xtick.direction`
* `xtick.major.size`
* `xtick.major.width`
* `xtick.minor.size`
* `xtick.color`
Attributes
----------
ticks_locs : dict
This is set when the ticks are drawn, and is a mapping from axis to
the locations of the ticks for that axis.
"""
def __init__(self, frame=None, ticksize=None, **kwargs):
self._frame = frame
if ticksize is None:
ticksize = rcParams["xtick.major.size"]
self.set_ticksize(ticksize)
self.set_minor_ticksize(rcParams["xtick.minor.size"])
self.set_tick_out(rcParams["xtick.direction"] == "out")
self.clear()
line2d_kwargs = {
"color": rcParams["xtick.color"],
"linewidth": rcParams["xtick.major.width"],
}
line2d_kwargs.update(kwargs)
Line2D.__init__(self, [0.0], [0.0], **line2d_kwargs)
self.set_visible_axes("all")
self._display_minor_ticks = False
def display_minor_ticks(self, display_minor_ticks):
self._display_minor_ticks = display_minor_ticks
def get_display_minor_ticks(self):
return self._display_minor_ticks
def set_tick_out(self, tick_out):
"""
set True if tick need to be rotated by 180 degree.
"""
self._tick_out = tick_out
def get_tick_out(self):
"""
Return True if the tick will be rotated by 180 degree.
"""
return self._tick_out
def set_ticksize(self, ticksize):
"""
set length of the ticks in points.
"""
self._ticksize = ticksize
def get_ticksize(self):
"""
Return length of the ticks in points.
"""
return self._ticksize
def set_minor_ticksize(self, ticksize):
"""
set length of the minor ticks in points.
"""
self._minor_ticksize = ticksize
def get_minor_ticksize(self):
"""
Return length of the minor ticks in points.
"""
return self._minor_ticksize
@property
def out_size(self):
if self._tick_out:
return self._ticksize
else:
return 0.0
def set_visible_axes(self, visible_axes):
self._visible_axes = self._frame._validate_positions(visible_axes)
def get_visible_axes(self):
if self._visible_axes == "all":
return list(self._frame.keys())
else:
return [x for x in self._visible_axes if x in self._frame or x == "#"]
def clear(self):
self.world = defaultdict(list)
self.pixel = defaultdict(list)
self.angle = defaultdict(list)
self.disp = defaultdict(list)
self.minor_world = defaultdict(list)
self.minor_pixel = defaultdict(list)
self.minor_angle = defaultdict(list)
self.minor_disp = defaultdict(list)
def add(self, axis, world, pixel, angle, axis_displacement):
self.world[axis].append(world)
self.pixel[axis].append(pixel)
self.angle[axis].append(angle)
self.disp[axis].append(axis_displacement)
def get_minor_world(self):
return self.minor_world
def add_minor(
self, minor_axis, minor_world, minor_pixel, minor_angle, minor_axis_displacement
):
self.minor_world[minor_axis].append(minor_world)
self.minor_pixel[minor_axis].append(minor_pixel)
self.minor_angle[minor_axis].append(minor_angle)
self.minor_disp[minor_axis].append(minor_axis_displacement)
def __len__(self):
return len(self.world)
_tickvert_path = Path([[0.0, 0.0], [1.0, 0.0]])
def draw(self, renderer):
"""
Draw the ticks.
"""
self.ticks_locs = defaultdict(list)
if not self.get_visible():
return
offset = renderer.points_to_pixels(self.get_ticksize())
self._draw_ticks(renderer, self.pixel, self.angle, offset)
if self._display_minor_ticks:
offset = renderer.points_to_pixels(self.get_minor_ticksize())
self._draw_ticks(renderer, self.minor_pixel, self.minor_angle, offset)
def _draw_ticks(self, renderer, pixel_array, angle_array, offset):
"""
Draw the minor ticks.
"""
path_trans = self.get_transform()
gc = renderer.new_gc()
gc.set_foreground(self.get_color())
gc.set_alpha(self.get_alpha())
gc.set_linewidth(self.get_linewidth())
marker_scale = Affine2D().scale(offset, offset)
marker_rotation = Affine2D()
marker_transform = marker_scale + marker_rotation
initial_angle = 180.0 if self.get_tick_out() else 0.0
for axis in self.get_visible_axes():
if axis == "#":
continue
if axis not in pixel_array:
continue
for loc, angle in zip(pixel_array[axis], angle_array[axis]):
# Set the rotation for this tick
marker_rotation.rotate_deg(initial_angle + angle)
# Draw the markers
locs = path_trans.transform_non_affine(np.array([loc, loc]))
renderer.draw_markers(
gc,
self._tickvert_path,
marker_transform,
Path(locs),
path_trans.get_affine(),
)
# Reset the tick rotation before moving to the next tick
marker_rotation.clear()
self.ticks_locs[axis].append(locs)
gc.restore()
| Ticks |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-gmail-openai-agent/llama_index/packs/gmail_openai_agent/base.py | {
"start": 296,
"end": 1234
} | class ____(BaseLlamaPack):
def __init__(self, gmail_tool_kwargs: Dict[str, Any]) -> None:
"""Init params."""
try:
from llama_index.tools.google import GmailToolSpec
except ImportError:
raise ImportError("llama_hub not installed.")
self.tool_spec = GmailToolSpec(**gmail_tool_kwargs)
self.agent = FunctionAgent(
tools=self.tool_spec.to_tool_list(),
llm=OpenAI(model="gpt-4.1"),
)
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {"gmail_tool": self.tool_spec, "agent": self.agent}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return asyncio_run(self.arun(*args, **kwargs))
async def arun(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline asynchronously."""
return await self.agent.run(*args, **kwargs)
| GmailOpenAIAgentPack |
python | walkccc__LeetCode | solutions/2065. Maximum Path Quality of a Graph/2065-2.py | {
"start": 0,
"end": 723
} | class ____:
def maximalPathQuality(
self,
values: list[int],
edges: list[list[int]],
maxTime: int,
) -> int:
ans = 0
graph = [[] for _ in range(len(values))]
# (node, quality, remainingTime, seen)
q = collections.deque([(0, values[0], maxTime, {0})])
for u, v, time in edges:
graph[u].append((v, time))
graph[v].append((u, time))
while q:
u, quality, remainingTime, seen = q.popleft()
if u == 0:
ans = max(ans, quality)
for v, time in graph[u]:
if time <= remainingTime:
q.append(
(v, quality + values[v] * (v not in seen),
remainingTime - time, seen | set([v])))
return ans
| Solution |
python | django__django | tests/custom_lookups/tests.py | {
"start": 1143,
"end": 1518
} | class ____(models.Transform):
lookup_name = "div3"
def as_sql(self, compiler, connection):
lhs, lhs_params = compiler.compile(self.lhs)
return "(%s) %%%% 3" % lhs, lhs_params
def as_oracle(self, compiler, connection, **extra_context):
lhs, lhs_params = compiler.compile(self.lhs)
return "mod(%s, 3)" % lhs, lhs_params
| Div3Transform |
python | getsentry__sentry | tests/sentry/issues/auto_source_code_config/test_process_event.py | {
"start": 17650,
"end": 17899
} | class ____(BaseDeriveCodeMappings):
@property
def platform(self) -> str:
raise NotImplementedError
@property
def frames(self) -> list[dict[str, str | bool]]:
raise NotImplementedError
| LanguageSpecificDeriveCodeMappings |
python | pyca__cryptography | src/cryptography/x509/extensions.py | {
"start": 60529,
"end": 66453
} | class ____(ExtensionType):
oid = ExtensionOID.ISSUING_DISTRIBUTION_POINT
def __init__(
self,
full_name: Iterable[GeneralName] | None,
relative_name: RelativeDistinguishedName | None,
only_contains_user_certs: bool,
only_contains_ca_certs: bool,
only_some_reasons: frozenset[ReasonFlags] | None,
indirect_crl: bool,
only_contains_attribute_certs: bool,
) -> None:
if full_name is not None:
full_name = list(full_name)
if only_some_reasons and (
not isinstance(only_some_reasons, frozenset)
or not all(isinstance(x, ReasonFlags) for x in only_some_reasons)
):
raise TypeError(
"only_some_reasons must be None or frozenset of ReasonFlags"
)
if only_some_reasons and (
ReasonFlags.unspecified in only_some_reasons
or ReasonFlags.remove_from_crl in only_some_reasons
):
raise ValueError(
"unspecified and remove_from_crl are not valid reasons in an "
"IssuingDistributionPoint"
)
if not (
isinstance(only_contains_user_certs, bool)
and isinstance(only_contains_ca_certs, bool)
and isinstance(indirect_crl, bool)
and isinstance(only_contains_attribute_certs, bool)
):
raise TypeError(
"only_contains_user_certs, only_contains_ca_certs, "
"indirect_crl and only_contains_attribute_certs "
"must all be boolean."
)
# Per RFC5280 Section 5.2.5, the Issuing Distribution Point extension
# in a CRL can have only one of onlyContainsUserCerts,
# onlyContainsCACerts, onlyContainsAttributeCerts set to TRUE.
crl_constraints = [
only_contains_user_certs,
only_contains_ca_certs,
only_contains_attribute_certs,
]
if len([x for x in crl_constraints if x]) > 1:
raise ValueError(
"Only one of the following can be set to True: "
"only_contains_user_certs, only_contains_ca_certs, "
"only_contains_attribute_certs"
)
if not any(
[
only_contains_user_certs,
only_contains_ca_certs,
indirect_crl,
only_contains_attribute_certs,
full_name,
relative_name,
only_some_reasons,
]
):
raise ValueError(
"Cannot create empty extension: "
"if only_contains_user_certs, only_contains_ca_certs, "
"indirect_crl, and only_contains_attribute_certs are all False"
", then either full_name, relative_name, or only_some_reasons "
"must have a value."
)
self._only_contains_user_certs = only_contains_user_certs
self._only_contains_ca_certs = only_contains_ca_certs
self._indirect_crl = indirect_crl
self._only_contains_attribute_certs = only_contains_attribute_certs
self._only_some_reasons = only_some_reasons
self._full_name = full_name
self._relative_name = relative_name
def __repr__(self) -> str:
return (
f"<IssuingDistributionPoint(full_name={self.full_name}, "
f"relative_name={self.relative_name}, "
f"only_contains_user_certs={self.only_contains_user_certs}, "
f"only_contains_ca_certs={self.only_contains_ca_certs}, "
f"only_some_reasons={self.only_some_reasons}, "
f"indirect_crl={self.indirect_crl}, "
"only_contains_attribute_certs="
f"{self.only_contains_attribute_certs})>"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, IssuingDistributionPoint):
return NotImplemented
return (
self.full_name == other.full_name
and self.relative_name == other.relative_name
and self.only_contains_user_certs == other.only_contains_user_certs
and self.only_contains_ca_certs == other.only_contains_ca_certs
and self.only_some_reasons == other.only_some_reasons
and self.indirect_crl == other.indirect_crl
and self.only_contains_attribute_certs
== other.only_contains_attribute_certs
)
def __hash__(self) -> int:
if self.full_name is not None:
full_name: tuple[GeneralName, ...] | None = tuple(self.full_name)
else:
full_name = None
return hash(
(
full_name,
self.relative_name,
self.only_contains_user_certs,
self.only_contains_ca_certs,
self.only_some_reasons,
self.indirect_crl,
self.only_contains_attribute_certs,
)
)
@property
def full_name(self) -> list[GeneralName] | None:
return self._full_name
@property
def relative_name(self) -> RelativeDistinguishedName | None:
return self._relative_name
@property
def only_contains_user_certs(self) -> bool:
return self._only_contains_user_certs
@property
def only_contains_ca_certs(self) -> bool:
return self._only_contains_ca_certs
@property
def only_some_reasons(
self,
) -> frozenset[ReasonFlags] | None:
return self._only_some_reasons
@property
def indirect_crl(self) -> bool:
return self._indirect_crl
@property
def only_contains_attribute_certs(self) -> bool:
return self._only_contains_attribute_certs
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
| IssuingDistributionPoint |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP037_3.py | {
"start": 311,
"end": 478
} | class ____:
_singleton: ClassVar[Optional["EmptyCell"]] = None
# the behavior of _singleton above should match a non-ClassVar
_doubleton: "EmptyCell"
| EmptyCell |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_changed_validator.py | {
"start": 3022,
"end": 3256
} | class ____:
def test_extract_class_with_docstring(self):
with tempfile.TemporaryDirectory() as temp_dir:
test_file = Path(temp_dir) / "test_module.py"
test_file.write_text('''
| TestExtractSymbolsFromFile |
python | scipy__scipy | scipy/odr/_odrpack.py | {
"start": 5422,
"end": 10676
} | class ____:
"""
The data to fit.
Parameters
----------
x : array_like
Observed data for the independent variable of the regression
y : array_like, optional
If array-like, observed data for the dependent variable of the
regression. A scalar input implies that the model to be used on
the data is implicit.
we : array_like, optional
If `we` is a scalar, then that value is used for all data points (and
all dimensions of the response variable).
If `we` is a rank-1 array of length q (the dimensionality of the
response variable), then this vector is the diagonal of the covariant
weighting matrix for all data points.
If `we` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the i'th response variable
observation (single-dimensional only).
If `we` is a rank-2 array of shape (q, q), then this is the full
covariant weighting matrix broadcast to each observation.
If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the
diagonal of the covariant weighting matrix for the i'th observation.
If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the
full specification of the covariant weighting matrix for each
observation.
If the fit is implicit, then only a positive scalar value is used.
wd : array_like, optional
If `wd` is a scalar, then that value is used for all data points
(and all dimensions of the input variable). If `wd` = 0, then the
covariant weighting matrix for each observation is set to the identity
matrix (so each dimension of each observation has the same weight).
If `wd` is a rank-1 array of length m (the dimensionality of the input
variable), then this vector is the diagonal of the covariant weighting
matrix for all data points.
If `wd` is a rank-1 array of length n (the number of data points), then
the i'th element is the weight for the ith input variable observation
(single-dimensional only).
If `wd` is a rank-2 array of shape (m, m), then this is the full
covariant weighting matrix broadcast to each observation.
If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the
diagonal of the covariant weighting matrix for the ith observation.
If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the
full specification of the covariant weighting matrix for each
observation.
fix : array_like of ints, optional
The `fix` argument is the same as ifixx in the class ODR. It is an
array of integers with the same shape as data.x that determines which
input observations are treated as fixed. One can use a sequence of
length m (the dimensionality of the input observations) to fix some
dimensions for all observations. A value of 0 fixes the observation,
a value > 0 makes it free.
meta : dict, optional
Free-form dictionary for metadata.
Notes
-----
Each argument is attached to the member of the instance of the same name.
The structures of `x` and `y` are described in the Model class docstring.
If `y` is an integer, then the Data instance can only be used to fit with
implicit models where the dimensionality of the response is equal to the
specified value of `y`.
The `we` argument weights the effect a deviation in the response variable
has on the fit. The `wd` argument weights the effect a deviation in the
input variable has on the fit. To handle multidimensional inputs and
responses easily, the structure of these arguments has the n'th
dimensional axis first. These arguments heavily use the structured
arguments feature of ODRPACK to conveniently and flexibly support all
options. See the ODRPACK User's Guide for a full explanation of how these
weights are used in the algorithm. Basically, a higher value of the weight
for a particular data point makes a deviation at that point more
detrimental to the fit.
"""
def __init__(self, x, y=None, we=None, wd=None, fix=None, meta=None):
self.x = _conv(x)
if not isinstance(self.x, np.ndarray):
raise ValueError("Expected an 'ndarray' of data for 'x', "
f"but instead got data of type '{type(self.x).__name__}'")
self.y = _conv(y)
self.we = _conv(we)
self.wd = _conv(wd)
self.fix = _conv(fix)
self.meta = {} if meta is None else meta
def set_meta(self, **kwds):
""" Update the metadata dictionary with the keywords and data provided
by keywords.
Examples
--------
::
data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay")
"""
self.meta.update(kwds)
def __getattr__(self, attr):
""" Dispatch attribute access to the metadata dictionary.
"""
if attr != "meta" and attr in self.meta:
return self.meta[attr]
else:
raise AttributeError(f"'{attr}' not in metadata")
| Data |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP046_0.py | {
"start": 800,
"end": 846
} | class ____(Generic[AnyStr]):
s: AnyStr
| MyStr |
python | sqlalchemy__sqlalchemy | test/ext/test_mutable.py | {
"start": 44824,
"end": 46463
} | class ____(_CompositeTestBase, fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"foo",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("x", Integer),
Column("y", Integer),
)
Table(
"subfoo",
metadata,
Column("id", Integer, ForeignKey("foo.id"), primary_key=True),
)
@classmethod
def setup_mappers(cls):
foo = cls.tables.foo
subfoo = cls.tables.subfoo
cls.Point = cls._type_fixture()
cls.mapper_registry.map_imperatively(
Foo,
foo,
properties={"data": composite(cls.Point, foo.c.x, foo.c.y)},
)
cls.mapper_registry.map_imperatively(SubFoo, subfoo, inherits=Foo)
def test_in_place_mutation_subclass(self):
sess = fixture_session()
d = self.Point(3, 4)
f1 = SubFoo(data=d)
sess.add(f1)
sess.commit()
f1.data.y = 5
sess.commit()
eq_(f1.data, self.Point(3, 5))
def test_pickle_of_parent_subclass(self):
sess = fixture_session()
d = self.Point(3, 4)
f1 = SubFoo(data=d)
sess.add(f1)
sess.commit()
f1.data
assert "data" in f1.__dict__
sess.close()
for loads, dumps in picklers():
sess = fixture_session()
f2 = loads(dumps(f1))
sess.add(f2)
f2.data.y = 12
assert f2 in sess.dirty
| MutableInheritedCompositesTest |
python | walkccc__LeetCode | solutions/1121. Divide Array Into Increasing Sequences/1121.py | {
"start": 0,
"end": 435
} | class ____:
def canDivideIntoSubsequences(self, nums: list[int], k: int) -> bool:
# Find the number with the maxFreq, we need at least maxFreq * k elements
# e.g. nums = [1, 2, 2, 3, 4], we have maxFreq = 2 (two 2s), so we have to
# Split nums into two subsequences say k = 3, the minimum length of nums is 2 x
# 3 = 6, which is impossible if len(nums) = 5
return len(nums) >= k * max(Counter(nums).values())
| Solution |
python | ipython__ipython | tests/test_interactiveshell.py | {
"start": 24395,
"end": 24832
} | class ____(ExitCodeChecks):
def setUp(self):
super().setUp()
self.system = ip.system_piped
@skip_win32
def test_exit_code_ok(self):
ExitCodeChecks.test_exit_code_ok(self)
@skip_win32
def test_exit_code_error(self):
ExitCodeChecks.test_exit_code_error(self)
@skip_win32
def test_exit_code_signal(self):
ExitCodeChecks.test_exit_code_signal(self)
| TestSystemPipedExitCode |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/utils.py | {
"start": 5709,
"end": 6336
} | class ____(QtCore.QObject):
sigFpsUpdate = QtCore.Signal(object)
def __init__(self, interval=1000):
super().__init__()
self.count = 0
self.last_update = 0
self.interval = interval
def update(self):
self.count += 1
if self.last_update == 0:
self.last_update = perf_counter()
self.startTimer(self.interval)
def timerEvent(self, evt):
now = perf_counter()
elapsed = now - self.last_update
fps = self.count / elapsed
self.last_update = now
self.count = 0
self.sigFpsUpdate.emit(fps)
| FrameCounter |
python | python__mypy | mypyc/test-data/fixtures/ir.py | {
"start": 673,
"end": 821
} | class ____(Protocol[T_contra, T_co]):
def __rdivmod__(self, other: T_contra) -> T_co: ...
_M = TypeVar("_M", contravariant=True)
| __SupportsRDivMod |
python | keras-team__keras | keras/src/layers/preprocessing/feature_space.py | {
"start": 30303,
"end": 30373
} | class ____(DataLayer):
def call(self, x):
return x
| TFDIdentity |
python | pytorch__pytorch | torch/_inductor/template_heuristics/triton.py | {
"start": 89487,
"end": 90171
} | class ____(ScaledMMConfigMixin, CPUConfigHeuristic):
"""Scaled MM template heuristic for CPU (non-TMA)"""
def __init__(self) -> None:
super().__init__()
# Override mm_configs to use scaled_mm_configs
self.mm_configs = self.scaled_mm_configs
# NOTE: overriding exhaustive configs here to be the same as mm_configs
# as we haven't validated exhaustive support here yet
# TODO(coconutruben): remove this once we have validated exhaustive support
# for scaled_mm
self.exhaustive_configs = self.scaled_mm_configs
@register_template_heuristic(mm_template.uid, "cpu", op_name="int_mm")
| CPUScaledMMTemplateConfigHeuristic |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mssql/base.py | {
"start": 80914,
"end": 92105
} | class ____(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
# type is not accepted in a computed column
if column.computed is not None:
colspec += " " + self.process(column.computed)
else:
colspec += " " + self.dialect.type_compiler_instance.process(
column.type, type_expression=column
)
if column.nullable is not None:
if (
not column.nullable
or column.primary_key
or isinstance(column.default, sa_schema.Sequence)
or column.autoincrement is True
or column.identity
):
colspec += " NOT NULL"
elif column.computed is None:
# don't specify "NULL" for computed columns
colspec += " NULL"
if column.table is None:
raise exc.CompileError(
"mssql requires Table-bound columns "
"in order to generate DDL"
)
d_opt = column.dialect_options["mssql"]
start = d_opt["identity_start"]
increment = d_opt["identity_increment"]
if start is not None or increment is not None:
if column.identity:
raise exc.CompileError(
"Cannot specify options 'mssql_identity_start' and/or "
"'mssql_identity_increment' while also using the "
"'Identity' construct."
)
util.warn_deprecated(
"The dialect options 'mssql_identity_start' and "
"'mssql_identity_increment' are deprecated. "
"Use the 'Identity' object instead.",
"1.4",
)
if column.identity:
colspec += self.process(column.identity, **kwargs)
elif (
column is column.table._autoincrement_column
or column.autoincrement is True
) and (
not isinstance(column.default, Sequence) or column.default.optional
):
colspec += self.process(Identity(start=start, increment=increment))
else:
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
return colspec
def visit_create_index(self, create, include_schema=False, **kw):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
# handle clustering option
clustered = index.dialect_options["mssql"]["clustered"]
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
# handle columnstore option (has no negative value)
columnstore = index.dialect_options["mssql"]["columnstore"]
if columnstore:
text += "COLUMNSTORE "
text += "INDEX %s ON %s" % (
self._prepared_index_name(index, include_schema=include_schema),
preparer.format_table(index.table),
)
# in some case mssql allows indexes with no columns defined
if len(index.expressions) > 0:
text += " (%s)" % ", ".join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True
)
for expr in index.expressions
)
# handle other included columns
if index.dialect_options["mssql"]["include"]:
inclusions = [
index.table.c[col] if isinstance(col, str) else col
for col in index.dialect_options["mssql"]["include"]
]
text += " INCLUDE (%s)" % ", ".join(
[preparer.quote(c.name) for c in inclusions]
)
whereclause = index.dialect_options["mssql"]["where"]
if whereclause is not None:
whereclause = coercions.expect(
roles.DDLExpressionRole, whereclause
)
where_compiled = self.sql_compiler.process(
whereclause, include_table=False, literal_binds=True
)
text += " WHERE " + where_compiled
return text
def visit_drop_index(self, drop, **kw):
return "\nDROP INDEX %s ON %s" % (
self._prepared_index_name(drop.element, include_schema=False),
self.preparer.format_table(drop.element.table),
)
def visit_create_table_as(self, element, **kw):
prep = self.preparer
# SQL Server doesn't support CREATE TABLE AS, use SELECT INTO instead
# Format: SELECT columns INTO new_table FROM source WHERE ...
qualified = prep.format_table(element.table)
# Get the inner SELECT SQL
inner_kw = dict(kw)
inner_kw["literal_binds"] = True
select_sql = self.sql_compiler.process(element.selectable, **inner_kw)
# Inject INTO clause before FROM keyword
# Find FROM position (case-insensitive)
select_upper = select_sql.upper()
from_idx = select_upper.find(" FROM ")
if from_idx == -1:
from_idx = select_upper.find("\nFROM ")
if from_idx == -1:
raise exc.CompileError(
"Could not find FROM keyword in selectable for CREATE TABLE AS"
)
# Insert INTO clause before FROM
result = (
select_sql[:from_idx]
+ f"INTO {qualified} "
+ select_sql[from_idx:]
)
return result
def visit_create_view(self, create, **kw):
# SQL Server uses CREATE OR ALTER instead of CREATE OR REPLACE
result = super().visit_create_view(create, **kw)
if create.or_replace:
result = result.replace("CREATE OR REPLACE", "CREATE OR ALTER")
return result
def visit_primary_key_constraint(self, constraint, **kw):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % self.preparer.format_constraint(
constraint
)
text += "PRIMARY KEY "
clustered = constraint.dialect_options["mssql"]["clustered"]
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "(%s)" % ", ".join(
self.preparer.quote(c.name) for c in constraint
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_unique_constraint(self, constraint, **kw):
if len(constraint) == 0:
return ""
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE %s" % self.define_unique_constraint_distinct(
constraint, **kw
)
clustered = constraint.dialect_options["mssql"]["clustered"]
if clustered is not None:
if clustered:
text += "CLUSTERED "
else:
text += "NONCLUSTERED "
text += "(%s)" % ", ".join(
self.preparer.quote(c.name) for c in constraint
)
text += self.define_constraint_deferrability(constraint)
return text
def visit_computed_column(self, generated, **kw):
text = "AS (%s)" % self.sql_compiler.process(
generated.sqltext, include_table=False, literal_binds=True
)
# explicitly check for True|False since None means server default
if generated.persisted is True:
text += " PERSISTED"
return text
def visit_set_table_comment(self, create, **kw):
schema = self.preparer.schema_for_object(create.element)
schema_name = schema if schema else self.dialect.default_schema_name
return (
"execute sp_addextendedproperty 'MS_Description', "
"{}, 'schema', {}, 'table', {}".format(
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.NVARCHAR()
),
self.preparer.quote_schema(schema_name),
self.preparer.format_table(create.element, use_schema=False),
)
)
def visit_drop_table_comment(self, drop, **kw):
schema = self.preparer.schema_for_object(drop.element)
schema_name = schema if schema else self.dialect.default_schema_name
return (
"execute sp_dropextendedproperty 'MS_Description', 'schema', "
"{}, 'table', {}".format(
self.preparer.quote_schema(schema_name),
self.preparer.format_table(drop.element, use_schema=False),
)
)
def visit_set_column_comment(self, create, **kw):
schema = self.preparer.schema_for_object(create.element.table)
schema_name = schema if schema else self.dialect.default_schema_name
return (
"execute sp_addextendedproperty 'MS_Description', "
"{}, 'schema', {}, 'table', {}, 'column', {}".format(
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.NVARCHAR()
),
self.preparer.quote_schema(schema_name),
self.preparer.format_table(
create.element.table, use_schema=False
),
self.preparer.format_column(create.element),
)
)
def visit_drop_column_comment(self, drop, **kw):
schema = self.preparer.schema_for_object(drop.element.table)
schema_name = schema if schema else self.dialect.default_schema_name
return (
"execute sp_dropextendedproperty 'MS_Description', 'schema', "
"{}, 'table', {}, 'column', {}".format(
self.preparer.quote_schema(schema_name),
self.preparer.format_table(
drop.element.table, use_schema=False
),
self.preparer.format_column(drop.element),
)
)
def visit_create_sequence(self, create, **kw):
prefix = None
if create.element.data_type is not None:
data_type = create.element.data_type
prefix = " AS %s" % self.type_compiler.process(data_type)
return super().visit_create_sequence(create, prefix=prefix, **kw)
def visit_identity_column(self, identity, **kw):
text = " IDENTITY"
if identity.start is not None or identity.increment is not None:
start = 1 if identity.start is None else identity.start
increment = 1 if identity.increment is None else identity.increment
text += "(%s,%s)" % (start, increment)
return text
| MSDDLCompiler |
python | doocs__leetcode | solution/2600-2699/2660.Determine the Winner of a Bowling Game/Solution.py | {
"start": 0,
"end": 410
} | class ____:
def isWinner(self, player1: List[int], player2: List[int]) -> int:
def f(arr: List[int]) -> int:
s = 0
for i, x in enumerate(arr):
k = 2 if (i and arr[i - 1] == 10) or (i > 1 and arr[i - 2] == 10) else 1
s += k * x
return s
a, b = f(player1), f(player2)
return 1 if a > b else (2 if b > a else 0)
| Solution |
python | pytorch__pytorch | torch/distributed/fsdp/_common_utils.py | {
"start": 6720,
"end": 22547
} | class ____(Enum):
"""
An enum that indicates the state of a ``FlatParamHandle`.
"""
IDLE = auto()
FORWARD = auto()
BACKWARD_PRE = auto()
BACKWARD_POST = auto()
SUMMON_FULL_PARAMS = auto()
def _is_composable(state: _FSDPState):
# TODO: This is a temporary hack for differentiate between code paths.
return not isinstance(state, nn.Module)
@no_type_check
def _module_handle(state: _FSDPState, module: nn.Module) -> Optional["FlatParamHandle"]:
"""
Returns the ``FlatParamHandle`` s corresponding to ``module``. This is
the handle that contains some parameter in ``module``.
"""
if _is_composable(state):
# A valid FSDP state may have no managed parameters and hence no
# handles, meaning no entry in `_fully_sharded_module_to_handles`
if state._handle is None:
return None
if module not in state._fully_sharded_module_to_handle:
raise AssertionError(
f"Expects a fully sharded module but got {module} on rank {state.rank}"
)
return state._fully_sharded_module_to_handle[module]
else:
# NOTE: This assumes `module` is a `FullyShardedDataParallel` instance.
return module._handle
@no_type_check
def _has_fsdp_params(state: _FSDPState, module: nn.Module) -> bool:
"""Returns if ``module`` has parameters managed by FSDP."""
return _module_handle(state, module) is not None
def _get_sharding_strategy(handle):
"""
Returns the sharding strategy of the handle.
"""
return handle._sharding_strategy if handle else None
def clean_tensor_name(tensor_name: str) -> str:
"""
Cleans the parameter or buffer name by removing any module wrapper
prefixes.
"""
tensor_name = tensor_name.replace(FSDP_PREFIX, "")
# TODO: Explicitly replacing the checkpoint wrapper prefix is not ideal as
# it couples `CheckpointWrapper` and FSDP and also does not scale for more
# module wrappers.
tensor_name = tensor_name.replace(_CHECKPOINT_PREFIX, "")
return tensor_name
def _set_fsdp_flattened(tensor: torch.Tensor) -> None:
"""
Sets an attribute on ``tensor`` to mark it as flattened by FSDP. This is to
avoid re-flattening it during nested construction.
"""
setattr(tensor, FSDP_FLATTENED, True)
def _is_fsdp_flattened(tensor: torch.Tensor) -> bool:
"""Returns if ``tensor`` has been marked as flattened by FSDP."""
return getattr(tensor, FSDP_FLATTENED, False)
def _named_parameters_with_duplicates(
module: nn.Module, **kwargs: Any
) -> list[tuple[str, nn.Parameter]]:
"""
This API is required as some modules overwrite `named_parameters()` but do not support
`remove_duplicate`.
"""
if "remove_duplicate" in kwargs:
raise AssertionError(
"_named_parameters_with_duplicates cannot be used with `remove_duplicate` argument."
)
kwargs["remove_duplicate"] = False
try:
ret = list(module.named_parameters(**kwargs))
except AssertionError:
kwargs.pop("remove_duplicate")
ret = list(module.named_parameters(**kwargs))
return ret
def _get_param_to_fqns(
model: torch.nn.Module,
dedup_shared_params: bool = True,
) -> dict[nn.Parameter, list[str]]:
"""
Constructs a mapping from parameter to a list of its \"canonical\" FQNs. Here,
we use canonical to mean the fully-qualified name assigned to the parameter
based on its position in the original nn.Module hierarchy before any wrapper
or parallelism has been applied to it. This is in contrast to FQNs that may be
generated after parallelisms or wrappers have been applied to the model.
Each normal parameter maps to a singleton list containing its FQN, while each
``FlatParameter`` maps to a list of its original parameter FQNs, which may
have length greater than one. All FQNs are prefixed starting from ``model``.
In the case where FSDP was applied with ``use_orig_params=True``, there should be no
``FlatParameter`` s registered to the model's modules and this mapping will only
contain mappings from ``nn.Parameter`` s to singleton FQN lists.
It is only in the case where FSDP was applied with ``use_orig_params=False`` where
a ``FlatParameter`` will be registered in place of the original parameters and there
will be mappings from each ``FlatParameter`` to lists of FQNs corresponding to the
original parameters.
Args:
model (torch.nn.Module): Root module (which may or may not be a
:class:`FullyShardedDataParallel` instance).
dedup_shared_params (bool): For shared parameters, if ``True``, only
includes the FQNs corresponding to the first encounter of the
shared parameter in the module traversal; if ``False``, then
includes the FQNs across all encounters. (Default: ``True``)
"""
def module_fn(module, prefix, tree_level, param_to_fqns):
for param_name, param in _named_parameters_with_duplicates(
module, recurse=False
):
local_fqns = (
param._fqns
if isinstance(param, flat_param_file.FlatParameter)
else [param_name]
) # prefixed from `module`
global_fqns = [
clean_tensor_name(prefix + name) for name in local_fqns
] # prefixed from the top level `model` (i.e. including `prefix`)
is_shared_param = param in param_to_fqns
if not is_shared_param:
param_to_fqns[param] = global_fqns
else:
if isinstance(param, flat_param_file.FlatParameter):
# DMP overwrites `named_parameters` and skip (advance to
# the next child module) the wrapped_module (e.g.,
# _dmp_wrapped_module and _fsdp_wrapped_module). When a user
# calls `named_child` to traverse the module recursively and
# calls `named_parameters` with `recurse=False`, parameters
# will be traversed more than once.
# This hack is specified designed for DMP + FSDP. We
# overwrite the flat_parameters traversal result to only obtain
# the last one, which happens to be the correct one.
#
# TODO: Remove this hack once DMP + FSDP is not supported.
warnings.warn(
"FlatParameter is being traversed more than once. "
"This case should only happen when using "
"DistributedModelParallel with FullyShardedDataParallel.",
stacklevel=2,
)
param_to_fqns[param] = global_fqns
elif not dedup_shared_params:
param_to_fqns[param].extend(global_fqns)
def return_fn(param_to_fqns):
return param_to_fqns
param_to_unflat_param_names: dict[torch.nn.Parameter, list[str]] = {}
return _apply_to_modules(
model,
module_fn,
return_fn,
[key for key, _ in _named_parameters_with_duplicates(model)],
param_to_unflat_param_names,
)
@no_type_check
def _log_post_backward_hook(
state: _FSDPState, handle: "FlatParamHandle", logger: logging.Logger
) -> None:
# Under TORCH_DISTRIBUTED_DEBUG=INFO, log the module names this hook fires for.
# Below logging of module names this post-bwd hook fires for can help debug certain
# cases where hooks don't fire, such as under certain activation checkpoint configs.
if state._use_orig_params and handle._debug_level == dist.DebugLevel.INFO:
param_fqns = _get_handle_fqns_from_root(state, handle)
logger.warning("FSDP firing post-backward hooks for parameters %s", param_fqns)
@no_type_check
def _get_handle_fqns_from_root(
state: _FSDPState, handle: "FlatParamHandle"
) -> Optional[list[str]]:
if handle is None:
return None
param_to_fqn = state._exec_order_data.param_to_fqn
handle_params = handle.flat_param._params # only populated for use_orig_params
param_fqns = [*chain.from_iterable(param_to_fqn[p] for p in handle_params)]
return param_fqns
def _apply_to_modules(
root_module: torch.nn.Module,
module_fn: Callable,
return_fn: Callable,
filter_fqns: Optional[list[str]] = None,
*args,
**kwargs,
):
"""
Performs a pre-order traversal of the modules in the hierarchy rooted at
``root_module``, applying ``module_fn`` at each module and finally
returning a value using ``return_fn``. The traversal constructs the full
module prefix name (e.g. "module.submodule." just like in model state dict)
and makes that available to ``module_fn``.
``filter_fqns`` is used because some module may have its own prefix similar
to ``FullyShardedDataParallel`` and the ``named_parameters()`` is overwritten
to remove the prefix.
"""
def f(module: torch.nn.Module, prefix: str, tree_level: int, *args, **kwargs):
# Call the module function before recursing over children (pre-order)
module_fn(module, prefix, tree_level, *args, **kwargs)
for submodule_name, submodule in module.named_children():
if submodule is None:
continue
new_prefix = prefix + submodule_name + "."
new_tree_level = tree_level + 1
if filter_fqns is not None:
for fqn in filter_fqns:
if fqn.startswith(new_prefix):
break
else:
# DMP's named_parameter() will mess up the traversal with
# ``named_children`` + `named_parameter(recurse=False)``.
# This hack is a must to make the traversal work.
# TODO: Remove this hack once DMP + FSDP is not supported.
# It turns out that recursive wrapping may trigger this as
# well.
if (
submodule_name == "_fsdp_wrapped_module"
or submodule_name == "_dmp_wrapped_module"
):
new_prefix = prefix
elif submodule_name == "module":
new_prefix = prefix
f(submodule, new_prefix, new_tree_level, *args, **kwargs)
f(root_module, "", 0, *args, **kwargs)
return return_fn(*args, **kwargs)
@no_type_check
def _assert_in_training_states(
state: _FSDPState,
training_states: list[TrainingState],
) -> None:
"""Asserts that FSDP is in the states ``_training_states``."""
# Raise a `ValueError` instead of using `assert` to ensure that these
# logical assertions run even if `assert`s are disabled
if state.training_state not in training_states:
msg = (
f"expected to be in states {training_states} but current state is "
f"{state.training_state}"
)
# Print the error on rank 0 in case this is called in the backward pass
if state.rank == 0:
if isinstance(state, nn.Module):
print(f"Asserting FSDP instance is: {state}")
print(f"ERROR: {msg}")
traceback.print_stack()
raise ValueError(msg)
def _get_root_modules(modules: set[nn.Module]) -> set[nn.Module]:
"""
Returns:
Set[nn.Module]: The subset of ``modules`` that are root modules (i.e.
parent-less) with respect to the modules in the set itself. In other
words, these are the modules in ``modules`` that are not the child of
any other module in ``modules``.
"""
root_modules: set[nn.Module] = set()
module_to_submodules = {module: set(module.modules()) for module in modules}
for candidate_module in modules:
is_root_module = True
for module, submodules in module_to_submodules.items():
is_child_module = (
candidate_module is not module and candidate_module in submodules
)
if is_child_module:
is_root_module = False
break
if is_root_module:
root_modules.add(candidate_module)
return root_modules
def _override_module_mixed_precision(
root: torch.nn.Module,
module_classes_to_override: Iterable[type[nn.Module]],
wrap_override_dict: dict[str, Any] = {"mixed_precision": None}, # noqa: B006
) -> set[type[nn.Module]]:
module_classes_to_override = tuple(set(module_classes_to_override))
# Return a set of the actually overridden module classes
overridden_module_classes: set[type[nn.Module]] = set()
for mod in root.modules():
if isinstance(mod, module_classes_to_override):
overridden_module_classes.add(type(mod))
mod._wrap_overrides = wrap_override_dict # type: ignore[assignment]
# TODO: We need to run this mixed precision ignored module in fp32,
# but ensure subsequent modules, that may possibly be running with
# mixed precision, still receive the appropriate precision inputs
# without user having to adjust mixed precision config too much.
# As a result, we attach pre and post forward hooks to up / down
# cast. We should revisit this design.
def cast_fn(
dtype: torch.dtype, module: nn.Module, x: torch.Tensor
) -> torch.Tensor:
if not torch.is_floating_point(x) or x.dtype == dtype:
return x
_MODULE_TO_INP_DTYPE[module] = x.dtype
return x.to(dtype)
def forward_pre_hook(module, args):
return _apply_to_tensors(partial(cast_fn, torch.float32, module), args)
def forward_post_hook(module, args, output):
# NOTE: If the forward did not have any floating-point tensors,
# then the dtype will not be set for this module, and we do not
# upcast the dtype.
if module in _MODULE_TO_INP_DTYPE:
old_dtype = _MODULE_TO_INP_DTYPE[module]
return _apply_to_tensors(
partial(cast_fn, old_dtype, module), output
)
# We intentionally append both of these hooks so that they run after
# all other hooks.
mod.register_forward_pre_hook(forward_pre_hook, prepend=False)
mod.register_forward_hook(forward_post_hook, prepend=False)
return overridden_module_classes
def _no_dispatch_record_stream(tensor: torch.Tensor, stream: torch.Stream) -> None:
# FIXME record_stream doesn't work with non-cuda/mtia/xpu tensors
if tensor.device.type not in [
"cuda",
"mtia",
"xpu",
torch._C._get_privateuse1_backend_name(),
]:
return
if torch.distributed._functional_collectives.is_torchdynamo_compiling():
return
# from @ezyang:
# The no_dispatch was added in https://github.com/pytorch/pytorch/pull/88014 cc @fegin
# Looking over the PR, it looks like this is because we don't actually support Stream arguments
# in torch dispatch, so it just chokes.
# If Dynamo is able to answer "are there any torch dispatch modes" active (it should answer False),
# a better version of this would just be to check if there are any modes before disabling dispatch.
# TODO(voz): Extend a dynamo util to answer the above, unify the codepaths here.
tensor.record_stream(stream)
else:
with no_dispatch():
tensor.record_stream(stream)
| HandleTrainingState |
python | pola-rs__polars | py-polars/src/polars/expr/whenthen.py | {
"start": 3412,
"end": 4305
} | class ____:
"""
Utility class for the `when-then-otherwise` expression.
Represents the state of the expression after an additional `when` is called.
In this state, `then` must be called to continue to finish the expression.
"""
def __init__(self, chained_when: Any) -> None:
self._chained_when = chained_when
def then(self, statement: IntoExpr) -> ChainedThen:
"""
Attach a statement to the corresponding condition.
Parameters
----------
statement
The statement to apply if the corresponding condition is true.
Accepts expression input. Strings are parsed as column names, other
non-expression inputs are parsed as literals.
"""
statement_pyexpr = parse_into_expression(statement)
return ChainedThen(self._chained_when.then(statement_pyexpr))
| ChainedWhen |
python | doocs__leetcode | solution/1600-1699/1630.Arithmetic Subarrays/Solution.py | {
"start": 0,
"end": 485
} | class ____:
def checkArithmeticSubarrays(
self, nums: List[int], l: List[int], r: List[int]
) -> List[bool]:
def check(nums, l, r):
n = r - l + 1
s = set(nums[l : l + n])
a1, an = min(nums[l : l + n]), max(nums[l : l + n])
d, mod = divmod(an - a1, n - 1)
return mod == 0 and all((a1 + (i - 1) * d) in s for i in range(1, n))
return [check(nums, left, right) for left, right in zip(l, r)]
| Solution |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 41360,
"end": 43265
} | class ____(TestCase):
"""Tests for ``side_effect()``"""
def test_individual(self):
# The function increments the counter for each call
counter = [0]
def func(arg):
counter[0] += 1
result = list(mi.side_effect(func, range(10)))
self.assertEqual(result, list(range(10)))
self.assertEqual(counter[0], 10)
def test_chunked(self):
# The function increments the counter for each call
counter = [0]
def func(arg):
counter[0] += 1
result = list(mi.side_effect(func, range(10), 2))
self.assertEqual(result, list(range(10)))
self.assertEqual(counter[0], 5)
def test_before_after(self):
f = StringIO()
collector = []
def func(item):
print(item, file=f)
collector.append(f.getvalue())
def it():
yield 'a'
yield 'b'
raise RuntimeError('kaboom')
before = lambda: print('HEADER', file=f)
after = f.close
try:
mi.consume(mi.side_effect(func, it(), before=before, after=after))
except RuntimeError:
pass
# The iterable should have been written to the file
self.assertEqual(collector, ['HEADER\na\n', 'HEADER\na\nb\n'])
# The file should be closed even though something bad happened
self.assertTrue(f.closed)
def test_before_fails(self):
f = StringIO()
func = lambda x: print(x, file=f)
def before():
raise RuntimeError('ouch')
try:
mi.consume(
mi.side_effect(func, 'abc', before=before, after=f.close)
)
except RuntimeError:
pass
# The file should be closed even though something bad happened in the
# before function
self.assertTrue(f.closed)
| SideEffectTests |
python | modin-project__modin | modin/core/dataframe/pandas/metadata/index.py | {
"start": 981,
"end": 12390
} | class ____:
"""
A class that hides the various implementations of the index needed for optimization.
Parameters
----------
value : sequence, PandasDataframe or callable() -> (pandas.Index, list of ints), optional
If a sequence passed this will be considered as the index values.
If a ``PandasDataframe`` passed then it will be used to lazily extract indices
when required, note that the `axis` parameter must be passed in this case.
If a callable passed then it's expected to return a pandas Index and a list of
partition lengths along the index axis.
If ``None`` was passed, the index will be considered an incomplete and will raise
a ``RuntimeError`` on an attempt of materialization. To complete the index object
you have to use ``.maybe_specify_new_frame_ref()`` method.
axis : int, optional
Specifies an axis the object represents, serves as an optional hint. This parameter
must be passed in case value is a ``PandasDataframe``.
dtypes : pandas.Series, optional
Materialized dtypes of index levels.
"""
def __init__(self, value=None, axis=None, dtypes: Optional[pandas.Series] = None):
from modin.core.dataframe.pandas.dataframe.dataframe import PandasDataframe
self._is_default_callable = False
self._axis = axis
self._dtypes = dtypes
if callable(value):
self._value = value
elif isinstance(value, PandasDataframe):
assert axis is not None
self._value = self._get_default_callable(value, axis)
self._is_default_callable = True
elif value is None:
assert axis is not None
self._value = value
else:
self._value = ensure_index(value)
self._lengths_cache = None
# index/lengths ID's for faster comparison between other ModinIndex objects,
# these should be propagated to the copies of the index
self._index_id = uuid.uuid4()
self._lengths_id = uuid.uuid4()
def maybe_get_dtypes(self) -> Optional[pandas.Series]:
"""
Get index dtypes if available.
Returns
-------
pandas.Series or None
"""
if self._dtypes is not None:
return self._dtypes
if self.is_materialized:
self._dtypes = (
self._value.dtypes
if isinstance(self._value, pandas.MultiIndex)
else pandas.Series([self._value.dtype], index=[self._value.name])
)
return self._dtypes
return None
@staticmethod
def _get_default_callable(dataframe_obj, axis):
"""
Build a callable extracting index labels and partitions lengths for the specified axis.
Parameters
----------
dataframe_obj : PandasDataframe
axis : int
0 - extract indices, 1 - extract columns.
Returns
-------
callable() -> tuple(pandas.Index, list[ints])
"""
return lambda: dataframe_obj._compute_axis_labels_and_lengths(axis)
def maybe_specify_new_frame_ref(self, value, axis) -> "ModinIndex":
"""
Set a new reference for a frame used to lazily extract index labels if it's needed.
The method sets a new reference only if the indices are not yet materialized and
if a PandasDataframe was originally passed to construct this index (so the ModinIndex
object holds a reference to it). The reason the reference should be updated is that
we don't want to hold in memory those frames that are already not needed. Once the
reference is updated, the old frame will be garbage collected if there are no
more references to it.
Parameters
----------
value : PandasDataframe
New dataframe to reference.
axis : int
Axis to extract labels from.
Returns
-------
ModinIndex
New ModinIndex with the reference updated.
"""
if self._value is not None and (
not callable(self._value) or not self._is_default_callable
):
return self
new_index = self.copy(copy_lengths=True)
new_index._axis = axis
new_index._value = self._get_default_callable(value, new_index._axis)
# if the '._value' was 'None' initially, then the '_is_default_callable' flag was
# also being set to 'False', since now the '._value' is a default callable,
# so we want to ensure that the flag is set to 'True'
new_index._is_default_callable = True
return new_index
@property
def is_materialized(self) -> bool:
"""
Check if the internal representation is materialized.
Returns
-------
bool
"""
return self.is_materialized_index(self)
@classmethod
def is_materialized_index(cls, index) -> bool:
"""
Check if the passed object represents a materialized index.
Parameters
----------
index : object
An object to check.
Returns
-------
bool
"""
# importing here to avoid circular import issue
from modin.pandas.indexing import is_range_like
if isinstance(index, cls):
index = index._value
return is_list_like(index) or is_range_like(index) or isinstance(index, slice)
def get(self, return_lengths=False) -> pandas.Index:
"""
Get the materialized internal representation.
Parameters
----------
return_lengths : bool, default: False
In some cases, during the index calculation, it's possible to get
the lengths of the partitions. This flag allows this data to be used
for optimization.
Returns
-------
pandas.Index
"""
if not self.is_materialized:
if callable(self._value):
index, self._lengths_cache = self._value()
self._value = ensure_index(index)
elif self._value is None:
raise RuntimeError(
"It's not allowed to call '.materialize()' before '._value' is specified."
)
else:
raise NotImplementedError(type(self._value))
if return_lengths:
return self._value, self._lengths_cache
else:
return self._value
def equals(self, other: "ModinIndex") -> bool:
"""
Check equality of the index values.
Parameters
----------
other : ModinIndex
Returns
-------
bool
The result of the comparison.
"""
if self._index_id == other._index_id:
return True
if not self.is_materialized:
self.get()
if not other.is_materialized:
other.get()
return self._value.equals(other._value)
def compare_partition_lengths_if_possible(self, other: "ModinIndex"):
"""
Compare the partition lengths cache for the index being stored if possible.
The ``ModinIndex`` object may sometimes store the information about partition
lengths along the axis the index belongs to. If both `self` and `other` have
this information or it can be inferred from them, the method returns
a boolean - the result of the comparison, otherwise it returns ``None``
as an indication that the comparison cannot be made.
Parameters
----------
other : ModinIndex
Returns
-------
bool or None
The result of the comparison if both `self` and `other` contain
the lengths data, ``None`` otherwise.
"""
if self._lengths_id == other._lengths_id:
return True
can_extract_lengths_from_self = self._lengths_cache is not None or callable(
self._value
)
can_extract_lengths_from_other = other._lengths_cache is not None or callable(
other._value
)
if can_extract_lengths_from_self and can_extract_lengths_from_other:
return self.get(return_lengths=True)[1] == other.get(return_lengths=True)[1]
return None
def __len__(self):
"""
Redirect the 'len' request to the internal representation.
Returns
-------
int
Notes
-----
Executing this function materializes the data.
"""
if not self.is_materialized:
self.get()
return len(self._value)
def __reduce__(self):
"""
Serialize an object of this class.
Returns
-------
tuple
Notes
-----
The default implementation generates a recursion error. In a short:
during the construction of the object, `__getattr__` function is called, which
is not intended to be used in situations where the object is not initialized.
"""
return (
self.__class__,
(self._value, self._axis),
{
"_lengths_cache": self._lengths_cache,
"_index_id": self._index_id,
"_lengths_id": self._lengths_id,
"_is_default_callable": self._is_default_callable,
},
)
def __getitem__(self, key):
"""
Get an index value at the position of `key`.
Parameters
----------
key : int
Returns
-------
label
"""
if not self.is_materialized:
self.get()
return self._value[key]
def __getattr__(self, name):
"""
Redirect access to non-existent attributes to the internal representation.
This is necessary so that objects of this class in most cases mimic the behavior
of the ``pandas.Index``. The main limitations of the current approach are type
checking and the use of this object where pandas indexes are supposed to be used.
Parameters
----------
name : str
Attribute name.
Returns
-------
object
Attribute.
Notes
-----
Executing this function materializes the data.
"""
if not self.is_materialized:
self.get()
return self._value.__getattribute__(name)
def copy(self, copy_lengths=False) -> "ModinIndex":
"""
Copy an object without materializing the internal representation.
Parameters
----------
copy_lengths : bool, default: False
Whether to copy the stored partition lengths to the
new index object.
Returns
-------
ModinIndex
"""
idx_cache = self._value
if idx_cache is not None and not callable(idx_cache):
idx_cache = idx_cache.copy()
result = ModinIndex(idx_cache, axis=self._axis, dtypes=self._dtypes)
result._index_id = self._index_id
result._is_default_callable = self._is_default_callable
if copy_lengths:
result._lengths_cache = self._lengths_cache
result._lengths_id = self._lengths_id
return result
| ModinIndex |
python | pytorch__pytorch | test/fx/quantization.py | {
"start": 1778,
"end": 2193
} | class ____(MinMaxObserver):
def quantize(self, quantizer, node, load_arg):
if not self.all_tensors:
return NotImplemented
scale, zeropoint = self.scale_zeropoint()
return quantizer.quantized_graph.create_node(
"call_function",
torch.ops.quantized.add,
load_arg(node.args),
{"scale": scale, "zero_point": zeropoint},
)
| Add |
python | pytorch__pytorch | torch/_inductor/codecache.py | {
"start": 15322,
"end": 15950
} | class ____:
"""
TensorMetadata plus the elements as a list of raw values.
Used for hashing inlined constants.
"""
tensor_metadata: TensorMetadata
values: list[Any]
def _ident(x: T) -> T:
return x
def extract_tensor_metadata_for_cache_key(t: Tensor) -> TensorMetadata:
"""
Extracts the tensor metadata and removes fields of the TensorMetadata
that are not needed for caching
"""
meta = extract_tensor_metadata(t)
if not hasattr(t, "_is_inductor_static"):
meta = dataclasses.replace(meta, storage_offset=0, storage_bytes=None)
return meta
| TensorMetadataAndValues |
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared_tests/test_check.py | {
"start": 29324,
"end": 33930
} | class ____(collections.abc.Mapping):
def __init__(self, **kwargs):
self._dict = dict()
for key, value in kwargs.items():
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __iter__(self):
return iter(self._dict)
def __len__(self):
return len(self._dict)
MAPPING_TEST_CASES = DICT_TEST_CASES + [
(dict(obj=SimpleMapping(x=1), key_type=str, value_type=int), True),
]
@pytest.mark.parametrize("kwargs, should_succeed", MAPPING_TEST_CASES)
def test_mapping_param(kwargs, should_succeed):
if should_succeed:
assert check.mapping_param(**kwargs, param_name="name") == kwargs["obj"]
else:
with pytest.raises(CheckError):
check.mapping_param(**kwargs, param_name="name")
def test_opt_mapping_param():
mapping = SimpleMapping(x=1)
assert check.opt_mapping_param(mapping, param_name="name") == mapping
assert check.opt_mapping_param(mapping, param_name="name", key_type=str) == mapping
assert check.opt_mapping_param(mapping, param_name="name", value_type=int) == mapping
assert check.opt_mapping_param(None, param_name="name") == dict()
with pytest.raises(CheckError):
check.opt_mapping_param("foo", param_name="name") # pyright: ignore[reportArgumentType]
assert check.opt_nullable_mapping_param(None, "name") is None
# ########################
# ##### NOT NONE
# ########################
def test_not_none_param():
assert check.not_none_param(1, "fine")
check.not_none_param(0, "zero is fine")
check.not_none_param("", "empty str is fine")
with pytest.raises(CheckError):
check.not_none_param(None, "none fails")
# ########################
# ##### PATH
# ########################
def test_path_param():
from pathlib import Path
assert check.path_param("/a/b.csv", "path_param") == "/a/b.csv"
if sys.platform.startswith("win32"):
assert check.opt_path_param(Path("c:\\a\\b.csv"), "path_param") == "c:\\a\\b.csv"
else:
assert check.opt_path_param(Path("/a/b.csv"), "path_param") == "/a/b.csv"
with pytest.raises(ParameterCheckError):
check.path_param(None, "path_param") # pyright: ignore[reportArgumentType]
with pytest.raises(ParameterCheckError):
check.path_param(0, "path_param") # pyright: ignore[reportArgumentType]
def test_opt_path_param():
from pathlib import Path
assert check.opt_path_param("/a/b.csv", "path_param") == "/a/b.csv"
if sys.platform.startswith("win32"):
assert check.opt_path_param(Path("c:\\a\\b.csv"), "path_param") == "c:\\a\\b.csv"
else:
assert check.opt_path_param(Path("/a/b.csv"), "path_param") == "/a/b.csv"
assert check.opt_path_param(None, "path_param") is None
with pytest.raises(ParameterCheckError):
check.opt_path_param(0, "path_param") # pyright: ignore[reportCallIssue,reportArgumentType]
# ########################
# ##### SET
# ########################
def test_set_param():
assert check.set_param(set(), "set_param") == set()
assert check.set_param(frozenset(), "set_param") == set()
with pytest.raises(ParameterCheckError):
check.set_param(None, "set_param") # pyright: ignore[reportArgumentType]
with pytest.raises(ParameterCheckError):
check.set_param("3u4", "set_param") # pyright: ignore[reportArgumentType]
obj_set = {1}
assert check.set_param(obj_set, "set_param") == obj_set
obj_set_two = {1, 1, 2}
obj_set_two_deduped = {1, 2}
assert check.set_param(obj_set_two, "set_param") == obj_set_two_deduped
assert check.set_param(obj_set_two, "set_param", of_type=int) == obj_set_two_deduped
with pytest.raises(CheckError, match="Did you pass a class"):
check.set_param({str}, "set_param", of_type=int)
with pytest.raises(CheckError, match="Member of set mismatches type"):
check.set_param({"foo"}, "set_param", of_type=int)
def test_opt_set_param():
assert check.opt_set_param(None, "set_param") == set()
assert check.opt_set_param(set(), "set_param") == set()
assert check.opt_set_param(frozenset(), "set_param") == set()
assert check.opt_set_param({3}, "set_param") == {3}
with pytest.raises(ParameterCheckError):
check.opt_set_param(0, "set_param") # pyright: ignore[reportArgumentType]
with pytest.raises(ParameterCheckError):
check.opt_set_param("3u4", "set_param") # pyright: ignore[reportArgumentType]
# ########################
# ##### SEQUENCE
# ########################
@record
| SimpleMapping |
python | getsentry__sentry | tests/sentry/utils/test_circuit_breaker2.py | {
"start": 893,
"end": 4293
} | class ____(CircuitBreaker):
"""
A circuit breaker with extra methods useful for mocking state.
To understand the methods below, it helps to understand the `RedisSlidingWindowRateLimiter`
which powers the circuit breaker. Details can be found in
https://github.com/getsentry/sentry-redis-tools/blob/d4f3dc883b1137d82b6b7a92f4b5b41991c1fc8a/sentry_redis_tools/sliding_windows_rate_limiter.py,
(which is the implementation behind the rate limiter) but TL;DR, quota usage during the time
window is tallied in buckets ("granules"), and as time passes the window slides forward one
granule at a time. To be able to mimic this, most of the methods here operate at the granule
level.
"""
def _set_breaker_state(
self, state: CircuitBreakerState, seconds_left: int | None = None
) -> None:
"""
Adjust redis keys to force the breaker into the given state. If no remaining seconds are
given, puts the breaker at the beginning of its time in the given state.
"""
now = int(time.time())
if state == CircuitBreakerState.OK:
self._delete_from_redis([self.broken_state_key, self.recovery_state_key])
elif state == CircuitBreakerState.BROKEN:
broken_state_timeout = seconds_left or self.broken_state_duration
broken_state_end = now + broken_state_timeout
recovery_timeout = broken_state_timeout + self.recovery_duration
recovery_end = now + recovery_timeout
self._set_in_redis(
[
(self.broken_state_key, broken_state_end, broken_state_timeout),
(self.recovery_state_key, recovery_end, recovery_timeout),
]
)
elif state == CircuitBreakerState.RECOVERY:
recovery_timeout = seconds_left or self.recovery_duration
recovery_end = now + recovery_timeout
self._delete_from_redis([self.broken_state_key])
self._set_in_redis([(self.recovery_state_key, recovery_end, recovery_timeout)])
assert self._get_state_and_remaining_time() == (
state,
(
None
if state == CircuitBreakerState.OK
else (
broken_state_timeout
if state == CircuitBreakerState.BROKEN
else recovery_timeout
)
),
)
def _add_quota_usage(
self,
quota: Quota,
amount_used: int,
granule_or_window_end: int | None = None,
) -> None:
"""
Add to the usage total of the given quota, in the granule or window ending at the given
time. If a window (rather than a granule) end time is given, usage will be added to the
final granule.
If no end time is given, the current time will be used.
"""
now = int(time.time())
window_end_time = granule_or_window_end or now
self.limiter.use_quotas(
[RequestedQuota(self.key, amount_used, [quota])],
[GrantedQuota(self.key, amount_used, [])],
window_end_time,
)
def _delete_from_redis(self, keys: list[str]) -> Any:
for key in keys:
self.redis_pipeline.delete(key)
return self.redis_pipeline.execute()
@freeze_time()
| MockCircuitBreaker |
python | openai__openai-python | src/openai/resources/fine_tuning/jobs/checkpoints.py | {
"start": 3612,
"end": 6476
} | class ____(AsyncAPIResource):
@cached_property
def with_raw_response(self) -> AsyncCheckpointsWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncCheckpointsWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncCheckpointsWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncCheckpointsWithStreamingResponse(self)
def list(
self,
fine_tuning_job_id: str,
*,
after: str | Omit = omit,
limit: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[FineTuningJobCheckpoint, AsyncCursorPage[FineTuningJobCheckpoint]]:
"""
List checkpoints for a fine-tuning job.
Args:
after: Identifier for the last checkpoint ID from the previous pagination request.
limit: Number of checkpoints to retrieve.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
if not fine_tuning_job_id:
raise ValueError(f"Expected a non-empty value for `fine_tuning_job_id` but received {fine_tuning_job_id!r}")
return self._get_api_list(
f"/fine_tuning/jobs/{fine_tuning_job_id}/checkpoints",
page=AsyncCursorPage[FineTuningJobCheckpoint],
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
query=maybe_transform(
{
"after": after,
"limit": limit,
},
checkpoint_list_params.CheckpointListParams,
),
),
model=FineTuningJobCheckpoint,
)
| AsyncCheckpoints |
python | pydantic__pydantic | tests/mypy/modules/generics.py | {
"start": 405,
"end": 764
} | class ____(Response[HtmlBody]):
def custom_method(self) -> None:
doctype = self.body.doctype
print(f'self: {doctype}')
example = {'url': 'foo.com', 'body': {'raw': '..<html>..', 'doctype': 'html'}}
resp = HtmlResponse.model_validate(example)
resp.custom_method()
assert_type(resp.body, HtmlBody)
T = TypeVar('T', int, str)
| HtmlResponse |
python | django__django | tests/delete_regress/tests.py | {
"start": 14357,
"end": 14614
} | class ____(SimpleTestCase):
def test_disallowed_delete_distinct_on(self):
msg = "Cannot call delete() after .distinct(*fields)."
with self.assertRaisesMessage(TypeError, msg):
Book.objects.distinct("id").delete()
| DeleteDistinct |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassConverter2.py | {
"start": 1530,
"end": 1670
} | class ____(ModelBase):
b: tuple[int, ...] = model_field(converter=tuple)
DC3([1, 2, 3])
# This should generate an error.
DC3(["", 1])
| DC3 |
python | getsentry__sentry | tests/sentry/receivers/test_sentry_apps.py | {
"start": 8176,
"end": 10663
} | class ____(APITestCase):
def setUp(self) -> None:
self.issue = self.create_group(project=self.project)
self.sentry_app = self.create_sentry_app(events=["issue.assigned"])
self.install = self.create_sentry_app_installation(
organization=self.organization, slug=self.sentry_app.slug
)
self.assignee = self.create_user(name="Bert", email="bert@example.com")
def test_after_issue_assigned(self, delay: MagicMock) -> None:
GroupAssignee.objects.assign(self.issue, self.assignee, self.user)
delay.assert_called_once_with(
installation_id=self.install.id,
issue_id=self.issue.id,
type="assigned",
user_id=self.user.id,
data={
"assignee": {
"type": "user",
"name": self.assignee.name,
"email": self.assignee.email,
"id": self.assignee.id,
}
},
)
def test_after_issue_reassigned(self, delay: MagicMock) -> None:
GroupAssignee.objects.assign(self.issue, self.assignee, self.user)
new_assignee = self.create_user(name="Berry", email="berry@example.com")
GroupAssignee.objects.assign(self.issue, new_assignee, self.user)
delay.assert_called_with(
installation_id=self.install.id,
issue_id=self.issue.id,
type="assigned",
user_id=self.user.id,
data={
"assignee": {
"type": "user",
"name": new_assignee.name,
"email": new_assignee.email,
"id": new_assignee.id,
}
},
)
def test_after_issue_assigned_with_enhanced_privacy(self, delay: MagicMock) -> None:
org = self.issue.project.organization
org.flags.enhanced_privacy = True
org.save()
GroupAssignee.objects.assign(self.issue, self.assignee, self.user)
delay.assert_called_once_with(
installation_id=self.install.id,
issue_id=self.issue.id,
type="assigned",
user_id=self.user.id,
data={
# Excludes email address
"assignee": {"type": "user", "name": self.assignee.name, "id": self.assignee.id}
},
)
@patch("sentry.sentry_apps.tasks.sentry_apps.build_comment_webhook.delay")
| TestIssueAssigned |
python | getsentry__sentry | src/sentry/auth_v2/endpoints/feature_flag_view.py | {
"start": 417,
"end": 1042
} | class ____(AuthV2Endpoint):
owner = ApiOwner.ENTERPRISE
publish_status = {"GET": ApiPublishStatus.EXPERIMENTAL}
enforce_rate_limit = True
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {RateLimitCategory.IP: RateLimit(limit=30, window=60)} # 30 per minute per IP
}
)
def get(self, request: Request) -> Response:
"""
Check if the feature flag is set correctly on your machine.
curl -X GET "https://sentry.io/api/0/auth-v2/feature-flag/" -H "X-Sentry-Auth-V2: ***"
"""
return Response({"message": "Hello world"})
| FeatureFlagView |
python | tensorflow__tensorflow | tensorflow/python/trackable/base.py | {
"start": 2474,
"end": 3105
} | class ____(TrackableReference):
"""TrackableReference that stores weak references."""
__slots__ = ()
def __init__(self, name, ref):
if not isinstance(ref, weakref.ref):
ref = weakref.ref(ref)
super(WeakTrackableReference, self).__init__(name=name, ref=ref)
@property
def ref(self):
return self._ref()
# TODO(bfontain): Update once sharded initialization interface is finalized.
ShardInfo = collections.namedtuple("CheckpointInitialValueShardInfo",
["shape", "offset"])
@tf_export("__internal__.tracking.CheckpointInitialValueCallable", v1=[])
| WeakTrackableReference |
python | fsspec__filesystem_spec | fsspec/implementations/tests/conftest.py | {
"start": 257,
"end": 941
} | class ____(LocalFileSystem):
protocol = ["file", "other"]
FILESYSTEMS = {
"local": LocalFileSystem,
"multi": MultiProtocolFileSystem,
"memory": MemoryFileSystem,
}
READ_ONLY_FILESYSTEMS = []
@pytest.fixture(scope="function")
def fs(request):
pyarrow_fs = pytest.importorskip("pyarrow.fs")
FileSystem = pyarrow_fs.FileSystem
if request.param == "arrow":
fs = ArrowFSWrapper(FileSystem.from_uri("file:///")[0])
return fs
cls = FILESYSTEMS[request.param]
return cls()
@pytest.fixture(scope="function")
def temp_file():
with tempfile.TemporaryDirectory() as temp_dir:
return temp_dir + "test-file"
| MultiProtocolFileSystem |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 32388,
"end": 36316
} | class ____(SingleContinuousDistribution):
_argnames = ('mean', 'std', 'rate')
set = Interval(-oo, oo)
@staticmethod
def check(mean, std, rate):
_value_check(
std > 0, "Standard deviation of ExGaussian must be positive.")
_value_check(rate > 0, "Rate of ExGaussian must be positive.")
def pdf(self, x):
mean, std, rate = self.mean, self.std, self.rate
term1 = rate/2
term2 = exp(rate * (2 * mean + rate * std**2 - 2*x)/2)
term3 = erfc((mean + rate*std**2 - x)/(sqrt(2)*std))
return term1*term2*term3
def _cdf(self, x):
from sympy.stats import cdf
mean, std, rate = self.mean, self.std, self.rate
u = rate*(x - mean)
v = rate*std
GaussianCDF1 = cdf(Normal('x', 0, v))(u)
GaussianCDF2 = cdf(Normal('x', v**2, v))(u)
return GaussianCDF1 - exp(-u + (v**2/2) + log(GaussianCDF2))
def _characteristic_function(self, t):
mean, std, rate = self.mean, self.std, self.rate
term1 = (1 - I*t/rate)**(-1)
term2 = exp(I*mean*t - std**2*t**2/2)
return term1 * term2
def _moment_generating_function(self, t):
mean, std, rate = self.mean, self.std, self.rate
term1 = (1 - t/rate)**(-1)
term2 = exp(mean*t + std**2*t**2/2)
return term1*term2
def ExGaussian(name, mean, std, rate):
r"""
Create a continuous random variable with an Exponentially modified
Gaussian (EMG) distribution.
Explanation
===========
The density of the exponentially modified Gaussian distribution is given by
.. math::
f(x) := \frac{\lambda}{2}e^{\frac{\lambda}{2}(2\mu+\lambda\sigma^2-2x)}
\text{erfc}(\frac{\mu + \lambda\sigma^2 - x}{\sqrt{2}\sigma})
with $x > 0$. Note that the expected value is `1/\lambda`.
Parameters
==========
name : A string giving a name for this distribution
mean : A Real number, the mean of Gaussian component
std : A positive Real number,
:math: `\sigma^2 > 0` the variance of Gaussian component
rate : A positive Real number,
:math: `\lambda > 0` the rate of Exponential component
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import ExGaussian, density, cdf, E
>>> from sympy.stats import variance, skewness
>>> from sympy import Symbol, pprint, simplify
>>> mean = Symbol("mu")
>>> std = Symbol("sigma", positive=True)
>>> rate = Symbol("lamda", positive=True)
>>> z = Symbol("z")
>>> X = ExGaussian("x", mean, std, rate)
>>> pprint(density(X)(z), use_unicode=False)
/ 2 \
lamda*\lamda*sigma + 2*mu - 2*z/
--------------------------------- / ___ / 2 \\
2 |\/ 2 *\lamda*sigma + mu - z/|
lamda*e *erfc|-----------------------------|
\ 2*sigma /
----------------------------------------------------------------------------
2
>>> cdf(X)(z)
-(erf(sqrt(2)*(-lamda**2*sigma**2 + lamda*(-mu + z))/(2*lamda*sigma))/2 + 1/2)*exp(lamda**2*sigma**2/2 - lamda*(-mu + z)) + erf(sqrt(2)*(-mu + z)/(2*sigma))/2 + 1/2
>>> E(X)
(lamda*mu + 1)/lamda
>>> simplify(variance(X))
sigma**2 + lamda**(-2)
>>> simplify(skewness(X))
2/(lamda**2*sigma**2 + 1)**(3/2)
References
==========
.. [1] https://en.wikipedia.org/wiki/Exponentially_modified_Gaussian_distribution
"""
return rv(name, ExGaussianDistribution, (mean, std, rate))
#-------------------------------------------------------------------------------
# Exponential distribution -----------------------------------------------------
| ExGaussianDistribution |
python | imageio__imageio | imageio/plugins/tifffile.py | {
"start": 7905,
"end": 20664
} | class ____(Format):
"""Provides support for a wide range of Tiff images using the tifffile
backend.
Images that contain multiple pages can be read using ``imageio.mimread()``
to read the individual pages, or ``imageio.volread()`` to obtain a
single (higher dimensional) array.
Note that global metadata is stored with the first frame in a TIFF file.
Thus calling :py:meth:`Format.Writer.set_meta_data` after the first frame
was written has no effect. Also, global metadata is ignored if metadata is
provided via the `meta` argument of :py:meth:`Format.Writer.append_data`.
If you have installed tifffile as a Python package, imageio will attempt
to use that as backend instead of the bundled backend. Doing so can
provide access to new performance improvements and bug fixes.
Parameters for reading
----------------------
offset : int
Optional start position of embedded file. By default this is
the current file position.
size : int
Optional size of embedded file. By default this is the number
of bytes from the 'offset' to the end of the file.
multifile : bool
If True (default), series may include pages from multiple files.
Currently applies to OME-TIFF only.
multifile_close : bool
If True (default), keep the handles of other files in multifile
series closed. This is inefficient when few files refer to
many pages. If False, the C runtime may run out of resources.
Parameters for saving
---------------------
bigtiff : bool
If True, the BigTIFF format is used.
byteorder : {'<', '>'}
The endianness of the data in the file.
By default this is the system's native byte order.
software : str
Name of the software used to create the image.
Saved with the first page only.
Metadata for reading
--------------------
planar_configuration : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution_unit : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
compression : int
Value indicating the compression algorithm used, e.g. 5 is LZW,
7 is JPEG, 8 is deflate.
If 1, data are uncompressed.
predictor : int
Value 2 indicates horizontal differencing was used before compression,
while 3 indicates floating point horizontal differencing.
If 1, no prediction scheme was used before compression.
orientation : {'top_left', 'bottom_right', ...}
Oriented of image array.
is_rgb : bool
True if page contains a RGB image.
is_contig : bool
True if page contains a contiguous image.
is_tiled : bool
True if page contains tiled image.
is_palette : bool
True if page contains a palette-colored image and not OME or STK.
is_reduced : bool
True if page is a reduced image of another image.
is_shaped : bool
True if page contains shape in image_description tag.
is_fluoview : bool
True if page contains FluoView MM_STAMP tag.
is_nih : bool
True if page contains NIH image header.
is_micromanager : bool
True if page contains Micro-Manager metadata.
is_ome : bool
True if page contains OME-XML in image_description tag.
is_sgi : bool
True if page contains SGI image and tile depth tags.
is_stk : bool
True if page contains UIC2Tag tag.
is_mdgel : bool
True if page contains md_file_tag tag.
is_mediacy : bool
True if page contains Media Cybernetics Id tag.
is_stk : bool
True if page contains UIC2Tag tag.
is_lsm : bool
True if page contains LSM CZ_LSM_INFO tag.
description : str
Image description
description1 : str
Additional description
is_imagej : None or str
ImageJ metadata
software : str
Software used to create the TIFF file
datetime : datetime.datetime
Creation date and time
Metadata for writing
--------------------
photometric : {'minisblack', 'miniswhite', 'rgb'}
The color space of the image data.
By default this setting is inferred from the data shape.
planarconfig : {'contig', 'planar'}
Specifies if samples are stored contiguous or in separate planes.
By default this setting is inferred from the data shape.
'contig': last dimension contains samples.
'planar': third last dimension contains samples.
resolution : (float, float) or ((int, int), (int, int))
X and Y resolution in dots per inch as float or rational numbers.
description : str
The subject of the image. Saved with the first page only.
compress : int
Values from 0 to 9 controlling the level of zlib (deflate) compression.
If 0, data are written uncompressed (default).
predictor : bool
If True, horizontal differencing is applied before compression.
Note that using an int literal 1 actually means no prediction scheme
will be used.
volume : bool
If True, volume data are stored in one tile (if applicable) using
the SGI image_depth and tile_depth tags.
Image width and depth must be multiple of 16.
Few software can read this format, e.g. MeVisLab.
writeshape : bool
If True, write the data shape to the image_description tag
if necessary and no other description is given.
extratags: sequence of tuples
Additional tags as [(code, dtype, count, value, writeonce)].
code : int
The TIFF tag Id.
dtype : str
Data type of items in 'value' in Python struct format.
One of B, s, H, I, 2I, b, h, i, f, d, Q, or q.
count : int
Number of data values. Not used for string values.
value : sequence
'Count' values compatible with 'dtype'.
writeonce : bool
If True, the tag is written to the first page only.
"""
def _can_read(self, request):
try:
_tifffile.TiffFile(request.get_file(), **request.kwargs)
except ValueError:
# vendored backend raises value exception
return False
except _tifffile.TiffFileError: # pragma: no-cover
# current version raises custom exception
return False
finally:
request.get_file().seek(0)
return True
def _can_write(self, request):
if request._uri_type in [URI_FILE, URI_BYTES]:
pass # special URI
elif request.extension not in self.extensions:
return False
try:
_tifffile.TiffWriter(request.get_file(), **request.kwargs)
except ValueError:
# vendored backend raises value exception
return False
except _tifffile.TiffFileError: # pragma: no-cover
# current version raises custom exception
return False
finally:
request.get_file().seek(0)
return True
# -- reader
class Reader(Format.Reader):
def _open(self, **kwargs):
# Allow loading from http; tifffile uses seek, so download first
if self.request.filename.startswith(("http://", "https://")):
self._f = f = open(self.request.get_local_filename(), "rb")
else:
self._f = None
f = self.request.get_file()
self._tf = _tifffile.TiffFile(f, **kwargs)
def _close(self):
self._tf.close()
if self._f is not None:
self._f.close()
def _get_length(self):
return len(self._tf.series)
def _get_data(self, index):
if index < 0 or index >= self._get_length():
raise IndexError("Index out of range while reading from tiff file")
im = self._tf.asarray(series=index)
meta = self._get_meta_data(index)
return im, meta
def _get_meta_data(self, index):
meta = {}
page = self._tf.pages[index or 0]
for key in READ_METADATA_KEYS:
try:
meta[key] = getattr(page, key)
except Exception:
pass
# tifffile <= 0.12.1 use datetime, newer use DateTime
for key in ("datetime", "DateTime"):
try:
meta["datetime"] = datetime.datetime.strptime(
page.tags[key].value, "%Y:%m:%d %H:%M:%S"
)
break
except Exception:
pass
if 296 in page.tags:
meta["resolution_unit"] = page.tags[296].value.value
if 282 in page.tags and 283 in page.tags and 296 in page.tags:
resolution_x = page.tags[282].value
resolution_y = page.tags[283].value
if resolution_x[1] == 0 or resolution_y[1] == 0:
warnings.warn(
"Ignoring resolution metadata, "
"because at least one direction has a 0 denominator.",
RuntimeWarning,
)
else:
meta["resolution"] = (
resolution_x[0] / resolution_x[1],
resolution_y[0] / resolution_y[1],
page.tags[296].value.name,
)
return meta
# -- writer
class Writer(Format.Writer):
def _open(self, bigtiff=None, byteorder=None, software=None):
try:
self._tf = _tifffile.TiffWriter(
self.request.get_file(),
bigtiff=bigtiff,
byteorder=byteorder,
software=software,
)
self._software = None
except TypeError:
# In tifffile >= 0.15, the `software` arg is passed to
# TiffWriter.save
self._tf = _tifffile.TiffWriter(
self.request.get_file(), bigtiff=bigtiff, byteorder=byteorder
)
self._software = software
self._meta = {}
self._frames_written = 0
def _close(self):
self._tf.close()
def _append_data(self, im, meta):
if meta is not None:
meta = self._sanitize_meta(meta)
else:
# Use global metadata for first frame
meta = self._meta if self._frames_written == 0 else {}
if self._software is not None and self._frames_written == 0:
meta["software"] = self._software
# No need to check self.request.mode; tifffile figures out whether
# this is a single page, or all page data at once.
try:
# TiffWriter.save has been deprecated in version 2020.9.30
write_meth = self._tf.write
except AttributeError:
write_meth = self._tf.save
write_meth(np.asanyarray(im), contiguous=False, **meta)
self._frames_written += 1
@staticmethod
def _sanitize_meta(meta):
ret = {}
for key, value in meta.items():
if key in WRITE_METADATA_KEYS:
# Special case of previously read `predictor` int value
# 1(=NONE) translation to False expected by TiffWriter.save
if key == "predictor" and not isinstance(value, bool):
ret[key] = value > 1
elif key == "compress" and value != 0:
warnings.warn(
"The use of `compress` is deprecated. Use `compression` and `compressionargs` instead.",
DeprecationWarning,
)
if _tifffile.__version__ < "2022":
ret["compression"] = (8, value)
else:
ret["compression"] = "zlib"
ret["compressionargs"] = {"level": value}
else:
ret[key] = value
return ret
def set_meta_data(self, meta):
self._meta = self._sanitize_meta(meta)
| TiffFormat |
python | ray-project__ray | python/ray/dashboard/modules/job/tests/test_utils.py | {
"start": 2383,
"end": 2918
} | class ____:
async def test_basic(self):
request = MockRequest(entrypoint="echo hi")
expected = JobSubmitRequest(entrypoint="echo hi")
assert await parse_and_validate_request(request, JobSubmitRequest) == expected
async def test_forward_compatibility(self):
request = MockRequest(entrypoint="echo hi", new_client_field=None)
expected = JobSubmitRequest(entrypoint="echo hi")
assert await parse_and_validate_request(request, JobSubmitRequest) == expected
| TestParseAndValidateRequest |
python | Netflix__metaflow | metaflow/plugins/cards/card_modules/card.py | {
"start": 3385,
"end": 4627
} | class ____(object):
# Setting REALTIME_UPDATABLE as True will allow metaflow to update the card
# during Task runtime.
REALTIME_UPDATABLE = False
_component_id = None
_logger = None
@property
def component_id(self):
return self._component_id
@component_id.setter
def component_id(self, value):
if not isinstance(value, str):
raise TypeError("Component ID must be a string")
self._component_id = value
def update(self, *args, **kwargs):
"""
#FIXME document
"""
raise NotImplementedError()
def render(self):
"""
`render` returns a string or dictionary. This class can be called on the client side to dynamically add components to the `MetaflowCard`
"""
raise NotImplementedError()
def create_component_id(component):
uuid_bit = "".join(uuid.uuid4().hex.split("-"))[:6]
return type(component).__name__.lower() + "_" + uuid_bit
def with_default_component_id(func):
def ret_func(self, *args, **kwargs):
if self.component_id is None:
self.component_id = create_component_id(self)
return func(self, *args, **kwargs)
return ret_func
| MetaflowCardComponent |
python | doocs__leetcode | solution/2900-2999/2974.Minimum Number Game/Solution.py | {
"start": 0,
"end": 250
} | class ____:
def numberGame(self, nums: List[int]) -> List[int]:
heapify(nums)
ans = []
while nums:
a, b = heappop(nums), heappop(nums)
ans.append(b)
ans.append(a)
return ans
| Solution |
python | pytorch__pytorch | torch/_inductor/pattern_matcher.py | {
"start": 38516,
"end": 69530
} | class ____(PatternEntry):
"""
The replacement pattern for the graph
"""
normalize_args: Callable[..., list[Any]]
@staticmethod
def replace_with_graph(
match: Match,
graph: torch.fx.Graph,
replacement_graph: Union[torch.fx.Graph, torch.fx.GraphModule],
args: Sequence[torch.fx.Node],
) -> None:
"""
Inserts the replacement graph into the toplevel graph at the match
"""
added_replacement_nodes: list[torch.fx.Node] = []
class Replacer(torch.fx.Interpreter):
call_method = None # type: ignore[assignment]
call_module = None # type: ignore[assignment]
get_attr = None # type: ignore[assignment]
def run_node(self, node: torch.fx.Node) -> Any:
if node.op in ("placeholder", "output"):
return super().run_node(node)
target = node.target
args, kwargs = self.fetch_args_kwargs_from_env(node)
if node.op == "call_function":
assert callable(target)
result = graph.call_function(target, args, kwargs)
added_replacement_nodes.append(result)
_transfer_meta(
new_meta=result.meta,
old_node=node,
pass_name="Interpreter_Replacer",
)
# This function copy-pastes the replacement graph into
# the graph. If the replacement graph had any eager_input_vals,
# or val/tensor_meta, we propagate those over.
if "eager_input_vals" in node.meta:
result.meta["eager_input_vals"] = node.meta["eager_input_vals"]
if "val" in node.meta and "val" not in result.meta:
result.meta["val"] = node.meta["val"]
if isinstance(node.meta["val"], torch.Tensor):
assert "tensor_meta" in node.meta
result.meta["tensor_meta"] = node.meta["tensor_meta"]
return result
if node.op == "get_attr":
# If the replacement graph contains a HOP, the subgraphs of the HOP are "get_attr" nodes.
# We need to fetch the subgraph of the HOP then register the subgraph to the replaced graph's root.
from torch._higher_order_ops.utils import (
unique_graph_name_with_root,
)
sub_gm = super().get_attr(target, args, kwargs)
if not isinstance(sub_gm, torch.fx.GraphModule):
raise NotImplementedError(
f"NYI: replacement_graph.{target} is not a graph module. Got {sub_gm}."
)
assert graph.owning_module is not None
graph_name = None
for n, mod in graph.owning_module.named_modules():
if sub_gm is mod:
graph_name = n
break
if graph_name is None:
assert isinstance(target, str)
_, graph_name = unique_graph_name_with_root(
# pyrefly: ignore [unbound-name]
graph.owning_module,
target,
)
# pyrefly: ignore [unbound-name]
graph.owning_module.register_module(graph_name, sub_gm)
# pyrefly: ignore [unbound-name]
getattr_node = graph.get_attr(graph_name)
added_replacement_nodes.append(getattr_node)
return getattr_node
raise NotImplementedError(f"unhandled {node}")
output_nodes = match.output_nodes()
if len(output_nodes) == 1:
last_node = output_nodes[0]
else:
assert output_nodes[0]
nodes = list(output_nodes[0].graph.nodes)
indices = [
(nodes.index(n), n)
for n in output_nodes
if isinstance(n, torch.fx.Node)
]
last_node = min(indices, key=operator.itemgetter(0))[1]
def percolate_tags(
node: torch.fx.Node,
tag_name: str,
tag_value: str,
input_stops: OrderedSet[torch.fx.Node],
) -> None:
queue = [node]
visited = OrderedSet[torch.fx.Node]()
while queue:
arg = queue.pop()
if (
arg not in visited
and arg not in input_stops
and hasattr(arg, "meta")
):
visited.add(arg)
arg.meta[tag_name] = tag_value
queue.extend(arg.all_input_nodes)
with graph.inserting_before(last_node):
assert isinstance(replacement_graph, torch.fx.GraphModule)
replacement = Replacer(replacement_graph).run(*args)
if isinstance(replacement, torch.fx.Node):
replacement = [replacement]
def maybe_getitem(node: torch.fx.Node) -> Any:
if node.op != "call_function":
return None
if node.target != operator.getitem:
return None
assert len(node.args) == 2
return node.args[1]
def replace(
old: Union[torch.fx.Node, None],
new: Union[torch.fx.Node, Sequence[torch.fx.Node], None],
) -> None:
def filter_nodes_in_newly_added_nodes(node: torch.fx.Node) -> bool:
# Do not replace the use of a node if it is being used by
# nodes in the replaced graph
return node not in added_replacement_nodes
if old is None:
assert new is None
return
assert isinstance(old, torch.fx.Node)
if new is None:
old.replace_all_uses_with(
None, # type: ignore[arg-type]
delete_user_cb=filter_nodes_in_newly_added_nodes,
)
if len(old.users) == 0:
graph.erase_node(old)
return
if isinstance(new, torch.fx.Node):
if "val" not in new.meta:
new.meta.update(old.meta)
# Preserve the recompute tags in the replacement graph. We
# look at the recompute tags of the original output node to
# propagate the tag from the output all the way to the input
# args (named as args in the replace_with_graph).
# Note that this is best effort. Since patterns are from
# many to many, there is no easy way to correctly map the
# recomputable tags. It is possible in some scenarios that we
# incorrectly tag some nodes as recomputables.
for tag_name in ["recompute", "ac_graph_id"]:
if tag_name in old.meta:
percolate_tags(
new, tag_name, old.meta[tag_name], OrderedSet(args)
)
old.replace_all_uses_with(
new, delete_user_cb=filter_nodes_in_newly_added_nodes
)
if len(old.users) == 0:
graph.erase_node(old)
return
# `new` is not a node: it's a list of nodes.
#
# This happens when we want to replace a node that has a single
# packed return with multiple unpacked returns. We need to do
# some graph surgery here.
#
# Example:
# def original_graph(x):
# a = op(x)
# b = a[0]
# c = a[1]
# ...
#
# Assume that we want to replace op(x) with the graph
# def new_op(x):
# w = x + 1
# z = x + 2
# return (w, z)
#
# We need to replace `op` with the contents of `new_op`,
# and then rewrite a[0] to be w and a[1] to be z, as so:
# def new_graph(x):
# w = x + 1
# z = x + 2
# b = w
# c = z
# ...
old_uses = list(old.users.keys())
for user in old_uses:
idx = maybe_getitem(user)
if idx is None:
raise AssertionError(
"Deleted index from getitem, did you erase the index and not properly replace it?"
)
replace(user, new[idx])
graph.erase_node(old)
if len(output_nodes) == len(replacement):
for old, new in zip(output_nodes, replacement):
replace(old, new)
else:
assert len(output_nodes) == 1
replace(output_nodes[0], replacement)
match.erase_nodes()
def apply(self, match: Match, graph: torch.fx.Graph, node: torch.fx.Node) -> None:
assert match.replacement_graph is not None
self.replace_with_graph(
match,
graph,
match.replacement_graph,
self.normalize_args(*match.args, **match.kwargs),
)
def _return_true(match: Match) -> bool:
return True
def log_trace_failure(search_fn: Callable[..., Any], e: RuntimeError) -> None:
log.info(
"Replacement pattern %s failed to apply due to shape mismatch: %s",
search_fn.__name__,
e,
)
def check_and_add_duplicate_pattern(
pattern: PatternExpr,
graph: Optional[torch.fx.Graph],
seen_patterns: dict[str, list[Optional[str]]],
skip_duplicates: bool = False,
) -> bool:
"""
Check if a pattern is a duplicate. Because we ignore certain types in searching, but not
in matching, use the graph to distinguish equivalent search patterns.
Returns True if a duplicate is found and `skip_duplicates=True` is passed in. Errors if
`skip_duplicates` is False and a duplicate is found.
"""
pattern_repr = PatternPrettyPrinter.run(pattern)
equiv_pattern_reprs = seen_patterns.get(pattern_repr)
if not equiv_pattern_reprs:
seen_patterns[pattern_repr].append(str(graph) if graph else None)
return False
if graph is None:
if skip_duplicates:
return True
torch._check(
False,
lambda: f"Duplicate pattern: {pattern_repr} with no graph",
)
new_graph_str = str(graph)
for graph_str in equiv_pattern_reprs:
if new_graph_str != graph_str:
continue
if skip_duplicates:
return True
torch._check(
False,
lambda: f"Duplicate pattern: {pattern_repr} with duplicated match graph {graph_str} ",
)
equiv_pattern_reprs.append(new_graph_str)
return False
def register_replacement(
search_fn: SearchFn,
replace_fn: ReplaceFn,
example_inputs: Iterable[Any],
trace_fn: TraceFn,
pass_dicts: Union[_PassDictsType, Sequence[_PassDictsType]],
extra_check: Callable[[Match], bool] = _return_true,
scalar_workaround: Union[dict[str, Union[float, int]], None] = None,
exclusive_arg_names: Sequence[str] = (),
search_fn_pattern: Union[PatternExpr, None] = None,
skip_duplicates: bool = False,
) -> bool:
"""
Create a replacement rule based on example functions that get traced
to create patterns. This supports both training and inference when
run on a joint forward+backward graph.
Args:
search_fn: traced to give original pattern
replace_fn: traced to give replacement graph
example_inputs: example inputs for initial trace
trace_fn: fwd_only or joint_fwd_bwd
pass_dict: dict of passes to register to
extra_check: additional check to run on match(using real shapes)
"""
argnames_static = [*inspect.signature(search_fn).parameters.keys()]
if inspect.ismethod(search_fn):
search_fn = _wrap_bound_method(search_fn, argnames_static)
if inspect.ismethod(replace_fn):
replace_argnames = [*inspect.signature(replace_fn).parameters.keys()]
replace_fn = _wrap_bound_method(replace_fn, replace_argnames)
def check_fn(match: Match) -> bool:
"""
Often shapes get burned into the pattern, so our initial match ran with
`ignore_types=(int, ...)`.
Recheck the match with the correct shapes.
"""
argnames = list(argnames_static)
for name in argnames:
if name not in match.kwargs:
raise RuntimeError(
f"Not all inputs to pattern found in match.kwargs. Perhaps one "
f"of the inputs is unused? argnames={argnames}, match.kwargs={match.kwargs}"
)
args = list(
torch.fx.map_arg(
[match.kwargs[name] for name in argnames], lambda n: n.meta["val"]
)
)
sym_args: list[torch.SymInt] = []
fake_mode = torch._dynamo.utils.detect_fake_mode(args)
assert fake_mode is not None
with fake_mode:
for i, grad in enumerate(requires_grad):
if isinstance(args[i], torch.Tensor):
if grad and is_integer_dtype(args[i].dtype):
return False
args[i] = torch.empty_strided(
args[i].size(),
args[i].stride(),
dtype=args[i].dtype,
device=args[i].device,
requires_grad=grad,
)
for v in itertools.chain(args[i].shape, args[i].stride()):
if isinstance(v, torch.SymInt) and all(
statically_known_true(v != a) for a in sym_args
):
sym_args.append(v)
# If we were given a pre-traced pattern then use that instead of
# retracing. Note that this means the pattern has to be independent
# of its args.
specific_pattern = search_fn_pattern
if not specific_pattern:
if sym_args:
# AOT Autograd and make fx will dedupe symbolic shape size
# accesses of sym ints that appear as inputs
# We don't want the sym_size uses to interfere with pattern matching
# so we provide them as inputs.
# Later, when we actually do the replacement, the symbolic shape
# sizes will get re-traced and added to the graph.
def search_fn_new(*args_new: Any) -> Any:
return search_fn(*args_new[len(args_new) - len(args) :])
try:
# pyrefly: ignore [bad-argument-type]
specific_graph = trace_fn(search_fn_new, sym_args + args)
except RuntimeError as e:
log_trace_failure(search_fn, e)
return False
# correct argnames in the graph
sym_arg_names = []
for i, placeholder in zip(
range(len(sym_args) + len(args)),
specific_graph.graph.nodes,
):
if i < len(sym_args):
sym_arg_names.append(placeholder.target)
continue
with specific_graph.graph.inserting_after(placeholder):
new_node = specific_graph.graph.placeholder(
argnames[i - len(sym_args)]
)
new_node.target = new_node.name
placeholder.replace_all_uses_with(new_node)
specific_graph.graph.erase_node(placeholder)
argnames = sym_arg_names + argnames
else:
try:
specific_graph = trace_fn(search_fn, args)
except RuntimeError as e:
log_trace_failure(search_fn, e)
return False
specific_pattern = fx_to_pattern(
specific_graph,
argnames=argnames,
exclusive_arg_names=exclusive_arg_names,
scalar_workaround=scalar_workaround,
)
node = match.output_nodes()[0]
assert node is not None
specific_pattern_match = specific_pattern.match(node)
if is_match(specific_pattern_match) and extra_check(specific_pattern_match):
# trace the pattern using the shapes from the user program
match.replacement_graph = trace_fn(replace_fn, args)
if len(match.nodes) == 1:
for n in match.replacement_graph.graph.nodes:
_transfer_meta(
new_meta=n.meta,
old_node=match.nodes[0],
pass_name="replacement",
)
return True
return False
def normalize_args(**kwargs: Any) -> list[Any]:
args = [kwargs.pop(name) for name in argnames_static]
for i in range(1, len(kwargs) + 1):
if f"tangents_{i}" not in kwargs:
break
args.append(kwargs.pop(f"tangents_{i}"))
assert not kwargs, f"leftover kwargs: {kwargs!r}"
return args
if trace_fn is joint_fwd_bwd:
# If inference mode is enabled during compilation, assume that we don't
# want to match on any training graph patterns
if torch.is_inference_mode_enabled():
return False
# TODO: Revisit the functionalize_rng_ops for lowmem dropout
with functorch_config.patch(functionalize_rng_ops=False):
requires_grad: list[bool] = [
isinstance(x, torch.Tensor) and x.requires_grad for x in example_inputs
]
if search_fn_pattern is None:
pattern, gm = gen_pattern_and_search_gm(
search_fn,
example_inputs,
trace_fn,
scalar_workaround,
exclusive_arg_names,
)
else:
pattern = search_fn_pattern
gm = None
for pattern_matcher_pass in (
pass_dicts if isinstance(pass_dicts, Sequence) else [pass_dicts]
):
if isinstance(pattern_matcher_pass, PatternMatcherPass):
if check_and_add_duplicate_pattern(
pattern,
gm.graph if gm else None,
pattern_matcher_pass.seen_patterns,
skip_duplicates=skip_duplicates,
):
return False
pattern = ReplacementPatternEntry(
pattern=pattern,
extra_check=check_fn,
normalize_args=normalize_args,
)
pattern.register(pass_dicts)
return pattern.pattern # type: ignore[return-value]
_serialized_patterns: OrderedSet[str] = OrderedSet()
def _serialize_pattern(
unique_name: str,
search_fn: SearchFn,
example_inputs: Sequence[Any],
trace_fn: TraceFn,
scalar_workaround: Union[dict[str, Union[float, int]], None],
) -> PatternExpr:
def get_file_template() -> str:
auto_generated_msg = textwrap.dedent(
"""\
# This is an auto-generated file. Please do not modify it by hand.
# To re-generate, run:
# cd ~/pytorch && python torchgen/fuse/gen_patterns.py
"""
)
file_template = textwrap.dedent(
"""\
# mypy: ignore-errors
# noqa: F401, E501
{msg}
import torch
import torch._inductor
import operator
aten = torch.ops.aten
prims = torch.ops.prims
"""
).format(msg=auto_generated_msg)
pattern_matcher_imports = []
for name in dir(torch._inductor.pattern_matcher):
attr = getattr(torch._inductor.pattern_matcher, name)
try:
if isinstance(attr, type) and issubclass(
attr, (PatternExpr, _TargetExpr)
):
# pyrefly: ignore [bad-argument-type]
pattern_matcher_imports.append(name)
except TypeError:
pass
formatted_imports = ",\n ".join(pattern_matcher_imports)
formatted_imports = f"from torch._inductor.pattern_matcher import (\n {formatted_imports},\n)\n"
return f"{file_template}{formatted_imports}"
if not SERIALIZED_PATTERN_PATH.is_dir():
raise RuntimeError(
f"Could not find serialized patterns directory at {SERIALIZED_PATTERN_PATH}"
)
pattern_name = search_fn.__name__
from torch._functorch import config as functorch_config
with functorch_config.patch(functionalize_rng_ops=False):
pattern = gen_pattern(search_fn, example_inputs, trace_fn, scalar_workaround)
serialized_pattern = PatternPrettyPrinter.run(pattern, output_name=unique_name)
if pattern_name not in _serialized_patterns:
write_mode = "w"
_serialized_patterns.add(pattern_name)
else:
write_mode = "a"
file_template = get_file_template()
with open(SERIALIZED_PATTERN_PATH / f"{pattern_name}.py", write_mode) as f:
if write_mode == "w":
f.write(file_template)
else:
f.write("\n\n")
f.write(serialized_pattern)
f.write("\n")
return pattern
SERIALIZED_PATTERN_PATH = Path(__file__).parent / "fx_passes" / "serialized_patterns"
# This is the set of serialized patterns that we've registered. Used by
# test_serialized_patterns_up_to_date() to ensure the patterns are up
# to date.
_known_precompiled_patterns: list[
tuple[
Any,
Iterable[Any],
Callable[[Callable[..., Any], Iterable[Any]], torch.fx.GraphModule],
Any,
PatternExpr,
]
] = []
def gen_register_replacement(
unique_name: str,
search_fn: SearchFn,
replace_fn: ReplaceFn,
example_inputs: Iterable[Any],
trace_fn: TraceFn,
pass_dicts: Union[_PassDictsType, Sequence[_PassDictsType]],
extra_check: Callable[[Match], bool] = _return_true,
scalar_workaround: Union[dict[str, Union[float, int]], None] = None,
exclusive_arg_names: Sequence[str] = (),
skip_duplicates: bool = False,
) -> None:
# Make sure the example_inputs is materialized.
example_inputs = tuple(example_inputs)
if "PYTORCH_GEN_PATTERNS" in os.environ:
pat = _serialize_pattern(
unique_name, search_fn, example_inputs, trace_fn, scalar_workaround
)
else:
pattern_name = search_fn.__name__
m = importlib.import_module(
f"torch._inductor.fx_passes.serialized_patterns.{pattern_name}"
)
if not m or not hasattr(m, unique_name):
log.warning(
"Precompiled pattern %r not found. Run torchgen/fuse/gen_patterns.py.",
unique_name,
)
pat = getattr(m, unique_name)
for arg in pytree.tree_iter(example_inputs):
if isinstance(arg, FakeTensor) and arg.constant is not None:
# This can be a problem - small fake tensors (e.g. `tensor(2)`) will
# hold onto their original constant value - and by stashing it here
# will cause a memory leak if the constant value is on GPU.
# Since this is just an optimization we can clear it out.
arg.constant = None
_known_precompiled_patterns.append(
(search_fn, example_inputs, trace_fn, scalar_workaround, pat)
)
register_replacement(
search_fn,
replace_fn,
example_inputs,
trace_fn,
pass_dicts,
extra_check,
scalar_workaround,
exclusive_arg_names,
search_fn_pattern=pat,
skip_duplicates=skip_duplicates,
)
@functorch_config.patch(functionalize_rng_ops=False) # type: ignore[misc]
def gen_pattern_and_search_gm(
search_fn: SearchFn,
example_inputs: Sequence[Any],
trace_fn: TraceFn,
scalar_workaround: Union[dict[str, Union[float, int]], None] = None,
exclusive_arg_names: Sequence[str] = (),
) -> tuple[PatternExpr, torch.fx.GraphModule]:
argnames = [*inspect.signature(search_fn).parameters.keys()]
if scalar_workaround is None:
scalar_workaround = {}
flat_inputs = []
input_idx = 0 # Positional arguments index
for argname in argnames:
if argname in scalar_workaround:
flat_inputs.append(scalar_workaround[argname])
else:
flat_inputs.append(example_inputs[input_idx])
input_idx += 1
search_gm = trace_fn(search_fn, flat_inputs)
return (
fx_to_pattern(
search_gm,
ignore_types=(int, float, list, torch.device, torch.dtype),
argnames=argnames,
scalar_workaround=scalar_workaround,
exclusive_arg_names=exclusive_arg_names,
),
search_gm,
)
def gen_pattern(
search_fn: SearchFn,
example_inputs: Sequence[Any],
trace_fn: TraceFn,
scalar_workaround: Union[dict[str, Union[float, int]], None] = None,
exclusive_arg_names: Sequence[str] = (),
) -> PatternExpr:
return gen_pattern_and_search_gm(
search_fn, example_inputs, trace_fn, scalar_workaround, exclusive_arg_names
)[0]
def register_lowering_pattern(
pattern: PatternExpr,
extra_check: Callable[[Match], bool] = _return_true,
*,
pass_dict: _PassDictsType,
prepend: bool = False,
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
"""
Register an aten to inductor IR replacement pattern. The decorated
function is saved and then called a lowering time allowing direct
pattern to inductor IR conversion.
"""
def decorator(handler: Callable[..., Any]) -> Callable[..., Any]:
assert callable(handler)
LoweringPatternEntry(
pattern=pattern, extra_check=extra_check, handler=handler
).register(pass_dict, prepend=prepend)
handler._inductor_lowering_function = True # type: ignore[attr-defined]
return handler
return decorator
def register_graph_pattern(
pattern: PatternExpr,
extra_check: Callable[[Match], bool] = _return_true,
*,
pass_dict: _PassDictsType,
prepend: bool = False,
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
"""
Register a pattern that runs a function on the FX graph, allowing
custom transformation code.
"""
def decorator(handler: Callable[..., Any]) -> Callable[..., Any]:
assert callable(handler)
GraphPatternEntry(
pattern=pattern, extra_check=extra_check, handler=handler
).register(pass_dict, prepend=prepend)
return handler
return decorator
def is_start_of_fx_graph(graph: torch.fx.Graph, node: torch.fx.Node) -> bool:
# first node in the graph
return node is next(iter(graph.nodes))
# match: copy_, relu_, _set_grad_enabled, manual_seed, _enter_autocast, etc
# doesn't match: __rshift__, etc
_mutation_op_re = re.compile(r"(?<!_)(_$|_[.]|(\b|_)(set|enter|exit|seed)(\b|_))(?!_)")
def fixme_incorrect_inductor_schema_op(op: torch._ops.OpOverload) -> bool:
if op.namespace != "inductor":
return False
# TODO - fix schema
# Dont add any more !
return op in (
torch.ops.inductor.accumulate_grad_.default,
torch.ops.inductor.resize_storage_bytes_.default,
)
def is_mutation_op(node: torch.fx.Node) -> bool:
if isinstance(
node.target, torch._ops.OpOverload
) and not fixme_incorrect_inductor_schema_op(node.target):
return node.target._schema.is_mutable
elif isinstance(
node.target, torch._higher_order_ops.auto_functionalize.AutoFunctionalized
):
return False
if node.op == "call_function":
assert callable(node.target)
if _mutation_op_re.search(node.target.__name__):
return True
elif node.op == "call_method":
assert isinstance(node.target, str)
if _mutation_op_re.search(node.target):
return True
return node.kwargs.get("out") is not None
def same_mutation_regions(a: torch.fx.Node, b: torch.fx.Node) -> bool:
assert "mutation_region_id" in a.meta
assert "mutation_region_id" in b.meta
return a.meta["mutation_region_id"] == b.meta["mutation_region_id"]
def get_mutation_region_id(graph: torch.fx.Graph, node: torch.fx.Node) -> int:
n = node
while "mutation_region_id" not in n.meta and not is_start_of_fx_graph(graph, n):
n = n.prev
mutation_region_id = n.meta.get("mutation_region_id", 0)
while n is not node:
n = n.next
if is_mutation_op(n):
mutation_region_id += 1
n.meta["mutation_region_id"] = mutation_region_id
return mutation_region_id
def should_compute_mutation_region_ids(graph: torch.fx.Graph) -> bool:
return "mutation_region_id" not in next(iter(graph.nodes)).meta
def compute_mutation_region_ids(graph: torch.fx.Graph) -> None:
mutation_region_id = 0
for nd in graph.nodes:
if is_mutation_op(nd):
mutation_region_id += 1
nd.meta["mutation_region_id"] = mutation_region_id
def _wrap_bound_method(fn: Any, argnames: list[str]) -> Any:
"""
Wrap a bound method to remove 'self' from its signature for FX tracing.
"""
def wrapper(*args: Any, **kwargs: Any) -> Any:
return fn(*args, **kwargs)
params = [
inspect.Parameter(name, inspect.Parameter.POSITIONAL_OR_KEYWORD)
for name in argnames
]
wrapper.__signature__ = inspect.Signature(params) # type: ignore[attr-defined]
return wrapper
| ReplacementPatternEntry |
python | getsentry__sentry | tests/sentry/data_secrecy/test_types.py | {
"start": 7277,
"end": 7903
} | class ____(TestCase):
def test_grant_cache_status_values(self) -> None:
"""Test that GrantCacheStatus enum has expected values"""
assert GrantCacheStatus.CACHE_MISS == "cache_miss"
assert GrantCacheStatus.NEGATIVE_CACHE == "negative_cache"
assert GrantCacheStatus.VALID_WINDOW == "valid_window"
assert GrantCacheStatus.EXPIRED_WINDOW == "expired_window"
def test_grant_cache_status_is_string_enum(self) -> None:
"""Test that GrantCacheStatus values are strings"""
for status in GrantCacheStatus:
assert isinstance(status.value, str)
| GrantCacheStatusTest |
python | ray-project__ray | python/ray/data/_internal/execution/operators/join.py | {
"start": 1631,
"end": 14812
} | class ____(StatefulShuffleAggregation):
"""Aggregation performing distributed joining of the 2 sequences,
by utilising hash-based shuffling.
Hash-based shuffling applied to 2 input sequences and employing the same
partitioning scheme allows to
- Accumulate identical keys from both sequences into the same
(numerical) partition. In other words, all keys such that
hash(key) % num_partitions = partition_id
- Perform join on individual partitions independently (from other partitions)
For actual joining Pyarrow native joining functionality is utilised, providing
incredible performance while allowing keep the data from being deserialized.
"""
def __init__(
self,
*,
aggregator_id: int,
join_type: JoinType,
left_key_col_names: Tuple[str],
right_key_col_names: Tuple[str],
target_partition_ids: List[int],
data_context: DataContext,
left_columns_suffix: Optional[str] = None,
right_columns_suffix: Optional[str] = None,
):
super().__init__(aggregator_id)
assert (
len(left_key_col_names) > 0
), "At least 1 column to join on has to be provided"
assert len(right_key_col_names) == len(
left_key_col_names
), "Number of column for both left and right join operands has to match"
assert join_type in _JOIN_TYPE_TO_ARROW_JOIN_VERB_MAP, (
f"Join type is not currently supported (got: {join_type}; " # noqa: C416
f"supported: {[jt for jt in JoinType]})" # noqa: C416
)
self._left_key_col_names: Tuple[str] = left_key_col_names
self._right_key_col_names: Tuple[str] = right_key_col_names
self._join_type: JoinType = join_type
self._left_columns_suffix: Optional[str] = left_columns_suffix
self._right_columns_suffix: Optional[str] = right_columns_suffix
# Partition builders for the partition corresponding to
# left and right input sequences respectively
self._left_input_seq_partition_builders: Dict[int, ArrowBlockBuilder] = {
partition_id: ArrowBlockBuilder() for partition_id in target_partition_ids
}
self._right_input_seq_partition_builders: Dict[int, ArrowBlockBuilder] = {
partition_id: ArrowBlockBuilder() for partition_id in target_partition_ids
}
self.data_context = data_context
def accept(self, input_seq_id: int, partition_id: int, partition_shard: Block):
assert 0 <= input_seq_id < 2
partition_builder = self._get_partition_builder(
input_seq_id=input_seq_id,
partition_id=partition_id,
)
partition_builder.add_block(partition_shard)
def finalize(self, partition_id: int) -> Block:
left_on, right_on = list(self._left_key_col_names), list(
self._right_key_col_names
)
preprocess_result_l, preprocess_result_r = self._preprocess(
left_on, right_on, partition_id
)
# Perform the join on supported columns
arrow_join_type = _JOIN_TYPE_TO_ARROW_JOIN_VERB_MAP[self._join_type]
# Perform the join on supported columns
supported = preprocess_result_l.supported_projection.join(
preprocess_result_r.supported_projection,
join_type=arrow_join_type,
keys=left_on,
right_keys=right_on,
left_suffix=self._left_columns_suffix,
right_suffix=self._right_columns_suffix,
)
# Add back unsupported columns (join type logic is in should_index_* variables)
supported = self._postprocess(
supported,
preprocess_result_l.unsupported_projection,
preprocess_result_r.unsupported_projection,
)
return supported
def _preprocess(
self,
left_on: List[str],
right_on: List[str],
partition_id: int,
) -> Tuple[_DatasetPreprocessingResult, _DatasetPreprocessingResult]:
import pyarrow as pa
left_seq_partition: pa.Table = self._get_partition_builder(
input_seq_id=0, partition_id=partition_id
).build()
right_seq_partition: pa.Table = self._get_partition_builder(
input_seq_id=1, partition_id=partition_id
).build()
# Get supported columns
supported_l, unsupported_l = self._split_unsupported_columns(left_seq_partition)
supported_r, unsupported_r = self._split_unsupported_columns(
right_seq_partition
)
# Handle joins on unsupported columns
conflicting_columns: Set[str] = set(unsupported_l.column_names) & set(left_on)
if conflicting_columns:
raise ValueError(
f"Cannot join on columns with unjoinable types. "
f"Left join key columns {conflicting_columns} have unjoinable types "
f"(map, union, list, struct, etc.) which cannot be used for join operations."
)
conflicting_columns: Set[str] = set(unsupported_r.column_names) & set(right_on)
if conflicting_columns:
raise ValueError(
f"Cannot join on columns with unjoinable types. "
f"Right join key columns {conflicting_columns} have unjoinable types "
f"(map, union, list, struct, etc.) which cannot be used for join operations."
)
# Index if we have unsupported columns
should_index_l = self._should_index_side("left", supported_l, unsupported_l)
should_index_r = self._should_index_side("right", supported_r, unsupported_r)
# Add index columns for back-referencing if we have unsupported columns
if should_index_l:
supported_l = self._append_index_column(
table=supported_l, col_name=self._index_name("left")
)
if should_index_r:
supported_r = self._append_index_column(
table=supported_r, col_name=self._index_name("right")
)
left = _DatasetPreprocessingResult(
supported_projection=supported_l,
unsupported_projection=unsupported_l,
)
right = _DatasetPreprocessingResult(
supported_projection=supported_r,
unsupported_projection=unsupported_r,
)
return left, right
def _postprocess(
self,
supported: "pa.Table",
unsupported_l: "pa.Table",
unsupported_r: "pa.Table",
) -> "pa.Table":
# Index if we have unsupported columns
should_index_l = self._index_name("left") in supported.schema.names
should_index_r = self._index_name("right") in supported.schema.names
# Add back unsupported columns (join type logic is in should_index_* variables)
if should_index_l:
supported = self._add_back_unsupported_columns(
joined_table=supported,
unsupported_table=unsupported_l,
index_col_name=self._index_name("left"),
)
if should_index_r:
supported = self._add_back_unsupported_columns(
joined_table=supported,
unsupported_table=unsupported_r,
index_col_name=self._index_name("right"),
)
return supported
def _index_name(self, suffix: str) -> str:
return f"__rd_index_level_{suffix}__"
def clear(self, partition_id: int):
self._left_input_seq_partition_builders.pop(partition_id)
self._right_input_seq_partition_builders.pop(partition_id)
def _get_partition_builder(self, *, input_seq_id: int, partition_id: int):
if input_seq_id == 0:
partition_builder = self._left_input_seq_partition_builders[partition_id]
elif input_seq_id == 1:
partition_builder = self._right_input_seq_partition_builders[partition_id]
else:
raise ValueError(
f"Unexpected inpt sequence id of '{input_seq_id}' (expected 0 or 1)"
)
return partition_builder
def _should_index_side(
self, side: str, supported_table: "pa.Table", unsupported_table: "pa.Table"
) -> bool:
"""
Determine whether to create an index column for a given side of the join.
A column is "supported" if it is "joinable", and "unsupported" otherwise.
A supported_table is a table with only "supported" columns. Index columns are
needed when we have both supported and unsupported columns in a table, and
that table's columns will appear in the final result.
Args:
side: "left" or "right" to indicate which side of the join
supported_table: Table containing ONLY joinable columns
unsupported_table: Table containing ONLY unjoinable columns
Returns:
True if an index column should be created for this side
"""
# Must have both supported and unsupported columns to need indexing.
# We cannot rely on row_count because it can return a non-zero row count
# for an empty-schema.
if not supported_table.schema or not unsupported_table.schema:
return False
# For semi/anti joins, only index the side that appears in the result
if side == "left":
# Left side appears in result for all joins except right_semi/right_anti
return self._join_type not in [JoinType.RIGHT_SEMI, JoinType.RIGHT_ANTI]
else: # side == "right"
# Right side appears in result for all joins except left_semi/left_anti
return self._join_type not in [JoinType.LEFT_SEMI, JoinType.LEFT_ANTI]
def _split_unsupported_columns(
self, table: "pa.Table"
) -> Tuple["pa.Table", "pa.Table"]:
"""
Split a PyArrow table into two tables based on column joinability.
Separates columns into supported types and unsupported types that cannot be
directly joined on but should be preserved in results.
Args:
table: Input PyArrow table to split
Returns:
Tuple of (supported_table, unsupported_table) where:
- supported_table contains columns with primitive/joinable types
- unsupported_table contains columns with complex/unjoinable types
"""
supported, unsupported = [], []
for idx in range(len(table.columns)):
col: "pa.ChunkedArray" = table.column(idx)
col_type: "pa.DataType" = col.type
if _is_pa_extension_type(col_type) or self._is_pa_join_not_supported(
col_type
):
unsupported.append(idx)
else:
supported.append(idx)
return table.select(supported), table.select(unsupported)
def _add_back_unsupported_columns(
self,
joined_table: "pa.Table",
unsupported_table: "pa.Table",
index_col_name: str,
) -> "pa.Table":
# Extract the index column array and drop the column from the joined table
i = joined_table.schema.get_field_index(index_col_name)
indices = joined_table.column(i)
joined_table = joined_table.remove_column(i)
# Project the unsupported columns using the indices and combine with joined table
projected = ArrowBlockAccessor(unsupported_table).take(indices)
return ArrowBlockAccessor(joined_table).hstack(projected)
def _append_index_column(self, table: "pa.Table", col_name: str) -> "pa.Table":
import numpy as np
import pyarrow as pa
index_col = pa.array(np.arange(table.num_rows))
return table.append_column(col_name, index_col)
def _is_pa_join_not_supported(self, type: "pa.DataType") -> bool:
"""
The latest pyarrow versions do not support joins where the
tables contain the following types below (lists,
structs, maps, unions, extension types, etc.)
Args:
type: The input type of column.
Returns:
True if the type cannot be present (non join-key) during joins.
False if the type can be present.
"""
import pyarrow as pa
pyarrow_version = get_pyarrow_version()
is_v12 = pyarrow_version >= MIN_PYARROW_VERSION_RUN_END_ENCODED_TYPES
is_v16 = pyarrow_version >= MIN_PYARROW_VERSION_VIEW_TYPES
return (
pa.types.is_map(type)
or pa.types.is_union(type)
or pa.types.is_list(type)
or pa.types.is_struct(type)
or pa.types.is_null(type)
or pa.types.is_large_list(type)
or pa.types.is_fixed_size_list(type)
or (is_v12 and pa.types.is_run_end_encoded(type))
or (
is_v16
and (
pa.types.is_binary_view(type)
or pa.types.is_string_view(type)
or pa.types.is_list_view(type)
)
)
)
| JoiningShuffleAggregation |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-bing-ads/components.py | {
"start": 959,
"end": 2641
} | class ____(RecordFilter):
"""
Filter duplicated records based on the "Id" field.
This can happen when we use predicates that could match the same record
multiple times.
e.g.
With one record like:
{"type":"RECORD","record":{"stream":"accounts","data":{"Id":151049662,
"Name":"Airbyte Plumbing"},"emitted_at":1748277607993}}
account_names in config:
[
{
"name": "Airbyte",
"operator": "Contains"
},
{
"name": "Plumbing",
"operator": "Contains"
}
],
will return the same record twice, once for each predicate.
"""
CONFIG_PREDICATES = "account_names"
def __post_init__(self, parameters: Mapping[str, Any]) -> None:
super().__post_init__(parameters)
self._seen_keys = set()
@cached_property
def _using_predicates(self) -> bool:
"""
Indicates whether the connection uses predicates.
:return: True if the connector uses predicates, False otherwise
"""
predicates = self.config.get(self.CONFIG_PREDICATES)
return bool(predicates and isinstance(predicates, list) and predicates)
def filter_records(
self, records: List[Mapping[str, Any]], stream_state: StreamState, stream_slice: Optional[StreamSlice] = None, **kwargs
) -> Iterable[Mapping[str, Any]]:
for record in records:
if not self._using_predicates:
yield record
else:
key = record["Id"]
if key not in self._seen_keys:
self._seen_keys.add(key)
yield record
@dataclass
| DuplicatedRecordsFilter |
python | wandb__wandb | wandb/sdk/lib/timer.py | {
"start": 37,
"end": 440
} | class ____:
def __init__(self) -> None:
self.start_time: float = time.time()
self.start: float = time.perf_counter()
self.stop: float = self.start
def __enter__(self) -> "Timer":
return self
def __exit__(self, *args: Any) -> None:
self.stop = time.perf_counter()
@property
def elapsed(self) -> float:
return self.stop - self.start
| Timer |
python | huggingface__transformers | tests/models/luke/test_modeling_luke.py | {
"start": 33771,
"end": 37018
} | class ____(unittest.TestCase):
@slow
def test_inference_base_model(self):
model = LukeModel.from_pretrained("studio-ousia/luke-base").eval()
model.to(torch_device)
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base", task="entity_classification")
text = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped"
" the new world number one avoid a humiliating second- round exit at Wimbledon ."
)
span = (39, 42)
encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
# move all values to device
for key in encoding:
encoding[key] = encoding[key].to(torch_device)
outputs = model(**encoding)
# Verify word hidden states
expected_shape = torch.Size((1, 42, 768))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]]
).to(torch_device)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
# Verify entity hidden states
expected_shape = torch.Size((1, 1, 768))
self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]]).to(torch_device)
torch.testing.assert_close(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
@slow
def test_inference_large_model(self):
model = LukeModel.from_pretrained("studio-ousia/luke-large").eval()
model.to(torch_device)
tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large", task="entity_classification")
text = (
"Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped"
" the new world number one avoid a humiliating second- round exit at Wimbledon ."
)
span = (39, 42)
encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
# move all values to device
for key in encoding:
encoding[key] = encoding[key].to(torch_device)
outputs = model(**encoding)
# Verify word hidden states
expected_shape = torch.Size((1, 42, 1024))
self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor(
[[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
).to(torch_device)
torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
# Verify entity hidden states
expected_shape = torch.Size((1, 1, 1024))
self.assertEqual(outputs.entity_last_hidden_state.shape, expected_shape)
expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]]).to(torch_device)
torch.testing.assert_close(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
| LukeModelIntegrationTests |
python | neetcode-gh__leetcode | python/1254-number-of-closed-islands.py | {
"start": 0,
"end": 781
} | class ____:
def closedIsland(self, grid: List[List[int]]) -> int:
r = len(grid)
c = len(grid[0])
seen = set()
def dfs(x, y):
if x < 0 or x >= r or y < 0 or y >= c or (x, y) in seen or grid[x][y] == 1:
return
seen.add((x, y))
grid[x][y] = 1
dfs(x+1, y)
dfs(x, y+1)
dfs(x-1, y)
dfs(x, y-1)
for i in range(r):
for j in range(c):
if i == 0 or j == 0 or i == r-1 or j == c-1:
dfs(i, j)
ans = 0
for i in range(r):
for j in range(c):
if grid[i][j] == 0:
dfs(i, j)
ans += 1
return ans | Solution |
python | kamyu104__LeetCode-Solutions | Python/largest-substring-between-two-equal-characters.py | {
"start": 29,
"end": 318
} | class ____(object):
def maxLengthBetweenEqualCharacters(self, s):
"""
:type s: str
:rtype: int
"""
result, lookup = -1, {}
for i, c in enumerate(s):
result = max(result, i-lookup.setdefault(c, i)-1)
return result
| Solution |
python | huggingface__transformers | src/transformers/models/xlm_roberta_xl/configuration_xlm_roberta_xl.py | {
"start": 764,
"end": 5820
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`XLMRobertaXLModel`].
It is used to instantiate a XLM_ROBERTA_XL model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
XLM_ROBERTA_XL [facebook/xlm-roberta-xl](https://huggingface.co/facebook/xlm-roberta-xl) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 250880):
Vocabulary size of the XLM_ROBERTA_XL model. Defines the number of different tokens that can be represented
by the `inputs_ids` passed when calling [`XLMRobertaXLModel`].
hidden_size (`int`, *optional*, defaults to 2560):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 36):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 10240):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 514):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 1):
The vocabulary size of the `token_type_ids` passed when calling [`XLMRobertaXLModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Examples:
```python
>>> from transformers import XLMRobertaXLConfig, XLMRobertaXLModel
>>> # Initializing a XLM_ROBERTA_XL google-bert/bert-base-uncased style configuration
>>> configuration = XLMRobertaXLConfig()
>>> # Initializing a model (with random weights) from the google-bert/bert-base-uncased style configuration
>>> model = XLMRobertaXLModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "xlm-roberta-xl"
def __init__(
self,
vocab_size=250880,
hidden_size=2560,
num_hidden_layers=36,
num_attention_heads=32,
intermediate_size=10240,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=514,
type_vocab_size=1,
initializer_range=0.02,
layer_norm_eps=1e-05,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
use_cache=True,
classifier_dropout=None,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
__all__ = ["XLMRobertaXLConfig"]
| XLMRobertaXLConfig |
python | facelessuser__soupsieve | tests/test_level1/test_pseudo_class.py | {
"start": 103,
"end": 495
} | class ____(util.TestCase):
"""Test pseudo-classes."""
def test_pseudo_class_not_implemented(self):
"""Test pseudo-class that is not implemented."""
self.assert_raises(':not-implemented', SelectorSyntaxError)
def test_unrecognized_pseudo(self):
"""Test unrecognized pseudo class."""
self.assert_raises(':before', SelectorSyntaxError)
| TestPseudoClass |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/executors/ecs/test_utils.py | {
"start": 19622,
"end": 22282
} | class ____:
"""Test camelize_dict_keys function."""
def test_camelize_flat_dict(self):
"""Test camelizing keys in a flat dictionary."""
input_dict = {"test_key": "value", "another_key": "value2"}
expected = {"testKey": "value", "anotherKey": "value2"}
assert camelize_dict_keys(input_dict) == expected
def test_camelize_nested_dict(self):
"""Test camelizing keys in a nested dictionary."""
input_dict = {"test_key": {"nested_key": "value"}}
expected = {"testKey": {"nestedKey": "value"}}
assert camelize_dict_keys(input_dict) == expected
def test_camelize_dict_with_tags(self):
"""Test that tags key is not camelized."""
input_dict = {"test_key": "value", "tags": {"custom_key": "custom_value"}}
expected = {"testKey": "value", "tags": {"custom_key": "custom_value"}}
assert camelize_dict_keys(input_dict) == expected
def test_camelize_dict_with_tags_uppercase(self):
"""Test that TAGS key (uppercase) gets camelized to tAGS."""
input_dict = {"test_key": "value", "TAGS": {"custom_key": "custom_value"}}
expected = {"testKey": "value", "tAGS": {"custom_key": "custom_value"}}
assert camelize_dict_keys(input_dict) == expected
def test_camelize_dict_with_mixed_case_tags(self):
"""Test that mixed case 'Tags' key gets camelized to tags."""
input_dict = {"test_key": "value", "Tags": {"custom_key": "custom_value"}}
expected = {"testKey": "value", "tags": {"custom_key": "custom_value"}}
assert camelize_dict_keys(input_dict) == expected
def test_camelize_empty_dict(self):
"""Test camelizing an empty dictionary."""
assert camelize_dict_keys({}) == {}
def test_camelize_dict_with_non_dict_values(self):
"""Test camelizing dictionary with non-dict values."""
input_dict = {"test_key": ["list", "values"], "another_key": 123}
expected = {"testKey": ["list", "values"], "anotherKey": 123}
assert camelize_dict_keys(input_dict) == expected
@mock.patch("airflow.providers.amazon.aws.executors.ecs.utils.camelize")
def test_camelize_dict_keys_with_mock(self, mock_camelize):
"""Test camelize_dict_keys with mocked camelize function."""
mock_camelize.side_effect = lambda x, uppercase_first_letter=False: f"camelized_{x}"
input_dict = {"test_key": {"nested_key": "value"}}
result = camelize_dict_keys(input_dict)
expected = {"camelized_test_key": {"camelized_nested_key": "value"}}
assert result == expected
mock_camelize.assert_called()
| TestCamelizeDictKeys |
python | doocs__leetcode | solution/0300-0399/0372.Super Pow/Solution.py | {
"start": 0,
"end": 226
} | class ____:
def superPow(self, a: int, b: List[int]) -> int:
mod = 1337
ans = 1
for e in b[::-1]:
ans = ans * pow(a, e, mod) % mod
a = pow(a, 10, mod)
return ans
| Solution |
python | openai__openai-python | src/openai/types/evals/create_eval_completions_run_data_source.py | {
"start": 3015,
"end": 3241
} | class ____(BaseModel):
text: str
"""The text output from the model."""
type: Literal["output_text"]
"""The type of the output text. Always `output_text`."""
| InputMessagesTemplateTemplateEvalItemContentOutputText |
python | Textualize__textual | src/textual/widget.py | {
"start": 6561,
"end": 6622
} | class ____(Exception):
"""Base widget error."""
| WidgetError |
python | tensorflow__tensorflow | tensorflow/python/framework/tensor.py | {
"start": 28615,
"end": 30812
} | class ____(type_spec.TypeSpec):
"""Describes a dense object with shape, dtype, and name."""
__slots__ = ["_shape", "_dtype", "_name"]
_component_specs = property(lambda self: self)
def __init__(self, shape, dtype=dtypes.float32, name=None):
"""Creates a TensorSpec.
Args:
shape: Value convertible to `tf.TensorShape`. The shape of the tensor.
dtype: Value convertible to `tf.DType`. The type of the tensor values.
name: Optional name for the Tensor.
Raises:
TypeError: If shape is not convertible to a `tf.TensorShape`, or dtype is
not convertible to a `tf.DType`.
"""
self._shape = tensor_shape.TensorShape(shape)
self._dtype = dtypes.as_dtype(dtype)
self._name = name
@property
def shape(self):
"""Returns the `TensorShape` that represents the shape of the tensor."""
return self._shape
@property
def dtype(self):
"""Returns the `dtype` of elements in the tensor."""
return self._dtype
@property
def name(self):
"""Returns the (optionally provided) name of the described tensor."""
return self._name
def is_compatible_with(self, spec_or_value):
return (isinstance(spec_or_value, (DenseSpec, self.value_type)) and
self._dtype.is_compatible_with(spec_or_value.dtype) and
self._shape.is_compatible_with(spec_or_value.shape))
def __repr__(self):
return "{}(shape={}, dtype={}, name={})".format(
type(self).__name__, self.shape, repr(self.dtype), repr(self.name))
def __hash__(self):
return hash((self._shape, self.dtype))
def __eq__(self, other):
# pylint: disable=protected-access
return (type(self) is type(other) and self._shape == other._shape and
self._dtype == other._dtype and self._name == other._name)
def __ne__(self, other):
return not self == other
def _serialize(self):
return (self._shape, self._dtype, self._name)
def _to_legacy_output_types(self):
return self._dtype
def _to_legacy_output_shapes(self):
return self._shape
def _to_legacy_output_classes(self):
return self.value_type
@tf_export("TensorSpec")
@type_spec_registry.register("tf.TensorSpec")
| DenseSpec |
python | django__django | tests/db_functions/text/test_repeat.py | {
"start": 186,
"end": 1276
} | class ____(TestCase):
def test_basic(self):
Author.objects.create(name="John", alias="xyz")
none_value = (
"" if connection.features.interprets_empty_strings_as_nulls else None
)
tests = (
(Repeat("name", 0), ""),
(Repeat("name", 2), "JohnJohn"),
(Repeat("name", Length("alias")), "JohnJohnJohn"),
(Repeat(Value("x"), 3), "xxx"),
(Repeat("name", None), none_value),
(Repeat(Value(None), 4), none_value),
(Repeat("goes_by", 1), none_value),
)
for function, repeated_text in tests:
with self.subTest(function=function):
authors = Author.objects.annotate(repeated_text=function)
self.assertQuerySetEqual(
authors, [repeated_text], lambda a: a.repeated_text, ordered=False
)
def test_negative_number(self):
with self.assertRaisesMessage(
ValueError, "'number' must be greater or equal to 0."
):
Repeat("name", -1)
| RepeatTests |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 49054,
"end": 54747
} | class ____(nn.Module):
def __init__(self, config: GroundingDinoConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = GroundingDinoMultiscaleDeformableAttention(
config, num_heads=config.encoder_attention_heads, n_points=config.encoder_n_points
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim, config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes=None,
spatial_shapes_list=None,
level_start_index=None,
output_attentions: bool = False,
):
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Input to the layer.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Attention mask.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings, to be added to `hidden_states`.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes (`torch.LongTensor`, *optional*):
Spatial shapes of the backbone feature maps.
spatial_shapes_list (`list[tuple[int, int]]`, *optional*):
Spatial shapes of the backbone feature maps (but as list for export compatibility).
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Apply Multi-scale Deformable Attention Module on the multi-scale feature maps.
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
position_embeddings=position_embeddings,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
return hidden_states, attn_weights
# Based on https://github.com/IDEA-Research/GroundingDINO/blob/2b62f419c292ca9c518daae55512fabc3fead4a4/groundingdino/models/GroundingDINO/utils.py#L24
def get_sine_pos_embed(
pos_tensor: torch.Tensor, num_pos_feats: int = 128, temperature: int = 10000, exchange_xy: bool = True
) -> Tensor:
"""
Generate sine position embeddings from a position tensor.
Args:
pos_tensor (torch.Tensor):
Tensor containing positions. Shape: [..., n].
num_pos_feats (`int`, *optional*, defaults to 128):
Projected shape for each float in the tensor.
temperature (`int`, *optional*, defaults to 10000):
Temperature in the sine/cosine function.
exchange_xy (`bool`, *optional*, defaults to `True`):
Exchange pos x and pos y. For example, input tensor is [x,y], the results will be [pos(y), pos(x)].
Returns:
position_embeddings (torch.Tensor): shape: [..., n * hidden_size].
"""
scale = 2 * math.pi
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device)
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
def sine_func(x: torch.Tensor):
sin_x = x * scale / dim_t
sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2)
return sin_x
pos_tensor = pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)
position_embeddings = [sine_func(x) for x in pos_tensor]
if exchange_xy:
position_embeddings[0], position_embeddings[1] = position_embeddings[1], position_embeddings[0]
position_embeddings = torch.cat(position_embeddings, dim=-1)
return position_embeddings
| GroundingDinoDeformableLayer |
python | redis__redis-py | redis/asyncio/multidb/command_executor.py | {
"start": 3562,
"end": 11856
} | class ____(BaseCommandExecutor, AsyncCommandExecutor):
def __init__(
self,
failure_detectors: List[AsyncFailureDetector],
databases: Databases,
command_retry: Retry,
failover_strategy: AsyncFailoverStrategy,
event_dispatcher: EventDispatcherInterface,
failover_attempts: int = DEFAULT_FAILOVER_ATTEMPTS,
failover_delay: float = DEFAULT_FAILOVER_DELAY,
auto_fallback_interval: float = DEFAULT_AUTO_FALLBACK_INTERVAL,
):
"""
Initialize the DefaultCommandExecutor instance.
Args:
failure_detectors: List of failure detector instances to monitor database health
databases: Collection of available databases to execute commands on
command_retry: Retry policy for failed command execution
failover_strategy: Strategy for handling database failover
event_dispatcher: Interface for dispatching events
failover_attempts: Number of failover attempts
failover_delay: Delay between failover attempts
auto_fallback_interval: Time interval in seconds between attempts to fall back to a primary database
"""
super().__init__(auto_fallback_interval)
for fd in failure_detectors:
fd.set_command_executor(command_executor=self)
self._databases = databases
self._failure_detectors = failure_detectors
self._command_retry = command_retry
self._failover_strategy_executor = DefaultFailoverStrategyExecutor(
failover_strategy, failover_attempts, failover_delay
)
self._event_dispatcher = event_dispatcher
self._active_database: Optional[Database] = None
self._active_pubsub: Optional[PubSub] = None
self._active_pubsub_kwargs = {}
self._setup_event_dispatcher()
self._schedule_next_fallback()
@property
def databases(self) -> Databases:
return self._databases
@property
def failure_detectors(self) -> List[AsyncFailureDetector]:
return self._failure_detectors
def add_failure_detector(self, failure_detector: AsyncFailureDetector) -> None:
self._failure_detectors.append(failure_detector)
@property
def active_database(self) -> Optional[AsyncDatabase]:
return self._active_database
async def set_active_database(self, database: AsyncDatabase) -> None:
old_active = self._active_database
self._active_database = database
if old_active is not None and old_active is not database:
await self._event_dispatcher.dispatch_async(
AsyncActiveDatabaseChanged(
old_active,
self._active_database,
self,
**self._active_pubsub_kwargs,
)
)
@property
def active_pubsub(self) -> Optional[PubSub]:
return self._active_pubsub
@active_pubsub.setter
def active_pubsub(self, pubsub: PubSub) -> None:
self._active_pubsub = pubsub
@property
def failover_strategy_executor(self) -> FailoverStrategyExecutor:
return self._failover_strategy_executor
@property
def command_retry(self) -> Retry:
return self._command_retry
def pubsub(self, **kwargs):
if self._active_pubsub is None:
if isinstance(self._active_database.client, RedisCluster):
raise ValueError("PubSub is not supported for RedisCluster")
self._active_pubsub = self._active_database.client.pubsub(**kwargs)
self._active_pubsub_kwargs = kwargs
async def execute_command(self, *args, **options):
async def callback():
response = await self._active_database.client.execute_command(
*args, **options
)
await self._register_command_execution(args)
return response
return await self._execute_with_failure_detection(callback, args)
async def execute_pipeline(self, command_stack: tuple):
async def callback():
async with self._active_database.client.pipeline() as pipe:
for command, options in command_stack:
pipe.execute_command(*command, **options)
response = await pipe.execute()
await self._register_command_execution(command_stack)
return response
return await self._execute_with_failure_detection(callback, command_stack)
async def execute_transaction(
self,
func: Callable[["Pipeline"], Union[Any, Awaitable[Any]]],
*watches: KeyT,
shard_hint: Optional[str] = None,
value_from_callable: bool = False,
watch_delay: Optional[float] = None,
):
async def callback():
response = await self._active_database.client.transaction(
func,
*watches,
shard_hint=shard_hint,
value_from_callable=value_from_callable,
watch_delay=watch_delay,
)
await self._register_command_execution(())
return response
return await self._execute_with_failure_detection(callback)
async def execute_pubsub_method(self, method_name: str, *args, **kwargs):
async def callback():
method = getattr(self.active_pubsub, method_name)
if iscoroutinefunction(method):
response = await method(*args, **kwargs)
else:
response = method(*args, **kwargs)
await self._register_command_execution(args)
return response
return await self._execute_with_failure_detection(callback, *args)
async def execute_pubsub_run(
self, sleep_time: float, exception_handler=None, pubsub=None
) -> Any:
async def callback():
return await self._active_pubsub.run(
poll_timeout=sleep_time,
exception_handler=exception_handler,
pubsub=pubsub,
)
return await self._execute_with_failure_detection(callback)
async def _execute_with_failure_detection(
self, callback: Callable, cmds: tuple = ()
):
"""
Execute a commands execution callback with failure detection.
"""
async def wrapper():
# On each retry we need to check active database as it might change.
await self._check_active_database()
return await callback()
return await self._command_retry.call_with_retry(
lambda: wrapper(),
lambda error: self._on_command_fail(error, *cmds),
)
async def _check_active_database(self):
"""
Checks if active a database needs to be updated.
"""
if (
self._active_database is None
or self._active_database.circuit.state != CBState.CLOSED
or (
self._auto_fallback_interval > 0
and self._next_fallback_attempt <= datetime.now()
)
):
await self.set_active_database(
await self._failover_strategy_executor.execute()
)
self._schedule_next_fallback()
async def _on_command_fail(self, error, *args):
await self._event_dispatcher.dispatch_async(
AsyncOnCommandsFailEvent(args, error)
)
async def _register_command_execution(self, cmd: tuple):
for detector in self._failure_detectors:
await detector.register_command_execution(cmd)
def _setup_event_dispatcher(self):
"""
Registers necessary listeners.
"""
failure_listener = RegisterCommandFailure(self._failure_detectors)
resubscribe_listener = ResubscribeOnActiveDatabaseChanged()
close_connection_listener = CloseConnectionOnActiveDatabaseChanged()
self._event_dispatcher.register_listeners(
{
AsyncOnCommandsFailEvent: [failure_listener],
AsyncActiveDatabaseChanged: [
close_connection_listener,
resubscribe_listener,
],
}
)
| DefaultCommandExecutor |
python | plotly__plotly.py | plotly/graph_objs/scatterpolar/_line.py | {
"start": 233,
"end": 8428
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatterpolar"
_path_str = "scatterpolar.line"
_valid_props = {
"backoff",
"backoffsrc",
"color",
"dash",
"shape",
"smoothing",
"width",
}
@property
def backoff(self):
"""
Sets the line back off from the end point of the nth line
segment (in px). This option is useful e.g. to avoid overlap
with arrowhead markers. With "auto" the lines would trim before
markers if `marker.angleref` is set to "previous".
The 'backoff' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["backoff"]
@backoff.setter
def backoff(self, val):
self["backoff"] = val
@property
def backoffsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `backoff`.
The 'backoffsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["backoffsrc"]
@backoffsrc.setter
def backoffsrc(self, val):
self["backoffsrc"] = val
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
@property
def shape(self):
"""
Determines the line shape. With "spline" the lines are drawn
using spline interpolation. The other available values
correspond to step-wise line shapes.
The 'shape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['linear', 'spline']
Returns
-------
Any
"""
return self["shape"]
@shape.setter
def shape(self, val):
self["shape"] = val
@property
def smoothing(self):
"""
Has an effect only if `shape` is set to "spline" Sets the
amount of smoothing. 0 corresponds to no smoothing (equivalent
to a "linear" shape).
The 'smoothing' property is a number and may be specified as:
- An int or float in the interval [0, 1.3]
Returns
-------
int|float
"""
return self["smoothing"]
@smoothing.setter
def smoothing(self, val):
self["smoothing"] = val
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
backoff
Sets the line back off from the end point of the nth
line segment (in px). This option is useful e.g. to
avoid overlap with arrowhead markers. With "auto" the
lines would trim before markers if `marker.angleref` is
set to "previous".
backoffsrc
Sets the source reference on Chart Studio Cloud for
`backoff`.
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the lines are
drawn using spline interpolation. The other available
values correspond to step-wise line shapes.
smoothing
Has an effect only if `shape` is set to "spline" Sets
the amount of smoothing. 0 corresponds to no smoothing
(equivalent to a "linear" shape).
width
Sets the line width (in px).
"""
def __init__(
self,
arg=None,
backoff=None,
backoffsrc=None,
color=None,
dash=None,
shape=None,
smoothing=None,
width=None,
**kwargs,
):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolar.Line`
backoff
Sets the line back off from the end point of the nth
line segment (in px). This option is useful e.g. to
avoid overlap with arrowhead markers. With "auto" the
lines would trim before markers if `marker.angleref` is
set to "previous".
backoffsrc
Sets the source reference on Chart Studio Cloud for
`backoff`.
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
shape
Determines the line shape. With "spline" the lines are
drawn using spline interpolation. The other available
values correspond to step-wise line shapes.
smoothing
Has an effect only if `shape` is set to "spline" Sets
the amount of smoothing. 0 corresponds to no smoothing
(equivalent to a "linear" shape).
width
Sets the line width (in px).
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatterpolar.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("backoff", arg, backoff)
self._set_property("backoffsrc", arg, backoffsrc)
self._set_property("color", arg, color)
self._set_property("dash", arg, dash)
self._set_property("shape", arg, shape)
self._set_property("smoothing", arg, smoothing)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | Textualize__textual | src/textual/widgets/_markdown.py | {
"start": 14513,
"end": 15183
} | class ____(MarkdownList):
"""A Bullet list Markdown block."""
DEFAULT_CSS = """
MarkdownBulletList {
margin: 0 0 1 0;
padding: 0 0;
}
MarkdownBulletList Horizontal {
height: auto;
width: 1fr;
}
MarkdownBulletList Vertical {
height: auto;
width: 1fr;
}
"""
def compose(self) -> ComposeResult:
for block in self._blocks:
if isinstance(block, MarkdownListItem):
bullet = MarkdownBullet()
bullet.symbol = block.bullet
yield Horizontal(bullet, Vertical(*block._blocks))
self._blocks.clear()
| MarkdownBulletList |
python | haoel__leetcode | algorithms/python/ConvertBSTtoGreaterTree/convertBST.py | {
"start": 151,
"end": 468
} | class ____:
def convertBST(self, root):
self.total = 0
def helper(node):
if not node: return
helper(node.right)
node.val += self.total
self.total = node.val
helper(node.left)
helper(root)
return root | Solution |
python | Textualize__textual | src/textual/lazy.py | {
"start": 113,
"end": 1966
} | class ____(Widget):
"""Wraps a widget so that it is mounted *lazily*.
Lazy widgets are mounted after the first refresh. This can be used to display some parts of
the UI very quickly, followed by the lazy widgets. Technically, this won't make anything
faster, but it reduces the time the user sees a blank screen and will make apps feel
more responsive.
Making a widget lazy is beneficial for widgets which start out invisible, such as tab panes.
Note that since lazy widgets aren't mounted immediately (by definition), they will not appear
in queries for a brief interval until they are mounted. Your code should take this into account.
Example:
```python
def compose(self) -> ComposeResult:
yield Footer()
with ColorTabs("Theme Colors", "Named Colors"):
yield Content(ThemeColorButtons(), ThemeColorsView(), id="theme")
yield Lazy(NamedColorsView())
```
"""
DEFAULT_CSS = """
Lazy {
display: none;
}
"""
def __init__(self, widget: Widget) -> None:
"""Create a lazy widget.
Args:
widget: A widget that should be mounted after a refresh.
"""
self._replace_widget = widget
super().__init__()
def compose_add_child(self, widget: Widget) -> None:
self._replace_widget.compose_add_child(widget)
async def mount_composed_widgets(self, widgets: list[Widget]) -> None:
parent = self.parent
if parent is None:
return
assert isinstance(parent, Widget)
async def mount() -> None:
"""Perform the mount and discard the lazy widget."""
await parent.mount(self._replace_widget, after=self)
await self.remove()
self.call_after_refresh(mount)
| Lazy |
python | openai__openai-python | src/openai/types/beta/threads/runs/code_interpreter_output_image.py | {
"start": 242,
"end": 408
} | class ____(BaseModel):
file_id: Optional[str] = None
"""
The [file](https://platform.openai.com/docs/api-reference/files) ID of the
image.
"""
| Image |
python | dagster-io__dagster | python_modules/libraries/dagster-openai/dagster_openai/resources.py | {
"start": 5469,
"end": 15804
} | class ____(ConfigurableResource):
"""This resource is wrapper over the
`openai library <https://github.com/openai/openai-python>`_.
By configuring this OpenAI resource, you can interact with OpenAI API
and log its usage metadata in the asset metadata.
Examples:
.. code-block:: python
import os
from dagster import AssetExecutionContext, Definitions, EnvVar, asset, define_asset_job
from dagster_openai import OpenAIResource
@asset(compute_kind="OpenAI")
def openai_asset(context: AssetExecutionContext, openai: OpenAIResource):
with openai.get_client(context) as client:
client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Say this is a test"}]
)
openai_asset_job = define_asset_job(name="openai_asset_job", selection="openai_asset")
Definitions(
assets=[openai_asset],
jobs=[openai_asset_job],
resources={
"openai": OpenAIResource(api_key=EnvVar("OPENAI_API_KEY")),
},
)
"""
api_key: str = Field(description=("OpenAI API key. See https://platform.openai.com/api-keys"))
organization: Optional[str] = Field(default=None)
project: Optional[str] = Field(default=None)
base_url: Optional[str] = Field(default=None)
_client: Client = PrivateAttr()
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
def _wrap_with_usage_metadata(
self,
api_endpoint_class: ApiEndpointClassesEnum,
context: AssetExecutionContext,
output_name: Optional[str],
):
for attribute_names in API_ENDPOINT_CLASSES_TO_ENDPOINT_METHODS_MAPPING[api_endpoint_class]:
curr = self._client.__getattribute__(api_endpoint_class.value)
# Get the second to last attribute from the attribute list to reach the method.
i = 0
while i < len(attribute_names) - 1:
curr = curr.__getattribute__(attribute_names[i])
i += 1
# Wrap the method.
curr.__setattr__(
attribute_names[i],
with_usage_metadata(
context=context,
output_name=output_name,
func=curr.__getattribute__(attribute_names[i]),
),
)
def setup_for_execution(self, context: InitResourceContext) -> None:
# Set up an OpenAI client based on the API key.
self._client = Client(
api_key=self.api_key,
organization=self.organization,
project=self.project,
base_url=self.base_url,
)
@public
@contextmanager
def get_client(
self, context: Union[AssetExecutionContext, AssetCheckExecutionContext, OpExecutionContext]
) -> Generator[Client, None, None]:
"""Yields an ``openai.Client`` for interacting with the OpenAI API.
By default, in an asset context, the client comes with wrapped endpoints
for three API resources, Completions, Embeddings and Chat,
allowing to log the API usage metadata in the asset metadata.
Note that the endpoints are not and cannot be wrapped
to automatically capture the API usage metadata in an op context.
:param context: The ``context`` object for computing the op or asset in which ``get_client`` is called.
Examples:
.. code-block:: python
from dagster import (
AssetExecutionContext,
Definitions,
EnvVar,
GraphDefinition,
OpExecutionContext,
asset,
define_asset_job,
op,
)
from dagster_openai import OpenAIResource
@op
def openai_op(context: OpExecutionContext, openai: OpenAIResource):
with openai.get_client(context) as client:
client.chat.completions.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Say this is a test"}]
)
openai_op_job = GraphDefinition(name="openai_op_job", node_defs=[openai_op]).to_job()
@asset(compute_kind="OpenAI")
def openai_asset(context: AssetExecutionContext, openai: OpenAIResource):
with openai.get_client(context) as client:
client.chat.completions.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Say this is a test"}]
)
openai_asset_job = define_asset_job(name="openai_asset_job", selection="openai_asset")
Definitions(
assets=[openai_asset],
jobs=[openai_asset_job, openai_op_job],
resources={
"openai": OpenAIResource(api_key=EnvVar("OPENAI_API_KEY")),
},
)
"""
yield from self._get_client(context=context, asset_key=None)
@public
@contextmanager
def get_client_for_asset(
self, context: AssetExecutionContext, asset_key: AssetKey
) -> Generator[Client, None, None]:
"""Yields an ``openai.Client`` for interacting with the OpenAI.
When using this method, the OpenAI API usage metadata is automatically
logged in the asset materializations associated with the provided ``asset_key``.
By default, the client comes with wrapped endpoints
for three API resources, Completions, Embeddings and Chat,
allowing to log the API usage metadata in the asset metadata.
This method can only be called when working with assets,
i.e. the provided ``context`` must be of type ``AssetExecutionContext``.
:param context: The ``context`` object for computing the asset in which ``get_client`` is called.
:param asset_key: the ``asset_key`` of the asset for which a materialization should include the metadata.
Examples:
.. code-block:: python
from dagster import (
AssetExecutionContext,
AssetKey,
AssetSpec,
Definitions,
EnvVar,
MaterializeResult,
asset,
define_asset_job,
multi_asset,
)
from dagster_openai import OpenAIResource
@asset(compute_kind="OpenAI")
def openai_asset(context: AssetExecutionContext, openai: OpenAIResource):
with openai.get_client_for_asset(context, context.asset_key) as client:
client.chat.completions.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Say this is a test"}]
)
openai_asset_job = define_asset_job(name="openai_asset_job", selection="openai_asset")
@multi_asset(specs=[AssetSpec("my_asset1"), AssetSpec("my_asset2")], compute_kind="OpenAI")
def openai_multi_asset(context: AssetExecutionContext, openai_resource: OpenAIResource):
with openai_resource.get_client_for_asset(context, asset_key=AssetKey("my_asset1")) as client:
client.chat.completions.create(
model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Say this is a test"}]
)
return (
MaterializeResult(asset_key="my_asset1", metadata={"some_key": "some_value1"}),
MaterializeResult(asset_key="my_asset2", metadata={"some_key": "some_value2"}),
)
openai_multi_asset_job = define_asset_job(
name="openai_multi_asset_job", selection="openai_multi_asset"
)
Definitions(
assets=[openai_asset, openai_multi_asset],
jobs=[openai_asset_job, openai_multi_asset_job],
resources={
"openai": OpenAIResource(api_key=EnvVar("OPENAI_API_KEY")),
},
)
"""
yield from self._get_client(context=context, asset_key=asset_key)
def _get_client(
self,
context: Union[AssetExecutionContext, AssetCheckExecutionContext, OpExecutionContext],
asset_key: Optional[AssetKey] = None,
) -> Generator[Client, None, None]:
if isinstance(context, AssetExecutionContext):
if asset_key is None:
if len(context.assets_def.keys_by_output_name.keys()) > 1:
raise DagsterInvariantViolationError(
"The argument `asset_key` must be specified for multi_asset with more than one asset."
)
asset_key = context.asset_key
output_name = context.output_for_asset_key(asset_key)
# By default, when the resource is used in an asset context,
# we wrap the methods of `openai.resources.Completions`,
# `openai.resources.Embeddings` and `openai.resources.chat.Completions`.
# This allows the usage metadata to be captured in the asset metadata.
api_endpoint_classes = [
ApiEndpointClassesEnum.COMPLETIONS,
ApiEndpointClassesEnum.CHAT,
ApiEndpointClassesEnum.EMBEDDINGS,
]
for api_endpoint_class in api_endpoint_classes:
self._wrap_with_usage_metadata(
api_endpoint_class=api_endpoint_class,
context=context,
output_name=output_name,
)
yield self._client
def teardown_after_execution(self, context: InitResourceContext) -> None:
# Close OpenAI client.
self._client.close()
| OpenAIResource |
python | getsentry__sentry | tests/sentry/incidents/endpoints/test_organization_alert_rule_index.py | {
"start": 76750,
"end": 77259
} | class ____(AlertRuleCreateEndpointTestCrashRateAlert):
method = "post"
def setUp(self) -> None:
super().setUp()
self.valid_alert_rule["dataset"] = Dataset.Metrics.value
for tag in [
SessionMRI.RAW_SESSION.value,
SessionMRI.RAW_USER.value,
"session.status",
"init",
"crashed",
]:
indexer.record(use_case_id=UseCaseID.SESSIONS, org_id=self.organization.id, string=tag)
| MetricsCrashRateAlertCreationTest |
python | altair-viz__altair | tools/datasets/npm.py | {
"start": 857,
"end": 3332
} | class ____:
"""https://www.jsdelivr.com/docs/data.jsdelivr.com#overview."""
_opener: ClassVar[OpenerDirector] = urllib.request.build_opener()
def __init__(
self,
paths: PathMap,
*,
jsdelivr: Literal["jsdelivr"] = "jsdelivr",
npm: Literal["npm"] = "npm",
package: LiteralString = "vega-datasets",
) -> None:
self.paths: PathMap = paths
self._url: NpmUrl = NpmUrl(
CDN=f"https://cdn.{jsdelivr}.net/{npm}/{package}@",
GH=f"https://cdn.{jsdelivr}.net/gh/vega/{package}@",
)
def _prefix(self, version: BranchOrTag, /) -> LiteralString:
return f"{self.url.GH if is_branch(version) else self.url.CDN}{version}/"
def dataset_base_url(self, version: BranchOrTag, /) -> LiteralString:
"""Common url prefix for all datasets derived from ``version``."""
return f"{self._prefix(version)}data/"
@property
def url(self) -> NpmUrl:
return self._url
def file(
self,
branch_or_tag: BranchOrTag,
path: str,
/,
) -> Any:
"""
Request a file from `jsdelivr` `npm`_ or `GitHub`_ endpoints.
Parameters
----------
branch_or_tag
Version of the file, see `branches`_ and `tags`_.
path
Relative filepath from the root of the repo.
.. _npm:
https://www.jsdelivr.com/documentation#id-npm
.. _GitHub:
https://www.jsdelivr.com/documentation#id-github
.. _branches:
https://github.com/vega/vega-datasets/branches
.. _tags:
https://github.com/vega/vega-datasets/tags
"""
path = path.lstrip("./")
suffix = Path(path).suffix
if suffix == ".json":
headers = {"Accept": "application/json"}
read_fn = json.load
else:
raise NotImplementedError(path, suffix)
req = Request(f"{self._prefix(branch_or_tag)}{path}", headers=headers)
with self._opener.open(req) as response:
return read_fn(response)
def datapackage(self, *, tag: LiteralString) -> DataPackage:
return datapackage.DataPackage(
self.file(tag, "datapackage.json"),
self.dataset_base_url(tag),
self.paths["metadata"],
)
def is_branch(s: BranchOrTag, /) -> bool:
return s == "main" or not (s.startswith(tuple("v" + string.digits)))
| Npm |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/pygments/formatters/latex.py | {
"start": 16227,
"end": 19306
} | class ____(Lexer):
"""
This lexer takes one lexer as argument, the lexer for the language
being formatted, and the left and right delimiters for escaped text.
First everything is scanned using the language lexer to obtain
strings and comments. All other consecutive tokens are merged and
the resulting text is scanned for escaped segments, which are given
the Token.Escape type. Finally text that is not escaped is scanned
again with the language lexer.
"""
def __init__(self, left, right, lang, **options):
self.left = left
self.right = right
self.lang = lang
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
# find and remove all the escape tokens (replace with an empty string)
# this is very similar to DelegatingLexer.get_tokens_unprocessed.
buffered = ''
insertions = []
insertion_buf = []
for i, t, v in self._find_safe_escape_tokens(text):
if t is None:
if insertion_buf:
insertions.append((len(buffered), insertion_buf))
insertion_buf = []
buffered += v
else:
insertion_buf.append((i, t, v))
if insertion_buf:
insertions.append((len(buffered), insertion_buf))
return do_insertions(insertions,
self.lang.get_tokens_unprocessed(buffered))
def _find_safe_escape_tokens(self, text):
""" find escape tokens that are not in strings or comments """
for i, t, v in self._filter_to(
self.lang.get_tokens_unprocessed(text),
lambda t: t in Token.Comment or t in Token.String
):
if t is None:
for i2, t2, v2 in self._find_escape_tokens(v):
yield i + i2, t2, v2
else:
yield i, None, v
def _filter_to(self, it, pred):
""" Keep only the tokens that match `pred`, merge the others together """
buf = ''
idx = 0
for i, t, v in it:
if pred(t):
if buf:
yield idx, None, buf
buf = ''
yield i, t, v
else:
if not buf:
idx = i
buf += v
if buf:
yield idx, None, buf
def _find_escape_tokens(self, text):
""" Find escape tokens within text, give token=None otherwise """
index = 0
while text:
a, sep1, text = text.partition(self.left)
if a:
yield index, None, a
index += len(a)
if sep1:
b, sep2, text = text.partition(self.right)
if sep2:
yield index + len(sep1), Token.Escape, b
index += len(sep1) + len(b) + len(sep2)
else:
yield index, Token.Error, sep1
index += len(sep1)
text = b
| LatexEmbeddedLexer |
python | lazyprogrammer__machine_learning_examples | rl2/mountaincar/pg_tf_random.py | {
"start": 1388,
"end": 7322
} | class ____:
def __init__(self, ft, D, hidden_layer_sizes_mean=[], hidden_layer_sizes_var=[]):
# save inputs for copy
self.ft = ft
self.D = D
self.hidden_layer_sizes_mean = hidden_layer_sizes_mean
self.hidden_layer_sizes_var = hidden_layer_sizes_var
##### model the mean #####
self.mean_layers = []
M1 = D
for M2 in hidden_layer_sizes_mean:
layer = HiddenLayer(M1, M2)
self.mean_layers.append(layer)
M1 = M2
# final layer
layer = HiddenLayer(M1, 1, lambda x: x, use_bias=False, zeros=True)
self.mean_layers.append(layer)
##### model the variance #####
self.var_layers = []
M1 = D
for M2 in hidden_layer_sizes_var:
layer = HiddenLayer(M1, M2)
self.var_layers.append(layer)
M1 = M2
# final layer
layer = HiddenLayer(M1, 1, tf.nn.softplus, use_bias=False, zeros=False)
self.var_layers.append(layer)
# gather params
self.params = []
for layer in (self.mean_layers + self.var_layers):
self.params += layer.params
# inputs and targets
self.X = tf.placeholder(tf.float32, shape=(None, D), name='X')
self.actions = tf.placeholder(tf.float32, shape=(None,), name='actions')
self.advantages = tf.placeholder(tf.float32, shape=(None,), name='advantages')
def get_output(layers):
Z = self.X
for layer in layers:
Z = layer.forward(Z)
return tf.reshape(Z, [-1])
# calculate output and cost
mean = get_output(self.mean_layers)
std = get_output(self.var_layers) + 1e-4 # smoothing
# note: the 'variance' is actually standard deviation
norm = tf.contrib.distributions.Normal(mean, std)
self.predict_op = tf.clip_by_value(norm.sample(), -1, 1)
def set_session(self, session):
self.session = session
def init_vars(self):
init_op = tf.variables_initializer(self.params)
self.session.run(init_op)
# def partial_fit(self, X, actions, advantages):
# X = np.atleast_2d(X)
# X = self.ft.transform(X)
# actions = np.atleast_1d(actions)
# advantages = np.atleast_1d(advantages)
# self.session.run(
# self.train_op,
# feed_dict={
# self.X: X,
# self.actions: actions,
# self.advantages: advantages,
# }
# )
def predict(self, X):
X = np.atleast_2d(X)
X = self.ft.transform(X)
return self.session.run(self.predict_op, feed_dict={self.X: X})
def sample_action(self, X):
p = self.predict(X)[0]
# print("action:", p)
return p
def copy(self):
clone = PolicyModel(self.ft, self.D, self.hidden_layer_sizes_mean, self.hidden_layer_sizes_mean)
clone.set_session(self.session)
clone.init_vars() # tf will complain if we don't do this
clone.copy_from(self)
return clone
def copy_from(self, other):
# collect all the ops
ops = []
my_params = self.params
other_params = other.params
for p, q in zip(my_params, other_params):
actual = self.session.run(q)
op = p.assign(actual)
ops.append(op)
# now run them all
self.session.run(ops)
def perturb_params(self):
ops = []
for p in self.params:
v = self.session.run(p)
noise = np.random.randn(*v.shape) / np.sqrt(v.shape[0]) * 5.0
if np.random.random() < 0.1:
# with probability 0.1 start completely from scratch
op = p.assign(noise)
else:
op = p.assign(v + noise)
ops.append(op)
self.session.run(ops)
def play_one(env, pmodel, gamma):
observation = env.reset()
done = False
totalreward = 0
iters = 0
while not done and iters < 2000:
# if we reach 2000, just quit, don't want this going forever
# the 200 limit seems a bit early
action = pmodel.sample_action(observation)
# oddly, the mountain car environment requires the action to be in
# an object where the actual action is stored in object[0]
observation, reward, done, info = env.step([action])
totalreward += reward
iters += 1
return totalreward
def play_multiple_episodes(env, T, pmodel, gamma, print_iters=False):
totalrewards = np.empty(T)
for i in range(T):
totalrewards[i] = play_one(env, pmodel, gamma)
if print_iters:
print(i, "avg so far:", totalrewards[:(i+1)].mean())
avg_totalrewards = totalrewards.mean()
print("avg totalrewards:", avg_totalrewards)
return avg_totalrewards
def random_search(env, pmodel, gamma):
totalrewards = []
best_avg_totalreward = float('-inf')
best_pmodel = pmodel
num_episodes_per_param_test = 3
for t in range(100):
tmp_pmodel = best_pmodel.copy()
tmp_pmodel.perturb_params()
avg_totalrewards = play_multiple_episodes(
env,
num_episodes_per_param_test,
tmp_pmodel,
gamma
)
totalrewards.append(avg_totalrewards)
if avg_totalrewards > best_avg_totalreward:
best_pmodel = tmp_pmodel
best_avg_totalreward = avg_totalrewards
return totalrewards, best_pmodel
def main():
env = gym.make('MountainCarContinuous-v0')
ft = FeatureTransformer(env, n_components=100)
D = ft.dimensions
pmodel = PolicyModel(ft, D, [], [])
# init = tf.global_variables_initializer()
session = tf.InteractiveSession()
# session.run(init)
pmodel.set_session(session)
pmodel.init_vars()
gamma = 0.99
if 'monitor' in sys.argv:
filename = os.path.basename(__file__).split('.')[0]
monitor_dir = './' + filename + '_' + str(datetime.now())
env = wrappers.Monitor(env, monitor_dir)
totalrewards, pmodel = random_search(env, pmodel, gamma)
print("max reward:", np.max(totalrewards))
# play 100 episodes and check the average
avg_totalrewards = play_multiple_episodes(env, 100, pmodel, gamma, print_iters=True)
print("avg reward over 100 episodes with best models:", avg_totalrewards)
plt.plot(totalrewards)
plt.title("Rewards")
plt.show()
if __name__ == '__main__':
main()
| PolicyModel |
python | run-llama__llama_index | llama-index-core/llama_index/core/node_parser/relational/hierarchical.py | {
"start": 2524,
"end": 8529
} | class ____(NodeParser):
"""
Hierarchical node parser.
Splits a document into a recursive hierarchy Nodes using a NodeParser.
NOTE: this will return a hierarchy of nodes in a flat list, where there will be
overlap between parent nodes (e.g. with a bigger chunk size), and child nodes
per parent (e.g. with a smaller chunk size).
For instance, this may return a list of nodes like:
- list of top-level nodes with chunk size 2048
- list of second-level nodes, where each node is a child of a top-level node,
chunk size 512
- list of third-level nodes, where each node is a child of a second-level node,
chunk size 128
"""
chunk_sizes: Optional[List[int]] = Field(
default=None,
description=(
"The chunk sizes to use when splitting documents, in order of level."
),
)
node_parser_ids: List[str] = Field(
default_factory=list,
description=(
"List of ids for the node parsers to use when splitting documents, "
+ "in order of level (first id used for first level, etc.)."
),
)
node_parser_map: Dict[str, NodeParser] = Field(
description="Map of node parser id to node parser.",
)
@classmethod
def from_defaults(
cls,
chunk_sizes: Optional[List[int]] = None,
chunk_overlap: int = 20,
node_parser_ids: Optional[List[str]] = None,
node_parser_map: Optional[Dict[str, NodeParser]] = None,
include_metadata: bool = True,
include_prev_next_rel: bool = True,
callback_manager: Optional[CallbackManager] = None,
) -> "HierarchicalNodeParser":
callback_manager = callback_manager or CallbackManager([])
if node_parser_ids is None:
if chunk_sizes is None:
chunk_sizes = [2048, 512, 128]
node_parser_ids = [f"chunk_size_{chunk_size}" for chunk_size in chunk_sizes]
node_parser_map = {}
for chunk_size, node_parser_id in zip(chunk_sizes, node_parser_ids):
node_parser_map[node_parser_id] = SentenceSplitter(
chunk_size=chunk_size,
callback_manager=callback_manager,
chunk_overlap=chunk_overlap,
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
)
else:
if chunk_sizes is not None:
raise ValueError("Cannot specify both node_parser_ids and chunk_sizes.")
if node_parser_map is None:
raise ValueError(
"Must specify node_parser_map if using node_parser_ids."
)
return cls(
chunk_sizes=chunk_sizes,
node_parser_ids=node_parser_ids,
node_parser_map=node_parser_map,
include_metadata=include_metadata,
include_prev_next_rel=include_prev_next_rel,
callback_manager=callback_manager,
)
@classmethod
def class_name(cls) -> str:
return "HierarchicalNodeParser"
def _recursively_get_nodes_from_nodes(
self,
nodes: List[BaseNode],
level: int,
show_progress: bool = False,
) -> List[BaseNode]:
"""Recursively get nodes from nodes."""
if level >= len(self.node_parser_ids):
raise ValueError(
f"Level {level} is greater than number of text "
f"splitters ({len(self.node_parser_ids)})."
)
# first split current nodes into sub-nodes
nodes_with_progress = get_tqdm_iterable(
nodes, show_progress, "Parsing documents into nodes"
)
sub_nodes = []
for node in nodes_with_progress:
cur_sub_nodes = self.node_parser_map[
self.node_parser_ids[level]
].get_nodes_from_documents([node])
# add parent relationship from sub node to parent node
# add child relationship from parent node to sub node
# NOTE: Only add relationships if level > 0, since we don't want to add
# relationships for the top-level document objects that we are splitting
if level > 0:
for sub_node in cur_sub_nodes:
_add_parent_child_relationship(
parent_node=node,
child_node=sub_node,
)
sub_nodes.extend(cur_sub_nodes)
# now for each sub-node, recursively split into sub-sub-nodes, and add
if level < len(self.node_parser_ids) - 1:
sub_sub_nodes = self._recursively_get_nodes_from_nodes(
sub_nodes,
level + 1,
show_progress=show_progress,
)
else:
sub_sub_nodes = []
return sub_nodes + sub_sub_nodes
def get_nodes_from_documents(
self,
documents: Sequence[Document],
show_progress: bool = False,
**kwargs: Any,
) -> List[BaseNode]:
"""Parse document into nodes."""
with self.callback_manager.event(
CBEventType.NODE_PARSING, payload={EventPayload.DOCUMENTS: documents}
) as event:
all_nodes: List[BaseNode] = []
documents_with_progress = get_tqdm_iterable(
documents, show_progress, "Parsing documents into nodes"
)
# TODO: a bit of a hack rn for tqdm
for doc in documents_with_progress:
nodes_from_doc = self._recursively_get_nodes_from_nodes([doc], 0)
all_nodes.extend(nodes_from_doc)
event.on_end(payload={EventPayload.NODES: all_nodes})
return all_nodes
# Unused abstract method
def _parse_nodes(
self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any
) -> List[BaseNode]:
return list(nodes)
| HierarchicalNodeParser |
python | doocs__leetcode | solution/3700-3799/3718.Smallest Missing Multiple of K/Solution.py | {
"start": 0,
"end": 202
} | class ____:
def missingMultiple(self, nums: List[int], k: int) -> int:
s = set(nums)
for i in count(1):
x = k * i
if x not in s:
return x
| Solution |
python | pydantic__pydantic | pydantic-core/tests/test_json.py | {
"start": 12122,
"end": 16977
} | class ____(metaclass=BedReprMeta):
def __repr__(self):
raise ValueError('bad repr')
def __hash__(self):
return 1
def test_bad_repr():
b = BadRepr()
error_msg = '^Unable to serialize unknown type: <unprintable BedReprMeta object>$'
with pytest.raises(PydanticSerializationError, match=error_msg):
to_jsonable_python(b)
assert to_jsonable_python(b, serialize_unknown=True) == '<Unserializable BadRepr object>'
with pytest.raises(PydanticSerializationError, match=error_msg):
to_json(b)
assert to_json(b, serialize_unknown=True) == b'"<Unserializable BadRepr object>"'
def test_inf_nan_allow():
v = SchemaValidator(core_schema.float_schema(allow_inf_nan=True))
assert v.validate_json('Infinity') == float('inf')
assert v.validate_json('-Infinity') == float('-inf')
assert v.validate_json('NaN') == IsFloatNan()
def test_partial_parse():
with pytest.raises(ValueError, match='EOF while parsing a string at line 1 column 15'):
from_json('["aa", "bb", "c')
assert from_json('["aa", "bb", "c', allow_partial=True) == ['aa', 'bb']
with pytest.raises(ValueError, match='EOF while parsing a string at line 1 column 15'):
from_json(b'["aa", "bb", "c')
assert from_json(b'["aa", "bb", "c', allow_partial=True) == ['aa', 'bb']
def test_json_bytes_base64_round_trip():
data = b'\xd8\x07\xc1Tx$\x91F%\xf3\xf3I\xca\xd8@\x0c\xee\xc3\xab\xff\x7f\xd3\xcd\xcd\xf9\xc2\x10\xe4\xa1\xb01e'
encoded_std = b'"2AfBVHgkkUYl8/NJythADO7Dq/9/083N+cIQ5KGwMWU="'
encoded_url = b'"2AfBVHgkkUYl8_NJythADO7Dq_9_083N-cIQ5KGwMWU="'
assert to_json(data, bytes_mode='base64') == encoded_url
v = SchemaValidator(core_schema.bytes_schema(), config=CoreConfig(val_json_bytes='base64'))
assert v.validate_json(encoded_url) == data
assert v.validate_json(encoded_std) == data
with pytest.raises(ValidationError) as exc:
v.validate_json('"wrong!"')
[details] = exc.value.errors()
assert details['type'] == 'bytes_invalid_encoding'
assert to_json({'key': data}, bytes_mode='base64') == b'{"key":' + encoded_url + b'}'
v = SchemaValidator(
core_schema.dict_schema(keys_schema=core_schema.str_schema(), values_schema=core_schema.bytes_schema()),
config=CoreConfig(val_json_bytes='base64'),
)
assert v.validate_json(b'{"key":' + encoded_url + b'}') == {'key': data}
def test_json_bytes_base64_no_padding():
v = SchemaValidator(core_schema.bytes_schema(), config=CoreConfig(val_json_bytes='base64'))
base_64_without_padding = 'bm8tcGFkZGluZw'
assert v.validate_json(json.dumps(base_64_without_padding)) == b'no-padding'
def test_json_bytes_base64_invalid():
v = SchemaValidator(core_schema.bytes_schema(), config=CoreConfig(val_json_bytes='base64'))
wrong_input = 'wrong!'
with pytest.raises(ValidationError) as exc_info:
v.validate_json(json.dumps(wrong_input))
assert exc_info.value.errors(include_url=False, include_context=False) == [
{
'type': 'bytes_invalid_encoding',
'loc': (),
'msg': f'Data should be valid base64: Invalid symbol {ord("!")}, offset {len(wrong_input) - 1}.',
'input': wrong_input,
}
]
def test_json_bytes_hex_round_trip():
data = b'hello'
encoded = b'"68656c6c6f"'
assert to_json(data, bytes_mode='hex') == encoded
v = SchemaValidator(core_schema.bytes_schema(), config=CoreConfig(val_json_bytes='hex'))
assert v.validate_json(encoded) == data
assert to_json({'key': data}, bytes_mode='hex') == b'{"key":"68656c6c6f"}'
v = SchemaValidator(
core_schema.dict_schema(keys_schema=core_schema.str_schema(), values_schema=core_schema.bytes_schema()),
config=CoreConfig(val_json_bytes='hex'),
)
assert v.validate_json('{"key":"68656c6c6f"}') == {'key': data}
def test_json_bytes_hex_invalid():
v = SchemaValidator(core_schema.bytes_schema(), config=CoreConfig(val_json_bytes='hex'))
wrong_input = 'a'
with pytest.raises(ValidationError) as exc_info:
v.validate_json(json.dumps(wrong_input))
assert exc_info.value.errors(include_url=False, include_context=False) == [
{
'type': 'bytes_invalid_encoding',
'loc': (),
'msg': 'Data should be valid hex: Odd number of digits',
'input': wrong_input,
}
]
wrong_input = 'ag'
with pytest.raises(ValidationError) as exc_info:
v.validate_json(json.dumps(wrong_input))
assert exc_info.value.errors(include_url=False, include_context=False) == [
{
'type': 'bytes_invalid_encoding',
'loc': (),
'msg': "Data should be valid hex: Invalid character 'g' at position 1",
'input': wrong_input,
}
]
| BadRepr |
python | kamyu104__LeetCode-Solutions | Python/number-of-ways-to-form-a-target-string-given-a-dictionary.py | {
"start": 167,
"end": 781
} | class ____(object):
def numWays(self, words, target):
"""
:type words: List[str]
:type target: str
:rtype: int
"""
MOD = 10**9+7
dp = [0]*(len(target)+1)
dp[0] = 1
for i in xrange(len(words[0])):
count = collections.Counter(w[i] for w in words)
for j in reversed(xrange(len(target))):
dp[j+1] += dp[j]*count[target[j]] % MOD
return dp[-1] % MOD
# Time: O(l * (w + n)), l is the length of a word, w is the number of words, n is the length of target
# Space: O(n)
import collections
| Solution |
python | google__pytype | pytype/rewrite/abstract/functions.py | {
"start": 11103,
"end": 16808
} | class ____:
"""Representation of a Python function signature.
Attributes:
name: Name of the function.
param_names: A tuple of positional parameter names. This DOES include
positional-only parameters and does NOT include keyword-only parameters.
posonly_count: Number of positional-only parameters.
varargs_name: Name of the varargs parameter. (The "args" in *args)
kwonly_params: Tuple of keyword-only parameters.
E.g. ("x", "y") for "def f(a, *, x, y=2)". These do NOT appear in
param_names. Ordered like in the source file.
kwargs_name: Name of the kwargs parameter. (The "kwargs" in **kwargs)
defaults: Dictionary, name to value, for all parameters with default values.
annotations: A dictionary of type annotations. (string to type)
posonly_params: Tuple of positional-only parameters (i.e., the first
posonly_count names in param_names).
"""
def __init__(
self,
ctx: base.ContextType,
name: str,
param_names: tuple[str, ...],
*,
posonly_count: int = 0,
varargs_name: str | None = None,
kwonly_params: tuple[str, ...] = (),
kwargs_name: str | None = None,
defaults: Mapping[str, base.BaseValue] = datatypes.EMPTY_MAP,
annotations: Mapping[str, base.BaseValue] = datatypes.EMPTY_MAP,
):
self._ctx = ctx
self.name = name
self.param_names = param_names
self.posonly_count = posonly_count
self.varargs_name = varargs_name
self.kwonly_params = kwonly_params
self.kwargs_name = kwargs_name
self.defaults = defaults
self.annotations = annotations
@property
def posonly_params(self):
return self.param_names[:self.posonly_count]
@classmethod
def from_code(
cls, ctx: base.ContextType, name: str, code: blocks.OrderedCode,
) -> 'Signature':
"""Builds a signature from a code object."""
nonstararg_count = code.argcount + code.kwonlyargcount
if code.has_varargs():
varargs_name = code.varnames[nonstararg_count]
kwargs_pos = nonstararg_count + 1
else:
varargs_name = None
kwargs_pos = nonstararg_count
if code.has_varkeywords():
kwargs_name = code.varnames[kwargs_pos]
else:
kwargs_name = None
return cls(
ctx=ctx,
name=name,
param_names=tuple(code.varnames[:code.argcount]),
posonly_count=code.posonlyargcount,
varargs_name=varargs_name,
kwonly_params=tuple(code.varnames[code.argcount:nonstararg_count]),
kwargs_name=kwargs_name,
# TODO(b/241479600): Fill these in.
defaults={},
annotations={},
)
@classmethod
def from_pytd(
cls, ctx: base.ContextType, name: str, pytd_sig: pytd.Signature,
) -> 'Signature':
"""Builds a signature from a pytd signature."""
param_names = []
posonly_count = 0
kwonly_params = []
for p in pytd_sig.params:
if p.kind == pytd.ParameterKind.KWONLY:
kwonly_params.append(p.name)
continue
param_names.append(p.name)
posonly_count += p.kind == pytd.ParameterKind.POSONLY
defaults = {
p.name: ctx.abstract_converter.pytd_type_to_value(p.type).instantiate()
for p in pytd_sig.params if p.optional}
pytd_annotations = [
(p.name, p.type)
for p in pytd_sig.params + (pytd_sig.starargs, pytd_sig.starstarargs)
if p is not None]
pytd_annotations.append(('return', pytd_sig.return_type))
annotations = {name: ctx.abstract_converter.pytd_type_to_value(typ)
for name, typ in pytd_annotations}
return cls(
ctx=ctx,
name=name,
param_names=tuple(param_names),
posonly_count=posonly_count,
varargs_name=pytd_sig.starargs and pytd_sig.starargs.name,
kwonly_params=tuple(kwonly_params),
kwargs_name=pytd_sig.starstarargs and pytd_sig.starstarargs.name,
defaults=defaults,
annotations=annotations,
)
def __repr__(self):
    """Renders the signature as a ``def`` line, e.g. ``def f(x: int) -> str``."""
    pp = self._ctx.errorlog.pretty_printer

    def fmt(param_name):
        # Formats one parameter as "name[: type][ = default]".
        if param_name in self.annotations:
            typ = pp.print_type_of_instance(self.annotations[param_name])
            s = f'{param_name}: {typ}'
        else:
            s = param_name
        if param_name in self.defaults:
            default = pp.show_constant(self.defaults[param_name])
            return f'{s} = {default}'
        else:
            return s

    params = [fmt(param_name) for param_name in self.param_names]
    if self.posonly_count:
        # PEP 570 marker: everything before '/' is positional-only.
        params.insert(self.posonly_count, '/')
    if self.varargs_name:
        params.append('*' + fmt(self.varargs_name))
    elif self.kwonly_params:
        # PEP 3102 bare '*' separator when there is no *args.
        params.append('*')
    # NOTE(review): kwonly params are not run through fmt(), so their
    # annotations/defaults are not shown -- possibly intentional; confirm.
    params.extend(self.kwonly_params)
    if self.kwargs_name:
        params.append('**' + fmt(self.kwargs_name))
    if 'return' in self.annotations:
        ret = pp.print_type_of_instance(self.annotations['return'])
    else:
        ret = 'Any'
    return f'def {self.name}({", ".join(params)}) -> {ret}'
def map_args(self, args: Args[_FrameT]) -> MappedArgs[_FrameT]:
    """Maps call arguments onto this signature's parameter names."""
    # TODO(b/241479600): Implement this properly, with error detection.
    argdict = _ArgMapper(self._ctx, args, self).map_args()
    return MappedArgs(signature=self, argdict=argdict, frame=args.frame)
def make_fake_args(self) -> MappedArgs[FrameType]:
    """Builds a MappedArgs with a fake (instantiated) value per parameter."""
    all_names = list(self.param_names + self.kwonly_params)
    for star_name in (self.varargs_name, self.kwargs_name):
        if star_name:
            all_names.append(star_name)
    fallback = self._ctx.consts.Any
    # Unannotated parameters fall back to the Any constant.
    argdict = {
        n: self.annotations.get(n, fallback).instantiate().to_variable()
        for n in all_names
    }
    return MappedArgs(signature=self, argdict=argdict)
| Signature |
python | fluentpython__example-code | 20-descriptor/bulkfood/bulkfood_v3.py | {
"start": 1080,
"end": 1410
} | class ____:
# Class-level descriptor instances: Quantity (defined elsewhere in this
# module) manages per-instance storage of these attributes -- presumably
# validating assigned values; see the Quantity definition to confirm.
weight = Quantity('weight')  # <5>
price = Quantity('price')  # <6>
def __init__(self, description, weight, price):  # <7>
    """Store the description plus descriptor-managed weight and price."""
    self.description = description
    self.weight = weight
    self.price = price
def subtotal(self):
    """Line total: weight times unit price."""
    return self.weight * self.price
# END LINEITEM_V3
| LineItem |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/ArrowItem.py | {
"start": 115,
"end": 5412
} | class ____(QtWidgets.QGraphicsPathItem):
"""
For displaying scale-invariant arrows.
For arrows pointing to a location on a curve, see CurveArrow
"""
def __init__(self, parent=None, **opts):
    """
    Arrows can be initialized with any keyword arguments accepted by
    the setStyle() method.
    """
    self.opts = {}
    QtWidgets.QGraphicsPathItem.__init__(self, parent)

    # 'size' and 'width' are accepted as aliases for the canonical
    # 'headLen' / 'headWidth' options.
    if 'size' in opts:
        opts['headLen'] = opts['size']
    if 'width' in opts:
        opts['headWidth'] = opts['width']
    pos = opts.pop('pos', (0, 0))

    defaultOpts = {
        'pxMode': True,
        'angle': -150,  ## If the angle is 0, the arrow points left
        'headLen': 20,
        'headWidth': None,
        'tipAngle': 25,
        'baseAngle': 0,
        'tailLen': None,
        'tailWidth': 3,
        'pen': (200,200,200),
        'brush': (50,50,200),
    }
    # Caller-supplied options override the defaults above.
    defaultOpts.update(opts)

    self.setStyle(**defaultOpts)
    # for backward compatibility
    self.setPos(*pos)
def setStyle(self, **opts):
    """
    Changes the appearance of the arrow.
    All arguments are optional:

    ====================== =================================================
    **Keyword Arguments:**
    angle                  Orientation of the arrow in degrees. Default is
                           0; arrow pointing to the left.
    headLen                Length of the arrow head, from tip to base.
                           default=20
    headWidth              Width of the arrow head at its base. If
                           headWidth is specified, it overrides tipAngle.
    tipAngle               Angle of the tip of the arrow in degrees. Smaller
                           values make a 'sharper' arrow. default=25
    baseAngle              Angle of the base of the arrow head. Default is
                           0, which means that the base of the arrow head
                           is perpendicular to the arrow tail.
    tailLen                Length of the arrow tail, measured from the base
                           of the arrow head to the end of the tail. If
                           this value is None, no tail will be drawn.
                           default=None
    tailWidth              Width of the tail. default=3
    pen                    The pen used to draw the outline of the arrow.
    brush                  The brush used to fill the arrow.
    pxMode                 If True, then the arrow is drawn as a fixed size
                           regardless of the scale of its parents (including
                           the ViewBox zoom level).
    ====================== =================================================

    Raises:
        KeyError: if an unknown style option is passed.
    """
    arrowOpts = ['headLen', 'tipAngle', 'baseAngle', 'tailLen', 'tailWidth', 'headWidth']
    allowedOpts = ['angle', 'pen', 'brush', 'pxMode'] + arrowOpts
    # Merge options, remembering whether anything actually changed.
    needUpdate = False
    for k,v in opts.items():
        if k not in allowedOpts:
            raise KeyError('Invalid arrow style option "%s"' % k)
        if self.opts.get(k) != v:
            needUpdate = True
        self.opts[k] = v

    # Skip the (comparatively expensive) path rebuild when nothing changed.
    if not needUpdate:
        return

    # Rebuild the arrow path, rotated by the requested angle.
    opt = dict([(k,self.opts[k]) for k in arrowOpts if k in self.opts])
    tr = QtGui.QTransform()
    tr.rotate(self.opts['angle'])
    self.path = tr.map(fn.makeArrowPath(**opt))

    self.setPath(self.path)
    self.setPen(fn.mkPen(self.opts['pen']))
    self.setBrush(fn.mkBrush(self.opts['brush']))

    # pxMode keeps the arrow a fixed on-screen size irrespective of any
    # parent/ViewBox transforms.
    if self.opts['pxMode']:
        self.setFlags(self.flags() | self.GraphicsItemFlag.ItemIgnoresTransformations)
    else:
        self.setFlags(self.flags() & ~self.GraphicsItemFlag.ItemIgnoresTransformations)
def paint(self, p, *args):
    """Paints the arrow with antialiasing enabled."""
    p.setRenderHint(QtGui.QPainter.RenderHint.Antialiasing)
    super().paint(p, *args)

    #p.setPen(fn.mkPen('r'))
    #p.setBrush(fn.mkBrush(None))
    #p.drawRect(self.boundingRect())
def shape(self):
    """Returns the arrow path itself as the item's collision shape."""
    #if not self.opts['pxMode']:
        #return QtWidgets.QGraphicsPathItem.shape(self)
    return self.path
## dataBounds and pixelPadding methods are provided to ensure ViewBox can
## properly auto-range
def dataBounds(self, ax, frac, orthoRange=None):
    """Returns the [min, max] data range along axis `ax` for auto-ranging."""
    pw = 0
    pen = self.pen()
    if not pen.isCosmetic():
        # Pad by the pen's reach beyond the path; 0.7072 ~= sqrt(2)/2.
        pw = pen.width() * 0.7072
    if self.opts['pxMode']:
        # Fixed-pixel arrows occupy a point in data space; padding is
        # reported via pixelPadding() instead.
        return [0,0]
    else:
        br = self.boundingRect()
        if ax == 0:
            return [br.left()-pw, br.right()+pw]
        else:
            return [br.top()-pw, br.bottom()+pw]
def pixelPadding(self):
    """Returns extra screen-pixel padding needed around the data bounds."""
    pad = 0
    if self.opts['pxMode']:
        # In pixel mode the whole arrow is pixel-sized: pad by its diagonal.
        br = self.boundingRect()
        pad += hypot(br.width(), br.height())
    pen = self.pen()
    if pen.isCosmetic():
        # Cosmetic pens draw in pixels; 0.7072 ~= sqrt(2)/2 of the width.
        pad += max(1, pen.width()) * 0.7072
    return pad
| ArrowItem |
python | pydantic__pydantic | pydantic/v1/dataclasses.py | {
"start": 661,
"end": 8295
} | class ____:
x: int
ValidatedM = pydantic.dataclasses.dataclass(M)
```
We indeed still want to support equality, hashing, repr, ... as if it were the stdlib one!
```py
assert isinstance(ValidatedM(x=1), M)
assert ValidatedM(x=1) == M(x=1)
```
This means we **don't want to create a new dataclass that inherits from it**
The trick is to create a wrapper around `M` that will act as a proxy to trigger
validation without altering default `M` behaviour.
"""
import copy
import dataclasses
import sys
from contextlib import contextmanager
from functools import wraps
try:
from functools import cached_property
except ImportError:
# cached_property available only for python3.8+
pass
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, Generator, Optional, Type, TypeVar, Union, overload
from typing_extensions import dataclass_transform
from pydantic.v1.class_validators import gather_all_validators
from pydantic.v1.config import BaseConfig, ConfigDict, Extra, get_config
from pydantic.v1.error_wrappers import ValidationError
from pydantic.v1.errors import DataclassTypeError
from pydantic.v1.fields import Field, FieldInfo, Required, Undefined
from pydantic.v1.main import create_model, validate_model
from pydantic.v1.utils import ClassAttribute
if TYPE_CHECKING:
from pydantic.v1.main import BaseModel
from pydantic.v1.typing import CallableGenerator, NoArgAnyCallable
DataclassT = TypeVar('DataclassT', bound='Dataclass')
DataclassClassOrWrapper = Union[Type['Dataclass'], 'DataclassProxy']
class Dataclass:
    """Typing-only stand-in describing the shape of a pydantic dataclass.

    All method bodies are ``pass`` -- this class exists so annotations can
    refer to the attributes added by stdlib dataclasses and by pydantic.
    """
    # stdlib attributes
    __dataclass_fields__: ClassVar[Dict[str, Any]]
    __dataclass_params__: ClassVar[Any]  # in reality `dataclasses._DataclassParams`
    __post_init__: ClassVar[Callable[..., None]]

    # Added by pydantic
    __pydantic_run_validation__: ClassVar[bool]
    __post_init_post_parse__: ClassVar[Callable[..., None]]
    __pydantic_initialised__: ClassVar[bool]
    __pydantic_model__: ClassVar[Type[BaseModel]]
    __pydantic_validate_values__: ClassVar[Callable[['Dataclass'], None]]
    __pydantic_has_field_info_default__: ClassVar[bool]  # whether a `pydantic.Field` is used as default value

    def __init__(self, *args: object, **kwargs: object) -> None:
        pass

    @classmethod
    def __get_validators__(cls: Type['Dataclass']) -> 'CallableGenerator':
        pass

    @classmethod
    def __validate__(cls: Type['DataclassT'], v: Any) -> 'DataclassT':
        pass
# Public API of this module.
__all__ = [
    'dataclass',
    'set_validation',
    'create_pydantic_model_from_dataclass',
    'is_builtin_dataclass',
    'make_dataclass_validator',
]

_T = TypeVar('_T')
# Typing overloads for `dataclass`, mirroring stdlib usage: with no class
# argument it returns a decorator, with one it returns the decorated class.
if sys.version_info >= (3, 10):
    # Python 3.10+ stdlib dataclasses accept `kw_only`, so expose it here.
    @dataclass_transform(field_specifiers=(dataclasses.field, Field))
    @overload
    def dataclass(
        *,
        init: bool = True,
        repr: bool = True,
        eq: bool = True,
        order: bool = False,
        unsafe_hash: bool = False,
        frozen: bool = False,
        config: Union[ConfigDict, Type[object], None] = None,
        validate_on_init: Optional[bool] = None,
        use_proxy: Optional[bool] = None,
        kw_only: bool = ...,
    ) -> Callable[[Type[_T]], 'DataclassClassOrWrapper']:
        ...

    @dataclass_transform(field_specifiers=(dataclasses.field, Field))
    @overload
    def dataclass(
        _cls: Type[_T],
        *,
        init: bool = True,
        repr: bool = True,
        eq: bool = True,
        order: bool = False,
        unsafe_hash: bool = False,
        frozen: bool = False,
        config: Union[ConfigDict, Type[object], None] = None,
        validate_on_init: Optional[bool] = None,
        use_proxy: Optional[bool] = None,
        kw_only: bool = ...,
    ) -> 'DataclassClassOrWrapper':
        ...

else:
    # Pre-3.10: identical overloads minus the `kw_only` parameter.
    @dataclass_transform(field_specifiers=(dataclasses.field, Field))
    @overload
    def dataclass(
        *,
        init: bool = True,
        repr: bool = True,
        eq: bool = True,
        order: bool = False,
        unsafe_hash: bool = False,
        frozen: bool = False,
        config: Union[ConfigDict, Type[object], None] = None,
        validate_on_init: Optional[bool] = None,
        use_proxy: Optional[bool] = None,
    ) -> Callable[[Type[_T]], 'DataclassClassOrWrapper']:
        ...

    @dataclass_transform(field_specifiers=(dataclasses.field, Field))
    @overload
    def dataclass(
        _cls: Type[_T],
        *,
        init: bool = True,
        repr: bool = True,
        eq: bool = True,
        order: bool = False,
        unsafe_hash: bool = False,
        frozen: bool = False,
        config: Union[ConfigDict, Type[object], None] = None,
        validate_on_init: Optional[bool] = None,
        use_proxy: Optional[bool] = None,
    ) -> 'DataclassClassOrWrapper':
        ...
@dataclass_transform(field_specifiers=(dataclasses.field, Field))
def dataclass(
    _cls: Optional[Type[_T]] = None,
    *,
    init: bool = True,
    repr: bool = True,
    eq: bool = True,
    order: bool = False,
    unsafe_hash: bool = False,
    frozen: bool = False,
    config: Union[ConfigDict, Type[object], None] = None,
    validate_on_init: Optional[bool] = None,
    use_proxy: Optional[bool] = None,
    kw_only: bool = False,
) -> Union[Callable[[Type[_T]], 'DataclassClassOrWrapper'], 'DataclassClassOrWrapper']:
    """
    Like the python standard lib dataclasses but with type validation.
    The result is either a pydantic dataclass that will validate input data
    or a wrapper that will trigger validation around a stdlib dataclass
    to avoid modifying it directly
    """
    the_config = get_config(config)

    def wrap(cls: Type[Any]) -> 'DataclassClassOrWrapper':
        # Decide whether to wrap the class in a proxy instead of rebuilding
        # it. Unless forced via `use_proxy`, proxy existing stdlib
        # dataclasses whose dir() matches their base -- presumably detecting
        # classes with no extra members; see the module docstring. TODO confirm.
        should_use_proxy = (
            use_proxy
            if use_proxy is not None
            else (
                is_builtin_dataclass(cls)
                and (cls.__bases__[0] is object or set(dir(cls)) == set(dir(cls.__bases__[0])))
            )
        )
        if should_use_proxy:
            dc_cls_doc = ''
            dc_cls = DataclassProxy(cls)
            default_validate_on_init = False
        else:
            dc_cls_doc = cls.__doc__ or ''  # needs to be done before generating dataclass
            if sys.version_info >= (3, 10):
                dc_cls = dataclasses.dataclass(
                    cls,
                    init=init,
                    repr=repr,
                    eq=eq,
                    order=order,
                    unsafe_hash=unsafe_hash,
                    frozen=frozen,
                    kw_only=kw_only,
                )
            else:
                # `kw_only` is unsupported before 3.10.
                dc_cls = dataclasses.dataclass(  # type: ignore
                    cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen
                )
            default_validate_on_init = True

        should_validate_on_init = default_validate_on_init if validate_on_init is None else validate_on_init
        _add_pydantic_validation_attributes(cls, the_config, should_validate_on_init, dc_cls_doc)
        dc_cls.__pydantic_model__.__try_update_forward_refs__(**{cls.__name__: cls})
        return dc_cls

    # Support both bare `@dataclass` and parameterized `@dataclass(...)`.
    if _cls is None:
        return wrap
    return wrap(_cls)
@contextmanager
def set_validation(cls: Type['DataclassT'], value: bool) -> Generator[Type['DataclassT'], None, None]:
    """Temporarily force `__pydantic_run_validation__` on `cls` to `value`.

    Restores the previous flag on exit, even if the body raises.
    """
    previous = cls.__pydantic_run_validation__
    try:
        cls.__pydantic_run_validation__ = value
        yield cls
    finally:
        cls.__pydantic_run_validation__ = previous
| M |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/leaf_adds_virtual/package.py | {
"start": 216,
"end": 486
} | class ____(Package):
# The original declared `url` twice; the first assignment was dead code.
# Spack packages conventionally declare `homepage` alongside `url`.
homepage = "http://www.example.com/"
url = "http://www.example.com/2.0.tar.gz"

version("2.0", md5="abcdef1234567890abcdef1234567890")
version("1.0", md5="abcdef1234567890abcdef1234567890")

# Only the newer version pulls in the `blas` virtual dependency.
depends_on("blas", when="@2.0")
| LeafAddsVirtual |
python | pytransitions__transitions | tests/test_async.py | {
"start": 941,
"end": 28883
} | class ____(TestTransitions):
@staticmethod
async def await_false():
    # Async condition helper: resolves to False after a short delay.
    await asyncio.sleep(0.1)
    return False

@staticmethod
async def await_true():
    # Async condition helper: resolves to True after a short delay.
    await asyncio.sleep(0.1)
    return True

@staticmethod
async def cancel_soon():
    # Sleeps long enough that the surrounding test is expected to cancel it;
    # reaching the raise means cancellation never happened.
    await asyncio.sleep(1)
    raise TimeoutError("Callback was not cancelled!")

@staticmethod
def raise_value_error():
    # Synchronous callback that always fails.
    raise ValueError("ValueError raised.")

@staticmethod
def synced_true():
    # Synchronous condition helper.
    return True

@staticmethod
async def call_delayed(func, time):
    # Awaits `func` after `time` seconds; used to sequence concurrent triggers.
    await asyncio.sleep(time)
    await func()
def setUp(self):
    # Reuse the shared TestTransitions fixtures, then swap in the async
    # machine class and a minimal A->B machine used by most tests below.
    super(TestAsync, self).setUp()
    self.machine_cls = AsyncMachine  # type: Type[AsyncMachine]
    self.machine = self.machine_cls(states=['A', 'B', 'C'], transitions=[['go', 'A', 'B']], initial='A')
def test_new_state_in_enter_callback(self):
    # A state added from within an on_enter callback must be usable
    # immediately by a nested trigger.
    machine = self.machine_cls(states=['A', 'B'], initial='A')

    async def on_enter_B():
        state = self.machine_cls.state_cls(name='C')
        machine.add_state(state)
        await machine.to_C()

    machine.on_enter_B(on_enter_B)
    # NOTE(review): no final assertion that the machine reached 'C';
    # consider asserting machine.is_C() here.
    asyncio.run(machine.to_B())
def test_dynamic_model_state_attribute(self):
    # With model_attribute='status', transitions must update `status` while
    # leaving an unrelated pre-existing `state` attribute untouched.
    class Model:
        def __init__(self):
            self.status = None
            self.state = 'some_value'

    m = self.machine_cls(Model(), states=['A', 'B'], initial='A', model_attribute='status')
    self.assertEqual(m.model.status, 'A')
    self.assertEqual(m.model.state, 'some_value')
    m.add_transition('move', 'A', 'B')
    asyncio.run(m.model.move())
    self.assertEqual(m.model.status, 'B')
    self.assertEqual(m.model.state, 'some_value')
def test_async_machine_cb(self):
    # Coroutine callbacks registered machine-wide (after_state_change)
    # must be awaited before the trigger coroutine finishes.
    mock = MagicMock()

    async def async_process():
        await asyncio.sleep(0.1)
        mock()

    m = self.machine
    m.after_state_change = [async_process]
    asyncio.run(m.go())
    self.assertEqual(m.state, 'B')
    self.assertTrue(mock.called)
def test_async_condition(self):
m = self.machine
m.add_transition('proceed', 'A', 'C', conditions=self.await_true, unless=self.await_false)
asyncio.run(m.proceed())
self.assertEqual(m.state, 'C')
def test_async_enter_exit(self):
enter_mock = MagicMock()
exit_mock = MagicMock()
async def async_enter():
await asyncio.sleep(0.1)
enter_mock()
async def async_exit():
await asyncio.sleep(0.1)
exit_mock()
m = self.machine
m.on_exit_A(async_exit)
m.on_enter_B(async_enter)
asyncio.run(m.go())
self.assertTrue(exit_mock.called)
self.assertTrue(enter_mock.called)
def test_sync_conditions(self):
mock = MagicMock()
def sync_process():
mock()
m = self.machine
m.add_transition('proceed', 'A', 'C', conditions=self.synced_true, after=sync_process)
asyncio.run(m.proceed())
self.assertEqual(m.state, 'C')
self.assertTrue(mock.called)
def test_multiple_models(self):
m1 = self.machine_cls(states=['A', 'B', 'C'], initial='A', name="m1")
m2 = self.machine_cls(states=['A'], initial='A', name='m2')
m1.add_transition(trigger='go', source='A', dest='B', before=self.cancel_soon)
m1.add_transition(trigger='fix', source='A', dest='C', after=self.cancel_soon)
m1.add_transition(trigger='check', source='C', dest='B', conditions=self.await_false)
m1.add_transition(trigger='reset', source='C', dest='A')
m2.add_transition(trigger='go', source='A', dest=None, conditions=m1.is_C, after=m1.reset)
async def run():
_ = asyncio.gather(m1.go(), # should block before B
self.call_delayed(m1.fix, 0.05), # should cancel task and go to C
self.call_delayed(m1.check, 0.07), # should exit before m1.fix
self.call_delayed(m2.go, 0.1)) # should cancel m1.fix
assert m1.is_A()
asyncio.run(run())
def test_async_callback_arguments(self):
async def process(should_fail=True):
if should_fail is not False:
raise ValueError("should_fail has been set")
self.machine.on_enter_B(process)
with self.assertRaises(ValueError):
asyncio.run(self.machine.go())
asyncio.run(self.machine.to_A())
asyncio.run(self.machine.go(should_fail=False))
def test_async_callback_event_data(self):
state_a = self.machine_cls.state_cls('A')
state_b = self.machine_cls.state_cls('B')
def sync_condition(event_data):
return event_data.state == state_a
async def async_conditions(event_data):
return event_data.state == state_a
async def async_callback(event_data):
self.assertEqual(event_data.state, state_b)
def sync_callback(event_data):
self.assertEqual(event_data.state, state_b)
m = self.machine_cls(states=[state_a, state_b], initial='A', send_event=True)
m.add_transition('go', 'A', 'B', conditions=[sync_condition, async_conditions],
after=[sync_callback, async_callback])
m.add_transition('go', 'B', 'A', conditions=sync_condition)
asyncio.run(m.go())
self.assertTrue(m.is_B())
asyncio.run(m.go())
self.assertTrue(m.is_B())
def test_async_invalid_triggers(self):
asyncio.run(self.machine.to_B())
with self.assertRaises(MachineError):
asyncio.run(self.machine.go())
self.machine.ignore_invalid_triggers = True
asyncio.run(self.machine.go())
self.assertTrue(self.machine.is_B())
def test_async_dispatch(self):
model1 = DummyModel()
model2 = DummyModel()
model3 = DummyModel()
machine = self.machine_cls(model=None, states=['A', 'B', 'C'], transitions=[['go', 'A', 'B'],
['go', 'B', 'C'],
['go', 'C', 'A']], initial='A')
machine.add_model(model1)
machine.add_model(model2, initial='B')
machine.add_model(model3, initial='C')
asyncio.run(machine.dispatch('go'))
self.assertTrue(model1.is_B())
self.assertEqual('C', model2.state)
self.assertEqual(machine.initial, model3.state)
def test_queued(self):
states = ['A', 'B', 'C', 'D']
# Define with list of dictionaries
async def change_state(machine):
# type: (AsyncMachine) -> None
self.assertEqual(machine.state, 'A')
if machine.has_queue:
await machine.run(machine=machine)
self.assertEqual(machine.state, 'A')
else:
with self.assertRaises(MachineError):
await machine.run(machine=machine)
async def raise_machine_error(event_data):
# type: (AsyncEventData) -> None
self.assertTrue(event_data.machine.has_queue)
await event_data.model.to_A()
event_data.machine._queued = False
await event_data.model.to_C()
async def raise_exception(event_data):
# type: (AsyncEventData) -> None
await event_data.model.to_C()
raise ValueError("Clears queue")
transitions = [
{'trigger': 'walk', 'source': 'A', 'dest': 'B', 'before': change_state},
{'trigger': 'run', 'source': 'B', 'dest': 'C'},
{'trigger': 'sprint', 'source': 'C', 'dest': 'D'}
] # type: Sequence[AsyncTransitionConfig]
m = self.machine_cls(states=states, transitions=transitions, initial='A')
asyncio.run(m.walk(machine=m))
self.assertEqual('B', m.state)
m = self.machine_cls(states=states, transitions=transitions, initial='A', queued=True)
asyncio.run(m.walk(machine=m))
self.assertEqual('C', m.state)
m = self.machine_cls(states=states, initial='A', queued=True, send_event=True,
before_state_change=raise_machine_error)
with self.assertRaises(MachineError):
asyncio.run(m.to_C())
m = self.machine_cls(states=states, initial='A', queued=True, send_event=True)
m.add_transition('go', 'A', 'B', after='go')
m.add_transition('go', 'B', 'C', before=raise_exception)
with self.assertRaises(ValueError):
asyncio.run(m.go())
self.assertEqual('B', m.state)
def test_model_queue(self):
mock = MagicMock()
def check_mock():
self.assertTrue(mock.called)
m1 = DummyModel()
m2 = DummyModel()
async def run():
transitions = [
{'trigger': 'mock', 'source': ['A', 'B'], 'dest': 'B', 'after': mock},
{'trigger': 'delayed', 'source': 'A', 'dest': 'B', 'before': partial(asyncio.sleep, 0.1)},
{'trigger': 'check', 'source': 'B', 'dest': 'A', 'after': check_mock},
{'trigger': 'error', 'source': 'B', 'dest': 'C', 'before': self.raise_value_error}
] # type: Sequence[AsyncTransitionConfig]
m = self.machine_cls(model=[m1, m2], states=['A', 'B', 'C'], transitions=transitions, initial='A',
queued='model')
# call m1.delayed and m2.mock should be called immediately
# m1.check should be delayed until after m1.delayed
await asyncio.gather(m1.delayed(), self.call_delayed(m1.check, 0.02), self.call_delayed(m2.mock, 0.04))
self.assertTrue(m1.is_A())
self.assertTrue(m2.is_B())
mock.reset_mock()
with self.assertRaises(ValueError):
# m1.error raises an error which should cancel m1.to_A but not m2.mock and m2.check
await asyncio.gather(m1.to_A(), m2.to_A(),
self.call_delayed(m1.delayed, 0.01), self.call_delayed(m2.delayed, 0.01),
self.call_delayed(m1.error, 0.02), self.call_delayed(m1.to_A, 0.03),
self.call_delayed(m2.mock, 0.03), self.call_delayed(m2.check, 0.04))
await asyncio.sleep(0.05) # give m2 events time to finish
self.assertTrue(m1.is_B())
self.assertTrue(m2.is_A())
asyncio.run(run())
def test_queued_remove(self):
def remove_model(event_data):
event_data.machine.remove_model(event_data.model)
def check_queue(expect, event_data):
self.assertEqual(expect, len(event_data.machine._transition_queue_dict[id(event_data.model)]))
transitions = [
{'trigger': 'go', 'source': 'A', 'dest': 'B', 'after': partial(asyncio.sleep, 0.1)},
{'trigger': 'go', 'source': 'B', 'dest': 'C'},
{'trigger': 'remove', 'source': 'B', 'dest': None, 'prepare': ['to_A', 'to_C'],
'before': partial(check_queue, 4), 'after': remove_model},
{'trigger': 'remove_queue', 'source': 'B', 'dest': None, 'prepare': ['to_A', 'to_C'],
'before': partial(check_queue, 3), 'after': remove_model}
] # type: Sequence[AsyncTransitionConfig]
async def run():
m1 = DummyModel()
m2 = DummyModel()
self.machine_cls = HierarchicalAsyncMachine
m = self.machine_cls(model=[m1, m2], states=['A', 'B', 'C'], transitions=transitions,
initial='A', queued=True, send_event=True)
await asyncio.gather(m1.go(), m2.go(),
self.call_delayed(m1.remove, 0.02), self.call_delayed(m2.go, 0.04))
_ = repr(m._transition_queue_dict) # check whether _DictionaryMock returns a valid representation
self.assertTrue(m1.is_B())
self.assertTrue(m2.is_C())
m.remove_model(m2)
self.assertNotIn(id(m1), m._transition_queue_dict)
self.assertNotIn(id(m2), m._transition_queue_dict)
m1 = DummyModel()
m2 = DummyModel()
m = self.machine_cls(model=[m1, m2], states=['A', 'B', 'C'], transitions=transitions,
initial='A', queued='model', send_event=True)
await asyncio.gather(m1.go(), m2.go(),
self.call_delayed(m1.remove_queue, 0.02), self.call_delayed(m2.go, 0.04))
self.assertTrue(m1.is_B())
self.assertTrue(m2.is_C())
m.remove_model(m2)
asyncio.run(run())
def test_async_timeout(self):
timeout_called = MagicMock()
@add_state_features(AsyncTimeout)
class TimeoutMachine(self.machine_cls): # type: ignore
pass
states = ['A',
{'name': 'B', 'timeout': 0.2, 'on_timeout': ['to_C', timeout_called]},
{'name': 'C', 'timeout': 0, 'on_timeout': 'to_D'}, 'D']
m = TimeoutMachine(states=states, initial='A')
with self.assertRaises(AttributeError):
m.add_state('Fail', timeout=1)
async def run():
await m.to_B()
await asyncio.sleep(0.1)
self.assertTrue(m.is_B()) # timeout shouldn't be triggered
await m.to_A() # cancel timeout
self.assertTrue(m.is_A())
await m.to_B()
await asyncio.sleep(0.3)
self.assertTrue(m.is_C()) # now timeout should have been processed
self.assertTrue(timeout_called.called)
m.get_state('C').timeout = 0.05
await m.to_B()
await asyncio.sleep(0.3)
self.assertTrue(m.is_D())
self.assertEqual(2, timeout_called.call_count)
asyncio.run(run())
def test_timeout_cancel(self):
error_mock = MagicMock()
timout_mock = MagicMock()
long_op_mock = MagicMock()
@add_state_features(AsyncTimeout)
class TimeoutMachine(self.machine_cls): # type: ignore
async def on_enter_B(self):
await asyncio.sleep(0.2)
long_op_mock() # should never be called
async def handle_timeout(self):
timout_mock()
await self.to_A()
machine = TimeoutMachine(states=["A", {"name": "B", "timeout": 0.1, "on_timeout": "handle_timeout"}],
initial="A", on_exception=error_mock)
async def run():
await machine.to_B()
assert timout_mock.called
assert error_mock.call_count == 1 # should only be one CancelledError
assert not long_op_mock.called
assert machine.is_A()
asyncio.run(run())
@skipIf(sys.version_info < (3, 11), "Cancel requires Python 3.11+")
def test_user_cancel(self):
machine = self.machine_cls(states=['A', 'B'], initial='A', before_state_change=self.cancel_soon)
async def run1():
try:
await asyncio.wait_for(machine.to_B(), timeout=0.5)
except asyncio.TimeoutError:
return # expected case
assert False, "Expected a TimeoutError"
async def run2():
async def raise_timeout():
raise asyncio.TimeoutError("My custom timeout")
try:
machine.add_transition('cancelled', 'A', 'B', before=raise_timeout)
await machine.cancelled()
except asyncio.TimeoutError:
return # expected case
assert False, "Expected a TimeoutError"
asyncio.run(run1())
assert machine.is_A()
asyncio.run(run2())
def test_queued_timeout_cancel(self):
error_mock = MagicMock()
timout_mock = MagicMock()
long_op_mock = MagicMock()
@add_state_features(AsyncTimeout)
class TimeoutMachine(self.machine_cls): # type: ignore
async def long_op(self, event_data):
await self.to_C()
await self.to_D()
await self.to_E()
await asyncio.sleep(1)
long_op_mock()
async def handle_timeout(self, event_data):
timout_mock()
raise TimeoutError()
async def handle_error(self, event_data):
if isinstance(event_data.error, CancelledError):
if error_mock.called:
raise RuntimeError()
error_mock()
raise event_data.error
machine = TimeoutMachine(states=["A", "C", "D", "E",
{"name": "B", "timeout": 0.1, "on_timeout": "handle_timeout",
"on_enter": "long_op"}],
initial="A", queued=True, send_event=True, on_exception="handle_error")
async def run():
await machine.to_B()
assert timout_mock.called
assert error_mock.called
assert not long_op_mock.called
assert machine.is_B()
with self.assertRaises(RuntimeError):
await machine.to_B()
asyncio.run(run())
def test_callback_order(self):
finished = []
class Model:
async def before(self):
await asyncio.sleep(0.1)
finished.append(2)
async def after(self):
await asyncio.sleep(0.1)
finished.append(3)
async def after_state_change():
finished.append(4)
async def before_state_change():
finished.append(1)
model = Model()
m = self.machine_cls(
model=model,
states=['start', 'end'],
after_state_change=after_state_change,
before_state_change=before_state_change,
initial='start',
)
m.add_transition('transit', 'start', 'end', after='after', before='before')
asyncio.run(model.transit())
assert finished == [1, 2, 3, 4]
def test_task_cleanup(self):
models = [DummyModel() for i in range(100)]
m = self.machine_cls(model=models, states=['A', 'B'], initial='A')
self.assertEqual(0, len(m.async_tasks)) # check whether other tests were already leaking tasks
async def run():
for model in m.models:
await model.to_B()
asyncio.run(run())
self.assertEqual(0, len(m.async_tasks))
def test_on_exception_callback(self):
mock = MagicMock()
def on_exception(event_data):
self.assertIsInstance(event_data.error, (ValueError, MachineError))
mock()
m = self.machine_cls(states=['A', 'B'], initial='A', transitions=[['go', 'A', 'B']], send_event=True,
after_state_change=partial(self.stuff.this_raises, ValueError))
async def run():
with self.assertRaises(ValueError):
await m.to_B()
m.on_exception.append(on_exception)
await m.to_B()
await m.go()
self.assertTrue(mock.called)
self.assertEqual(2, mock.call_count)
self.assertTrue(mock.called)
asyncio.run(run())
def test_on_exception_finalize(self):
mock = MagicMock()
def finalize():
mock()
raise RuntimeError("Could not finalize")
m = self.machine_cls(states=['A', 'B'], initial='A', finalize_event=finalize)
async def run():
self.assertTrue(await m.to_B())
self.assertTrue(mock.called)
asyncio.run(run())
def test_weakproxy_model(self):
d = DummyModel()
pr = weakref.proxy(d)
self.machine_cls(pr, states=['A', 'B'], transitions=[['go', 'A', 'B']], initial='A')
asyncio.run(pr.go())
self.assertTrue(pr.is_B())
def test_may_transition_with_auto_transitions(self):
states = ['A', 'B', 'C']
d = DummyModel()
self.machine_cls(model=d, states=states, initial='A')
async def run():
assert await d.may_to_A()
assert await d.may_trigger("to_A")
assert await d.may_to_B()
assert await d.may_trigger("to_B")
assert await d.may_to_C()
assert await d.may_trigger("to_C")
asyncio.run(run())
def test_machine_may_transitions(self):
states = ['A', 'B', 'C']
m = self.machine_cls(states=states, initial='A', auto_transitions=False)
m.add_transition('walk', 'A', 'B', conditions=[lambda: False])
m.add_transition('stop', 'B', 'C')
m.add_transition('run', 'A', 'C')
async def run():
assert not await m.may_walk()
assert not await m.may_trigger("walk")
assert not await m.may_stop()
assert not await m.may_trigger("stop")
assert await m.may_run()
assert await m.may_trigger("run")
await m.run()
assert not await m.may_run()
assert not await m.may_trigger("run")
assert not await m.may_stop()
assert not await m.may_trigger("stop")
assert not await m.may_walk()
assert not await m.may_trigger("walk")
asyncio.run(run())
def test_may_transition_with_invalid_state(self):
states = ['A', 'B', 'C']
d = DummyModel()
m = self.machine_cls(model=d, states=states, initial='A', auto_transitions=False)
m.add_transition('walk', 'A', 'UNKNOWN')
async def run():
assert not await d.may_walk()
assert not await d.may_trigger("walk")
asyncio.run(run())
def test_may_transition_internal(self):
states = ['A', 'B', 'C']
d = DummyModel()
_ = self.machine_cls(model=d, states=states, transitions=[["go", "A", "B"], ["wait", "B", None]],
initial='A', auto_transitions=False)
async def run():
assert await d.may_go()
assert await d.may_trigger("go")
assert not await d.may_wait()
assert not await d.may_trigger("wait")
await d.go()
assert not await d.may_go()
assert not await d.may_trigger("go")
assert await d.may_wait()
assert await d.may_trigger("wait")
asyncio.run(run())
def test_may_transition_with_exception(self):
stuff = Stuff(machine_cls=self.machine_cls, extra_kwargs={"send_event": True})
stuff.machine.add_transition(trigger="raises", source="A", dest="B", prepare=partial(stuff.this_raises, RuntimeError("Prepare Exception")))
stuff.machine.add_transition(trigger="raises", source="B", dest="C", conditions=partial(stuff.this_raises, ValueError("Condition Exception")))
stuff.machine.add_transition(trigger="works", source="A", dest="B")
def process_exception(event_data):
assert event_data.error is not None
assert event_data.transition is not None
assert event_data.event.name == "raises"
assert event_data.machine == stuff.machine
async def run():
with self.assertRaises(RuntimeError):
await stuff.may_raises()
with self.assertRaises(RuntimeError):
await stuff.may_trigger("raises")
assert stuff.is_A()
assert await stuff.may_works()
assert await stuff.may_trigger("works")
assert await stuff.works()
with self.assertRaises(ValueError):
await stuff.may_raises()
with self.assertRaises(ValueError):
await stuff.may_trigger("raises")
assert stuff.is_B()
stuff.machine.on_exception.append(process_exception)
assert not await stuff.may_raises()
assert not await stuff.may_trigger("raises")
assert await stuff.to_A()
assert not await stuff.may_raises()
assert not await stuff.may_trigger("raises")
asyncio.run(run())
def test_on_final(self):
final_mock = MagicMock()
machine = self.machine_cls(states=['A', {'name': 'B', 'final': True}], on_final=final_mock, initial='A')
async def run():
self.assertFalse(final_mock.called)
await machine.to_B()
self.assertTrue(final_mock.called)
await machine.to_A()
self.assertEqual(1, final_mock.call_count)
await machine.to_B()
self.assertEqual(2, final_mock.call_count)
asyncio.run(run())
def test_custom_transition(self):
class MyTransition(self.machine_cls.transition_cls): # type: ignore
def __init__(self, source, dest, conditions=None, unless=None, before=None,
after=None, prepare=None, my_int=None, my_none=None, my_str=None, my_dict=None):
super(MyTransition, self).__init__(source, dest, conditions, unless, before, after, prepare)
self.my_int = my_int
self.my_none = my_none
self.my_str = my_str
self.my_dict = my_dict
class MyMachine(self.machine_cls): # type: ignore
transition_cls = MyTransition
a_transition = {
"trigger": "go", "source": "B", "dest": "A",
"my_int": 42, "my_str": "foo", "my_dict": {"bar": "baz"}
}
transitions = [
["go", "A", "B"],
a_transition
]
m = MyMachine(states=["A", "B"], transitions=transitions, initial="A")
m.add_transition("reset", "*", "A",
my_int=23, my_str="foo2", my_none=None, my_dict={"baz": "bar"})
async def run():
assert await m.go()
trans = m.get_transitions("go", "B") # type: List[MyTransition]
assert len(trans) == 1
assert trans[0].my_str == a_transition["my_str"]
assert trans[0].my_int == a_transition["my_int"]
assert trans[0].my_dict == a_transition["my_dict"]
assert trans[0].my_none is None
trans = m.get_transitions("reset", "A")
assert len(trans) == 1
assert trans[0].my_str == "foo2"
assert trans[0].my_int == 23
assert trans[0].my_dict == {"baz": "bar"}
assert trans[0].my_none is None
asyncio.run(run())
def test_deprecation_warnings(self):
import warnings
async def run():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
machine = self.machine_cls(states=['A', 'B'], initial='A')
await machine.cancel_running_transitions(self)
self.assertEqual(len(w), 0)
# msg is deprecated, should not be used
await machine.cancel_running_transitions(self, msg="Custom message")
self.assertEqual(len(w), 1)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
self.assertEqual(len(w), 0)
# should use cancel_running_transitions instead
await machine.switch_model_context(self)
self.assertEqual(len(w), 1)
asyncio.run(run())
@skipIf(asyncio is None or (pgv is None and gv is None), "AsyncGraphMachine requires asyncio and (py)gaphviz")
| TestAsync |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.