language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | openai__openai-python | src/openai/resources/vector_stores/file_batches.py | {
"start": 33628,
"end": 34196
} | class ____:
def __init__(self, file_batches: AsyncFileBatches) -> None:
self._file_batches = file_batches
self.create = async_to_streamed_response_wrapper(
file_batches.create,
)
self.retrieve = async_to_streamed_response_wrapper(
file_batches.retrieve,
)
self.cancel = async_to_streamed_response_wrapper(
file_batches.cancel,
)
self.list_files = async_to_streamed_response_wrapper(
file_batches.list_files,
)
| AsyncFileBatchesWithStreamingResponse |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 21301,
"end": 22647
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self, name: str, key_id: str, private_key: str, issuer_id: str, vendor: str, start_date: str
):
"""Airbyte Source for Appstore Singer.
Documentation can be found at https://docs.airbyte.com/integrations/sources/appstore
Args:
name (str): The name of the destination.
key_id (str): Appstore Key ID. See the docs for more information on how to obtain this key.
private_key (str): Appstore Private Key. See the docs for more information on how to obtain this key.
issuer_id (str): Appstore Issuer ID. See the docs for more information on how to obtain this ID.
vendor (str): Appstore Vendor ID. See the docs for more information on how to obtain this ID.
start_date (str): UTC date and time in the format 2017-01-25T00:00:00Z. Any data before this date will not be replicated.
"""
self.key_id = check.str_param(key_id, "key_id")
self.private_key = check.str_param(private_key, "private_key")
self.issuer_id = check.str_param(issuer_id, "issuer_id")
self.vendor = check.str_param(vendor, "vendor")
self.start_date = check.str_param(start_date, "start_date")
super().__init__("Appstore Singer", name)
| AppstoreSingerSource |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 73584,
"end": 77456
} | class ____(_ValidatorFunctionSchema, total=False):
type: Required[Literal['function-after']]
def no_info_after_validator_function(
function: NoInfoValidatorFunction,
schema: CoreSchema,
*,
ref: str | None = None,
json_schema_input_schema: CoreSchema | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> AfterValidatorFunctionSchema:
"""
Returns a schema that calls a validator function after validating, no `info` argument is provided, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
def fn(v: str) -> str:
return v + 'world'
func_schema = core_schema.no_info_after_validator_function(fn, core_schema.str_schema())
schema = core_schema.typed_dict_schema({'a': core_schema.typed_dict_field(func_schema)})
v = SchemaValidator(schema)
assert v.validate_python({'a': b'hello '}) == {'a': 'hello world'}
```
Args:
function: The validator function to call after the schema is validated
schema: The schema to validate before the validator function
ref: optional unique identifier of the schema, used to reference the schema in other places
json_schema_input_schema: The core schema to be used to generate the corresponding JSON Schema input type
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='function-after',
function={'type': 'no-info', 'function': function},
schema=schema,
ref=ref,
json_schema_input_schema=json_schema_input_schema,
metadata=metadata,
serialization=serialization,
)
def with_info_after_validator_function(
function: WithInfoValidatorFunction,
schema: CoreSchema,
*,
field_name: str | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> AfterValidatorFunctionSchema:
"""
Returns a schema that calls a validator function after validation, the function is called with
an `info` argument, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
def fn(v: str, info: core_schema.ValidationInfo) -> str:
assert info.data is not None
assert info.field_name is not None
return v + 'world'
func_schema = core_schema.with_info_after_validator_function(
function=fn, schema=core_schema.str_schema()
)
schema = core_schema.typed_dict_schema({'a': core_schema.typed_dict_field(func_schema)})
v = SchemaValidator(schema)
assert v.validate_python({'a': b'hello '}) == {'a': 'hello world'}
```
Args:
function: The validator function to call after the schema is validated
schema: The schema to validate before the validator function
field_name: The name of the field this validator is applied to, if any (deprecated)
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
if field_name is not None:
warnings.warn(
'The `field_name` argument on `with_info_after_validator_function` is deprecated, it will be passed to the function through `ValidationState` instead.',
DeprecationWarning,
stacklevel=2,
)
return _dict_not_none(
type='function-after',
function=_dict_not_none(type='with-info', function=function, field_name=field_name),
schema=schema,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| AfterValidatorFunctionSchema |
python | dagster-io__dagster | examples/docs_projects/project_dspy/dspy_modules/puzzle.py | {
"start": 377,
"end": 514
} | class ____:
"""Represents a group in a Connections puzzle."""
name: str
color: str
words: List[str]
@dataclass
| PuzzleGroup |
python | readthedocs__readthedocs.org | readthedocs/redirects/querysets.py | {
"start": 668,
"end": 6754
} | class ____(NoReprQuerySet, models.QuerySet):
"""Redirects take into account their own privacy_level setting."""
use_for_related_fields = True
def _add_from_user_projects(self, queryset, user):
if user.is_authenticated:
projects_pk = AdminPermission.projects(
user=user,
admin=True,
member=True,
).values_list("pk", flat=True)
user_queryset = self.filter(project__in=projects_pk)
queryset = user_queryset | queryset
return queryset.distinct()
def api(self, user=None):
queryset = self.none()
if user:
queryset = self._add_from_user_projects(queryset, user)
return queryset
def api_v2(self, *args, **kwargs):
# API v2 is the same as API v3 for .org, but it's
# different for .com, this method is overridden there.
return self.api(*args, **kwargs)
def get_matching_redirect_with_path(
self, filename, path=None, language=None, version_slug=None, forced_only=False
):
"""
Get the matching redirect with the path to redirect to.
:param filename: The filename being served.
:param path: The whole path from the request.
:param forced_only: Include only forced redirects in the results.
:returns: A tuple with the matching redirect and new path.
"""
# Small optimization to skip executing the big query below.
# TODO: use filter(enabled=True) once we have removed the null option from the field.
if forced_only and not self.filter(force=True).exclude(enabled=False).exists():
return None, None
normalized_filename = self._normalize_path(filename)
normalized_path = self._normalize_path(path)
# Useful to allow redirects to match paths with or without trailling slash.
# For example, ``/docs`` will match ``/docs/`` and ``/docs``.
filename_without_trailling_slash = self._strip_trailling_slash(normalized_filename)
path_without_trailling_slash = self._strip_trailling_slash(normalized_path)
# Add extra fields with the ``filename`` and ``path`` to perform a
# filter at db level instead with Python.
queryset = self.annotate(
filename=Value(
filename,
output_field=CharField(),
),
path=Value(
normalized_path,
output_field=CharField(),
),
filename_without_trailling_slash=Value(
filename_without_trailling_slash,
output_field=CharField(),
),
path_without_trailling_slash=Value(
path_without_trailling_slash,
output_field=CharField(),
),
)
page = Q(
redirect_type=PAGE_REDIRECT,
from_url_without_rest__isnull=True,
filename_without_trailling_slash__exact=F("from_url"),
) | Q(
redirect_type=PAGE_REDIRECT,
from_url_without_rest__isnull=False,
filename__startswith=F("from_url_without_rest"),
)
exact = Q(
redirect_type=EXACT_REDIRECT,
from_url_without_rest__isnull=True,
path_without_trailling_slash__exact=F("from_url"),
) | Q(
redirect_type=EXACT_REDIRECT,
from_url_without_rest__isnull=False,
path__startswith=F("from_url_without_rest"),
)
clean_url_to_html = Q(redirect_type=CLEAN_URL_TO_HTML_REDIRECT)
html_to_clean_url = Q(redirect_type=HTML_TO_CLEAN_URL_REDIRECT)
if filename in ["/index.html", "/"]:
# If the filename is a root index file (``/index.html`` or ``/``), we only need to match page and exact redirects,
# since we don't have a filename to redirect to for clean_url_to_html and html_to_clean_url redirects.
queryset = queryset.filter(page | exact)
elif filename:
if filename.endswith(("/index.html", "/")):
queryset = queryset.filter(page | exact | clean_url_to_html)
elif filename.endswith(".html"):
queryset = queryset.filter(page | exact | html_to_clean_url)
else:
queryset = queryset.filter(page | exact)
else:
# If the filename is empty, we only need to match exact redirects.
# Since the other types of redirects are not valid without a filename.
queryset = queryset.filter(exact)
# TODO: use filter(enabled=True) once we have removed the null option from the field.
queryset = queryset.exclude(enabled=False)
if forced_only:
queryset = queryset.filter(force=True)
redirect = queryset.select_related("project").first()
if redirect:
new_path = redirect.get_redirect_path(
filename=normalized_filename,
path=normalized_path,
language=language,
version_slug=version_slug,
)
return redirect, new_path
return None, None
def _normalize_path(self, path):
r"""
Normalize path.
We normalize ``path`` to:
- Remove the query params.
- Remove any invalid URL chars (\r, \n, \t).
- Always start the path with ``/``.
We don't use ``.path`` to avoid parsing the filename as a full url.
For example if the path is ``http://example.com/my-path``,
``.path`` would return ``my-path``.
"""
parsed_path = urlparse(path)
normalized_path = parsed_path._replace(query="").geturl()
normalized_path = "/" + normalized_path.lstrip("/")
return normalized_path
def _strip_trailling_slash(self, path):
"""Stripe the trailling slash from the path, making sure the root path is always ``/``."""
path = path.rstrip("/")
if path == "":
return "/"
return path
| RedirectQuerySet |
python | kubernetes-client__python | kubernetes/client/models/v1_node.py | {
"start": 383,
"end": 7016
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1NodeSpec',
'status': 'V1NodeStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1Node - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1Node. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1Node. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1Node.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1Node. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1Node. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1Node. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1Node.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1Node. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1Node. # noqa: E501
:return: The metadata of this V1Node. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1Node.
:param metadata: The metadata of this V1Node. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1Node. # noqa: E501
:return: The spec of this V1Node. # noqa: E501
:rtype: V1NodeSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1Node.
:param spec: The spec of this V1Node. # noqa: E501
:type: V1NodeSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1Node. # noqa: E501
:return: The status of this V1Node. # noqa: E501
:rtype: V1NodeStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1Node.
:param status: The status of this V1Node. # noqa: E501
:type: V1NodeStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Node):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Node):
return True
return self.to_dict() != other.to_dict()
| V1Node |
python | pytest-dev__pytest | testing/code/test_source.py | {
"start": 13690,
"end": 14111
} | class ____:
def setup_class(self) -> None:
self.source = """\
try:
raise ValueError
finally:
raise IndexError(1)
"""
def test_body(self) -> None:
source = getstatement(1, self.source)
assert str(source) == " raise ValueError"
def test_finally(self) -> None:
source = getstatement(3, self.source)
assert str(source) == " raise IndexError(1)"
| TestTryFinally |
python | kamyu104__LeetCode-Solutions | Python/number-of-ways-to-buy-pens-and-pencils.py | {
"start": 223,
"end": 1577
} | class ____(object):
def waysToBuyPensPencils(self, total, cost1, cost2):
"""
:type total: int
:type cost1: int
:type cost2: int
:rtype: int
"""
def gcd(a, b):
while b:
a, b = b, a%b
return a
def ceil_divide(a, b):
return (a+b-1)//b
def arithmetic_progression_sum(a, d, l):
return (a+(a+(l-1)*d))*l//2
if cost1 < cost2:
cost1, cost2 = cost2, cost1
lcm = cost1*cost2//gcd(cost1, cost2)
result = 0
d = lcm//cost2
for i in xrange(min(total//cost1+1, lcm//cost1)):
# total, cost1, cost2 = 120, 7, 5
# => cnt decreases by a fixed value every lcm(cost1, cost2)
# => arithmetic progressions of cnts are as follows
# ----- l ----- x
# | 24, 17, 10, 3 120
# | 22, 15, 8, 1 113
# cnt 21, 14, 7, 106
# | 19, 12, 5, 99
# | 18, 11, 4, 92
cnt = (total-i*cost1)//cost2+1
l = ceil_divide(cnt, d)
result += arithmetic_progression_sum(cnt, -d, l)
return result
# Time: O(t / c1), c1 = max(cost1, cost2)
# , c2 = min(cost1, cost2)
# Space: O(1)
# math
| Solution |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 8243,
"end": 8356
} | class ____(nodes.Part, nodes.Inline, nodes.FixedTextElement):
"""Node for a single parameter."""
| desc_parameter |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/virtual_with_abi/package.py | {
"start": 188,
"end": 403
} | class ____(Package):
"""Virtual package for mocking an interface with stable ABI ."""
homepage = "https://www.abi.org/"
virtual = True
def test_hello(self):
print("Hello there!")
| VirtualWithAbi |
python | tensorflow__tensorflow | tensorflow/python/distribute/distribute_lib.py | {
"start": 38913,
"end": 42264
} | class ____(
collections.namedtuple("InputOptions", [
"experimental_fetch_to_device",
"experimental_replication_mode",
"experimental_place_dataset_on_device",
"experimental_per_replica_buffer_size",
])):
"""Run options for `experimental_distribute_dataset(s_from_function)`.
This can be used to hold some strategy specific configs.
```python
# Setup TPUStrategy
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
dataset = tf.data.Dataset.range(16)
distributed_dataset_on_host = (
strategy.experimental_distribute_dataset(
dataset,
tf.distribute.InputOptions(
experimental_replication_mode=
experimental_replication_mode.PER_WORKER,
experimental_place_dataset_on_device=False,
experimental_per_replica_buffer_size=1)))
```
Attributes:
experimental_fetch_to_device: Boolean. If True, dataset
elements will be prefetched to accelerator device memory. When False,
dataset elements are prefetched to host device memory. Must be False when
using TPUEmbedding API. experimental_fetch_to_device can only be used
with experimental_replication_mode=PER_WORKER. Default behavior is same as
setting it to True.
experimental_replication_mode: Replication mode for the input function.
Currently, the InputReplicationMode.PER_REPLICA is only supported with
tf.distribute.MirroredStrategy.
experimental_distribute_datasets_from_function.
The default value is InputReplicationMode.PER_WORKER.
experimental_place_dataset_on_device: Boolean. Default to False. When True,
dataset will be placed on the device, otherwise it will remain on the
host. experimental_place_dataset_on_device=True can only be used with
experimental_replication_mode=PER_REPLICA
experimental_per_replica_buffer_size: Integer. Default to 1. Indicates the
prefetch buffer size in the replica device memory. Users can set it
to 0 to completely disable prefetching behavior, or a number greater than
1 to enable larger buffer size. Note that this option is still
valid with `experimental_fetch_to_device=False`.
"""
def __new__(cls,
experimental_fetch_to_device=None,
experimental_replication_mode=InputReplicationMode.PER_WORKER,
experimental_place_dataset_on_device=False,
experimental_per_replica_buffer_size=1):
if experimental_fetch_to_device is None:
experimental_fetch_to_device = True
return super(InputOptions,
cls).__new__(cls, experimental_fetch_to_device,
experimental_replication_mode,
experimental_place_dataset_on_device,
experimental_per_replica_buffer_size)
# ------------------------------------------------------------------------------
# Base classes for all distribution strategies.
# Base class for v1 Strategy and v2 Strategy classes. For API's specific to
# v1/v2 Strategy, add to implementing classes of StrategyBase.
# pylint: disable=line-too-long
| InputOptions |
python | pytorch__pytorch | torch/_dynamo/variables/tensor.py | {
"start": 65149,
"end": 66002
} | class ____(TensorVariable):
"""
This is a 1-element tensor represents unspecialized python float/int.
"""
_nonvar_fields = {
"raw_value",
"need_unwrap",
*TensorVariable._nonvar_fields,
}
def __init__(
self, proxy: torch.fx.Proxy, *, raw_value=None, need_unwrap=True, **kwargs
) -> None:
super().__init__(proxy, **kwargs)
self.raw_value = raw_value
self.need_unwrap = need_unwrap
@classmethod
def from_tensor_variable(cls, tensor_variable, raw_value, need_unwrap=True):
# Convert a `TensorVariable` instance into an `UnspecializedPythonVariable` instance.
return UnspecializedPythonVariable(
**dict(tensor_variable.__dict__),
raw_value=raw_value,
need_unwrap=need_unwrap,
)
| UnspecializedPythonVariable |
python | apache__airflow | providers/docker/tests/unit/docker/test_exceptions.py | {
"start": 1741,
"end": 3226
} | class ____:
@pytest.fixture(autouse=True)
def setup_patchers(self, docker_api_client_patcher):
self.client_mock = mock.MagicMock(spec=APIClient)
self.client_mock.wait.return_value = {"StatusCode": 0}
self.log_messages = ["container log 😁 ", b"byte string container log"]
self.client_mock.attach.return_value = self.log_messages
self.client_mock.logs.side_effect = (
lambda **kwargs: iter(self.log_messages[-kwargs["tail"] :])
if "tail" in kwargs
else iter(self.log_messages)
)
docker_api_client_patcher.return_value = self.client_mock
def test_docker_failed_exception(self, failed_msg, log_line, expected_message, skip_on_exit_code):
self.client_mock.attach.return_value = log_line
self.client_mock.wait.return_value = failed_msg
operator = DockerOperator(
image="ubuntu", owner="unittest", task_id="unittest", skip_on_exit_code=skip_on_exit_code
)
if skip_on_exit_code:
with pytest.raises(DockerContainerFailedSkipException) as raised_exception:
operator.execute(None)
else:
with pytest.raises(DockerContainerFailedException) as raised_exception:
operator.execute(None)
assert str(raised_exception.value) == expected_message
assert raised_exception.value.logs == [log_line[0].strip(), log_line[1].decode("utf-8")]
| TestDockerContainerExceptions |
python | django__django | tests/admin_widgets/tests.py | {
"start": 21774,
"end": 26401
} | class ____(TestDataMixin, TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
band = Band.objects.create(name="Linkin Park")
cls.album = band.album_set.create(
name="Hybrid Theory", cover_art=r"albums\hybrid_theory.jpg"
)
def test_render(self):
w = widgets.AdminFileWidget()
self.assertHTMLEqual(
w.render("test", self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id"> '
'<label for="test-clear_id">Clear</label></span><br>'
'Change: <input type="file" name="test"></p>'
% {
"STORAGE_URL": default_storage.url(""),
},
)
self.assertHTMLEqual(
w.render("test", SimpleUploadedFile("test", b"content")),
'<input type="file" name="test">',
)
def test_render_with_attrs_id(self):
storage_url = default_storage.url("")
w = widgets.AdminFileWidget()
self.assertHTMLEqual(
w.render("test", self.album.cover_art, attrs={"id": "test_id"}),
f'<p class="file-upload">Currently: <a href="{storage_url}albums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id"> '
'<label for="test-clear_id">Clear</label></span><br>'
'Change: <input type="file" name="test" id="test_id"></p>',
)
def test_render_required(self):
widget = widgets.AdminFileWidget()
widget.is_required = True
self.assertHTMLEqual(
widget.render("test", self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a><br>'
'Change: <input type="file" name="test"></p>'
% {
"STORAGE_URL": default_storage.url(""),
},
)
def test_render_disabled(self):
widget = widgets.AdminFileWidget(attrs={"disabled": True})
self.assertHTMLEqual(
widget.render("test", self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id" disabled>'
'<label for="test-clear_id">Clear</label></span><br>'
'Change: <input type="file" name="test" disabled></p>'
% {
"STORAGE_URL": default_storage.url(""),
},
)
def test_render_checked(self):
storage_url = default_storage.url("")
widget = widgets.AdminFileWidget()
widget.checked = True
self.assertHTMLEqual(
widget.render("test", self.album.cover_art),
f'<p class="file-upload">Currently: <a href="{storage_url}albums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id" checked>'
'<label for="test-clear_id">Clear</label></span><br>'
'Change: <input type="file" name="test" checked></p>',
)
def test_readonly_fields(self):
"""
File widgets should render as a link when they're marked "read only."
"""
self.client.force_login(self.superuser)
response = self.client.get(
reverse("admin:admin_widgets_album_change", args=(self.album.id,))
)
self.assertContains(
response,
'<div class="readonly"><a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">'
r"albums\hybrid_theory.jpg</a></div>"
% {"STORAGE_URL": default_storage.url("")},
html=True,
)
self.assertNotContains(
response,
'<input type="file" name="cover_art" id="id_cover_art">',
html=True,
)
response = self.client.get(reverse("admin:admin_widgets_album_add"))
self.assertContains(
response,
'<div class="readonly">-</div>',
html=True,
)
@override_settings(ROOT_URLCONF="admin_widgets.urls")
| AdminFileWidgetTests |
python | getsentry__sentry | src/sentry/integrations/jira_server/integration.py | {
"start": 53014,
"end": 56050
} | class ____(IntegrationProvider):
key = IntegrationProviderSlug.JIRA_SERVER.value
name = "Jira Server"
metadata = metadata
integration_cls = JiraServerIntegration
needs_default_identity = True
features = frozenset(
[
IntegrationFeatures.ISSUE_BASIC,
IntegrationFeatures.ISSUE_SYNC,
IntegrationFeatures.USER_MAPPING,
]
)
setup_dialog_config = {"width": 1030, "height": 1000}
def get_pipeline_views(self) -> list[PipelineView[IntegrationPipeline]]:
return [InstallationConfigView(), OAuthLoginView(), OAuthCallbackView()]
def build_integration(self, state: Mapping[str, Any]) -> IntegrationData:
install = state["installation_data"]
access_token = state["access_token"]
webhook_secret = sha1_text(install["private_key"]).hexdigest()
hostname = urlparse(install["url"]).netloc
external_id = "{}:{}".format(hostname, install["consumer_key"])[:64]
credentials = {
"consumer_key": install["consumer_key"],
"private_key": install["private_key"],
"access_token": access_token["oauth_token"],
"access_token_secret": access_token["oauth_token_secret"],
}
# Create the webhook before the integration record exists
# so that if it fails we don't persist a broken integration.
self.create_webhook(external_id, webhook_secret, install, credentials)
return {
"name": install["consumer_key"],
"provider": IntegrationProviderSlug.JIRA_SERVER.value,
"external_id": external_id,
"metadata": {
"base_url": install["url"],
"domain_name": hostname,
"verify_ssl": install["verify_ssl"],
"webhook_secret": webhook_secret,
},
"user_identity": {
"type": IntegrationProviderSlug.JIRA_SERVER.value,
"external_id": external_id,
"scopes": [],
"data": credentials,
},
}
def create_webhook(self, external_id, webhook_secret, install, credentials):
client = JiraServerSetupClient(
install["url"], install["consumer_key"], install["private_key"], install["verify_ssl"]
)
try:
client.create_issue_webhook(external_id, webhook_secret, credentials)
except ApiError as err:
logger.info(
"jira-server.webhook.failed",
extra={"error": str(err), "external_id": external_id},
)
if err.json is None:
details = ""
else:
try:
details = next(x for x in err.json["messages"][0].values())
except (KeyError, TypeError, StopIteration):
details = ""
message = f"Could not create issue webhook in Jira. {details}"
raise IntegrationError(message)
| JiraServerIntegrationProvider |
python | walkccc__LeetCode | solutions/2095. Delete the Middle Node of a Linked List/2095.py | {
"start": 0,
"end": 324
} | class ____:
def deleteMiddle(self, head: ListNode | None) -> ListNode | None:
dummy = ListNode(0, head)
slow = dummy
fast = dummy
while fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
# Delete the middle node.
slow.next = slow.next.next
return dummy.next
| Solution |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/hooks/test_synapse_pipeline.py | {
"start": 1537,
"end": 7592
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_mock_connections):
create_mock_connections(
# connection_client_secret
Connection(
conn_id=DEFAULT_CONNECTION_CLIENT_SECRET,
conn_type="azure_synapse",
host=SYNAPSE_WORKSPACE_URL,
login="clientId",
password="clientSecret",
extra={"tenantId": "tenantId"},
),
# connection_default_credential
Connection(
conn_id=DEFAULT_CONNECTION_DEFAULT_CREDENTIAL,
conn_type="azure_synapse",
host=SYNAPSE_WORKSPACE_URL,
extra={},
),
# connection_missing_tenant_id
Connection(
conn_id="azure_synapse_missing_tenant_id",
conn_type="azure_synapse",
host=SYNAPSE_WORKSPACE_URL,
login="clientId",
password="clientSecret",
extra={},
),
)
@pytest.fixture
def hook(self):
client = AzureSynapsePipelineHook(
azure_synapse_conn_id=DEFAULT_CONNECTION_DEFAULT_CREDENTIAL,
azure_synapse_workspace_dev_endpoint=AZURE_SYNAPSE_WORKSPACE_DEV_ENDPOINT,
)
client._conn = MagicMock(spec=["pipeline_run", "pipeline"])
return client
@patch(f"{MODULE}.ClientSecretCredential")
def test_get_connection_by_credential_client_secret(self, mock_credential):
hook = AzureSynapsePipelineHook(
azure_synapse_conn_id=DEFAULT_CONNECTION_CLIENT_SECRET,
azure_synapse_workspace_dev_endpoint=AZURE_SYNAPSE_WORKSPACE_DEV_ENDPOINT,
)
with patch.object(hook, "_create_client") as mock_create_client:
mock_create_client.return_value = MagicMock()
connection = hook.get_conn()
assert connection is not None
mock_create_client.assert_called_with(
mock_credential(),
AZURE_SYNAPSE_WORKSPACE_DEV_ENDPOINT,
)
@patch(f"{MODULE}.get_sync_default_azure_credential")
def test_get_conn_by_default_azure_credential(self, mock_default_azure_credential):
hook = AzureSynapsePipelineHook(
azure_synapse_conn_id=DEFAULT_CONNECTION_DEFAULT_CREDENTIAL,
azure_synapse_workspace_dev_endpoint=AZURE_SYNAPSE_WORKSPACE_DEV_ENDPOINT,
)
with patch.object(hook, "_create_client") as mock_create_client:
mock_create_client.return_value = MagicMock()
connection = hook.get_conn()
assert connection is not None
mock_default_azure_credential.assert_called_with(
managed_identity_client_id=None, workload_identity_tenant_id=None
)
mock_create_client.assert_called_with(
mock_default_azure_credential(),
AZURE_SYNAPSE_WORKSPACE_DEV_ENDPOINT,
)
def test_run_pipeline(self, hook: AzureSynapsePipelineHook):
hook.run_pipeline(PIPELINE_NAME)
if hook._conn is not None and isinstance(hook._conn, ArtifactsClient):
hook._conn.pipeline.create_pipeline_run.assert_called_with(PIPELINE_NAME) # type: ignore[attr-defined]
def test_get_pipeline_run(self, hook: AzureSynapsePipelineHook):
hook.get_pipeline_run(run_id=RUN_ID)
if hook._conn is not None and isinstance(hook._conn, ArtifactsClient):
hook._conn.pipeline_run.get_pipeline_run.assert_called_with(run_id=RUN_ID) # type: ignore[attr-defined]
def test_cancel_run_pipeline(self, hook: AzureSynapsePipelineHook):
hook.cancel_run_pipeline(RUN_ID)
if hook._conn is not None and isinstance(hook._conn, ArtifactsClient):
hook._conn.pipeline_run.cancel_pipeline_run.assert_called_with(RUN_ID) # type: ignore[attr-defined]
_wait_for_pipeline_run_status_test_args = [
(AzureSynapsePipelineRunStatus.SUCCEEDED, AzureSynapsePipelineRunStatus.SUCCEEDED, True),
(AzureSynapsePipelineRunStatus.FAILED, AzureSynapsePipelineRunStatus.SUCCEEDED, False),
(AzureSynapsePipelineRunStatus.CANCELLED, AzureSynapsePipelineRunStatus.SUCCEEDED, False),
(AzureSynapsePipelineRunStatus.IN_PROGRESS, AzureSynapsePipelineRunStatus.SUCCEEDED, "timeout"),
(AzureSynapsePipelineRunStatus.QUEUED, AzureSynapsePipelineRunStatus.SUCCEEDED, "timeout"),
(AzureSynapsePipelineRunStatus.CANCELING, AzureSynapsePipelineRunStatus.SUCCEEDED, "timeout"),
(AzureSynapsePipelineRunStatus.SUCCEEDED, AzureSynapsePipelineRunStatus.TERMINAL_STATUSES, True),
(AzureSynapsePipelineRunStatus.FAILED, AzureSynapsePipelineRunStatus.TERMINAL_STATUSES, True),
(AzureSynapsePipelineRunStatus.CANCELLED, AzureSynapsePipelineRunStatus.TERMINAL_STATUSES, True),
]
@pytest.mark.parametrize(
argnames=("pipeline_run_status", "expected_status", "expected_output"),
argvalues=_wait_for_pipeline_run_status_test_args,
ids=[
f"run_status_{argval[0]}_expected_{argval[1]}"
if isinstance(argval[1], str)
else f"run_status_{argval[0]}_expected_AnyTerminalStatus"
for argval in _wait_for_pipeline_run_status_test_args
],
)
def test_wait_for_pipeline_run_status(self, hook, pipeline_run_status, expected_status, expected_output):
config = {"run_id": RUN_ID, "timeout": 3, "check_interval": 1, "expected_statuses": expected_status}
with patch.object(AzureSynapsePipelineHook, "get_pipeline_run") as mock_pipeline_run:
mock_pipeline_run.return_value.status = pipeline_run_status
if expected_output != "timeout":
assert hook.wait_for_pipeline_run_status(**config) == expected_output
else:
with pytest.raises(AzureSynapsePipelineRunException):
hook.wait_for_pipeline_run_status(**config)
| TestAzureSynapsePipelineHook |
python | pytorch__pytorch | torch/_dynamo/variables/dicts.py | {
"start": 36741,
"end": 40010
} | class ____(VariableTracker):
# proxies to the original dict_vt
def __init__(self, dv_dict: ConstDictVariable, **kwargs: Any) -> None:
super().__init__(**kwargs)
assert isinstance(dv_dict, ConstDictVariable)
self.dv_dict = dv_dict
def python_type(self) -> type:
return types.MappingProxyType
def unpack_var_sequence(self, tx: "InstructionTranslator") -> list[VariableTracker]:
return self.dv_dict.unpack_var_sequence(tx)
def reconstruct(self, codegen: "PyCodegen") -> None:
# load types.MappingProxyType
if self.source:
msg = (
f"Preexisting MappingProxyVariable (source: {self.source}) cannot be reconstructed "
"because the connection to the original dict will be lost."
)
unimplemented(
gb_type="mapping proxy cannot be reconstructed",
context=f"Source: {self.source}",
explanation=msg,
hints=[
"Use a mapping proxy constructed in the same `torch.compile` region.",
*graph_break_hints.SUPPORTABLE,
],
)
codegen.add_push_null(
lambda: codegen.extend_output(
[
codegen.create_load_python_module(types),
codegen.create_load_attr("MappingProxyType"),
]
)
)
codegen(self.dv_dict)
codegen.extend_output(create_call_function(1, False))
def call_method(
self,
tx: "InstructionTranslator",
name: str,
args: list[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
if self.source and tx.output.side_effects.has_existing_dict_mutation():
msg = (
"A dict has been modified while we have an existing mappingproxy object. "
"A mapping proxy object, as the name suggest, proxies a mapping "
"object (usually a dict). If the original dict object mutates, it "
"is reflected in the proxy object as well. For an existing proxy "
"object, we do not know the original dict it points to. Therefore, "
"for correctness we graph break when there is dict mutation and we "
"are trying to access a proxy object."
)
unimplemented(
gb_type="mapping proxy affected by dictionary mutation",
context=f"Source: {self.source}, Dict mutation detected",
explanation=msg,
hints=[
"Avoid modifying dictionaries that might be referenced by mapping proxy objects",
"Or avoid using the mapping proxy objects after modifying its underlying dictionary",
],
)
return self.dv_dict.call_method(tx, name, args, kwargs)
def call_obj_hasattr(
self, tx: "InstructionTranslator", name: str
) -> ConstantVariable:
if self.python_type() is types.MappingProxyType:
return ConstantVariable.create(name in types.MappingProxyType.__dict__)
return super().call_obj_hasattr(tx, name)
| MappingProxyVariable |
python | huggingface__transformers | src/transformers/models/lfm2_moe/modeling_lfm2_moe.py | {
"start": 6075,
"end": 6706
} | class ____(nn.Module):
def __init__(self, config: Lfm2MoeConfig, intermediate_size: Optional[int] = None):
super().__init__()
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size if intermediate_size is None else intermediate_size
self.w1 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.w3 = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.w2 = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
def forward(self, x):
return self.w2(F.silu(self.w1(x)) * self.w3(x))
| Lfm2MoeMLP |
python | getsentry__sentry | src/sentry/auth/providers/saml2/generic/views.py | {
"start": 1850,
"end": 2772
} | class ____(AuthView):
def handle(self, request: HttpRequest, pipeline: AuthHelper) -> HttpResponseBase:
op = "url"
forms: dict[str, Form | None] = {
"url": URLMetadataForm(),
"xml": XMLMetadataForm(),
"idp": SAMLForm(),
}
if "action_save" in request.POST:
op = request.POST["action_save"]
form_from_forms = forms[op]
if form_from_forms is None:
forms[op] = None
else:
form_cls = form_from_forms.__class__
forms[op] = process_metadata(form_cls, request, pipeline)
# process_metadata will return None when the action was successful and
# data was bound to the pipeline.
if not forms[op]:
return pipeline.next_step()
return self.respond("sentry_auth_saml2/select-idp.html", {"op": op, "forms": forms})
| SelectIdP |
python | scikit-learn__scikit-learn | sklearn/manifold/_t_sne.py | {
"start": 19039,
"end": 44044
} | class ____(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
"""T-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, default=2
Dimension of the embedded space.
perplexity : float, default=30.0
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. Different values can result in significantly
different results. The perplexity must be less than the number
of samples.
early_exaggeration : float, default=12.0
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float or "auto", default="auto"
The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If
the learning rate is too high, the data may look like a 'ball' with any
point approximately equidistant from its nearest neighbours. If the
learning rate is too low, most points may look compressed in a dense
cloud with few outliers. If the cost function gets stuck in a bad local
minimum increasing the learning rate may help.
Note that many other t-SNE implementations (bhtsne, FIt-SNE, openTSNE,
etc.) use a definition of learning_rate that is 4 times smaller than
ours. So our learning_rate=200 corresponds to learning_rate=800 in
those other implementations. The 'auto' option sets the learning_rate
to `max(N / early_exaggeration / 4, 50)` where N is the sample size,
following [4] and [5].
.. versionchanged:: 1.2
The default value changed to `"auto"`.
max_iter : int, default=1000
Maximum number of iterations for the optimization. Should be at
least 250.
.. versionchanged:: 1.5
Parameter name changed from `n_iter` to `max_iter`.
n_iter_without_progress : int, default=300
Maximum number of iterations without progress before we abort the
optimization, used after 250 initial iterations with early
exaggeration. Note that progress is only checked every 50 iterations so
this value is rounded to the next multiple of 50.
.. versionadded:: 0.17
parameter *n_iter_without_progress* to control stopping criteria.
min_grad_norm : float, default=1e-7
If the gradient norm is below this threshold, the optimization will
be stopped.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
metric_params : dict, default=None
Additional keyword arguments for the metric function.
.. versionadded:: 1.1
init : {"random", "pca"} or ndarray of shape (n_samples, n_components), \
default="pca"
Initialization of embedding.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
.. versionchanged:: 1.2
The default value changed to `"pca"`.
verbose : int, default=0
Verbosity level.
random_state : int, RandomState instance or None, default=None
Determines the random number generator. Pass an int for reproducible
results across multiple function calls. Note that different
initializations might result in different local minima of the cost
function. See :term:`Glossary <random_state>`.
method : {'barnes_hut', 'exact'}, default='barnes_hut'
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
.. versionadded:: 0.17
Approximate optimization *method* via the Barnes-Hut.
angle : float, default=0.5
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search. This parameter
has no impact when ``metric="precomputed"`` or
(``metric="euclidean"`` and ``method="exact"``).
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.22
Attributes
----------
embedding_ : array-like of shape (n_samples, n_components)
Stores the embedding vectors.
kl_divergence_ : float
Kullback-Leibler divergence after optimization.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
learning_rate_ : float
Effective learning rate.
.. versionadded:: 1.2
n_iter_ : int
Number of iterations run.
See Also
--------
sklearn.decomposition.PCA : Principal component analysis that is a linear
dimensionality reduction method.
sklearn.decomposition.KernelPCA : Non-linear dimensionality reduction using
kernels and PCA.
MDS : Manifold learning using multidimensional scaling.
Isomap : Manifold learning based on Isometric Mapping.
LocallyLinearEmbedding : Manifold learning using Locally Linear Embedding.
SpectralEmbedding : Spectral embedding for non-linear dimensionality.
Notes
-----
For an example of using :class:`~sklearn.manifold.TSNE` in combination with
:class:`~sklearn.neighbors.KNeighborsTransformer` see
:ref:`sphx_glr_auto_examples_neighbors_approximate_nearest_neighbors.py`.
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
https://lvdmaaten.github.io/tsne/
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
https://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
[4] Belkina, A. C., Ciccolella, C. O., Anno, R., Halpert, R., Spidlen, J.,
& Snyder-Cappione, J. E. (2019). Automated optimized parameters for
T-distributed stochastic neighbor embedding improve visualization
and analysis of large datasets. Nature Communications, 10(1), 1-12.
[5] Kobak, D., & Berens, P. (2019). The art of using t-SNE for single-cell
transcriptomics. Nature Communications, 10(1), 1-14.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> X_embedded = TSNE(n_components=2, learning_rate='auto',
... init='random', perplexity=3).fit_transform(X)
>>> X_embedded.shape
(4, 2)
"""
_parameter_constraints: dict = {
"n_components": [Interval(Integral, 1, None, closed="left")],
"perplexity": [Interval(Real, 0, None, closed="neither")],
"early_exaggeration": [Interval(Real, 1, None, closed="left")],
"learning_rate": [
StrOptions({"auto"}),
Interval(Real, 0, None, closed="neither"),
],
"max_iter": [Interval(Integral, 250, None, closed="left")],
"n_iter_without_progress": [Interval(Integral, -1, None, closed="left")],
"min_grad_norm": [Interval(Real, 0, None, closed="left")],
"metric": [StrOptions(set(_VALID_METRICS) | {"precomputed"}), callable],
"metric_params": [dict, None],
"init": [
StrOptions({"pca", "random"}),
np.ndarray,
],
"verbose": ["verbose"],
"random_state": ["random_state"],
"method": [StrOptions({"barnes_hut", "exact"})],
"angle": [Interval(Real, 0, 1, closed="both")],
"n_jobs": [None, Integral],
}
# Control the number of exploration iterations with early_exaggeration on
_EXPLORATION_MAX_ITER = 250
# Control the number of iterations between progress checks
_N_ITER_CHECK = 50
def __init__(
self,
n_components=2,
*,
perplexity=30.0,
early_exaggeration=12.0,
learning_rate="auto",
max_iter=1000,
n_iter_without_progress=300,
min_grad_norm=1e-7,
metric="euclidean",
metric_params=None,
init="pca",
verbose=0,
random_state=None,
method="barnes_hut",
angle=0.5,
n_jobs=None,
):
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.max_iter = max_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.metric_params = metric_params
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
self.n_jobs = n_jobs
def _check_params_vs_input(self, X):
if self.perplexity >= X.shape[0]:
raise ValueError(
f"perplexity ({self.perplexity}) must be less "
f"than n_samples ({X.shape[0]})"
)
def _fit(self, X, skip_num_points=0):
"""Private function to fit the model using X as training data."""
if self.learning_rate == "auto":
# See issue #18018
self.learning_rate_ = X.shape[0] / self.early_exaggeration / 4
self.learning_rate_ = np.maximum(self.learning_rate_, 50)
else:
self.learning_rate_ = self.learning_rate
if self.method == "barnes_hut":
X = validate_data(
self,
X,
accept_sparse=["csr"],
ensure_min_samples=2,
dtype=[np.float32, np.float64],
)
else:
X = validate_data(
self,
X,
accept_sparse=["csr", "csc", "coo"],
dtype=[np.float32, np.float64],
)
if self.metric == "precomputed":
if isinstance(self.init, str) and self.init == "pca":
raise ValueError(
'The parameter init="pca" cannot be used with metric="precomputed".'
)
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
check_non_negative(
X,
(
"TSNE.fit(). With metric='precomputed', X "
"should contain positive distances."
),
)
if self.method == "exact" and issparse(X):
raise TypeError(
'TSNE with method="exact" does not accept sparse '
'precomputed distance matrix. Use method="barnes_hut" '
"or provide the dense distance matrix."
)
if self.method == "barnes_hut" and self.n_components > 3:
raise ValueError(
"'n_components' should be inferior to 4 for the "
"barnes_hut algorithm as it relies on "
"quad-tree or oct-tree."
)
random_state = check_random_state(self.random_state)
n_samples = X.shape[0]
neighbors_nn = None
if self.method == "exact":
# Retrieve the distance matrix, either using the precomputed one or
# computing it.
if self.metric == "precomputed":
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
# Euclidean is squared here, rather than using **= 2,
# because euclidean_distances already calculates
# squared distances, and returns np.sqrt(dist) for
# squared=False.
# Also, Euclidean is slower for n_jobs>1, so don't set here
distances = pairwise_distances(X, metric=self.metric, squared=True)
else:
metric_params_ = self.metric_params or {}
distances = pairwise_distances(
X, metric=self.metric, n_jobs=self.n_jobs, **metric_params_
)
if np.any(distances < 0):
raise ValueError(
"All distances should be positive, the metric given is not correct"
)
if self.metric != "euclidean":
distances **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be non-negative"
assert np.all(P <= 1), (
"All probabilities should be less or then equal to one"
)
else:
# Compute the number of nearest neighbors to find.
# LvdM uses 3 * perplexity as the number of neighbors.
# In the event that we have very small # of points
# set the neighbors to n - 1.
n_neighbors = min(n_samples - 1, int(3.0 * self.perplexity + 1))
if self.verbose:
print("[t-SNE] Computing {} nearest neighbors...".format(n_neighbors))
# Find the nearest neighbors for every point
knn = NearestNeighbors(
algorithm="auto",
n_jobs=self.n_jobs,
n_neighbors=n_neighbors,
metric=self.metric,
metric_params=self.metric_params,
)
t0 = time()
knn.fit(X)
duration = time() - t0
if self.verbose:
print(
"[t-SNE] Indexed {} samples in {:.3f}s...".format(
n_samples, duration
)
)
t0 = time()
distances_nn = knn.kneighbors_graph(mode="distance")
duration = time() - t0
if self.verbose:
print(
"[t-SNE] Computed neighbors for {} samples in {:.3f}s...".format(
n_samples, duration
)
)
# Free the memory used by the ball_tree
del knn
# knn return the euclidean distance but we need it squared
# to be consistent with the 'exact' method. Note that the
# the method was derived using the euclidean method as in the
# input space. Not sure of the implication of using a different
# metric.
distances_nn.data **= 2
# compute the joint probability distribution for the input space
P = _joint_probabilities_nn(distances_nn, self.perplexity, self.verbose)
if isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == "pca":
pca = PCA(
n_components=self.n_components,
random_state=random_state,
)
# Always output a numpy array, no matter what is configured globally
pca.set_output(transform="default")
X_embedded = pca.fit_transform(X).astype(np.float32, copy=False)
# PCA is rescaled so that PC1 has standard deviation 1e-4 which is
# the default value for random initialization. See issue #18018.
X_embedded = X_embedded / np.std(X_embedded[:, 0]) * 1e-4
elif self.init == "random":
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
X_embedded = 1e-4 * random_state.standard_normal(
size=(n_samples, self.n_components)
).astype(np.float32)
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1, 1)
return self._tsne(
P,
degrees_of_freedom,
n_samples,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points,
)
def _tsne(
self,
P,
degrees_of_freedom,
n_samples,
X_embedded,
neighbors=None,
skip_num_points=0,
):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with two stages:
# * initial optimization with early exaggeration and momentum at 0.5
# * final optimization with momentum at 0.8
params = X_embedded.ravel()
opt_args = {
"it": 0,
"n_iter_check": self._N_ITER_CHECK,
"min_grad_norm": self.min_grad_norm,
"learning_rate": self.learning_rate_,
"verbose": self.verbose,
"kwargs": dict(skip_num_points=skip_num_points),
"args": [P, degrees_of_freedom, n_samples, self.n_components],
"n_iter_without_progress": self._EXPLORATION_MAX_ITER,
"max_iter": self._EXPLORATION_MAX_ITER,
"momentum": 0.5,
}
if self.method == "barnes_hut":
obj_func = _kl_divergence_bh
opt_args["kwargs"]["angle"] = self.angle
# Repeat verbose argument for _kl_divergence_bh
opt_args["kwargs"]["verbose"] = self.verbose
# Get the number of threads for gradient computation here to
# avoid recomputing it at each iteration.
opt_args["kwargs"]["num_threads"] = _openmp_effective_n_threads()
else:
obj_func = _kl_divergence
# Learning schedule (part 1): do 250 iteration with lower momentum but
# higher learning rate controlled via the early exaggeration parameter
P *= self.early_exaggeration
params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print(
"[t-SNE] KL divergence after %d iterations with early exaggeration: %f"
% (it + 1, kl_divergence)
)
# Learning schedule (part 2): disable early exaggeration and finish
# optimization with a higher momentum at 0.8
P /= self.early_exaggeration
remaining = self.max_iter - self._EXPLORATION_MAX_ITER
if it < self._EXPLORATION_MAX_ITER or remaining > 0:
opt_args["max_iter"] = self.max_iter
opt_args["it"] = it + 1
opt_args["momentum"] = 0.8
opt_args["n_iter_without_progress"] = self.n_iter_without_progress
params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args)
# Save the final number of iterations
self.n_iter_ = it
if self.verbose:
print(
"[t-SNE] KL divergence after %d iterations: %f"
% (it + 1, kl_divergence)
)
X_embedded = params.reshape(n_samples, self.n_components)
self.kl_divergence_ = kl_divergence
return X_embedded
@_fit_context(
# TSNE.metric is not validated yet
prefer_skip_nested_validation=False
)
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed output.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'. If the method is 'barnes_hut' and the metric is
'precomputed', X may be a precomputed sparse graph.
y : None
Ignored.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self._check_params_vs_input(X)
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
@_fit_context(
# TSNE.metric is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features) or \
(n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'. If the method is 'barnes_hut' and the metric is
'precomputed', X may be a precomputed sparse graph.
y : None
Ignored.
Returns
-------
self : object
Fitted estimator.
"""
self.fit_transform(X)
return self
@property
def _n_features_out(self):
"""Number of transformed output features."""
return self.embedding_.shape[1]
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.pairwise = self.metric == "precomputed"
tags.input_tags.sparse = True
return tags
| TSNE |
python | numba__numba | numba/tests/test_random.py | {
"start": 39099,
"end": 47337
} | class ____(BaseTest):
"""
Test array-producing variants of np.random.* functions.
"""
def _compile_array_dist(self, funcname, nargs):
qualname = "np.random.%s" % (funcname,)
argstring = ', '.join('abcd'[:nargs])
return jit_with_args(qualname, argstring)
def _check_array_dist(self, funcname, scalar_args):
"""
Check returning an array according to a given distribution.
"""
cfunc = self._compile_array_dist(funcname, len(scalar_args) + 1)
r = self._follow_numpy(get_np_state_ptr())
pyfunc = getattr(r, funcname)
for size in (8, (2, 3)):
args = scalar_args + (size,)
expected = pyfunc(*args)
got = cfunc(*args)
# Numpy may return int32s where we return int64s, adjust
if (expected.dtype == np.dtype('int32')
and got.dtype == np.dtype('int64')):
expected = expected.astype(got.dtype)
self.assertPreciseEqual(expected, got, prec='double', ulps=5)
args = scalar_args + (None,)
expected = pyfunc(*args)
got = cfunc(*args)
self.assertPreciseEqual(expected, got, prec='double', ulps=5)
def _check_array_dist_gamma(self, funcname, scalar_args, extra_pyfunc_args):
"""
Check returning an array according to a given gamma distribution,
where we use CPython's implementation rather than NumPy's.
"""
cfunc = self._compile_array_dist(funcname, len(scalar_args) + 1)
r = self._follow_cpython(get_np_state_ptr())
pyfunc = getattr(r, "gammavariate")
pyfunc_args = scalar_args + extra_pyfunc_args
pyrandom = lambda *_args: pyfunc(*pyfunc_args)
args = scalar_args + (None,)
expected = pyrandom()
got = cfunc(*args)
self.assertPreciseEqual(expected, got, prec='double', ulps=5)
for size in (8, (2, 3)):
args = scalar_args + (size,)
expected = np.empty(size)
expected_flat = expected.flat
for idx in range(expected.size):
expected_flat[idx] = pyrandom()
got = cfunc(*args)
self.assertPreciseEqual(expected, got, prec='double', ulps=5)
def _check_array_dist_self(self, funcname, scalar_args):
"""
Check function returning an array against its scalar implementation.
Because we use the CPython gamma distribution rather than the NumPy one,
distributions which use the gamma distribution vary in ways that are
difficult to compare. Instead, we compile both the array and scalar
versions and check that the array is filled with the same values as
we would expect from the scalar version.
"""
@numba.njit
def reset():
np.random.seed(1234)
array_func = self._compile_array_dist(funcname, len(scalar_args) + 1)
qualname = "np.random.%s" % (funcname,)
argstring = ', '.join('abcd'[:len(scalar_args)])
scalar_func = jit_with_args(qualname, argstring)
for size in (8, (2, 3)):
args = scalar_args + (size,)
reset()
got = array_func(*args)
reset()
# We're just going to go with whatever type the array version
# gives us and hope it's not Boolean or something useless.
expected = np.empty(size, dtype=got.dtype)
flat = expected.flat
for idx in range(expected.size):
flat[idx] = scalar_func(*scalar_args)
self.assertPreciseEqual(expected, got, prec='double', ulps=5)
reset()
args = scalar_args + (None,)
expected = scalar_func(*scalar_args)
reset()
got = array_func(*args)
self.assertPreciseEqual(expected, got, prec='double', ulps=5)
def test_numpy_randint(self):
cfunc = self._compile_array_dist("randint", 3)
low, high = 1000, 10000
size = (30, 30)
res = cfunc(low, high, size)
self.assertIsInstance(res, np.ndarray)
self.assertEqual(res.shape, size)
self.assertIn(res.dtype, (np.dtype('int32'), np.dtype('int64')))
self.assertTrue(np.all(res >= low))
self.assertTrue(np.all(res < high))
# Crude statistical tests
mean = (low + high) / 2
tol = (high - low) / 20
self.assertGreaterEqual(res.mean(), mean - tol)
self.assertLessEqual(res.mean(), mean + tol)
def test_numpy_random_random(self):
cfunc = self._compile_array_dist("random", 1)
size = (30, 30)
res = cfunc(size)
self.assertIsInstance(res, np.ndarray)
self.assertEqual(res.shape, size)
self.assertEqual(res.dtype, np.dtype('float64'))
# Results are within expected bounds
self.assertTrue(np.all(res >= 0.0))
self.assertTrue(np.all(res < 1.0))
# Crude statistical tests
self.assertTrue(np.any(res <= 0.1))
self.assertTrue(np.any(res >= 0.9))
mean = res.mean()
self.assertGreaterEqual(mean, 0.45)
self.assertLessEqual(mean, 0.55)
# Sanity-check various distributions. For convenience, we only check
# those distributions that produce the exact same values as Numpy's.
def test_numpy_beta(self):
self._check_array_dist_self("beta", (0.5, 2.5))
def test_numpy_binomial(self):
self._check_array_dist("binomial", (20, 0.5))
def test_numpy_chisquare(self):
self._check_array_dist_self("chisquare", (1.5,))
def test_numpy_exponential(self):
self._check_array_dist("exponential", (1.5,))
def test_numpy_f(self):
self._check_array_dist_self("f", (0.5, 1.5))
def test_numpy_gamma(self):
self._check_array_dist_gamma("gamma", (2.0, 1.0), ())
def test_numpy_geometric(self):
self._check_array_dist("geometric", (1.0,))
def test_numpy_gumbel(self):
self._check_array_dist("gumbel", (1.5, 0.5))
def test_numpy_hypergeometric(self):
self._check_array_dist("hypergeometric", (1000, 5000, 10))
def test_numpy_laplace(self):
self._check_array_dist("laplace", (1.5, 0.5))
def test_numpy_logistic(self):
self._check_array_dist("logistic", (1.5, 0.5))
def test_numpy_lognormal(self):
self._check_array_dist("lognormal", (1.5, 2.0))
def test_numpy_logseries(self):
self._check_array_dist("logseries", (0.8,))
def test_numpy_normal(self):
self._check_array_dist("normal", (0.5, 2.0))
def test_numpy_pareto(self):
self._check_array_dist("pareto", (0.5,))
def test_numpy_poisson(self):
self._check_array_dist("poisson", (0.8,))
def test_numpy_power(self):
self._check_array_dist("power", (0.8,))
def test_numpy_rand(self):
cfunc = jit(nopython=True)(numpy_check_rand)
expected, got = cfunc(42, 2, 3)
self.assertEqual(got.shape, (2, 3))
self.assertPreciseEqual(expected, got)
def test_numpy_randn(self):
cfunc = jit(nopython=True)(numpy_check_randn)
expected, got = cfunc(42, 2, 3)
self.assertEqual(got.shape, (2, 3))
self.assertPreciseEqual(expected, got)
def test_numpy_rayleigh(self):
self._check_array_dist("rayleigh", (0.8,))
def test_numpy_standard_cauchy(self):
self._check_array_dist("standard_cauchy", ())
def test_numpy_standard_exponential(self):
self._check_array_dist("standard_exponential", ())
def test_numpy_standard_gamma(self):
self._check_array_dist_gamma("standard_gamma", (2.0,), (1.0,))
def test_numpy_standard_normal(self):
self._check_array_dist("standard_normal", ())
def test_numpy_triangular(self):
self._check_array_dist("triangular", (1.5, 2.2, 3.5))
def test_numpy_uniform(self):
self._check_array_dist("uniform", (0.1, 0.4))
def test_numpy_wald(self):
self._check_array_dist("wald", (0.1, 0.4))
def test_numpy_vonmises(self):
self._check_array_dist_self("vonmises", (0.5, 2.5))
def test_numpy_zipf(self):
self._check_array_dist("zipf", (2.5,))
| TestRandomArrays |
python | wandb__wandb | wandb/sdk/launch/agent/run_queue_item_file_saver.py | {
"start": 175,
"end": 1308
} | class ____:
def __init__(
self,
agent_run: Optional["wandb.Run"],
run_queue_item_id: str,
):
self.run_queue_item_id = run_queue_item_id
self.run = agent_run
def save_contents(
self, contents: str, fname: str, file_sub_type: FileSubtypes
) -> Optional[List[str]]:
if not isinstance(self.run, wandb.Run):
wandb.termwarn("Not saving file contents because agent has no run")
return None
root_dir = self.run._settings.files_dir
saved_run_path = os.path.join(self.run_queue_item_id, file_sub_type, fname)
local_path = os.path.join(root_dir, saved_run_path)
os.makedirs(os.path.dirname(local_path), exist_ok=True)
with open(local_path, "w") as f:
f.write(contents)
res = self.run.save(local_path, base_path=root_dir, policy="now")
if isinstance(res, list):
return [saved_run_path]
else:
wandb.termwarn(
f"Failed to save files for run queue item: {self.run_queue_item_id}"
)
return None
| RunQueueItemFileSaver |
python | langchain-ai__langchain | libs/langchain/langchain_classic/output_parsers/retry.py | {
"start": 1106,
"end": 1247
} | class ____(TypedDict):
"""Retry chain input for RetryOutputParser."""
prompt: str
completion: str
| RetryOutputParserRetryChainInput |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classVar2.py | {
"start": 282,
"end": 630
} | class ____:
var1 = ""
def __init__(self) -> None:
self.var2 = ""
# This should generate an error because var2
# is not a class variable.
a: Proto = ProtoImpl()
def func1(x: Proto):
reveal_type(x.var1, expected_text="str")
reveal_type(x.var2, expected_text="str")
reveal_type(x.var3, expected_text="list[str]")
| ProtoImpl |
python | tensorflow__tensorflow | tensorflow/python/keras/optimizer_v2/learning_rate_schedule.py | {
"start": 28859,
"end": 33439
} | class ____(LearningRateSchedule):
"""A LearningRateSchedule that uses a linear cosine decay schedule.
See [Bello et al., ICML2017] Neural Optimizer Search with RL.
https://arxiv.org/abs/1709.07417
For the idea of warm starts here controlled by `num_periods`,
see [Loshchilov & Hutter, ICLR2016] SGDR: Stochastic Gradient Descent
with Warm Restarts. https://arxiv.org/abs/1608.03983
Note that linear cosine decay is more aggressive than cosine decay and
larger initial learning rates can typically be used.
When training a model, it is often recommended to lower the learning rate as
the training progresses. This schedule applies a linear cosine decay
function to an optimizer step, given a provided initial learning rate.
It requires a `step` value to compute the decayed learning rate. You can
just pass a TensorFlow variable that you increment at each training step.
The schedule a 1-arg callable that produces a decayed learning
rate when passed the current optimizer step. This can be useful for changing
the learning rate value across different invocations of optimizer functions.
It is computed as:
```python
def decayed_learning_rate(step):
step = min(step, decay_steps)
linear_decay = (decay_steps - step) / decay_steps
cosine_decay = 0.5 * (
1 + cos(pi * 2 * num_periods * step / decay_steps))
decayed = (alpha + linear_decay) * cosine_decay + beta
return initial_learning_rate * decayed
```
Example usage:
```python
decay_steps = 1000
lr_decayed_fn = (
tf.keras.experimental.LinearCosineDecay(
initial_learning_rate, decay_steps))
```
You can pass this schedule directly into a `tf.keras.optimizers.Optimizer`
as the learning rate. The learning rate schedule is also serializable and
deserializable using `tf.keras.optimizers.schedules.serialize` and
`tf.keras.optimizers.schedules.deserialize`.
Returns:
A 1-arg callable learning rate schedule that takes the current optimizer
step and outputs the decayed learning rate, a scalar `Tensor` of the same
type as `initial_learning_rate`.
"""
def __init__(
self,
initial_learning_rate,
decay_steps,
num_periods=0.5,
alpha=0.0,
beta=0.001,
name=None):
"""Applies linear cosine decay to the learning rate.
Args:
initial_learning_rate: A scalar `float32` or `float64` Tensor or a Python
number. The initial learning rate.
decay_steps: A scalar `int32` or `int64` `Tensor` or a Python number.
Number of steps to decay over.
num_periods: Number of periods in the cosine part of the decay.
See computation above.
alpha: See computation above.
beta: See computation above.
name: String. Optional name of the operation. Defaults to
'LinearCosineDecay'.
"""
super(LinearCosineDecay, self).__init__()
self.initial_learning_rate = initial_learning_rate
self.decay_steps = decay_steps
self.num_periods = num_periods
self.alpha = alpha
self.beta = beta
self.name = name
def __call__(self, step):
with ops.name_scope_v2(self.name or "LinearCosineDecay") as name:
initial_learning_rate = (
tensor_conversion.convert_to_tensor_v2_with_dispatch(
self.initial_learning_rate, name="initial_learning_rate"
)
)
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
num_periods = math_ops.cast(self.num_periods, dtype)
alpha = math_ops.cast(self.alpha, dtype)
beta = math_ops.cast(self.beta, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
linear_decayed = (decay_steps - global_step_recomp) / decay_steps
completed_fraction = global_step_recomp / decay_steps
fraction = 2.0 * num_periods * completed_fraction
cosine_decayed = 0.5 * (
1.0 + math_ops.cos(constant_op.constant(math.pi) * fraction))
linear_cosine_decayed = (alpha + linear_decayed) * cosine_decayed + beta
return math_ops.multiply(initial_learning_rate, linear_cosine_decayed,
name=name)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_steps": self.decay_steps,
"num_periods": self.num_periods,
"alpha": self.alpha,
"beta": self.beta,
"name": self.name
}
# Note: this code is still used by V1 APIs.
| LinearCosineDecay |
python | modin-project__modin | modin/tests/pandas/native_df_interoperability/test_compiler_caster.py | {
"start": 8420,
"end": 10178
} | class ____(BaseTestAutoMover):
"""Represents a cloud-hosted query compiler that prefers to stay on the cloud only for big data"""
# Operations are more costly on this engine, even though it can handle larger datasets
_MAX_SIZE_THIS_ENGINE_CAN_HANDLE = BIG_DATA_CLOUD_MIN_NUM_ROWS * 10
_OPERATION_INITIALIZATION_OVERHEAD = QCCoercionCost.COST_MEDIUM
_OPERATION_PER_ROW_OVERHEAD = 10
def __init__(self, pandas_frame):
super().__init__(pandas_frame)
def stay_cost(self, api_cls_name, operation, arguments):
if operation == "read_json":
return QCCoercionCost.COST_IMPOSSIBLE
return super().stay_cost(api_cls_name, operation, arguments)
def get_backend(self) -> str:
return "Big_Data_Cloud"
@classmethod
def max_cost(cls):
return QCCoercionCost.COST_IMPOSSIBLE * 10
@classmethod
def move_to_me_cost(cls, other_qc, api_cls_name, operation, arguments):
if api_cls_name in ("DataFrame", "Series") and operation == "__init__":
if (query_compiler := arguments.get("query_compiler")) is not None:
# When we create a dataframe or series with a query compiler
# input, we should not switch the resulting dataframe or series
# to a different backend.
return (
QCCoercionCost.COST_ZERO
if isinstance(query_compiler, cls)
else QCCoercionCost.COST_IMPOSSIBLE
)
else:
# Moving the in-memory __init__ inputs to the cloud is expensive.
return QCCoercionCost.COST_HIGH
return super().move_to_me_cost(other_qc, api_cls_name, operation, arguments)
| CloudForBigDataQC |
python | astropy__astropy | astropy/utils/metadata/tests/test_metadata.py | {
"start": 1930,
"end": 2027
} | class ____(MetaBaseTest):
test_class = ExampleData
args = ()
@dataclass
| TestMetaExampleData |
python | getsentry__sentry | tests/sentry/api/endpoints/test_dif_assemble.py | {
"start": 764,
"end": 10180
} | class ____(APITestCase):
def setUp(self) -> None:
self.organization = self.create_organization(owner=self.user)
with assume_test_silo_mode(SiloMode.CONTROL):
self.token = ApiToken.objects.create(user=self.user, scope_list=["project:write"])
self.team = self.create_team(organization=self.organization)
self.project = self.create_project(
teams=[self.team], organization=self.organization, name="foo"
)
self.url = reverse(
"sentry-api-0-assemble-dif-files", args=[self.organization.slug, self.project.slug]
)
def test_assemble_json_schema(self) -> None:
response = self.client.post(
self.url, data={"lol": "test"}, HTTP_AUTHORIZATION=f"Bearer {self.token.token}"
)
assert response.status_code == 400, response.content
checksum = sha1(b"1").hexdigest()
response = self.client.post(
self.url,
data={checksum: "test"},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 400, response.content
response = self.client.post(
self.url, data={checksum: {}}, HTTP_AUTHORIZATION=f"Bearer {self.token.token}"
)
assert response.status_code == 400, response.content
response = self.client.post(
self.url,
data={checksum: {"name": "dif", "chunks": []}},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data[checksum]["state"] == ChunkFileState.NOT_FOUND
def test_assemble_check(self) -> None:
content = b"foo bar"
fileobj = ContentFile(content)
file1 = File.objects.create(name="baz.dSYM", type="default", size=7)
file1.putfile(fileobj, 3)
checksum = sha1(content).hexdigest()
blobs = FileBlob.objects.all()
checksums = []
for blob in blobs:
checksums.append(blob.checksum)
# Request to see of file is there
# file exists but we have no overship for the chunks
response = self.client.post(
self.url,
data={checksum: {"name": "dif", "chunks": checksums}},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data[checksum]["state"] == ChunkFileState.NOT_FOUND
assert set(response.data[checksum]["missingChunks"]) == set(checksums)
# Now we add ownership to the blob
blobs = FileBlob.objects.all()
for blob in blobs:
FileBlobOwner.objects.create(blob=blob, organization_id=self.organization.id)
# The request will start the job to assemble the file
response = self.client.post(
self.url,
data={checksum: {"name": "dif", "chunks": checksums}},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data[checksum]["state"] == ChunkFileState.CREATED
assert response.data[checksum]["missingChunks"] == []
# Finally, we simulate a successful job
ProjectDebugFile.objects.create(
file=file1,
checksum=file1.checksum,
object_name="baz.dSYM",
cpu_name="x86_64",
project_id=self.project.id,
debug_id="df449af8-0dcd-4320-9943-ec192134d593",
code_id="DF449AF80DCD43209943EC192134D593",
)
set_assemble_status(AssembleTask.DIF, self.project.id, checksum, None)
# Request now tells us that everything is alright
response = self.client.post(
self.url,
data={checksum: {"name": "dif", "chunks": checksums}},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data[checksum]["state"] == ChunkFileState.OK
assert response.data[checksum]["missingChunks"] == []
not_found_checksum = sha1(b"1").hexdigest()
response = self.client.post(
self.url,
data={not_found_checksum: {"name": "dif", "chunks": [not_found_checksum]}},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data[not_found_checksum]["state"] == ChunkFileState.NOT_FOUND
assert set(response.data[not_found_checksum]["missingChunks"]) == {not_found_checksum}
@patch("sentry.tasks.assemble.assemble_dif")
def test_assemble(self, mock_assemble_dif: MagicMock) -> None:
content1 = b"foo"
fileobj1 = ContentFile(content1)
checksum1 = sha1(content1).hexdigest()
content2 = b"bar"
fileobj2 = ContentFile(content2)
checksum2 = sha1(content2).hexdigest()
content3 = b"baz"
fileobj3 = ContentFile(content3)
checksum3 = sha1(content3).hexdigest()
total_checksum = sha1(content2 + content1 + content3).hexdigest()
# The order here is on purpose because we check for the order of checksums
blob1 = FileBlob.from_file(fileobj1)
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob1)
blob3 = FileBlob.from_file(fileobj3)
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob3)
blob2 = FileBlob.from_file(fileobj2)
# we make a request now but we are missing ownership for chunk 2
response = self.client.post(
self.url,
data={total_checksum: {"name": "test", "chunks": [checksum2, checksum1, checksum3]}},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data[total_checksum]["state"] == ChunkFileState.NOT_FOUND
assert response.data[total_checksum]["missingChunks"] == [checksum2]
# we add ownership to chunk 2
FileBlobOwner.objects.get_or_create(organization_id=self.organization.id, blob=blob2)
# new request, ownership for all chunks is there but file does not exist yet
response = self.client.post(
self.url,
data={total_checksum: {"name": "test", "chunks": [checksum2, checksum1, checksum3]}},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data[total_checksum]["state"] == ChunkFileState.CREATED
assert response.data[total_checksum]["missingChunks"] == []
chunks = [checksum2, checksum1, checksum3]
mock_assemble_dif.apply_async.assert_called_once_with(
kwargs={
"project_id": self.project.id,
"name": "test",
"chunks": chunks,
"checksum": total_checksum,
"debug_id": None,
}
)
assemble_result = assemble_file(
AssembleTask.DIF, self.project, "test", total_checksum, chunks, "project.dif"
)
assert assemble_result is not None
file = assemble_result.bundle
status, _ = get_assemble_status(AssembleTask.DIF, self.project.id, total_checksum)
assert status != ChunkFileState.ERROR
assert file.checksum == total_checksum
file_blob_index = FileBlobIndex.objects.all()
assert len(file_blob_index) == 3
def test_dif_response(self) -> None:
sym_file = self.load_fixture("crash.sym")
blob1 = FileBlob.from_file_with_organization(ContentFile(sym_file), self.organization)
total_checksum = sha1(sym_file).hexdigest()
chunks = [blob1.checksum]
assemble_dif(
project_id=self.project.id, name="crash.sym", checksum=total_checksum, chunks=chunks
)
response = self.client.post(
self.url,
data={total_checksum: {"name": "test.sym", "chunks": chunks}},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data[total_checksum]["state"] == ChunkFileState.OK
assert response.data[total_checksum]["dif"]["cpuName"] == "x86_64"
assert (
response.data[total_checksum]["dif"]["uuid"] == "67e9247c-814e-392b-a027-dbde6748fcbf"
)
def test_dif_error_response(self) -> None:
sym_file = b"fail"
blob1 = FileBlob.from_file_with_organization(ContentFile(sym_file), self.organization)
total_checksum = sha1(sym_file).hexdigest()
chunks = [blob1.checksum]
assemble_dif(
project_id=self.project.id, name="test.sym", checksum=total_checksum, chunks=chunks
)
response = self.client.post(
self.url,
data={total_checksum: {"name": "test.sym", "chunks": []}},
HTTP_AUTHORIZATION=f"Bearer {self.token.token}",
)
assert response.status_code == 200, response.content
assert response.data[total_checksum]["state"] == ChunkFileState.ERROR
assert "unsupported object file format" in response.data[total_checksum]["detail"]
| DifAssembleEndpoint |
python | walkccc__LeetCode | solutions/2875. Minimum Size Subarray in Infinite Array/2875.py | {
"start": 0,
"end": 687
} | class ____:
def minSizeSubarray(self, nums: list[int], target: int) -> int:
summ = sum(nums)
n = len(nums)
remainingTarget = target % summ
repeatLength = (target // summ) * n
if remainingTarget == 0:
return repeatLength
suffixPlusPrefixLength = n
prefix = 0
prefixToIndex = {0: -1}
for i in range(2 * n):
prefix += nums[i % n]
if prefix - remainingTarget in prefixToIndex:
suffixPlusPrefixLength = min(
suffixPlusPrefixLength,
i - prefixToIndex[prefix - remainingTarget])
prefixToIndex[prefix] = i
return -1 if suffixPlusPrefixLength == n else suffixPlusPrefixLength + repeatLength
| Solution |
python | ray-project__ray | python/ray/serve/_private/build_app.py | {
"start": 494,
"end": 1330
} | class ____(dict, Generic[K, V]):
"""Dictionary that uses id() for keys instead of hash().
This is necessary because Application objects aren't hashable and we want each
instance to map to a unique key.
"""
def __getitem__(self, key: K) -> V:
if not isinstance(key, int):
key = id(key)
return super().__getitem__(key)
def __setitem__(self, key: K, value: V):
if not isinstance(key, int):
key = id(key)
return super().__setitem__(key, value)
def __delitem__(self, key: K):
if not isinstance(key, int):
key = id(key)
return super().__delitem__(key)
def __contains__(self, key: object):
if not isinstance(key, int):
key = id(key)
return super().__contains__(key)
@dataclass(frozen=True)
| IDDict |
python | getsentry__sentry | tests/acceptance/test_organization_dashboards.py | {
"start": 1107,
"end": 26995
} | class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
min_ago = before_now(minutes=1).isoformat()
self.store_event(
data={"event_id": "a" * 32, "message": "oh no", "timestamp": min_ago},
project_id=self.project.id,
)
self.dashboard = Dashboard.objects.create(
title="Dashboard 1", created_by_id=self.user.id, organization=self.organization
)
self.page = DashboardDetailPage(
self.browser, self.client, organization=self.organization, dashboard=self.dashboard
)
self.login_as(self.user)
def capture_screenshots(self, screenshot_name: str) -> None:
"""
Captures screenshots in both a pre and post refresh state.
Necessary for verifying that the layout persists after saving.
"""
self.page.wait_until_loaded()
self.browser.refresh()
self.page.wait_until_loaded()
def test_default_overview_dashboard_layout(self) -> None:
with self.feature(FEATURE_NAMES):
self.page.visit_default_overview()
@pytest.mark.skip(reason="TODO: Convert to new widget builder or test with jest")
def test_add_and_move_new_widget_on_existing_dashboard(self) -> None:
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
self.page.enter_edit_state()
self.page.add_widget_through_dashboard("New Widget")
# Drag to the right
dragHandle = self.browser.element(WIDGET_DRAG_HANDLE)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(dragHandle, 1000, 0).perform()
self.page.save_dashboard()
self.capture_screenshots("dashboards - save new widget layout in custom dashboard")
@pytest.mark.skip(reason="TODO: Convert to new widget builder or test with jest")
def test_create_new_dashboard_with_modified_widget_layout(self) -> None:
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
# Create a new dashboard
self.page.visit_create_dashboard()
self.page.add_widget_through_dashboard("New Widget")
# Drag to the right
dragHandle = self.browser.element(WIDGET_DRAG_HANDLE)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(dragHandle, 1000, 0).perform()
self.page.save_dashboard()
# Wait for page redirect, or else loading check passes too early
wait = WebDriverWait(self.browser.driver, 10)
wait.until(
lambda driver: (
f"/organizations/{self.organization.slug}/dashboards/new/"
not in driver.current_url
)
)
self.capture_screenshots("dashboards - save widget layout in new custom dashboard")
def test_move_existing_widget_on_existing_dashboard(self) -> None:
existing_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Existing Widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.TRANSACTION_LIKE,
interval="1d",
)
DashboardWidgetQuery.objects.create(
widget=existing_widget, fields=["count()"], columns=[], aggregates=["count()"], order=0
)
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
self.page.enter_edit_state()
# Drag to the right
dragHandle = self.browser.element(WIDGET_DRAG_HANDLE)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(dragHandle, 1000, 0).perform()
self.page.save_dashboard()
self.capture_screenshots("dashboards - move existing widget on existing dashboard")
@pytest.mark.skip(reason="flaky: DD-1216")
def test_widget_edit_keeps_same_layout_after_modification(self) -> None:
existing_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Existing Widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
interval="1d",
)
DashboardWidgetQuery.objects.create(
widget=existing_widget, fields=["count()"], columns=[], aggregates=["count()"], order=0
)
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
self.page.enter_edit_state()
# Drag existing widget to the right
dragHandle = self.browser.element(WIDGET_DRAG_HANDLE)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(dragHandle, 1000, 0).perform()
# Edit the existing widget
button = self.browser.element(EDIT_WIDGET_BUTTON)
button.click()
title_input = self.browser.element(WIDGET_TITLE_FIELD)
title_input.clear()
title_input.send_keys(Keys.END, "Existing WidgetUPDATED!!")
button = self.browser.element('[aria-label="Update Widget"]')
button.click()
# Add and drag new widget to the right
self.page.add_widget_through_dashboard("New Widget")
dragHandle = self.browser.element(
f".react-grid-item:nth-of-type(2) {WIDGET_DRAG_HANDLE}"
)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(dragHandle, 1000, 0)
action.perform()
# Edit the new widget
button = self.browser.element(f".react-grid-item:nth-of-type(2) {EDIT_WIDGET_BUTTON}")
button.click()
title_input = self.browser.element(WIDGET_TITLE_FIELD)
title_input.clear()
title_input.send_keys(Keys.END, "New WidgetUPDATED!!")
button = self.browser.element('[aria-label="Update Widget"]')
button.click()
self.page.save_dashboard()
self.capture_screenshots(
"dashboards - edit widgets after layout change does not reset layout"
)
@pytest.mark.skip(reason="TODO: Convert to new widget builder or test with jest")
def test_add_issue_widgets_do_not_overlap(self) -> None:
def add_issue_widget(widget_title: str) -> None:
self.browser.wait_until_clickable('[data-test-id="widget-add"]')
self.page.click_dashboard_add_widget_button()
title_input = self.browser.element(WIDGET_TITLE_FIELD)
title_input.clear()
title_input.send_keys(widget_title)
self.browser.element('[aria-label="Issues (States, Assignment, Time, etc.)"]').click()
button = self.browser.element('[aria-label="Add Widget"]')
button.click()
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
self.page.enter_edit_state()
add_issue_widget("Issue Widget 1")
add_issue_widget("Issue Widget 2")
self.page.save_dashboard()
self.capture_screenshots("dashboards - issue widgets do not overlap")
@pytest.mark.skip(reason="TODO: Convert to new widget builder or test with jest")
def test_resize_new_and_existing_widgets(self) -> None:
existing_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Existing Widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
interval="1d",
)
DashboardWidgetQuery.objects.create(
widget=existing_widget, fields=["count()"], columns=[], aggregates=["count()"], order=0
)
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
self.page.enter_edit_state()
# Resize existing widget
resizeHandle = self.browser.element(WIDGET_RESIZE_HANDLE)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(resizeHandle, 500, 0).perform()
self.page.add_widget_through_dashboard("New Widget")
# Drag it to the left for consistency
dragHandle = self.browser.element(
f".react-grid-item:nth-of-type(2) {WIDGET_DRAG_HANDLE}"
)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(dragHandle, -1000, 0).perform()
# Resize new widget, get the 2nd element instead of the "last" because the "last" is
# the add widget button
resizeHandle = self.browser.element(
f".react-grid-item:nth-of-type(2) {WIDGET_RESIZE_HANDLE}"
)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(resizeHandle, 500, 0).perform()
self.page.save_dashboard()
self.capture_screenshots("dashboards - resize new and existing widgets")
@pytest.mark.skip(reason="TODO: Convert to new widget builder or test with jest")
def test_delete_existing_widget_does_not_trigger_new_widget_layout_reset(self) -> None:
existing_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Existing Widget",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 2, "h": 2, "minH": 2}},
)
DashboardWidgetQuery.objects.create(
widget=existing_widget, fields=["count()"], columns=[], aggregates=["count()"], order=0
)
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
self.page.enter_edit_state()
self.page.add_widget_through_dashboard("New Widget")
# Drag it to the bottom left
dragHandle = self.browser.element(
f".react-grid-item:nth-of-type(2) {WIDGET_DRAG_HANDLE}"
)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(dragHandle, -500, 500).perform()
# Resize new widget, get the 2nd element instead of the "last" because the "last" is
# the add widget button
resizeHandle = self.browser.element(
f".react-grid-item:nth-of-type(2) {WIDGET_RESIZE_HANDLE}"
)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(resizeHandle, 500, 0).perform()
# Delete first existing widget
delete_widget_button = self.browser.element(
'.react-grid-item:first-of-type [data-test-id="widget-delete"]'
)
delete_widget_button.click()
self.page.save_dashboard()
self.capture_screenshots(
"dashboards - delete existing widget does not reset new widget layout"
)
def test_resize_big_number_widget(self) -> None:
existing_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Big Number Widget",
display_type=DashboardWidgetDisplayTypes.BIG_NUMBER,
widget_type=DashboardWidgetTypes.TRANSACTION_LIKE,
interval="1d",
)
DashboardWidgetQuery.objects.create(
widget=existing_widget,
fields=["count_unique(issue)"],
columns=[],
aggregates=["count_unique(issue)"],
order=0,
)
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
self.page.enter_edit_state()
# Resize existing widget
resizeHandle = self.browser.element(WIDGET_RESIZE_HANDLE)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(resizeHandle, 200, 200).perform()
self.page.save_dashboard()
self.capture_screenshots("dashboards - resize big number widget")
def test_default_layout_when_widgets_do_not_have_layout_set(self) -> None:
existing_widgets = DashboardWidget.objects.bulk_create(
[
DashboardWidget(
dashboard=self.dashboard,
title=f"Existing Widget {i}",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
interval="1d",
)
for i in range(4)
]
)
DashboardWidgetQuery.objects.bulk_create(
[
DashboardWidgetQuery(
widget=existing_widget,
fields=["count()"],
columns=[],
aggregates=["count()"],
order=0,
)
for existing_widget in existing_widgets
]
)
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
self.page.wait_until_loaded()
def test_delete_widget_in_view_mode(self) -> None:
existing_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Big Number Widget",
display_type=DashboardWidgetDisplayTypes.BIG_NUMBER,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
interval="1d",
)
DashboardWidgetQuery.objects.create(
widget=existing_widget,
fields=["count_unique(issue)"],
columns=[],
aggregates=["count_unique(issue)"],
order=0,
)
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
# Hover over the widget to show widget actions
self.browser.move_to('[aria-label="Widget panel"]')
self.browser.element('[aria-label="Widget actions"]').click()
self.browser.element('[data-test-id="delete-widget"]').click()
self.browser.element('[data-test-id="confirm-button"]').click()
self.page.wait_until_loaded()
@pytest.mark.skip(reason="TODO: Convert to new widget builder or test with jest")
def test_cancel_without_changes_does_not_trigger_confirm_with_custom_widget_through_header(
self,
) -> None:
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
self.page.click_dashboard_header_add_widget_button()
title_input = self.browser.element(WIDGET_TITLE_FIELD)
title_input.send_keys("New custom widget")
button = self.browser.element('[aria-label="Add Widget"]')
button.click()
self.page.wait_until_loaded()
# Should not trigger confirm dialog
self.page.enter_edit_state()
self.page.click_cancel_button()
wait = WebDriverWait(self.browser.driver, 5)
wait.until_not(EC.alert_is_present())
@pytest.mark.skip(reason="TODO: Convert to new widget builder or test with jest")
def test_position_when_adding_multiple_widgets_through_add_widget_tile_in_edit(
self,
) -> None:
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
self.page.enter_edit_state()
# Widgets should take up the whole first row and the first spot in second row
self.page.add_widget_through_dashboard("A")
self.page.add_widget_through_dashboard("B")
self.page.add_widget_through_dashboard("C")
self.page.add_widget_through_dashboard("D")
self.page.wait_until_loaded()
self.page.save_dashboard()
self.capture_screenshots(
"dashboards - position when adding multiple widgets through Add Widget tile in edit"
)
@pytest.mark.skip(reason="flaky: DD-1217")
def test_position_when_adding_multiple_widgets_through_add_widget_tile_in_create(
self,
) -> None:
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_create_dashboard()
# Widgets should take up the whole first row and the first spot in second row
self.page.add_widget_through_dashboard("A")
self.page.add_widget_through_dashboard("B")
self.page.add_widget_through_dashboard("C")
self.page.add_widget_through_dashboard("D")
self.page.wait_until_loaded()
self.page.save_dashboard()
# Wait for page redirect, or else loading check passes too early
wait = WebDriverWait(self.browser.driver, 10)
wait.until(
lambda driver: (
f"/organizations/{self.organization.slug}/dashboards/new/"
not in driver.current_url
)
)
self.capture_screenshots(
"dashboards - position when adding multiple widgets through Add Widget tile in create"
)
def test_deleting_stacked_widgets_by_context_menu_does_not_trigger_confirm_on_edit_cancel(
self,
) -> None:
layouts = [
{"x": 0, "y": 0, "w": 2, "h": 2, "minH": 2},
{"x": 0, "y": 2, "w": 2, "h": 2, "minH": 2},
]
existing_widgets = DashboardWidget.objects.bulk_create(
[
DashboardWidget(
dashboard=self.dashboard,
title=f"Existing Widget {i}",
display_type=DashboardWidgetDisplayTypes.LINE_CHART,
widget_type=DashboardWidgetTypes.TRANSACTION_LIKE,
interval="1d",
detail={"layout": layout},
)
for i, layout in enumerate(layouts)
]
)
DashboardWidgetQuery.objects.bulk_create(
DashboardWidgetQuery(
widget=widget, fields=["count()"], columns=[], aggregates=["count()"], order=0
)
for widget in existing_widgets
)
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
# Hover over the widget to show widget actions
self.browser.move_to('[aria-label="Widget panel"]')
dropdown_trigger = self.browser.element('[aria-label="Widget actions"]')
dropdown_trigger.click()
delete_widget_menu_item = self.browser.element('[data-test-id="delete-widget"]')
delete_widget_menu_item.click()
confirm_button = self.browser.element('[data-test-id="confirm-button"]')
confirm_button.click()
wait = WebDriverWait(self.browser.driver, 5)
wait.until(
EC.presence_of_element_located(
(By.XPATH, "//*[contains(text(),'Dashboard updated')]")
)
)
# Should not trigger confirm dialog
self.page.enter_edit_state()
self.page.click_cancel_button()
wait.until_not(EC.alert_is_present())
@pytest.mark.skip(reason="TODO: Convert to new widget builder or test with jest")
def test_changing_number_widget_to_area_updates_widget_height(
self,
) -> None:
layouts = [
(DashboardWidgetDisplayTypes.BIG_NUMBER, {"x": 0, "y": 0, "w": 2, "h": 1, "minH": 1}),
(DashboardWidgetDisplayTypes.LINE_CHART, {"x": 0, "y": 1, "w": 2, "h": 2, "minH": 2}),
]
existing_widgets = DashboardWidget.objects.bulk_create(
[
DashboardWidget(
dashboard=self.dashboard,
title=f"Widget {i}",
display_type=display_type,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
interval="1d",
detail={"layout": layout},
)
for i, (display_type, layout) in enumerate(layouts)
]
)
DashboardWidgetQuery.objects.bulk_create(
DashboardWidgetQuery(
widget=widget, fields=["count()"], columns=[], aggregates=["count()"], order=0
)
for widget in existing_widgets
)
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
# Hover over the widget to show widget actions
self.browser.move_to('[aria-label="Widget panel"]')
# Open edit modal for first widget
dropdown_trigger = self.browser.element('[aria-label="Widget actions"]')
dropdown_trigger.click()
edit_widget_menu_item = self.browser.element('[data-test-id="edit-widget"]')
edit_widget_menu_item.click()
# Change the chart type to the first visualization option - Area chart
chart_type_input = self.browser.element("#react-select-2-input")
chart_type_input.send_keys("Area", Keys.ENTER)
button = self.browser.element('[aria-label="Update Widget"]')
button.click()
# No confirm dialog because of shifting lower element
self.page.enter_edit_state()
self.page.click_cancel_button()
wait = WebDriverWait(self.browser.driver, 5)
wait.until_not(EC.alert_is_present())
# Try to decrease height to 1 row, should stay at 2 rows
self.page.enter_edit_state()
resizeHandle = self.browser.element(WIDGET_RESIZE_HANDLE)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(resizeHandle, 0, -100).perform()
self.page.save_dashboard()
@pytest.mark.skip(reason="flaky behaviour due to loading spinner")
def test_changing_number_widget_larger_than_min_height_for_area_chart_keeps_height(
self,
) -> None:
existing_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Originally Big Number - 3 rows",
display_type=DashboardWidgetDisplayTypes.BIG_NUMBER,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 2, "h": 3, "minH": 1}},
)
DashboardWidgetQuery.objects.create(
widget=existing_widget, fields=["count()"], columns=[], aggregates=["count()"], order=0
)
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
# Open edit modal for first widget
dropdown_trigger = self.browser.element('[aria-label="Widget actions"]')
dropdown_trigger.click()
edit_widget_menu_item = self.browser.element('[data-test-id="edit-widget"]')
edit_widget_menu_item.click()
# Change the chart type to the first visualization option - Area chart
chart_type_input = self.browser.element("#react-select-2-input")
chart_type_input.send_keys("Area", Keys.ENTER)
button = self.browser.element('[aria-label="Update Widget"]')
button.click()
self.page.wait_until_loaded()
# Try to decrease height by >1 row, should be at 2 rows
self.page.enter_edit_state()
resizeHandle = self.browser.element(WIDGET_RESIZE_HANDLE)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(resizeHandle, 0, -400).perform()
self.page.save_dashboard()
@pytest.mark.skip(reason="flaky: DD-1211")
def test_changing_area_widget_larger_than_min_height_for_number_chart_keeps_height(
self,
) -> None:
existing_widget = DashboardWidget.objects.create(
dashboard=self.dashboard,
title="Originally Area Chart - 3 rows",
display_type=DashboardWidgetDisplayTypes.AREA_CHART,
widget_type=DashboardWidgetTypes.ERROR_EVENTS,
interval="1d",
detail={"layout": {"x": 0, "y": 0, "w": 2, "h": 3, "minH": 2}},
)
DashboardWidgetQuery.objects.create(
widget=existing_widget, fields=["count()"], columns=[], aggregates=["count()"], order=0
)
with self.feature(FEATURE_NAMES + EDIT_FEATURE):
self.page.visit_dashboard_detail()
# Open edit modal for first widget
dropdown_trigger = self.browser.element('[aria-label="Widget actions"]')
dropdown_trigger.click()
edit_widget_menu_item = self.browser.element('[data-test-id="edit-widget"]')
edit_widget_menu_item.click()
# Change the chart type to big number
chart_type_input = self.browser.element("#react-select-2-input")
chart_type_input.send_keys("Big Number", Keys.ENTER)
button = self.browser.element('[aria-label="Update Widget"]')
button.click()
self.page.wait_until_loaded()
# Decrease height by >1 row, should stop at 1 row
self.page.enter_edit_state()
resizeHandle = self.browser.element(WIDGET_RESIZE_HANDLE)
action = ActionChains(self.browser.driver)
action.drag_and_drop_by_offset(resizeHandle, 0, -400).perform()
self.page.save_dashboard()
@no_silo_test
| OrganizationDashboardsAcceptanceTest |
python | palantir__python-language-server | pyls/workspace.py | {
"start": 591,
"end": 4300
} | class ____(object):
M_PUBLISH_DIAGNOSTICS = 'textDocument/publishDiagnostics'
M_APPLY_EDIT = 'workspace/applyEdit'
M_SHOW_MESSAGE = 'window/showMessage'
def __init__(self, root_uri, endpoint, config=None):
self._config = config
self._root_uri = root_uri
self._endpoint = endpoint
self._root_uri_scheme = uris.urlparse(self._root_uri)[0]
self._root_path = uris.to_fs_path(self._root_uri)
self._docs = {}
# Cache jedi environments
self._environments = {}
# Whilst incubating, keep rope private
self.__rope = None
self.__rope_config = None
def _rope_project_builder(self, rope_config):
from rope.base.project import Project
# TODO: we could keep track of dirty files and validate only those
if self.__rope is None or self.__rope_config != rope_config:
rope_folder = rope_config.get('ropeFolder')
self.__rope = Project(self._root_path, ropefolder=rope_folder)
self.__rope.prefs.set('extension_modules', rope_config.get('extensionModules', []))
self.__rope.prefs.set('ignore_syntax_errors', True)
self.__rope.prefs.set('ignore_bad_imports', True)
self.__rope.validate()
return self.__rope
@property
def documents(self):
return self._docs
@property
def root_path(self):
return self._root_path
@property
def root_uri(self):
return self._root_uri
def is_local(self):
return (self._root_uri_scheme == '' or self._root_uri_scheme == 'file') and os.path.exists(self._root_path)
def get_document(self, doc_uri):
"""Return a managed document if-present, else create one pointing at disk.
See https://github.com/Microsoft/language-server-protocol/issues/177
"""
return self._docs.get(doc_uri) or self._create_document(doc_uri)
def get_maybe_document(self, doc_uri):
return self._docs.get(doc_uri)
def put_document(self, doc_uri, source, version=None):
self._docs[doc_uri] = self._create_document(doc_uri, source=source, version=version)
def rm_document(self, doc_uri):
self._docs.pop(doc_uri)
def update_document(self, doc_uri, change, version=None):
self._docs[doc_uri].apply_change(change)
self._docs[doc_uri].version = version
def update_config(self, settings):
self._config.update((settings or {}).get('pyls', {}))
for doc_uri in self.documents:
self.get_document(doc_uri).update_config(settings)
def apply_edit(self, edit):
return self._endpoint.request(self.M_APPLY_EDIT, {'edit': edit})
def publish_diagnostics(self, doc_uri, diagnostics):
self._endpoint.notify(self.M_PUBLISH_DIAGNOSTICS, params={'uri': doc_uri, 'diagnostics': diagnostics})
def show_message(self, message, msg_type=lsp.MessageType.Info):
self._endpoint.notify(self.M_SHOW_MESSAGE, params={'type': msg_type, 'message': message})
def source_roots(self, document_path):
"""Return the source roots for the given document."""
files = _utils.find_parents(self._root_path, document_path, ['setup.py', 'pyproject.toml']) or []
return list({os.path.dirname(project_file) for project_file in files}) or [self._root_path]
def _create_document(self, doc_uri, source=None, version=None):
path = uris.to_fs_path(doc_uri)
return Document(
doc_uri,
self,
source=source,
version=version,
extra_sys_path=self.source_roots(path),
rope_project_builder=self._rope_project_builder,
)
| Workspace |
python | getsentry__sentry | tests/sentry/api/bases/test_project.py | {
"start": 12359,
"end": 21913
} | class ____(ProjectPermissionBase):
def setUp(self) -> None:
super().setUp()
self.organization.flags.allow_joinleave = False
self.organization.save()
self.team = self.create_team(organization=self.organization)
self.project = self.create_project(organization=self.organization)
def test_regular_user(self) -> None:
user = self.create_user(is_superuser=False)
assert not self.has_object_perm("GET", self.project, user=user)
assert not self.has_object_perm("POST", self.project, user=user)
assert not self.has_object_perm("PUT", self.project, user=user)
assert not self.has_object_perm("DELETE", self.project, user=user)
def test_superuser(self) -> None:
user = self.create_user(is_superuser=True)
assert self.has_object_perm("GET", self.project, user=user, is_superuser=True)
assert self.has_object_perm("POST", self.project, user=user, is_superuser=True)
assert self.has_object_perm("PUT", self.project, user=user, is_superuser=True)
assert self.has_object_perm("DELETE", self.project, user=user, is_superuser=True)
def test_member_without_team_membership(self) -> None:
team = self.create_team(organization=self.organization)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="member", teams=[team])
assert not self.has_object_perm("GET", self.project, user=user)
assert not self.has_object_perm("POST", self.project, user=user)
assert not self.has_object_perm("PUT", self.project, user=user)
assert not self.has_object_perm("DELETE", self.project, user=user)
def test_member_with_team_membership(self) -> None:
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization, role="member", teams=[self.team]
)
assert self.has_object_perm("GET", self.project, user=user)
assert not self.has_object_perm("POST", self.project, user=user)
assert not self.has_object_perm("PUT", self.project, user=user)
assert not self.has_object_perm("DELETE", self.project, user=user)
@with_feature("organizations:team-roles")
def test_member_with_team_membership_and_team_role(self) -> None:
team = self.create_team(organization=self.organization)
project = self.create_project(organization=self.organization, teams=[team])
user = self.create_user(is_superuser=False)
member = self.create_member(user=user, organization=self.organization, role="member")
self.create_team_membership(team, member, role="admin")
assert self.has_object_perm("GET", project, user=user)
assert self.has_object_perm("POST", project, user=user)
assert self.has_object_perm("PUT", project, user=user)
assert self.has_object_perm("DELETE", project, user=user)
def test_admin_without_team_membership(self) -> None:
team = self.create_team(organization=self.organization)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="admin", teams=[team])
# if `allow_joinleave` is False, admins can't act on teams that
# they don't have access to
assert not self.has_object_perm("GET", self.project, user=user)
assert not self.has_object_perm("POST", self.project, user=user)
assert not self.has_object_perm("PUT", self.project, user=user)
assert not self.has_object_perm("DELETE", self.project, user=user)
def test_admin_with_team_membership(self) -> None:
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization, role="admin", teams=[self.team]
)
assert self.has_object_perm("POST", self.project, user=user)
assert self.has_object_perm("POST", self.project, user=user)
assert self.has_object_perm("PUT", self.project, user=user)
assert self.has_object_perm("DELETE", self.project, user=user)
def test_manager_without_team_membership(self) -> None:
team = self.create_team(organization=self.organization)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="manager", teams=[team])
assert self.has_object_perm("GET", self.project, user=user)
assert self.has_object_perm("POST", self.project, user=user)
assert self.has_object_perm("PUT", self.project, user=user)
assert self.has_object_perm("DELETE", self.project, user=user)
def test_manager_with_team_membership(self) -> None:
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization, role="manager", teams=[self.team]
)
assert self.has_object_perm("GET", self.project, user=user)
assert self.has_object_perm("POST", self.project, user=user)
assert self.has_object_perm("PUT", self.project, user=user)
assert self.has_object_perm("DELETE", self.project, user=user)
def test_manager_if_project_has_no_teams(self) -> None:
project = self.create_project(organization=self.organization, teams=[])
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="manager")
assert self.has_object_perm("GET", project, user=user)
assert self.has_object_perm("POST", project, user=user)
assert self.has_object_perm("PUT", project, user=user)
assert self.has_object_perm("DELETE", project, user=user)
def test_owner_without_team_membership(self) -> None:
team = self.create_team(organization=self.organization)
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="owner", teams=[team])
assert self.has_object_perm("GET", self.project, user=user)
assert self.has_object_perm("POST", self.project, user=user)
assert self.has_object_perm("PUT", self.project, user=user)
assert self.has_object_perm("DELETE", self.project, user=user)
def test_owner_with_team_membership(self) -> None:
user = self.create_user(is_superuser=False)
self.create_member(
user=user, organization=self.organization, role="owner", teams=[self.team]
)
assert self.has_object_perm("GET", self.project, user=user)
assert self.has_object_perm("POST", self.project, user=user)
assert self.has_object_perm("PUT", self.project, user=user)
assert self.has_object_perm("DELETE", self.project, user=user)
def test_owner_if_project_has_no_teams(self) -> None:
project = self.create_project(organization=self.organization, teams=[])
user = self.create_user(is_superuser=False)
self.create_member(user=user, organization=self.organization, role="owner")
assert self.has_object_perm("GET", project, user=user)
assert self.has_object_perm("POST", project, user=user)
assert self.has_object_perm("PUT", project, user=user)
assert self.has_object_perm("DELETE", project, user=user)
def test_api_key_with_org_access(self) -> None:
key = self.create_api_key(organization=self.organization, scope_list=["project:read"])
assert self.has_object_perm("GET", self.project, auth=key)
assert not self.has_object_perm("POST", self.project, auth=key)
assert not self.has_object_perm("PUT", self.project, auth=key)
assert not self.has_object_perm("DELETE", self.project, auth=key)
def test_api_key_without_org_access(self) -> None:
key = self.create_api_key(
organization=self.create_organization(), scope_list=["project:read"]
)
assert not self.has_object_perm("GET", self.project, auth=key)
assert not self.has_object_perm("POST", self.project, auth=key)
assert not self.has_object_perm("PUT", self.project, auth=key)
assert not self.has_object_perm("DELETE", self.project, auth=key)
def test_api_key_without_access(self) -> None:
key = self.create_api_key(organization=self.organization)
assert not self.has_object_perm("GET", self.project, auth=key)
assert not self.has_object_perm("POST", self.project, auth=key)
assert not self.has_object_perm("PUT", self.project, auth=key)
assert not self.has_object_perm("DELETE", self.project, auth=key)
def test_api_key_with_wrong_access(self) -> None:
key = self.create_api_key(organization=self.organization, scope_list=["team:read"])
assert not self.has_object_perm("GET", self.project, auth=key)
assert not self.has_object_perm("POST", self.project, auth=key)
assert not self.has_object_perm("PUT", self.project, auth=key)
assert not self.has_object_perm("DELETE", self.project, auth=key)
def test_api_key_with_wrong_access_for_method(self) -> None:
key = self.create_api_key(organization=self.organization, scope_list=["project:write"])
assert self.has_object_perm("GET", self.project, auth=key)
assert self.has_object_perm("POST", self.project, auth=key)
assert self.has_object_perm("PUT", self.project, auth=key)
assert not self.has_object_perm("DELETE", self.project, auth=key)
| ProjectPermissionNoJoinLeaveTest |
python | great-expectations__great_expectations | great_expectations/data_context/store/gx_cloud_store_backend.py | {
"start": 1113,
"end": 1176
} | class ____(TypedDict):
errors: List[ErrorDetail]
| ErrorPayload |
python | kamyu104__LeetCode-Solutions | Python/find-the-longest-substring-containing-vowels-in-even-counts.py | {
"start": 29,
"end": 529
} | class ____(object):
def findTheLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
VOWELS = "aeiou"
result, mask, lookup = 0, 0, [-2]*(2**len(VOWELS))
lookup[0] = -1
for i, c in enumerate(s):
index = VOWELS.find(c)
mask ^= (1 << index) if index >= 0 else 0
if lookup[mask] == -2:
lookup[mask] = i
result = max(result, i-lookup[mask])
return result
| Solution |
python | django__django | tests/m2m_signals/tests.py | {
"start": 174,
"end": 19116
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.vw = Car.objects.create(name="VW")
cls.bmw = Car.objects.create(name="BMW")
cls.toyota = Car.objects.create(name="Toyota")
cls.wheelset = Part.objects.create(name="Wheelset")
cls.doors = Part.objects.create(name="Doors")
cls.engine = Part.objects.create(name="Engine")
cls.airbag = Part.objects.create(name="Airbag")
cls.sunroof = Part.objects.create(name="Sunroof")
cls.alice = Person.objects.create(name="Alice")
cls.bob = Person.objects.create(name="Bob")
cls.chuck = Person.objects.create(name="Chuck")
cls.daisy = Person.objects.create(name="Daisy")
def setUp(self):
self.m2m_changed_messages = []
def m2m_changed_signal_receiver(self, signal, sender, **kwargs):
message = {
"instance": kwargs["instance"],
"action": kwargs["action"],
"reverse": kwargs["reverse"],
"model": kwargs["model"],
}
if kwargs["pk_set"]:
message["objects"] = list(
kwargs["model"].objects.filter(pk__in=kwargs["pk_set"])
)
self.m2m_changed_messages.append(message)
def tearDown(self):
# disconnect all signal handlers
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.disconnect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def _initialize_signal_car(self, add_default_parts_before_set_signal=False):
"""Install a listener on the two m2m relations."""
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.optional_parts.through
)
if add_default_parts_before_set_signal:
# adding a default part to our car - no signal listener installed
self.vw.default_parts.add(self.sunroof)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Car.default_parts.through
)
def test_pk_set_on_repeated_add_remove(self):
"""
m2m_changed is always fired, even for repeated calls to the same
method, but the behavior of pk_sets differs by action.
- For signals related to `add()`, only PKs that will actually be
inserted are sent.
- For `remove()` all PKs are sent, even if they will not affect the DB.
"""
pk_sets_sent = []
def handler(signal, sender, **kwargs):
if kwargs["action"] in ["pre_add", "pre_remove"]:
pk_sets_sent.append(kwargs["pk_set"])
models.signals.m2m_changed.connect(handler, Car.default_parts.through)
self.vw.default_parts.add(self.wheelset)
self.vw.default_parts.add(self.wheelset)
self.vw.default_parts.remove(self.wheelset)
self.vw.default_parts.remove(self.wheelset)
expected_pk_sets = [
{self.wheelset.pk},
set(),
{self.wheelset.pk},
{self.wheelset.pk},
]
self.assertEqual(pk_sets_sent, expected_pk_sets)
models.signals.m2m_changed.disconnect(handler, Car.default_parts.through)
def test_m2m_relations_add_remove_clear(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
self.vw.default_parts.add(self.wheelset, self.doors, self.engine)
expected_messages.append(
{
"instance": self.vw,
"action": "pre_add",
"reverse": False,
"model": Part,
"objects": [self.doors, self.engine, self.wheelset],
}
)
expected_messages.append(
{
"instance": self.vw,
"action": "post_add",
"reverse": False,
"model": Part,
"objects": [self.doors, self.engine, self.wheelset],
}
)
self.assertEqual(self.m2m_changed_messages, expected_messages)
# give the BMW and Toyota some doors as well
self.doors.car_set.add(self.bmw, self.toyota)
expected_messages.append(
{
"instance": self.doors,
"action": "pre_add",
"reverse": True,
"model": Car,
"objects": [self.bmw, self.toyota],
}
)
expected_messages.append(
{
"instance": self.doors,
"action": "post_add",
"reverse": True,
"model": Car,
"objects": [self.bmw, self.toyota],
}
)
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_remove_relation(self):
self._initialize_signal_car()
# remove the engine from the self.vw and the airbag (which is not set
# but is returned)
self.vw.default_parts.remove(self.engine, self.airbag)
self.assertEqual(
self.m2m_changed_messages,
[
{
"instance": self.vw,
"action": "pre_remove",
"reverse": False,
"model": Part,
"objects": [self.airbag, self.engine],
},
{
"instance": self.vw,
"action": "post_remove",
"reverse": False,
"model": Part,
"objects": [self.airbag, self.engine],
},
],
)
def test_m2m_relations_signals_give_the_self_vw_some_optional_parts(self):
expected_messages = []
self._initialize_signal_car()
# give the self.vw some optional parts (second relation to same model)
self.vw.optional_parts.add(self.airbag, self.sunroof)
expected_messages.append(
{
"instance": self.vw,
"action": "pre_add",
"reverse": False,
"model": Part,
"objects": [self.airbag, self.sunroof],
}
)
expected_messages.append(
{
"instance": self.vw,
"action": "post_add",
"reverse": False,
"model": Part,
"objects": [self.airbag, self.sunroof],
}
)
self.assertEqual(self.m2m_changed_messages, expected_messages)
# add airbag to all the cars (even though the self.vw already has one)
self.airbag.cars_optional.add(self.vw, self.bmw, self.toyota)
expected_messages.append(
{
"instance": self.airbag,
"action": "pre_add",
"reverse": True,
"model": Car,
"objects": [self.bmw, self.toyota],
}
)
expected_messages.append(
{
"instance": self.airbag,
"action": "post_add",
"reverse": True,
"model": Car,
"objects": [self.bmw, self.toyota],
}
)
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_reverse_relation_with_custom_related_name(self):
self._initialize_signal_car()
# remove airbag from the self.vw (reverse relation with custom
# related_name)
self.airbag.cars_optional.remove(self.vw)
self.assertEqual(
self.m2m_changed_messages,
[
{
"instance": self.airbag,
"action": "pre_remove",
"reverse": True,
"model": Car,
"objects": [self.vw],
},
{
"instance": self.airbag,
"action": "post_remove",
"reverse": True,
"model": Car,
"objects": [self.vw],
},
],
)
def test_m2m_relations_signals_clear_all_parts_of_the_self_vw(self):
self._initialize_signal_car()
# clear all parts of the self.vw
self.vw.default_parts.clear()
self.assertEqual(
self.m2m_changed_messages,
[
{
"instance": self.vw,
"action": "pre_clear",
"reverse": False,
"model": Part,
},
{
"instance": self.vw,
"action": "post_clear",
"reverse": False,
"model": Part,
},
],
)
def test_m2m_relations_signals_all_the_doors_off_of_cars(self):
self._initialize_signal_car()
# take all the doors off of cars
self.doors.car_set.clear()
self.assertEqual(
self.m2m_changed_messages,
[
{
"instance": self.doors,
"action": "pre_clear",
"reverse": True,
"model": Car,
},
{
"instance": self.doors,
"action": "post_clear",
"reverse": True,
"model": Car,
},
],
)
def test_m2m_relations_signals_reverse_relation(self):
self._initialize_signal_car()
# take all the airbags off of cars (clear reverse relation with custom
# related_name)
self.airbag.cars_optional.clear()
self.assertEqual(
self.m2m_changed_messages,
[
{
"instance": self.airbag,
"action": "pre_clear",
"reverse": True,
"model": Car,
},
{
"instance": self.airbag,
"action": "post_clear",
"reverse": True,
"model": Car,
},
],
)
def test_m2m_relations_signals_alternative_ways(self):
expected_messages = []
self._initialize_signal_car()
# alternative ways of setting relation:
self.vw.default_parts.create(name="Windows")
p6 = Part.objects.get(name="Windows")
expected_messages.append(
{
"instance": self.vw,
"action": "pre_add",
"reverse": False,
"model": Part,
"objects": [p6],
}
)
expected_messages.append(
{
"instance": self.vw,
"action": "post_add",
"reverse": False,
"model": Part,
"objects": [p6],
}
)
self.assertEqual(self.m2m_changed_messages, expected_messages)
# direct assignment clears the set first, then adds
self.vw.default_parts.set([self.wheelset, self.doors, self.engine])
expected_messages.append(
{
"instance": self.vw,
"action": "pre_remove",
"reverse": False,
"model": Part,
"objects": [p6],
}
)
expected_messages.append(
{
"instance": self.vw,
"action": "post_remove",
"reverse": False,
"model": Part,
"objects": [p6],
}
)
expected_messages.append(
{
"instance": self.vw,
"action": "pre_add",
"reverse": False,
"model": Part,
"objects": [self.doors, self.engine, self.wheelset],
}
)
expected_messages.append(
{
"instance": self.vw,
"action": "post_add",
"reverse": False,
"model": Part,
"objects": [self.doors, self.engine, self.wheelset],
}
)
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_clearing_removing(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
# set by clearing.
self.vw.default_parts.set([self.wheelset, self.doors, self.engine], clear=True)
expected_messages.append(
{
"instance": self.vw,
"action": "pre_clear",
"reverse": False,
"model": Part,
}
)
expected_messages.append(
{
"instance": self.vw,
"action": "post_clear",
"reverse": False,
"model": Part,
}
)
expected_messages.append(
{
"instance": self.vw,
"action": "pre_add",
"reverse": False,
"model": Part,
"objects": [self.doors, self.engine, self.wheelset],
}
)
expected_messages.append(
{
"instance": self.vw,
"action": "post_add",
"reverse": False,
"model": Part,
"objects": [self.doors, self.engine, self.wheelset],
}
)
self.assertEqual(self.m2m_changed_messages, expected_messages)
# set by only removing what's necessary.
self.vw.default_parts.set([self.wheelset, self.doors], clear=False)
expected_messages.append(
{
"instance": self.vw,
"action": "pre_remove",
"reverse": False,
"model": Part,
"objects": [self.engine],
}
)
expected_messages.append(
{
"instance": self.vw,
"action": "post_remove",
"reverse": False,
"model": Part,
"objects": [self.engine],
}
)
self.assertEqual(self.m2m_changed_messages, expected_messages)
def test_m2m_relations_signals_when_inheritance(self):
expected_messages = []
self._initialize_signal_car(add_default_parts_before_set_signal=True)
# Signals still work when model inheritance is involved
c4 = SportsCar.objects.create(name="Bugatti", price="1000000")
c4b = Car.objects.get(name="Bugatti")
c4.default_parts.set([self.doors])
expected_messages.append(
{
"instance": c4,
"action": "pre_add",
"reverse": False,
"model": Part,
"objects": [self.doors],
}
)
expected_messages.append(
{
"instance": c4,
"action": "post_add",
"reverse": False,
"model": Part,
"objects": [self.doors],
}
)
self.assertEqual(self.m2m_changed_messages, expected_messages)
self.engine.car_set.add(c4)
expected_messages.append(
{
"instance": self.engine,
"action": "pre_add",
"reverse": True,
"model": Car,
"objects": [c4b],
}
)
expected_messages.append(
{
"instance": self.engine,
"action": "post_add",
"reverse": True,
"model": Car,
"objects": [c4b],
}
)
self.assertEqual(self.m2m_changed_messages, expected_messages)
def _initialize_signal_person(self):
# Install a listener on the two m2m relations.
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.fans.through
)
models.signals.m2m_changed.connect(
self.m2m_changed_signal_receiver, Person.friends.through
)
def test_m2m_relations_with_self_add_friends(self):
self._initialize_signal_person()
self.alice.friends.set([self.bob, self.chuck])
self.assertEqual(
self.m2m_changed_messages,
[
{
"instance": self.alice,
"action": "pre_add",
"reverse": False,
"model": Person,
"objects": [self.bob, self.chuck],
},
{
"instance": self.alice,
"action": "post_add",
"reverse": False,
"model": Person,
"objects": [self.bob, self.chuck],
},
],
)
def test_m2m_relations_with_self_add_fan(self):
self._initialize_signal_person()
self.alice.fans.set([self.daisy])
self.assertEqual(
self.m2m_changed_messages,
[
{
"instance": self.alice,
"action": "pre_add",
"reverse": False,
"model": Person,
"objects": [self.daisy],
},
{
"instance": self.alice,
"action": "post_add",
"reverse": False,
"model": Person,
"objects": [self.daisy],
},
],
)
def test_m2m_relations_with_self_add_idols(self):
self._initialize_signal_person()
self.chuck.idols.set([self.alice, self.bob])
self.assertEqual(
self.m2m_changed_messages,
[
{
"instance": self.chuck,
"action": "pre_add",
"reverse": True,
"model": Person,
"objects": [self.alice, self.bob],
},
{
"instance": self.chuck,
"action": "post_add",
"reverse": True,
"model": Person,
"objects": [self.alice, self.bob],
},
],
)
| ManyToManySignalsTest |
python | realpython__materials | python-property/circle_v6.py | {
"start": 63,
"end": 267
} | class ____:
def __init__(self, radius):
self.radius = radius
@cached_property
def diameter(self):
sleep(0.5) # Simulate a costly computation
return self.radius * 2
| Circle |
python | huggingface__transformers | src/transformers/models/seed_oss/modeling_seed_oss.py | {
"start": 15700,
"end": 18839
} | class ____(SeedOssPreTrainedModel):
def __init__(self, config: SeedOssConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[SeedOssDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = SeedOssRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = SeedOssRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
| SeedOssModel |
python | google__flatbuffers | grpc/examples/python/greeter/models/greeter_grpc_fb.py | {
"start": 530,
"end": 1423
} | class ____(object):
'''Interface exported by the server.'''
def SayHello(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SayManyHellos(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GreeterServicer_to_server(servicer, server):
rpc_method_handlers = {
'SayHello': grpc.unary_unary_rpc_method_handler(
servicer.SayHello),
'SayManyHellos': grpc.unary_stream_rpc_method_handler(
servicer.SayManyHellos),
}
generic_handler = grpc.method_handlers_generic_handler(
'models.Greeter', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| GreeterServicer |
python | django__django | tests/model_enums/tests.py | {
"start": 8126,
"end": 8485
} | class ____(datetime.date, models.Choices):
APOLLO_11 = 1969, 7, 20, "Apollo 11 (Eagle)"
APOLLO_12 = 1969, 11, 19, "Apollo 12 (Intrepid)"
APOLLO_14 = 1971, 2, 5, "Apollo 14 (Antares)"
APOLLO_15 = 1971, 7, 30, "Apollo 15 (Falcon)"
APOLLO_16 = 1972, 4, 21, "Apollo 16 (Orion)"
APOLLO_17 = 1972, 12, 11, "Apollo 17 (Challenger)"
| MoonLandings |
python | doocs__leetcode | solution/2500-2599/2567.Minimum Score by Changing Two Elements/Solution.py | {
"start": 0,
"end": 166
} | class ____:
def minimizeSum(self, nums: List[int]) -> int:
nums.sort()
return min(nums[-1] - nums[2], nums[-2] - nums[1], nums[-3] - nums[0])
| Solution |
python | numba__numba | numba/tests/test_extending.py | {
"start": 10326,
"end": 10708
} | class ____(AbstractTemplate):
def generic(self, args, kws):
assert isinstance(args[0], types.MakeFunctionLiteral)
return signature(types.none, *args)
def mk_func_test_impl():
mk_func_input(lambda a: a)
# -----------------------------------------------------------------------
# Define a types derived from types.Callable and overloads for them
| MkFuncTyping |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 45214,
"end": 47326
} | class ____(AssetSelection):
selected_keys: Sequence[AssetKey]
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
specified_keys = set(self.selected_keys)
missing_keys = {key for key in specified_keys if not asset_graph.has(key)}
if not allow_missing:
# Arbitrary limit to avoid huge error messages
keys_to_suggest = list(missing_keys)[:4]
suggestions = ""
for invalid_key in keys_to_suggest:
similar_names = resolve_similar_asset_names(
invalid_key, asset_graph.get_all_asset_keys()
)
if similar_names:
# Arbitrarily limit to 10 similar names to avoid a huge error message
subset_similar_names = similar_names[:10]
similar_to_string = ", ".join(
similar.to_string() for similar in subset_similar_names
)
suggestions += (
f"\n\nFor selected asset {invalid_key.to_string()}, did you mean one of "
f"the following?\n\t{similar_to_string}"
)
if missing_keys:
raise DagsterInvalidSubsetError(
f"AssetKey(s) {[k.to_user_string() for k in missing_keys]} were selected, but "
"no AssetsDefinition objects supply these keys. Make sure all keys are spelled "
"correctly, and all AssetsDefinitions are correctly added to the "
f"`Definitions`.{suggestions}"
)
return specified_keys - missing_keys
def to_serializable_asset_selection(self, asset_graph: BaseAssetGraph) -> "AssetSelection":
return self
def needs_parentheses_when_operand(self) -> bool:
return len(self.selected_keys) > 1
def to_selection_str(self) -> str:
return " or ".join(f'key:"{x.to_user_string()}"' for x in self.selected_keys)
@whitelist_for_serdes
@record
| KeysAssetSelection |
python | openai__openai-python | src/openai/resources/beta/realtime/realtime.py | {
"start": 2127,
"end": 4390
} | class ____(SyncAPIResource):
@cached_property
def sessions(self) -> Sessions:
return Sessions(self._client)
@cached_property
def transcription_sessions(self) -> TranscriptionSessions:
return TranscriptionSessions(self._client)
@cached_property
def with_raw_response(self) -> RealtimeWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return RealtimeWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> RealtimeWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return RealtimeWithStreamingResponse(self)
def connect(
self,
*,
model: str,
extra_query: Query = {},
extra_headers: Headers = {},
websocket_connection_options: WebsocketConnectionOptions = {},
) -> RealtimeConnectionManager:
"""
The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.
Some notable benefits of the API include:
- Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output.
- Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction.
- Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback.
The Realtime API is a stateful, event-based API that communicates over a WebSocket.
"""
return RealtimeConnectionManager(
client=self._client,
extra_query=extra_query,
extra_headers=extra_headers,
websocket_connection_options=websocket_connection_options,
model=model,
)
| Realtime |
python | kamyu104__LeetCode-Solutions | Python/find-the-minimum-cost-array-permutation.py | {
"start": 77,
"end": 1033
} | class ____(object):
def findPermutation(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
INF = float("inf")
n = len(nums)
dp = [[(INF, -1) for _ in xrange(n-1)] for _ in xrange(1<<(n-1))]
for i in xrange(n-1):
dp[1<<i][i] = (abs((i+1)-nums[0]), -1)
for mask in xrange(1<<(n-1)):
for i in xrange(n-1):
if mask&(1<<i) == 0:
continue
for j in xrange(n-1):
if j == i or mask&(1<<j) == 0:
continue
dp[mask][i] = min(dp[mask][i], (dp[mask^(1<<i)][j][0]+abs((i+1)-nums[j+1]), j))
_, i = min((dp[-1][i][0]+abs(0-nums[i+1]), i) for i in xrange(n-1))
result = [0]
mask = (1<<(n-1))-1
while i != -1:
result.append(i+1)
mask, i = mask^(1<<i), dp[mask][i][1]
return result
| Solution |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_http_status_code.py | {
"start": 666,
"end": 1681
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_http_status_code"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_http_status_code(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidHttpStatusCode |
python | ray-project__ray | rllib/utils/actor_manager.py | {
"start": 439,
"end": 1612
} | class ____:
"""A wrapper around a result or a RayError thrown during remote task/actor calls.
This is used to return data from `FaultTolerantActorManager` that allows us to
distinguish between RayErrors (remote actor related) and valid results.
"""
def __init__(self, result: Any = None, error: Exception = None):
"""One and only one of result or error should be set.
Args:
result: The result of the computation. Note that None is a valid result if
the remote function does not return anything.
error: Alternatively, the error that occurred during the computation.
"""
self._result = result
self._error = (
# Easier to handle if we show the user the original error.
error.as_instanceof_cause()
if isinstance(error, RayTaskError)
else error
)
@property
def ok(self):
return self._error is None
def get(self):
"""Returns the result or the error."""
if self._error:
return self._error
else:
return self._result
@DeveloperAPI
@dataclass
| ResultOrError |
python | TheAlgorithms__Python | graphs/greedy_best_first.py | {
"start": 1975,
"end": 5389
} | class ____:
"""
>>> grid = TEST_GRIDS[2]
>>> gbf = GreedyBestFirst(grid, (0, 0), (len(grid) - 1, len(grid[0]) - 1))
>>> [x.pos for x in gbf.get_successors(gbf.start)]
[(1, 0), (0, 1)]
>>> (gbf.start.pos_y + delta[3][0], gbf.start.pos_x + delta[3][1])
(0, 1)
>>> (gbf.start.pos_y + delta[2][0], gbf.start.pos_x + delta[2][1])
(1, 0)
>>> gbf.retrace_path(gbf.start)
[(0, 0)]
>>> gbf.search() # doctest: +NORMALIZE_WHITESPACE
[(0, 0), (1, 0), (2, 0), (2, 1), (3, 1), (4, 1), (4, 2), (4, 3),
(4, 4)]
"""
def __init__(
self, grid: list[list[int]], start: tuple[int, int], goal: tuple[int, int]
):
self.grid = grid
self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
self.open_nodes = [self.start]
self.closed_nodes: list[Node] = []
self.reached = False
def search(self) -> Path | None:
"""
Search for the path,
if a path is not found, only the starting position is returned
"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
current_node = self.open_nodes.pop(0)
if current_node.pos == self.target.pos:
self.reached = True
return self.retrace_path(current_node)
self.closed_nodes.append(current_node)
successors = self.get_successors(current_node)
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(child_node)
if not self.reached:
return [self.start.pos]
return None
def get_successors(self, parent: Node) -> list[Node]:
"""
Returns a list of successors (both in the grid and free spaces)
"""
return [
Node(
pos_x,
pos_y,
self.target.pos_x,
self.target.pos_y,
parent.g_cost + 1,
parent,
)
for action in delta
if (
0 <= (pos_x := parent.pos_x + action[1]) < len(self.grid[0])
and 0 <= (pos_y := parent.pos_y + action[0]) < len(self.grid)
and self.grid[pos_y][pos_x] == 0
)
]
def retrace_path(self, node: Node | None) -> Path:
"""
Retrace the path from parents to parents until start node
"""
current_node = node
path = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x))
current_node = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
for idx, grid in enumerate(TEST_GRIDS):
print(f"==grid-{idx + 1}==")
init = (0, 0)
goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("------")
greedy_bf = GreedyBestFirst(grid, init, goal)
path = greedy_bf.search()
if path:
for pos_x, pos_y in path:
grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
| GreedyBestFirst |
python | MongoEngine__mongoengine | tests/utils.py | {
"start": 2955,
"end": 3259
} | class ____(query_counter):
def get_ops(self):
ignore_query = dict(self._ignored_query)
ignore_query["command.count"] = {
"$ne": "system.profile"
} # Ignore the query issued by query_counter
return list(self.db.system.profile.find(ignore_query))
| db_ops_tracker |
python | qdrant__qdrant-client | qdrant_client/async_client_base.py | {
"start": 385,
"end": 12533
} | class ____:
def __init__(self, **kwargs: Any):
pass
async def search_matrix_offsets(
self,
collection_name: str,
query_filter: Optional[types.Filter] = None,
limit: int = 3,
sample: int = 10,
using: Optional[str] = None,
**kwargs: Any,
) -> types.SearchMatrixOffsetsResponse:
raise NotImplementedError()
async def search_matrix_pairs(
self,
collection_name: str,
query_filter: Optional[types.Filter] = None,
limit: int = 3,
sample: int = 10,
using: Optional[str] = None,
**kwargs: Any,
) -> types.SearchMatrixPairsResponse:
raise NotImplementedError()
async def query_batch_points(
self, collection_name: str, requests: Sequence[types.QueryRequest], **kwargs: Any
) -> list[types.QueryResponse]:
raise NotImplementedError()
async def query_points(
self,
collection_name: str,
query: Union[
types.PointId,
list[float],
list[list[float]],
types.SparseVector,
types.Query,
types.NumpyArray,
types.Document,
types.Image,
types.InferenceObject,
None,
] = None,
using: Optional[str] = None,
prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None,
query_filter: Optional[types.Filter] = None,
search_params: Optional[types.SearchParams] = None,
limit: int = 10,
offset: Optional[int] = None,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
score_threshold: Optional[float] = None,
lookup_from: Optional[types.LookupLocation] = None,
**kwargs: Any,
) -> types.QueryResponse:
raise NotImplementedError()
async def query_points_groups(
self,
collection_name: str,
group_by: str,
query: Union[
types.PointId,
list[float],
list[list[float]],
types.SparseVector,
types.Query,
types.NumpyArray,
types.Document,
types.Image,
types.InferenceObject,
None,
] = None,
using: Optional[str] = None,
prefetch: Union[types.Prefetch, list[types.Prefetch], None] = None,
query_filter: Optional[types.Filter] = None,
search_params: Optional[types.SearchParams] = None,
limit: int = 10,
group_size: int = 3,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
score_threshold: Optional[float] = None,
with_lookup: Optional[types.WithLookupInterface] = None,
lookup_from: Optional[types.LookupLocation] = None,
**kwargs: Any,
) -> types.GroupsResult:
raise NotImplementedError()
async def scroll(
self,
collection_name: str,
scroll_filter: Optional[types.Filter] = None,
limit: int = 10,
order_by: Optional[types.OrderBy] = None,
offset: Optional[types.PointId] = None,
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
**kwargs: Any,
) -> tuple[list[types.Record], Optional[types.PointId]]:
raise NotImplementedError()
async def count(
self,
collection_name: str,
count_filter: Optional[types.Filter] = None,
exact: bool = True,
**kwargs: Any,
) -> types.CountResult:
raise NotImplementedError()
async def facet(
self,
collection_name: str,
key: str,
facet_filter: Optional[types.Filter] = None,
limit: int = 10,
exact: bool = False,
**kwargs: Any,
) -> types.FacetResponse:
raise NotImplementedError()
async def upsert(
self, collection_name: str, points: types.Points, **kwargs: Any
) -> types.UpdateResult:
raise NotImplementedError()
async def update_vectors(
self, collection_name: str, points: Sequence[types.PointVectors], **kwargs: Any
) -> types.UpdateResult:
raise NotImplementedError()
async def delete_vectors(
self,
collection_name: str,
vectors: Sequence[str],
points: types.PointsSelector,
**kwargs: Any,
) -> types.UpdateResult:
raise NotImplementedError()
async def retrieve(
self,
collection_name: str,
ids: Sequence[types.PointId],
with_payload: Union[bool, Sequence[str], types.PayloadSelector] = True,
with_vectors: Union[bool, Sequence[str]] = False,
**kwargs: Any,
) -> list[types.Record]:
raise NotImplementedError()
async def delete(
self, collection_name: str, points_selector: types.PointsSelector, **kwargs: Any
) -> types.UpdateResult:
raise NotImplementedError()
async def set_payload(
self,
collection_name: str,
payload: types.Payload,
points: types.PointsSelector,
key: Optional[str] = None,
**kwargs: Any,
) -> types.UpdateResult:
raise NotImplementedError()
async def overwrite_payload(
self,
collection_name: str,
payload: types.Payload,
points: types.PointsSelector,
**kwargs: Any,
) -> types.UpdateResult:
raise NotImplementedError()
async def delete_payload(
self,
collection_name: str,
keys: Sequence[str],
points: types.PointsSelector,
**kwargs: Any,
) -> types.UpdateResult:
raise NotImplementedError()
async def clear_payload(
self, collection_name: str, points_selector: types.PointsSelector, **kwargs: Any
) -> types.UpdateResult:
raise NotImplementedError()
async def batch_update_points(
self,
collection_name: str,
update_operations: Sequence[types.UpdateOperation],
**kwargs: Any,
) -> list[types.UpdateResult]:
raise NotImplementedError()
async def update_collection_aliases(
self, change_aliases_operations: Sequence[types.AliasOperations], **kwargs: Any
) -> bool:
raise NotImplementedError()
async def get_collection_aliases(
self, collection_name: str, **kwargs: Any
) -> types.CollectionsAliasesResponse:
raise NotImplementedError()
async def get_aliases(self, **kwargs: Any) -> types.CollectionsAliasesResponse:
raise NotImplementedError()
async def get_collections(self, **kwargs: Any) -> types.CollectionsResponse:
raise NotImplementedError()
async def get_collection(self, collection_name: str, **kwargs: Any) -> types.CollectionInfo:
raise NotImplementedError()
async def collection_exists(self, collection_name: str, **kwargs: Any) -> bool:
raise NotImplementedError()
async def update_collection(self, collection_name: str, **kwargs: Any) -> bool:
raise NotImplementedError()
async def delete_collection(self, collection_name: str, **kwargs: Any) -> bool:
raise NotImplementedError()
async def create_collection(
self,
collection_name: str,
vectors_config: Union[types.VectorParams, Mapping[str, types.VectorParams]],
**kwargs: Any,
) -> bool:
raise NotImplementedError()
async def recreate_collection(
self,
collection_name: str,
vectors_config: Union[types.VectorParams, Mapping[str, types.VectorParams]],
**kwargs: Any,
) -> bool:
raise NotImplementedError()
def upload_points(
self, collection_name: str, points: Iterable[types.PointStruct], **kwargs: Any
) -> None:
raise NotImplementedError()
def upload_collection(
self,
collection_name: str,
vectors: Union[
dict[str, types.NumpyArray], types.NumpyArray, Iterable[types.VectorStruct]
],
payload: Optional[Iterable[dict[Any, Any]]] = None,
ids: Optional[Iterable[types.PointId]] = None,
**kwargs: Any,
) -> None:
raise NotImplementedError()
async def create_payload_index(
self,
collection_name: str,
field_name: str,
field_schema: Optional[types.PayloadSchemaType] = None,
field_type: Optional[types.PayloadSchemaType] = None,
**kwargs: Any,
) -> types.UpdateResult:
raise NotImplementedError()
async def delete_payload_index(
self, collection_name: str, field_name: str, **kwargs: Any
) -> types.UpdateResult:
raise NotImplementedError()
async def list_snapshots(
self, collection_name: str, **kwargs: Any
) -> list[types.SnapshotDescription]:
raise NotImplementedError()
async def create_snapshot(
self, collection_name: str, **kwargs: Any
) -> Optional[types.SnapshotDescription]:
raise NotImplementedError()
async def delete_snapshot(
self, collection_name: str, snapshot_name: str, **kwargs: Any
) -> Optional[bool]:
raise NotImplementedError()
async def list_full_snapshots(self, **kwargs: Any) -> list[types.SnapshotDescription]:
raise NotImplementedError()
async def create_full_snapshot(self, **kwargs: Any) -> Optional[types.SnapshotDescription]:
raise NotImplementedError()
async def delete_full_snapshot(self, snapshot_name: str, **kwargs: Any) -> Optional[bool]:
raise NotImplementedError()
async def recover_snapshot(
self, collection_name: str, location: str, **kwargs: Any
) -> Optional[bool]:
raise NotImplementedError()
async def list_shard_snapshots(
self, collection_name: str, shard_id: int, **kwargs: Any
) -> list[types.SnapshotDescription]:
raise NotImplementedError()
async def create_shard_snapshot(
self, collection_name: str, shard_id: int, **kwargs: Any
) -> Optional[types.SnapshotDescription]:
raise NotImplementedError()
async def delete_shard_snapshot(
self, collection_name: str, shard_id: int, snapshot_name: str, **kwargs: Any
) -> Optional[bool]:
raise NotImplementedError()
async def recover_shard_snapshot(
self, collection_name: str, shard_id: int, location: str, **kwargs: Any
) -> Optional[bool]:
raise NotImplementedError()
async def close(self, **kwargs: Any) -> None:
pass
def migrate(
self,
dest_client: "AsyncQdrantBase",
collection_names: Optional[list[str]] = None,
batch_size: int = 100,
recreate_on_collision: bool = False,
) -> None:
raise NotImplementedError()
async def create_shard_key(
self,
collection_name: str,
shard_key: types.ShardKey,
shards_number: Optional[int] = None,
replication_factor: Optional[int] = None,
placement: Optional[list[int]] = None,
**kwargs: Any,
) -> bool:
raise NotImplementedError()
async def delete_shard_key(
self, collection_name: str, shard_key: types.ShardKey, **kwargs: Any
) -> bool:
raise NotImplementedError()
async def info(self) -> types.VersionInfo:
raise NotImplementedError()
async def cluster_collection_update(
self, collection_name: str, cluster_operation: types.ClusterOperations, **kwargs: Any
) -> bool:
raise NotImplementedError()
async def collection_cluster_info(self, collection_name: str) -> types.CollectionClusterInfo:
raise NotImplementedError()
async def cluster_status(self) -> types.ClusterStatus:
raise NotImplementedError()
async def recover_current_peer(self) -> bool:
raise NotImplementedError()
async def remove_peer(self, peer_id: int, **kwargs: Any) -> bool:
raise NotImplementedError()
| AsyncQdrantBase |
python | tensorflow__tensorflow | tensorflow/python/distribute/values.py | {
"start": 18002,
"end": 43644
} | class ____(DistributedDelegate, variables_lib.Variable,
core.Tensor):
"""Holds a map from replica to variables."""
def __init__(self, strategy, values, aggregation, var_policy=None):
if (aggregation == variables_lib.VariableAggregation.MEAN and
not values[0].dtype.is_floating):
raise ValueError(
"creating distributed tf.Variable with aggregation=MEAN and a "
"non-floating dtype is not supported, please use a different "
"aggregation or dtype")
self._distribute_strategy = strategy
self._aggregation = aggregation
super(DistributedVariable, self).__init__(values)
self._common_name = self._primary.name.split(":")[0]
# Use a weakref to make it easy to map from the contained values
# to the container without introducing a reference cycle.
for v in values:
# ResourceVariable is a CompositeTensor. Attributes added to
# CompositeTensors will get lost through tf.nest packing and unpacking.
if isinstance(v, composite_tensor.CompositeTensor) and hasattr(
v, "handle"):
v.handle._distributed_container = weakref.ref(self) # pylint: disable=protected-access
else:
v._distributed_container = weakref.ref(self) # pylint: disable=protected-access
# Packed variable is used to reduce the overhead of function execution.
# For a DistributedVariable, only one variable handle is captured into a
# function graph. It's only supported in eager mode.
if ops.executing_eagerly_outside_functions() and getattr(
strategy, "_enable_packed_variable_in_eager_mode", False):
name = "%s/packed/" % self._common_name
if hasattr(values[0], "_vars"):
# Handle when the resource variables are "nested" underneath another
# layer of values, e.g., TPUReplicatedVariable, by packing all them
# together and pushing the packed var down a level
# pylint: disable=protected-access
packed_var = packed.PackedDistributedVariable(
sum((value._vars for value in values), []), name=name)
for value in values:
value._packed_var = packed_var
self._packed_var = None
# pylint: enable=protected-access
else:
self._packed_var = packed.PackedDistributedVariable(values, name=name)
else:
self._packed_var = None
# tf.keras keeps track of variables initialized using this attribute. When
# tf.keras gets the default session, it initializes all uninitialized vars.
# We need to make _keras_initialized a member of DistributedVariable because
# without this it will use `__getattr__` which will delegate to a component
# variable.
self._keras_initialized = False
# Typically, a `DistributedVariable`'s initializer is composed of the
# initializers of the components variables. However, in some cases, such as
# when restoring from a checkpoint, we may set the _initializer_op
# property on the entire `DistributedVariable`.
self._initializer_op = None
# Set a VariablePolicy which decides how we replicate/aggregate the given
# variable.
self._policy = var_policy
def __deepcopy__(self, memo):
"""Perform a deepcopy of the `DistributedVariable`.
Unlike the deepcopy of a regular tf.Variable, this keeps the original
strategy and devices of the `DistributedVariable`. To avoid confusion
with the behavior of deepcopy on a regular `Variable` (which does
copy into new devices), we only allow a deepcopy of a `DistributedVariable`
within its originating strategy scope.
Args:
memo: The memoization object for `deepcopy`.
Returns:
A deep copy of the current `DistributedVariable`.
Raises:
RuntimeError: If trying to deepcopy into a different strategy.
"""
with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):
new_values = []
for value in self._values:
with ops.device(value.device):
new_values.append(copy.deepcopy(value, memo))
copied_variable = type(self)(
strategy=self._distribute_strategy,
values=new_values,
aggregation=self._aggregation,
var_policy=copy.deepcopy(self._policy, memo))
memo[id(self)] = copied_variable
return copied_variable
def _use_packed_variable(self):
# Don't use packed variable when under a SaveContext to avoid explicit
# device placement on variable consuming ops.
return self._packed_var is not None and (
not values_util.is_saving_non_distributed())
def is_initialized(self, name=None):
"""Identifies if all the component variables are initialized.
Args:
name: Name of the final `logical_and` op.
Returns:
The op that evaluates to True or False depending on if all the
component variables are initialized.
"""
if values_util.is_saving_non_distributed():
return self._primary.is_initialized()
if self._use_packed_variable():
return self._packed_var.is_initialized()
result = self._primary.is_initialized()
# We iterate through the list of values except the last one to allow us to
# name the final `logical_and` op the same name that is passed by the user
# to the `is_initialized` op. For distributed variables, the
# `is_initialized` op is a `logical_and` op.
for v in self._values[1:-1]:
result = math_ops.logical_and(result, v.is_initialized())
result = math_ops.logical_and(
result, self._values[-1].is_initialized(), name=name)
return result
@property
def initializer(self):
if values_util.is_saving_non_distributed():
return self._primary.initializer
if self._initializer_op:
init_op = self._initializer_op
else:
# return grouped ops of all the var initializations of component values of
# the mirrored variable
init_op = control_flow_ops.group(
tuple(v.initializer for v in self._values))
return init_op
def initialized_value(self):
return self._get_on_device_or_primary().initialized_value()
def _is_mirrored(self):
return (self._policy is not None) and (self._policy._is_mirrored()) # pylint: disable=protected-access
@property
def initial_value(self):
return self._get_on_device_or_primary().initial_value
@property
def constraint(self):
return self._primary.constraint
@property
def graph(self):
return self._primary.graph
@property
def _shared_name(self):
return self._common_name
@property
def _unique_id(self):
return self._primary._unique_id # pylint: disable=protected-access
@property
def _graph_key(self):
"""Lets Optimizers know which graph this variable is from."""
return self._primary._graph_key # pylint: disable=protected-access
@property
def name(self):
return self._primary.name
@property
def dtype(self):
return self._primary.dtype
@property
def shape(self):
return self._primary.shape
@property
def synchronization(self):
return self._primary.synchronization
@property
def aggregation(self):
return self._aggregation
@property
def _packed_variable(self):
if self._use_packed_variable():
return self._packed_var
return None
@property
def handle(self):
if values_util.is_saving_non_distributed():
return self._primary.handle
replica_id = values_util.get_current_replica_id_as_int()
if replica_id is None:
raise ValueError(
"DistributedVariable.handle is not available outside the replica "
"context or a `tf.distribute.Strategy.update()` call.")
else:
if self._use_packed_variable():
return self._packed_var.handle
return self._values[replica_id].handle
  def eval(self, session=None):
    """Evaluates this variable in `session` (graph mode), using the component
    on the current device (or the primary)."""
    return self._get_on_device_or_primary().eval(session)
  @property
  def _save_slice_info(self):
    # Partitioned-variable save metadata, mirrored from the primary.
    return self._primary._save_slice_info  # pylint: disable=protected-access
  def _get_save_slice_info(self):
    return self._primary._get_save_slice_info()  # pylint: disable=protected-access
  def _set_save_slice_info(self, save_slice_info):
    # Apply to every component so any one of them can be saved/restored.
    for v in self._values:
      v._set_save_slice_info(save_slice_info)  # pylint: disable=protected-access
  @property
  def device(self):
    # Device of the component on the current device (or the primary).
    return self._get_on_device_or_primary().device
  @property
  def trainable(self):
    return self._primary.trainable
  @property
  def distribute_strategy(self):
    # The `tf.distribute.Strategy` this variable was created under.
    return self._distribute_strategy
  def get_shape(self) -> tensor_shape.TensorShape:
    """Returns the static shape, taken from the primary component."""
    return self._primary.get_shape()
  def to_proto(self, export_scope=None):
    """Converts the primary component to a `VariableDef` protocol buffer."""
    return self._primary.to_proto(export_scope=export_scope)
  @property
  def op(self) -> ops.Operation:
    """The op backing this variable; a limited shim in cross-replica context."""
    if values_util.is_saving_non_distributed():
      return self._primary.op
    # We want cross-replica code that does some var.op.X calls
    # to work (even if the current device isn't in self._devices), but
    # other uses of var.op in a cross-replica context to fail.
    if distribute_lib.in_cross_replica_context():
      return DistributedVarOp(self._primary.op.name, self._primary.op.graph,
                              self._primary.op.traceback, self._primary.op.type)
    return self._get().op
  @property
  def _in_graph_mode(self):
    return self._primary._in_graph_mode  # pylint: disable=protected-access
def _get_replica(self, replica_id):
"""Returns the value on a device with the given replica_id."""
value = self._values[replica_id]
if self._use_packed_variable():
return self._packed_var.on_device(value.device)
else:
return value
  def _get(self):
    """Returns the value for the current device or raises a ValueError."""
    if values_util.is_saving_non_distributed():
      return self._primary
    replica_id = values_util.get_current_replica_id_as_int()
    if replica_id is None:
      # Outside a replica context: defer to the cross-replica path
      # (which may raise, depending on the variable policy).
      return self._get_cross_replica()
    else:
      return self._get_replica(replica_id)
  def _get_on_device_or_primary(self):
    """Returns value in same replica or device if possible, else the _primary."""
    if values_util.is_saving_non_distributed():
      return self._primary
    replica_id = values_util.get_current_replica_id_as_int()
    if replica_id is None:
      # Try to find a value on the current device.
      current_device = device_util.canonicalize(device_util.current())
      for i, value in enumerate(self._values):
        if device_util.canonicalize(value.device) == current_device:
          return self._get_replica(i)
      # No component lives on the current device; fall back to the primary.
      return self._get_replica(0)
    else:
      return self._get_replica(replica_id)
  def read_value(self):
    """Returns a tensor snapshot of the current component's value."""
    if values_util.is_saving_non_distributed():
      return self._primary.read_value()
    with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):
      # `identity` turns the read into an explicit tensor snapshot.
      return array_ops.identity(self._get())
  def value(self):
    """Returns the variable's value; dispatch depends on the variable policy."""
    if values_util.is_saving_non_distributed():
      return self._primary.value()
    if self._policy:
      return self._policy.value(self)
    return self._get_on_device_or_primary().value()
def numpy(self):
if context.executing_eagerly():
return self.read_value().numpy()
else:
raise NotImplementedError("DistributedVariable.numpy() is only available "
"when eager execution is enabled.")
def assign_sub(self, value, use_locking=False, name=None, read_value=True):
if values_util.is_saving_non_distributed():
return self._primary.assign_sub(value, use_locking, name, read_value)
if self._policy:
return self._policy.assign_sub(
self,
value,
use_locking=use_locking,
name=name,
read_value=read_value)
return values_util.on_write_assign_sub(
self, value, use_locking=use_locking, name=name, read_value=read_value)
def assign_add(self, value, use_locking=False, name=None, read_value=True):
if values_util.is_saving_non_distributed():
return self._primary.assign_add(value, use_locking, name, read_value)
if self._policy:
return self._policy.assign_add(
self,
value,
use_locking=use_locking,
name=name,
read_value=read_value)
return values_util.on_write_assign_add(
self, value, use_locking=use_locking, name=name, read_value=read_value)
def assign(self, value, use_locking=False, name=None, read_value=True):
if values_util.is_saving_non_distributed():
return self._primary.assign(value, use_locking, name, read_value)
if self._policy:
return self._policy.assign(
self,
value,
use_locking=use_locking,
name=name,
read_value=read_value)
return values_util.on_write_assign(
self, value, use_locking=use_locking, name=name, read_value=read_value)
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_sub(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_sub(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_sub(
self, sparse_delta, use_locking=use_locking, name=name)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_add(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_add(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_add(
self, sparse_delta, use_locking=use_locking, name=name)
def scatter_mul(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_mul(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_mul(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_mul(
self, sparse_delta, use_locking=use_locking, name=name)
def scatter_div(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_div(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_div(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_div(
self, sparse_delta, use_locking=use_locking, name=name)
def scatter_min(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_min(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_min(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_min(
self, sparse_delta, use_locking=use_locking, name=name)
def scatter_max(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_max(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_max(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_max(
self, sparse_delta, use_locking=use_locking, name=name)
def scatter_update(self, sparse_delta, use_locking=False, name=None):
if values_util.is_saving_non_distributed():
return self._primary.scatter_update(sparse_delta, use_locking, name)
if self._policy:
return self._policy.scatter_update(
self, sparse_delta, use_locking=use_locking, name=name)
return values_util.scatter_update(
self, sparse_delta, use_locking=use_locking, name=name)
  def __tf_tracing_type__(self, _):
    # Trace type used by tf.function to decide when retracing is required.
    return DistributedVariableTraceType(self)
  def _gather_saveables_for_checkpoint(self):
    """Overrides Trackable method.

    This allows both name-based and object-based save and restore of
    DistributedVariables.

    Returns:
      A dictionary mapping attribute names to `SaveableObject` factories.
    """
    # The factory defaults `name` now so the saveable keeps the common name
    # even if it is constructed later.
    def _saveable_factory(name=self._common_name):
      return _DistributedVariableSaveable(self, self._primary, name)
    return {trackable.VARIABLE_VALUE_KEY: _saveable_factory}
  def _as_graph_element(self):
    """Returns this variable as a graph element; requires a VariablePolicy."""
    if values_util.is_saving_non_distributed():
      return self._primary._as_graph_element()  # pylint: disable=protected-access
    if self._policy:
      return self._policy._as_graph_element(self)  # pylint: disable=protected-access
    raise NotImplementedError(
        "DistributedVariable._as_graph_element requires a valid "
        "VariablePolicy. Please set the policy via the `var_policy` argument "
        "in the constructor, or override this method in sub-classes which "
        "support cross-replica accesses. "
        f"Type name is {type(self)}"
    )
  def _get_cross_replica(self):
    """Returns the cross-replica value; requires a VariablePolicy."""
    if values_util.is_saving_non_distributed():
      return self._primary
    if self._policy:
      return self._policy._get_cross_replica(self)  # pylint: disable=protected-access
    raise NotImplementedError(
        "DistributedVariable._get_cross_replica requires a valid "
        "VariablePolicy. Please set the policy via the `var_policy` argument "
        "in the constructor, or override this method in sub-classes which "
        "support cross-replica accesses. "
        f"Type name is {type(self)}"
    )
  def _update_cross_replica(self, update_fn, value, **kwargs):
    """Applies updates across replicas.

    Args:
      update_fn: A callable to pass to `strategy.extended.update` to update the
        variable. It should have the same signature as `Variable.assign()`.
      value: value to be passed to `update_fn`.
      **kwargs: remaining arguments to `update_fn`.

    Returns:
      Updated variable or `tf.Operation`.
    """
    # Flag this update path as unsaveable (see values_util.mark_as_unsaveable).
    values_util.mark_as_unsaveable()
    return self.distribute_strategy.extended.update(
        self, update_fn, args=(value,), kwargs=kwargs, group=True)
  def _update_replica(self, update_fn, value, **kwargs):
    """Applies updates in one replica.

    Args:
      update_fn: A callable to update the variable. It should have the same
        signature as `Variable.assign()`.
      value: value to be passed to `update_fn`.
      **kwargs: remaining arguments to `update_fn`.

    Returns:
      Updated variable or `tf.Operation`.
    """
    if self._policy:
      return self._policy._update_replica(self, update_fn, value, **kwargs)  # pylint: disable=protected-access
    raise NotImplementedError(
        "DistributedVariable._update_replica requires a valid VariablePolicy. "
        "Please set the policy via the `var_policy` argument in the "
        "constructor, or override this method in sub-classes which support "
        "cross-replica accesses. "
        f"Type name is {type(self)}"
    )
  def _update(self, update_fn, value, **kwargs):
    """Applies updates depending on the context.

    The method calls `_update_replica` in replica context,
    `_update_cross_replica` in cross replica context, and `update_fn` in update
    context.

    If `read_value` is True, the method returns the updated Variable. If
    `read_value` is False, the method returns the update `tf.Operation`.

    Args:
      update_fn: A callable to pass to `strategy.extended.update` to update the
        variable. It should have the same signature as `Variable.assign()`.
      value: value to be passed to `update_fn`.
      **kwargs: keyword arguments to `update_fn`.

    Returns:
      Updated variable or `tf.Operation`.
    """
    if values_util.is_saving_non_distributed():
      return update_fn(self._primary, value, **kwargs)
    with distribute_lib.enter_or_assert_strategy(self.distribute_strategy):
      if distribute_lib.in_cross_replica_context():
        update_replica_id = distribute_lib.get_update_replica_id()
        if update_replica_id is not None:
          # Inside a `strategy.update()` call: update only that replica.
          replica_value = self._get_replica(update_replica_id)
          return update_fn(replica_value, value, **kwargs)
        return self._update_cross_replica(update_fn, value, **kwargs)
      else:
        values_util.assert_replica_context(self.distribute_strategy)
        return self._update_replica(update_fn, value, **kwargs)
  def _should_act_as_resource_variable(self):
    """Pass resource_variable_ops.is_resource_variable check."""
    pass
  def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):
    """Converts a variable to a tensor."""
    if values_util.is_saving_non_distributed():
      return ops.convert_to_tensor(
          self._primary, dtype=dtype, name=name, as_ref=as_ref)
    with distribute_lib.enter_or_assert_strategy(self._distribute_strategy):
      # Convert the per-context value (replica or cross-replica) to a tensor.
      return ops.convert_to_tensor(
          self._get(), dtype=dtype, name=name, as_ref=as_ref)
  def __tf_tensor__(self,
                    dtype: Optional[dtypes.DType] = None,
                    name: Optional[str] = None) -> tensor_lib.Tensor:
    # Tensor-conversion protocol hook; delegates to _dense_var_to_tensor.
    return self._dense_var_to_tensor(dtype, name)
  def _export_to_saved_model_graph(self,
                                   object_map=None,
                                   tensor_map=None,
                                   options=None,
                                   **kwargs):
    """Maps this variable and its components into a SavedModel graph.

    Populates `object_map`/`tensor_map` so SavedModel export can substitute
    graph-mode copies for this distributed variable and its components.
    """
    # Initialize for self._primary first, so that obj_map[self._primary] and
    # resource_map[self._primary.handle] contain mapped values.
    resource_list = self._primary._export_to_saved_model_graph(  # pylint:disable=protected-access
        object_map=object_map,
        tensor_map=tensor_map,
        options=options,
        **kwargs)
    # NOTE(review): `v != self._primary` relies on variable inequality acting
    # as an identity check here -- confirm `is not` would not be safer.
    for v in [v for v in self._values if v != self._primary]:
      if (options.experimental_variable_policy  # pylint:disable=protected-access
          ._expand_distributed_variables()):
        # Expanded mode: export every component as its own variable.
        resource_list.extend(
            v._export_to_saved_model_graph(  # pylint:disable=protected-access
                object_map=object_map,
                tensor_map=tensor_map,
                options=options,
                **kwargs))  # pylint:disable=protected-access
      else:
        # Collapsed mode: all components alias the primary's exported copy.
        object_map[v] = object_map[self._primary]
        tensor_map[v.handle] = tensor_map[self._primary.handle]
        resource_list.append(v.handle)
    object_map[self] = object_map[self._primary]
    tensor_map[self] = tensor_map[self._primary.handle]
    resource_list.append(self)
    if self._packed_var is not None:
      # The packed handle also aliases the primary's exported tensor.
      tensor_map[self._packed_var.packed_handle] = tensor_map[
          self._primary.handle]
      resource_list.append(self._packed_var.packed_handle)
    return resource_list
  def _copy_trackable_to_cpu(self, object_map):
    """For implementing `Trackable`."""
    if self not in object_map:
      # If not populated, initialize the cpu copy first.
      # Rewrite this variable's device spec to CPU:0 on the same host.
      op_device = pydev.DeviceSpec.from_string(self.device).replace(
          device_type="CPU", device_index=0).to_string()
      with ops.device(op_device):
        new_var = resource_variable_ops.UninitializedVariable(
            trainable=self.trainable,
            shape=self.shape,
            dtype=self.dtype,
            name=self._shared_name,
            distribute_strategy=self._distribute_strategy,
            aggregation=self._aggregation)  # pylint: disable=protected-access
        object_map[self] = new_var
    # Then copy value of self to the copy.
    destination_var = object_map[self]
    with ops.device(destination_var.device):
      destination_var.assign(self.read_value())
  def _write_object_proto(self, proto, options):
    """Update a SavedObject proto for the caller.

    If a DistributedVariable object supports this method, it will be called when
    saving with a pre-built `SavedObject` proto representing the object, plus an
    instance of `SaveOptions`. This method is then free to modify that proto
    instance.

    `DistributedVariable` with `AUTO` or `ON_WRITE` synchronization optionally
    write out information about their components to the
    `experimental_distributed_variable_components` field of a
    `SavedVariable` (depending on the `SaveOptions` variable policy).

    Args:
      proto: A pre-built `SavedObject` proto for this object. It is assumed this
        will be a `SavedVariable` instance.
      options: A `SaveOptions` instance.
    """
    # First write the ordinary resource-variable fields.
    resource_variable_ops.write_object_proto_for_resource_variable(
        self, proto, options)
    # Set protos in the saved model such that distributed variables are
    # correctly restored on COMPOSITE devices (otherwise, task:0/TPU:0).
    values_util.write_object_proto(self, proto, options)
  @property
  def is_distributed_variable(self):
    # Marker used by distribution-strategy code to detect this wrapper type.
    return True
  def __tf_experimental_restore_capture__(
      self, concrete_function, internal_capture):
    """Registers this variable as the value behind a restored capture."""
    graph = concrete_function.graph
    # Add given distributed variable to captures with given placeholder.
    graph.replace_capture(self, internal_capture)
    # Identity gradients keep backprop flowing through the captured value.
    record.record_operation(
        "captured_value", [internal_capture], [self],
        backward_function=lambda x: [x],
        forward_function=lambda x: [x])
    return self
# We extend from `saveable_object.SaveableObject` instead of
# `saveable_object_util.ResourceVariableSaveable` since we need to read the
# value of ONREAD variables when saving. `SaveableObject` provides a way to
# specify the function to run to get the value of the variable or tensor at
# saving time. We can use this for both ON_READ and ON_WRITE variables.
# TODO(b/164586507): Consolidate ON_WRITE and ON_READ saving/restoring logic
# if possible.
| DistributedVariable |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_run.py | {
"start": 2271,
"end": 3111
} | class ____:
def test_template_fields(self):
operator = CloudRunCreateJobOperator(
task_id=TASK_ID, project_id=PROJECT_ID, region=REGION, job_name=JOB_NAME, job=JOB
)
_assert_common_template_fields(operator.template_fields)
assert "job_name" in operator.template_fields
@mock.patch(CLOUD_RUN_HOOK_PATH)
def test_create(self, hook_mock):
hook_mock.return_value.create_job.return_value = JOB
operator = CloudRunCreateJobOperator(
task_id=TASK_ID, project_id=PROJECT_ID, region=REGION, job_name=JOB_NAME, job=JOB
)
operator.execute(context=mock.MagicMock())
hook_mock.return_value.create_job.assert_called_once_with(
job_name=JOB_NAME, region=REGION, project_id=PROJECT_ID, job=JOB
)
| TestCloudRunCreateJobOperator |
python | ansible__ansible | lib/ansible/plugins/strategy/host_pinned.py | {
"start": 1720,
"end": 1875
} | class ____(FreeStrategyModule):
def __init__(self, tqm):
super(StrategyModule, self).__init__(tqm)
self._host_pinned = True
| StrategyModule |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 751578,
"end": 756799
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"avatar_url",
"billing_info",
"created_at",
"database_id",
"description",
"description_html",
"location",
"members",
"name",
"organizations",
"owner_info",
"resource_path",
"slug",
"url",
"viewer_is_admin",
"website_url",
)
avatar_url = sgqlc.types.Field(
sgqlc.types.non_null(URI),
graphql_name="avatarUrl",
args=sgqlc.types.ArgDict(
(("size", sgqlc.types.Arg(Int, graphql_name="size", default=None)),)
),
)
billing_info = sgqlc.types.Field(EnterpriseBillingInfo, graphql_name="billingInfo")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
description = sgqlc.types.Field(String, graphql_name="description")
description_html = sgqlc.types.Field(
sgqlc.types.non_null(HTML), graphql_name="descriptionHTML"
)
location = sgqlc.types.Field(String, graphql_name="location")
members = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseMemberConnection),
graphql_name="members",
args=sgqlc.types.ArgDict(
(
(
"organization_logins",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(String)),
graphql_name="organizationLogins",
default=None,
),
),
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
(
"order_by",
sgqlc.types.Arg(
EnterpriseMemberOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
(
"role",
sgqlc.types.Arg(
EnterpriseUserAccountMembershipRole,
graphql_name="role",
default=None,
),
),
(
"deployment",
sgqlc.types.Arg(
EnterpriseUserDeployment,
graphql_name="deployment",
default=None,
),
),
(
"has_two_factor_enabled",
sgqlc.types.Arg(
Boolean, graphql_name="hasTwoFactorEnabled", default=None
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
organizations = sgqlc.types.Field(
sgqlc.types.non_null(OrganizationConnection),
graphql_name="organizations",
args=sgqlc.types.ArgDict(
(
("query", sgqlc.types.Arg(String, graphql_name="query", default=None)),
(
"viewer_organization_role",
sgqlc.types.Arg(
RoleInOrganization,
graphql_name="viewerOrganizationRole",
default=None,
),
),
(
"order_by",
sgqlc.types.Arg(
OrganizationOrder,
graphql_name="orderBy",
default={"field": "LOGIN", "direction": "ASC"},
),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
owner_info = sgqlc.types.Field(EnterpriseOwnerInfo, graphql_name="ownerInfo")
resource_path = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="resourcePath"
)
slug = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="slug")
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
viewer_is_admin = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="viewerIsAdmin"
)
website_url = sgqlc.types.Field(URI, graphql_name="websiteUrl")
| Enterprise |
python | walkccc__LeetCode | solutions/3044. Most Frequent Prime/3044.py | {
"start": 0,
"end": 774
} | class ____:
def mostFrequentPrime(self, mat: list[list[int]]) -> int:
DIRS = ((1, 0), (1, -1), (0, -1), (-1, -1),
(-1, 0), (-1, 1), (0, 1), (1, 1))
m = len(mat)
n = len(mat[0])
count = collections.Counter()
def isPrime(num: int) -> bool:
return not any(num % i == 0 for i in range(2, int(num**0.5 + 1)))
for i in range(m):
for j in range(n):
for dx, dy in DIRS:
num = 0
x = i
y = j
while 0 <= x < m and 0 <= y < n:
num = num * 10 + mat[x][y]
if num > 10 and isPrime(num):
count[num] += 1
x += dx
y += dy
if not count.items():
return -1
return max(count.items(), key=lambda x: (x[1], x[0]))[0]
| Solution |
python | modin-project__modin | modin/db_conn.py | {
"start": 1553,
"end": 5904
} | class ____:
"""
Creates a SQL database connection.
Parameters
----------
lib : str
The library for the SQL connection.
*args : iterable
Positional arguments to pass when creating the connection.
**kwargs : dict
Keyword arguments to pass when creating the connection.
"""
lib: str
args: Sequence
kwargs: Dict
_dialect_is_microsoft_sql_cache: Optional[bool]
def __init__(self, lib: str, *args: Any, **kwargs: Any) -> None:
lib = lib.lower()
if lib not in (_PSYCOPG_LIB_NAME, _SQLALCHEMY_LIB_NAME):
raise UnsupportedDatabaseException(f"Unsupported database library {lib}")
self.lib = lib
self.args = args
self.kwargs = kwargs
self._dialect_is_microsoft_sql_cache = None
def _dialect_is_microsoft_sql(self) -> bool:
"""
Tell whether this connection requires Microsoft SQL dialect.
If this is a sqlalchemy connection, create an engine from args and
kwargs. If that engine's driver is pymssql or pyodbc, this
connection requires Microsoft SQL. Otherwise, it doesn't.
Returns
-------
bool
"""
if self._dialect_is_microsoft_sql_cache is None:
self._dialect_is_microsoft_sql_cache = False
if self.lib == _SQLALCHEMY_LIB_NAME:
from sqlalchemy import create_engine
self._dialect_is_microsoft_sql_cache = create_engine(
*self.args, **self.kwargs
).driver in ("pymssql", "pyodbc")
return self._dialect_is_microsoft_sql_cache
def get_connection(self) -> Any:
"""
Make the database connection and get it.
For psycopg2, pass all arguments to psycopg2.connect() and return the
result of psycopg2.connect(). For sqlalchemy, pass all arguments to
sqlalchemy.create_engine() and return the result of calling connect()
on the engine.
Returns
-------
Any
The open database connection.
"""
if self.lib == _PSYCOPG_LIB_NAME:
import psycopg2
return psycopg2.connect(*self.args, **self.kwargs)
if self.lib == _SQLALCHEMY_LIB_NAME:
from sqlalchemy import create_engine
return create_engine(*self.args, **self.kwargs).connect()
raise UnsupportedDatabaseException("Unsupported database library")
def get_string(self) -> str:
"""
Get input connection string.
Returns
-------
str
"""
return self.args[0]
def column_names_query(self, query: str) -> str:
"""
Get a query that gives the names of columns that `query` would produce.
Parameters
----------
query : str
The SQL query to check.
Returns
-------
str
"""
# This query looks odd, but it works in both PostgreSQL and Microsoft
# SQL, which doesn't let you use a "limit" clause to select 0 rows.
return f"SELECT * FROM ({query}) AS _MODIN_COUNT_QUERY WHERE 1 = 0"
def row_count_query(self, query: str) -> str:
"""
Get a query that gives the names of rows that `query` would produce.
Parameters
----------
query : str
The SQL query to check.
Returns
-------
str
"""
return f"SELECT COUNT(*) FROM ({query}) AS _MODIN_COUNT_QUERY"
def partition_query(self, query: str, limit: int, offset: int) -> str:
"""
Get a query that partitions the original `query`.
Parameters
----------
query : str
The SQL query to get a partition.
limit : int
The size of the partition.
offset : int
Where the partition begins.
Returns
-------
str
"""
return (
(
f"SELECT * FROM ({query}) AS _MODIN_COUNT_QUERY ORDER BY(SELECT NULL)"
+ f" OFFSET {offset} ROWS FETCH NEXT {limit} ROWS ONLY"
)
if self._dialect_is_microsoft_sql()
else f"SELECT * FROM ({query}) AS _MODIN_COUNT_QUERY LIMIT "
+ f"{limit} OFFSET {offset}"
)
| ModinDatabaseConnection |
python | getsentry__sentry | src/sentry/migrations/0990_groupowner_json_field.py | {
"start": 188,
"end": 1515
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = False
dependencies = [
("sentry", "0989_add_release_date_added_idx"),
]
operations = [
migrations.AlterField(
model_name="groupowner",
name="context",
field=sentry.db.models.fields.jsonfield.LegacyTextJSONField(null=True),
),
]
| Migration |
python | ApeWorX__ape | src/ape/plugins/account.py | {
"start": 167,
"end": 1028
} | class ____(PluginType):
"""
An account-related plugin. The plugin must register both
an :class:`ape.api.accounts.AccountContainerAPI` as well as an
:class:`ape.api.accounts.AccountAPI`.
"""
@hookspec
def account_types( # type: ignore[empty-body]
self,
) -> tuple[type["AccountContainerAPI"], type["AccountAPI"]]:
"""
A hook for returning a tuple of an account container and an account type.
Each account-base plugin defines and returns their own types here.
Usage example::
@plugins.register(plugins.AccountPlugin)
def account_types():
return AccountContainer, KeyfileAccount
Returns:
tuple[type[:class:`~ape.api.accounts.AccountContainerAPI`],
type[:class:`~ape.api.accounts.AccountAPI`]]
"""
| AccountPlugin |
python | Pylons__pyramid | tests/test_exceptions.py | {
"start": 1797,
"end": 2438
} | class ____(unittest.TestCase):
def _makeOne(self, message):
from pyramid.exceptions import Forbidden
return Forbidden(message)
def test_it(self):
from pyramid.interfaces import IExceptionResponse
e = self._makeOne('forbidden')
self.assertTrue(IExceptionResponse.providedBy(e))
self.assertEqual(e.status, '403 Forbidden')
self.assertEqual(e.message, 'forbidden')
def test_response_equivalence(self):
from pyramid.exceptions import Forbidden
from pyramid.httpexceptions import HTTPForbidden
self.assertTrue(Forbidden is HTTPForbidden)
| TestForbidden |
python | dask__distributed | distributed/collections.py | {
"start": 943,
"end": 7099
} | class ____(MutableSet[T]):
"""A set-like where the `pop` method returns the smallest item, as sorted by an
arbitrary key function. Ties are broken by oldest first.
Values must be compatible with :mod:`weakref`.
Parameters
----------
key: Callable
A function that takes a single element of the collection as a parameter and
returns a sorting key. The key does not need to be hashable and does not need to
support :mod:`weakref`.
Note
----
The key returned for each element should not to change over time. If it does, the
position in the heap won't change, even if the element is re-added, and it *may* not
change even if it's discarded and then re-added later.
"""
__slots__ = ("key", "_data", "_heap", "_inc", "_sorted")
key: Callable[[T], Any]
_data: set[T]
_heap: list[tuple[Any, int, weakref.ref[T]]]
_inc: int
_sorted: bool
def __init__(self, *, key: Callable[[T], Any]):
self.key = key
self._data = set()
self._inc = 0
self._heap = []
self._sorted = True
def __repr__(self) -> str:
return f"<{type(self).__name__}: {len(self)} items>"
def __reduce__(self) -> tuple[Callable, tuple]:
heap = [(k, i, v) for k, i, vref in self._heap if (v := vref()) in self._data]
return HeapSet._unpickle, (self.key, self._inc, heap)
@staticmethod
def _unpickle(
key: Callable[[T], Any], inc: int, heap: list[tuple[Any, int, T]]
) -> HeapSet[T]:
self = object.__new__(HeapSet)
self.key = key
self._data = {v for _, _, v in heap}
self._inc = inc
self._heap = [(k, i, weakref.ref(v)) for k, i, v in heap]
heapq.heapify(self._heap)
self._sorted = not heap
return self
def __contains__(self, value: object) -> bool:
return value in self._data
def __len__(self) -> int:
return len(self._data)
def add(self, value: T) -> None:
if value in self._data:
return
k = self.key(value)
vref = weakref.ref(value)
heapq.heappush(self._heap, (k, self._inc, vref))
self._sorted = False
self._data.add(value)
self._inc += 1
def discard(self, value: T) -> None:
self._data.discard(value)
if not self._data:
self.clear()
def peek(self) -> T:
"""Return the smallest element without removing it"""
if not self._data:
raise KeyError("peek into empty set")
while True:
value = self._heap[0][2]()
if value in self._data:
return value
heapq.heappop(self._heap)
self._sorted = False
def peekn(self, n: int) -> Iterator[T]:
"""Iterate over the n smallest elements without removing them.
This is O(1) for n == 1; O(n*logn) otherwise.
"""
if n <= 0 or not self:
return # empty iterator
if n == 1:
yield self.peek()
else:
# NOTE: we could pop N items off the queue, then push them back.
# But copying the list N times is probably slower than just sorting it
# with fast C code.
# If we had a `heappop` that sliced the list instead of popping from it,
# we could implement an optimized version for small `n`s.
yield from itertools.islice(self.sorted(), n)
def pop(self) -> T:
if not self._data:
raise KeyError("pop from an empty set")
while True:
_, _, vref = heapq.heappop(self._heap)
self._sorted = False
value = vref()
if value in self._data:
self._data.discard(value)
if not self._data:
self.clear()
return value
def peekright(self) -> T:
"""Return one of the largest elements (not necessarily the largest!) without
removing it. It's guaranteed that ``self.peekright() >= self.peek()``.
"""
if not self._data:
raise KeyError("peek into empty set")
while True:
value = self._heap[-1][2]()
if value in self._data:
return value
del self._heap[-1]
def popright(self) -> T:
"""Remove and return one of the largest elements (not necessarily the largest!)
It's guaranteed that ``self.popright() >= self.peek()``.
"""
if not self._data:
raise KeyError("pop from an empty set")
while True:
_, _, vref = self._heap.pop()
value = vref()
if value in self._data:
self._data.discard(value)
if not self._data:
self.clear()
return value
def __iter__(self) -> Iterator[T]:
"""Iterate over all elements. This is a O(n) operation which returns the
elements in pseudo-random order.
"""
return iter(self._data)
def sorted(self) -> Iterator[T]:
"""Iterate over all elements. This is a O(n*logn) operation which returns the
elements in order, from smallest to largest according to the key and insertion
order.
"""
if not self._sorted:
self._heap.sort() # A sorted list maintains the heap invariant
self._sorted = True
seen = set()
for _, _, vref in self._heap:
value = vref()
if value in self._data and value not in seen:
yield value
seen.add(value)
def clear(self) -> None:
self._data.clear()
self._heap.clear()
self._sorted = True
def sum_mappings(ds: Iterable[Mapping[K, V] | Iterable[tuple[K, V]]], /) -> dict[K, V]:
"""Sum the values of the given mappings, key by key"""
out: dict[K, V] = {}
for d in ds:
if isinstance(d, Mapping):
d = d.items()
for k, v in d:
try:
out[k] += v # type: ignore
except KeyError:
out[k] = v
return out
| HeapSet |
python | huggingface__transformers | src/transformers/models/moshi/modeling_moshi.py | {
"start": 11714,
"end": 13992
} | class ____(nn.Module):
def __init__(self, input_size, output_size, num_layers):
super().__init__()
# Stack the weights for N layers into a single tensor (num_layers, output_size, input_size)
self.weight = nn.Parameter(torch.randn(num_layers, output_size, input_size))
def forward(self, x, layer_idx=None):
"""
`MoshiFlexibleLinear` creates one linear layer per codebook. There's multiple ways to use it.
In the default case, `sequence_length=num_layers`, so each element of the sequence will be matmul to the weights corresponding to its index on the sequence.
For more advanced cases, one can specify which codebook's layer(s) to use with `layer_idx`.
If `layer_idx` indicates a single integer, all of the element of the sequence will be matmul to this single codebook's layer.
But if `layer_idx` is a tensor of shape `(seq_length,)`, it will matmul each i-th element of the input sequence to the corresponding layer `weight[i]`.
Args:
x (`torch.FloatTensor): input to the layer of shape `(batch, num_layers, embed_dim)` or of shape `(batch, seq_length, embed_dim)`
layer_idx (`torch.Tensor`, *optional*):
Can be used to specify which codebook's layers(s) to use.
If it's a tensor of shape `(seq_length,)`, will matmul each element of the sequence to the corresponding weights.
But if `layer_idx` is a tensor of shape `(seq_length,)`, it will matmul each i-th element of the input sequence to the corresponding layer `weight[i]`.
"""
# Use torch.gather to select the corresponding weights for each sample
# (codebooks, output_size, hidden_size)
selected_weights = torch.index_select(self.weight, 0, layer_idx) if layer_idx is not None else self.weight
# (1, codebooks, hidden_size, output_size)
selected_weights = selected_weights.transpose(1, 2)[None, :, :, :]
# (batch_size, codebooks, 1, hidden_size) x (1, codebooks, hidden_size, output_size)
# -> (batch_size, codebooks, 1, output_size)
x = torch.matmul(x[:, :, None, :], selected_weights)
# (batch_size, codebooks, output_size)
return x.squeeze(2)
| MoshiFlexibleLinear |
python | pytorch__pytorch | torch/_dynamo/exc.py | {
"start": 5952,
"end": 6078
} | class ____(ArgsMismatchError):
"""
Internal error from cond() due to arguments mismatch.
"""
| CondOpArgsMismatchError |
python | kamyu104__LeetCode-Solutions | Python/optimal-partition-of-string.py | {
"start": 42,
"end": 398
} | class ____(object):
def partitionString(self, s):
"""
:type s: str
:rtype: int
"""
result, left = 1, 0
lookup = {}
for i, x in enumerate(s):
if x in lookup and lookup[x] >= left:
left = i
result += 1
lookup[x] = i
return result
| Solution |
python | altair-viz__altair | tests/utils/test_plugin_registry.py | {
"start": 96,
"end": 179
} | class ____(PluginRegistry[Callable[[int], int], int]):
pass
| TypedCallableRegistry |
python | pytorch__pytorch | torch/fx/experimental/proxy_tensor.py | {
"start": 71957,
"end": 72084
} | class ____(NameError):
pass
# Base class for inline _ModuleStackTracer.__init__.AttrProxy
| _ModuleNotInstalledAsSubmoduleError |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/managed_kafka.py | {
"start": 4031,
"end": 8533
} | class ____(ManagedKafkaBaseOperator):
"""
Create a new Apache Kafka cluster.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param location: Required. The ID of the Google Cloud region that the service belongs to.
:param cluster: Required. Configuration of the cluster to create. Its ``name`` field is ignored.
:param cluster_id: Required. The ID to use for the cluster, which will become the final component of
the cluster's name. The ID must be 1-63 characters long, and match the regular expression
``[a-z]([-a-z0-9]*[a-z0-9])?`` to comply with RFC 1035. This value is structured like: ``my-cluster-id``.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
to avoid duplication of requests. If a request times out or fails, retrying with the same ID
allows the server to recognize the previous attempt. For at least 60 minutes, the server ignores
duplicate requests bearing the same ID. For example, consider a situation where you make an
initial request and the request times out. If you make the request again with the same request ID
within 60 minutes of the last request, the server checks if an original operation with the same
request ID was received. If so, the server ignores the second request. The request ID must be a
valid UUID. A zero UUID is not supported (00000000-0000-0000-0000-000000000000).
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"cluster", "cluster_id"} | set(ManagedKafkaBaseOperator.template_fields)
)
operator_extra_links = (ApacheKafkaClusterLink(),)
def __init__(
self,
cluster: types.Cluster | dict,
cluster_id: str,
request_id: str | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.cluster = cluster
self.cluster_id = cluster_id
self.request_id = request_id
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"location": self.location,
"cluster_id": self.cluster_id,
"project_id": self.project_id,
}
def execute(self, context: Context):
self.log.info("Creating an Apache Kafka cluster.")
ApacheKafkaClusterLink.persist(context=context)
try:
operation = self.hook.create_cluster(
project_id=self.project_id,
location=self.location,
cluster=self.cluster,
cluster_id=self.cluster_id,
request_id=self.request_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Waiting for operation to complete...")
cluster = self.hook.wait_for_operation(operation=operation, timeout=self.timeout)
self.log.info("Apache Kafka cluster was created.")
return types.Cluster.to_dict(cluster)
except AlreadyExists:
self.log.info("Apache Kafka cluster %s already exists.", self.cluster_id)
cluster = self.hook.get_cluster(
project_id=self.project_id,
location=self.location,
cluster_id=self.cluster_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return types.Cluster.to_dict(cluster)
| ManagedKafkaCreateClusterOperator |
python | sqlalchemy__sqlalchemy | examples/generic_associations/discriminator_on_association.py | {
"start": 1415,
"end": 1825
} | class ____(Base):
"""Associates a collection of Address objects
with a particular parent.
"""
__tablename__ = "address_association"
discriminator: Mapped[str] = mapped_column()
"""Refers to the type of parent."""
addresses: Mapped[list["Address"]] = relationship(
back_populates="association"
)
__mapper_args__ = {"polymorphic_on": discriminator}
| AddressAssociation |
python | joke2k__faker | faker/providers/automotive/ar_JO/__init__.py | {
"start": 48,
"end": 1567
} | class ____(AutomotiveProvider):
"""Implement automotive provider for ``ar_JO`` locale.
Sources:
- https://en.wikipedia.org/wiki/Vehicle_registration_plates_of_Jordan
"""
license_formats = (
"{{initials}}-####",
"{{initials}}-#####",
)
def initials(self) -> str:
"""Generate an initial number for license plates."""
return self.random_element(
[
"1", # Ministers
"2",
"3", # Parliament
"5", # General Government
"6", # Aqaba free zone
"7",
"8", # Diplomatic
"9", # Temporary
"10",
"23", # Passenger cars
"38",
"39", # Crew cabs
"41",
"42", # Light goods vehicles
"44", # Tractors
"46", # Motorcycles and scooters
"50", # Taxi
"56", # Small buses
"58", # Coaches
"60", # HGVs
"70", # Rental Cars
"71", # Trailer
"90", # Army
"95", # Ambulance
"96", # Gendarmerie
"99", # Police
]
)
def license_plate(self) -> str:
"""Generate a license plate."""
pattern: str = self.random_element(self.license_formats)
return self.numerify(self.generator.parse(pattern))
| Provider |
python | huggingface__transformers | src/transformers/models/t5/modeling_t5.py | {
"start": 5899,
"end": 16240
} | class ____(nn.Module):
def __init__(
self,
config: T5Config,
has_relative_attention_bias=False,
layer_idx: Optional[int] = None,
):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.relative_attention_max_distance = config.relative_attention_max_distance
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(
f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
"will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.gradient_checkpointing = False
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
def compute_bias(self, query_length, key_length, device=None, cache_position=None):
"""Compute binned relative position bias"""
if device is None:
device = self.relative_attention_bias.weight.device
if cache_position is None:
context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
else:
context_position = cache_position[:, None].to(device)
memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
max_distance=self.relative_attention_max_distance,
)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_values=None,
query_length=None,
use_cache=False,
output_attentions=False,
cache_position=None,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder)
batch_size, seq_length = hidden_states.shape[:2]
# if key_value_states are provided this layer is used as a cross-attention layer for the decoder
is_cross_attention = key_value_states is not None
query_states = self.q(hidden_states)
query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
# Check is encoder-decoder model is being used. Otherwise we'll get `DynamicCache`
is_updated = False
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k(current_states)
value_states = self.v(current_states)
key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
# compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
scores = torch.matmul(query_states, key_states.transpose(3, 2))
if position_bias is None:
key_length = key_states.shape[-2]
# cache position is 0-indexed so we add 1 to get the real length of queries (aka with past)
real_seq_length = query_length if query_length is not None else cache_position[-1] + 1
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype
)
if self.gradient_checkpointing and self.training:
position_bias.requires_grad = True
else:
position_bias = self.compute_bias(
real_seq_length, key_length, device=scores.device, cache_position=cache_position
)
position_bias = position_bias[:, :, -seq_length:, :]
if mask is not None:
causal_mask = mask[:, :, :, : key_states.shape[-2]]
position_bias = position_bias + causal_mask
position_bias_masked = position_bias
scores += position_bias_masked
# (batch_size, n_heads, seq_length, key_length)
attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(batch_size, -1, self.inner_dim)
attn_output = self.o(attn_output)
outputs = (attn_output, position_bias)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
| T5Attention |
python | google__pytype | pytype/pyi/definitions.py | {
"start": 7194,
"end": 8957
} | class ____(visitors.Visitor):
"""Visitor for verifying TypeParameters used in mutations are in scope."""
def __init__(self):
super().__init__()
# A stack of type parameters introduced into the scope. The top of the stack
# contains the currently accessible parameter set.
self.type_params_in_scope = [set()]
self.current_function = None
def _AddParams(self, params):
top = self.type_params_in_scope[-1]
self.type_params_in_scope.append(top | params)
def _GetTypeParameters(self, node):
params = pytd_utils.GetTypeParameters(node)
return {x.name for x in params}
def EnterClass(self, node):
params = set()
for cls in node.bases:
params |= self._GetTypeParameters(cls)
self._AddParams(params)
def LeaveClass(self, _):
self.type_params_in_scope.pop()
def EnterFunction(self, node):
self.current_function = node
params = set()
for sig in node.signatures:
for arg in sig.params:
params |= self._GetTypeParameters(arg.type)
if sig.starargs:
params |= self._GetTypeParameters(sig.starargs.type)
if sig.starstarargs:
params |= self._GetTypeParameters(sig.starstarargs.type)
self._AddParams(params)
def LeaveFunction(self, _):
self.type_params_in_scope.pop()
self.current_function = None
def EnterParameter(self, node):
if isinstance(node.mutated_type, pytd.GenericType):
params = self._GetTypeParameters(node.mutated_type)
extra = params - self.type_params_in_scope[-1]
if extra:
fn = pytd_utils.Print(self.current_function)
msg = "Type parameter(s) {{{}}} not in scope in\n\n{}".format(
", ".join(sorted(extra)), fn
)
raise _ParseError(msg)
| _VerifyMutators |
python | redis__redis-py | redis/asyncio/cluster.py | {
"start": 72659,
"end": 74770
} | class ____(ABC):
@abstractmethod
async def initialize(self) -> "ClusterPipeline":
"""
Initialize the execution strategy.
See ClusterPipeline.initialize()
"""
pass
@abstractmethod
def execute_command(
self, *args: Union[KeyT, EncodableT], **kwargs: Any
) -> "ClusterPipeline":
"""
Append a raw command to the pipeline.
See ClusterPipeline.execute_command()
"""
pass
@abstractmethod
async def execute(
self, raise_on_error: bool = True, allow_redirections: bool = True
) -> List[Any]:
"""
Execute the pipeline.
It will retry the commands as specified by retries specified in :attr:`retry`
& then raise an exception.
See ClusterPipeline.execute()
"""
pass
@abstractmethod
def mset_nonatomic(
self, mapping: Mapping[AnyKeyT, EncodableT]
) -> "ClusterPipeline":
"""
Executes multiple MSET commands according to the provided slot/pairs mapping.
See ClusterPipeline.mset_nonatomic()
"""
pass
@abstractmethod
async def reset(self):
"""
Resets current execution strategy.
See: ClusterPipeline.reset()
"""
pass
@abstractmethod
def multi(self):
"""
Starts transactional context.
See: ClusterPipeline.multi()
"""
pass
@abstractmethod
async def watch(self, *names):
"""
Watch given keys.
See: ClusterPipeline.watch()
"""
pass
@abstractmethod
async def unwatch(self):
"""
Unwatches all previously specified keys
See: ClusterPipeline.unwatch()
"""
pass
@abstractmethod
async def discard(self):
pass
@abstractmethod
async def unlink(self, *names):
"""
"Unlink a key specified by ``names``"
See: ClusterPipeline.unlink()
"""
pass
@abstractmethod
def __len__(self) -> int:
pass
| ExecutionStrategy |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/redshift_cluster.py | {
"start": 1693,
"end": 16104
} | class ____(AwsBaseOperator[RedshiftHook]):
"""
Creates a new cluster with the specified parameters.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RedshiftCreateClusterOperator`
:param cluster_identifier: A unique identifier for the cluster.
:param node_type: The node type to be provisioned for the cluster. Refer
https://docs.aws.amazon.com/redshift/latest/mgmt/working-with-clusters.html#rs-node-type-info
for the list of available node types.
:param master_username: The username associated with the admin user account for
the cluster that is being created.
:param master_user_password: The password associated with the admin user account for
the cluster that is being created.
:param cluster_type: The type of the cluster ``single-node`` or ``multi-node``.
The default value is ``multi-node``.
:param db_name: The name of the first database to be created when the cluster is created.
:param number_of_nodes: The number of compute nodes in the cluster.
This param require when ``cluster_type`` is ``multi-node``.
:param cluster_security_groups: A list of security groups to be associated with this cluster.
:param vpc_security_group_ids: A list of VPC security groups to be associated with the cluster.
:param cluster_subnet_group_name: The name of a cluster subnet group to be associated with this cluster.
:param availability_zone: The EC2 Availability Zone (AZ).
:param preferred_maintenance_window: The time range (in UTC) during which automated cluster
maintenance can occur.
:param cluster_parameter_group_name: The name of the parameter group to be associated with this cluster.
:param automated_snapshot_retention_period: The number of days that automated snapshots are retained.
The default value is ``1``.
:param manual_snapshot_retention_period: The default number of days to retain a manual snapshot.
:param port: The port number on which the cluster accepts incoming connections.
The Default value is ``5439``.
:param cluster_version: The version of a Redshift engine software that you want to deploy on the cluster.
:param allow_version_upgrade: Whether major version upgrades can be applied during the maintenance window.
The Default value is ``True``.
:param publicly_accessible: Whether cluster can be accessed from a public network.
:param encrypted: Whether data in the cluster is encrypted at rest.
The default value is ``False``.
:param hsm_client_certificate_identifier: Name of the HSM client certificate
the Amazon Redshift cluster uses to retrieve the data.
:param hsm_configuration_identifier: Name of the HSM configuration
:param elastic_ip: The Elastic IP (EIP) address for the cluster.
:param tags: A list of tag instances
:param kms_key_id: KMS key id of encryption key.
:param enhanced_vpc_routing: Whether to create the cluster with enhanced VPC routing enabled
Default value is ``False``.
:param additional_info: Reserved
:param iam_roles: A list of IAM roles that can be used by the cluster to access other AWS services.
:param maintenance_track_name: Name of the maintenance track for the cluster.
:param snapshot_schedule_identifier: A unique identifier for the snapshot schedule.
:param availability_zone_relocation: Enable relocation for a Redshift cluster
between Availability Zones after the cluster is created.
:param aqua_configuration_status: The cluster is configured to use AQUA .
:param default_iam_role_arn: ARN for the IAM role.
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param wait_for_completion: Whether wait for the cluster to be in ``available`` state
:param max_attempt: The maximum number of attempts to be made. Default: 5
:param poll_interval: The amount of time in seconds to wait between attempts. Default: 60
:param deferrable: If True, the operator will run in deferrable mode
"""
template_fields: Sequence[str] = aws_template_fields(
"cluster_identifier",
"cluster_type",
"node_type",
"master_username",
"master_user_password",
"cluster_type",
"db_name",
"number_of_nodes",
"cluster_security_groups",
"vpc_security_group_ids",
"cluster_subnet_group_name",
"availability_zone",
"preferred_maintenance_window",
"cluster_parameter_group_name",
"automated_snapshot_retention_period",
"manual_snapshot_retention_period",
"port",
"cluster_version",
"allow_version_upgrade",
"publicly_accessible",
"encrypted",
"hsm_client_certificate_identifier",
"hsm_configuration_identifier",
"elastic_ip",
"tags",
"kms_key_id",
"enhanced_vpc_routing",
"additional_info",
"iam_roles",
"maintenance_track_name",
"snapshot_schedule_identifier",
"availability_zone_relocation",
"aqua_configuration_status",
"default_iam_role_arn",
)
ui_color = "#eeaa11"
ui_fgcolor = "#ffffff"
aws_hook_class = RedshiftHook
def __init__(
self,
*,
cluster_identifier: str,
node_type: str,
master_username: str,
master_user_password: str,
cluster_type: str = "multi-node",
db_name: str = "dev",
number_of_nodes: int = 1,
cluster_security_groups: list[str] | None = None,
vpc_security_group_ids: list[str] | None = None,
cluster_subnet_group_name: str | None = None,
availability_zone: str | None = None,
preferred_maintenance_window: str | None = None,
cluster_parameter_group_name: str | None = None,
automated_snapshot_retention_period: int = 1,
manual_snapshot_retention_period: int | None = None,
port: int = 5439,
cluster_version: str = "1.0",
allow_version_upgrade: bool = True,
publicly_accessible: bool = True,
encrypted: bool = False,
hsm_client_certificate_identifier: str | None = None,
hsm_configuration_identifier: str | None = None,
elastic_ip: str | None = None,
tags: list[Any] | None = None,
kms_key_id: str | None = None,
enhanced_vpc_routing: bool = False,
additional_info: str | None = None,
iam_roles: list[str] | None = None,
maintenance_track_name: str | None = None,
snapshot_schedule_identifier: str | None = None,
availability_zone_relocation: bool | None = None,
aqua_configuration_status: str | None = None,
default_iam_role_arn: str | None = None,
wait_for_completion: bool = False,
max_attempt: int = 5,
poll_interval: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
):
super().__init__(**kwargs)
self.cluster_identifier = cluster_identifier
self.node_type = node_type
self.master_username = master_username
self.master_user_password = master_user_password
self.cluster_type = cluster_type
self.db_name = db_name
self.number_of_nodes = number_of_nodes
self.cluster_security_groups = cluster_security_groups
self.vpc_security_group_ids = vpc_security_group_ids
self.cluster_subnet_group_name = cluster_subnet_group_name
self.availability_zone = availability_zone
self.preferred_maintenance_window = preferred_maintenance_window
self.cluster_parameter_group_name = cluster_parameter_group_name
self.automated_snapshot_retention_period = automated_snapshot_retention_period
self.manual_snapshot_retention_period = manual_snapshot_retention_period
self.port = port
self.cluster_version = cluster_version
self.allow_version_upgrade = allow_version_upgrade
self.publicly_accessible = publicly_accessible
self.encrypted = encrypted
self.hsm_client_certificate_identifier = hsm_client_certificate_identifier
self.hsm_configuration_identifier = hsm_configuration_identifier
self.elastic_ip = elastic_ip
self.tags = tags
self.kms_key_id = kms_key_id
self.enhanced_vpc_routing = enhanced_vpc_routing
self.additional_info = additional_info
self.iam_roles = iam_roles
self.maintenance_track_name = maintenance_track_name
self.snapshot_schedule_identifier = snapshot_schedule_identifier
self.availability_zone_relocation = availability_zone_relocation
self.aqua_configuration_status = aqua_configuration_status
self.default_iam_role_arn = default_iam_role_arn
self.wait_for_completion = wait_for_completion
self.max_attempt = max_attempt
self.poll_interval = poll_interval
self.deferrable = deferrable
self.kwargs = kwargs
def execute(self, context: Context):
self.log.info("Creating Redshift cluster %s", self.cluster_identifier)
params: dict[str, Any] = {}
if self.db_name:
params["DBName"] = self.db_name
if self.cluster_type:
params["ClusterType"] = self.cluster_type
if self.cluster_type == "multi-node":
params["NumberOfNodes"] = self.number_of_nodes
if self.cluster_security_groups:
params["ClusterSecurityGroups"] = self.cluster_security_groups
if self.vpc_security_group_ids:
params["VpcSecurityGroupIds"] = self.vpc_security_group_ids
if self.cluster_subnet_group_name:
params["ClusterSubnetGroupName"] = self.cluster_subnet_group_name
if self.availability_zone:
params["AvailabilityZone"] = self.availability_zone
if self.preferred_maintenance_window:
params["PreferredMaintenanceWindow"] = self.preferred_maintenance_window
if self.cluster_parameter_group_name:
params["ClusterParameterGroupName"] = self.cluster_parameter_group_name
if self.automated_snapshot_retention_period:
params["AutomatedSnapshotRetentionPeriod"] = self.automated_snapshot_retention_period
if self.manual_snapshot_retention_period:
params["ManualSnapshotRetentionPeriod"] = self.manual_snapshot_retention_period
if self.port:
params["Port"] = self.port
if self.cluster_version:
params["ClusterVersion"] = self.cluster_version
if self.allow_version_upgrade:
params["AllowVersionUpgrade"] = self.allow_version_upgrade
if self.encrypted:
params["Encrypted"] = self.encrypted
if self.hsm_client_certificate_identifier:
params["HsmClientCertificateIdentifier"] = self.hsm_client_certificate_identifier
if self.hsm_configuration_identifier:
params["HsmConfigurationIdentifier"] = self.hsm_configuration_identifier
if self.elastic_ip:
params["ElasticIp"] = self.elastic_ip
if self.tags:
params["Tags"] = self.tags
if self.kms_key_id:
params["KmsKeyId"] = self.kms_key_id
if self.enhanced_vpc_routing:
params["EnhancedVpcRouting"] = self.enhanced_vpc_routing
if self.additional_info:
params["AdditionalInfo"] = self.additional_info
if self.iam_roles:
params["IamRoles"] = self.iam_roles
if self.maintenance_track_name:
params["MaintenanceTrackName"] = self.maintenance_track_name
if self.snapshot_schedule_identifier:
params["SnapshotScheduleIdentifier"] = self.snapshot_schedule_identifier
if self.availability_zone_relocation:
params["AvailabilityZoneRelocation"] = self.availability_zone_relocation
if self.aqua_configuration_status:
params["AquaConfigurationStatus"] = self.aqua_configuration_status
if self.default_iam_role_arn:
params["DefaultIamRoleArn"] = self.default_iam_role_arn
# PubliclyAccessible is True by default on Redshift side, hence, we should always set it regardless
# of its value
params["PubliclyAccessible"] = self.publicly_accessible
cluster = self.hook.create_cluster(
self.cluster_identifier,
self.node_type,
self.master_username,
self.master_user_password,
params,
)
if self.deferrable:
self.defer(
trigger=RedshiftCreateClusterTrigger(
cluster_identifier=self.cluster_identifier,
waiter_delay=self.poll_interval,
waiter_max_attempts=self.max_attempt,
aws_conn_id=self.aws_conn_id,
),
method_name="execute_complete",
)
if self.wait_for_completion:
self.hook.get_conn().get_waiter("cluster_available").wait(
ClusterIdentifier=self.cluster_identifier,
WaiterConfig={
"Delay": self.poll_interval,
"MaxAttempts": self.max_attempt,
},
)
self.log.info("Created Redshift cluster %s", self.cluster_identifier)
self.log.info(cluster)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"Error creating cluster: {validated_event}")
| RedshiftCreateClusterOperator |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/fabric.py | {
"start": 7416,
"end": 7634
} | class ____(_PowerBIAsset):
"""Microsoft PowerBI DAX."""
_reader_method: ClassVar[FabricReaderMethods] = "evaluate_dax"
type: Literal["powerbi_dax"] = "powerbi_dax"
dax_string: str
@public_api
| PowerBIDax |
python | mlflow__mlflow | mlflow/tracing/constant.py | {
"start": 1843,
"end": 2103
} | class ____:
TOTAL_SIZE_BYTES = "total_size_bytes"
NUM_SPANS = "num_spans"
MAX_SPAN_SIZE_BYTES = "max"
P25_SPAN_SIZE_BYTES = "p25"
P50_SPAN_SIZE_BYTES = "p50"
P75_SPAN_SIZE_BYTES = "p75"
# A set of reserved attribute keys
| TraceSizeStatsKey |
python | apache__airflow | providers/google/tests/unit/google/ads/transfers/test_ads_to_gcs.py | {
"start": 1150,
"end": 2441
} | class ____:
@mock.patch("airflow.providers.google.ads.transfers.ads_to_gcs.GoogleAdsHook")
@mock.patch("airflow.providers.google.ads.transfers.ads_to_gcs.GCSHook")
def test_execute(self, mock_gcs_hook, mock_ads_hook):
op = GoogleAdsToGcsOperator(
gcp_conn_id=gcp_conn_id,
google_ads_conn_id=google_ads_conn_id,
client_ids=CLIENT_IDS,
query=QUERY,
attributes=FIELDS_TO_EXTRACT,
obj=GCS_OBJ_PATH,
bucket=BUCKET,
task_id="run_operator",
impersonation_chain=IMPERSONATION_CHAIN,
api_version=api_version,
)
op.execute({})
mock_ads_hook.assert_called_once_with(
gcp_conn_id=gcp_conn_id,
google_ads_conn_id=google_ads_conn_id,
api_version=api_version,
)
mock_ads_hook.return_value.search.assert_called_once_with(client_ids=CLIENT_IDS, query=QUERY)
mock_gcs_hook.assert_called_once_with(
gcp_conn_id=gcp_conn_id,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_gcs_hook.return_value.upload.assert_called_once_with(
bucket_name=BUCKET, object_name=GCS_OBJ_PATH, filename=mock.ANY, gzip=False
)
| TestGoogleAdsToGcsOperator |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E30.py | {
"start": 5680,
"end": 6441
} | class ____:
"""Demo."""
@overload
def bar(self, x: int) -> int: ...
@overload
def bar(self, x: str) -> str:
...
def bar(self, x: int | str) -> int | str:
return x
# end
# E302
"""Main module."""
def fn():
pass
# end
# E302
import sys
def get_sys_path():
return sys.path
# end
# E302
def a():
pass
def b():
pass
# end
# E302
def a():
pass
# comment
def b():
pass
# end
# E302
def a():
pass
async def b():
pass
# end
# E302
async def x():
pass
async def x(y: int = 1):
pass
# end
# E302
def bar():
pass
def baz(): pass
# end
# E302
def bar(): pass
def baz():
pass
# end
# E302
def f():
pass
# comment
@decorator
def g():
pass
# end
# E302
| Foo |
python | gevent__gevent | src/gevent/tests/test__greenlet.py | {
"start": 28218,
"end": 30283
} | class ____(greentest.TestCase):
def test_simple(self):
with gevent.spawn(gevent.sleep, timing.SMALL_TICK) as g:
self.assert_greenlet_spawned(g)
# It is completed after the suite
self.assert_greenlet_finished(g)
def test_wait_in_suite(self):
with gevent.spawn(self._raise_exception) as g:
with self.assertRaises(greentest.ExpectedException):
g.get()
self.assert_greenlet_finished(g)
@staticmethod
def _raise_exception():
raise greentest.ExpectedException
def test_greenlet_raises(self):
with gevent.spawn(self._raise_exception) as g:
pass
self.assert_greenlet_finished(g)
with self.assertRaises(greentest.ExpectedException):
g.get()
def test_join_raises(self):
suite_ran = 0
with self.assertRaises(ExpectedJoinError):
with GreenletRaisesJoin.spawn(gevent.sleep, timing.SMALL_TICK) as g:
self.assert_greenlet_spawned(g)
suite_ran = 1
self.assertTrue(suite_ran)
self.assert_greenlet_finished(g)
self.assertTrue(g.killed)
def test_suite_body_raises(self, delay=None):
greenlet_sleep = timing.SMALL_TICK if not delay else timing.LARGE_TICK
with self.assertRaises(SuiteExpectedException):
with GreenletRaisesJoin.spawn(gevent.sleep, greenlet_sleep) as g:
self.assert_greenlet_spawned(g)
if delay:
g.raise_on_join = False
gevent.sleep(delay)
raise SuiteExpectedException
self.assert_greenlet_finished(g)
self.assertTrue(g.killed)
if delay:
self.assertTrue(g.joined)
else:
self.assertFalse(g.joined)
self.assertFalse(g.successful())
with self.assertRaises(SuiteExpectedException):
g.get()
def test_suite_body_raises_with_delay(self):
self.test_suite_body_raises(delay=timing.SMALL_TICK)
| TestContextManager |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/classes7.py | {
"start": 114,
"end": 265
} | class ____(Generic[T]):
pass
IntBaseClass = BaseClass[float]
# This should generate an error because the same
# base class is used twice.
| BaseClass |
python | sympy__sympy | sympy/polys/domains/domain.py | {
"start": 3070,
"end": 3775
} | class ____(AbsElement, Protocol):
"""An element that can be compared to other elements.
Must support ``<``, ``<=``, ``>``, ``>=``.
"""
def __lt__(self, other: Self, /) -> bool: ...
def __le__(self, other: Self, /) -> bool: ...
def __gt__(self, other: Self, /) -> bool: ...
def __ge__(self, other: Self, /) -> bool: ...
Er = TypeVar('Er', bound=RingElement)
Es = TypeVar('Es', bound=RingElement)
Et = TypeVar('Et', bound=RingElement)
Eg = TypeVar('Eg', bound=RingElement)
Ef = TypeVar('Ef', bound=FieldElement)
Eeuclid = TypeVar('Eeuclid', bound=EuclidElement)
Eabs = TypeVar('Eabs', bound=AbsElement)
Eordered = TypeVar('Eordered', bound=OrderedElement)
@public
| OrderedElement |
python | pypa__setuptools | pkg_resources/__init__.py | {
"start": 91198,
"end": 97570
} | class ____:
"""Object representing an advertised importable object"""
def __init__(
self,
name: str,
module_name: str,
attrs: Iterable[str] = (),
extras: Iterable[str] = (),
dist: Distribution | None = None,
) -> None:
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = tuple(extras)
self.dist = dist
def __str__(self) -> str:
s = f"{self.name} = {self.module_name}"
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
extras = ','.join(self.extras)
s += f' [{extras}]'
return s
def __repr__(self) -> str:
return f"EntryPoint.parse({str(self)!r})"
@overload
def load(
self,
require: Literal[True] = True,
env: Environment | None = None,
installer: _InstallerType | None = None,
) -> _ResolvedEntryPoint: ...
@overload
def load(
self,
require: Literal[False],
*args: Any,
**kwargs: Any,
) -> _ResolvedEntryPoint: ...
def load(
self,
require: bool = True,
*args: Environment | _InstallerType | None,
**kwargs: Environment | _InstallerType | None,
) -> _ResolvedEntryPoint:
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
PkgResourcesDeprecationWarning,
stacklevel=2,
)
if require:
# We could pass `env` and `installer` directly,
# but keeping `*args` and `**kwargs` for backwards compatibility
self.require(*args, **kwargs) # type: ignore[arg-type]
return self.resolve()
def resolve(self) -> _ResolvedEntryPoint:
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc)) from exc
def require(
self,
env: Environment | None = None,
installer: _InstallerType | None = None,
) -> None:
if not self.dist:
error_cls = UnknownExtra if self.extras else AttributeError
raise error_cls("Can't require() without a distribution", self)
# Get the requirements for this entry point with all its extras and
# then resolve them. We have to pass `extras` along when resolving so
# that the working set knows what extras we want. Otherwise, for
# dist-info distributions, the working set will assume that the
# requirements for that extra are purely optional and skip over them.
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer, extras=self.extras)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src: str, dist: Distribution | None = None) -> Self:
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError
return req.extras
@classmethod
def parse_group(
cls,
group: str,
lines: _NestedStr,
dist: Distribution | None = None,
) -> dict[str, Self]:
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this: dict[str, Self] = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(
cls,
data: str | Iterable[str] | dict[str, str | Iterable[str]],
dist: Distribution | None = None,
) -> dict[str, dict[str, Self]]:
"""Parse a map of entry point groups"""
_data: Iterable[tuple[str | None, str | Iterable[str]]]
if isinstance(data, dict):
_data = data.items()
else:
_data = split_sections(data)
maps: dict[str, dict[str, Self]] = {}
for group, lines in _data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
def is_version_line(line):
return line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
| EntryPoint |
python | django__django | tests/model_fields/test_durationfield.py | {
"start": 2204,
"end": 2704
} | class ____(SimpleTestCase):
def test_invalid_string(self):
field = models.DurationField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean("not a datetime", None)
self.assertEqual(cm.exception.code, "invalid")
self.assertEqual(
cm.exception.message % cm.exception.params,
"“not a datetime” value has an invalid format. "
"It must be in [DD] [[HH:]MM:]ss[.uuuuuu] format.",
)
| TestValidation |
python | celery__celery | celery/beat.py | {
"start": 6520,
"end": 16356
} | class ____:
"""Scheduler for periodic tasks.
The :program:`celery beat` program may instantiate this class
multiple times for introspection purposes, but then with the
``lazy`` argument set. It's important for subclasses to
be idempotent when this argument is set.
Arguments:
schedule (~celery.schedules.schedule): see :attr:`schedule`.
max_interval (int): see :attr:`max_interval`.
lazy (bool): Don't set up the schedule.
"""
Entry = ScheduleEntry
#: The schedule dict/shelve.
schedule = None
#: Maximum time to sleep between re-checking the schedule.
max_interval = DEFAULT_MAX_INTERVAL
#: How often to sync the schedule (3 minutes by default)
sync_every = 3 * 60
#: How many tasks can be called before a sync is forced.
sync_every_tasks = None
_last_sync = None
_tasks_since_sync = 0
logger = logger # compat
def __init__(self, app, schedule=None, max_interval=None,
Producer=None, lazy=False, sync_every_tasks=None, **kwargs):
self.app = app
self.data = maybe_evaluate({} if schedule is None else schedule)
self.max_interval = (max_interval or
app.conf.beat_max_loop_interval or
self.max_interval)
self.Producer = Producer or app.amqp.Producer
self._heap = None
self.old_schedulers = None
self.sync_every_tasks = (
app.conf.beat_sync_every if sync_every_tasks is None
else sync_every_tasks)
if not lazy:
self.setup_schedule()
def install_default_entries(self, data):
entries = {}
if self.app.conf.result_expires and \
not self.app.backend.supports_autoexpire:
if 'celery.backend_cleanup' not in data:
entries['celery.backend_cleanup'] = {
'task': 'celery.backend_cleanup',
'schedule': crontab('0', '4', '*'),
'options': {'expires': 12 * 3600}}
self.update_from_dict(entries)
def apply_entry(self, entry, producer=None):
info('Scheduler: Sending due task %s (%s)', entry.name, entry.task)
try:
result = self.apply_async(entry, producer=producer, advance=False)
except Exception as exc: # pylint: disable=broad-except
error('Message Error: %s\n%s',
exc, traceback.format_stack(), exc_info=True)
else:
if result and hasattr(result, 'id'):
debug('%s sent. id->%s', entry.task, result.id)
else:
debug('%s sent.', entry.task)
def adjust(self, n, drift=-0.010):
if n and n > 0:
return n + drift
return n
def is_due(self, entry):
return entry.is_due()
def _when(self, entry, next_time_to_run, mktime=timegm):
"""Return a utc timestamp, make sure heapq in correct order."""
adjust = self.adjust
as_now = maybe_make_aware(entry.default_now())
return (mktime(as_now.utctimetuple()) +
as_now.microsecond / 1e6 +
(adjust(next_time_to_run) or 0))
def populate_heap(self, event_t=event_t, heapify=heapq.heapify):
"""Populate the heap with the data contained in the schedule."""
priority = 5
self._heap = []
for entry in self.schedule.values():
is_due, next_call_delay = entry.is_due()
self._heap.append(event_t(
self._when(
entry,
0 if is_due else next_call_delay
) or 0,
priority, entry
))
heapify(self._heap)
# pylint disable=redefined-outer-name
def tick(self, event_t=event_t, min=min, heappop=heapq.heappop,
heappush=heapq.heappush):
"""Run a tick - one iteration of the scheduler.
Executes one due task per call.
Returns:
float: preferred delay in seconds for next call.
"""
adjust = self.adjust
max_interval = self.max_interval
if (self._heap is None or
not self.schedules_equal(self.old_schedulers, self.schedule)):
self.old_schedulers = copy.copy(self.schedule)
self.populate_heap()
H = self._heap
if not H:
return max_interval
event = H[0]
entry = event[2]
is_due, next_time_to_run = self.is_due(entry)
if is_due:
verify = heappop(H)
if verify is event:
next_entry = self.reserve(entry)
self.apply_entry(entry, producer=self.producer)
heappush(H, event_t(self._when(next_entry, next_time_to_run),
event[1], next_entry))
return 0
else:
heappush(H, verify)
return min(verify[0], max_interval)
adjusted_next_time_to_run = adjust(next_time_to_run)
return min(adjusted_next_time_to_run if is_numeric_value(adjusted_next_time_to_run) else max_interval,
max_interval)
def schedules_equal(self, old_schedules, new_schedules):
if old_schedules is new_schedules is None:
return True
if old_schedules is None or new_schedules is None:
return False
if set(old_schedules.keys()) != set(new_schedules.keys()):
return False
for name, old_entry in old_schedules.items():
new_entry = new_schedules.get(name)
if not new_entry:
return False
if new_entry != old_entry:
return False
return True
def should_sync(self):
return (
(not self._last_sync or
(time.monotonic() - self._last_sync) > self.sync_every) or
(self.sync_every_tasks and
self._tasks_since_sync >= self.sync_every_tasks)
)
def reserve(self, entry):
new_entry = self.schedule[entry.name] = next(entry)
return new_entry
def apply_async(self, entry, producer=None, advance=True, **kwargs):
# Update time-stamps and run counts before we actually execute,
# so we have that done if an exception is raised (doesn't schedule
# forever.)
entry = self.reserve(entry) if advance else entry
task = self.app.tasks.get(entry.task)
try:
entry_args = _evaluate_entry_args(entry.args)
entry_kwargs = _evaluate_entry_kwargs(entry.kwargs)
if task:
return task.apply_async(entry_args, entry_kwargs,
producer=producer,
**entry.options)
else:
return self.send_task(entry.task, entry_args, entry_kwargs,
producer=producer,
**entry.options)
except Exception as exc: # pylint: disable=broad-except
reraise(SchedulingError, SchedulingError(
"Couldn't apply scheduled task {0.name}: {exc}".format(
entry, exc=exc)), sys.exc_info()[2])
finally:
self._tasks_since_sync += 1
if self.should_sync():
self._do_sync()
def send_task(self, *args, **kwargs):
return self.app.send_task(*args, **kwargs)
def setup_schedule(self):
self.install_default_entries(self.data)
self.merge_inplace(self.app.conf.beat_schedule)
def _do_sync(self):
try:
debug('beat: Synchronizing schedule...')
self.sync()
finally:
self._last_sync = time.monotonic()
self._tasks_since_sync = 0
def sync(self):
pass
def close(self):
self.sync()
def add(self, **kwargs):
entry = self.Entry(app=self.app, **kwargs)
self.schedule[entry.name] = entry
return entry
def _maybe_entry(self, name, entry):
if isinstance(entry, self.Entry):
entry.app = self.app
return entry
return self.Entry(**dict(entry, name=name, app=self.app))
def update_from_dict(self, dict_):
self.schedule.update({
name: self._maybe_entry(name, entry)
for name, entry in dict_.items()
})
def merge_inplace(self, b):
schedule = self.schedule
A, B = set(schedule), set(b)
# Remove items from disk not in the schedule anymore.
for key in A ^ B:
schedule.pop(key, None)
# Update and add new items in the schedule
for key in B:
entry = self.Entry(**dict(b[key], name=key, app=self.app))
if schedule.get(key):
schedule[key].update(entry)
else:
schedule[key] = entry
def _ensure_connected(self):
# callback called for each retry while the connection
# can't be established.
def _error_handler(exc, interval):
error('beat: Connection error: %s. '
'Trying again in %s seconds...', exc, interval)
return self.connection.ensure_connection(
_error_handler, self.app.conf.broker_connection_max_retries
)
def get_schedule(self):
return self.data
def set_schedule(self, schedule):
self.data = schedule
schedule = property(get_schedule, set_schedule)
@cached_property
def connection(self):
return self.app.connection_for_write()
@cached_property
def producer(self):
return self.Producer(self._ensure_connected(), auto_declare=False)
@property
def info(self):
return ''
| Scheduler |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategies.py | {
"start": 41842,
"end": 44649
} | class ____(_AbstractRelationshipLoader):
"""A relationship loader that emits a second SELECT statement."""
__slots__ = ()
def _setup_for_recursion(self, context, path, loadopt, join_depth=None):
effective_path = (
context.compile_state.current_path or orm_util.PathRegistry.root
) + path
top_level_context = context._get_top_level_context()
execution_options = util.immutabledict(
{"sa_top_level_orm_context": top_level_context}
)
if loadopt:
recursion_depth = loadopt.local_opts.get("recursion_depth", None)
unlimited_recursion = recursion_depth == -1
else:
recursion_depth = None
unlimited_recursion = False
if recursion_depth is not None:
if not self.parent_property._is_self_referential:
raise sa_exc.InvalidRequestError(
f"recursion_depth option on relationship "
f"{self.parent_property} not valid for "
"non-self-referential relationship"
)
recursion_depth = context.execution_options.get(
f"_recursion_depth_{id(self)}", recursion_depth
)
if not unlimited_recursion and recursion_depth < 0:
return (
effective_path,
False,
execution_options,
recursion_depth,
)
if not unlimited_recursion:
execution_options = execution_options.union(
{
f"_recursion_depth_{id(self)}": recursion_depth - 1,
}
)
if loading._PostLoad.path_exists(
context, effective_path, self.parent_property
):
return effective_path, False, execution_options, recursion_depth
path_w_prop = path[self.parent_property]
effective_path_w_prop = effective_path[self.parent_property]
if not path_w_prop.contains(context.attributes, "loader"):
if join_depth:
if effective_path_w_prop.length / 2 > join_depth:
return (
effective_path,
False,
execution_options,
recursion_depth,
)
elif effective_path_w_prop.contains_mapper(self.mapper):
return (
effective_path,
False,
execution_options,
recursion_depth,
)
return effective_path, True, execution_options, recursion_depth
@relationships.RelationshipProperty.strategy_for(lazy="immediate")
| _PostLoader |
python | huggingface__transformers | src/transformers/models/bert/modeling_bert.py | {
"start": 23718,
"end": 25547
} | class ____(ModelOutput):
r"""
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: Optional[torch.FloatTensor] = None
seq_relationship_logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@auto_docstring(
custom_intro="""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://huggingface.co/papers/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
"""
)
| BertForPreTrainingOutput |
python | pytorch__pytorch | torch/multiprocessing/queue.py | {
"start": 1123,
"end": 1477
} | class ____(multiprocessing.queues.SimpleQueue):
def _make_methods(self):
if not isinstance(self._reader, ConnectionWrapper):
self._reader: ConnectionWrapper = ConnectionWrapper(self._reader)
self._writer: ConnectionWrapper = ConnectionWrapper(self._writer)
super()._make_methods() # type: ignore[misc]
| SimpleQueue |
python | matplotlib__matplotlib | galleries/examples/specialty_plots/skewt.py | {
"start": 1185,
"end": 2602
} | class ____(maxis.XTick):
def draw(self, renderer):
# When adding the callbacks with `stack.callback`, we fetch the current
# visibility state of the artist with `get_visible`; the ExitStack will
# restore these states (`set_visible`) at the end of the block (after
# the draw).
with ExitStack() as stack:
for artist in [self.gridline, self.tick1line, self.tick2line,
self.label1, self.label2]:
stack.callback(artist.set_visible, artist.get_visible())
needs_lower = transforms.interval_contains(
self.axes.lower_xlim, self.get_loc())
needs_upper = transforms.interval_contains(
self.axes.upper_xlim, self.get_loc())
self.tick1line.set_visible(
self.tick1line.get_visible() and needs_lower)
self.label1.set_visible(
self.label1.get_visible() and needs_lower)
self.tick2line.set_visible(
self.tick2line.get_visible() and needs_upper)
self.label2.set_visible(
self.label2.get_visible() and needs_upper)
super().draw(renderer)
def get_view_interval(self):
return self.axes.xaxis.get_view_interval()
# This class exists to provide two separate sets of intervals to the tick,
# as well as create instances of the custom tick
| SkewXTick |
python | spyder-ide__spyder | spyder/plugins/debugger/widgets/breakpoint_table_view.py | {
"start": 1123,
"end": 1416
} | class ____:
# Triggers
ClearAllBreakpoints = 'clear_all_breakpoints_action'
ClearBreakpoint = 'clear_breakpoint_action'
EditBreakpoint = 'edit_breakpoint_action'
# --- Model
# ----------------------------------------------------------------------------
| BreakpointTableViewActions |
python | sympy__sympy | sympy/polys/polyoptions.py | {
"start": 9814,
"end": 10071
} | class ____(BooleanOption, metaclass=OptionType):
"""``greedy`` option to polynomial manipulation functions. """
option = 'greedy'
requires: list[str] = []
excludes = ['domain', 'split', 'gaussian', 'extension', 'modulus', 'symmetric']
| Greedy |
python | fluentpython__example-code-2e | 17-it-generator/sentence_iter2.py | {
"start": 222,
"end": 501
} | class ____:
def __init__(self, text):
self.text = text
def __repr__(self):
return f'Sentence({reprlib.repr(self.text)})'
def __iter__(self):
word_iter = RE_WORD.finditer(self.text) # <1>
return SentenceIter(word_iter) # <2>
| Sentence |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-rayyan/llama_index/readers/rayyan/base.py | {
"start": 169,
"end": 3989
} | class ____(BaseReader):
"""
Rayyan reader. Reads articles from a Rayyan review.
Args:
credentials_path (str): Rayyan credentials path.
rayyan_url (str, optional): Rayyan URL. Defaults to https://rayyan.ai.
Set to an alternative URL if you are using a non-production Rayyan instance.
"""
def __init__(
self, credentials_path: str, rayyan_url: str = "https://rayyan.ai"
) -> None:
"""Initialize Rayyan reader."""
from rayyan import Rayyan
from rayyan.user import User
logging.debug("Initializing Rayyan reader...")
self.rayyan = Rayyan(credentials_path, url=rayyan_url)
user = User(self.rayyan).get_info()
logging.info(f"Signed in successfully to Rayyan as: {user['displayName']}!")
def load_data(self, review_id: str, filters: dict = {}) -> List[Document]:
"""
Load articles from a review.
Args:
review_id (int): Rayyan review ID.
filters (dict, optional): Filters to apply to the review. Defaults to None. Passed to
the Rayyan review results method as is.
Returns:
List[Document]: List of documents.
"""
from tenacity import (
retry,
stop_after_attempt,
stop_after_delay,
stop_all,
wait_random_exponential,
)
from tqdm import tqdm
from rayyan.review import Review
rayyan_review = Review(self.rayyan)
my_review = rayyan_review.get(review_id)
logging.info(
f"Working on review: '{my_review['title']}' with {my_review['total_articles']} total articles."
)
result_params = {"start": 0, "length": 100}
result_params.update(filters)
@retry(
wait=wait_random_exponential(min=1, max=10),
stop=stop_all(stop_after_attempt(3), stop_after_delay(30)),
)
def fetch_results_with_retry():
logging.debug("Fetch parameters: %s", result_params)
return rayyan_review.results(review_id, result_params)
articles = []
logging.info("Fetching articles from Rayyan...")
total = my_review["total_articles"]
with tqdm(total=total) as pbar:
while len(articles) < total:
# retrieve articles in batches
review_results = fetch_results_with_retry()
fetched_articles = review_results["data"]
articles.extend(fetched_articles)
# update total in case filters are applied
if total != review_results["recordsFiltered"]:
total = review_results["recordsFiltered"]
pbar.total = total
result_params["start"] += len(fetched_articles)
pbar.update(len(fetched_articles))
results = []
for article in articles:
# iterate over all abstracts
abstracts = ""
if article["abstracts"] is not None:
abstracts_arr = [
abstract["content"] for abstract in article["abstracts"]
]
if len(abstracts_arr) > 0:
# map array into a string
abstracts = "\n".join(abstracts_arr)[0:1024].strip()
title = article["title"]
if title is not None:
title = title.strip()
body = f"{title}\n{abstracts}"
if body.strip() == "":
continue
extra_info = {"id": article["id"], "title": title}
results.append(
Document(
text=body,
extra_info=extra_info,
)
)
return results
| RayyanReader |
python | dagster-io__dagster | python_modules/libraries/dagster-fivetran/dagster_fivetran_tests/test_translator.py | {
"start": 1283,
"end": 2690
} | class ____(DagsterFivetranTranslator):
def get_asset_spec(self, props: FivetranConnectorTableProps) -> AssetSpec:
default_spec = super().get_asset_spec(props)
return default_spec.replace_attributes(
key=default_spec.key.with_prefix("prefix"),
metadata={**default_spec.metadata, "custom": "metadata"},
)
def test_translator_custom_metadata(
fetch_workspace_data_api_mocks: responses.RequestsMock,
) -> None:
with environ({"FIVETRAN_API_KEY": TEST_API_KEY, "FIVETRAN_API_SECRET": TEST_API_SECRET}):
resource = FivetranWorkspace(
account_id=TEST_ACCOUNT_ID,
api_key=EnvVar("FIVETRAN_API_KEY"),
api_secret=EnvVar("FIVETRAN_API_SECRET"),
)
actual_workspace_data = resource.get_or_fetch_workspace_data()
table_props_data = actual_workspace_data.to_fivetran_connector_table_props_data()
first_table_props_data = next(props for props in table_props_data)
asset_spec = MyCustomTranslator().get_asset_spec(first_table_props_data)
assert "custom" in asset_spec.metadata
assert asset_spec.metadata["custom"] == "metadata"
assert asset_spec.key.path == [
"prefix",
"schema_name_in_destination_1",
"table_name_in_destination_1",
]
assert "dagster/kind/fivetran" in asset_spec.tags
| MyCustomTranslator |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_column07.py | {
"start": 315,
"end": 1348
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_column07.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [68810240, 68811776]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series(
{
"values": "=(Sheet1!$A$1:$A$2,Sheet1!$A$4:$A$5)",
"values_data": [1, 2, 4, 5],
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | scrapy__scrapy | scrapy/utils/testproc.py | {
"start": 1798,
"end": 2353
} | class ____(ProcessProtocol):
def __init__(self) -> None:
self.deferred: Deferred[TestProcessProtocol] = Deferred()
self.out: bytes = b""
self.err: bytes = b""
self.exitcode: int | None = None
def outReceived(self, data: bytes) -> None:
self.out += data
def errReceived(self, data: bytes) -> None:
self.err += data
def processEnded(self, status: Failure) -> None:
self.exitcode = cast("ProcessTerminated", status.value).exitCode
self.deferred.callback(self)
| TestProcessProtocol |
python | plotly__plotly.py | plotly/graph_objs/choroplethmapbox/_stream.py | {
"start": 233,
"end": 3556
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "choroplethmapbox"
_path_str = "choroplethmapbox.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.choroplethmapbox.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.choroplethmapbox.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmapbox.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | getsentry__sentry | src/sentry/integrations/metric_alerts.py | {
"start": 2112,
"end": 2277
} | class ____(TypedDict):
title_link: str
title: str
text: str
status: str
logo_url: str
date_started: NotRequired[datetime | None]
| AttachmentInfo |
python | coleifer__peewee | tests/fields.py | {
"start": 45676,
"end": 46018
} | class ____(TestModel):
nq = ForeignKeyField(NQ, backref='items')
nq_null = ForeignKeyField(NQ, backref='null_items', null=True)
nq_lazy = ForeignKeyField(NQ, lazy_load=False, backref='lazy_items')
nq_lazy_null = ForeignKeyField(NQ, lazy_load=False,
backref='lazy_null_items', null=True)
| NQItem |
python | getsentry__sentry | tests/sentry/models/test_releasefile.py | {
"start": 7157,
"end": 10276
} | class ____(TransactionTestCase):
tick = 0.1 # seconds
def _create_update_fn(self, initial_delay, locked_delay, files, create):
def f():
sleep(initial_delay * self.tick)
with _ArtifactIndexGuard(self.release, None).writable_data(create=create) as data:
sleep(locked_delay * self.tick)
data.update_files(files)
return f
def test_locking(self) -> None:
release = self.release
dist = None
update1 = self._create_update_fn(0, 2, {"foo": "bar"}, create=True)
update2 = self._create_update_fn(1, 2, {"123": "xyz"}, create=True)
threads = [Thread(target=update1), Thread(target=update2)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Without locking, only key "123" would survive:
index = read_artifact_index(release, dist)
assert index is not None
assert index["files"].keys() == {"foo", "123"}
# Only one `File` was created:
assert File.objects.filter(name=ARTIFACT_INDEX_FILENAME).count() == 1
def delete():
sleep(2 * self.tick)
delete_from_artifact_index(release, dist, "foo")
update3 = self._create_update_fn(1, 2, {"abc": "666"}, create=True)
threads = [Thread(target=update3), Thread(target=delete)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Without locking, the delete would be surpassed by the slow update:
index = read_artifact_index(release, dist)
assert index is not None
assert index["files"].keys() == {"123", "abc"}
def test_lock_existing(self) -> None:
release = self.release
dist = None
with _ArtifactIndexGuard(release, dist).writable_data(create=True) as data:
data.update_files({"0": 0})
update1 = self._create_update_fn(0, 2, {"foo": "bar"}, create=False)
update2 = self._create_update_fn(1, 2, {"123": "xyz"}, create=False)
threads = [Thread(target=update1), Thread(target=update2)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Without locking, only keys "0", "123" would survive:
index = read_artifact_index(release, dist)
assert index is not None
assert index["files"].keys() == {"0", "foo", "123"}
def delete():
sleep(2 * self.tick)
delete_from_artifact_index(release, dist, "foo")
update3 = self._create_update_fn(1, 2, {"abc": "666"}, create=False)
threads = [Thread(target=update3), Thread(target=delete)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Without locking, the delete would be surpassed by the slow update:
index = read_artifact_index(release, dist)
assert index is not None
assert index["files"].keys() == {"0", "123", "abc"}
| ArtifactIndexGuardTestCase |
python | django__django | django/db/migrations/serializer.py | {
"start": 5057,
"end": 5589
} | class ____(BaseSerializer):
def serialize(self):
enum_class = self.value.__class__
module = enum_class.__module__
if issubclass(enum_class, enum.Flag):
members = list(self.value)
else:
members = (self.value,)
return (
" | ".join(
[
f"{module}.{enum_class.__qualname__}[{item.name!r}]"
for item in members
]
),
{"import %s" % module},
)
| EnumSerializer |
python | docker__docker-py | tests/unit/utils_test.py | {
"start": 16779,
"end": 22544
} | class ____(unittest.TestCase):
def test_split_port_with_host_ip(self):
internal_port, external_port = split_port("127.0.0.1:1000:2000")
assert internal_port == ["2000"]
assert external_port == [("127.0.0.1", "1000")]
def test_split_port_with_protocol(self):
for protocol in ['tcp', 'udp', 'sctp']:
internal_port, external_port = split_port(
f"127.0.0.1:1000:2000/{protocol}"
)
assert internal_port == [f"2000/{protocol}"]
assert external_port == [("127.0.0.1", "1000")]
def test_split_port_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000")
assert internal_port == ["2000"]
assert external_port == [("127.0.0.1", None)]
def test_split_port_range_with_host_ip_no_port(self):
internal_port, external_port = split_port("127.0.0.1::2000-2001")
assert internal_port == ["2000", "2001"]
assert external_port == [("127.0.0.1", None), ("127.0.0.1", None)]
def test_split_port_with_host_port(self):
internal_port, external_port = split_port("1000:2000")
assert internal_port == ["2000"]
assert external_port == ["1000"]
def test_split_port_range_with_host_port(self):
internal_port, external_port = split_port("1000-1001:2000-2001")
assert internal_port == ["2000", "2001"]
assert external_port == ["1000", "1001"]
def test_split_port_random_port_range_with_host_port(self):
internal_port, external_port = split_port("1000-1001:2000")
assert internal_port == ["2000"]
assert external_port == ["1000-1001"]
def test_split_port_no_host_port(self):
internal_port, external_port = split_port("2000")
assert internal_port == ["2000"]
assert external_port is None
def test_split_port_range_no_host_port(self):
internal_port, external_port = split_port("2000-2001")
assert internal_port == ["2000", "2001"]
assert external_port is None
def test_split_port_range_with_protocol(self):
internal_port, external_port = split_port(
"127.0.0.1:1000-1001:2000-2001/udp")
assert internal_port == ["2000/udp", "2001/udp"]
assert external_port == [("127.0.0.1", "1000"), ("127.0.0.1", "1001")]
def test_split_port_with_ipv6_address(self):
internal_port, external_port = split_port(
"2001:abcd:ef00::2:1000:2000")
assert internal_port == ["2000"]
assert external_port == [("2001:abcd:ef00::2", "1000")]
def test_split_port_with_ipv6_square_brackets_address(self):
internal_port, external_port = split_port(
"[2001:abcd:ef00::2]:1000:2000")
assert internal_port == ["2000"]
assert external_port == [("2001:abcd:ef00::2", "1000")]
def test_split_port_invalid(self):
with pytest.raises(ValueError):
split_port("0.0.0.0:1000:2000:tcp")
def test_split_port_invalid_protocol(self):
with pytest.raises(ValueError):
split_port("0.0.0.0:1000:2000/ftp")
def test_non_matching_length_port_ranges(self):
with pytest.raises(ValueError):
split_port("0.0.0.0:1000-1010:2000-2002/tcp")
def test_port_and_range_invalid(self):
with pytest.raises(ValueError):
split_port("0.0.0.0:1000:2000-2002/tcp")
def test_port_only_with_colon(self):
with pytest.raises(ValueError):
split_port(":80")
def test_host_only_with_colon(self):
with pytest.raises(ValueError):
split_port("localhost:")
def test_with_no_container_port(self):
with pytest.raises(ValueError):
split_port("localhost:80:")
def test_split_port_empty_string(self):
with pytest.raises(ValueError):
split_port("")
def test_split_port_non_string(self):
assert split_port(1243) == (['1243'], None)
def test_build_port_bindings_with_one_port(self):
port_bindings = build_port_bindings(["127.0.0.1:1000:1000"])
assert port_bindings["1000"] == [("127.0.0.1", "1000")]
def test_build_port_bindings_with_matching_internal_ports(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:1000"])
assert port_bindings["1000"] == [
("127.0.0.1", "1000"), ("127.0.0.1", "2000")
]
def test_build_port_bindings_with_nonmatching_internal_ports(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
assert port_bindings["1000"] == [("127.0.0.1", "1000")]
assert port_bindings["2000"] == [("127.0.0.1", "2000")]
def test_build_port_bindings_with_port_range(self):
port_bindings = build_port_bindings(["127.0.0.1:1000-1001:1000-1001"])
assert port_bindings["1000"] == [("127.0.0.1", "1000")]
assert port_bindings["1001"] == [("127.0.0.1", "1001")]
def test_build_port_bindings_with_matching_internal_port_ranges(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000-1001:1000-1001", "127.0.0.1:2000-2001:1000-1001"])
assert port_bindings["1000"] == [
("127.0.0.1", "1000"), ("127.0.0.1", "2000")
]
assert port_bindings["1001"] == [
("127.0.0.1", "1001"), ("127.0.0.1", "2001")
]
def test_build_port_bindings_with_nonmatching_internal_port_ranges(self):
port_bindings = build_port_bindings(
["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
assert port_bindings["1000"] == [("127.0.0.1", "1000")]
assert port_bindings["2000"] == [("127.0.0.1", "2000")]
| PortsTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.