language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | walkccc__LeetCode | solutions/906. Super Palindromes/906.py | {
"start": 0,
"end": 919
} | class ____:
def superpalindromesInRange(self, left: str, right: str) -> int:
def nextPalindrome(num: int) -> int:
s = str(num)
n = len(s)
half = s[0:(n + 1) // 2]
reversedHalf = half[:n // 2][::-1]
candidate = int(half + reversedHalf)
if candidate >= num:
return candidate
half = str(int(half) + 1)
reversedHalf = half[:n // 2][::-1]
return int(half + reversedHalf)
def isPalindrome(num: int) -> bool:
s = str(num)
l = 0
r = len(s) - 1
while l < r:
if s[l] != s[r]:
return False
l += 1
r -= 1
return True
ans = 0
l = int(left)
r = int(right)
i = math.isqrt(l)
while i * i <= r:
palindrome = nextPalindrome(i)
squared = palindrome**2
if squared <= r and isPalindrome(squared):
ans += 1
i = palindrome + 1
return ans
| Solution |
python | allegroai__clearml | clearml/automation/parameters.py | {
"start": 9561,
"end": 10604
} | class ____(Parameter):
"""
Discrete randomly sampled hyperparameter object.
"""
def __init__(self, name: str, values: Sequence[Any] = ()) -> ():
"""
Uniformly sample values form a list of discrete options.
:param str name: The parameter name. Match the task hyperparameter name.
:param list values: The list/tuple of valid parameter values to sample from.
"""
super(DiscreteParameterRange, self).__init__(name=name)
self.values = values
def get_value(self) -> Mapping[str, Any]:
"""
Return uniformly sampled value from the valid list of values.
:return: ``{self.name: random entry from self.value}``
"""
return {self.name: self._random.choice(self.values)}
def to_list(self) -> Sequence[Mapping[str, Any]]:
"""
Return a list of all the valid values of the Parameter.
:return: list of dicts ``{name: value}``
"""
return [{self.name: v} for v in self.values]
| DiscreteParameterRange |
python | huggingface__transformers | src/transformers/models/prophetnet/tokenization_prophetnet.py | {
"start": 7919,
"end": 10152
} | class ____:
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""
Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
tokenization using the given vocabulary.
For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through *BasicTokenizer*.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip("\n")
vocab[token] = index
return vocab
| WordpieceTokenizer |
python | ansible__ansible | test/lib/ansible_test/_internal/debugging.py | {
"start": 995,
"end": 3709
} | class ____(metaclass=abc.ABCMeta):
"""Common debugger settings."""
port: int = 5678
"""
The port on the origin host which is listening for incoming connections from the debugger.
SSH port forwarding will be automatically configured for non-local hosts to connect to this port as needed.
"""
def as_dict(self) -> dict[str, object]:
"""Convert this instance to a dict."""
data = dataclasses.asdict(self)
data.update(__type__=self.__class__.__name__)
return data
@classmethod
def from_dict(cls, value: dict[str, t.Any]) -> t.Self:
"""Load an instance from a dict."""
debug_cls = globals()[value.pop('__type__')]
return debug_cls(**value)
@classmethod
def get_debug_type(cls) -> str:
"""Return the name for this debugger."""
return cls.__name__.removesuffix('Settings').lower()
@classmethod
def get_config_env_var_name(cls) -> str:
"""Return the name of the environment variable used to customize settings for this debugger."""
return f'ANSIBLE_TEST_REMOTE_DEBUGGER_{cls.get_debug_type().upper()}'
@classmethod
def parse(cls, value: str) -> t.Self:
"""Parse debugger settings from the given JSON and apply defaults."""
try:
settings = cls(**json.loads(value))
except Exception as ex:
raise ApplicationError(f"Invalid {cls.get_debug_type()} settings: {ex}") from ex
return cls.apply_defaults(settings)
@classmethod
@abc.abstractmethod
def is_active(cls) -> bool:
"""Detect if the debugger is active."""
@classmethod
@abc.abstractmethod
def apply_defaults(cls, settings: t.Self) -> t.Self:
"""Apply defaults to the given settings."""
@abc.abstractmethod
def get_python_package(self) -> str:
"""The Python package to install for debugging."""
@abc.abstractmethod
def activate_debugger(self, profile: DebuggerProfile) -> None:
"""Activate the debugger in ansible-test after delegation."""
@abc.abstractmethod
def get_ansiballz_config(self, profile: DebuggerProfile) -> dict[str, object]:
"""Gets the extra configuration data for the AnsiballZ extension module."""
@abc.abstractmethod
def get_cli_arguments(self, profile: DebuggerProfile) -> list[str]:
"""Get command line arguments for the debugger when running Ansible CLI programs."""
@abc.abstractmethod
def get_environment_variables(self, profile: DebuggerProfile) -> dict[str, str]:
"""Get environment variables needed to configure the debugger for debugging."""
@dataclasses.dataclass(frozen=True, kw_only=True)
| DebuggerSettings |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/initVar1.py | {
"start": 144,
"end": 411
} | class ____:
init_var1: InitVarAlias[int]
init_var2: InitVar[int]
not_init_var1: int
c = Container(1, 2, 3)
reveal_type(c.not_init_var1, expected_text="int")
# This should generate an error
c.init_var1
# This should generate an error
c.init_var2
| Container |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/eradicate/ERA001.py | {
"start": 263,
"end": 1537
} | class ____():
pass
# b = c
dictionary = {
# "key1": 123, # noqa: ERA001
# "key2": 456,
# "key3": 789, # test
}
#import os # noqa
# case 1:
# try:
# try: # with comment
# try: print()
# except:
# except Foo:
# except Exception as e: print(e)
# Script tag without an opening tag (Error)
# requires-python = ">=3.11"
# dependencies = [
# "requests<3",
# "rich",
# ]
# ///
# Script tag (OK)
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "requests<3",
# "rich",
# ]
# ///
# Script tag with multiple closing tags (OK)
# /// script
# [tool.uv]
# extra-index-url = ["https://pypi.org/simple", """\
# https://example.com/
# ///
# """
# ]
# ///
print(1)
# Script tag without a closing tag (Error)
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "requests<3",
# "rich",
# ]
# Script tag block followed by normal block (Ok)
# /// script
# # https://github.com/astral-sh/ruff/issues/15321
# requires-python = ">=3.12"
# dependencies = [
# "requests<3",
# "rich",
# ]
# ///
#
# Foobar
# Regression tests for https://github.com/astral-sh/ruff/issues/19713
# mypy: ignore-errors
# pyright: ignore-errors
# pyrefly: ignore-errors
# ty: ignore[unresolved-import]
# pyrefly: ignore[unused-import]
print(1)
| A |
python | ApeWorX__ape | src/ape_ethereum/ecosystem.py | {
"start": 11295,
"end": 11537
} | class ____(BaseEthereumConfig):
mainnet: NetworkConfig = create_network_config(block_time=13)
holesky: NetworkConfig = create_network_config(block_time=13)
sepolia: NetworkConfig = create_network_config(block_time=15)
| EthereumConfig |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 48570,
"end": 48758
} | class ____(VOWarning, ValueError):
"""
All ``TIMESYS`` elements must have an ``ID`` attribute.
"""
message_template = "ID attribute is required for all TIMESYS elements"
| E22 |
python | getsentry__sentry | src/sentry/models/groupowner.py | {
"start": 1711,
"end": 1802
} | class ____(TypedDict):
type: str
owner: str
date_added: datetime
| OwnersSerialized |
python | cython__cython | docs/examples/tutorial/clibraries/queue3.py | {
"start": 75,
"end": 2303
} | class ____:
"""A queue class for C integer values.
>>> q = Queue()
>>> q.append(5)
>>> q.peek()
5
>>> q.pop()
5
"""
_c_queue = cython.declare(cython.pointer[cqueue.Queue])
def __cinit__(self):
self._c_queue = cqueue.queue_new()
if self._c_queue is cython.NULL:
raise MemoryError()
def __dealloc__(self):
if self._c_queue is not cython.NULL:
cqueue.queue_free(self._c_queue)
@cython.ccall
def append(self, value: cython.int):
if not cqueue.queue_push_tail(self._c_queue,
cast(cython.p_void, cast(cython.Py_ssize_t, value))):
raise MemoryError()
# The `cpdef` feature is obviously not available for the original "extend()"
# method, as the method signature is incompatible with Python argument
# types (Python does not have pointers). However, we can rename
# the C-ish "extend()" method to e.g. "extend_ints()", and write
# a new "extend()" method that provides a suitable Python interface by
# accepting an arbitrary Python iterable.
@cython.ccall
def extend(self, values):
for value in values:
self.append(value)
@cython.cfunc
def extend_ints(self, values: cython.p_int, count: cython.size_t):
value: cython.int
for value in values[:count]: # Slicing pointer to limit the iteration boundaries.
self.append(value)
@cython.ccall
@cython.exceptval(-1, check=True)
def peek(self) -> cython.int:
value: cython.int = cast(cython.Py_ssize_t, cqueue.queue_peek_head(self._c_queue))
if value == 0:
# this may mean that the queue is empty,
# or that it happens to contain a 0 value
if cqueue.queue_is_empty(self._c_queue):
raise IndexError("Queue is empty")
return value
@cython.ccall
@cython.exceptval(-1, check=True)
def pop(self) -> cython.int:
if cqueue.queue_is_empty(self._c_queue):
raise IndexError("Queue is empty")
return cast(cython.Py_ssize_t, cqueue.queue_pop_head(self._c_queue))
def __bool__(self):
return not cqueue.queue_is_empty(self._c_queue)
| Queue |
python | bokeh__bokeh | tests/unit/bokeh/util/test_strings.py | {
"start": 1436,
"end": 2198
} | class ____:
def test_no_argument(self) -> None:
doc__ = "hello world"
assert bus.format_docstring(doc__) == doc__
doc__ = None
assert bus.format_docstring(doc__) is None
def test_arguments_unused(self) -> None:
doc__ = "hello world"
assert bus.format_docstring(doc__, 'hello ', not_used='world') == doc__
doc__ = None
assert bus.format_docstring(doc__, 'hello ', not_used='world') is None
def test_arguments(self) -> None:
doc__ = "-- {}{as_parameter} --"
assert bus.format_docstring(doc__, 'hello ', as_parameter='world') == "-- hello world --"
doc__ = None
assert bus.format_docstring(doc__, 'hello ', as_parameter='world') is None
| Test_format_docstring |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 220212,
"end": 220523
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("CheckRun", graphql_name="node")
| CheckRunEdge |
python | chroma-core__chroma | chromadb/server/fastapi/types.py | {
"start": 648,
"end": 908
} | class ____(BaseModel):
embeddings: Optional[List[Any]] = None
metadatas: Optional[List[Optional[Dict[Any, Any]]]] = None
documents: Optional[List[Optional[str]]] = None
uris: Optional[List[Optional[str]]] = None
ids: List[str]
| UpdateEmbedding |
python | gevent__gevent | src/gevent/tests/known_failures.py | {
"start": 3952,
"end": 4249
} | class ____(_Definition):
__slots__ = (
'reason',
)
def __init__(self, reason='', when=ALWAYS, run_alone=NEVER, ignore_coverage=NEVER,
options=None):
_Definition.__init__(self, when, run_alone, ignore_coverage, options)
self.reason = reason
| _Action |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/alloy_db.py | {
"start": 21752,
"end": 27934
} | class ____(AlloyDBWriteBaseOperator):
"""
Create an Instance in an Alloy DB cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AlloyDBCreateInstanceOperator`
:param cluster_id: Required. ID of the cluster for creating an instance in.
:param instance_id: Required. ID of the instance to create.
:param instance_configuration: Required. Instance to create. For more details please see API documentation:
https://cloud.google.com/python/docs/reference/alloydb/latest/google.cloud.alloydb_v1.types.Instance
:param is_secondary: Required. Specifies if the Instance to be created is Primary or Secondary.
Please note, if set True, then specify the `instance_type` field in the instance.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param validate_request: Optional. If set, performs request validation, but does not actually
execute the request.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the service is used.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"cluster_id", "instance_id", "is_secondary", "instance_configuration"}
| set(AlloyDBWriteBaseOperator.template_fields)
)
operator_extra_links = (AlloyDBClusterLink(),)
def __init__(
self,
cluster_id: str,
instance_id: str,
instance_configuration: alloydb_v1.Instance | dict,
is_secondary: bool = False,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.cluster_id = cluster_id
self.instance_id = instance_id
self.instance_configuration = instance_configuration
self.is_secondary = is_secondary
def _get_instance(self) -> proto.Message | None:
self.log.info("Checking if the instance %s exists already...", self.instance_id)
try:
instance = self.hook.get_instance(
cluster_id=self.cluster_id,
instance_id=self.instance_id,
location=self.location,
project_id=self.project_id,
)
except NotFound:
self.log.info("The instance %s does not exist yet.", self.instance_id)
except Exception as ex:
raise AirflowException(ex) from ex
else:
self.log.info(
"AlloyDB instance %s already exists in the cluster %s.",
self.cluster_id,
self.instance_id,
)
result = alloydb_v1.Instance.to_dict(instance)
return result
return None
@property
def extra_links_params(self) -> dict[str, Any]:
return {
"location_id": self.location,
"cluster_id": self.cluster_id,
"project_id": self.project_id,
}
def execute(self, context: Context) -> dict | None:
AlloyDBClusterLink.persist(context=context)
if instance := self._get_instance():
return instance
if self.validate_request:
self.log.info("Validating a Create AlloyDB instance request.")
else:
self.log.info("Creating an AlloyDB instance.")
try:
create_method = (
self.hook.create_secondary_instance if self.is_secondary else self.hook.create_instance
)
operation = create_method(
cluster_id=self.cluster_id,
instance_id=self.instance_id,
instance=self.instance_configuration,
location=self.location,
project_id=self.project_id,
request_id=self.request_id,
validate_only=self.validate_request,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except Exception as ex:
raise AirflowException(ex)
else:
operation_result = self.get_operation_result(operation)
result = alloydb_v1.Instance.to_dict(operation_result) if operation_result else None
return result
| AlloyDBCreateInstanceOperator |
python | tiangolo__fastapi | docs_src/response_model/tutorial002.py | {
"start": 114,
"end": 350
} | class ____(BaseModel):
username: str
password: str
email: EmailStr
full_name: Union[str, None] = None
# Don't do this in production!
@app.post("/user/")
async def create_user(user: UserIn) -> UserIn:
return user
| UserIn |
python | doocs__leetcode | solution/2600-2699/2644.Find the Maximum Divisibility Score/Solution.py | {
"start": 0,
"end": 347
} | class ____:
def maxDivScore(self, nums: List[int], divisors: List[int]) -> int:
ans, mx = divisors[0], 0
for div in divisors:
cnt = sum(x % div == 0 for x in nums)
if mx < cnt:
mx, ans = cnt, div
elif mx == cnt and ans > div:
ans = div
return ans
| Solution |
python | google__jax | jax/_src/monitoring.py | {
"start": 1050,
"end": 1212
} | class ____(Protocol):
def __call__(self, event: str, duration_secs: float,
**kwargs: str | int) -> None:
...
| EventDurationListenerWithMetadata |
python | davidhalter__jedi | test/completion/recursion.py | {
"start": 1240,
"end": 1340
} | class ____:
def a(self, b):
for x in [self.a(i) for i in b]:
#?
x
| A |
python | kamyu104__LeetCode-Solutions | Python/minimum-area-rectangle.py | {
"start": 992,
"end": 1467
} | class ____(object):
def minAreaRect(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
lookup = set()
result = float("inf")
for x1, y1 in points:
for x2, y2 in lookup:
if (x1, y2) in lookup and (x2, y1) in lookup:
result = min(result, abs(x1-x2) * abs(y1-y2))
lookup.add((x1, y1))
return result if result != float("inf") else 0
| Solution2 |
python | protocolbuffers__protobuf | src/google/protobuf/util/python/field_mask_util_test.py | {
"start": 805,
"end": 5420
} | class ____(parameterized.TestCase):
def test_merge_message_to_simple(self):
source = timestamp_pb2.Timestamp(seconds=1, nanos=2)
mask = field_mask_pb2.FieldMask(paths=["seconds"])
destination = timestamp_pb2.Timestamp(seconds=3, nanos=4)
result = field_mask_util.FieldMaskUtil.MergeMessageTo(
source, mask, _DEFAULT_MERGE_OPTIONS, destination
)
self.assertEqual(result.seconds, 1)
@hp.given(
source=timestamp_strategy(),
mask=field_mask_strategy(),
)
def test_merge_message_to_hp(
self, source: timestamp_pb2.Timestamp, mask: field_mask_pb2.FieldMask
):
destination = timestamp_pb2.Timestamp(
seconds=_ORIGINAL_VALUE, nanos=_ORIGINAL_VALUE
)
result = field_mask_util.FieldMaskUtil.MergeMessageTo(
source, mask, _DEFAULT_MERGE_OPTIONS, destination
)
if "seconds" in mask.paths:
self.assertEqual(result.seconds, source.seconds)
else:
self.assertEqual(result.seconds, _ORIGINAL_VALUE)
if "nanos" in mask.paths:
self.assertEqual(result.nanos, source.nanos)
else:
self.assertEqual(result.nanos, _ORIGINAL_VALUE)
@parameterized.named_parameters(
dict(
testcase_name="replace_repeated_fields",
replace_repeated_fields=True,
expected_values=[struct_pb2.Value(string_value="hello")],
),
dict(
testcase_name="not_replace_repeated_fields",
replace_repeated_fields=False,
expected_values=[
struct_pb2.Value(string_value="world"),
struct_pb2.Value(string_value="hello"),
],
),
)
def test_merge_message_to_repeated_fields(
self,
replace_repeated_fields,
expected_values,
):
source = struct_pb2.ListValue(
values=[struct_pb2.Value(string_value="hello")]
)
mask = field_mask_pb2.FieldMask(paths=["values"])
options = field_mask_util.FieldMaskUtil.MergeOptions()
options.replace_repeated_fields = replace_repeated_fields
destination = struct_pb2.ListValue(
values=[struct_pb2.Value(string_value="world")]
)
result = field_mask_util.FieldMaskUtil.MergeMessageTo(
source, mask, options, destination
)
self.assertListEqual(list(result.values), expected_values)
@parameterized.named_parameters(
dict(
testcase_name="replace_message_fields",
replace_message_fields=True,
expected_nanos=0,
),
dict(
testcase_name="not_replace_message_fields",
replace_message_fields=False,
expected_nanos=4,
),
)
def test_merge_message_to_message_fields(
self,
replace_message_fields,
expected_nanos,
):
source = test_messages_pb2.TimestampWrapper(
timestamp=timestamp_pb2.Timestamp(seconds=1)
)
mask = field_mask_pb2.FieldMask(paths=["timestamp"])
options = field_mask_util.FieldMaskUtil.MergeOptions()
options.replace_message_fields = replace_message_fields
destination = test_messages_pb2.TimestampWrapper(
timestamp=timestamp_pb2.Timestamp(seconds=3, nanos=4)
)
result = field_mask_util.FieldMaskUtil.MergeMessageTo(
source, mask, options, destination
)
self.assertEqual(result.timestamp.seconds, 1)
self.assertEqual(result.timestamp.nanos, expected_nanos)
def test_nested_message_fields(self):
source = test_messages_pb2.TimestampWrapper(
timestamp=timestamp_pb2.Timestamp(seconds=1, nanos=2)
)
mask = field_mask_pb2.FieldMask(paths=["timestamp.seconds"])
destination = test_messages_pb2.TimestampWrapper(
timestamp=timestamp_pb2.Timestamp(seconds=3, nanos=4)
)
result = field_mask_util.FieldMaskUtil.MergeMessageTo(
source, mask, _DEFAULT_MERGE_OPTIONS, destination
)
self.assertEqual(result.timestamp.seconds, 1)
self.assertEqual(result.timestamp.nanos, 4)
# This is not compliant with https://google.aip.dev/161#map-fields
# The Python implementation only intends to wrap the C++ one that works this
# way.
def test_map_keys_are_unsupported(self):
source = test_messages_pb2.MapWrapper(
map={"key1": "new_value", "key2": "new_value"}
)
mask = field_mask_pb2.FieldMask(paths=["map.key2"])
destination = test_messages_pb2.MapWrapper(map={"key1": "original_value"})
result = field_mask_util.FieldMaskUtil.MergeMessageTo(
source, mask, _DEFAULT_MERGE_OPTIONS, destination
)
self.assertDictEqual(dict(result.map), {"key1": "original_value"})
if __name__ == "__main__":
unittest.main()
| FieldMaskUtilTest |
python | sqlalchemy__sqlalchemy | test/ext/test_associationproxy.py | {
"start": 95480,
"end": 96900
} | class ____(fixtures.DeclarativeMappedTest):
run_define_tables = None
run_create_tables = None
run_inserts = None
run_deletes = None
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Foo(Base):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
foo = Column(String) # assume some composite datatype
bar = association_proxy("foo", "attr")
def test_setattr(self):
Foo = self.classes.Foo
f1 = Foo()
assert_raises_message(
NotImplementedError,
"association proxy to a non-relationship "
"intermediary is not supported",
setattr,
f1,
"bar",
"asdf",
)
def test_getattr(self):
Foo = self.classes.Foo
f1 = Foo()
assert_raises_message(
NotImplementedError,
"association proxy to a non-relationship "
"intermediary is not supported",
getattr,
f1,
"bar",
)
def test_get_class_attr(self):
Foo = self.classes.Foo
assert_raises_message(
NotImplementedError,
"association proxy to a non-relationship "
"intermediary is not supported",
getattr,
Foo,
"bar",
)
| OnlyRelationshipTest |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/ops.py | {
"start": 469,
"end": 4273
} | class ____(Config):
connection_id: str = Field(
...,
description=(
"Parsed json dictionary representing the details of the Airbyte connector after the"
" sync successfully completes. See the [Airbyte API"
" Docs](https://airbyte-public-api-docs.s3.us-east-2.amazonaws.com/rapidoc-api-docs.html#overview)"
" to see detailed information on this response."
),
)
poll_interval: float = Field(
DEFAULT_POLL_INTERVAL_SECONDS,
description=(
"The maximum time that will waited before this operation is timed out. By "
"default, this will never time out."
),
)
poll_timeout: Optional[float] = Field(
None,
description=(
"The maximum time that will waited before this operation is timed out. By "
"default, this will never time out."
),
)
yield_materializations: bool = Field(
True,
description=(
"If True, materializations corresponding to the results of the Airbyte sync will "
"be yielded when the op executes."
),
)
asset_key_prefix: list[str] = Field(
["airbyte"],
description=(
"If provided and yield_materializations is True, these components will be used to "
"prefix the generated asset keys."
),
)
@op(
ins={"start_after": In(Nothing)},
out=Out(
AirbyteOutput,
description=(
"Parsed json dictionary representing the details of the Airbyte connector after the"
" sync successfully completes. See the [Airbyte API"
" Docs](https://airbyte-public-api-docs.s3.us-east-2.amazonaws.com/rapidoc-api-docs.html#overview)"
" to see detailed information on this response."
),
),
tags={COMPUTE_KIND_TAG: "airbyte"},
)
def airbyte_sync_op(
context, config: AirbyteSyncConfig, airbyte: BaseAirbyteResource
) -> Iterable[Any]:
"""Executes a Airbyte job sync for a given ``connection_id``, and polls until that sync
completes, raising an error if it is unsuccessful. It outputs a AirbyteOutput which contains
the job details for a given ``connection_id``.
It requires the use of the :py:class:`~dagster_airbyte.airbyte_resource`, which allows it to
communicate with the Airbyte API.
Examples:
.. code-block:: python
from dagster import job
from dagster_airbyte import airbyte_resource, airbyte_sync_op
my_airbyte_resource = airbyte_resource.configured(
{
"host": {"env": "AIRBYTE_HOST"},
"port": {"env": "AIRBYTE_PORT"},
}
)
sync_foobar = airbyte_sync_op.configured({"connection_id": "foobar"}, name="sync_foobar")
@job(resource_defs={"airbyte": my_airbyte_resource})
def my_simple_airbyte_job():
sync_foobar()
@job(resource_defs={"airbyte": my_airbyte_resource})
def my_composed_airbyte_job():
final_foobar_state = sync_foobar(start_after=some_op())
other_op(final_foobar_state)
"""
airbyte_output = airbyte.sync_and_poll(
connection_id=config.connection_id,
poll_interval=config.poll_interval,
poll_timeout=config.poll_timeout,
)
if config.yield_materializations:
yield from generate_materializations(
airbyte_output, asset_key_prefix=config.asset_key_prefix
)
yield Output(
airbyte_output,
metadata={
**_get_attempt(airbyte_output.job_details.get("attempts", [{}])[-1]).get(
"totalStats", {}
)
},
)
| AirbyteSyncConfig |
python | doocs__leetcode | solution/1500-1599/1519.Number of Nodes in the Sub-Tree With the Same Label/Solution.py | {
"start": 0,
"end": 514
} | class ____:
def countSubTrees(self, n: int, edges: List[List[int]], labels: str) -> List[int]:
def dfs(i, fa):
ans[i] -= cnt[labels[i]]
cnt[labels[i]] += 1
for j in g[i]:
if j != fa:
dfs(j, i)
ans[i] += cnt[labels[i]]
g = defaultdict(list)
for a, b in edges:
g[a].append(b)
g[b].append(a)
cnt = Counter()
ans = [0] * n
dfs(0, -1)
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/cracking-the-safe.py | {
"start": 2387,
"end": 3121
} | class ____(object):
def crackSafe(self, n, k):
"""
:type n: int
:type k: int
:rtype: str
"""
result = [str(k-1)]*(n-1)
lookup = set()
total = k**n
while len(lookup) < total:
node = result[len(result)-n+1:]
for i in xrange(k): # preorder like traversal relative to initial result to avoid getting stuck, i.e. don't use k-1 until there is no other choice
neighbor = "".join(node) + str(i)
if neighbor not in lookup:
lookup.add(neighbor)
result.append(str(i))
break
return "".join(result)
# Time: O(n * k^n)
# Space: O(n * k^n)
| Solution4 |
python | google__jax | jax/_src/config.py | {
"start": 7474,
"end": 7542
} | class ____: pass
no_default = NoDefault()
config_states = {}
| NoDefault |
python | getsentry__sentry | src/sentry/hybridcloud/rpc/pagination.py | {
"start": 672,
"end": 2007
} | class ____(RpcModel):
encoded_cursor: str | None = None
per_page: int = -1
@classmethod
def from_endpoint_request(cls, e: "Endpoint", request: Request) -> "RpcPaginationArgs":
return RpcPaginationArgs(
encoded_cursor=request.GET.get(e.cursor_name), per_page=e.get_per_page(request)
)
def do_hybrid_cloud_pagination(
self,
*,
description: str,
paginator_cls: type[PaginatorLike],
order_by: str,
queryset: Any,
cursor_cls: type[Cursor] = Cursor,
count_hits: bool | None = None,
) -> "RpcPaginationResult":
cursor = get_cursor(self.encoded_cursor, cursor_cls)
with sentry_sdk.start_span(
op="hybrid_cloud.paginate.get_result",
name=description,
) as span:
annotate_span_with_pagination_args(span, self.per_page)
paginator = get_paginator(
None, paginator_cls, dict(order_by=order_by, queryset=queryset.values("id"))
)
extra_args: Any = {}
if count_hits is not None:
extra_args["count_hits"] = count_hits
return RpcPaginationResult.from_cursor_result(
paginator.get_result(limit=self.per_page, cursor=cursor, **extra_args)
)
| RpcPaginationArgs |
python | huggingface__transformers | src/transformers/models/dpr/tokenization_dpr_fast.py | {
"start": 15274,
"end": 16103
} | class ____(CustomDPRReaderTokenizerMixin, BertTokenizer):
r"""
Constructs a "fast" DPRReader tokenizer (backed by HuggingFace's *tokenizers* library).
[`DPRReaderTokenizerFast`] is almost identical to [`BertTokenizer`] and runs end-to-end tokenization:
punctuation splitting and wordpiece. The difference is that is has three inputs strings: question, titles and texts
that are combined to be fed to the [`DPRReader`] model.
Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = DPRReaderTokenizer
__all__ = ["DPRContextEncoderTokenizerFast", "DPRQuestionEncoderTokenizerFast", "DPRReaderTokenizerFast"]
| DPRReaderTokenizerFast |
python | coleifer__peewee | peewee.py | {
"start": 187159,
"end": 187538
} | class ____(object):
def __init__(self):
self._refs = []
def set_field(self, model, field, name):
self._refs.append((model, field, name))
def set_model(self, through_model):
for src_model, m2mfield, name in self._refs:
m2mfield.through_model = through_model
src_model._meta.add_field(name, m2mfield)
| DeferredThroughModel |
python | django__django | django/forms/fields.py | {
"start": 10674,
"end": 12682
} | class ____(Field):
widget = NumberInput
default_error_messages = {
"invalid": _("Enter a whole number."),
}
re_decimal = _lazy_re_compile(r"\.0*\s*$")
def __init__(self, *, max_value=None, min_value=None, step_size=None, **kwargs):
self.max_value, self.min_value, self.step_size = max_value, min_value, step_size
if kwargs.get("localize") and self.widget == NumberInput:
# Localized number input is not well supported on most browsers
kwargs.setdefault("widget", super().widget)
super().__init__(**kwargs)
if max_value is not None:
self.validators.append(validators.MaxValueValidator(max_value))
if min_value is not None:
self.validators.append(validators.MinValueValidator(min_value))
if step_size is not None:
self.validators.append(
validators.StepValueValidator(step_size, offset=min_value)
)
def to_python(self, value):
"""
Validate that int() can be called on the input. Return the result
of int() or None for empty values.
"""
value = super().to_python(value)
if value in self.empty_values:
return None
if self.localize:
value = formats.sanitize_separators(value)
# Strip trailing decimal and zeros.
try:
value = int(self.re_decimal.sub("", str(value)))
except (ValueError, TypeError):
raise ValidationError(self.error_messages["invalid"], code="invalid")
return value
def widget_attrs(self, widget):
attrs = super().widget_attrs(widget)
if isinstance(widget, NumberInput):
if self.min_value is not None:
attrs["min"] = self.min_value
if self.max_value is not None:
attrs["max"] = self.max_value
if self.step_size is not None:
attrs["step"] = self.step_size
return attrs
| IntegerField |
python | django__django | django/db/models/functions/comparison.py | {
"start": 5709,
"end": 6409
} | class ____(Func):
"""
Return the minimum expression.
If any expression is null the return value is database-specific:
On PostgreSQL, return the minimum not-null expression.
On MySQL, Oracle, and SQLite, if any expression is null, return null.
"""
function = "LEAST"
def __init__(self, *expressions, **extra):
if len(expressions) < 2:
raise ValueError("Least must take at least two expressions")
super().__init__(*expressions, **extra)
def as_sqlite(self, compiler, connection, **extra_context):
"""Use the MIN function on SQLite."""
return super().as_sqlite(compiler, connection, function="MIN", **extra_context)
| Least |
python | huggingface__transformers | src/transformers/models/grounding_dino/processing_grounding_dino.py | {
"start": 3433,
"end": 3906
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"add_special_tokens": True,
"padding": False,
"stride": 0,
"return_overflowing_tokens": False,
"return_special_tokens_mask": False,
"return_offsets_mapping": False,
"return_token_type_ids": True,
"return_length": False,
"verbose": True,
}
}
| GroundingDinoProcessorKwargs |
python | huggingface__transformers | tests/models/roformer/test_modeling_roformer.py | {
"start": 14065,
"end": 18956
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
RoFormerModel,
RoFormerForMaskedLM,
RoFormerForCausalLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
)
if is_torch_available()
else ()
)
# Doesn't run generation tests. There are interface mismatches when using `generate` -- TODO @gante
all_generative_model_classes = ()
pipeline_model_mapping = (
{
"feature-extraction": RoFormerModel,
"fill-mask": RoFormerForMaskedLM,
"question-answering": RoFormerForQuestionAnswering,
"text-classification": RoFormerForSequenceClassification,
"text-generation": RoFormerForCausalLM,
"token-classification": RoFormerForTokenClassification,
"zero-shot": RoFormerForSequenceClassification,
}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = RoFormerModelTester(self)
self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_generate_causal_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_generate_causal_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_model_as_decoder_with_default_input_mask(self):
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
@slow
def test_model_from_pretrained(self):
model_name = "junnyu/roformer_chinese_small"
model = RoFormerModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@require_torch
| RoFormerModelTest |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 98553,
"end": 99224
} | class ____(Structure):
pass # opaque handle
c_nvmlGpuInstance_t = POINTER(struct_c_nvmlGpuInstance_t)
NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE = 0x0
NVML_COMPUTE_INSTANCE_PROFILE_2_SLICE = 0x1
NVML_COMPUTE_INSTANCE_PROFILE_3_SLICE = 0x2
NVML_COMPUTE_INSTANCE_PROFILE_4_SLICE = 0x3
NVML_COMPUTE_INSTANCE_PROFILE_7_SLICE = 0x4
NVML_COMPUTE_INSTANCE_PROFILE_8_SLICE = 0x5
NVML_COMPUTE_INSTANCE_PROFILE_6_SLICE = 0x6
NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE_REV1 = 0x7
NVML_COMPUTE_INSTANCE_PROFILE_COUNT = 0x8
NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED = 0x0
NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT = 0x1
| struct_c_nvmlGpuInstance_t |
python | pytorch__pytorch | functorch/examples/maml_omniglot/support/omniglot_loaders.py | {
"start": 1040,
"end": 4639
} | class ____(data.Dataset):
urls = [
"https://github.com/brendenlake/omniglot/raw/master/python/images_background.zip",
"https://github.com/brendenlake/omniglot/raw/master/python/images_evaluation.zip",
]
raw_folder = "raw"
processed_folder = "processed"
training_file = "training.pt"
test_file = "test.pt"
"""
The items are (filename,category). The index of all the categories can be found in self.idx_classes
Args:
- root: the directory where the dataset will be stored
- transform: how to transform the input
- target_transform: how to transform the target
- download: need to download the dataset
"""
def __init__(self, root, transform=None, target_transform=None, download=False):
self.root = root
self.transform = transform
self.target_transform = target_transform
if not self._check_exists():
if download:
self.download()
else:
raise RuntimeError(
"Dataset not found." + " You can use download=True to download it"
)
self.all_items = find_classes(os.path.join(self.root, self.processed_folder))
self.idx_classes = index_classes(self.all_items)
def __getitem__(self, index):
filename = self.all_items[index][0]
img = str.join("/", [self.all_items[index][2], filename])
target = self.idx_classes[self.all_items[index][1]]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.all_items)
def _check_exists(self):
return os.path.exists(
os.path.join(self.root, self.processed_folder, "images_evaluation")
) and os.path.exists(
os.path.join(self.root, self.processed_folder, "images_background")
)
def download(self):
import urllib
import zipfile
if self._check_exists():
return
# download files
try:
os.makedirs(os.path.join(self.root, self.raw_folder))
os.makedirs(os.path.join(self.root, self.processed_folder))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
for url in self.urls:
print("== Downloading " + url)
data = urllib.request.urlopen(url)
filename = url.rpartition("/")[2]
file_path = os.path.join(self.root, self.raw_folder, filename)
with open(file_path, "wb") as f:
f.write(data.read())
file_processed = os.path.join(self.root, self.processed_folder)
print("== Unzip from " + file_path + " to " + file_processed)
zip_ref = zipfile.ZipFile(file_path, "r")
zip_ref.extractall(file_processed)
zip_ref.close()
print("Download finished.")
def find_classes(root_dir):
retour = []
for root, dirs, files in os.walk(root_dir):
for f in files:
if f.endswith("png"):
r = root.split("/")
lr = len(r)
retour.append((f, r[lr - 2] + "/" + r[lr - 1], root))
print(f"== Found {len(retour)} items ")
return retour
def index_classes(items):
idx = {}
for i in items:
if i[1] not in idx:
idx[i[1]] = len(idx)
print(f"== Found {len(idx)} classes")
return idx
| Omniglot |
python | doocs__leetcode | solution/1100-1199/1143.Longest Common Subsequence/Solution.py | {
"start": 0,
"end": 451
} | class ____:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
m, n = len(text1), len(text2)
f = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if text1[i - 1] == text2[j - 1]:
f[i][j] = f[i - 1][j - 1] + 1
else:
f[i][j] = max(f[i - 1][j], f[i][j - 1])
return f[m][n]
| Solution |
python | python__mypy | mypy/fscache.py | {
"start": 1241,
"end": 11102
} | class ____:
def __init__(self) -> None:
# The package root is not flushed with the caches.
# It is set by set_package_root() below.
self.package_root: list[str] = []
self.flush()
def set_package_root(self, package_root: list[str]) -> None:
self.package_root = package_root
def flush(self) -> None:
"""Start another transaction and empty all caches."""
self.stat_or_none_cache: dict[str, os.stat_result | None] = {}
self.listdir_cache: dict[str, list[str]] = {}
self.listdir_error_cache: dict[str, OSError] = {}
self.isfile_case_cache: dict[str, bool] = {}
self.exists_case_cache: dict[str, bool] = {}
self.read_cache: dict[str, bytes] = {}
self.read_error_cache: dict[str, Exception] = {}
self.hash_cache: dict[str, str] = {}
self.fake_package_cache: set[str] = set()
def stat_or_none(self, path: str) -> os.stat_result | None:
if path in self.stat_or_none_cache:
return self.stat_or_none_cache[path]
st = None
try:
st = os.stat(path)
except OSError:
if self.init_under_package_root(path):
try:
st = self._fake_init(path)
except OSError:
pass
self.stat_or_none_cache[path] = st
return st
def init_under_package_root(self, path: str) -> bool:
"""Is this path an __init__.py under a package root?
This is used to detect packages that don't contain __init__.py
files, which is needed to support Bazel. The function should
only be called for non-existing files.
It will return True if it refers to a __init__.py file that
Bazel would create, so that at runtime Python would think the
directory containing it is a package. For this to work you
must pass one or more package roots using the --package-root
flag.
As an exceptional case, any directory that is a package root
itself will not be considered to contain a __init__.py file.
This is different from the rules Bazel itself applies, but is
necessary for mypy to properly distinguish packages from other
directories.
See https://docs.bazel.build/versions/master/be/python.html,
where this behavior is described under legacy_create_init.
"""
if not self.package_root:
return False
dirname, basename = os.path.split(path)
if basename != "__init__.py":
return False
if not os.path.basename(dirname).isidentifier():
# Can't put an __init__.py in a place that's not an identifier
return False
st = self.stat_or_none(dirname)
if st is None:
return False
else:
if not stat.S_ISDIR(st.st_mode):
return False
ok = False
# skip if on a different drive
current_drive, _ = os.path.splitdrive(os.getcwd())
drive, _ = os.path.splitdrive(path)
if drive != current_drive:
return False
if os.path.isabs(path):
path = os.path.relpath(path)
path = os.path.normpath(path)
for root in self.package_root:
if path.startswith(root):
if path == root + basename:
# A package root itself is never a package.
ok = False
break
else:
ok = True
return ok
def _fake_init(self, path: str) -> os.stat_result:
"""Prime the cache with a fake __init__.py file.
This makes code that looks for path believe an empty file by
that name exists. Should only be called after
init_under_package_root() returns True.
"""
dirname, basename = os.path.split(path)
assert basename == "__init__.py", path
assert not os.path.exists(path), path # Not cached!
dirname = os.path.normpath(dirname)
st = os.stat(dirname) # May raise OSError
# Get stat result as a list so we can modify it.
seq: list[float] = list(st)
seq[stat.ST_MODE] = stat.S_IFREG | 0o444
seq[stat.ST_INO] = 1
seq[stat.ST_NLINK] = 1
seq[stat.ST_SIZE] = 0
st = os.stat_result(seq)
# Make listdir() and read() also pretend this file exists.
self.fake_package_cache.add(dirname)
return st
def listdir(self, path: str) -> list[str]:
path = os.path.normpath(path)
if path in self.listdir_cache:
res = self.listdir_cache[path]
# Check the fake cache.
if path in self.fake_package_cache and "__init__.py" not in res:
res.append("__init__.py") # Updates the result as well as the cache
return res
if path in self.listdir_error_cache:
raise copy_os_error(self.listdir_error_cache[path])
try:
results = os.listdir(path)
except OSError as err:
# Like above, take a copy to reduce memory use.
self.listdir_error_cache[path] = copy_os_error(err)
raise err
self.listdir_cache[path] = results
# Check the fake cache.
if path in self.fake_package_cache and "__init__.py" not in results:
results.append("__init__.py")
return results
def isfile(self, path: str) -> bool:
st = self.stat_or_none(path)
if st is None:
return False
return stat.S_ISREG(st.st_mode)
def isfile_case(self, path: str, prefix: str) -> bool:
"""Return whether path exists and is a file.
On case-insensitive filesystems (like Mac or Windows) this returns
False if the case of path's last component does not exactly match
the case found in the filesystem.
We check also the case of other path components up to prefix.
For example, if path is 'user-stubs/pack/mod.pyi' and prefix is 'user-stubs',
we check that the case of 'pack' and 'mod.py' matches exactly, 'user-stubs' will be
case insensitive on case insensitive filesystems.
The caller must ensure that prefix is a valid file system prefix of path.
"""
if not self.isfile(path):
# Fast path
return False
if path in self.isfile_case_cache:
return self.isfile_case_cache[path]
head, tail = os.path.split(path)
if not tail:
self.isfile_case_cache[path] = False
return False
try:
names = self.listdir(head)
# This allows one to check file name case sensitively in
# case-insensitive filesystems.
res = tail in names
except OSError:
res = False
if res:
# Also recursively check the other path components in case sensitive way.
res = self.exists_case(head, prefix)
self.isfile_case_cache[path] = res
return res
def exists_case(self, path: str, prefix: str) -> bool:
"""Return whether path exists - checking path components in case sensitive
fashion, up to prefix.
"""
if path in self.exists_case_cache:
return self.exists_case_cache[path]
head, tail = os.path.split(path)
if not head.startswith(prefix) or not tail:
# Only perform the check for paths under prefix.
self.exists_case_cache[path] = True
return True
try:
names = self.listdir(head)
# This allows one to check file name case sensitively in
# case-insensitive filesystems.
res = tail in names
except OSError:
res = False
if res:
# Also recursively check other path components.
res = self.exists_case(head, prefix)
self.exists_case_cache[path] = res
return res
def isdir(self, path: str) -> bool:
st = self.stat_or_none(path)
if st is None:
return False
return stat.S_ISDIR(st.st_mode)
def exists(self, path: str) -> bool:
st = self.stat_or_none(path)
return st is not None
def read(self, path: str) -> bytes:
if path in self.read_cache:
return self.read_cache[path]
if path in self.read_error_cache:
raise self.read_error_cache[path]
# Need to stat first so that the contents of file are from no
# earlier instant than the mtime reported by self.stat().
self.stat_or_none(path)
dirname, basename = os.path.split(path)
dirname = os.path.normpath(dirname)
# Check the fake cache.
if basename == "__init__.py" and dirname in self.fake_package_cache:
data = b""
else:
try:
with open(path, "rb") as f:
data = f.read()
except OSError as err:
self.read_error_cache[path] = err
raise
self.read_cache[path] = data
self.hash_cache[path] = hash_digest(data)
return data
def hash_digest(self, path: str) -> str:
if path not in self.hash_cache:
self.read(path)
return self.hash_cache[path]
def samefile(self, f1: str, f2: str) -> bool:
s1 = self.stat_or_none(f1)
s2 = self.stat_or_none(f2)
if s1 is None or s2 is None:
return False
return os.path.samestat(s1, s2)
def copy_os_error(e: OSError) -> OSError:
new = OSError(*e.args)
new.errno = e.errno
new.strerror = e.strerror
new.filename = e.filename
if e.filename2:
new.filename2 = e.filename2
return new
| FileSystemCache |
python | django__django | tests/timezones/admin.py | {
"start": 138,
"end": 339
} | class ____(admin.ModelAdmin):
readonly_fields = ("created", "updated")
site = admin.AdminSite(name="admin_tz")
site.register(Event, EventAdmin)
site.register(Timestamp, TimestampAdmin)
| TimestampAdmin |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/config.py | {
"start": 9296,
"end": 12586
} | class ____:
def __init__(self, db, db_opts, options, file_config):
self._set_name(db)
self.db = db
self.db_opts = db_opts
self.options = options
self.file_config = file_config
self.test_schema = "test_schema"
self.test_schema_2 = "test_schema_2"
self.is_async = db.dialect.is_async
from . import provision
self.is_default_dialect = provision.is_preferred_driver(self, db)
_stack = collections.deque()
_configs = set()
def __repr__(self):
return (
f"sqlalchemy.testing.config.Config"
f"({self.db.name}+{self.db.driver}, "
f"{self.db.dialect.server_version_info})"
)
def _set_name(self, db):
suffix = "_async" if db.dialect.is_async else ""
if db.dialect.server_version_info:
svi = ".".join(str(tok) for tok in db.dialect.server_version_info)
self.name = "%s+%s%s_[%s]" % (db.name, db.driver, suffix, svi)
else:
self.name = "%s+%s%s" % (db.name, db.driver, suffix)
@classmethod
def register(cls, db, db_opts, options, file_config):
"""add a config as one of the global configs.
If there are no configs set up yet, this config also
gets set as the "_current".
"""
global any_async
cfg = Config(db, db_opts, options, file_config)
# if any backends include an async driver, then ensure
# all setup/teardown and tests are wrapped in the maybe_async()
# decorator that will set up a greenlet context for async drivers.
any_async = any_async or cfg.is_async
cls._configs.add(cfg)
return cfg
@classmethod
def set_as_current(cls, config, namespace):
global db, _current, db_url, test_schema, test_schema_2, db_opts
_current = config
db_url = config.db.url
db_opts = config.db_opts
test_schema = config.test_schema
test_schema_2 = config.test_schema_2
namespace.db = db = config.db
@classmethod
def push_engine(cls, db, namespace):
assert _current, "Can't push without a default Config set up"
cls.push(
Config(
db, _current.db_opts, _current.options, _current.file_config
),
namespace,
)
@classmethod
def push(cls, config, namespace):
cls._stack.append(_current)
cls.set_as_current(config, namespace)
@classmethod
def pop(cls, namespace):
if cls._stack:
# a failed test w/ -x option can call reset() ahead of time
_current = cls._stack[-1]
del cls._stack[-1]
cls.set_as_current(_current, namespace)
@classmethod
def reset(cls, namespace):
if cls._stack:
cls.set_as_current(cls._stack[0], namespace)
cls._stack.clear()
@classmethod
def all_configs(cls):
return cls._configs
@classmethod
def all_dbs(cls):
for cfg in cls.all_configs():
yield cfg.db
def skip_test(self, msg):
skip_test(msg)
def skip_test(msg):
raise _fixture_functions.skip_test_exception(msg)
def async_test(fn):
return _fixture_functions.async_test(fn)
| Config |
python | scipy__scipy | benchmarks/benchmarks/test_functions.py | {
"start": 7101,
"end": 7418
} | class ____:
target_E = -959.6407
solution = [512, 404.2319]
xmin = np.array([-512., -512])
xmax = np.array([512., 512])
def fun(self, x):
a = -(x[1] + 47) * np.sin(np.sqrt(abs(x[1] + x[0]/2. + 47)))
b = -x[0] * np.sin(np.sqrt(abs(x[0] - (x[1] + 47))))
return a + b
| EggHolder |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/policy/checkpoint_manager.py | {
"start": 284,
"end": 456
} | class ____:
steps: int
file_path: str
reward: Optional[float]
creation_time: float
auxillary_file_paths: List[str] = attr.ib(factory=list)
| ModelCheckpoint |
python | django__django | tests/queries/models.py | {
"start": 6086,
"end": 6185
} | class ____(models.Model):
custom = models.ForeignKey(CustomPk, models.CASCADE, null=True)
| Related |
python | pytorch__pytorch | test/ao/sparsity/test_sparsity_utils.py | {
"start": 778,
"end": 5854
} | class ____(TestCase):
def test_module_to_fqn(self):
"""
Tests that module_to_fqn works as expected when compared to known good
module.get_submodule(fqn) function
"""
for model_class in model_list:
model = model_class()
list_of_modules = [m for _, m in model.named_modules()] + [model]
for module in list_of_modules:
fqn = module_to_fqn(model, module)
check_module = model.get_submodule(fqn)
self.assertEqual(module, check_module)
def test_module_to_fqn_fail(self):
"""
Tests that module_to_fqn returns None when an fqn that doesn't
correspond to a path to a node/tensor is given
"""
for model_class in model_list:
model = model_class()
fqn = module_to_fqn(model, torch.nn.Linear(3, 3))
self.assertEqual(fqn, None)
def test_module_to_fqn_root(self):
"""
Tests that module_to_fqn returns '' when model and target module are the same
"""
for model_class in model_list:
model = model_class()
fqn = module_to_fqn(model, model)
self.assertEqual(fqn, "")
def test_fqn_to_module(self):
"""
Tests that fqn_to_module operates as inverse
of module_to_fqn
"""
for model_class in model_list:
model = model_class()
list_of_modules = [m for _, m in model.named_modules()] + [model]
for module in list_of_modules:
fqn = module_to_fqn(model, module)
check_module = fqn_to_module(model, fqn)
self.assertEqual(module, check_module)
def test_fqn_to_module_fail(self):
"""
Tests that fqn_to_module returns None when it tries to
find an fqn of a module outside the model
"""
for model_class in model_list:
model = model_class()
fqn = "foo.bar.baz"
check_module = fqn_to_module(model, fqn)
self.assertEqual(check_module, None)
def test_fqn_to_module_for_tensors(self):
"""
Tests that fqn_to_module works for tensors, actually all parameters
of the model. This is tested by identifying a module with a tensor,
and generating the tensor_fqn using module_to_fqn on the module +
the name of the tensor.
"""
for model_class in model_list:
model = model_class()
list_of_modules = [m for _, m in model.named_modules()] + [model]
for module in list_of_modules:
module_fqn = module_to_fqn(model, module)
for tensor_name, tensor in module.named_parameters(recurse=False):
tensor_fqn = ( # string manip to handle tensors on root
module_fqn + ("." if module_fqn != "" else "") + tensor_name
)
check_tensor = fqn_to_module(model, tensor_fqn)
self.assertEqual(tensor, check_tensor)
def test_get_arg_info_from_tensor_fqn(self):
"""
Tests that get_arg_info_from_tensor_fqn works for all parameters of the model.
Generates a tensor_fqn in the same way as test_fqn_to_module_for_tensors and
then compares with known (parent) module and tensor_name as well as module_fqn
from module_to_fqn.
"""
for model_class in model_list:
model = model_class()
list_of_modules = [m for _, m in model.named_modules()] + [model]
for module in list_of_modules:
module_fqn = module_to_fqn(model, module)
for tensor_name, _ in module.named_parameters(recurse=False):
tensor_fqn = (
module_fqn + ("." if module_fqn != "" else "") + tensor_name
)
arg_info = get_arg_info_from_tensor_fqn(model, tensor_fqn)
self.assertEqual(arg_info["module"], module)
self.assertEqual(arg_info["module_fqn"], module_fqn)
self.assertEqual(arg_info["tensor_name"], tensor_name)
self.assertEqual(arg_info["tensor_fqn"], tensor_fqn)
def test_get_arg_info_from_tensor_fqn_fail(self):
"""
Tests that get_arg_info_from_tensor_fqn works as expected for invalid tensor_fqn
inputs. The string outputs still work but the output module is expected to be None.
"""
for model_class in model_list:
model = model_class()
tensor_fqn = "foo.bar.baz"
arg_info = get_arg_info_from_tensor_fqn(model, tensor_fqn)
self.assertEqual(arg_info["module"], None)
self.assertEqual(arg_info["module_fqn"], "foo.bar")
self.assertEqual(arg_info["tensor_name"], "baz")
self.assertEqual(arg_info["tensor_fqn"], "foo.bar.baz")
if __name__ == "__main__":
raise_on_run_directly("test/test_ao_sparsity.py")
| TestSparsityUtilFunctions |
python | tensorflow__tensorflow | tensorflow/python/framework/subscribe.py | {
"start": 2346,
"end": 13004
} | class ____(object):
"""Helper class to manage calculating and caching control_outputs in graph."""
__slots__ = ['cache']
def __init__(self):
self.cache = {}
def calc_control_outputs(self, graph):
"""Returns the map of control_outputs for a given graph.
Args:
graph: The graph to parse.
Returns:
A map of the control outputs.
"""
control_outputs = {}
for op in graph.get_operations():
for control_input in op.control_inputs:
if control_input not in control_outputs:
control_outputs[control_input] = set()
control_outputs[control_input].add(op)
return control_outputs
def get_control_outputs(self, op):
"""Return the control outputs for a given op.
Args:
op: The op to fetch control outputs for.
Returns:
Iterable of control output ops.
"""
if op.graph not in self.cache:
control_outputs = self.calc_control_outputs(op.graph)
self.cache[op.graph] = control_outputs
else:
control_outputs = self.cache[op.graph]
return control_outputs.get(op, [])
def _subscribe_new(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
Args:
tensor: `tf.Tensor`
side_effects: List of side_effect functions see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
effects.
"""
update_input = []
for consumer_op in list(tensor.consumers()): # explicit copy
update_input.append((consumer_op, list(consumer_op.inputs).index(tensor)))
update_control_input = control_cache.get_control_outputs(tensor.op)
# Trailing slash on name scope to replace the scope.
name_scope = tensor.op.name + '/subscription/'
with ops.name_scope(name_scope):
outs = []
for s in side_effects:
outs += s(tensor)
with ops.control_dependencies(outs):
out = array_ops.identity(tensor)
for consumer_op, index in update_input:
consumer_op._update_input(index, out) # pylint: disable=protected-access
for consumer_op in update_control_input:
# If an op has more than one output and two or more of its output tensors
# are subscribed at the same time, we remove the control dependency from
# the original op only once and we add the dependencies to all the
# new identities.
new_control_inputs = consumer_op.control_inputs
if tensor.op in new_control_inputs:
new_control_inputs.remove(tensor.op)
new_control_inputs.append(out.op)
# pylint: disable=protected-access
consumer_op._remove_all_control_inputs()
consumer_op._add_control_inputs(new_control_inputs)
# pylint: enable=protected-access
return out
def _subscribe_extend(tensor, side_effects):
"""Helper method to extend the list of side_effects for a subscribed tensor.
Args:
tensor: A `tf.Tensor` as returned by subscribe().
side_effects: List of side_effect functions, see subscribe for details.
Returns:
The given subscribed tensor (for API consistency).
"""
assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
tensor.op.name)
source_tensor = tensor.op.inputs[0]
# Build the side effect graphs and add their outputs to the list of control
# dependencies for the subscribed tensor.
outs = []
name_scope = source_tensor.op.name + '/subscription/'
with ops.name_scope(name_scope):
for s in side_effects:
outs += s(source_tensor)
out_ops = [
out.op if isinstance(out, tensor_lib.Tensor) else out for out in outs
]
tensor.op._add_control_inputs(out_ops) # pylint: disable=protected-access
return tensor
def _is_subscribed_identity(tensor):
"""Checks if the given tensor is an identity op returned by `subscribe()`.
Args:
tensor: A `tf.Tensor` to check.
Returns:
True if the given tensor matches the criteria for subscription identities:
its op type is `Identity`, its name matches the name of its input and
conforms to the convention for subscribed nodes.
False otherwise.
"""
# Subscribed tensor are assumed to be identity ops.
if tensor.op.type != 'Identity':
return False
# Check that the tensor name matches the convention in place for identity ops
# created by subscribe().
match = re.match(r'(?P<prefix_name>^.*?)/subscription/Identity[^/]+',
tensor.name)
if match is None or len(match.groups()) != 1:
return False
prefix_name = match.group('prefix_name')
# Get a reference to the source tensor and check that it has a matching name.
assert len(tensor.op.inputs) == 1, 'Op {} must only have one input'.format(
tensor.op.name)
source_tensor = tensor.op.inputs[0]
if prefix_name != source_tensor.op.name:
return False
return True
def _subscribe(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
This method will check if the given tensor has already been subscribed or if
it's a tensor returned by a previous call to `subscribe()` and, if so, will
reuse the existing identity op, appending the given side effects to the list
of existing ones.
Args:
tensor: The `tf.Tensor` to be subscribed.
side_effects: List of side_effect functions, see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
effects or the given tensor, if it was already been subscribed.
"""
# Check if the given tensor has a numpy compatible type (see dtypes.py).
# If not, we cannot subscribe it, so we just return the original tensor.
if not tensor.dtype.is_numpy_compatible:
logging.debug(('Tensor {} has an un-supported {} type and cannot be '
'subscribed.').format(tensor.name, tensor.dtype))
return tensor
if _is_subscribed_identity(tensor):
return _subscribe_extend(tensor, side_effects)
# Check if the given tensor has already been subscribed by inspecting its
# outputs.
name_scope = tensor.op.name + '/subscription/Identity'
consumers = tensor.consumers()
matching_ops = [op for op in consumers if op.name.startswith(name_scope)]
assert len(matching_ops) <= 1, ('Op {} must only have one subscription '
'op connected to it').format(tensor.op.name)
if len(matching_ops) == 1:
candidate_tensor = matching_ops[0].outputs[0]
if _is_subscribed_identity(candidate_tensor):
return _subscribe_extend(candidate_tensor, side_effects)
return _subscribe_new(tensor, side_effects, control_cache)
@contextlib.contextmanager
def _preserve_control_flow_context(tensor):
"""Preserve the control flow context for the given tensor.
Sets the graph context to the tensor's context so that side effect ops are
added under the same context.
This is needed when subscribing to tensors defined within a conditional
block or a while loop. In these cases we need that the side-effect ops
are created within the same control flow context as that of the tensor
they are attached to.
Args:
tensor: tensor whose context should be preserved.
Yields:
None
"""
# pylint: disable=protected-access
context = tensor.op._get_control_flow_context()
# pylint: enable=protected-access
if context:
context.Enter()
try:
yield
finally:
if context:
context.Exit()
def _scoped_subscribe(tensor, side_effects, control_cache):
"""Helper method that subscribes a single tensor to a list of side_effects.
This is a thin wrapper around `_subscribe` and ensures that the side effect
ops are added within the same device and control flow context of the
subscribed tensor.
Args:
tensor: The `tf.Tensor` to be subscribed.
side_effects: List of side_effect functions, see subscribe for details.
control_cache: `_ControlOutputCache` helper to get control_outputs faster.
Returns:
The modified replacement to the passed in tensor which triggers the side
effects or the given tensor, if it was already been subscribed.
"""
with ops.device(tensor.device):
with _preserve_control_flow_context(tensor):
return _subscribe(tensor, side_effects, control_cache)
def subscribe(tensors, side_effects):
"""Subscribe to a tensor.
This method will attach side effect graphs to a given set
of tensors. Set of tensors follows from session.run and supports
single `Tensor`, `list`, nested `list`, `tuple`, `namedtuple`, or `dict`. It
returns the tensors in the same passed in structure, but as clones with
side effects applied. The supplied side effect graphs are specified
as a constructor function which takes the target tensor and
constructs a side effect graph and returns a list of ops that should
be control dependencies on fetching the tensor. It will append
'subscription' to the name scope of the tensor for every node in
the side effect graph. These control dependencies are what trigger
the side effects. Subscribe will construct the additions to your
graph and return the created identity tensor downstream of the control
dependencies. Use these tensors as you would normally in the rest of
your tensorflow code. If a given tensor has already been subscribed or a
tensor returned by a call to subscribe is passed, the previously created
identity tensor will be reused and the side effect graphs will be added to
the existing ones.
Args:
tensors: `Tensor` or set of tensors to subscribe to. Set of tensors format
follows from `Session.run` and supports single `Tensor`, `list`, nested
`list`, `tuple`, `namedtuple`, or `dict`.
side_effects: Function(s) that takes a `Tensor`, construct a subgraph, and
return a nonempty list of control dependencies. This can be a single
function or list of functions.
Returns:
Subscribed tensors, which are identity copies of the passed in tensors
in the same passed in structure, but the graph has been modified
such that these are downstream of the control dependencies for
the side effect graphs. Use these functionally equivalent tensors
instead of the passed in tensors for further construction or running.
"""
if not hasattr(side_effects, '__iter__'):
side_effects = [side_effects]
control_outputs = _ControlOutputCache()
result = _recursive_apply(
tensors, lambda t: _scoped_subscribe(t, side_effects, control_outputs))
return result
| _ControlOutputCache |
python | python__mypy | mypy/types.py | {
"start": 52831,
"end": 63144
} | class ____(ProperType):
"""An instance type of form C[T1, ..., Tn].
The list of type variables may be empty.
Several types have fallbacks to `Instance`, because in Python everything is an object
and this concept is impossible to express without intersection types. We therefore use
fallbacks for all "non-special" (like UninhabitedType, ErasedType etc) types.
"""
__slots__ = ("type", "args", "invalid", "type_ref", "last_known_value", "_hash", "extra_attrs")
def __init__(
self,
typ: mypy.nodes.TypeInfo,
args: Sequence[Type],
line: int = -1,
column: int = -1,
*,
last_known_value: LiteralType | None = None,
extra_attrs: ExtraAttrs | None = None,
) -> None:
super().__init__(line, column)
self.type = typ
self.args = tuple(args)
self.type_ref: str | None = None
# True if recovered after incorrect number of type arguments error
self.invalid = False
# This field keeps track of the underlying Literal[...] value associated with
# this instance, if one is known.
#
# This field is set whenever possible within expressions, but is erased upon
# variable assignment (see erasetype.remove_instance_last_known_values) unless
# the variable is declared to be final.
#
# For example, consider the following program:
#
# a = 1
# b: Final[int] = 2
# c: Final = 3
# print(a + b + c + 4)
#
# The 'Instance' objects associated with the expressions '1', '2', '3', and '4' will
# have last_known_values of type Literal[1], Literal[2], Literal[3], and Literal[4]
# respectively. However, the Instance object assigned to 'a' and 'b' will have their
# last_known_value erased: variable 'a' is mutable; variable 'b' was declared to be
# specifically an int.
#
# Or more broadly, this field lets this Instance "remember" its original declaration
# when applicable. We want this behavior because we want implicit Final declarations
# to act pretty much identically with constants: we should be able to replace any
# places where we use some Final variable with the original value and get the same
# type-checking behavior. For example, we want this program:
#
# def expects_literal(x: Literal[3]) -> None: pass
# var: Final = 3
# expects_literal(var)
#
# ...to type-check in the exact same way as if we had written the program like this:
#
# def expects_literal(x: Literal[3]) -> None: pass
# expects_literal(3)
#
# In order to make this work (especially with literal types), we need var's type
# (an Instance) to remember the "original" value.
#
# Preserving this value within expressions is useful for similar reasons.
#
# Currently most of mypy will ignore this field and will continue to treat this type like
# a regular Instance. We end up using this field only when we are explicitly within a
# Literal context.
self.last_known_value = last_known_value
# Cached hash value
self._hash = -1
# Additional attributes defined per instance of this type. For example modules
# have different attributes per instance of types.ModuleType.
self.extra_attrs = extra_attrs
def accept(self, visitor: TypeVisitor[T]) -> T:
return visitor.visit_instance(self)
def __hash__(self) -> int:
if self._hash == -1:
self._hash = hash((self.type, self.args, self.last_known_value, self.extra_attrs))
return self._hash
def __eq__(self, other: object) -> bool:
if not isinstance(other, Instance):
return NotImplemented
return (
self.type == other.type
and self.args == other.args
and self.last_known_value == other.last_known_value
and self.extra_attrs == other.extra_attrs
)
def serialize(self) -> JsonDict | str:
assert self.type is not None
type_ref = self.type.fullname
if not self.args and not self.last_known_value and not self.extra_attrs:
return type_ref
data: JsonDict = {
".class": "Instance",
"type_ref": type_ref,
"args": [arg.serialize() for arg in self.args],
}
if self.last_known_value is not None:
data["last_known_value"] = self.last_known_value.serialize()
data["extra_attrs"] = self.extra_attrs.serialize() if self.extra_attrs else None
return data
@classmethod
def deserialize(cls, data: JsonDict | str) -> Instance:
if isinstance(data, str):
inst = Instance(NOT_READY, [])
inst.type_ref = data
return inst
assert data[".class"] == "Instance"
args: list[Type] = []
if "args" in data:
args_list = data["args"]
assert isinstance(args_list, list)
args = [deserialize_type(arg) for arg in args_list]
inst = Instance(NOT_READY, args)
inst.type_ref = data["type_ref"] # Will be fixed up by fixup.py later.
if "last_known_value" in data:
inst.last_known_value = LiteralType.deserialize(data["last_known_value"])
if data.get("extra_attrs") is not None:
inst.extra_attrs = ExtraAttrs.deserialize(data["extra_attrs"])
return inst
def write(self, data: WriteBuffer) -> None:
write_tag(data, INSTANCE)
if not self.args and not self.last_known_value and not self.extra_attrs:
type_ref = self.type.fullname
if type_ref == "builtins.str":
write_tag(data, INSTANCE_STR)
elif type_ref == "builtins.function":
write_tag(data, INSTANCE_FUNCTION)
elif type_ref == "builtins.int":
write_tag(data, INSTANCE_INT)
elif type_ref == "builtins.bool":
write_tag(data, INSTANCE_BOOL)
elif type_ref == "builtins.object":
write_tag(data, INSTANCE_OBJECT)
else:
write_tag(data, INSTANCE_SIMPLE)
write_str_bare(data, type_ref)
return
write_tag(data, INSTANCE_GENERIC)
write_str(data, self.type.fullname)
write_type_list(data, self.args)
write_type_opt(data, self.last_known_value)
if self.extra_attrs is None:
write_tag(data, LITERAL_NONE)
else:
self.extra_attrs.write(data)
write_tag(data, END_TAG)
@classmethod
def read(cls, data: ReadBuffer) -> Instance:
tag = read_tag(data)
# This is quite verbose, but this is very hot code, so we are not
# using dictionary lookups here.
if tag == INSTANCE_STR:
if instance_cache.str_type is None:
instance_cache.str_type = Instance(NOT_READY, [])
instance_cache.str_type.type_ref = "builtins.str"
return instance_cache.str_type
if tag == INSTANCE_FUNCTION:
if instance_cache.function_type is None:
instance_cache.function_type = Instance(NOT_READY, [])
instance_cache.function_type.type_ref = "builtins.function"
return instance_cache.function_type
if tag == INSTANCE_INT:
if instance_cache.int_type is None:
instance_cache.int_type = Instance(NOT_READY, [])
instance_cache.int_type.type_ref = "builtins.int"
return instance_cache.int_type
if tag == INSTANCE_BOOL:
if instance_cache.bool_type is None:
instance_cache.bool_type = Instance(NOT_READY, [])
instance_cache.bool_type.type_ref = "builtins.bool"
return instance_cache.bool_type
if tag == INSTANCE_OBJECT:
if instance_cache.object_type is None:
instance_cache.object_type = Instance(NOT_READY, [])
instance_cache.object_type.type_ref = "builtins.object"
return instance_cache.object_type
if tag == INSTANCE_SIMPLE:
inst = Instance(NOT_READY, [])
inst.type_ref = read_str_bare(data)
return inst
assert tag == INSTANCE_GENERIC
type_ref = read_str(data)
inst = Instance(NOT_READY, read_type_list(data))
inst.type_ref = type_ref
tag = read_tag(data)
if tag != LITERAL_NONE:
assert tag == LITERAL_TYPE
inst.last_known_value = LiteralType.read(data)
tag = read_tag(data)
if tag != LITERAL_NONE:
assert tag == EXTRA_ATTRS
inst.extra_attrs = ExtraAttrs.read(data)
assert read_tag(data) == END_TAG
return inst
def copy_modified(
self,
*,
args: Bogus[list[Type]] = _dummy,
last_known_value: Bogus[LiteralType | None] = _dummy,
) -> Instance:
new = Instance(
typ=self.type,
args=args if args is not _dummy else self.args,
line=self.line,
column=self.column,
last_known_value=(
last_known_value if last_known_value is not _dummy else self.last_known_value
),
extra_attrs=self.extra_attrs,
)
new.can_be_true = self.can_be_true
new.can_be_false = self.can_be_false
return new
def copy_with_extra_attr(self, name: str, typ: Type) -> Instance:
if self.extra_attrs:
existing_attrs = self.extra_attrs.copy()
else:
existing_attrs = ExtraAttrs({}, set(), None)
existing_attrs.attrs[name] = typ
new = self.copy_modified()
new.extra_attrs = existing_attrs
return new
def is_singleton_type(self) -> bool:
# TODO:
# Also make this return True if the type corresponds to NotImplemented?
return (
self.type.is_enum
and len(self.type.enum_members) == 1
or self.type.fullname in ELLIPSIS_TYPE_NAMES
)
| Instance |
python | numpy__numpy | benchmarks/benchmarks/bench_ma.py | {
"start": 8474,
"end": 9209
} | class ____(Benchmark):
param_names = ["size"]
params = [["small", "large"]]
def setup(self, size):
# Set the proportion of masked values.
prop_mask = 0.2
# Set up a "small" array with 10 vars and 10 obs.
rng = np.random.default_rng()
data = rng.random((10, 10), dtype=np.float32)
self.small = np.ma.array(data, mask=(data <= prop_mask))
# Set up a "large" array with 100 vars and 100 obs.
data = rng.random((100, 100), dtype=np.float32)
self.large = np.ma.array(data, mask=(data <= prop_mask))
def time_cov(self, size):
if size == "small":
np.ma.cov(self.small)
if size == "large":
np.ma.cov(self.large)
| Cov |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/polymorphic_function.py | {
"start": 5560,
"end": 7972
} | class ____(object):
"""Class keeping track of how many recent calls triggered tracing."""
__slots__ = ["_calls_per_tracings", "_call_count", "_total_warning_count"]
def __init__(self):
self._calls_per_tracings = []
self._total_warning_count = 0
self._call_count = 0
def called_with_tracing(self, function_name, omit_warning):
"""Updates the list of most recent calls' tracing information.
Warns the user when recent calls caused retracing too often.
Args:
function_name: the python function being traced.
omit_warning: If 'True', this call will not warn the user even if
retracing happens too often.
"""
self._call_count += 1
self._calls_per_tracings.append(1)
while self._calls_per_tracings:
if (self._call_count - self._calls_per_tracings[0] >
FREQUENT_TRACING_WARNING_MAX_CALL_HISTORY):
self._call_count -= self._calls_per_tracings.pop(0)
else:
break
if (omit_warning or self._total_warning_count >=
FREQUENT_TRACING_WARNING_MAX_WARNING_PER_DETECTOR):
return
if len(self._calls_per_tracings) >= FREQUENT_TRACING_WARNING_THRESHOLD:
self._total_warning_count += 1
logging.warning(
"{} out of the last {} calls to {} triggered tf.function "
"retracing. Tracing is expensive and the excessive number of "
"tracings could be due to (1) creating @tf.function repeatedly in "
"a loop, (2) passing tensors with different shapes, (3) passing "
"Python objects instead of tensors. For (1), please define your "
"@tf.function outside of the loop. For (2), @tf.function has "
"reduce_retracing=True option that can avoid unnecessary "
"retracing. For (3), please refer to "
"https://www.tensorflow.org/guide/function#controlling_retracing"
" and https://www.tensorflow.org/api_docs/python/tf/function for "
" more details.".format(
len(self._calls_per_tracings), self._call_count, function_name))
def called_without_tracing(self):
# We don't count tracing when users load a concrete function directly or
# call get_concrete_function, so the first call can be not a tracing call.
if not self._calls_per_tracings:
self._calls_per_tracings = [0]
self._calls_per_tracings[-1] += 1
self._call_count += 1
| _FrequentTracingDetector |
python | kubernetes-client__python | kubernetes/base/config/kube_config.py | {
"start": 25970,
"end": 34423
} | class ____:
"""Reads and merges configuration from one or more kube-config's.
The property `config` can be passed to the KubeConfigLoader as config_dict.
It uses a path attribute from ConfigNode to store the path to kubeconfig.
This path is required to load certs from relative paths.
A method `save_changes` updates changed kubeconfig's (it compares current
state of dicts with).
"""
def __init__(self, paths):
self.paths = []
self.config_files = {}
self.config_merged = None
if hasattr(paths, 'read'):
self._load_config_from_file_like_object(paths)
else:
self._load_config_from_file_path(paths)
@property
def config(self):
return self.config_merged
def _load_config_from_file_like_object(self, string):
if hasattr(string, 'getvalue'):
config = yaml.safe_load(string.getvalue())
else:
config = yaml.safe_load(string.read())
if config is None:
raise ConfigException(
'Invalid kube-config.')
if self.config_merged is None:
self.config_merged = copy.deepcopy(config)
# doesn't need to do any further merging
def _load_config_from_file_path(self, string):
for path in string.split(ENV_KUBECONFIG_PATH_SEPARATOR):
if path:
path = os.path.expanduser(path)
if os.path.exists(path):
self.paths.append(path)
self.load_config(path)
self.config_saved = copy.deepcopy(self.config_files)
def load_config(self, path):
with open(path) as f:
config = yaml.safe_load(f)
if config is None:
raise ConfigException(
'Invalid kube-config. '
'%s file is empty' % path)
if self.config_merged is None:
config_merged = copy.deepcopy(config)
for item in ('clusters', 'contexts', 'users'):
config_merged[item] = []
self.config_merged = ConfigNode(path, config_merged, path)
for item in ('clusters', 'contexts', 'users'):
self._merge(item, config.get(item, []) or [], path)
if 'current-context' in config:
self.config_merged.value['current-context'] = config['current-context']
self.config_files[path] = config
def _merge(self, item, add_cfg, path):
for new_item in add_cfg:
for exists in self.config_merged.value[item]:
if exists['name'] == new_item['name']:
break
else:
self.config_merged.value[item].append(ConfigNode(
'{}/{}'.format(path, new_item), new_item, path))
def save_changes(self):
for path in self.paths:
if self.config_saved[path] != self.config_files[path]:
self.save_config(path)
self.config_saved = copy.deepcopy(self.config_files)
def save_config(self, path):
with open(path, 'w') as f:
yaml.safe_dump(self.config_files[path], f,
default_flow_style=False)
def _get_kube_config_loader_for_yaml_file(
filename, persist_config=False, **kwargs):
return _get_kube_config_loader(
filename=filename,
persist_config=persist_config,
**kwargs)
def _get_kube_config_loader(
filename=None,
config_dict=None,
persist_config=False,
**kwargs):
if config_dict is None:
kcfg = KubeConfigMerger(filename)
if persist_config and 'config_persister' not in kwargs:
kwargs['config_persister'] = kcfg.save_changes
if kcfg.config is None:
raise ConfigException(
'Invalid kube-config file. '
'No configuration found.')
return KubeConfigLoader(
config_dict=kcfg.config,
config_base_path=None,
**kwargs)
else:
return KubeConfigLoader(
config_dict=config_dict,
config_base_path=None,
**kwargs)
def list_kube_config_contexts(config_file=None):
if config_file is None:
config_file = KUBE_CONFIG_DEFAULT_LOCATION
loader = _get_kube_config_loader(filename=config_file)
return loader.list_contexts(), loader.current_context
def load_kube_config(config_file=None, context=None,
client_configuration=None,
persist_config=True,
temp_file_path=None):
"""Loads authentication and cluster information from kube-config file
and stores them in kubernetes.client.configuration.
:param config_file: Name of the kube-config file.
:param context: set the active context. If is set to None, current_context
from config file will be used.
:param client_configuration: The kubernetes.client.Configuration to
set configs to.
:param persist_config: If True, config file will be updated when changed
(e.g GCP token refresh).
:param temp_file_path: store temp files path.
"""
if config_file is None:
config_file = KUBE_CONFIG_DEFAULT_LOCATION
loader = _get_kube_config_loader(
filename=config_file, active_context=context,
persist_config=persist_config,
temp_file_path=temp_file_path)
if client_configuration is None:
config = type.__call__(Configuration)
loader.load_and_set(config)
Configuration.set_default(config)
else:
loader.load_and_set(client_configuration)
def load_kube_config_from_dict(config_dict, context=None,
client_configuration=None,
persist_config=True,
temp_file_path=None):
"""Loads authentication and cluster information from config_dict file
and stores them in kubernetes.client.configuration.
:param config_dict: Takes the config file as a dict.
:param context: set the active context. If is set to None, current_context
from config file will be used.
:param client_configuration: The kubernetes.client.Configuration to
set configs to.
:param persist_config: If True, config file will be updated when changed
(e.g GCP token refresh).
:param temp_file_path: store temp files path.
"""
if config_dict is None:
raise ConfigException(
'Invalid kube-config dict. '
'No configuration found.')
loader = _get_kube_config_loader(
config_dict=config_dict, active_context=context,
persist_config=persist_config,
temp_file_path=temp_file_path)
if client_configuration is None:
config = type.__call__(Configuration)
loader.load_and_set(config)
Configuration.set_default(config)
else:
loader.load_and_set(client_configuration)
def new_client_from_config(
config_file=None,
context=None,
persist_config=True,
client_configuration=None):
"""
Loads configuration the same as load_kube_config but returns an ApiClient
to be used with any API object. This will allow the caller to concurrently
talk with multiple clusters.
"""
if client_configuration is None:
client_configuration = type.__call__(Configuration)
load_kube_config(config_file=config_file, context=context,
client_configuration=client_configuration,
persist_config=persist_config)
return ApiClient(configuration=client_configuration)
def new_client_from_config_dict(
config_dict=None,
context=None,
persist_config=True,
temp_file_path=None,
client_configuration=None):
"""
Loads configuration the same as load_kube_config_from_dict but returns an ApiClient
to be used with any API object. This will allow the caller to concurrently
talk with multiple clusters.
"""
if client_configuration is None:
client_configuration = type.__call__(Configuration)
load_kube_config_from_dict(config_dict=config_dict, context=context,
client_configuration=client_configuration,
persist_config=persist_config,
temp_file_path=temp_file_path)
return ApiClient(configuration=client_configuration)
| KubeConfigMerger |
python | django__django | tests/generic_views/test_edit.py | {
"start": 2709,
"end": 3180
} | class ____(SimpleTestCase):
def test_get_form(self):
form_class = views.AuthorGetQuerySetFormView().get_form_class()
self.assertEqual(form_class._meta.model, Author)
def test_get_form_checks_for_object(self):
mixin = ModelFormMixin()
mixin.request = RequestFactory().get("/")
self.assertEqual({"initial": {}, "prefix": None}, mixin.get_form_kwargs())
@override_settings(ROOT_URLCONF="generic_views.urls")
| ModelFormMixinTests |
python | Lightning-AI__lightning | src/lightning/fabric/plugins/environments/xla.py | {
"start": 904,
"end": 4004
} | class ____(ClusterEnvironment):
"""Cluster environment for training on a TPU Pod with the `PyTorch/XLA <https://pytorch.org/xla>`_ library.
A list of environment variables set by XLA can be found
`here <https://github.com/pytorch/xla/blob/master/torch_xla/core/xla_env_vars.py>`_.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
if not _XLA_AVAILABLE:
raise ModuleNotFoundError(str(_XLA_AVAILABLE))
super().__init__(*args, **kwargs)
@property
@override
def creates_processes_externally(self) -> bool:
return False
@property
@override
def main_address(self) -> str:
# unused by lightning
raise NotImplementedError
@property
@override
def main_port(self) -> int:
# unused by lightning
raise NotImplementedError
@staticmethod
@override
def detect() -> bool:
return XLAAccelerator.is_available()
@override
@functools.lru_cache(maxsize=1)
def world_size(self) -> int:
"""The number of processes across all devices and hosts.
The output is cached for performance.
"""
if _XLA_GREATER_EQUAL_2_1:
from torch_xla import runtime as xr
return xr.world_size()
import torch_xla.core.xla_model as xm
return xm.xrt_world_size()
@override
def set_world_size(self, size: int) -> None:
log.debug("XLAEnvironment.set_world_size was called, but setting world size is not allowed. Ignored.")
@override
@functools.lru_cache(maxsize=1)
def global_rank(self) -> int:
"""The rank (index) of the currently running process across all host and devices.
The output is cached for performance.
"""
if _XLA_GREATER_EQUAL_2_1:
from torch_xla import runtime as xr
return xr.global_ordinal()
import torch_xla.core.xla_model as xm
return xm.get_ordinal()
@override
def set_global_rank(self, rank: int) -> None:
log.debug("XLAEnvironment.set_global_rank was called, but setting global rank is not allowed. Ignored.")
@override
@functools.lru_cache(maxsize=1)
def local_rank(self) -> int:
"""The rank (index) of the currently running process inside of the current host.
The output is cached for performance.
"""
if _XLA_GREATER_EQUAL_2_1:
from torch_xla import runtime as xr
return xr.local_ordinal()
import torch_xla.core.xla_model as xm
return xm.get_local_ordinal()
@override
@functools.lru_cache(maxsize=1)
def node_rank(self) -> int:
"""The rank (index) of the host on which the current process runs.
The output is cached for performance.
"""
if _XLA_GREATER_EQUAL_2_1:
from torch_xla import runtime as xr
return xr.host_index()
import torch_xla.core.xla_env_vars as xenv
from torch_xla.utils.utils import getenv_as
return getenv_as(xenv.HOST_ORDINAL, int, 0)
| XLAEnvironment |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_be_edtf_parseable.py | {
"start": 1294,
"end": 3366
} | class ____(ColumnMapMetricProvider):
condition_metric_name = "column_values.edtf_parseable"
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, level=None, **kwargs):
def is_parseable(val):
try:
if type(val) != str: # noqa: E721
raise TypeError( # noqa: TRY003
"Values passed to expect_column_values_to_be_edtf_parseable must be of type string.\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format."
)
return complies_to_level(val, level)
except (ValueError, OverflowError):
return False
if level is not None and type(level) != int: # noqa: E721
raise TypeError("level must be of type int.") # noqa: TRY003
return column.map(is_parseable)
## When the correct map_metric was added to ExpectColumnValuesToBeEdtfParseable below
## and tests were run, the tests for spark were failing with
## `ModuleNotFoundError: No module named 'expectations'`, so commenting out for now
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, level=None, **kwargs):
# def is_parseable(val):
# try:
# if type(val) != str:
# raise TypeError(
# "Values passed to expect_column_values_to_be_edtf_parseable must be of type string.\nIf you want to validate a column of dates or timestamps, please call the expectation before converting from string format."
# )
#
# return complies_to_level(val, level)
#
# except (ValueError, OverflowError):
# return False
#
# if level is not None and type(level) != int:
# raise TypeError("level must be of type int.")
#
# is_parseable_udf = F.udf(is_parseable, pyspark.types.BooleanType())
# return is_parseable_udf(column)
| ColumnValuesEdtfParseable |
python | PrefectHQ__prefect | src/prefect/_internal/concurrency/threads.py | {
"start": 4683,
"end": 11026
} | class ____(Portal):
"""
A portal to a worker running on a thread with an event loop.
"""
def __init__(
self,
name: str = "EventLoopThread",
daemon: bool = False,
run_once: bool = False,
):
self.thread = threading.Thread(
name=name, daemon=daemon, target=self._entrypoint
)
self._ready_future: concurrent.futures.Future[bool] = (
concurrent.futures.Future()
)
self._loop: Optional[asyncio.AbstractEventLoop] = None
self._shutdown_event: Event = Event()
self._run_once: bool = run_once
self._submitted_count: int = 0
self._on_shutdown: list[Call[Any]] = []
self._lock = threading.Lock()
# Track this instance for fork handling
_active_instances.add(self)
if not daemon:
atexit.register(self.shutdown)
def reset_for_fork(self) -> None:
"""Reset state after fork() to prevent deadlocks in child process."""
self._loop = None
self._ready_future = concurrent.futures.Future()
self._shutdown_event = Event()
self._lock = threading.Lock()
self._submitted_count = 0
self._on_shutdown = []
def start(self):
"""
Start the worker thread; raises any exceptions encountered during startup.
"""
with self._lock:
if self._loop is None:
self.thread.start()
self._ready_future.result()
def submit(self, call: Call[T]) -> Call[T]:
if self._loop is None:
self.start()
with self._lock:
if self._submitted_count > 0 and self._run_once:
raise RuntimeError(
"Worker configured to only run once. A call has already been"
" submitted."
)
if self._shutdown_event.is_set():
raise RuntimeError("Worker is shutdown.")
# Track the portal running the call
call.set_runner(self)
if self._run_once:
call.future.add_done_callback(lambda _: self.shutdown())
# Submit the call to the event loop
assert self._loop is not None
asyncio.run_coroutine_threadsafe(self._run_call(call), self._loop)
self._submitted_count += 1
return call
def shutdown(self) -> None:
"""
Shutdown the worker thread. Does not wait for the thread to stop.
"""
with self._lock:
self._shutdown_event.set()
@property
def name(self) -> str:
return self.thread.name
@property
def running(self) -> bool:
return not self._shutdown_event.is_set()
@property
def loop(self) -> asyncio.AbstractEventLoop | None:
return self._loop
def _entrypoint(self):
"""
Entrypoint for the thread.
Immediately create a new event loop and pass control to `run_until_shutdown`.
"""
try:
asyncio.run(self._run_until_shutdown())
except BaseException:
# Log exceptions that crash the thread
logger.exception("%s encountered exception", self.name)
raise
async def _run_until_shutdown(self):
try:
self._loop = asyncio.get_running_loop()
self._ready_future.set_result(True)
except Exception as exc:
self._ready_future.set_exception(exc)
return
await self._shutdown_event.wait()
for call in self._on_shutdown:
await self._run_call(call)
# Empty the list to allow calls to be garbage collected. Issue #10338.
self._on_shutdown = []
async def _run_call(self, call: Call[Any]) -> None:
task = call.run()
if task is not None:
await task
def add_shutdown_call(self, call: Call[Any]) -> None:
self._on_shutdown.append(call)
def __enter__(self):
self.start()
return self
def __exit__(self, *_):
self.shutdown()
# the GLOBAL LOOP is used for background services, like logs
_global_loop: Optional[EventLoopThread] = None
# the RUN SYNC LOOP is used exclusively for running async functions in a sync context via asyncutils.run_sync
_run_sync_loop: Optional[EventLoopThread] = None
def get_global_loop() -> EventLoopThread:
    """
    Get the global loop thread.

    Creates a new one if there is not one available.
    """
    global _global_loop

    # Reuse the existing worker unless it is missing, its thread has died,
    # or it has already been shut down — in which case replace the singleton.
    current = _global_loop
    needs_new = (
        current is None or not current.thread.is_alive() or not current.running
    )
    if needs_new:
        _global_loop = EventLoopThread(daemon=True, name="GlobalEventLoopThread")
        _global_loop.start()

    return _global_loop
def in_global_loop() -> bool:
    """
    Check if called from the global loop.

    Returns:
        ``True`` if the currently running event loop is the global loop
        thread's loop, ``False`` otherwise.
    """
    if _global_loop is None:
        # Avoid creating a global loop if there isn't one
        return False

    # Plain attribute access instead of `getattr` with a constant name
    # (the dynamic lookup added nothing and hid the attribute from tooling).
    return get_global_loop()._loop == get_running_loop()
def get_run_sync_loop() -> EventLoopThread:
    """
    Get the run_sync loop thread.

    Creates a new one if there is not one available.
    """
    global _run_sync_loop

    # Reuse the existing worker unless it is missing, its thread has died,
    # or it has already been shut down — in which case replace the singleton.
    current = _run_sync_loop
    if current is None or not current.thread.is_alive() or not current.running:
        _run_sync_loop = EventLoopThread(daemon=True, name="RunSyncEventLoopThread")
        _run_sync_loop.start()

    return _run_sync_loop
def in_run_sync_loop() -> bool:
    """
    Check if called from the run_sync loop.

    Returns:
        ``True`` if the currently running event loop is the run_sync loop
        thread's loop, ``False`` otherwise.
    """
    # NOTE: the docstring previously said "global loop" — a copy-paste error
    # from `in_global_loop`; this function checks the run_sync loop.
    if _run_sync_loop is None:
        # Avoid creating a run_sync loop if there isn't one
        return False

    # Plain attribute access instead of `getattr` with a constant name.
    return get_run_sync_loop()._loop == get_running_loop()
def wait_for_global_loop_exit(timeout: Optional[float] = None) -> None:
    """
    Shutdown the global loop and wait for it to exit.

    Raises:
        RuntimeError: if called from the global loop thread itself, since a
            thread cannot join itself.
    """
    # NOTE(review): `get_global_loop` instantiates a fresh worker when none is
    # running, only for it to be shut down immediately below — presumably
    # acceptable; confirm before changing.
    worker = get_global_loop()
    worker.shutdown()

    thread = worker.thread
    if thread.ident == threading.get_ident():
        raise RuntimeError("Cannot wait for the loop thread from inside itself.")

    thread.join(timeout)
| EventLoopThread |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 16855,
"end": 17278
} | class ____(ProviderError):
"""
Raised when connecting a provider to the wrong network.
"""
def __init__(self, chain_id: int, network: "NetworkAPI"):
message = (
f"Provider connected to chain ID '{chain_id}', which does not match "
f"network chain ID '{network.chain_id}'. Are you connected to '{network.name}'?"
)
super().__init__(message)
| NetworkMismatchError |
python | pyca__cryptography | tests/hazmat/primitives/test_x963_vectors.py | {
"start": 659,
"end": 2077
} | class ____:
_algorithms_dict: typing.ClassVar[
typing.Dict[str, typing.Type[hashes.HashAlgorithm]]
] = {
"SHA-1": hashes.SHA1,
"SHA-224": hashes.SHA224,
"SHA-256": hashes.SHA256,
"SHA-384": hashes.SHA384,
"SHA-512": hashes.SHA512,
}
def test_x963(self, backend, subtests):
vectors = load_vectors_from_file(
os.path.join("KDF", "ansx963_2001.txt"), load_x963_vectors
)
for vector in vectors:
with subtests.test():
hashfn = self._algorithms_dict[vector["hash"]]
_skip_hashfn_unsupported(backend, hashfn())
key = binascii.unhexlify(vector["Z"])
sharedinfo = None
if vector["sharedinfo_length"] != 0:
sharedinfo = binascii.unhexlify(vector["sharedinfo"])
key_data_len = vector["key_data_length"] // 8
key_data = binascii.unhexlify(vector["key_data"])
xkdf = X963KDF(
algorithm=hashfn(),
length=key_data_len,
sharedinfo=sharedinfo,
backend=backend,
)
xkdf.verify(key, key_data)
def test_unsupported_hash(self, backend):
with pytest.raises(pytest.skip.Exception):
_skip_hashfn_unsupported(backend, DummyHashAlgorithm())
| TestX963 |
python | ApeWorX__ape | src/ape/api/networks.py | {
"start": 2787,
"end": 24079
} | class ____(ExtraAttributesMixin, BaseInterfaceModel):
"""
A set of related networks, such as Ethereum.
"""
name: str
"""
The name of the ecosystem. This should be set the same name as the plugin.
"""
# TODO: In 0.9, make @property that returns value from config,
# and use REQUEST_HEADER as plugin-defined constants.
request_header: dict = {}
"""A shareable HTTP header for network requests."""
fee_token_symbol: str
"""The token symbol for the currency that pays for fees, such as ETH."""
fee_token_decimals: int = 18
"""The number of the decimals the fee token has."""
_default_network: Optional[str] = None
"""The default network of the ecosystem, such as ``local``."""
@model_validator(mode="after")
def _validate_ecosystem(self):
headers = RPCHeaders(**self.request_header)
headers["User-Agent"] = f"ape-{self.name}"
self.request_header = dict(**headers)
return self
@log_instead_of_fail(default="<EcosystemAPI>")
def __repr__(self) -> str:
return f"<{self.name}>"
@property
def data_folder(self) -> Path:
"""
The path to the ecosystem's data folder,
e.g. ``$HOME/.ape/{self.name}`` unless overridden.
"""
return self.config_manager.DATA_FOLDER / self.name
@property
def custom_network(self) -> "NetworkAPI":
"""
A :class:`~ape.api.networks.NetworkAPI` for custom networks where the
network is either not known, unspecified, or does not have an Ape plugin.
"""
ethereum_class = None
for plugin_name, ecosystem_class in self.plugin_manager.ecosystems:
if plugin_name == "ethereum":
ethereum_class = ecosystem_class
break
if ethereum_class is None:
raise NetworkError("Core Ethereum plugin missing.")
init_kwargs = {"name": "ethereum"}
evm_ecosystem = ethereum_class(**init_kwargs) # type: ignore
return NetworkAPI(
name="custom",
ecosystem=evm_ecosystem,
_default_provider="node",
_is_custom=True,
)
@classmethod
@abstractmethod
def decode_address(cls, raw_address: "RawAddress") -> AddressType:
"""
Convert a raw address to the ecosystem's native address type.
Args:
raw_address (:class:`~ape.types.address.RawAddress`): The address to
convert.
Returns:
:class:`~ape.types.address.AddressType`
"""
@classmethod
@abstractmethod
def encode_address(cls, address: AddressType) -> "RawAddress":
"""
Convert the ecosystem's native address type to a raw integer or str address.
Args:
address (:class:`~ape.types.address.AddressType`): The address to convert.
Returns:
:class:`~ape.types.address.RawAddress`
"""
@raises_not_implemented
def encode_contract_blueprint( # type: ignore[empty-body]
self, contract_type: "ContractType", *args, **kwargs
) -> "TransactionAPI":
"""
Encode a unique type of transaction that allows contracts to be created
from other contracts, such as
`EIP-5202 <https://eips.ethereum.org/EIPS/eip-5202>`__
or Starknet's ``Declare`` transaction type.
Args:
contract_type (``ContractType``): The type of contract to create a blueprint for.
This is the type of contract that will get created by factory contracts.
*args (Any): Calldata, if applicable.
**kwargs (Any): Transaction specifications, such as ``value``.
Returns:
:class:`~ape.ape.transactions.TransactionAPI`
"""
@abstractmethod
def decode_receipt(self, data: dict) -> "ReceiptAPI":
"""
Convert data to :class:`~ape.api.transactions.ReceiptAPI`.
Args:
data (Dict): A dictionary of Receipt properties.
Returns:
:class:`~ape.api.transactions.ReceiptAPI`
"""
@abstractmethod
def decode_block(self, data: dict) -> "BlockAPI":
"""
Decode data to a :class:`~ape.api.providers.BlockAPI`.
Args:
data (Dict): A dictionary of data to decode.
Returns:
:class:`~ape.api.providers.BlockAPI`
"""
@property
def config(self) -> PluginConfig:
"""
The configuration of the ecosystem. See :class:`ape.managers.config.ConfigManager`
for more information on plugin configurations.
Returns:
:class:`ape.api.config.PluginConfig`
"""
return self.config_manager.get_config(self.name)
@property
def networks(self) -> dict[str, "NetworkAPI"]:
"""
A dictionary of network names mapped to their API implementation.
Returns:
dict[str, :class:`~ape.api.networks.NetworkAPI`]
"""
return {
**self._networks_from_evmchains,
**self._networks_from_plugins,
**self._custom_networks,
}
@cached_property
def _networks_from_plugins(self) -> dict[str, "NetworkAPI"]:
return {
network_name: network_class(name=network_name, ecosystem=self)
for _, (ecosystem_name, network_name, network_class) in self.plugin_manager.networks
if ecosystem_name == self.name
}
@cached_property
def _networks_from_evmchains(self) -> dict[str, "NetworkAPI"]:
# NOTE: Purposely exclude plugins here so we also prefer plugins.
networks = {
network_name: create_network_type(data["chainId"], data["chainId"])(
name=network_name, ecosystem=self
)
for network_name, data in PUBLIC_CHAIN_META.get(self.name, {}).items()
if network_name not in self._networks_from_plugins
}
forked_networks: dict[str, ForkedNetworkAPI] = {}
for network_name, network in networks.items():
if network_name.endswith("-fork"):
# Already a fork.
continue
fork_network_name = f"{network_name}-fork"
if any(x == fork_network_name for x in networks):
# The forked version of this network is already known.
continue
forked_networks[fork_network_name] = ForkedNetworkAPI(
name=fork_network_name, ecosystem=self
)
return {**networks, **forked_networks}
@property
def _custom_networks(self) -> dict[str, "NetworkAPI"]:
"""
Networks from config.
"""
networks: dict[str, NetworkAPI] = {}
custom_networks: list[dict] = [
n
for n in self.network_manager.custom_networks
if n.get("ecosystem", self.network_manager.default_ecosystem.name) == self.name
]
# Ensure forks are added automatically for custom networks.
forked_custom_networks = []
for net in custom_networks:
if net["name"].endswith("-fork"):
# Already a fork.
continue
fork_network_name = f"{net['name']}-fork"
if any(x["name"] == fork_network_name for x in custom_networks):
# The forked version of this network is already known.
continue
# Create a forked network mirroring the custom network.
forked_net = copy.deepcopy(net)
forked_net["name"] = fork_network_name
forked_custom_networks.append(forked_net)
# NOTE: Forked custom networks are still custom networks.
custom_networks.extend(forked_custom_networks)
for custom_net in custom_networks:
model_data = copy.deepcopy(custom_net)
net_name = custom_net["name"]
if net_name in networks:
raise NetworkError(
f"More than one network named '{net_name}' in ecosystem '{self.name}'."
)
is_fork = net_name.endswith("-fork")
model_data["ecosystem"] = self
network_type = create_network_type(
custom_net["chain_id"], custom_net["chain_id"], is_fork=is_fork
)
if "request_header" not in model_data:
model_data["request_header"] = self.request_header
network_api = network_type.model_validate(model_data)
network_api._default_provider = custom_net.get("default_provider", "node")
network_api._is_custom = True
networks[net_name] = network_api
return networks
def __post_init__(self):
if len(self.networks) == 0:
raise NetworkError("Must define at least one network in ecosystem")
def __ape_extra_attributes__(self) -> Iterator[ExtraModelAttributes]:
yield ExtraModelAttributes(
name="networks",
attributes=lambda: self.networks,
include_getattr=True,
include_getitem=True,
)
def add_network(self, network_name: str, network: "NetworkAPI"):
"""
Attach a new network to an ecosystem (e.g. L2 networks like Optimism).
Raises:
:class:`~ape.exceptions.NetworkError`: When the network already exists.
Args:
network_name (str): The name of the network to add.
network (:class:`~ape.api.networks.NetworkAPI`): The network to add.
"""
if network_name in self.networks:
raise NetworkError(f"Unable to overwrite existing network '{network_name}'.")
else:
self.networks[network_name] = network
@property
def default_network_name(self) -> str:
"""
The name of the default network in this ecosystem.
Returns:
str
"""
if network := self._default_network:
# Was set programmatically.
return network
networks = self.networks
if network := self.config.get("default_network"):
            # Default found in config. Ensure it is an installed network.
if network in networks:
return network
if LOCAL_NETWORK_NAME in networks:
# Default to the LOCAL_NETWORK_NAME, at last resort.
return LOCAL_NETWORK_NAME
elif len(networks) >= 1:
# Use the first network.
key = next(iter(networks.keys()))
return networks[key].name
# Very unlikely scenario.
raise NetworkError("No networks found.")
@property
def default_network(self) -> "NetworkAPI":
return self.get_network(self.default_network_name)
def set_default_network(self, network_name: str):
"""
Change the default network.
Raises:
:class:`~ape.exceptions.NetworkError`: When the network does not exist.
Args:
network_name (str): The name of the default network to switch to.
"""
if network_name in self.networks:
self._default_network = network_name
else:
raise NetworkNotFoundError(network_name, ecosystem=self.name, options=self.networks)
@abstractmethod
def encode_deployment(
self, deployment_bytecode: HexBytes, abi: "ConstructorABI", *args, **kwargs
) -> "TransactionAPI":
"""
Create a deployment transaction in the given ecosystem.
This may require connecting to other networks.
Args:
deployment_bytecode (HexBytes): The bytecode to deploy.
abi (ConstructorABI): The constructor interface of the contract.
*args (Any): Constructor arguments.
**kwargs (Any): Transaction arguments.
Returns:
class:`~ape.api.transactions.TransactionAPI`
"""
@abstractmethod
def encode_transaction(
self, address: AddressType, abi: "MethodABI", *args, **kwargs
) -> "TransactionAPI":
"""
Encode a transaction object from a contract function's ABI and call arguments.
Additionally, update the transaction arguments with the overrides in ``kwargs``.
Args:
address (:class:`~ape.types.address.AddressType`): The address of the contract.
abi (``MethodABI``): The function to call on the contract.
*args (Any): Function arguments.
**kwargs (Any): Transaction arguments.
Returns:
class:`~ape.api.transactions.TransactionAPI`
"""
@abstractmethod
def decode_logs(self, logs: Sequence[dict], *events: "EventABI") -> Iterator["ContractLog"]:
"""
Decode any contract logs that match the given event ABI from the raw log data.
Args:
logs (Sequence[dict]): A list of raw log data from the chain.
*events (EventABI): Event definitions to decode.
Returns:
Iterator[:class:`~ape.types.ContractLog`]
"""
@raises_not_implemented
def decode_primitive_value( # type: ignore[empty-body]
self, value: Any, output_type: Union[str, tuple, list]
) -> Union[str, HexBytes, tuple]:
"""
Decode a primitive value-type given its ABI type as a ``str``
and the value itself. This method is a hook for converting
addresses, HexBytes, or other primitive data-types into
friendlier Python equivalents.
Args:
value (Any): The value to decode.
output_type (Union[str, tuple, list]): The value type.
Returns:
Union[str, HexBytes, tuple]
"""
@abstractmethod
def create_transaction(self, **kwargs) -> "TransactionAPI":
"""
Create a transaction using key-value arguments.
Args:
**kwargs: Everything the transaction needs initialize.
Returns:
class:`~ape.api.transactions.TransactionAPI`
"""
@abstractmethod
def decode_calldata(self, abi: Union["ConstructorABI", "MethodABI"], calldata: bytes) -> dict:
"""
Decode method calldata.
Args:
abi (Union[ConstructorABI, MethodABI]): The method called.
calldata (bytes): The raw calldata bytes.
Returns:
Dict: A mapping of input names to decoded values.
If an input is anonymous, it should use the stringified
index of the input as the key.
"""
@abstractmethod
def encode_calldata(self, abi: Union["ConstructorABI", "MethodABI"], *args: Any) -> HexBytes:
"""
Encode method calldata.
Args:
abi (Union[ConstructorABI, MethodABI]): The ABI of the method called.
*args (Any): The arguments given to the method.
Returns:
HexBytes: The encoded calldata of the arguments to the given method.
"""
@abstractmethod
def decode_returndata(self, abi: "MethodABI", raw_data: bytes) -> Any:
"""
Get the result of a contract call.
        Args:
abi (MethodABI): The method called.
raw_data (bytes): Raw returned data.
Returns:
Any: All of the values returned from the contract function.
"""
@raises_not_implemented
def get_deployment_address( # type: ignore[empty-body]
self,
address: AddressType,
nonce: int,
) -> AddressType:
"""
Calculate the deployment address of a contract before it is deployed.
This is useful if the address is an argument to another contract's deployment
and you have not yet deployed the first contract yet.
"""
def get_network(self, network_name: str) -> "NetworkAPI":
"""
Get the network for the given name.
Args:
network_name (str): The name of the network to get.
Raises:
:class:`~ape.exceptions.NetworkNotFoundError`: When the network is not present.
Returns:
:class:`~ape.api.networks.NetworkAPI`
"""
names = {network_name, network_name.replace("-", "_"), network_name.replace("_", "-")}
networks = self.networks
for name in names:
if name in networks:
return networks[name]
elif name == "custom":
# Is an adhoc-custom network NOT from config.
return self.custom_network
raise NetworkNotFoundError(network_name, ecosystem=self.name, options=networks)
def get_network_data(
self, network_name: str, provider_filter: Optional[Collection[str]] = None
) -> dict:
"""
Get a dictionary of data about providers in the network.
**NOTE**: The keys are added in an opinionated order for nicely
translating into ``yaml``.
Args:
network_name (str): The name of the network to get provider data from.
provider_filter (Optional[Collection[str]]): Optionally filter the providers
by name.
Returns:
dict: A dictionary containing the providers in a network.
"""
data: dict[str, Any] = {"name": str(network_name)}
# Only add isDefault key when True
if network_name == self.default_network_name:
data["isDefault"] = True
data["providers"] = []
network = self[network_name]
if network.explorer:
data["explorer"] = str(network.explorer.name)
for provider_name in network.providers:
if provider_filter and provider_name not in provider_filter:
continue
provider_data: dict = {"name": str(provider_name)}
# Only add isDefault key when True
if provider_name == network.default_provider_name:
provider_data["isDefault"] = True
data["providers"].append(provider_data)
return data
def get_proxy_info(self, address: AddressType) -> Optional[ProxyInfoAPI]:
"""
Information about a proxy contract such as proxy type and implementation address.
Args:
address (:class:`~ape.types.address.AddressType`): The address of the contract.
Returns:
Optional[:class:`~ape.api.networks.ProxyInfoAPI`]: Returns ``None`` if the contract
does not use any known proxy pattern.
"""
return None
def get_method_selector(self, abi: "MethodABI") -> HexBytes:
"""
Get a contract method selector, typically via hashing such as ``keccak``.
Defaults to using ``keccak`` but can be overridden in different ecosystems.
Override example::
from ape.api import EcosystemAPI
from eth_pydantic_types import HexBytes
class MyEcosystem(EcosystemAPI):
def get_method_selector(self, abi: MethodABI) -> HexBytes:
return HexBytes(abi.selector.encode()) # Simple bytes selector
Args:
abi (MethodABI): The ABI object to use when calculating the
selector bytes.
Returns:
HexBytes: The hashed method selector value.
"""
return HexBytes(keccak(text=abi.selector)[:4])
def enrich_trace(self, trace: "TraceAPI", **kwargs) -> "TraceAPI":
"""
Enhance the data in the call tree using information about the ecosystem.
Args:
trace (:class:`~ape.api.trace.TraceAPI`): The trace to enrich.
**kwargs: Additional kwargs to control enrichment, defined at the
plugin level.
Returns:
:class:`~ape.api.trace.TraceAPI`
"""
return trace
@raises_not_implemented
def get_python_types( # type: ignore[empty-body]
self, abi_type: "ABIType"
) -> Union[type, Sequence]:
"""
Get the Python types for a given ABI type.
Args:
abi_type (``ABIType``): The ABI type to get the Python types for.
Returns:
Union[Type, Sequence]: The Python types for the given ABI type.
"""
@raises_not_implemented
def decode_custom_error(
self,
data: HexBytes,
address: AddressType,
**kwargs,
) -> Optional[CustomError]:
"""
Decode a custom error class from an ABI defined in a contract.
Args:
data (HexBytes): The error data containing the selector
and input data.
address (AddressType): The address of the contract containing
the error.
**kwargs: Additional init kwargs for the custom error class.
Returns:
Optional[CustomError]: If it able to decode one, else ``None``.
"""
def _get_request_headers(self) -> RPCHeaders:
# Internal helper method called by NetworkManager
headers = RPCHeaders(**self.request_header)
# Have to do it this way to avoid "multiple-keys" error.
configured_headers: dict = self.config.get("request_headers", {})
for key, value in configured_headers.items():
headers[key] = value
return headers
| EcosystemAPI |
python | joke2k__faker | tests/providers/test_date_time.py | {
"start": 2577,
"end": 2883
} | class ____(tzinfo):
"""
UTC implementation taken from Python's docs.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return timedelta(0)
utc = UTC()
| UTC |
python | getsentry__sentry | src/sentry/rules/conditions/event_attribute.py | {
"start": 10787,
"end": 11102
} | class ____(AttributeHandler):
minimum_path_length = 2
@classmethod
def _handle(cls, path: list[str], event: GroupEvent) -> list[str]:
if path[1] != "name":
return []
return [event.data.get("sdk", {}).get(path[1])]
@attribute_registry.register("stacktrace")
| SdkAttributeHandler |
python | pytorch__pytorch | torch/_export/db/examples/fn_with_kwargs.py | {
"start": 41,
"end": 731
} | class ____(torch.nn.Module):
"""
Keyword arguments are not supported at the moment.
"""
def forward(self, pos0, tuple0, *myargs, mykw0, **mykwargs):
out = pos0
for arg in tuple0:
out = out * arg
for arg in myargs:
out = out * arg
out = out * mykw0
out = out * mykwargs["input0"] * mykwargs["input1"]
return out
example_args = (
torch.randn(4),
(torch.randn(4), torch.randn(4)),
*[torch.randn(4), torch.randn(4)]
)
example_kwargs = {
"mykw0": torch.randn(4),
"input0": torch.randn(4),
"input1": torch.randn(4),
}
tags = {"python.data-structure"}
model = FnWithKwargs()
| FnWithKwargs |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/sparse_ops/sparse_tensor_dense_matmul_op_d9m_test.py | {
"start": 3860,
"end": 4892
} | class ____(test.TestCase):
"""Test that SparseTensorDenseMatul operates reproducibly (on CPU only)."""
@test_util.run_in_graph_and_eager_modes
def testForward(self):
for data_type in [
np.float16, np.float32, np.float64, np.complex64, np.complex128
]: # skipping int32 and bfloat16
sparse_input, dense_input = _gen_data(
m=2430,
k=615,
n=857,
nnz=(1 << 16) + 243,
row_occupied_rate=0.02,
data_type=data_type,
seed=123)
with self.session(), test_util.force_cpu():
result_a = sparse_ops.sparse_tensor_dense_matmul(
sparse_input, dense_input)
for _ in range(5):
result_b = sparse_ops.sparse_tensor_dense_matmul(
sparse_input, dense_input)
self.assertAllEqual(result_a, result_b)
if __name__ == "__main__":
# TODO(reedwm): Merge this file with sparse_tensor_dense_matmul_test.py
config.enable_op_determinism()
test.main()
| SparseTensorDenseMatmulOpDeterministicTest |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/stateful.py | {
"start": 11807,
"end": 20702
} | class ____(metaclass=StateMachineMeta):
"""A RuleBasedStateMachine gives you a structured way to define state machines.
The idea is that a state machine carries the system under test and some supporting
data. This data can be stored in instance variables or
divided into Bundles. The state machine has a set of rules which may read data
from bundles (or just from normal strategies), push data onto
bundles, change the state of the machine, or verify properties.
At any given point a random applicable rule will be executed.
"""
_setup_state_per_class: ClassVar[dict[type, _SetupState]] = {}
def __init__(self) -> None:
setup_state = self.setup_state()
if not setup_state.rules:
raise InvalidDefinition(
f"State machine {type(self).__name__} defines no rules"
)
if isinstance(s := vars(type(self)).get("settings"), Settings):
tname = type(self).__name__
descr = f"settings({s.show_changed()})"
raise InvalidDefinition(
f"Assigning settings = {descr} as a class attribute does nothing. "
f"Assign to {tname}.TestCase.settings, or use @{descr} as a decorator "
f"on the {tname} class."
)
self.rules = setup_state.rules
self.invariants = setup_state.invariants
# copy since we pop from this as we run initialize rules.
self._initialize_rules_to_run = setup_state.initializers.copy()
self.bundles: dict[str, list] = {}
self.names_counters: collections.Counter = collections.Counter()
self.names_list: list[str] = []
self.names_to_values: dict[str, Any] = {}
self.__stream = StringIO()
self.__printer = RepresentationPrinter(
self.__stream, context=_current_build_context.value
)
self._rules_strategy = RuleStrategy(self)
def _pretty_print(self, value):
if isinstance(value, VarReference):
return value.name
elif isinstance(value, list) and all(
isinstance(item, VarReference) for item in value
):
return "[" + ", ".join([item.name for item in value]) + "]"
self.__stream.seek(0)
self.__stream.truncate(0)
self.__printer.output_width = 0
self.__printer.buffer_width = 0
self.__printer.buffer.clear()
self.__printer.pretty(value)
self.__printer.flush()
return self.__stream.getvalue()
def __repr__(self):
return f"{type(self).__name__}({nicerepr(self.bundles)})"
def _new_name(self, target):
result = f"{target}_{self.names_counters[target]}"
self.names_counters[target] += 1
self.names_list.append(result)
return result
def _last_names(self, n: int) -> list[str]:
len_ = len(self.names_list)
assert len_ >= n
return self.names_list[len_ - n :]
def bundle(self, name):
return self.bundles.setdefault(name, [])
@classmethod
def setup_state(cls):
try:
return cls._setup_state_per_class[cls]
except KeyError:
pass
rules: list[Rule] = []
initializers: list[Rule] = []
invariants: list[Invariant] = []
for _name, f in inspect.getmembers(cls):
rule = getattr(f, RULE_MARKER, None)
initializer = getattr(f, INITIALIZE_RULE_MARKER, None)
invariant = getattr(f, INVARIANT_MARKER, None)
if rule is not None:
rules.append(rule)
if initializer is not None:
initializers.append(initializer)
if invariant is not None:
invariants.append(invariant)
if (
getattr(f, PRECONDITIONS_MARKER, None) is not None
and rule is None
and invariant is None
):
raise InvalidDefinition(
f"{_rule_qualname(f)} has been decorated with @precondition, "
"but not @rule (or @invariant), which is not allowed. A "
"precondition must be combined with a rule or an invariant, "
"since it has no effect alone."
)
state = _SetupState(
rules=rules, initializers=initializers, invariants=invariants
)
cls._setup_state_per_class[cls] = state
return state
def _repr_step(self, rule: "Rule", data: Any, result: Any) -> str:
output_assignment = ""
extra_assignment_lines = []
if rule.targets:
number_of_results = (
len(result.values) if isinstance(result, MultipleResults) else 1
)
number_of_last_names = len(rule.targets) * number_of_results
last_names = self._last_names(number_of_last_names)
if isinstance(result, MultipleResults):
if len(result.values) == 1:
# len-1 tuples
output_per_target = [f"({name},)" for name in last_names]
output_assignment = " = ".join(output_per_target) + " = "
elif result.values:
# multiple values, multiple targets -- use the first target
# for the assignment from function, and do the other target
# assignments on separate lines
names_per_target = list(batched(last_names, number_of_results))
first_target_output = ", ".join(names_per_target[0])
output_assignment = first_target_output + " = "
for other_target_names in names_per_target[1:]:
other_target_output = ", ".join(other_target_names)
extra_assignment_lines.append(
other_target_output + " = " + first_target_output
)
else:
output_assignment = " = ".join(last_names) + " = "
args = ", ".join(f"{k}={v}" for k, v in data.items())
output_line = f"{output_assignment}state.{rule.function.__name__}({args})"
return "\n".join([output_line] + extra_assignment_lines)
def _add_results_to_targets(self, targets, results):
# Note, the assignment order here is reflected in _repr_step
for target in targets:
for result in results:
name = self._new_name(target)
def printer(obj, p, cycle, name=name):
return p.text(name)
# see
# https://github.com/HypothesisWorks/hypothesis/pull/4266#discussion_r1949619102
if not _is_singleton(result):
self.__printer.singleton_pprinters.setdefault(id(result), printer)
self.names_to_values[name] = result
self.bundles.setdefault(target, []).append(VarReference(name))
def check_invariants(self, settings, output, runtimes):
for invar in self.invariants:
if self._initialize_rules_to_run and not invar.check_during_init:
continue
if not all(precond(self) for precond in invar.preconditions):
continue
name = invar.function.__name__
if (
current_build_context().is_final
or settings.verbosity >= Verbosity.debug
or observability_enabled()
):
output(f"state.{name}()")
start = perf_counter()
result = invar.function(self)
runtimes[f"execute:invariant:{name}"] += perf_counter() - start
if result is not None:
fail_health_check(
settings,
"The return value of an @invariant is always ignored, but "
f"{invar.function.__qualname__} returned {result!r} "
"instead of None",
HealthCheck.return_value,
)
def teardown(self):
"""Called after a run has finished executing to clean up any necessary
state.
Does nothing by default.
"""
TestCase = TestCaseProperty()
@classmethod
@lru_cache
def _to_test_case(cls):
class StateMachineTestCase(TestCase):
settings = Settings(deadline=None, suppress_health_check=list(HealthCheck))
def runTest(self):
run_state_machine_as_test(cls, settings=self.settings)
runTest.is_hypothesis_test = True
runTest._hypothesis_state_machine_class = cls
StateMachineTestCase.__name__ = cls.__name__ + ".TestCase"
StateMachineTestCase.__qualname__ = cls.__qualname__ + ".TestCase"
return StateMachineTestCase
@dataclass(slots=True, frozen=False)
| RuleBasedStateMachine |
python | ray-project__ray | python/ray/tune/stopper/experiment_plateau.py | {
"start": 121,
"end": 3208
} | class ____(Stopper):
"""Early stop the experiment when a metric plateaued across trials.
Stops the entire experiment when the metric has plateaued
for more than the given amount of iterations specified in
the patience parameter.
Args:
metric: The metric to be monitored.
std: The minimal standard deviation after which
the tuning process has to stop.
top: The number of best models to consider.
mode: The mode to select the top results.
Can either be "min" or "max".
patience: Number of epochs to wait for
a change in the top models.
Raises:
ValueError: If the mode parameter is not "min" nor "max".
ValueError: If the top parameter is not an integer
greater than 1.
ValueError: If the standard deviation parameter is not
a strictly positive float.
ValueError: If the patience parameter is not
a strictly positive integer.
"""
def __init__(
self,
metric: str,
std: float = 0.001,
top: int = 10,
mode: str = "min",
patience: int = 0,
):
if mode not in ("min", "max"):
raise ValueError("The mode parameter can only be either min or max.")
if not isinstance(top, int) or top <= 1:
raise ValueError(
"Top results to consider must be"
" a positive integer greater than one."
)
if not isinstance(patience, int) or patience < 0:
raise ValueError("Patience must be a strictly positive integer.")
if not isinstance(std, float) or std <= 0:
raise ValueError(
"The standard deviation must be a strictly positive float number."
)
self._mode = mode
self._metric = metric
self._patience = patience
self._iterations = 0
self._std = std
self._top = top
self._top_values = []
def __call__(self, trial_id, result):
"""Return a boolean representing if the tuning has to stop."""
self._top_values.append(result[self._metric])
if self._mode == "min":
self._top_values = sorted(self._top_values)[: self._top]
else:
self._top_values = sorted(self._top_values)[-self._top :]
# If the current iteration has to stop
if self.has_plateaued():
# we increment the total counter of iterations
self._iterations += 1
else:
# otherwise we reset the counter
self._iterations = 0
# and then call the method that re-executes
# the checks, including the iterations.
return self.stop_all()
def has_plateaued(self):
return (
len(self._top_values) == self._top and np.std(self._top_values) <= self._std
)
def stop_all(self):
"""Return whether to stop and prevent trials from starting."""
return self.has_plateaued() and self._iterations >= self._patience
| ExperimentPlateauStopper |
python | scipy__scipy | scipy/interpolate/tests/test_bsplines.py | {
"start": 36347,
"end": 45276
} | class ____:
#
# Test that FITPACK-based spl* functions can deal with BSpline objects
#
def setup_method(self):
xx = np.linspace(0, 4.*np.pi, 41)
yy = np.cos(xx)
b = make_interp_spline(xx, yy)
self.tck = (b.t, b.c, b.k)
self.xx, self.yy, self.b = xx, yy, b
self.xnew = np.linspace(0, 4.*np.pi, 21)
c2 = np.c_[b.c, b.c, b.c]
self.c2 = np.dstack((c2, c2))
self.b2 = BSpline(b.t, self.c2, b.k)
def test_splev(self):
xnew, b, b2 = self.xnew, self.b, self.b2
# check that splev works with 1-D array of coefficients
# for array and scalar `x`
xp_assert_close(splev(xnew, b),
b(xnew), atol=1e-15, rtol=1e-15)
xp_assert_close(splev(xnew, b.tck),
b(xnew), atol=1e-15, rtol=1e-15)
xp_assert_close(np.asarray([splev(x, b) for x in xnew]),
b(xnew), atol=1e-15, rtol=1e-15)
# With N-D coefficients, there's a quirck:
# splev(x, BSpline) is equivalent to BSpline(x)
with assert_raises(ValueError, match="Calling splev.. with BSpline"):
splev(xnew, b2)
# However, splev(x, BSpline.tck) needs some transposes. This is because
# BSpline interpolates along the first axis, while the legacy FITPACK
# wrapper does list(map(...)) which effectively interpolates along the
# last axis. Like so:
sh = tuple(range(1, b2.c.ndim)) + (0,) # sh = (1, 2, 0)
cc = b2.c.transpose(sh)
tck = (b2.t, cc, b2.k)
xp_assert_close(np.asarray(splev(xnew, tck)),
b2(xnew).transpose(sh), atol=1e-15, rtol=1e-15)
def test_splrep(self):
x, y = self.xx, self.yy
# test that "new" splrep is equivalent to _impl.splrep
tck = splrep(x, y)
t, c, k = _impl.splrep(x, y)
xp_assert_close(tck[0], t, atol=1e-15)
xp_assert_close(tck[1], c, atol=1e-15)
assert tck[2] == k
# also cover the `full_output=True` branch
tck_f, _, _, _ = splrep(x, y, full_output=True)
xp_assert_close(tck_f[0], t, atol=1e-15)
xp_assert_close(tck_f[1], c, atol=1e-15)
assert tck_f[2] == k
# test that the result of splrep roundtrips with splev:
# evaluate the spline on the original `x` points
yy = splev(x, tck)
xp_assert_close(y, yy, atol=1e-15)
# ... and also it roundtrips if wrapped in a BSpline
b = BSpline(*tck)
xp_assert_close(y, b(x), atol=1e-15)
def test_splrep_errors(self):
# test that both "old" and "new" splrep raise for an N-D ``y`` array
# with n > 1
x, y = self.xx, self.yy
y2 = np.c_[y, y]
with assert_raises(ValueError):
splrep(x, y2)
with assert_raises(ValueError):
_impl.splrep(x, y2)
# input below minimum size
with assert_raises(TypeError, match="m > k must hold"):
splrep(x[:3], y[:3])
with assert_raises(TypeError, match="m > k must hold"):
_impl.splrep(x[:3], y[:3])
def test_splprep(self):
x = np.arange(15, dtype=np.float64).reshape((3, 5))
b, u = splprep(x)
tck, u1 = _impl.splprep(x)
# test the roundtrip with splev for both "old" and "new" output
xp_assert_close(u, u1, atol=1e-15)
xp_assert_close(np.asarray(splev(u, b)), x, atol=1e-15)
xp_assert_close(np.asarray(splev(u, tck)), x, atol=1e-15)
# cover the ``full_output=True`` branch
(b_f, u_f), _, _, _ = splprep(x, s=0, full_output=True)
xp_assert_close(u, u_f, atol=1e-15)
xp_assert_close(np.asarray(splev(u_f, b_f)), x, atol=1e-15)
def test_splprep_errors(self):
# test that both "old" and "new" code paths raise for x.ndim > 2
x = np.arange(3*4*5).reshape((3, 4, 5))
with assert_raises(ValueError, match="too many values to unpack"):
splprep(x)
with assert_raises(ValueError, match="too many values to unpack"):
_impl.splprep(x)
# input below minimum size
x = np.linspace(0, 40, num=3)
with assert_raises(TypeError, match="m > k must hold"):
splprep([x])
with assert_raises(TypeError, match="m > k must hold"):
_impl.splprep([x])
# automatically calculated parameters are non-increasing
# see gh-7589
x = [-50.49072266, -50.49072266, -54.49072266, -54.49072266]
with assert_raises(ValueError, match="Invalid inputs"):
splprep([x])
with assert_raises(ValueError, match="Invalid inputs"):
_impl.splprep([x])
# given non-increasing parameter values u
x = [1, 3, 2, 4]
u = [0, 0.3, 0.2, 1]
with assert_raises(ValueError, match="Invalid inputs"):
splprep(*[[x], None, u])
def test_sproot(self):
b, b2 = self.b, self.b2
roots = np.array([0.5, 1.5, 2.5, 3.5])*np.pi
# sproot accepts a BSpline obj w/ 1-D coef array
xp_assert_close(sproot(b), roots, atol=1e-7, rtol=1e-7)
xp_assert_close(sproot((b.t, b.c, b.k)), roots, atol=1e-7, rtol=1e-7)
# ... and deals with trailing dimensions if coef array is N-D
with assert_raises(ValueError, match="Calling sproot.. with BSpline"):
sproot(b2, mest=50)
# and legacy behavior is preserved for a tck tuple w/ N-D coef
c2r = b2.c.transpose(1, 2, 0)
rr = np.asarray(sproot((b2.t, c2r, b2.k), mest=50))
assert rr.shape == (3, 2, 4)
xp_assert_close(rr - roots, np.zeros_like(rr), atol=1e-12)
def test_splint(self):
# test that splint accepts BSpline objects
b, b2 = self.b, self.b2
xp_assert_close(splint(0, 1, b),
splint(0, 1, b.tck), atol=1e-14, check_0d=False)
xp_assert_close(splint(0, 1, b),
b.integrate(0, 1), atol=1e-14, check_0d=False)
# ... and deals with N-D arrays of coefficients
with assert_raises(ValueError, match="Calling splint.. with BSpline"):
splint(0, 1, b2)
# and the legacy behavior is preserved for a tck tuple w/ N-D coef
c2r = b2.c.transpose(1, 2, 0)
integr = np.asarray(splint(0, 1, (b2.t, c2r, b2.k)))
assert integr.shape == (3, 2)
xp_assert_close(integr,
splint(0, 1, b), atol=1e-14, check_shape=False)
def test_splder(self):
for b in [self.b, self.b2]:
# pad the c array (FITPACK convention)
ct = len(b.t) - len(b.c)
b_c = b.c.copy()
if ct > 0:
b_c = np.r_[b_c, np.zeros((ct,) + b_c.shape[1:])]
for n in [1, 2, 3]:
bd = splder(b)
tck_d = _impl.splder((b.t.copy(), b_c, b.k))
xp_assert_close(bd.t, tck_d[0], atol=1e-15)
xp_assert_close(bd.c, tck_d[1], atol=1e-15)
assert bd.k == tck_d[2]
assert isinstance(bd, BSpline)
assert isinstance(tck_d, tuple) # back-compat: tck in and out
def test_splantider(self):
for b in [self.b, self.b2]:
# pad the c array (FITPACK convention)
ct = len(b.t) - len(b.c)
b_c = b.c.copy()
if ct > 0:
b_c = np.r_[b_c, np.zeros((ct,) + b_c.shape[1:])]
for n in [1, 2, 3]:
bd = splantider(b)
tck_d = _impl.splantider((b.t.copy(), b_c, b.k))
xp_assert_close(bd.t, tck_d[0], atol=1e-15)
xp_assert_close(bd.c, tck_d[1], atol=1e-15)
assert bd.k == tck_d[2]
assert isinstance(bd, BSpline)
assert isinstance(tck_d, tuple) # back-compat: tck in and out
def test_insert(self):
b, b2, xx = self.b, self.b2, self.xx
j = b.t.size // 2
tn = 0.5*(b.t[j] + b.t[j+1])
bn, tck_n = insert(tn, b), insert(tn, (b.t, b.c, b.k))
xp_assert_close(splev(xx, bn),
splev(xx, tck_n), atol=1e-15)
assert isinstance(bn, BSpline)
assert isinstance(tck_n, tuple) # back-compat: tck in, tck out
# for N-D array of coefficients, BSpline.c needs to be transposed
# after that, the results are equivalent.
sh = tuple(range(b2.c.ndim))
c_ = b2.c.transpose(sh[1:] + (0,))
tck_n2 = insert(tn, (b2.t, c_, b2.k))
bn2 = insert(tn, b2)
# need a transpose for comparing the results, cf test_splev
xp_assert_close(np.asarray(splev(xx, tck_n2)).transpose(2, 0, 1),
bn2(xx), atol=1e-15)
assert isinstance(bn2, BSpline)
assert isinstance(tck_n2, tuple) # back-compat: tck in, tck out
@make_xp_test_case(make_interp_spline)
| TestInterop |
python | pydantic__pydantic | pydantic/warnings.py | {
"start": 1951,
"end": 2269
} | class ____(PydanticDeprecationWarning):
"""A specific `PydanticDeprecationWarning` subclass defining functionality deprecated since Pydantic 2.0."""
def __init__(self, message: str, *args: object) -> None:
super().__init__(message, *args, since=(2, 0), expected_removal=(3, 0))
| PydanticDeprecatedSince20 |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 182384,
"end": 185277
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[3, 3, 3]", L_y_: "f32[3, 3, 3]"):
l_x_ = L_x_
l_y_ = L_y_
_saved_tensors_hooks_disable = torch._C._autograd._saved_tensors_hooks_disable("torch.func.{grad, vjp, jacrev, hessian} don't yet support saved tensor hooks. Please open an issue with your use case."); _saved_tensors_hooks_disable = None
_grad_increment_nesting = torch._C._functorch._grad_increment_nesting(); _grad_increment_nesting = None
child: "f32[3, 3, 3]" = torch._C._functorch._wrap_for_grad(l_x_, 1); l_x_ = None
child_1: "f32[3, 3, 3]" = torch._C._functorch._wrap_for_grad(l_y_, 1); l_y_ = None
set_inplace_requires_grad_allowed = torch._C._functorch.set_inplace_requires_grad_allowed(True); set_inplace_requires_grad_allowed = None
_set_tensor_requires_grad: "f32[3, 3, 3]" = torch._functorch.eager_transforms._set_tensor_requires_grad(child); _set_tensor_requires_grad = None
set_inplace_requires_grad_allowed_1 = torch._C._functorch.set_inplace_requires_grad_allowed(False); set_inplace_requires_grad_allowed_1 = None
set_inplace_requires_grad_allowed_2 = torch._C._functorch.set_inplace_requires_grad_allowed(True); set_inplace_requires_grad_allowed_2 = None
_set_tensor_requires_grad_1: "f32[3, 3, 3]" = torch._functorch.eager_transforms._set_tensor_requires_grad(child_1); _set_tensor_requires_grad_1 = None
set_inplace_requires_grad_allowed_3 = torch._C._functorch.set_inplace_requires_grad_allowed(False); set_inplace_requires_grad_allowed_3 = None
sin: "f32[3, 3, 3]" = child.sin()
add: "f32[3, 3, 3]" = sin + child_1; sin = None
output: "f32[]" = add.sum(); add = None
aux: "f32[3, 3, 3]" = child.cos()
_autograd_grad = torch._functorch.eager_transforms._autograd_grad((output,), [child, child_1], create_graph = True); child = child_1 = None
getitem: "f32[3, 3, 3]" = _autograd_grad[0]
getitem_1: "f32[3, 3, 3]" = _autograd_grad[1]; _autograd_grad = None
_unwrap_for_grad: "f32[3, 3, 3]" = torch._C._functorch._unwrap_for_grad(getitem, 1); getitem = None
_unwrap_for_grad_1: "f32[3, 3, 3]" = torch._C._functorch._unwrap_for_grad(getitem_1, 1); getitem_1 = None
output_1: "f32[]" = torch._C._functorch._unwrap_for_grad(output, 1); output = output_1 = None
aux_1: "f32[3, 3, 3]" = torch._C._functorch._unwrap_for_grad(aux, 1); aux = None
_grad_decrement_nesting = torch._C._functorch._grad_decrement_nesting(); _grad_decrement_nesting = None
_saved_tensors_hooks_enable = torch._C._autograd._saved_tensors_hooks_enable(); _saved_tensors_hooks_enable = None
return (_unwrap_for_grad, _unwrap_for_grad_1, aux_1)
""",
)
self.assertExpectedInline(
actual_tuple_var,
"""\
| GraphModule |
python | FactoryBoy__factory_boy | tests/test_mongoengine.py | {
"start": 667,
"end": 851
} | class ____(MongoEngineFactory):
class Meta:
model = Person
name = factory.Sequence(lambda n: 'name%d' % n)
address = factory.SubFactory(AddressFactory)
| PersonFactory |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/side_channel/outgoing_message.py | {
"start": 122,
"end": 1961
} | class ____:
"""
Utility class for forming the message that is written to a SideChannel.
All data is written in little-endian format using the struct module.
"""
def __init__(self):
"""
Create an OutgoingMessage with an empty buffer.
"""
self.buffer = bytearray()
def write_bool(self, b: bool) -> None:
"""
Append a boolean value.
"""
self.buffer += struct.pack("<?", b)
def write_int32(self, i: int) -> None:
"""
Append an integer value.
"""
self.buffer += struct.pack("<i", i)
def write_float32(self, f: float) -> None:
"""
Append a float value. It will be truncated to 32-bit precision.
"""
self.buffer += struct.pack("<f", f)
def write_float32_list(self, float_list: List[float]) -> None:
"""
Append a list of float values. They will be truncated to 32-bit precision.
"""
self.write_int32(len(float_list))
for f in float_list:
self.write_float32(f)
def write_string(self, s: str) -> None:
"""
Append a string value. Internally, it will be encoded to ascii, and the
encoded length will also be written to the message.
"""
encoded_key = s.encode("ascii")
self.write_int32(len(encoded_key))
self.buffer += encoded_key
def set_raw_bytes(self, buffer: bytearray) -> None:
"""
Set the internal buffer to a new bytearray. This will overwrite any existing data.
:param buffer:
:return:
"""
if self.buffer:
logger.warning(
"Called set_raw_bytes but the message already has been written to. This will overwrite data."
)
self.buffer = bytearray(buffer)
| OutgoingMessage |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_tool_param.py | {
"start": 413,
"end": 661
} | class ____(TypedDict, total=False):
type: Required[Literal["object"]]
properties: Optional[Dict[str, object]]
required: Optional[SequenceNotStr[str]]
InputSchema: TypeAlias = Union[InputSchemaTyped, Dict[str, object]]
| InputSchemaTyped |
python | apache__airflow | providers/standard/src/airflow/providers/standard/operators/branch.py | {
"start": 1302,
"end": 2919
} | class ____(SkipMixin):
"""Utility helper which handles the branching as one-liner."""
def do_branch(
self, context: Context, branches_to_execute: str | Iterable[str] | None
) -> str | Iterable[str] | None:
"""Implement the handling of branching including logging."""
self.log.info("Branch into %s", branches_to_execute)
if branches_to_execute is None:
# When None is returned, skip all downstream tasks
self.skip_all_except(context["ti"], None)
else:
branch_task_ids = self._expand_task_group_roots(context["ti"], branches_to_execute)
self.skip_all_except(context["ti"], branch_task_ids)
return branches_to_execute
def _expand_task_group_roots(
self, ti: RuntimeTaskInstanceProtocol, branches_to_execute: str | Iterable[str]
) -> Iterable[str]:
"""Expand any task group into its root task ids."""
if TYPE_CHECKING:
assert ti.task
task = ti.task
dag = task.dag
if TYPE_CHECKING:
assert dag
if isinstance(branches_to_execute, str) or not isinstance(branches_to_execute, Iterable):
branches_to_execute = [branches_to_execute]
for branch in branches_to_execute:
if branch in dag.task_group_dict:
tg = dag.task_group_dict[branch]
root_ids = [root.task_id for root in tg.roots]
self.log.info("Expanding task group %s into %s", tg.group_id, root_ids)
yield from root_ids
else:
yield branch
| BranchMixIn |
python | doocs__leetcode | lcci/08.02.Robot in a Grid/Solution.py | {
"start": 0,
"end": 543
} | class ____:
def pathWithObstacles(self, obstacleGrid: List[List[int]]) -> List[List[int]]:
def dfs(i, j):
if i >= m or j >= n or obstacleGrid[i][j] == 1:
return False
ans.append([i, j])
obstacleGrid[i][j] = 1
if (i == m - 1 and j == n - 1) or dfs(i + 1, j) or dfs(i, j + 1):
return True
ans.pop()
return False
m, n = len(obstacleGrid), len(obstacleGrid[0])
ans = []
return ans if dfs(0, 0) else []
| Solution |
python | weaviate__weaviate-python-client | weaviate/auth.py | {
"start": 2515,
"end": 3975
} | class ____:
@staticmethod
def api_key(api_key: str) -> _APIKey:
return _APIKey(api_key)
@staticmethod
def client_credentials(
client_secret: str, scope: Optional[SCOPES] = None
) -> _ClientCredentials:
return _ClientCredentials(client_secret, scope)
@staticmethod
def client_password(
username: str, password: str, scope: Optional[SCOPES] = None
) -> _ClientPassword:
return _ClientPassword(username=username, password=password, scope=scope)
@staticmethod
def bearer_token(
access_token: str, expires_in: int = 60, refresh_token: Optional[str] = None
) -> _BearerToken:
return _BearerToken(
access_token=access_token,
expires_in=expires_in,
refresh_token=refresh_token,
)
OidcAuth = Union[_BearerToken, _ClientPassword, _ClientCredentials]
AuthCredentials = Union[OidcAuth, _APIKey]
# required to ease v3 -> v4 transition
AuthApiKey = _APIKey
"""
.. deprecated:: 4.0.0
Use :meth:`~weaviate.auth.Auth.api_key` instead.
"""
AuthBearerToken = _BearerToken
"""
.. deprecated:: 4.0.0
Use :meth:`~weaviate.auth.Auth.bearer_token` instead.
"""
AuthClientCredentials = _ClientCredentials
"""
.. deprecated:: 4.0.0
Use :meth:`~weaviate.auth.Auth.client_credentials` instead.
"""
AuthClientPassword = _ClientPassword
"""
.. deprecated:: 4.0.0
Use :meth:`~weaviate.auth.Auth.client_password` instead.
"""
| Auth |
python | kamyu104__LeetCode-Solutions | Python/basic-calculator.py | {
"start": 1373,
"end": 2396
} | class ____(object):
# @param {string} s
# @return {integer}
def calculate(self, s):
operands, operators = [], []
operand = ""
for i in reversed(xrange(len(s))):
if s[i].isdigit():
operand += s[i]
if i == 0 or not s[i-1].isdigit():
operands.append(int(operand[::-1]))
operand = ""
elif s[i] == ')' or s[i] == '+' or s[i] == '-':
operators.append(s[i])
elif s[i] == '(':
while operators[-1] != ')':
self.compute(operands, operators)
operators.pop()
while operators:
self.compute(operands, operators)
return operands[-1]
def compute(self, operands, operators):
left, right = operands.pop(), operands.pop()
op = operators.pop()
if op == '+':
operands.append(left + right)
elif op == '-':
operands.append(left - right)
| Solution2 |
python | pola-rs__polars | py-polars/src/polars/io/cloud/credential_provider/_providers.py | {
"start": 9782,
"end": 15947
} | class ____(CachingCredentialProvider):
"""
Azure Credential Provider.
Using this requires the `azure-identity` Python package to be installed.
.. warning::
This functionality is considered **unstable**. It may be changed
at any point without it being considered a breaking change.
"""
def __init__(
self,
*,
scopes: list[str] | None = None,
tenant_id: str | None = None,
credential: Any | None = None,
_storage_account: str | None = None,
) -> None:
"""
Initialize a credential provider for Microsoft Azure.
By default, this uses `azure.identity.DefaultAzureCredential()`.
Parameters
----------
scopes
Scopes to pass to `get_token`
tenant_id
Azure tenant ID.
credential
Optionally pass an instantiated Azure credential class to use (e.g.
`azure.identity.DefaultAzureCredential`). The credential class must
have a `get_token()` method.
"""
msg = "`CredentialProviderAzure` functionality is considered unstable"
issue_unstable_warning(msg)
self.account_name = _storage_account
self.scopes = (
scopes if scopes is not None else ["https://storage.azure.com/.default"]
)
self.tenant_id = tenant_id
self.credential = credential
if credential is not None:
# If the user passes a credential class, we just need to ensure it
# has a `get_token()` method.
if not hasattr(credential, "get_token"):
msg = (
f"the provided `credential` object {credential!r} does "
"not have a `get_token()` method."
)
raise ValueError(msg)
# We don't need the module if we are permitted and able to retrieve the
# account key from the Azure CLI.
elif self._try_get_azure_storage_account_credential_if_permitted() is None:
self._ensure_module_availability()
if verbose():
eprint(
"[CredentialProviderAzure]: "
f"{self.account_name = } "
f"{self.tenant_id = } "
f"{self.scopes = } "
)
super().__init__()
def retrieve_credentials_impl(self) -> CredentialProviderFunctionReturn:
"""Fetch the credentials."""
if (
v := self._try_get_azure_storage_account_credential_if_permitted()
) is not None:
return v
import azure.identity
credential = self.credential or azure.identity.DefaultAzureCredential()
token = credential.get_token(*self.scopes, tenant_id=self.tenant_id)
return {
"bearer_token": token.token,
}, token.expires_on
def _try_get_azure_storage_account_credential_if_permitted(
self,
) -> CredentialProviderFunctionReturn | None:
POLARS_AUTO_USE_AZURE_STORAGE_ACCOUNT_KEY = os.getenv(
"POLARS_AUTO_USE_AZURE_STORAGE_ACCOUNT_KEY"
)
verbose = polars._utils.logging.verbose()
if verbose:
eprint(
"[CredentialProviderAzure]: "
f"{self.account_name = } "
f"{POLARS_AUTO_USE_AZURE_STORAGE_ACCOUNT_KEY = }"
)
if (
self.account_name is not None
and POLARS_AUTO_USE_AZURE_STORAGE_ACCOUNT_KEY == "1"
):
try:
creds = {
"account_key": self._get_azure_storage_account_key_az_cli(
self.account_name
)
}
if verbose:
eprint(
"[CredentialProviderAzure]: Retrieved account key from Azure CLI"
)
except Exception as e:
if verbose:
eprint(
f"[CredentialProviderAzure]: Could not retrieve account key from Azure CLI: {e}"
)
else:
return creds, None
return None
@classmethod
def _ensure_module_availability(cls) -> None:
if importlib.util.find_spec("azure.identity") is None:
msg = "azure-identity must be installed to use `CredentialProviderAzure`"
raise ImportError(msg)
@staticmethod
def _extract_adls_uri_storage_account(uri: str) -> str | None:
# "abfss://{CONTAINER}@{STORAGE_ACCOUNT}.dfs.core.windows.net/"
# ^^^^^^^^^^^^^^^^^
try:
return (
uri.split("://", 1)[1]
.split("/", 1)[0]
.split("@", 1)[1]
.split(".dfs.core.windows.net", 1)[0]
)
except IndexError:
return None
@classmethod
def _get_azure_storage_account_key_az_cli(cls, account_name: str) -> str:
# [
# {
# "creationTime": "1970-01-01T00:00:00.000000+00:00",
# "keyName": "key1",
# "permissions": "FULL",
# "value": "..."
# },
# {
# "creationTime": "1970-01-01T00:00:00.000000+00:00",
# "keyName": "key2",
# "permissions": "FULL",
# "value": "..."
# }
# ]
return json.loads(
cls._azcli(
"storage",
"account",
"keys",
"list",
"--output",
"json",
"--account-name",
account_name,
)
)[0]["value"]
@classmethod
def _azcli_version(cls) -> str | None:
try:
return json.loads(cls._azcli("version"))["azure-cli"]
except Exception:
return None
@staticmethod
def _azcli(*args: str) -> bytes:
return subprocess.check_output(
["az", *args] if sys.platform != "win32" else ["cmd", "/C", "az", *args]
)
| CredentialProviderAzure |
python | python-openxml__python-docx | src/docx/styles/style.py | {
"start": 6141,
"end": 7667
} | class ____(CharacterStyle):
"""A paragraph style.
A paragraph style provides both character formatting and paragraph formatting such
as indentation and line-spacing.
"""
def __repr__(self):
return "_ParagraphStyle('%s') id: %s" % (self.name, id(self))
@property
def next_paragraph_style(self):
"""|_ParagraphStyle| object representing the style to be applied automatically
to a new paragraph inserted after a paragraph of this style.
Returns self if no next paragraph style is defined. Assigning |None| or `self`
removes the setting such that new paragraphs are created using this same style.
"""
next_style_elm = self._element.next_style
if next_style_elm is None:
return self
if next_style_elm.type != WD_STYLE_TYPE.PARAGRAPH:
return self
return StyleFactory(next_style_elm)
@next_paragraph_style.setter
def next_paragraph_style(self, style):
if style is None or style.style_id == self.style_id:
self._element._remove_next()
else:
self._element.get_or_add_next().val = style.style_id
@property
def paragraph_format(self):
"""The |ParagraphFormat| object providing access to the paragraph formatting
properties for this style such as indentation."""
return ParagraphFormat(self._element)
# -- just in case someone uses the old name in an extension function --
_ParagraphStyle = ParagraphStyle
| ParagraphStyle |
python | sympy__sympy | sympy/utilities/_compilation/runners.py | {
"start": 243,
"end": 8126
} | class ____:
""" CompilerRunner base class.
Parameters
==========
sources : list of str
Paths to sources.
out : str
flags : iterable of str
Compiler flags.
run_linker : bool
compiler_name_exe : (str, str) tuple
Tuple of compiler name & command to call.
cwd : str
Path of root of relative paths.
include_dirs : list of str
Include directories.
libraries : list of str
Libraries to link against.
library_dirs : list of str
Paths to search for shared libraries.
std : str
Standard string, e.g. ``'c++11'``, ``'c99'``, ``'f2003'``.
define: iterable of strings
macros to define
undef : iterable of strings
macros to undefine
preferred_vendor : string
name of preferred vendor e.g. 'gnu' or 'intel'
Methods
=======
run():
Invoke compilation as a subprocess.
"""
environ_key_compiler: str # e.g. 'CC', 'CXX', ...
environ_key_flags: str # e.g. 'CFLAGS', 'CXXFLAGS', ...
environ_key_ldflags: str = "LDFLAGS" # typically 'LDFLAGS'
# Subclass to vendor/binary dict
compiler_dict: dict[str, str]
# Standards should be a tuple of supported standards
# (first one will be the default)
standards: tuple[None | str, ...]
# Subclass to dict of binary/formater-callback
std_formater: dict[str, Callable[[Optional[str]], str]]
# subclass to be e.g. {'gcc': 'gnu', ...}
compiler_name_vendor_mapping: dict[str, str]
def __init__(self, sources, out, flags=None, run_linker=True, compiler=None, cwd='.',
include_dirs=None, libraries=None, library_dirs=None, std=None, define=None,
undef=None, strict_aliasing=None, preferred_vendor=None, linkline=None, **kwargs):
if isinstance(sources, str):
raise ValueError("Expected argument sources to be a list of strings.")
self.sources = list(sources)
self.out = out
self.flags = flags or []
if os.environ.get(self.environ_key_flags):
self.flags += os.environ[self.environ_key_flags].split()
self.cwd = cwd
if compiler:
self.compiler_name, self.compiler_binary = compiler
elif os.environ.get(self.environ_key_compiler):
self.compiler_binary = os.environ[self.environ_key_compiler]
for k, v in self.compiler_dict.items():
if k in self.compiler_binary:
self.compiler_vendor = k
self.compiler_name = v
break
else:
self.compiler_vendor, self.compiler_name = list(self.compiler_dict.items())[0]
warnings.warn("failed to determine what kind of compiler %s is, assuming %s" %
(self.compiler_binary, self.compiler_name))
else:
# Find a compiler
if preferred_vendor is None:
preferred_vendor = os.environ.get('SYMPY_COMPILER_VENDOR', None)
self.compiler_name, self.compiler_binary, self.compiler_vendor = self.find_compiler(preferred_vendor)
if self.compiler_binary is None:
raise ValueError("No compiler found (searched: {})".format(', '.join(self.compiler_dict.values())))
self.define = define or []
self.undef = undef or []
self.include_dirs = include_dirs or []
self.libraries = libraries or []
self.library_dirs = library_dirs or []
self.std = std or self.standards[0]
self.run_linker = run_linker
if self.run_linker:
# both gnu and intel compilers use '-c' for disabling linker
self.flags = list(filter(lambda x: x != '-c', self.flags))
else:
if '-c' not in self.flags:
self.flags.append('-c')
if self.std:
self.flags.append(self.std_formater[
self.compiler_name](self.std))
self.linkline = (linkline or []) + [lf for lf in map(
str.strip, os.environ.get(self.environ_key_ldflags, "").split()
) if lf != ""]
if strict_aliasing is not None:
nsa_re = re.compile("no-strict-aliasing$")
sa_re = re.compile("strict-aliasing$")
if strict_aliasing is True:
if any(map(nsa_re.match, flags)):
raise CompileError("Strict aliasing cannot be both enforced and disabled")
elif any(map(sa_re.match, flags)):
pass # already enforced
else:
flags.append('-fstrict-aliasing')
elif strict_aliasing is False:
if any(map(nsa_re.match, flags)):
pass # already disabled
else:
if any(map(sa_re.match, flags)):
raise CompileError("Strict aliasing cannot be both enforced and disabled")
else:
flags.append('-fno-strict-aliasing')
else:
msg = "Expected argument strict_aliasing to be True/False, got {}"
raise ValueError(msg.format(strict_aliasing))
@classmethod
def find_compiler(cls, preferred_vendor=None):
""" Identify a suitable C/fortran/other compiler. """
candidates = list(cls.compiler_dict.keys())
if preferred_vendor:
if preferred_vendor in candidates:
candidates = [preferred_vendor]+candidates
else:
raise ValueError("Unknown vendor {}".format(preferred_vendor))
name, path = find_binary_of_command([cls.compiler_dict[x] for x in candidates])
return name, path, cls.compiler_name_vendor_mapping[name]
def cmd(self):
""" List of arguments (str) to be passed to e.g. ``subprocess.Popen``. """
cmd = (
[self.compiler_binary] +
self.flags +
['-U'+x for x in self.undef] +
['-D'+x for x in self.define] +
['-I'+x for x in self.include_dirs] +
self.sources
)
if self.run_linker:
cmd += (['-L'+x for x in self.library_dirs] +
['-l'+x for x in self.libraries] +
self.linkline)
counted = []
for envvar in re.findall(r'\$\{(\w+)\}', ' '.join(cmd)):
if os.getenv(envvar) is None:
if envvar not in counted:
counted.append(envvar)
msg = "Environment variable '{}' undefined.".format(envvar)
raise CompileError(msg)
return cmd
def run(self):
self.flags = unique_list(self.flags)
# Append output flag and name to tail of flags
self.flags.extend(['-o', self.out])
env = os.environ.copy()
env['PWD'] = self.cwd
# NOTE: intel compilers seems to need shell=True
p = subprocess.Popen(' '.join(self.cmd()),
shell=True,
cwd=self.cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
comm = p.communicate()
try:
self.cmd_outerr = comm[0].decode('utf-8')
except UnicodeDecodeError:
self.cmd_outerr = comm[0].decode('iso-8859-1') # win32
self.cmd_returncode = p.returncode
# Error handling
if self.cmd_returncode != 0:
msg = "Error executing '{}' in {} (exited status {}):\n {}\n".format(
' '.join(self.cmd()), self.cwd, str(self.cmd_returncode), self.cmd_outerr
)
raise CompileError(msg)
return self.cmd_outerr, self.cmd_returncode
| CompilerRunner |
python | getsentry__sentry | src/sentry/workflow_engine/migrations/0097_add_unique_constraint_to_datasource.py | {
"start": 155,
"end": 1727
} | class ____(CheckedMigration):
# This flag is used to mark that a migration shouldn't be automatically run in production.
# This should only be used for operations where it's safe to run the migration after your
# code has deployed. So this should not be used for most operations that alter the schema
# of a table.
# Here are some things that make sense to mark as post deployment:
# - Large data migrations. Typically we want these to be run manually so that they can be
# monitored and not block the deploy for a long period of time while they run.
# - Adding indexes to large tables. Since this can take a long time, we'd generally prefer to
# run this outside deployments so that we don't block them. Note that while adding an index
# is a schema change, it's completely safe to run the operation after the code has deployed.
# Once deployed, run these manually via: https://develop.sentry.dev/database-migrations/#migration-deployment
is_post_deployment = True
dependencies = [
("sentry", "1003_group_history_prev_history_safe_removal"),
("workflow_engine", "0096_delete_non_single_written_fire_history"),
]
operations = [
migrations.AddConstraint(
model_name="datasource",
constraint=models.UniqueConstraint(
fields=("type", "source_id"), name="unique_type_source_id"
),
),
migrations.RemoveIndex(
model_name="datasource",
name="workflow_en_type_66eafc_idx",
),
]
| Migration |
python | django__django | tests/inline_formsets/tests.py | {
"start": 4344,
"end": 8565
} | class ____(TestCase):
def test_inline_formset_factory(self):
"""
These should both work without a problem.
"""
inlineformset_factory(Parent, Child, fk_name="mother", fields="__all__")
inlineformset_factory(Parent, Child, fk_name="father", fields="__all__")
def test_exception_on_unspecified_foreign_key(self):
"""
Child has two ForeignKeys to Parent, so if we don't specify which one
to use for the inline formset, we should get an exception.
"""
msg = (
"'inline_formsets.Child' has more than one ForeignKey to "
"'inline_formsets.Parent'."
)
with self.assertRaisesMessage(ValueError, msg):
inlineformset_factory(Parent, Child)
def test_fk_name_not_foreign_key_field_from_child(self):
"""
If we specify fk_name, but it isn't a ForeignKey from the child model
to the parent model, we should get an exception.
"""
msg = "fk_name 'school' is not a ForeignKey to 'inline_formsets.Parent'."
with self.assertRaisesMessage(ValueError, msg):
inlineformset_factory(Parent, Child, fk_name="school")
def test_non_foreign_key_field(self):
"""
If the field specified in fk_name is not a ForeignKey, we should get an
exception.
"""
with self.assertRaisesMessage(
ValueError, "'inline_formsets.Child' has no field named 'test'."
):
inlineformset_factory(Parent, Child, fk_name="test")
def test_any_iterable_allowed_as_argument_to_exclude(self):
# Regression test for #9171.
inlineformset_factory(Parent, Child, exclude=["school"], fk_name="mother")
inlineformset_factory(Parent, Child, exclude=("school",), fk_name="mother")
@skipUnlessDBFeature("allows_auto_pk_0")
def test_zero_primary_key(self):
# Regression test for #21472
poet = Poet.objects.create(id=0, name="test")
poet.poem_set.create(name="test poem")
PoemFormSet = inlineformset_factory(Poet, Poem, fields="__all__", extra=0)
formset = PoemFormSet(None, instance=poet)
self.assertEqual(len(formset.forms), 1)
def test_unsaved_fk_validate_unique(self):
poet = Poet(name="unsaved")
PoemFormSet = inlineformset_factory(Poet, Poem, fields=["name"])
data = {
"poem_set-TOTAL_FORMS": "2",
"poem_set-INITIAL_FORMS": "0",
"poem_set-MAX_NUM_FORMS": "2",
"poem_set-0-name": "Poem",
"poem_set-1-name": "Poem",
}
formset = PoemFormSet(data, instance=poet)
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.non_form_errors(), ["Please correct the duplicate data for name."]
)
def test_fk_not_duplicated_in_form_fields(self):
"""
A foreign key name isn't duplicated in form._meta fields (#21332).
"""
poet = Poet.objects.create(name="test")
poet.poem_set.create(name="first test poem")
poet.poem_set.create(name="second test poem")
poet.poem_set.create(name="third test poem")
PoemFormSet = inlineformset_factory(Poet, Poem, fields=("name",), extra=0)
formset = PoemFormSet(None, instance=poet)
self.assertEqual(len(formset.forms), 3)
self.assertEqual(["name", "poet"], PoemFormSet.form._meta.fields)
def test_fk_in_all_formset_forms(self):
"""
A foreign key field is in Meta for all forms in the formset (#26538).
"""
class PoemModelForm(ModelForm):
def __init__(self, *args, **kwargs):
assert "poet" in self._meta.fields
super().__init__(*args, **kwargs)
poet = Poet.objects.create(name="test")
poet.poem_set.create(name="first test poem")
poet.poem_set.create(name="second test poem")
PoemFormSet = inlineformset_factory(
Poet, Poem, form=PoemModelForm, fields=("name",), extra=0
)
formset = PoemFormSet(None, instance=poet)
formset.forms # Trigger form instantiation to run the assert above.
| InlineFormsetFactoryTest |
python | instagram__MonkeyType | tests/test_stubs.py | {
"start": 12488,
"end": 13336
} | class ____:
def test_render(self):
cm_stub = _func_stub_from_callable(Dummy.a_class_method.__func__)
im_stub = _func_stub_from_callable(Dummy.an_instance_method)
class_stub = ClassStub('Test', function_stubs=(cm_stub, im_stub),
attribute_stubs=[
AttributeStub('foo', int),
AttributeStub('bar', str),
])
expected = '\n'.join([
'class Test:',
' bar: str',
' foo: int',
' @classmethod',
' def a_class_method(cls, foo: Any) -> Optional[frame]: ...',
' def an_instance_method(self, foo: Any, bar: Any) -> Optional[frame]: ...',
])
assert class_stub.render() == expected
| TestClassStub |
python | pytorch__pytorch | torch/_inductor/codegen/common.py | {
"start": 37762,
"end": 48012
} | class ____:
name: str
cpp: Callable[..., str]
# None when not impl in libdevice/triton
triton: Optional[Callable[..., str]] = None
# None when not impl in aten/.../vec
cppvec: Optional[Callable[..., str]] = None
type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND = (
ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
)
halide: Optional[Callable[..., str]] = None
mps: Optional[Callable[..., str]] = None
# NB: if you add a new special function, don't forget to update
# torch._inductor.ops_handler too
pointwise_overrides_data: dict[str, OverridesData] = dict(
airy_ai=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"airy_ai_forward({x})",
name="special_airy_ai",
),
bessel_j0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"bessel_j0_forward({x})",
triton=lambda x: f"libdevice.j0({x})",
name="special_bessel_j0",
),
bessel_j1=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"bessel_j1_forward({x})",
triton=lambda x: f"libdevice.j1({x})",
name="special_bessel_j1",
),
bessel_y0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"bessel_y0_forward({x})",
triton=lambda x: f"libdevice.y0({x})",
name="special_bessel_y0",
),
bessel_y1=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"bessel_y1_forward({x})",
triton=lambda x: f"libdevice.y1({x})",
name="special_bessel_y1",
),
digamma=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"calc_digamma({x})",
cppvec=lambda x: f"{x}.digamma()",
name="digamma",
),
# no cpp nor triton implementation for entr, it is defined as decomposition
# erf, erfc
erfcx=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"calc_erfcx({x})",
triton=lambda x: f"libdevice.erfcx({x})",
name="special_erfcx",
),
fma=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y, z: f"std::fma({x}, {y}, {z})",
cppvec=lambda x, y, z: f"fmadd({x}, {y}, {z})",
triton=lambda x, y, z: f"libdevice.fma({x}, {y}, {z})",
name="fma",
),
# erfinv, exp2, expit, gammaln
igamma=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"calc_igamma({x}, {y})",
name="igamma",
),
igammac=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"calc_igammac({x}, {y})",
name="igammac",
),
gammainc=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"calc_igamma({x}, {y})",
name="special_gammainc",
),
gammaincc=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"calc_igammac({x}, {y})",
name="special_gammaincc",
),
i0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"calc_i0({x})",
triton=lambda x: f"libdevice.cyl_bessel_i0({x})",
cppvec=lambda x: f"{x}.i0()",
name="i0",
),
i0e=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"calc_i0e({x})",
cppvec=lambda x: f"{x}.i0e()",
name="special_i0e",
),
i1=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"calc_i1({x})",
triton=lambda x: f"libdevice.cyl_bessel_i1({x})",
name="special_i1",
),
i1e=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"calc_i1e({x})",
name="special_i1e",
),
log_ndtr=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"calc_log_ndtr({x})",
name="special_log_ndtr",
),
# logit
modified_bessel_i0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"modified_bessel_i0_forward({x})",
triton=lambda x: f"libdevice.cyl_bessel_i0({x})",
name="special_modified_bessel_i0",
),
modified_bessel_i1=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"modified_bessel_i1_forward({x})",
triton=lambda x: f"libdevice.cyl_bessel_i1({x})",
name="special_modified_bessel_i1",
),
modified_bessel_k0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"modified_bessel_k0_forward({x})",
name="special_modified_bessel_k0",
),
modified_bessel_k1=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"modified_bessel_k1_forward({x})",
name="special_modified_bessel_k1",
),
# multigamma
ndtr=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"calc_ndtr({x})",
name="special_ndtr",
),
ndtri=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"calc_ndtri({x})",
name="special_ndtri",
),
polygamma=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x,
y: f"{x} == 0 ? calc_digamma({y}) : ({x} == 1 ? trigamma({y}) : calc_polygamma({y}, {x}))",
name="polygamma",
),
# psi - alias to digamma
# round
scaled_modified_bessel_k0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"scaled_modified_bessel_k0_forward({x})",
name="special_scaled_modified_bessel_k0",
),
scaled_modified_bessel_k1=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"scaled_modified_bessel_k1_forward({x})",
name="special_scaled_modified_bessel_k1",
),
# sinc
spherical_bessel_j0=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x: f"spherical_bessel_j0_forward({x})",
name="special_spherical_bessel_j0",
),
zeta=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"zeta({x}, {y})",
name="special_zeta",
),
chebyshev_polynomial_t=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"chebyshev_polynomial_t_forward({x}, {y})",
name="special_chebyshev_polynomial_t",
),
chebyshev_polynomial_u=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"chebyshev_polynomial_u_forward({x}, {y})",
name="special_chebyshev_polynomial_u",
),
chebyshev_polynomial_v=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"chebyshev_polynomial_v_forward({x}, {y})",
name="special_chebyshev_polynomial_v",
),
chebyshev_polynomial_w=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"chebyshev_polynomial_w_forward({x}, {y})",
name="special_chebyshev_polynomial_w",
),
legendre_polynomial_p=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"legendre_polynomial_p_forward({x}, {y})",
name="special_legendre_polynomial_p",
),
shifted_chebyshev_polynomial_t=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"shifted_chebyshev_polynomial_t_forward({x}, {y})",
name="special_shifted_chebyshev_polynomial_t",
),
shifted_chebyshev_polynomial_u=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"shifted_chebyshev_polynomial_u_forward({x}, {y})",
name="special_shifted_chebyshev_polynomial_u",
),
shifted_chebyshev_polynomial_v=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"shifted_chebyshev_polynomial_v_forward({x}, {y})",
name="special_shifted_chebyshev_polynomial_v",
),
shifted_chebyshev_polynomial_w=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"shifted_chebyshev_polynomial_w_forward({x}, {y})",
name="special_shifted_chebyshev_polynomial_w",
),
hermite_polynomial_h=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"hermite_polynomial_h_forward({x}, {y})",
name="special_hermite_polynomial_h",
),
hermite_polynomial_he=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"hermite_polynomial_he_forward({x}, {y})",
name="special_hermite_polynomial_he",
),
laguerre_polynomial_l=OverridesData(
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
cpp=lambda x, y: f"laguerre_polynomial_l_forward({x}, {y})",
name="special_laguerre_polynomial_l",
),
)
def is_buffer_removed(name: str) -> bool:
return any(
name in x
for x in (
V.graph.removed_buffers,
V.kernel.removed_buffers,
V.graph.inplaced_to_remove,
V.kernel.inplaced_to_remove,
)
)
| OverridesData |
python | django__django | django/contrib/postgres/operations.py | {
"start": 4058,
"end": 5190
} | class ____(NotInTransactionMixin, AddIndex):
"""Create an index using PostgreSQL's CREATE INDEX CONCURRENTLY syntax."""
atomic = False
category = OperationCategory.ADDITION
def describe(self):
return "Concurrently create index %s on field(s) %s of model %s" % (
self.index.name,
", ".join(self.index.fields),
self.model_name,
)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
self._ensure_not_in_transaction(schema_editor)
model = to_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.add_index(model, self.index, concurrently=True)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self._ensure_not_in_transaction(schema_editor)
model = from_state.apps.get_model(app_label, self.model_name)
if self.allow_migrate_model(schema_editor.connection.alias, model):
schema_editor.remove_index(model, self.index, concurrently=True)
| AddIndexConcurrently |
python | milvus-io__pymilvus | pymilvus/exceptions.py | {
"start": 1548,
"end": 1627
} | class ____(MilvusException):
"""Raise when params are incorrect"""
| ParamError |
python | numba__numba | numba/core/pylowering.py | {
"start": 517,
"end": 2273
} | class ____:
"""
A sentinel value for undefined variable created by Expr.undef.
"""
def __repr__(self):
return "<undefined>"
_UNDEFINED = _Undefined()
# Map operators to methods on the PythonAPI class
PYTHON_BINOPMAP = {
operator.add: ("number_add", False),
operator.sub: ("number_subtract", False),
operator.mul: ("number_multiply", False),
operator.truediv: ("number_truedivide", False),
operator.floordiv: ("number_floordivide", False),
operator.mod: ("number_remainder", False),
operator.pow: ("number_power", False),
operator.lshift: ("number_lshift", False),
operator.rshift: ("number_rshift", False),
operator.and_: ("number_and", False),
operator.or_: ("number_or", False),
operator.xor: ("number_xor", False),
# inplace operators
operator.iadd: ("number_add", True),
operator.isub: ("number_subtract", True),
operator.imul: ("number_multiply", True),
operator.itruediv: ("number_truedivide", True),
operator.ifloordiv: ("number_floordivide", True),
operator.imod: ("number_remainder", True),
operator.ipow: ("number_power", True),
operator.ilshift: ("number_lshift", True),
operator.irshift: ("number_rshift", True),
operator.iand: ("number_and", True),
operator.ior: ("number_or", True),
operator.ixor: ("number_xor", True),
}
PYTHON_BINOPMAP[operator.matmul] = ("number_matrix_multiply", False)
PYTHON_BINOPMAP[operator.imatmul] = ("number_matrix_multiply", True)
PYTHON_COMPAREOPMAP = {
operator.eq: '==',
operator.ne: '!=',
operator.lt: '<',
operator.le: '<=',
operator.gt: '>',
operator.ge: '>=',
operator.is_: 'is',
operator.is_not: 'is not',
operator.contains: 'in'
}
| _Undefined |
python | PyCQA__pylint | tests/data/clientmodule_test.py | {
"start": 114,
"end": 546
} | class ____:
""" Ancestor method """
cls_member = DoNothing()
def __init__(self, value):
local_variable = 0
self.attr = 'this method shouldn\'t have a docstring'
self.__value = value
def get_value(self):
""" nice docstring ;-) """
return self.__value
def set_value(self, value):
self.__value = value
return 'this method shouldn\'t have a docstring'
| Ancestor |
python | pytorch__pytorch | test/nn/test_convolution.py | {
"start": 1902,
"end": 52977
} | class ____(NNTestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
def test_conv_backcompat(self):
from torch.serialization import SourceChangeWarning
# This file was generated by running on PyTorch 1.0.1 on Python 2:
#
# import torch
# from torch import nn
# m = nn.Conv2d(1, 1, 1)
# torch.save(m, 'legacy_conv2d.pt')
#
# NB: This Pickle also contains some Unicode data!
path = download_file("https://download.pytorch.org/test_data/legacy_conv2d.pt")
with warnings.catch_warnings():
warnings.simplefilter("ignore", SourceChangeWarning)
# weights_only=False as this is legacy code that saves the model
m = torch.load(path, encoding="utf-8", weights_only=False)
input = torch.randn((1, 1, 1, 1), dtype=torch.float)
self.assertEqual(m(input).size(), (1, 1, 1, 1))
def test_huge_padding(self):
class Conv1dModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv1d(
in_channels=16,
out_channels=32,
kernel_size=3,
stride=1,
padding=9223372036854775803,
)
self.add_module(name="conv1", module=self.conv1)
input_data = torch.randn(1, 16, 100)
model = Conv1dModule()
with self.assertRaisesRegex(
RuntimeError,
r"Given padding=9223372036854775803 at dimension 0 , expected padding to be at most",
):
model.conv1(input_data)
class ConvTransposed1dModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv_transposed1d = nn.ConvTranspose1d(
in_channels=16,
out_channels=32,
kernel_size=3,
stride=2,
padding=9223372036854775803,
)
self.add_module(name="conv_transposed1d", module=self.conv_transposed1d)
input_data = torch.randn(1, 16, 100)
model = ConvTransposed1dModule()
with self.assertRaisesRegex(
RuntimeError,
r"Given padding=9223372036854775803 at dimension 0 , expected padding to be at most",
):
model.conv_transposed1d(input_data)
def test_invalid_conv1d(self):
for dtype in [
torch.half,
torch.bfloat16,
torch.float,
torch.double,
torch.cfloat,
torch.cdouble,
]:
module = nn.Conv1d(
in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True
).to(dtype)
input = torch.randn(1, 3, 4).to(dtype)
with self.assertRaisesRegex(
RuntimeError,
r"Calculated padded input size per channel: \(4\). "
+ r"Kernel size: \(10\). Kernel size can\'t be greater than actual input size",
):
module(input)
# Negative stride check
module = nn.Conv1d(
in_channels=3, out_channels=6, kernel_size=3, stride=-1, bias=True
).to(dtype)
input = torch.randn(1, 3, 4).to(dtype)
with self.assertRaisesRegex(
RuntimeError, "non-positive stride is not supported"
):
module(input)
def test_mismatch_shape_conv2d(self):
for dtype in (torch.float, torch.cfloat):
x = torch.randn(1, 10, 1, 28, 28, dtype=dtype)
w = torch.randn(6, 1, 5, 5, dtype=dtype)
with self.assertRaisesRegex(
RuntimeError,
r"Expected 3D \(unbatched\) or 4D \(batched\) input to conv2d, but got "
+ r"input of size: \[1, 10, 1, 28, 28\]",
):
F.conv2d(x, w)
def test_conv2d_discontiguous_weight(self):
for dtype in (torch.float, torch.cfloat):
# Test for https://github.com/pytorch/pytorch/issues/55781
x = torch.ones(64, 16, 16, 16, dtype=dtype)
weight = (
torch.arange(0, 1.0, 1 / 2.0**10)
.reshape(32, 16, 1, 2)
.to(dtype)[:, :, :, ::2]
)
self.assertFalse(weight.is_contiguous())
y = torch.nn.functional.conv2d(x, weight, None)
if torch.backends.mkldnn.is_available():
# Disable MKLDNN explicitly, so that either NNPACK or THCNN will be used
with torch.backends.mkldnn.flags(enabled=False):
y_ = torch.nn.functional.conv2d(x, weight, None)
self.assertEqual(y, y_)
self.assertEqual(y.sum(), 4186112.0)
def test_invalid_conv2d(self):
for dtype in [
torch.half,
torch.bfloat16,
torch.float,
torch.double,
torch.cfloat,
torch.cdouble,
]:
module = torch.nn.Conv2d(1, 1, kernel_size=3, dilation=2, stride=2).to(
dtype
)
input = torch.empty(1, 1, 4, 4).to(dtype)
self.assertRaises(RuntimeError, lambda: module(input))
module = nn.Conv2d(
in_channels=3, out_channels=33, kernel_size=10, stride=1, bias=True
)
input = torch.randn(1, 3, 1, 1)
with self.assertRaisesRegex(
RuntimeError,
r"Calculated padded input size per channel: \(1 x 1\). "
+ r"Kernel size: \(10 x 10\). Kernel size can\'t be greater than actual input size",
):
module(input)
# Negative stride check
module = nn.Conv2d(
in_channels=3, out_channels=6, kernel_size=4, stride=-1, bias=True
).to(dtype)
input = torch.randn(1, 3, 4, 4).to(dtype)
with self.assertRaisesRegex(
RuntimeError, "non-positive stride is not supported"
):
module(input)
# Zero stride check
module = nn.Conv2d(
in_channels=3, out_channels=6, kernel_size=4, stride=0, bias=True
).to(dtype)
input = torch.randn(1, 3, 4, 4).to(dtype)
with self.assertRaisesRegex(
RuntimeError, "non-positive stride is not supported"
):
module(input)
def test_invalid_conv3d(self):
for dtype in [
torch.half,
torch.bfloat16,
torch.float,
torch.double,
torch.cfloat,
torch.cdouble,
]:
module = torch.nn.Conv3d(1, 1, kernel_size=3, dilation=2, stride=2).to(
dtype
)
input = torch.empty(1, 1, 4, 4, 4).to(dtype)
self.assertRaises(RuntimeError, lambda: module(input))
# Negative stride check
module = torch.nn.Conv3d(1, 1, kernel_size=3, stride=-2)
input = torch.empty(1, 1, 4, 4, 4)
with self.assertRaisesRegex(
RuntimeError, "non-positive stride is not supported"
):
module(input)
def test_conv_invalid_groups(self):
with self.assertRaisesRegex(ValueError, "groups must be a positive integer"):
torch.nn.Conv1d(1, 1, kernel_size=3, dilation=2, stride=2, groups=0)
with self.assertRaisesRegex(ValueError, "groups must be a positive integer"):
torch.nn.Conv2d(1, 1, kernel_size=3, dilation=2, stride=2, groups=-1)
with self.assertRaisesRegex(ValueError, "groups must be a positive integer"):
torch.nn.Conv3d(1, 1, kernel_size=3, dilation=2, stride=2, groups=-2)
def test_conv_aten_invalid_groups(self):
# test low-level aten ops with invalid groups parameter
grad_output = torch.randn(2, 4, 8, dtype=torch.double)
input = torch.randn(2, 5, 8, dtype=torch.double)
weight = torch.randn(5, 4, 3, dtype=torch.double)
bias_sizes = [4]
stride = [1]
padding = [1]
dilation = [1]
transposed = True
output_padding = [0]
output_mask = [True, True, True]
# test groups=0
with self.assertRaisesRegex(
RuntimeError, "expected groups to be greater than 0, but got groups=0"
):
torch.ops.aten.convolution_backward(
grad_output,
input,
weight,
bias_sizes,
stride,
padding,
dilation,
transposed,
output_padding,
0,
output_mask,
)
# test groups=-1
with self.assertRaisesRegex(
RuntimeError, "expected groups to be greater than 0, but got groups=-1"
):
torch.ops.aten.convolution_backward(
grad_output,
input,
weight,
bias_sizes,
stride,
padding,
dilation,
transposed,
output_padding,
-1,
output_mask,
)
def test_conv3d_overflow_values(self):
input = torch.full(
(
0,
7,
9,
1,
5,
),
0,
dtype=torch.float32,
requires_grad=False,
)
weight = torch.full(
(
9,
1,
),
4.14214e16,
dtype=torch.float32,
requires_grad=False,
)
stride = [5, 5, 5]
with self.assertRaisesRegex(ValueError, "Padding height too large"):
torch.ops.aten.slow_conv3d(
input,
weight,
kernel_size=[5, 5, 5],
bias=None,
stride=stride,
padding=[2**62, 2**62, 2**62],
)
with self.assertRaisesRegex(
RuntimeError, "Kernel height x width product is too large:"
):
torch.ops.aten.slow_conv3d(
input,
weight,
kernel_size=[2**32, 2**32, 2**32],
bias=None,
stride=stride,
padding=[2**31, 2**31, 2**31],
)
def test_Conv1d_module_same_padding(self):
# Compare module against functional: without strides/dilation, asymmetric padding
x = torch.rand(1, 1, 20)
module = nn.Conv1d(
in_channels=1, out_channels=1, kernel_size=10, padding="same"
)
expect = F.conv1d(x, module.weight, module.bias, padding="same")
self.assertEqual(expect, module(x))
# Test dilation, symmetric padding
module = nn.Conv1d(
in_channels=1, out_channels=1, kernel_size=10, padding="same", dilation=2
)
expect = F.conv1d(x, module.weight, module.bias, padding="same", dilation=2)
self.assertEqual(expect, module(x))
# Test non-zero padding_mode, requiring explicit padding
module = nn.Conv1d(
in_channels=1,
out_channels=1,
kernel_size=10,
padding="same",
padding_mode="replicate",
)
x_padded = F.pad(x, [4, 5], mode="replicate")
expect = F.conv1d(x_padded, module.weight, module.bias, padding="valid")
self.assertEqual(expect, module(x))
self.assertEqual(x.size(), expect.size())
# Test connstruction with invalid padding string raises
with self.assertRaisesRegex(ValueError, "Invalid padding string"):
module = nn.Conv1d(
in_channels=3, out_channels=33, kernel_size=10, padding="foo"
)
# Test connstruction with same padding and strides raises
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv1d(
in_channels=3, out_channels=33, kernel_size=10, padding="same", stride=2
)
def test_Conv2d_module_same_padding(self):
# Compare module against functional:
# without strides/dilation, both symmetric and asymmetric padding
x = torch.rand(1, 1, 9, 20)
module = nn.Conv2d(
in_channels=1, out_channels=1, kernel_size=(5, 10), padding="same"
)
expect = F.conv2d(x, module.weight, module.bias, padding="same")
self.assertEqual(expect, module(x))
# with dilation, symmetric padding
module = nn.Conv2d(
in_channels=1,
out_channels=1,
kernel_size=(3, 4),
padding="same",
dilation=(1, 2),
)
expect = F.conv2d(
x, module.weight, module.bias, padding="same", dilation=(1, 2)
)
self.assertEqual(expect, module(x))
# Test non-zero padding_mode, requiring explicit padding
module = nn.Conv2d(
in_channels=1,
out_channels=1,
kernel_size=(3, 4),
padding="same",
padding_mode="reflect",
)
x_padded = F.pad(x, [1, 2, 1, 1], mode="reflect")
expect = F.conv2d(x_padded, module.weight, module.bias, padding="valid")
self.assertEqual(expect, module(x))
self.assertEqual(x.size(), expect.size())
# Test connstruction with invalid padding string raises
with self.assertRaisesRegex(ValueError, "Invalid padding string"):
module = nn.Conv2d(
in_channels=3, out_channels=33, kernel_size=10, padding="foo"
)
# Test connstruction with same padding and strides raises
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(
in_channels=3, out_channels=33, kernel_size=10, padding="same", stride=2
)
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(
in_channels=3,
out_channels=33,
kernel_size=10,
padding="same",
stride=(1, 3),
)
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv2d(
in_channels=3,
out_channels=33,
kernel_size=10,
padding="same",
stride=(4, 1),
)
def test_Conv3d_module_same_padding(self):
# Compare module against functional:
x = torch.rand(1, 1, 4, 4, 4)
# without dilation, both symmetric and asymmetric padding
module = nn.Conv3d(
in_channels=1, out_channels=1, kernel_size=(2, 3, 4), padding="same"
)
expect = F.conv3d(x, module.weight, module.bias, padding="same")
self.assertEqual(expect, module(x))
# with dilation, both symmetric and asymmetric padding
module = nn.Conv3d(
in_channels=1,
out_channels=1,
kernel_size=(2, 3, 4),
padding="same",
dilation=(3, 2, 1),
)
expect = F.conv3d(
x, module.weight, module.bias, padding="same", dilation=(3, 2, 1)
)
self.assertEqual(expect, module(x))
# Test non-zero padding_mode, requiring explicit padding
module = nn.Conv3d(
in_channels=1,
out_channels=1,
kernel_size=(2, 3, 4),
padding="same",
padding_mode="circular",
)
x_padded = F.pad(x, [1, 2, 1, 1, 0, 1], mode="circular")
expect = F.conv3d(x_padded, module.weight, module.bias, padding="valid")
self.assertEqual(expect, module(x))
self.assertEqual(x.size(), expect.size())
# Test connstruction with invalid padding string raises
with self.assertRaisesRegex(ValueError, "Invalid padding string"):
module = nn.Conv3d(
in_channels=3, out_channels=33, kernel_size=10, padding="foo"
)
# Test connstruction with same padding and strides raises
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv3d(
in_channels=3, out_channels=33, kernel_size=10, padding="same", stride=2
)
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv3d(
in_channels=3,
out_channels=33,
kernel_size=10,
padding="same",
stride=(1, 1, 3),
)
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv3d(
in_channels=3,
out_channels=33,
kernel_size=10,
padding="same",
stride=(1, 4, 1),
)
with self.assertRaisesRegex(ValueError, "padding='same'"):
module = nn.Conv3d(
in_channels=3,
out_channels=33,
kernel_size=10,
padding="same",
stride=(5, 1, 1),
)
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
def test_thnn_conv_strided_padded_dilated(self):
for convfn, dims, transposed in (
(torch.nn.functional.conv2d, 2, False),
(torch.nn.functional.conv_transpose2d, 2, True),
(torch.nn.functional.conv3d, 3, False),
(torch.nn.functional.conv_transpose3d, 3, True),
):
for stride, padding, dilation in (
(2, 0, 1),
(1, 1, 1),
(2, 1, 1),
(1, 0, 2),
):
kwargs = {"stride": stride, "padding": padding, "dilation": dilation}
inp_shape = (1, 2) + dims * (4,)
weight_shape = (2, 2) + dims * (1,)
inputs = torch.randn(
inp_shape, dtype=torch.double, device="cuda", requires_grad=True
)
weight = torch.randn(
weight_shape, dtype=torch.double, device="cuda", requires_grad=True
)
bias = torch.randn(
2, dtype=torch.double, device="cuda", requires_grad=True
)
with torch.backends.cudnn.flags(enabled=False):
res = convfn(inputs, weight, bias, **kwargs)
res_cpu = convfn(inputs.cpu(), weight.cpu(), bias.cpu(), **kwargs)
self.assertEqual(res, res_cpu)
with torch.backends.cudnn.flags(enabled=False):
torch.autograd.gradcheck(
lambda x, w, b: convfn(x, w, b, **kwargs),
(inputs, weight, bias),
)
torch.autograd.gradcheck(
lambda x, w, b: convfn(x, w, b, **kwargs),
(inputs.cpu(), weight.cpu(), bias.cpu()),
)
def test_Conv2d_inconsistent_types(self):
inputs = torch.randn(4, 1, 7, 7, dtype=torch.float)
weights = torch.randn(1, 1, 3, 3, dtype=torch.double)
# inconsistent types should raise an exception
self.assertRaises(RuntimeError, lambda: nn.functional.conv2d(inputs, weights))
# but it should work with the same type
nn.functional.conv2d(inputs.float(), weights.float())
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
def test_Conv2d_inconsistent_types_on_GPU_without_cudnn(self):
inputs = torch.randn(4, 1, 7, 7, dtype=torch.float, device="cuda")
weights = torch.randn(1, 1, 3, 3, dtype=torch.double, device="cuda")
bias = torch.randn(1, dtype=torch.double, device="cuda")
with torch.backends.cudnn.flags(enabled=False):
# inconsistent types should raise an exception
self.assertRaises(
RuntimeError, lambda: nn.functional.conv2d(inputs, weights)
)
self.assertRaises(
RuntimeError,
lambda: nn.functional.conv2d(inputs, weights.float(), bias),
)
# but it should work with the same type
nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
def test_Conv2d_1x1(self):
in_channels = 2
mod = torch.nn.Conv2d(2, 2, 1, bias=False).to(dtype=torch.double)
input = torch.randn(
1, in_channels, 5, 5, requires_grad=True, dtype=torch.double
)
for enabled in (False, True):
with torch.backends.mkldnn.flags(enabled=enabled):
gradcheck(F.conv2d, (input, mod.weight))
def test_Conv2d_OneDNN(self):
def run_once(group_val=24, dilation=1):
ifm = torch.ones([1, group_val, 6, 6], dtype=torch.float32)
weights = torch.ones([group_val, 1, 3, 3], dtype=torch.float32)
op = torch.nn.Conv2d(
in_channels=group_val,
out_channels=group_val,
kernel_size=[3, 3],
stride=[2, 2],
padding=[1, 1],
dilation=[dilation, dilation],
groups=group_val,
bias=False,
padding_mode="zeros",
)
op.weight.data = weights
res = op(ifm)
grad_in = torch.ones(res.shape, dtype=torch.float32)
res.backward(grad_in)
return op.weight.grad
for gorup_val in (24, 48, 23, 25):
for dilation in (1, 2):
with torch.backends.mkldnn.flags(enabled=False):
without_onednn = run_once(gorup_val, dilation)
with torch.backends.mkldnn.flags(enabled=True):
with_onednn = run_once(gorup_val, dilation)
self.assertEqual(without_onednn, with_onednn)
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
@unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_cudnn_non_contiguous(self):
x = torch.randn(192, 16, 50).cuda()
x = x.permute(0, 2, 1).contiguous().permute(0, 2, 1)
m = torch.nn.Conv1d(
in_channels=16, out_channels=32, kernel_size=2, bias=True
).cuda()
m(x)
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
@unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_cudnn_not_mutate_stride(self):
weight = torch.randn(64, 64, 1, 1)
x = torch.randn(2, 64, 10, 10).to(memory_format=torch.channels_last)
weight_stride = weight.stride()
def conv(x, weight):
return torch.convolution(
x,
weight,
stride=(1, 1),
padding=(0, 0),
dilation=(1, 1),
transposed=False,
output_padding=(0, 0),
groups=1,
bias=None,
)
# should have run in nhwc without mutating input strides
out_nhwc = conv(x, weight)
self.assertEqual(weight.stride(), weight_stride)
self.assertTrue(out_nhwc.is_contiguous(memory_format=torch.channels_last))
x = x.contiguous(memory_format=torch.contiguous_format)
out_c = conv(x, weight)
self.assertTrue(out_c.is_contiguous(memory_format=torch.contiguous_format))
self.assertEqual(out_c, out_nhwc)
self.assertEqual(weight.stride(), weight_stride)
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
@unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
def test_Conv2d_inconsistent_types_on_GPU_with_cudnn(self):
inputs = torch.randn(4, 1, 7, 7, dtype=torch.float, device="cuda")
weights = torch.randn(1, 1, 3, 3, dtype=torch.double, device="cuda")
bias = torch.randn(1, dtype=torch.double, device="cuda")
with torch.backends.cudnn.flags(enabled=True):
# inconsistent types should raise an exception
self.assertRaises(
RuntimeError, lambda: nn.functional.conv2d(inputs, weights)
)
self.assertRaises(
RuntimeError,
lambda: nn.functional.conv2d(inputs, weights.float(), bias),
)
# but it should work with the same type
nn.functional.conv2d(inputs.float(), weights.float(), bias.float())
def test_Conv2d_missing_argument(self):
c = nn.Conv2d(3, 3, 3)
self.assertRaises(TypeError, lambda: c(None))
def test_Conv2d_backward_twice(self):
input = torch.randn(2, 3, 5, 5)
c = nn.Conv2d(3, 3, 3)
o1 = c(input)
o1.sum().backward()
self.assertRaisesRegex(
RuntimeError, "Specify retain_graph=True", lambda: o1.sum().backward()
)
def test_conv_modules_raise_error_on_incorrect_input_size(self):
for dtype in [torch.half, torch.bfloat16, torch.double, torch.float]:
modules = [
nn.Conv1d(3, 8, 3).to(dtype),
nn.ConvTranspose1d(3, 8, 3).to(dtype),
nn.Conv2d(3, 8, 3).to(dtype),
nn.ConvTranspose2d(3, 8, 3).to(dtype),
nn.Conv3d(3, 8, 3).to(dtype),
nn.ConvTranspose3d(3, 8, 3).to(dtype),
]
invalid_input_dims = [(1, 4), (1, 4), (2, 5), (2, 5), (3, 6), (3, 6)]
for invalid_dims, module in zip(invalid_input_dims, modules):
for dims in invalid_dims:
input = torch.empty(torch.Size((3,) * dims))
self.assertRaises(RuntimeError, lambda: module(input))
def test_conv_shapecheck(self):
def test(should_raise, module, input_size, dtype):
input = torch.empty(3, *input_size).to(dtype)
if should_raise:
self.assertRaises(RuntimeError, lambda: module(input))
else:
# just run it to ensure no exception raised.
module(input)
for dtype in [
torch.half,
torch.bfloat16,
torch.float,
torch.double,
torch.cfloat,
torch.cdouble,
]:
# Conv1d
test(True, nn.Conv1d(1, 1, 3).to(dtype), (1, 2), dtype)
test(True, nn.Conv1d(1, 1, 3, stride=2).to(dtype), (1, 2), dtype)
test(False, nn.Conv1d(1, 1, 2).to(dtype), (1, 2), dtype)
test(False, nn.Conv1d(1, 1, 2, stride=2).to(dtype), (1, 2), dtype)
test(
False, nn.Conv1d(1, 1, 3, stride=2, padding=1).to(dtype), (1, 2), dtype
)
# Conv2d
test(True, nn.Conv2d(1, 1, (3, 3)).to(dtype), (1, 2, 2), dtype)
test(False, nn.Conv2d(1, 1, (3, 3)).to(dtype), (1, 3, 3), dtype)
test(False, nn.Conv2d(1, 1, (3, 3), padding=1).to(dtype), (1, 2, 2), dtype)
# Conv3D
test(True, nn.Conv3d(1, 1, (3, 3, 3)).to(dtype), (1, 2, 2, 2), dtype)
test(False, nn.Conv3d(1, 1, (3, 3, 3)).to(dtype), (1, 3, 3, 3), dtype)
test(
False,
nn.Conv3d(1, 1, (3, 3, 3), padding=1).to(dtype),
(1, 2, 2, 2),
dtype,
)
def test_ConvTranspose2d_output_size(self):
m = nn.ConvTranspose2d(3, 4, 3, 3, 0, 2)
i = torch.randn(2, 3, 6, 6)
for h in range(15, 22):
for w in range(15, 22):
if 18 <= h <= 20 and 18 <= w <= 20:
output = m(i, output_size=(h, w))
self.assertEqual(output.size()[2:], (h, w))
else:
self.assertRaises(ValueError, lambda: m(i, (h, w)))
def test_ConvTranspose2d_output_size_downsample_upsample(self):
b, c, hid_c = 2, 3, 2
for h in range(13, 24):
for w in range(13, 17):
for k in range(2, 5):
for d in range(1, 5):
for s in range(1, 4):
for p in range(3):
conv = nn.Conv2d(
in_channels=c,
out_channels=hid_c,
kernel_size=k,
stride=s,
padding=p,
dilation=d,
)
t_conv = nn.ConvTranspose2d(
in_channels=hid_c,
out_channels=c,
kernel_size=k,
stride=s,
padding=p,
dilation=d,
)
i = torch.randn(b, c, h, w)
out = t_conv(conv(i), output_size=i.shape)
self.assertEqual(out.size()[2:], i.size()[2:])
def test_ConvTranspose3d_correct_output_size(self):
# Check that ConvTranspose3d can take a 5d output_size.
m = nn.ConvTranspose3d(2, 2, 2)
i = torch.rand(1, 2, 1, 1, 1)
m(i, output_size=(1, 2, 2, 2, 2))
@unittest.skipIf(not TEST_CUDA, "CUDA not available")
def test_ConvTranspose2d_half_cublas_gemm(self):
with torch.backends.cudnn.flags(enabled=False):
inputs = torch.randn(1, 1, 16, 16, device="cuda", dtype=torch.half)
deconv = (
nn.ConvTranspose2d(1, 1, 3, stride=2, padding=1, output_padding=1)
.cuda()
.half()
)
output = deconv(inputs)
output.mean().backward()
# For https://github.com/pytorch/pytorch/pull/1273
# Almost identical to the above `test_Conv2d_naive_groups`
@torch.backends.cudnn.flags(enabled=True, deterministic=True, benchmark=False)
@torch.backends.miopen.flags(immediate=True)
@tf32_on_and_off(0.001)
def test_Conv2d_groups_nobias(self):
dev_dtypes = [("cpu", torch.float)]
if TEST_CUDA:
dev_dtypes += [("cuda", torch.float), ("cuda", torch.half)]
if AMPERE_OR_ROCM:
dev_dtypes += [("cuda", torch.bfloat16)]
for device, dtype in dev_dtypes:
m = nn.Conv2d(4, 4, kernel_size=3, groups=2, bias=False).to(device, dtype)
i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
output = m(i)
grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)
output.backward(grad_output)
m1 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
m1.weight.data.copy_(m.weight.data[:2])
i1 = i.data[:, :2].contiguous().requires_grad_(True)
output1 = m1(i1)
output1.backward(grad_output[:, :2].contiguous())
m2 = nn.Conv2d(2, 2, kernel_size=3, bias=False).to(device, dtype)
m2.weight.data.copy_(m.weight.data[2:])
i2 = i.data[:, 2:].contiguous().requires_grad_(True)
output2 = m2(i2)
output2.backward(grad_output[:, 2:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1))
self.assertEqual(
i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype],
rtol=0,
)
self.assertEqual(
m.weight.grad.data,
torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
atol=1e-1 if dtype == torch.half else dtype2prec_DONTUSE[dtype],
rtol=0,
)
# Almost identical to the above `test_Conv2d_naive_groups`
# Covering special case when group > 1, input-channel / group < 16 and output-channel is multiple of 16
# See also https://github.com/pytorch/pytorch/pull/18463#issuecomment-476563686
# and https://github.com/pytorch/pytorch/pull/18463#issuecomment-477001024
@torch.backends.cudnn.flags(enabled=True, deterministic=True, benchmark=False)
@torch.backends.miopen.flags(immediate=True)
@tf32_on_and_off(0.001)
def test_Conv2d_groups_nobias_v2(self):
torch.manual_seed(123)
dev_dtypes = [("cpu", torch.float)]
if TEST_CUDA:
dev_dtypes += [("cuda", torch.float), ("cuda", torch.half)]
if AMPERE_OR_ROCM:
dev_dtypes += [("cuda", torch.bfloat16)]
for device, dtype in dev_dtypes:
m = nn.Conv2d(4, 16, kernel_size=3, groups=2, bias=False).to(device, dtype)
i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
output = m(i)
grad_output = torch.randn(2, 16, 4, 4, device=device, dtype=dtype)
output.backward(grad_output)
m1 = nn.Conv2d(2, 8, kernel_size=3, bias=False).to(device, dtype)
m1.weight.data.copy_(m.weight.data[:8])
i1 = i.data[:, :2].contiguous().requires_grad_(True)
output1 = m1(i1)
output1.backward(grad_output[:, :8].contiguous())
m2 = nn.Conv2d(2, 8, kernel_size=3, bias=False).to(device, dtype)
m2.weight.data.copy_(m.weight.data[8:])
i2 = i.data[:, 2:].contiguous().requires_grad_(True)
output2 = m2(i2)
output2.backward(grad_output[:, 8:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1))
self.assertEqual(
i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype],
rtol=0,
)
self.assertEqual(
m.weight.grad.data,
torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
atol=1e-1 if dtype == torch.half else dtype2prec_DONTUSE[dtype],
rtol=0,
)
# CPU-only test for group conv3d fast implementation using bmm
# See: https://github.com/pytorch/pytorch/pull/36355
def test_Conv3d_groups_nobias(self):
torch.manual_seed(123)
m = nn.Conv3d(4, 16, kernel_size=3, groups=2, bias=False).to("cpu", torch.float)
i = torch.randn(
2, 4, 6, 6, 6, device="cpu", dtype=torch.float, requires_grad=True
)
output = m(i)
grad_output = torch.randn(2, 16, 4, 4, 4, device="cpu", dtype=torch.float)
output.backward(grad_output)
m1 = nn.Conv3d(2, 8, kernel_size=3, bias=False).to("cpu", torch.float)
m1.weight.data.copy_(m.weight.data[:8])
i1 = i.data[:, :2].contiguous().requires_grad_(True)
output1 = m1(i1)
output1.backward(grad_output[:, :8].contiguous())
m2 = nn.Conv3d(2, 8, kernel_size=3, bias=False).to("cpu", torch.float)
m2.weight.data.copy_(m.weight.data[8:])
i2 = i.data[:, 2:].contiguous().requires_grad_(True)
output2 = m2(i2)
output2.backward(grad_output[:, 8:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1))
self.assertEqual(
i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[torch.float],
rtol=0,
)
self.assertEqual(
m.weight.grad.data,
torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[torch.float],
rtol=dtype2prec_DONTUSE[torch.float],
)
def test_Conv3d_groups_wbias(self):
torch.manual_seed(123)
m = nn.Conv3d(4, 16, kernel_size=3, groups=2, bias=True).to("cpu", torch.float)
i = torch.randn(
2, 4, 6, 6, 6, device="cpu", dtype=torch.float, requires_grad=True
)
output = m(i)
grad_output = torch.randn(2, 16, 4, 4, 4, device="cpu", dtype=torch.float)
output.backward(grad_output)
m1 = nn.Conv3d(2, 8, kernel_size=3, bias=True).to("cpu", torch.float)
m1.weight.data.copy_(m.weight.data[:8])
m1.bias.data.copy_(m.bias.data[:8])
i1 = i.data[:, :2].contiguous().requires_grad_(True)
output1 = m1(i1)
output1.backward(grad_output[:, :8].contiguous())
m2 = nn.Conv3d(2, 8, kernel_size=3, bias=True).to("cpu", torch.float)
m2.weight.data.copy_(m.weight.data[8:])
m2.bias.data.copy_(m.bias.data[8:])
i2 = i.data[:, 2:].contiguous().requires_grad_(True)
output2 = m2(i2)
output2.backward(grad_output[:, 8:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1))
self.assertEqual(
i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[torch.float],
rtol=dtype2prec_DONTUSE[torch.float],
)
self.assertEqual(
m.weight.grad.data,
torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[torch.float],
rtol=dtype2prec_DONTUSE[torch.float],
)
self.assertEqual(
m.bias.grad.data,
torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[torch.float],
rtol=dtype2prec_DONTUSE[torch.float],
)
def test_conv_tbc(self):
with set_default_dtype(torch.double):
inp = torch.randn(9, 4, 5, requires_grad=True)
weight = torch.randn(3, 5, 6, requires_grad=True)
bias = torch.randn(6, requires_grad=True)
gradcheck(
lambda i, w, b, pad: F.conv_tbc(i, w, b, pad), (inp, weight, bias, 3)
)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_grouped_conv_cudnn_nhwc_support(self):
# in order to catch the hols in grouped convolution in nhwc support for earlier cudnn version
input = torch.randn((16, 16, 8, 8), dtype=torch.float16, device="cuda").to(
memory_format=torch.channels_last
)
weight = torch.randn((8, 4, 3, 3), dtype=torch.float16, device="cuda").to(
memory_format=torch.channels_last
)
torch.convolution(input, weight, None, (1, 1), (1, 1), (1, 1), False, (0, 0), 4)
input = torch.randn((16, 8, 8, 8), dtype=torch.float16, device="cuda").to(
memory_format=torch.channels_last
)
torch.convolution(input, weight, None, (1, 1), (1, 1), (1, 1), True, (0, 0), 4)
@unittest.expectedFailure
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(not TEST_CUDNN, "needs cudnn")
def test_conv_cudnn_memory_layout_dominance(self):
# desired behavior here is to have the memory_layout of conv.weight to
# dominate the layout of output.
# which is not the same as current behavior, we'll fix this in
# following up PRs and remove the `expectedFailure` tag
input = torch.randint(
1, 10, (2, 8, 4, 4), dtype=torch.float32, device="cuda", requires_grad=True
)
conv = nn.Conv2d(8, 4, 3).cuda().float()
out = conv(input)
self.assertTrue(out.is_contiguous())
input = input.contiguous(memory_format=torch.channels_last)
out = conv(input)
self.assertTrue(out.is_contiguous())
conv.weight.data = conv.weight.contiguous(memory_format=torch.channels_last)
out = conv(input)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
input = input.contiguous()
out = conv(input)
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_cudnn_noncontiguous_weight(self):
# Noncontiguous weights must be contiguous() before being
# passed to cuDNN
input = torch.tensor([1, 1, 1], dtype=torch.double, device="cuda").view(1, 1, 3)
weights1 = torch.tensor([1], dtype=torch.double, device="cuda").expand(1, 1, 2)
weights2 = (
torch.tensor([1], dtype=torch.double, device="cuda")
.expand(1, 1, 2)
.contiguous()
)
self.assertEqual(
F.conv1d(input, weights1, bias=None, stride=2, dilation=2),
F.conv1d(input, weights2, bias=None, stride=2, dilation=2),
)
def run_grad_conv_test(self, func_forward, func_backward, dim=1, gradient="input"):
for kern, inp_size in [(3, 6), (3, 7), (4, 9)]:
for batch, stride, padding, chan_in, chan_out, dilation in product(
[1, 2], [1, 2], [0, 1, 2], [2], [3], [1]
):
for has_bias in [True, False]:
input_shape = [batch, chan_in]
weight_shape = [chan_out, chan_in]
for _ in range(dim):
input_shape.append(inp_size)
weight_shape.append(kern)
input = torch.randn(input_shape, requires_grad=True)
weight = torch.randn(weight_shape, requires_grad=True)
if has_bias:
bias = torch.randn([chan_out], requires_grad=True)
output = func_forward(
input,
weight,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
gradient_o = torch.randn(output.shape)
gradient_w = torch.autograd.grad(
output, input if (gradient == "input") else weight, gradient_o
)
self.assertEqual(
gradient_w[0],
func_backward(
input_shape if (gradient == "input") else input,
weight_shape if (gradient == "weight") else weight,
gradient_o,
stride=stride,
padding=padding,
dilation=dilation,
),
)
def test_grad_conv1d_input(self):
self.run_grad_conv_test(F.conv1d, F.grad.conv1d_input, 1, "input")
def test_grad_conv1d_weight(self):
self.run_grad_conv_test(F.conv1d, F.grad.conv1d_weight, 1, "weight")
def test_grad_conv2d_input(self):
self.run_grad_conv_test(F.conv2d, F.grad.conv2d_input, 2, "input")
def test_grad_conv2d_weight(self):
self.run_grad_conv_test(F.conv2d, F.grad.conv2d_weight, 2, "weight")
def test_grad_conv3d_input(self):
self.run_grad_conv_test(F.conv3d, F.grad.conv3d_input, 3, "input")
def test_grad_conv3d_weight(self):
self.run_grad_conv_test(F.conv3d, F.grad.conv3d_weight, 3, "weight")
@unittest.skipIf(not torch._nnpack_available(), "NNPACK unavailable")
def test_nnpack_conv(self):
for kern, inp_size in [(3, 6), (3, 7), (4, 9)]:
for batch, stride, padding, chan_in, chan_out in product(
[1, 2, 3, 4], [1, 2], [0, 1, 2], [2], [3]
):
for has_bias in [True, False]:
input_shape = [batch, chan_in]
weight_shape = [chan_out, chan_in]
for _ in range(2):
input_shape.append(inp_size)
weight_shape.append(kern)
input = torch.randn(
input_shape, requires_grad=True, dtype=torch.float
)
weight = torch.randn(
weight_shape, requires_grad=True, dtype=torch.float
)
if has_bias:
bias = torch.randn(
[chan_out], requires_grad=True, dtype=torch.float
)
output = torch._nnpack_spatial_convolution(
input, weight, stride=stride, padding=padding, bias=bias
)
output_expected = torch.nn.functional.conv2d(
input, weight, stride=stride, padding=padding, bias=bias
)
self.assertEqual(output, output_expected, atol=3e-4, rtol=0)
gradient_o = torch.randn(output.shape, dtype=torch.float)
grads = torch.autograd.grad(output, [input, weight], gradient_o)
grads_expected = torch.autograd.grad(
output_expected, [input, weight], gradient_o
)
for gr, gr_expected in zip(grads, grads_expected):
self.assertEqual(gr, gr_expected, atol=3e-4, rtol=0)
def test_conv_padding_mode(self):
with self.assertRaisesRegex(ValueError, "padding_mode must be one of"):
nn.Conv2d(3, 3, 3, padding_mode="xyz")
with self.assertRaisesRegex(ValueError, "padding_mode must be one of"):
nn.Conv2d(3, 3, 3, padding_mode=3)
with self.assertRaisesRegex(ValueError, 'Only "zeros" '):
nn.ConvTranspose2d(3, 3, 3, padding_mode="reflect")
def test_functional_grad_conv(self):
# Conv 1D
input = torch.randn(1, 1, 5, requires_grad=True)
weight = torch.randn(1, 1, 3, requires_grad=True)
output = F.conv1d(input, weight, dilation=2)
grad_output = torch.randn(output.shape)
grad_input_autograd, grad_weight_autograd = torch.autograd.grad(
output, (input, weight), grad_output
)
grad_input_functional = torch.nn.grad.conv1d_input(
input.shape, weight, grad_output, dilation=2
)
self.assertEqual(grad_input_functional, grad_input_autograd)
grad_weight_functional = torch.nn.grad.conv1d_weight(
input, weight.shape, grad_output, dilation=2
)
self.assertEqual(grad_weight_functional, grad_weight_autograd)
# Conv 2D
input = torch.randn(1, 1, 5, 5, requires_grad=True)
weight = torch.randn(1, 1, 3, 3, requires_grad=True)
output = F.conv2d(input, weight, dilation=2)
grad_output = torch.randn(output.shape)
(grad_input_autograd, grad_weight_autograd) = torch.autograd.grad(
output, (input, weight), grad_output
)
grad_input_functional = torch.nn.grad.conv2d_input(
input.shape, weight, grad_output, dilation=2
)
self.assertEqual(grad_input_functional, grad_input_autograd)
grad_weight_functional = torch.nn.grad.conv2d_weight(
input, weight.shape, grad_output, dilation=2
)
self.assertEqual(grad_weight_functional, grad_weight_autograd)
# Conv 3D
input = torch.randn(1, 1, 5, 5, 5, requires_grad=True)
weight = torch.randn(1, 1, 3, 3, 3, requires_grad=True)
output = F.conv3d(input, weight, dilation=2)
grad_output = torch.randn(output.shape)
(grad_input_autograd, grad_weight_autograd) = torch.autograd.grad(
output, (input, weight), grad_output
)
grad_input_functional = torch.nn.grad.conv3d_input(
input.shape, weight, grad_output, dilation=2
)
self.assertEqual(grad_input_functional, grad_input_autograd)
grad_weight_functional = torch.nn.grad.conv3d_weight(
input, weight.shape, grad_output, dilation=2
)
self.assertEqual(grad_weight_functional, grad_weight_autograd)
def test_functional_grad_conv2d(self):
BATCH_SIZE = 4
IN_CH = 8
OUT_CH = 16
SPATIAL = 32
def _test_conv2d(stride, kernel_size, groups, dilation):
padding = kernel_size // 2
input = (
torch.empty(BATCH_SIZE, IN_CH, SPATIAL, SPATIAL)
.uniform_(-8.0, 8.0)
.requires_grad_(True)
)
weight = (
torch.empty(OUT_CH, IN_CH // groups, kernel_size, kernel_size)
.uniform_(-4.0, 4.0)
.requires_grad_(True)
)
output = F.conv2d(
input,
weight,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
grad_output = torch.randn(output.shape)
(grad_input_autograd, grad_weight_autograd) = torch.autograd.grad(
output, (input, weight), grad_output
)
grad_input_functional = torch.nn.grad.conv2d_input(
input.shape,
weight,
grad_output,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
self.assertEqual(grad_input_functional, grad_input_autograd)
grad_weight_functional = torch.nn.grad.conv2d_weight(
input,
weight.shape,
grad_output,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
self.assertEqual(grad_weight_functional, grad_weight_autograd)
strides = [1, 2]
kernel_sizes = [1, 3, 5]
groups = [1, 2, 4]
dilates = [1, 2]
for s, k, g, d in product(strides, kernel_sizes, groups, dilates):
_test_conv2d(s, k, g, d)
def test_permute_conv2d_issue_120211(self):
def reproducer(radius: int):
image = torch.rand(1, 1024, 1024, 3)
image = image.permute(0, 3, 1, 2)
kernel_x = torch.zeros([3, 1, 1, radius * 2 + 1], device=image.device)
image = torch.nn.functional.conv2d(image, kernel_x, groups=image.shape[-3])
for i in range(128):
# This should not fail
reproducer(radius=i)
def test_conv3d_issue_120406(self):
# This should not fail
F.conv3d(torch.ones(2, 3, 8, 9, 26), torch.ones(3, 1, 1, 1, 17), groups=3)
def test_conv1d_issue_120547(self):
weight = torch.ones([16, 1, 32])
bias = torch.ones([16])
stride, padding, dilation, groups = (1, 16, 1, 16)
input = torch.rand((1, 1, 16))
input = input.transpose(1, 2)
# This should not fail
F.conv1d(input, weight, bias, stride, padding, dilation, groups)
| TestConvolutionNN |
python | joke2k__faker | faker/providers/ssn/it_IT/__init__.py | {
"start": 96486,
"end": 101143
} | class ____(SsnProvider):
"""
Generates italian fiscal codes.
"""
def ssn(self) -> str:
sex: int = self.random_int(min=0, max=1)
surname: str = self._get_surname_letters()
name: str = self._get_name_letters(sex)
year: str = "%02d" % self.random_int(min=0, max=99)
is_leap_year: bool = self.is_leap_year(int(year))
month: str = self.random_element(MONTHS_LIST)
max_day: int = self._get_max_day(is_leap_year=is_leap_year, month=month)
day: str = "%02d" % (self.random_int(min=1, max=max_day) + (40 if sex == 1 else 0))
municipality: str = self.random_element(MUNICIPALITIES_LIST)
code: str = f"{surname}{name}{year}{month}{day}{municipality}"
return code + checksum(code)
vat_id_formats = ("IT###########",)
def vat_id(self) -> str:
"""
http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
:return: A random Italian VAT ID
"""
return self.bothify(self.random_element(self.vat_id_formats))
def _get_name_letters(self, sex: int) -> str:
"""
Rules:
* take all consonants in their order
* if >= 4, take the 1st, 3rd and 4th
* if < 3 take the vowels also; vowels must go _after_ the consonants
and must be taken in the order they appear (LUCA -> LCU)
* if == 3 return all 3 consonants
* if name is < 3 chars, pad it on the right with "X" (LI -> LIX)
Args:
sex: int
Returns:
str
"""
if sex == 1:
name = self.generator.first_name_male().upper()
else:
name = self.generator.first_name_female().upper()
name = self._transliterate_name(name)
if len(name) < 3:
return self._pad_shorter(name)
name_consonants = self._get_consonants(name)
cons_len = len(name_consonants)
if cons_len >= 4:
name_part = "".join([name_consonants[0], name_consonants[1], name_consonants[3]])
elif cons_len < 3:
name_part = "".join(name_consonants + self._get_vowels(name))[:3]
else:
name_part = "".join(name_consonants)
return name_part
def _get_surname_letters(self) -> str:
"""
Rules:
* if consonants >=3 : take the first 3
* if less, pad them with vowels; vowels come after the consonants and in the order they appear (ROSA -> RSO)
* if surname is less than 3 chars, pad it on the right with 'X' (FO -> FOX)
Returns:
str
"""
surname = self.generator.last_name().upper()
surname = self._transliterate_name(surname)
if len(surname) < 3:
return self._pad_shorter(surname)
surname_consonants = self._get_consonants(surname)
cons_len = len(surname_consonants)
if cons_len < 3:
surname_part = "".join(surname_consonants + self._get_vowels(surname))[:3]
else:
surname_part = "".join(surname_consonants)[:3]
return surname_part
def _transliterate_name(self, name: str) -> str:
nfkd_form: str = unicodedata.normalize("NFKD", name)
return "".join([c for c in nfkd_form if unicodedata.combining(c) == 0])
def _get_vowels(self, sequence: str) -> list:
"""
Returns list of vowels in provided string
"""
vowels = []
for char in sequence:
if char in VOWELS:
vowels.append(char)
return vowels
def _get_consonants(self, sequence: str) -> list:
"""
Returns list of consonants in provided string
"""
consonants = []
for char in sequence:
if char in CONSONANTS:
consonants.append(char)
return consonants
def _pad_shorter(self, sequence: str) -> str:
"""
Pads shorter string with the allowed char
"""
return sequence.ljust(3, "X")
@staticmethod
def is_leap_year(year: int) -> bool:
"""
Checks if the one given is a leap year
"""
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
return True
return False
def _get_max_day(self, is_leap_year: bool, month: str) -> int:
"""
Returns the maximum day for the current month
"""
if month in ["D", "H", "P", "S"]:
max_day = 30
elif month == "B":
max_day = 29 if is_leap_year else 28
else:
max_day = 31
return max_day
| Provider |
python | walkccc__LeetCode | solutions/3331. Find Subtree Sizes After Changes/3331.py | {
"start": 0,
"end": 903
} | class ____:
def findSubtreeSizes(self, parent: list[int], s: str) -> list[int]:
n = len(parent)
ans = [0] * n
newParent = parent.copy()
tree = [[] for _ in range(n)]
for i in range(1, n):
closest = self._findClosestAncestor(i, parent, s)
if closest != -1:
newParent[i] = closest
for i in range(1, n):
tree[newParent[i]].append(i)
self._dfs(tree, 0, ans)
return ans
def _findClosestAncestor(self, u: int, parent: list[int], s: str) -> int:
"""
Returns the closest ancestor of node `u` that has the same value as `u`.
"""
curr = parent[u]
while curr != -1:
if s[curr] == s[u]:
return curr
curr = parent[curr]
return -1
def _dfs(self, tree: list[list[int]], u: int, ans: list[int]) -> int:
sz = 1
for v in tree[u]:
sz += self._dfs(tree, v, ans)
ans[u] = sz
return sz
| Solution |
python | nedbat__coveragepy | tests/test_context.py | {
"start": 5092,
"end": 8178
} | class ____(CoverageTest):
"""Tests of dynamically changing contexts."""
SOURCE = """\
def helper(lineno):
x = 2
def test_one():
a = 5
helper(6)
def test_two():
a = 9
b = 10
if a > 11:
b = 12
assert a == (13-4)
assert b == (14-4)
helper(15)
test_one()
x = 18
helper(19)
test_two()
"""
OUTER_LINES = [1, 4, 8, 17, 18, 19, 2, 20]
TEST_ONE_LINES = [5, 6, 2]
TEST_TWO_LINES = [9, 10, 11, 13, 14, 15, 2]
def test_dynamic_alone(self) -> None:
self.make_file("two_tests.py", self.SOURCE)
cov = coverage.Coverage(source=["."])
cov.set_option("run:dynamic_context", "test_function")
self.start_import_stop(cov, "two_tests")
data = cov.get_data()
full_names = {os.path.basename(f): f for f in data.measured_files()}
fname = full_names["two_tests.py"]
assert_count_equal(
data.measured_contexts(),
["", "two_tests.test_one", "two_tests.test_two"],
)
def assert_context_lines(context: str, lines: list[TLineNo]) -> None:
data.set_query_context(context)
assert_count_equal(lines, sorted_lines(data, fname))
assert_context_lines("", self.OUTER_LINES)
assert_context_lines("two_tests.test_one", self.TEST_ONE_LINES)
assert_context_lines("two_tests.test_two", self.TEST_TWO_LINES)
def test_static_and_dynamic(self) -> None:
self.make_file("two_tests.py", self.SOURCE)
cov = coverage.Coverage(context="stat", source=["."])
cov.set_option("run:dynamic_context", "test_function")
self.start_import_stop(cov, "two_tests")
data = cov.get_data()
full_names = {os.path.basename(f): f for f in data.measured_files()}
fname = full_names["two_tests.py"]
assert_count_equal(
data.measured_contexts(),
["stat", "stat|two_tests.test_one", "stat|two_tests.test_two"],
)
def assert_context_lines(context: str, lines: list[TLineNo]) -> None:
data.set_query_context(context)
assert_count_equal(lines, sorted_lines(data, fname))
assert_context_lines("stat", self.OUTER_LINES)
assert_context_lines("stat|two_tests.test_one", self.TEST_ONE_LINES)
assert_context_lines("stat|two_tests.test_two", self.TEST_TWO_LINES)
def get_qualname() -> str | None:
"""Helper to return qualname_from_frame for the caller."""
stack = inspect.stack()[1:]
if any(sinfo[0].f_code.co_name == "get_qualname" for sinfo in stack):
# We're calling ourselves recursively, maybe because we're testing
# properties. Return an int to try to get back on track.
return 17 # type: ignore[return-value]
caller_frame = stack[0][0]
return qualname_from_frame(caller_frame)
# pylint: disable=missing-class-docstring, missing-function-docstring, unused-argument
| DynamicContextTest |
python | django__django | django/contrib/admin/options.py | {
"start": 4072,
"end": 24416
} | class ____(metaclass=forms.MediaDefiningClass):
"""Functionality common to both ModelAdmin and InlineAdmin."""
autocomplete_fields = ()
raw_id_fields = ()
fields = None
exclude = None
fieldsets = None
form = forms.ModelForm
filter_vertical = ()
filter_horizontal = ()
radio_fields = {}
prepopulated_fields = {}
formfield_overrides = {}
readonly_fields = ()
ordering = None
sortable_by = None
view_on_site = True
show_full_result_count = True
checks_class = BaseModelAdminChecks
def check(self, **kwargs):
return self.checks_class().check(self, **kwargs)
def __init__(self):
# Merge FORMFIELD_FOR_DBFIELD_DEFAULTS with the formfield_overrides
# rather than simply overwriting.
overrides = copy.deepcopy(FORMFIELD_FOR_DBFIELD_DEFAULTS)
for k, v in self.formfield_overrides.items():
overrides.setdefault(k, {}).update(v)
self.formfield_overrides = overrides
def formfield_for_dbfield(self, db_field, request, **kwargs):
"""
Hook for specifying the form Field instance for a given database Field
instance.
If kwargs are given, they're passed to the form Field's constructor.
"""
# If the field specifies choices, we don't need to look for special
# admin widgets - we just need to use a select widget of some kind.
if db_field.choices:
return self.formfield_for_choice_field(db_field, request, **kwargs)
# ForeignKey or ManyToManyFields
if isinstance(db_field, (models.ForeignKey, models.ManyToManyField)):
# Combine the field kwargs with any options for
# formfield_overrides. Make sure the passed in **kwargs override
# anything in formfield_overrides because **kwargs is more
# specific, and should always win.
if db_field.__class__ in self.formfield_overrides:
kwargs = {**self.formfield_overrides[db_field.__class__], **kwargs}
# Get the correct formfield.
if isinstance(db_field, models.ForeignKey):
formfield = self.formfield_for_foreignkey(db_field, request, **kwargs)
elif isinstance(db_field, models.ManyToManyField):
formfield = self.formfield_for_manytomany(db_field, request, **kwargs)
# For non-raw_id fields, wrap the widget with a wrapper that adds
# extra HTML -- the "add other" interface -- to the end of the
# rendered output. formfield can be None if it came from a
# OneToOneField with parent_link=True or a M2M intermediary.
if formfield and db_field.name not in self.raw_id_fields:
try:
related_modeladmin = self.admin_site.get_model_admin(
db_field.remote_field.model
)
except NotRegistered:
wrapper_kwargs = {}
else:
wrapper_kwargs = {
"can_add_related": related_modeladmin.has_add_permission(
request
),
"can_change_related": related_modeladmin.has_change_permission(
request
),
"can_delete_related": related_modeladmin.has_delete_permission(
request
),
"can_view_related": related_modeladmin.has_view_permission(
request
),
}
formfield.widget = widgets.RelatedFieldWidgetWrapper(
formfield.widget,
db_field.remote_field,
self.admin_site,
**wrapper_kwargs,
)
return formfield
# If we've got overrides for the formfield defined, use 'em. **kwargs
# passed to formfield_for_dbfield override the defaults.
for klass in db_field.__class__.mro():
if klass in self.formfield_overrides:
kwargs = {**copy.deepcopy(self.formfield_overrides[klass]), **kwargs}
return db_field.formfield(**kwargs)
# For any other type of field, just call its formfield() method.
return db_field.formfield(**kwargs)
def formfield_for_choice_field(self, db_field, request, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if "widget" not in kwargs:
kwargs["widget"] = widgets.AdminRadioSelect(
attrs={
"class": get_ul_class(self.radio_fields[db_field.name]),
}
)
if "choices" not in kwargs:
kwargs["choices"] = db_field.get_choices(
include_blank=db_field.blank, blank_choice=[("", _("None"))]
)
return db_field.formfield(**kwargs)
def get_field_queryset(self, db, db_field, request):
"""
If the ModelAdmin specifies ordering, the queryset should respect that
ordering. Otherwise don't specify the queryset, let the field decide
(return None in that case).
"""
try:
related_admin = self.admin_site.get_model_admin(db_field.remote_field.model)
except NotRegistered:
return None
else:
ordering = related_admin.get_ordering(request)
if ordering is not None and ordering != ():
return db_field.remote_field.model._default_manager.using(db).order_by(
*ordering
)
return None
def formfield_for_foreignkey(self, db_field, request, **kwargs):
    """
    Get a form Field for a ForeignKey.

    Selects a widget based on the admin's configuration, in priority
    order: autocomplete fields, raw-id fields, radio fields. A caller-
    supplied ``widget`` in ``kwargs`` disables all of that. The queryset
    is only set when :meth:`get_field_queryset` returns one.
    """
    db = kwargs.get("using")
    if "widget" not in kwargs:
        if db_field.name in self.get_autocomplete_fields(request):
            kwargs["widget"] = AutocompleteSelect(
                db_field, self.admin_site, using=db
            )
        elif db_field.name in self.raw_id_fields:
            kwargs["widget"] = widgets.ForeignKeyRawIdWidget(
                db_field.remote_field, self.admin_site, using=db
            )
        elif db_field.name in self.radio_fields:
            kwargs["widget"] = widgets.AdminRadioSelect(
                attrs={
                    "class": get_ul_class(self.radio_fields[db_field.name]),
                }
            )
            # Radio selects show "None" as the empty option, but only when
            # the field allows blank values; otherwise no empty option.
            kwargs["empty_label"] = (
                kwargs.get("empty_label", _("None")) if db_field.blank else None
            )
    if "queryset" not in kwargs:
        queryset = self.get_field_queryset(db, db_field, request)
        if queryset is not None:
            kwargs["queryset"] = queryset
    return db_field.formfield(**kwargs)
def formfield_for_manytomany(self, db_field, request, **kwargs):
    """
    Get a form Field for a ManyToManyField.

    Returns ``None`` (no form field) for m2m relations that go through a
    manually defined intermediary model. Otherwise chooses a widget in
    priority order (autocomplete, raw-id, filtered select) and appends
    the multi-select help text for plain ``SelectMultiple`` widgets.
    """
    # If it uses an intermediary model that isn't auto created, don't show
    # a field in admin.
    if not db_field.remote_field.through._meta.auto_created:
        return None
    db = kwargs.get("using")
    if "widget" not in kwargs:
        autocomplete_fields = self.get_autocomplete_fields(request)
        if db_field.name in autocomplete_fields:
            kwargs["widget"] = AutocompleteSelectMultiple(
                db_field,
                self.admin_site,
                using=db,
            )
        elif db_field.name in self.raw_id_fields:
            kwargs["widget"] = widgets.ManyToManyRawIdWidget(
                db_field.remote_field,
                self.admin_site,
                using=db,
            )
        elif db_field.name in [*self.filter_vertical, *self.filter_horizontal]:
            # Second argument toggles the "vertical" layout of the widget.
            kwargs["widget"] = widgets.FilteredSelectMultiple(
                db_field.verbose_name, db_field.name in self.filter_vertical
            )
    if "queryset" not in kwargs:
        queryset = self.get_field_queryset(db, db_field, request)
        if queryset is not None:
            kwargs["queryset"] = queryset
    form_field = db_field.formfield(**kwargs)
    # Checkbox and autocomplete widgets are self-explanatory; only plain
    # multi-selects need the Control/Command hint appended.
    if (
        isinstance(form_field.widget, SelectMultiple)
        and form_field.widget.allow_multiple_selected
        and not isinstance(
            form_field.widget, (CheckboxSelectMultiple, AutocompleteSelectMultiple)
        )
    ):
        msg = _(
            "Hold down “Control”, or “Command” on a Mac, to select more than one."
        )
        help_text = form_field.help_text
        form_field.help_text = (
            format_lazy("{} {}", help_text, msg) if help_text else msg
        )
    return form_field
def get_autocomplete_fields(self, request):
    """
    Return a list of ForeignKey and/or ManyToMany fields which should use
    an autocomplete widget.
    """
    autocomplete = self.autocomplete_fields
    return autocomplete
def get_view_on_site_url(self, obj=None):
    """
    Return the "View on site" URL for *obj*, or ``None``.

    ``None`` is returned when there is no object, when ``view_on_site``
    is falsy, or (implicitly, by falling off the end) when
    ``view_on_site`` is truthy but the object has no
    ``get_absolute_url`` method.
    """
    if obj is None or not self.view_on_site:
        return None
    if callable(self.view_on_site):
        # A callable view_on_site computes the URL directly from the object.
        return self.view_on_site(obj)
    elif hasattr(obj, "get_absolute_url"):
        # use the ContentType lookup if view_on_site is True
        return reverse(
            "admin:view_on_site",
            kwargs={
                "content_type_id": get_content_type_for_model(obj).pk,
                "object_id": obj.pk,
            },
            current_app=self.admin_site.name,
        )
def get_empty_value_display(self):
    """
    Return the empty_value_display set on ModelAdmin or AdminSite.
    """
    # Fall back to the AdminSite's value when this admin doesn't define one.
    _missing = object()
    display = getattr(self, "empty_value_display", _missing)
    if display is _missing:
        display = self.admin_site.empty_value_display
    return mark_safe(display)
def get_exclude(self, request, obj=None):
    """
    Hook for specifying exclude.
    """
    excluded = self.exclude
    return excluded
def get_fields(self, request, obj=None):
    """
    Hook for specifying fields.

    If ``self.fields`` is set, it wins; otherwise the field list is
    derived from the form's base fields plus any read-only fields.
    """
    if self.fields:
        return self.fields
    # _get_form_for_get_fields() is implemented in subclasses.
    form = self._get_form_for_get_fields(request, obj)
    return [*form.base_fields, *self.get_readonly_fields(request, obj)]
def get_fieldsets(self, request, obj=None):
    """
    Hook for specifying fieldsets.
    """
    declared = self.fieldsets
    if not declared:
        # No explicit fieldsets: wrap all fields in one unnamed fieldset.
        declared = [(None, {"fields": self.get_fields(request, obj)})]
    return declared
def get_inlines(self, request, obj):
    """Hook for specifying custom inlines."""
    configured_inlines = self.inlines
    return configured_inlines
def get_ordering(self, request):
    """
    Hook for specifying field ordering.
    """
    ordering = self.ordering
    if not ordering:
        # Never return None: callers unpack the result with *ordering.
        return ()
    return ordering
def get_readonly_fields(self, request, obj=None):
    """
    Hook for specifying custom readonly fields.
    """
    readonly = self.readonly_fields
    return readonly
def get_prepopulated_fields(self, request, obj=None):
    """
    Hook for specifying custom prepopulated fields.
    """
    prepopulated = self.prepopulated_fields
    return prepopulated
def get_queryset(self, request):
    """
    Return a QuerySet of all model instances that can be edited by the
    admin site. This is used by changelist_view.

    Ordering from :meth:`get_ordering` is applied when non-empty;
    otherwise the default manager's queryset is returned unchanged.
    """
    qs = self.model._default_manager.get_queryset()
    # TODO: this should be handled by some parameter to the ChangeList.
    ordering = self.get_ordering(request)
    if ordering:
        qs = qs.order_by(*ordering)
    return qs
def get_sortable_by(self, request):
    """Hook for specifying which fields can be sorted in the changelist."""
    sortable = self.sortable_by
    if sortable is None:
        # Unset (None) means "all list_display columns are sortable";
        # an empty collection means "nothing is sortable".
        return self.get_list_display(request)
    return sortable
def lookup_allowed(self, lookup, value, request):
    """
    Return True if the query-string ``lookup`` (with ``value``) may be
    used to filter this admin's changelist.

    Accepts lookups that come from related-field ``limit_choices_to``
    popups, any single-field (local) lookup, and relational lookups that
    match the date hierarchy or a configured list filter.
    """
    from django.contrib.admin.filters import SimpleListFilter

    model = self.model
    # Check FKey lookups that are allowed, so that popups produced by
    # ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
    # are allowed to work.
    for fk_lookup in model._meta.related_fkey_lookups:
        # As ``limit_choices_to`` can be a callable, invoke it here.
        if callable(fk_lookup):
            fk_lookup = fk_lookup()
        if (lookup, value) in widgets.url_params_from_lookup_dict(
            fk_lookup
        ).items():
            return True

    # Walk the lookup path field-by-field, collecting the relational
    # segments into relation_parts and following relations across models.
    relation_parts = []
    prev_field = None
    parts = lookup.split(LOOKUP_SEP)
    for part in parts:
        try:
            field = model._meta.get_field(part)
        except FieldDoesNotExist:
            # Lookups on nonexistent fields are ok, since they're ignored
            # later.
            break
        if not prev_field or (
            prev_field.is_relation
            and field not in model._meta.parents.values()
            and field is not model._meta.auto_field
            and (
                model._meta.auto_field is None
                or part not in getattr(prev_field, "to_fields", [])
            )
            and (field.is_relation or not field.primary_key)
        ):
            relation_parts.append(part)
        if not getattr(field, "path_infos", None):
            # This is not a relational field, so further parts
            # must be transforms.
            break
        prev_field = field
        model = field.path_infos[-1].to_opts.model
    if len(relation_parts) <= 1:
        # Either a local field filter, or no fields at all.
        return True
    valid_lookups = {self.date_hierarchy}
    for filter_item in self.get_list_filter(request):
        if isinstance(filter_item, type) and issubclass(
            filter_item, SimpleListFilter
        ):
            valid_lookups.add(filter_item.parameter_name)
        elif isinstance(filter_item, (list, tuple)):
            valid_lookups.add(filter_item[0])
        else:
            valid_lookups.add(filter_item)
    # Is it a valid relational lookup?
    # NOTE: ``part`` deliberately carries the last path segment examined by
    # the loop above, so both the relational prefix and the prefix plus
    # that final segment are checked against valid_lookups.
    return not {
        LOOKUP_SEP.join(relation_parts),
        LOOKUP_SEP.join([*relation_parts, part]),
    }.isdisjoint(valid_lookups)
def to_field_allowed(self, request, to_field):
    """
    Return True if the model associated with this admin should be
    allowed to be referenced by the specified field.

    Allowed cases, in order: the primary key; a target of one of this
    model's m2m fields; or a field targeted by a FK/M2M from a model (or
    inline) registered on this admin site.
    """
    try:
        field = self.opts.get_field(to_field)
    except FieldDoesNotExist:
        return False

    # Always allow referencing the primary key since it's already possible
    # to get this information from the change view URL.
    if field.primary_key:
        return True

    # Allow reverse relationships to models defining m2m fields if they
    # target the specified field.
    for many_to_many in self.opts.many_to_many:
        if many_to_many.m2m_target_field_name() == to_field:
            return True

    # Make sure at least one of the models registered for this site
    # references this field through a FK or a M2M relationship.
    registered_models = set()
    for model, admin in self.admin_site._registry.items():
        registered_models.add(model)
        for inline in admin.inlines:
            registered_models.add(inline.model)

    # Auto-created, non-concrete fields are the reverse side of relations
    # pointing at this model.
    related_objects = (
        f
        for f in self.opts.get_fields(include_hidden=True)
        if (f.auto_created and not f.concrete)
    )
    for related_object in related_objects:
        related_model = related_object.related_model
        remote_field = related_object.field.remote_field
        if (
            any(issubclass(model, related_model) for model in registered_models)
            and hasattr(remote_field, "get_related_field")
            and remote_field.get_related_field() == field
        ):
            return True

    return False
def has_add_permission(self, request):
    """
    Return True if the given request has permission to add an object.
    Can be overridden by the user in subclasses.
    """
    opts = self.opts
    perm = "%s.%s" % (opts.app_label, get_permission_codename("add", opts))
    return request.user.has_perm(perm)
def has_change_permission(self, request, obj=None):
    """
    Return True if the given request has permission to change the given
    Django model instance, the default implementation doesn't examine the
    `obj` parameter.

    Can be overridden by the user in subclasses. In such case it should
    return True if the given request has permission to change the `obj`
    model instance. If `obj` is None, this should return True if the given
    request has permission to change *any* object of the given type.
    """
    opts = self.opts
    perm = "%s.%s" % (opts.app_label, get_permission_codename("change", opts))
    return request.user.has_perm(perm)
def has_delete_permission(self, request, obj=None):
    """
    Return True if the given request has permission to delete the given
    Django model instance, the default implementation doesn't examine the
    `obj` parameter.

    Can be overridden by the user in subclasses. In such case it should
    return True if the given request has permission to delete the `obj`
    model instance. If `obj` is None, this should return True if the given
    request has permission to delete *any* object of the given type.
    """
    opts = self.opts
    perm = "%s.%s" % (opts.app_label, get_permission_codename("delete", opts))
    return request.user.has_perm(perm)
def has_view_permission(self, request, obj=None):
    """
    Return True if the given request has permission to view the given
    Django model instance. The default implementation doesn't examine the
    `obj` parameter.

    If overridden by the user in subclasses, it should return True if the
    given request has permission to view the `obj` model instance. If `obj`
    is None, it should return True if the request has permission to view
    any object of the given type.
    """
    opts = self.opts
    user = request.user
    # Either "view" or "change" permission grants read access; checking
    # "view" first preserves the original short-circuit order.
    for action in ("view", "change"):
        codename = get_permission_codename(action, opts)
        if user.has_perm("%s.%s" % (opts.app_label, codename)):
            return True
    return False
def has_view_or_change_permission(self, request, obj=None):
    """Return True if the request may either view or change *obj*."""
    if self.has_view_permission(request, obj):
        return True
    return self.has_change_permission(request, obj)
def has_module_permission(self, request):
    """
    Return True if the given request has any permission in the given
    app label.

    Can be overridden by the user in subclasses. In such case it should
    return True if the given request has permission to view the module on
    the admin index page and access the module's index page. Overriding it
    does not restrict access to the add, change or delete views. Use
    `ModelAdmin.has_(add|change|delete)_permission` for that.
    """
    user = request.user
    return user.has_module_perms(self.opts.app_label)
| BaseModelAdmin |
python | django-import-export__django-import-export | import_export/widgets.py | {
"start": 23652,
"end": 25772
} | class ____(Widget):
"""
Widget that converts between representations of a ManyToMany relationships
as a list and an actual ManyToMany field.
:param model: The model the ManyToMany field refers to (required).
:param separator: Defaults to ``','``.
:param field: A field on the related model. Default is ``pk``.
"""
def __init__(self, model, separator=None, field=None, **kwargs):
    """Store the related model, separator (default ","), and lookup
    field (default "pk"), forwarding remaining kwargs to Widget."""
    self.model = model
    self.separator = "," if separator is None else separator
    self.field = "pk" if field is None else field
    super().__init__(**kwargs)
def clean(self, value, row=None, **kwargs):
    """
    Converts a separated string of values into a QuerySet for ManyToMany
    relationships.

    Splits the input by the configured separator and looks up model instances
    using the specified field. Filters out empty values after splitting.

    :param value: String of separated values, or a single numeric value.
    :param row: The current row being processed.
    :param **kwargs: Optional keyword arguments.
    :returns: A QuerySet containing the related model instances, or an empty
        QuerySet if no value provided.
    """
    if not value:
        return self.model.objects.none()
    if isinstance(value, (float, int)):
        # A bare number means a single id.
        ids = [int(value)]
    else:
        stripped = [piece.strip() for piece in value.split(self.separator)]
        ids = [piece for piece in stripped if piece]
    return self.model.objects.filter(**{"%s__in" % self.field: ids})
def render(self, value, obj=None, **kwargs):
    """
    :return: A string with values separated by ``separator``.
        ``None`` values are returned as empty strings.
        ``coerce_to_string`` has no effect on the return value.
    """
    self._obj_deprecation_warning(obj)
    if value is None:
        return ""
    # Renamed loop variable so it no longer shadows the ``obj`` parameter.
    rendered = [smart_str(getattr(related, self.field)) for related in value.all()]
    return self.separator.join(rendered)
| ManyToManyWidget |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 504577,
"end": 505310
} | class ____(sgqlc.types.Type):
"""Parameters to be used for the committer_email_pattern rule"""
__schema__ = github_schema
__field_names__ = ("name", "negate", "operator", "pattern")
name = sgqlc.types.Field(String, graphql_name="name")
"""How this rule will appear to users."""
negate = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="negate")
"""If true, the rule will fail if the pattern matches."""
operator = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="operator")
"""The operator to use for matching."""
pattern = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="pattern")
"""The pattern to match with."""
| CommitterEmailPatternParameters |
python | pydata__xarray | asv_bench/benchmarks/dataset_io.py | {
"start": 10104,
"end": 10696
} | class ____(IOMultipleNetCDF):
def setup(self):
    # TODO: Lazily skipped in CI as it is very demanding and slow.
    # Improve times and remove errors.
    _skip_slow()
    # Build the in-memory datasets the timed write methods operate on.
    self.make_ds()
    # On-disk format passed to xr.save_mfdataset in the benchmarks below.
    self.format = "NETCDF3_64BIT"
def time_write_dataset_netcdf4(self):
    # Timed benchmark: write all datasets to their files via the
    # "netcdf4" engine using the format chosen in setup().
    xr.save_mfdataset(
        self.ds_list, self.filenames_list, engine="netcdf4", format=self.format
    )
def time_write_dataset_scipy(self):
    # Timed benchmark: same write as above, but through the "scipy" engine.
    xr.save_mfdataset(
        self.ds_list, self.filenames_list, engine="scipy", format=self.format
    )
| IOWriteMultipleNetCDF3 |
python | astropy__astropy | astropy/nddata/mixins/tests/test_ndslicing.py | {
"start": 461,
"end": 657
} | class ____(NDSlicingMixin, NDData):
pass
# Just some uncertainty (following the StdDevUncertainty implementation of
# storing the uncertainty in a property 'array') with slicing.
| NDDataSliceable |
python | rq__rq | tests/test_spawn_worker.py | {
"start": 2757,
"end": 4210
} | class ____(TimeoutTestCase, RQTestCase):
@slow
def test_idle_worker_warm_shutdown(self):
    """worker with no ongoing job receiving single SIGTERM signal and shutting down"""
    w = SpawnWorker('foo', connection=self.connection)
    self.assertFalse(w._stop_requested)
    # Child process sends a single SIGTERM to this process while the worker
    # is idle (False => no second signal).
    p = Process(target=kill_worker, args=(os.getpid(), False))
    p.start()
    # work() should return cleanly once the signal arrives (warm shutdown).
    w.work()
    p.join(1)
    # The idle warm-shutdown path is expected to leave _stop_requested unset.
    self.assertFalse(w._stop_requested)
@slow
def test_working_worker_cold_shutdown(self):
    """Busy worker shuts down immediately on double SIGTERM signal"""
    fooq = Queue('foo', connection=self.connection)
    w = SpawnWorker(fooq)
    sentinel_file = '/tmp/.rq_sentinel_cold'
    # Guard against leftovers from a previous run: the assertion below
    # relies on the sentinel not existing yet.
    self.assertFalse(
        os.path.exists(sentinel_file), f'{sentinel_file} file should not exist yet, delete that file and try again.'
    )
    # The job would create the sentinel after 5s; the double SIGTERM
    # (True => send two signals) must abort it before that happens.
    fooq.enqueue(create_file_after_timeout, sentinel_file, 5)
    self.assertFalse(w._stop_requested)
    p = Process(target=kill_worker, args=(os.getpid(), True))
    p.start()
    # Cold shutdown exits the worker via SystemExit.
    self.assertRaises(SystemExit, w.work)
    p.join(1)
    self.assertTrue(w._stop_requested)
    # The job was killed before it could create the file.
    self.assertFalse(os.path.exists(sentinel_file))
    shutdown_requested_date = w.shutdown_requested_date
    self.assertIsNotNone(shutdown_requested_date)
    self.assertEqual(type(shutdown_requested_date).__name__, 'datetime')
| WorkerShutdownTestCase |
python | PrefectHQ__prefect | src/prefect/cli/_prompts.py | {
"start": 10920,
"end": 22536
} | class ____(PromptBase[str]):
# Responses parse to plain strings (the timezone name).
response_type: type[str] = str
validate_error_message = "[prompt.invalid]Please enter a valid timezone."

def process_response(self, value: str) -> str:
    """Return *value* unchanged when it names a valid timezone.

    Raises InvalidResponse (carrying ``validate_error_message``) when
    ``is_valid_timezone`` rejects the input with a ValueError.
    """
    try:
        is_valid_timezone(value)
        return value
    except ValueError:
        raise InvalidResponse(self.validate_error_message)
def prompt_rrule_schedule(console: Console) -> RRuleSchedule:
    """
    Prompts the user to enter an RRule string and timezone.

    Defaults: a daily rule ("RRULE:FREQ=DAILY;INTERVAL=1") and UTC.
    Both prompts validate the input before returning.
    """
    rrule = RRuleStringPrompt.ask(
        "[bold][green]?[/] RRule string",
        console=console,
        default="RRULE:FREQ=DAILY;INTERVAL=1",
    )
    timezone = CronTimezonePrompt.ask(
        "[bold][green]?[/] Timezone", console=console, default="UTC"
    )
    return RRuleSchedule(rrule=rrule, timezone=timezone)
# Schedule type prompting utilities
def prompt_schedule_type(console: Console) -> str:
    """
    Prompts the user to select a schedule type from a list of options.

    Returns one of "Interval", "Cron" or "RRule" — the literal strings
    that prompt_schedules() dispatches on.
    """
    selection = prompt_select_from_table(
        console,
        "What type of schedule would you like to use?",
        [
            {"header": "Schedule Type", "key": "type"},
            {"header": "Description", "key": "description"},
        ],
        [
            {
                "type": "Interval",
                "description": (
                    "Allows you to set flow runs to be executed at fixed time"
                    " intervals."
                ),
            },
            {
                "type": "Cron",
                "description": (
                    "Allows you to define recurring flow runs based on a specified"
                    " pattern using cron syntax."
                ),
            },
            {
                "type": "RRule",
                "description": (
                    "Allows you to define recurring flow runs using RFC 2445 recurrence"
                    " rules."
                ),
            },
        ],
    )
    return selection["type"]
def prompt_schedules(console: Console) -> list[dict[str, Any]]:
    """
    Prompt the user to configure schedules for a deployment.

    Returns a list of ``{"schedule": <schedule>, "active": <bool>}``
    entries; empty when the user declines to configure any schedules.
    """
    schedules: list[dict[str, Any]] = []
    if confirm(
        "Would you like to configure schedules for this deployment?", default=True
    ):
        add_schedule = True
        while add_schedule:
            schedule_type = prompt_schedule_type(console)
            if schedule_type == "Cron":
                schedule = prompt_cron_schedule(console)
            elif schedule_type == "Interval":
                schedule = prompt_interval_schedule(console)
            elif schedule_type == "RRule":
                schedule = prompt_rrule_schedule(console)
            else:
                # Defensive: prompt_schedule_type only returns the three
                # handled values. NOTE(review): a narrower exception type
                # (e.g. ValueError) would be preferable here — confirm no
                # caller relies on catching bare Exception.
                raise Exception("Invalid schedule type")
            is_schedule_active = confirm(
                "Would you like to activate this schedule?", default=True
            )
            schedules.append({"schedule": schedule, "active": is_schedule_active})
            add_schedule = confirm(
                "Would you like to add another schedule?", default=False
            )
    return schedules
@client_injector
async def prompt_select_work_pool(
    client: "PrefectClient",
    console: Console,
    prompt: str = "Which work pool would you like to deploy this flow to?",
) -> str:
    """Ask the user to pick a work pool and return its name.

    Agent-typed pools are filtered out. If no eligible pool exists, the
    user is walked through creating one instead.
    """
    work_pools = await client.read_work_pools()
    work_pool_options = [
        work_pool.model_dump()
        for work_pool in work_pools
        if work_pool.type != "prefect-agent"
    ]
    if not work_pool_options:
        # Nothing to select from — fall back to the creation flow.
        work_pool = await prompt_create_work_pool(console)
        return work_pool.name
    else:
        selected_work_pool_row = prompt_select_from_table(
            console,
            prompt,
            [
                {"header": "Work Pool Name", "key": "name"},
                {"header": "Infrastructure Type", "key": "type"},
                {"header": "Description", "key": "description"},
            ],
            work_pool_options,
        )
        return selected_work_pool_row["name"]
async def prompt_build_custom_docker_image(
    console: Console,
    deployment_config: dict[str, Any],
) -> dict[str, Any] | None:
    """Interactively assemble a ``build_docker_image`` deployment step.

    Returns ``None`` (bare ``return``) when the user declines to build an
    image; otherwise a single-key dict mapping the prefect-docker step
    name to its configuration.
    """
    if not confirm(
        "Would you like to build a custom Docker image for this deployment?",
        console=console,
        default=False,
    ):
        return
    build_step = {
        "requires": "prefect-docker>=0.3.1",
        "id": "build-image",
    }

    # Decide where the Dockerfile comes from: an existing file, or the
    # auto-generated temporary one ("auto").
    if os.path.exists("Dockerfile"):
        if confirm(
            "Would you like to use the Dockerfile in the current directory?",
            console=console,
            default=True,
        ):
            build_step["dockerfile"] = "Dockerfile"
        else:
            if confirm(
                "A Dockerfile exists. You chose not to use it. A temporary Dockerfile"
                " will be automatically built during the deployment build step. If"
                " another file named 'Dockerfile' already exists at that time, the"
                " build step will fail. Would you like to rename your existing"
                " Dockerfile?"
            ):
                new_dockerfile_name = prompt(
                    "New Dockerfile name", default="Dockerfile.backup"
                )
                shutil.move("Dockerfile", new_dockerfile_name)
                build_step["dockerfile"] = "auto"
            else:
                # this will otherwise raise when build steps are run as the auto-build feature
                # executed in the build_docker_image step will create a temporary Dockerfile
                raise ValueError(
                    "A Dockerfile already exists. Please remove or rename the existing"
                    " one."
                )
    else:
        build_step["dockerfile"] = "auto"

    # Image coordinates: <repo>/<image>:<tag>.
    repo_name = prompt("Repository name (e.g. your Docker Hub username)").rstrip("/")
    image_name = prompt("Image name", default=deployment_config["name"])
    build_step["image_name"] = f"{repo_name}/{image_name}"
    build_step["tag"] = prompt("Image tag", default="latest")
    console.print(
        "Image"
        f" [bold][yellow]{build_step['image_name']}:{build_step['tag']}[/yellow][/bold]"
        " will be built."
    )
    return {"prefect_docker.deployments.steps.build_docker_image": build_step}
async def prompt_push_custom_docker_image(
    console: Console,
    deployment_config: dict[str, Any],
    build_docker_image_step: dict[str, Any],
) -> tuple[dict[str, Any] | None, dict[str, Any]]:
    """Interactively assemble a ``push_docker_image`` deployment step.

    Returns ``(push_step_or_None, build_docker_image_step)``. Note that
    ``build_docker_image_step`` is mutated in place: its ``image_name``
    is prefixed with the chosen registry URL.
    """
    if not confirm(
        "Would you like to push this image to a remote registry?",
        console=console,
        default=False,
    ):
        return None, build_docker_image_step

    # The push step references the build step's outputs via templating.
    push_step = {
        "requires": "prefect-docker>=0.3.1",
        "image_name": "{{ build-image.image_name }}",
        "tag": "{{ build-image.tag }}",
    }
    registry_url = prompt("Registry URL", default="docker.io").rstrip("/")

    # Rewrite the build step's image name to include the registry prefix.
    repo_and_image_name = build_docker_image_step[
        "prefect_docker.deployments.steps.build_docker_image"
    ]["image_name"]
    full_image_name = f"{registry_url}/{repo_and_image_name}"
    build_docker_image_step["prefect_docker.deployments.steps.build_docker_image"][
        "image_name"
    ] = full_image_name

    if confirm("Is this a private registry?", console=console):
        docker_credentials = {}
        docker_credentials["registry_url"] = registry_url

        if confirm(
            "Would you like use prefect-docker to manage Docker registry credentials?",
            console=console,
            default=False,
        ):
            try:
                import prefect_docker
            except ImportError:
                # prefect-docker is an optional dependency; install on demand.
                console.print("Installing prefect-docker...")
                await ainstall_packages(["prefect[docker]"], stream_output=True)
                import prefect_docker

            credentials_block = prefect_docker.DockerRegistryCredentials
            push_step["credentials"] = (
                "{{ prefect_docker.docker-registry-credentials.docker_registry_creds_name }}"
            )
            docker_registry_creds_name = f"deployment-{slugify(deployment_config['name'])}-{slugify(deployment_config['work_pool']['name'])}-registry-creds"
            create_new_block = False
            try:
                await credentials_block.aload(docker_registry_creds_name)
                if not confirm(
                    (
                        "Would you like to use the existing Docker registry credentials"
                        f" block {docker_registry_creds_name}?"
                    ),
                    console=console,
                    default=True,
                ):
                    create_new_block = True
            except ValueError:
                # No existing block with that name.
                create_new_block = True

            if create_new_block:
                docker_credentials["username"] = prompt(
                    "Docker registry username", console=console
                )
                try:
                    docker_credentials["password"] = prompt(
                        "Docker registry password",
                        console=console,
                        password=True,
                    )
                except GetPassWarning:
                    # Some terminals can't hide input; fall back to plain prompt.
                    docker_credentials["password"] = prompt(
                        "Docker registry password",
                        console=console,
                    )

                new_creds_block = credentials_block(
                    username=docker_credentials["username"],
                    password=docker_credentials["password"],
                    registry_url=docker_credentials["registry_url"],
                )
                coro = new_creds_block.save(
                    name=docker_registry_creds_name, overwrite=True
                )
                if TYPE_CHECKING:
                    assert asyncio.iscoroutine(coro)
                await coro

    return {
        "prefect_docker.deployments.steps.push_docker_image": push_step
    }, build_docker_image_step
@client_injector
async def prompt_create_work_pool(
    client: "PrefectClient",
    console: Console,
):
    """Walk the user through creating a work pool and return it.

    Raises ValueError when the user declines, since a deployment cannot
    proceed without a pool.
    """
    if not confirm(
        (
            "Looks like you don't have any work pools this flow can be deployed to."
            " Would you like to create one?"
        ),
        default=True,
        console=console,
    ):
        raise ValueError(
            "A work pool is required to deploy this flow. Please specify a work pool"
            " name via the '--pool' flag or in your prefect.yaml file."
        )
    async with get_collections_metadata_client() as collections_client:
        worker_metadata = await collections_client.read_worker_metadata()
    # Flatten the per-collection worker metadata, excluding agent workers.
    selected_worker_row = prompt_select_from_table(
        console,
        prompt="What infrastructure type would you like to use for your new work pool?",
        columns=[
            {"header": "Type", "key": "type"},
            {"header": "Description", "key": "description"},
        ],
        data=[
            worker
            for collection in worker_metadata.values()
            for worker in collection.values()
            if worker["type"] != "prefect-agent"
        ],
        table_kwargs={"show_lines": True},
    )
    work_pool_name = prompt("Work pool name")
    work_pool = await client.create_work_pool(
        WorkPoolCreate(name=work_pool_name, type=selected_worker_row["type"])
    )
    console.print(f"Your work pool {work_pool.name!r} has been created!", style="green")
    return work_pool
| RRuleTimezonePrompt |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/asyncio/result.py | {
"start": 1939,
"end": 16744
} | class ____(_WithKeys, AsyncCommon[Row[Unpack[_Ts]]]):
"""An asyncio wrapper around a :class:`_result.Result` object.
The :class:`_asyncio.AsyncResult` only applies to statement executions that
use a server-side cursor. It is returned only from the
:meth:`_asyncio.AsyncConnection.stream` and
:meth:`_asyncio.AsyncSession.stream` methods.
.. note:: As is the case with :class:`_engine.Result`, this object is
used for ORM results returned by :meth:`_asyncio.AsyncSession.execute`,
which can yield instances of ORM mapped objects either individually or
within tuple-like rows. Note that these result objects do not
deduplicate instances or rows automatically as is the case with the
legacy :class:`_orm.Query` object. For in-Python de-duplication of
instances or rows, use the :meth:`_asyncio.AsyncResult.unique` modifier
method.
.. versionadded:: 1.4
"""
__slots__ = ()
_real_result: Result[Unpack[_Ts]]
def __init__(self, real_result: Result[Unpack[_Ts]]):
    """Wrap a synchronous :class:`_result.Result`, copying the state the
    async facade needs (metadata, unique-filter state, scalar support)."""
    self._real_result = real_result
    self._metadata = real_result._metadata
    self._unique_filter_state = real_result._unique_filter_state
    self._source_supports_scalars = real_result._source_supports_scalars
    self._post_creational_filter = None

    # BaseCursorResult pre-generates the "_row_getter". Use that
    # if available rather than building a second one
    if "_row_getter" in real_result.__dict__:
        self._set_memoized_attribute(
            "_row_getter", real_result.__dict__["_row_getter"]
        )
@property
@deprecated(
    "2.1.0",
    "The :attr:`.AsyncResult.t` attribute is deprecated, :class:`.Row` "
    "now behaves like a tuple and can unpack types directly.",
)
def t(self) -> AsyncTupleResult[Tuple[Unpack[_Ts]]]:
    """Apply a "typed tuple" typing filter to returned rows.

    The :attr:`_asyncio.AsyncResult.t` attribute is a synonym for
    calling the :meth:`_asyncio.AsyncResult.tuples` method.

    .. versionadded:: 2.0

    .. seealso::

        :ref:`change_10635` - describes a migration path from this
        workaround for SQLAlchemy 2.1.

    """
    # Typing-only filter: the same object is returned at runtime.
    return self  # type: ignore
@deprecated(
    "2.1.0",
    "The :meth:`.AsyncResult.tuples` method is deprecated, "
    ":class:`.Row` now behaves like a tuple and can unpack types "
    "directly.",
)
def tuples(self) -> AsyncTupleResult[Tuple[Unpack[_Ts]]]:
    """Apply a "typed tuple" typing filter to returned rows.

    This method returns the same :class:`_asyncio.AsyncResult` object
    at runtime,
    however annotates as returning a :class:`_asyncio.AsyncTupleResult`
    object that will indicate to :pep:`484` typing tools that plain typed
    ``Tuple`` instances are returned rather than rows.  This allows
    tuple unpacking and ``__getitem__`` access of :class:`_engine.Row`
    objects to by typed, for those cases where the statement invoked
    itself included typing information.

    .. versionadded:: 2.0

    :return: the :class:`_result.AsyncTupleResult` type at typing time.

    .. seealso::

        :ref:`change_10635` - describes a migration path from this
        workaround for SQLAlchemy 2.1.

        :attr:`_asyncio.AsyncResult.t` - shorter synonym

        :attr:`_engine.Row.t` - :class:`_engine.Row` version

    """
    # Typing-only filter: the same object is returned at runtime.
    return self  # type: ignore
@_generative
def unique(self, strategy: Optional[_UniqueFilterType] = None) -> Self:
    """Apply unique filtering to the objects returned by this
    :class:`_asyncio.AsyncResult`.

    Refer to :meth:`_engine.Result.unique` in the synchronous
    SQLAlchemy API for a complete behavioral description.

    """
    # (seen-set, strategy); the set accumulates already-yielded keys.
    self._unique_filter_state = (set(), strategy)
    return self
def columns(self, *col_expressions: _KeyIndexType) -> Self:
    r"""Establish the columns that should be returned in each row.

    Refer to :meth:`_engine.Result.columns` in the synchronous
    SQLAlchemy API for a complete behavioral description.

    """
    return self._column_slices(col_expressions)
async def partitions(
    self, size: Optional[int] = None
) -> AsyncIterator[Sequence[Row[Unpack[_Ts]]]]:
    """Iterate through sub-lists of rows of the size given.

    An async iterator is returned::

        async def scroll_results(connection):
            result = await connection.stream(select(users_table))

            async for partition in result.partitions(100):
                print("list of rows: %s" % partition)

    Refer to :meth:`_engine.Result.partitions` in the synchronous
    SQLAlchemy API for a complete behavioral description.

    """
    getter = self._manyrow_getter
    while True:
        # greenlet_spawn bridges the sync getter into the async context.
        partition = await greenlet_spawn(getter, self, size)
        if partition:
            yield partition
        else:
            # An empty partition signals the result is exhausted.
            break
async def fetchall(self) -> Sequence[Row[Unpack[_Ts]]]:
    """A synonym for the :meth:`_asyncio.AsyncResult.all` method.

    .. versionadded:: 2.0

    """
    return await greenlet_spawn(self._allrows)
async def fetchone(self) -> Optional[Row[Unpack[_Ts]]]:
    """Fetch one row.

    When all rows are exhausted, returns None.

    This method is provided for backwards compatibility with
    SQLAlchemy 1.x.x.

    To fetch the first row of a result only, use the
    :meth:`_asyncio.AsyncResult.first` method.  To iterate through all
    rows, iterate the :class:`_asyncio.AsyncResult` object directly.

    :return: a :class:`_engine.Row` object if no filters are applied,
     or ``None`` if no rows remain.

    """
    fetched = await greenlet_spawn(self._onerow_getter, self)
    # _NO_ROW is the internal end-of-results sentinel.
    return None if fetched is _NO_ROW else fetched
async def fetchmany(
    self, size: Optional[int] = None
) -> Sequence[Row[Unpack[_Ts]]]:
    """Fetch many rows.

    When all rows are exhausted, returns an empty list.

    This method is provided for backwards compatibility with
    SQLAlchemy 1.x.x.

    To fetch rows in groups, use the
    :meth:`._asyncio.AsyncResult.partitions` method.

    :return: a list of :class:`_engine.Row` objects.

    .. seealso::

        :meth:`_asyncio.AsyncResult.partitions`

    """
    return await greenlet_spawn(self._manyrow_getter, self, size)
async def all(self) -> Sequence[Row[Unpack[_Ts]]]:
    """Return all rows in a list.

    Closes the result set after invocation.   Subsequent invocations
    will return an empty list.

    :return: a list of :class:`_engine.Row` objects.

    """
    return await greenlet_spawn(self._allrows)
def __aiter__(self) -> AsyncResult[Unpack[_Ts]]:
    # The result object is its own async iterator; see __anext__.
    return self
async def __anext__(self) -> Row[Unpack[_Ts]]:
    """Return the next row, raising StopAsyncIteration when exhausted."""
    fetched = await greenlet_spawn(self._onerow_getter, self)
    if fetched is _NO_ROW:
        # _NO_ROW is the internal end-of-results sentinel.
        raise StopAsyncIteration()
    return fetched
async def first(self) -> Optional[Row[Unpack[_Ts]]]:
    """Fetch the first row or ``None`` if no row is present.

    Closes the result set and discards remaining rows.

    .. note::  This method returns one **row**, e.g. tuple, by default.
       To return exactly one single scalar value, that is, the first
       column of the first row, use the
       :meth:`_asyncio.AsyncResult.scalar` method,
       or combine :meth:`_asyncio.AsyncResult.scalars` and
       :meth:`_asyncio.AsyncResult.first`.

       Additionally, in contrast to the behavior of the legacy  ORM
       :meth:`_orm.Query.first` method, **no limit is applied** to the
       SQL query which was invoked to produce this
       :class:`_asyncio.AsyncResult`;
       for a DBAPI driver that buffers results in memory before yielding
       rows, all rows will be sent to the Python process and all but
       the first row will be discarded.

       .. seealso::

            :ref:`migration_20_unify_select`

    :return: a :class:`_engine.Row` object, or None
     if no rows remain.

    .. seealso::

        :meth:`_asyncio.AsyncResult.scalar`

        :meth:`_asyncio.AsyncResult.one`

    """
    # Flags appear to select no-raise/row (not scalar) behavior of
    # _only_one_row — confirm against the Result implementation.
    return await greenlet_spawn(self._only_one_row, False, False, False)
async def one_or_none(self) -> Optional[Row[Unpack[_Ts]]]:
"""Return at most one result or raise an exception.
Returns ``None`` if the result has no rows.
Raises :class:`.MultipleResultsFound`
if multiple rows are returned.
.. versionadded:: 1.4
:return: The first :class:`_engine.Row` or ``None`` if no row
is available.
:raises: :class:`.MultipleResultsFound`
.. seealso::
:meth:`_asyncio.AsyncResult.first`
:meth:`_asyncio.AsyncResult.one`
"""
return await greenlet_spawn(self._only_one_row, True, False, False)
@overload
async def scalar_one(self: AsyncResult[_T]) -> _T: ...
@overload
async def scalar_one(self) -> Any: ...
async def scalar_one(self) -> Any:
"""Return exactly one scalar result or raise an exception.
This is equivalent to calling :meth:`_asyncio.AsyncResult.scalars` and
then :meth:`_asyncio.AsyncScalarResult.one`.
.. seealso::
:meth:`_asyncio.AsyncScalarResult.one`
:meth:`_asyncio.AsyncResult.scalars`
"""
return await greenlet_spawn(self._only_one_row, True, True, True)
@overload
async def scalar_one_or_none(
self: AsyncResult[_T],
) -> Optional[_T]: ...
@overload
async def scalar_one_or_none(self) -> Optional[Any]: ...
async def scalar_one_or_none(self) -> Optional[Any]:
"""Return exactly one scalar result or ``None``.
This is equivalent to calling :meth:`_asyncio.AsyncResult.scalars` and
then :meth:`_asyncio.AsyncScalarResult.one_or_none`.
.. seealso::
:meth:`_asyncio.AsyncScalarResult.one_or_none`
:meth:`_asyncio.AsyncResult.scalars`
"""
return await greenlet_spawn(self._only_one_row, True, False, True)
async def one(self) -> Row[Unpack[_Ts]]:
"""Return exactly one row or raise an exception.
Raises :class:`.NoResultFound` if the result returns no
rows, or :class:`.MultipleResultsFound` if multiple rows
would be returned.
.. note:: This method returns one **row**, e.g. tuple, by default.
To return exactly one single scalar value, that is, the first
column of the first row, use the
:meth:`_asyncio.AsyncResult.scalar_one` method, or combine
:meth:`_asyncio.AsyncResult.scalars` and
:meth:`_asyncio.AsyncResult.one`.
.. versionadded:: 1.4
:return: The first :class:`_engine.Row`.
:raises: :class:`.MultipleResultsFound`, :class:`.NoResultFound`
.. seealso::
:meth:`_asyncio.AsyncResult.first`
:meth:`_asyncio.AsyncResult.one_or_none`
:meth:`_asyncio.AsyncResult.scalar_one`
"""
return await greenlet_spawn(self._only_one_row, True, True, False)
@overload
async def scalar(self: AsyncResult[_T]) -> Optional[_T]: ...
@overload
async def scalar(self) -> Any: ...
async def scalar(self) -> Any:
"""Fetch the first column of the first row, and close the result set.
Returns ``None`` if there are no rows to fetch.
No validation is performed to test if additional rows remain.
After calling this method, the object is fully closed,
e.g. the :meth:`_engine.CursorResult.close`
method will have been called.
:return: a Python scalar value, or ``None`` if no rows remain.
"""
return await greenlet_spawn(self._only_one_row, False, False, True)
async def freeze(self) -> FrozenResult[Unpack[_Ts]]:
"""Return a callable object that will produce copies of this
:class:`_asyncio.AsyncResult` when invoked.
The callable object returned is an instance of
:class:`_engine.FrozenResult`.
This is used for result set caching. The method must be called
on the result when it has been unconsumed, and calling the method
will consume the result fully. When the :class:`_engine.FrozenResult`
is retrieved from a cache, it can be called any number of times where
it will produce a new :class:`_engine.Result` object each time
against its stored set of rows.
.. seealso::
:ref:`do_orm_execute_re_executing` - example usage within the
ORM to implement a result-set cache.
"""
return await greenlet_spawn(FrozenResult, self)
@overload
def scalars(
self: AsyncResult[_T, Unpack[TupleAny]], index: Literal[0]
) -> AsyncScalarResult[_T]: ...
@overload
def scalars(
self: AsyncResult[_T, Unpack[TupleAny]],
) -> AsyncScalarResult[_T]: ...
@overload
def scalars(self, index: _KeyIndexType = 0) -> AsyncScalarResult[Any]: ...
def scalars(self, index: _KeyIndexType = 0) -> AsyncScalarResult[Any]:
"""Return an :class:`_asyncio.AsyncScalarResult` filtering object which
will return single elements rather than :class:`_row.Row` objects.
Refer to :meth:`_result.Result.scalars` in the synchronous
SQLAlchemy API for a complete behavioral description.
:param index: integer or row key indicating the column to be fetched
from each row, defaults to ``0`` indicating the first column.
:return: a new :class:`_asyncio.AsyncScalarResult` filtering object
referring to this :class:`_asyncio.AsyncResult` object.
"""
return AsyncScalarResult(self._real_result, index)
def mappings(self) -> AsyncMappingResult:
"""Apply a mappings filter to returned rows, returning an instance of
:class:`_asyncio.AsyncMappingResult`.
When this filter is applied, fetching rows will return
:class:`_engine.RowMapping` objects instead of :class:`_engine.Row`
objects.
:return: a new :class:`_asyncio.AsyncMappingResult` filtering object
referring to the underlying :class:`_result.Result` object.
"""
return AsyncMappingResult(self._real_result)
| AsyncResult |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-recharge/unit_tests/integration/streams/test_onetimes.py | {
"start": 503,
"end": 1826
} | class ____(StreamTestCase):
_STREAM_NAME = "onetimes"
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
self.stream_request().with_limit(250).with_updated_at_min(START_DATE).build(),
get_stream_response(_STREAM_NAME).with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD)).build(),
)
output = read_full_refresh(self._config, _STREAM_NAME)
assert len(output.records) == 1
@HttpMocker()
def test_given_multiple_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
self.stream_request().with_limit(250).with_next_page_token(NEXT_PAGE_TOKEN).build(),
get_stream_response(_STREAM_NAME).with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD)).build(),
)
http_mocker.get(
self.stream_request().with_limit(250).with_updated_at_min(START_DATE).build(),
get_stream_response(_STREAM_NAME).with_pagination().with_record(get_stream_record(_STREAM_NAME, "id", _CURSOR_FIELD)).build(),
)
output = read_full_refresh(self._config, _STREAM_NAME)
assert len(output.records) == 2
@freezegun.freeze_time(NOW.isoformat())
| TestFullRefresh |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 99749,
"end": 100143
} | class ____(Token):
"""
A token that will never match.
"""
def __init__(self) -> None:
super().__init__()
self._may_return_empty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
raise ParseException(instring, loc, self.errmsg, self)
| NoMatch |
python | dagster-io__dagster | python_modules/libraries/dagster-gcp/dagster_gcp/pipes/clients/dataproc_job.py | {
"start": 1117,
"end": 2349
} | class ____(TypedDict):
request: NotRequired[SubmitJobRequest]
project_id: NotRequired[str]
region: NotRequired[str]
job: NotRequired[Job]
retry: NotRequired[Retry]
timeout: NotRequired[float]
metadata: NotRequired[Sequence[tuple[str, Union[str, bytes]]]]
def _inject_pipes_args_into_list(args: Sequence[str], session: PipesSession) -> list[str]:
args_list = list(args)
for key, value in session.get_bootstrap_cli_arguments().items():
args_list.extend([key, value])
return args_list
# Only lowercase letters, numbers, and dashes are allowed. The value must start with lowercase letter or number and end with a lowercase letter or number.
DATAPROC_LABELS_PATTERN = r"[^a-z0-9-]|^[^a-z0-9]|[^a-z0-9]$"
def _sanitize_labels(labels: Mapping[str, Any]) -> dict[str, str]:
# only alphanumeric characters, hyphens, and underscores are allowed in labels
return {
re.sub(DATAPROC_LABELS_PATTERN, "-", k.lower()): re.sub(
DATAPROC_LABELS_PATTERN, "-", v.lower()
)
for k, v in labels.items()
}
JOB_TYPES_WITH_ARGS = [
"hadoop_job",
"spark_job",
"pyspark_job",
"spark_r_job",
"flink_job",
]
@public
@preview
| SubmitJobParams |
python | spyder-ide__spyder | spyder/api/widgets/toolbars.py | {
"start": 1337,
"end": 1874
} | class ____(QObject):
"""
Filter tool tip events on toolbuttons.
"""
def eventFilter(self, obj, event):
event_type = event.type()
action = obj.defaultAction() if isinstance(obj, QToolButton) else None
if event_type == QEvent.ToolTip and action is not None:
if action.tip is None:
return action.text_beside_icon
return QObject.eventFilter(self, obj, event)
# ---- Styles
# ----------------------------------------------------------------------------
| ToolTipFilter |
python | nedbat__coveragepy | tests/test_api.py | {
"start": 33271,
"end": 39031
} | class ____(IncludeOmitTestsMixin, CoverageTest):
"""Test using `source`, `include`, and `omit` when measuring code."""
def setUp(self) -> None:
super().setUp()
# These tests use the TESTS_DIR/modules files, but they cd into it. To
# keep tests from cross-contaminating, we make a copy of the files.
# Since we need to import from there, we also add it to the beginning
# of sys.path.
shutil.copytree(
nice_file(TESTS_DIR, "modules"),
"tests_dir_modules",
ignore=shutil.ignore_patterns("__pycache__"),
)
sys.path.insert(0, abs_file("tests_dir_modules"))
def coverage_usepkgs_counts(self, **kwargs: TCovKwargs) -> dict[str, int]:
"""Run coverage on usepkgs and return a line summary.
Arguments are passed to the `coverage.Coverage` constructor.
"""
cov = coverage.Coverage(**kwargs)
with cov.collect():
import usepkgs # pylint: disable=import-error, unused-import
with self.assert_warnings(cov, []):
data = cov.get_data()
summary = line_counts(data)
for k, v in list(summary.items()):
assert k.endswith(".py")
summary[k[:-3]] = v
return summary
def coverage_usepkgs(self, **kwargs: TCovKwargs) -> Iterable[str]:
summary = self.coverage_usepkgs_counts(**kwargs)
return list(summary)
def test_source_include_exclusive(self) -> None:
cov = coverage.Coverage(source=["pkg1"], include=["pkg2"])
with self.assert_warnings(cov, ["--include is ignored because --source is set"]):
cov.start()
cov.stop()
def test_source_package_as_package(self) -> None:
assert not os.path.isdir("pkg1")
lines = self.coverage_usepkgs_counts(source=["pkg1"])
self.filenames_in(list(lines), "p1a p1b")
self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb")
# Because source= was specified, we do search for un-executed files.
assert lines["p1c"] == 0
def test_source_package_as_dir(self) -> None:
os.chdir("tests_dir_modules")
assert os.path.isdir("pkg1")
lines = self.coverage_usepkgs_counts(source=["pkg1"])
self.filenames_in(list(lines), "p1a p1b")
self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb")
# Because source= was specified, we do search for un-executed files.
assert lines["p1c"] == 0
def test_source_package_dotted_sub(self) -> None:
lines = self.coverage_usepkgs_counts(source=["pkg1.sub"])
self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb")
# Because source= was specified, we do search for un-executed files.
assert lines["runmod3"] == 0
def test_source_package_dotted_p1b(self) -> None:
lines = self.coverage_usepkgs_counts(source=["pkg1.p1b"])
self.filenames_in(list(lines), "p1b")
self.filenames_not_in(list(lines), "p1a p1c p2a p2b othera otherb osa osb")
def test_source_package_part_omitted(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/218
# Used to be if you omitted something executed and inside the source,
# then after it was executed but not recorded, it would be found in
# the search for un-executed files, and given a score of 0%.
# The omit arg is by path, so need to be in the modules directory.
os.chdir("tests_dir_modules")
lines = self.coverage_usepkgs_counts(source=["pkg1"], omit=["pkg1/p1b.py"])
self.filenames_in(list(lines), "p1a")
self.filenames_not_in(list(lines), "p1b")
assert lines["p1c"] == 0
def test_source_package_as_package_part_omitted(self) -> None:
# https://github.com/coveragepy/coveragepy/issues/638
lines = self.coverage_usepkgs_counts(source=["pkg1"], omit=["*/p1b.py"])
self.filenames_in(list(lines), "p1a")
self.filenames_not_in(list(lines), "p1b")
assert lines["p1c"] == 0
def test_ambiguous_source_package_as_dir(self) -> None:
# pkg1 is a directory and a pkg, since we cd into tests_dir_modules/ambiguous
os.chdir("tests_dir_modules/ambiguous")
# pkg1 defaults to directory because tests_dir_modules/ambiguous/pkg1 exists
lines = self.coverage_usepkgs_counts(source=["pkg1"])
self.filenames_in(list(lines), "ambiguous")
self.filenames_not_in(list(lines), "p1a p1b p1c")
def test_ambiguous_source_package_as_package(self) -> None:
# pkg1 is a directory and a pkg, since we cd into tests_dir_modules/ambiguous
os.chdir("tests_dir_modules/ambiguous")
lines = self.coverage_usepkgs_counts(source_pkgs=["pkg1"])
self.filenames_in(list(lines), "p1a p1b")
self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb ambiguous")
# Because source= was specified, we do search for un-executed files.
assert lines["p1c"] == 0
def test_source_dirs(self) -> None:
os.chdir("tests_dir_modules")
assert os.path.isdir("pkg1")
lines = self.coverage_usepkgs_counts(source_dirs=["pkg1"])
self.filenames_in(list(lines), "p1a p1b")
self.filenames_not_in(list(lines), "p2a p2b othera otherb osa osb")
# Because source_dirs= was specified, we do search for un-executed files.
assert lines["p1c"] == 0
def test_non_existent_source_dir(self) -> None:
with pytest.raises(
ConfigError,
match=re.escape("Source dir is not a directory: 'i-do-not-exist'"),
):
self.coverage_usepkgs_counts(source_dirs=["i-do-not-exist"])
| SourceIncludeOmitTest |
python | gevent__gevent | src/gevent/tests/test__issue1686.py | {
"start": 543,
"end": 2878
} | class ____(unittest.TestCase):
def test(self): # pylint:disable=too-many-locals
# If this test is broken, there are a few failure modes.
# - In the original examples, the parent process just hangs, because the
# child has raced ahead, spawned the greenlet and read the data. When the
# greenlet goes to read in the parent, it blocks, and the hub and loop
# wait for it.
# - Here, our child detects the greenlet ran when it shouldn't and
# raises an error, which translates to a non-zero exit status,
# which the parent checks for and fails by raising an exception before
# returning control to the hub. We can replicate the hang by removing the
# assertion in the child.
from time import sleep as hang
from gevent import get_hub
from gevent import spawn
from gevent.socket import wait_read
from gevent.os import nb_read
from gevent.os import nb_write
from gevent.os import make_nonblocking
from gevent.os import fork
from gevent.os import waitpid
pipe_read_fd, pipe_write_fd = os.pipe()
make_nonblocking(pipe_read_fd)
make_nonblocking(pipe_write_fd)
run = []
def reader():
run.append(1)
return nb_read(pipe_read_fd, 4096)
# Put data in the pipe
DATA = b'test'
nb_write(pipe_write_fd, DATA)
# Make sure we're ready to read it
wait_read(pipe_read_fd)
# Schedule a greenlet to start
reader = spawn(reader)
hub = get_hub()
pid = fork()
if pid == 0:
# Child destroys the hub. The reader should not have run.
hub.destroy(destroy_loop=True)
self.assertFalse(run)
os._exit(0)
return # pylint:disable=unreachable
# The parent.
# Briefly prevent us from spinning our event loop.
hang(0.5)
wait_child_result = waitpid(pid, 0)
self.assertEqual(wait_child_result, (pid, 0))
# We should get the data; the greenlet only runs in the parent.
data = reader.get()
self.assertEqual(run, [1])
self.assertEqual(data, DATA)
if __name__ == '__main__':
greentest.main()
| TestDestroyInChildWithActiveSpawn |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.