language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/issues/endpoints/test_event_owners.py | {
"start": 333,
"end": 8771
} | class ____(APITestCase):
def setUp(self) -> None:
self.login_as(user=self.user)
self.user2 = self.create_user("user2@example.com")
self.user3 = self.create_user("user3@example.com")
self.team = self.create_team(
organization=self.organization, slug="tiger-team", members=[self.user]
)
self.team2 = self.create_team(
organization=self.organization, slug="tiger-team2", members=[self.user2]
)
self.team3 = self.create_team(
organization=self.organization, slug="tiger-team3", members=[self.user3]
)
self.project = self.create_project(
organization=self.organization, teams=[self.team, self.team2, self.team3], slug="bengal"
)
def test_no_rules(self) -> None:
event1 = self.store_event(
data={"stacktrace": {"frames": [{"filename": "foo.py"}]}}, project_id=self.project.id
)
self.path = reverse(
"sentry-api-0-event-owners",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": event1.event_id,
},
)
resp = self.client.get(self.path)
assert resp.status_code == 200
assert len(resp.data["owners"]) == 0
assert resp.data["rule"] is None
assert len(resp.data["rules"]) == 0
def test_no_matching_owners(self) -> None:
rule_a = Rule(Matcher("path", "bar.py"), [Owner("user", self.user.email)])
ProjectOwnership.objects.create(
project_id=self.project.id, schema=dump_schema([rule_a]), fallthrough=True
)
event1 = self.store_event(
data={"stacktrace": {"frames": [{"filename": "foo.py"}]}}, project_id=self.project.id
)
self.path = reverse(
"sentry-api-0-event-owners",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": event1.event_id,
},
)
resp = self.client.get(self.path)
assert resp.status_code == 200
assert len(resp.data["owners"]) == 0
assert resp.data["rule"] is None
assert len(resp.data["rules"]) == 0
def test_matching_non_existing_owner(self) -> None:
rule_a = Rule(Matcher("path", "*"), [Owner("user", "doesnotexist@fake.com")])
ProjectOwnership.objects.create(
project_id=self.project.id, schema=dump_schema([rule_a]), fallthrough=True
)
event1 = self.store_event(
data={"stacktrace": {"frames": [{"filename": "foo.py"}]}}, project_id=self.project.id
)
self.path = reverse(
"sentry-api-0-event-owners",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": event1.event_id,
},
)
resp = self.client.get(self.path)
assert resp.status_code == 200
assert len(resp.data["owners"]) == 0
assert resp.data["rule"] == Matcher(type="path", pattern="*")
assert len(resp.data["rules"]) == 1
def test_one_owner(self) -> None:
rule_a = Rule(Matcher("path", "*.py"), [Owner("user", self.user.email)])
ProjectOwnership.objects.create(
project_id=self.project.id, schema=dump_schema([rule_a]), fallthrough=True
)
event1 = self.store_event(
data={"stacktrace": {"frames": [{"filename": "foo.py"}]}}, project_id=self.project.id
)
self.path = reverse(
"sentry-api-0-event-owners",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": event1.event_id,
},
)
resp = self.client.get(self.path)
assert resp.status_code == 200
assert len(resp.data["owners"]) == 1
assert resp.data["owners"][0]["id"] == str(self.user.id)
assert resp.data["rule"] == Matcher("path", "*.py")
assert len(resp.data["rules"]) == 1
def test_multiple_owners(self) -> None:
users = [self.user, self.user2, self.user3]
rules = [
Rule(Matcher("path", "*.py"), [Owner("user", users[0].email)]),
Rule(Matcher("path", "*foo*"), [Owner("user", users[1].email)]),
Rule(Matcher("path", "*"), [Owner("user", users[2].email)]),
]
ProjectOwnership.objects.create(
project_id=self.project.id, schema=dump_schema(rules), fallthrough=True
)
event1 = self.store_event(
data={"stacktrace": {"frames": [{"filename": "foo.py"}]}}, project_id=self.project.id
)
self.path = reverse(
"sentry-api-0-event-owners",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": event1.event_id,
},
)
resp = self.client.get(self.path)
assert resp.status_code == 200
assert len(resp.data["owners"]) == 3
assert [o["id"] for o in resp.data["owners"]] == [str(u.id) for u in users]
assert resp.data["rule"] == Matcher("path", "*.py")
assert len(resp.data["rules"]) == 3
def test_multiple_owners_order_matters(self) -> None:
users = [self.user, self.user2, self.user3]
rules = [
Rule(Matcher("path", "*.py"), [Owner("user", users[0].email)]),
Rule(Matcher("path", "*foo*"), [Owner("user", users[1].email)]),
Rule(Matcher("path", "*"), [Owner("user", users[2].email)]),
]
rules.reverse()
ProjectOwnership.objects.create(
project_id=self.project.id, schema=dump_schema(rules), fallthrough=True
)
event1 = self.store_event(
data={"stacktrace": {"frames": [{"filename": "foo.py"}]}}, project_id=self.project.id
)
self.path = reverse(
"sentry-api-0-event-owners",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": event1.event_id,
},
)
resp = self.client.get(self.path)
assert resp.status_code == 200
assert len(resp.data["owners"]) == 3
assert [o["id"] for o in resp.data["owners"]] == [str(u.id) for u in reversed(users)]
assert resp.data["rule"] == Matcher("path", "*")
assert len(resp.data["rules"]) == 3
def test_owners_of_different_types_ordered_correctly(self) -> None:
owners = [self.user, self.team3, self.user2, self.team2, self.user3, self.team]
rules = [
Rule(Matcher("path", "*.py"), [Owner("user", owners[0].email)]),
Rule(Matcher("path", "*py"), [Owner("team", owners[1].slug)]),
Rule(Matcher("path", "*foo*"), [Owner("user", owners[2].email)]),
Rule(Matcher("path", "*y"), [Owner("team", owners[3].slug)]),
Rule(Matcher("path", "*"), [Owner("user", owners[4].email)]),
Rule(Matcher("path", "*o.py"), [Owner("team", owners[5].slug)]),
]
ProjectOwnership.objects.create(
project_id=self.project.id, schema=dump_schema(rules), fallthrough=True
)
event1 = self.store_event(
data={"stacktrace": {"frames": [{"filename": "foo.py"}]}}, project_id=self.project.id
)
self.path = reverse(
"sentry-api-0-event-owners",
kwargs={
"organization_id_or_slug": self.organization.slug,
"project_id_or_slug": self.project.slug,
"event_id": event1.event_id,
},
)
resp = self.client.get(self.path)
assert resp.status_code == 200
assert len(resp.data["owners"]) == 6
assert [o["id"] for o in resp.data["owners"]] == [str(o.id) for o in owners]
assert [o["type"] for o in resp.data["owners"]] == ["user", "team"] * 3
assert resp.data["rule"] == Matcher("path", "*.py")
assert len(resp.data["rules"]) == 6
| ProjectOwnershipEndpointTestCase |
python | pallets__jinja | src/jinja2/utils.py | {
"start": 2179,
"end": 13155
} | class ____(enum.Enum):
context = enum.auto()
eval_context = enum.auto()
environment = enum.auto()
@classmethod
def from_obj(cls, obj: F) -> t.Optional["_PassArg"]:
if hasattr(obj, "jinja_pass_arg"):
return obj.jinja_pass_arg # type: ignore
return None
def internalcode(f: F) -> F:
"""Marks the function as internally used"""
internal_code.add(f.__code__)
return f
def is_undefined(obj: t.Any) -> bool:
"""Check if the object passed is undefined. This does nothing more than
performing an instance check against :class:`Undefined` but looks nicer.
This can be used for custom filters or tests that want to react to
undefined variables. For example a custom default filter can look like
this::
def default(var, default=''):
if is_undefined(var):
return default
return var
"""
from .runtime import Undefined
return isinstance(obj, Undefined)
def consume(iterable: t.Iterable[t.Any]) -> None:
"""Consumes an iterable without doing anything with it."""
for _ in iterable:
pass
def clear_caches() -> None:
"""Jinja keeps internal caches for environments and lexers. These are
used so that Jinja doesn't have to recreate environments and lexers all
the time. Normally you don't have to care about that but if you are
measuring memory consumption you may want to clean the caches.
"""
from .environment import get_spontaneous_environment
from .lexer import _lexer_cache
get_spontaneous_environment.cache_clear()
_lexer_cache.clear()
def import_string(import_name: str, silent: bool = False) -> t.Any:
"""Imports an object based on a string. This is useful if you want to
use import paths as endpoints or something similar. An import path can
be specified either in dotted notation (``xml.sax.saxutils.escape``)
or with a colon as object delimiter (``xml.sax.saxutils:escape``).
If the `silent` is True the return value will be `None` if the import
fails.
:return: imported object
"""
try:
if ":" in import_name:
module, obj = import_name.split(":", 1)
elif "." in import_name:
module, _, obj = import_name.rpartition(".")
else:
return __import__(import_name)
return getattr(__import__(module, None, None, [obj]), obj)
except (ImportError, AttributeError):
if not silent:
raise
def open_if_exists(filename: str, mode: str = "rb") -> t.IO[t.Any] | None:
"""Returns a file descriptor for the filename if that file exists,
otherwise ``None``.
"""
if not os.path.isfile(filename):
return None
return open(filename, mode)
def object_type_repr(obj: t.Any) -> str:
"""Returns the name of the object's type. For some recognized
singletons the name of the object is returned instead. (For
example for `None` and `Ellipsis`).
"""
if obj is None:
return "None"
elif obj is Ellipsis:
return "Ellipsis"
cls = type(obj)
if cls.__module__ == "builtins":
return f"{cls.__name__} object"
return f"{cls.__module__}.{cls.__name__} object"
def pformat(obj: t.Any) -> str:
"""Format an object using :func:`pprint.pformat`."""
from pprint import pformat
return pformat(obj)
_http_re = re.compile(
r"""
^
(
(https?://|www\.) # scheme or www
(([\w%-]+\.)+)? # subdomain
(
[a-z]{2,63} # basic tld
|
xn--[\w%]{2,59} # idna tld
)
|
([\w%-]{2,63}\.)+ # basic domain
(com|net|int|edu|gov|org|info|mil) # basic tld
|
(https?://) # scheme
(
(([\d]{1,3})(\.[\d]{1,3}){3}) # IPv4
|
(\[([\da-f]{0,4}:){2}([\da-f]{0,4}:?){1,6}]) # IPv6
)
)
(?::[\d]{1,5})? # port
(?:[/?#]\S*)? # path, query, and fragment
$
""",
re.IGNORECASE | re.VERBOSE,
)
_email_re = re.compile(r"^\S+@\w[\w.-]*\.\w+$")
def urlize(
text: str,
trim_url_limit: int | None = None,
rel: str | None = None,
target: str | None = None,
extra_schemes: t.Iterable[str] | None = None,
) -> str:
"""Convert URLs in text into clickable links.
This may not recognize links in some situations. Usually, a more
comprehensive formatter, such as a Markdown library, is a better
choice.
Works on ``http://``, ``https://``, ``www.``, ``mailto:``, and email
addresses. Links with trailing punctuation (periods, commas, closing
parentheses) and leading punctuation (opening parentheses) are
recognized excluding the punctuation. Email addresses that include
header fields are not recognized (for example,
``mailto:address@example.com?cc=copy@example.com``).
:param text: Original text containing URLs to link.
:param trim_url_limit: Shorten displayed URL values to this length.
:param target: Add the ``target`` attribute to links.
:param rel: Add the ``rel`` attribute to links.
:param extra_schemes: Recognize URLs that start with these schemes
in addition to the default behavior.
.. versionchanged:: 3.0
The ``extra_schemes`` parameter was added.
.. versionchanged:: 3.0
Generate ``https://`` links for URLs without a scheme.
.. versionchanged:: 3.0
The parsing rules were updated. Recognize email addresses with
or without the ``mailto:`` scheme. Validate IP addresses. Ignore
parentheses and brackets in more cases.
"""
if trim_url_limit is not None:
def trim_url(x: str) -> str:
if len(x) > trim_url_limit:
return f"{x[:trim_url_limit]}..."
return x
else:
def trim_url(x: str) -> str:
return x
words = re.split(r"(\s+)", str(markupsafe.escape(text)))
rel_attr = f' rel="{markupsafe.escape(rel)}"' if rel else ""
target_attr = f' target="{markupsafe.escape(target)}"' if target else ""
for i, word in enumerate(words):
head, middle, tail = "", word, ""
match = re.match(r"^([(<]|<)+", middle)
if match:
head = match.group()
middle = middle[match.end() :]
# Unlike lead, which is anchored to the start of the string,
# need to check that the string ends with any of the characters
# before trying to match all of them, to avoid backtracking.
if middle.endswith((")", ">", ".", ",", "\n", ">")):
match = re.search(r"([)>.,\n]|>)+$", middle)
if match:
tail = match.group()
middle = middle[: match.start()]
# Prefer balancing parentheses in URLs instead of ignoring a
# trailing character.
for start_char, end_char in ("(", ")"), ("<", ">"), ("<", ">"):
start_count = middle.count(start_char)
if start_count <= middle.count(end_char):
# Balanced, or lighter on the left
continue
# Move as many as possible from the tail to balance
for _ in range(min(start_count, tail.count(end_char))):
end_index = tail.index(end_char) + len(end_char)
# Move anything in the tail before the end char too
middle += tail[:end_index]
tail = tail[end_index:]
if _http_re.match(middle):
if middle.startswith("https://") or middle.startswith("http://"):
middle = (
f'<a href="{middle}"{rel_attr}{target_attr}>{trim_url(middle)}</a>'
)
else:
middle = (
f'<a href="https://{middle}"{rel_attr}{target_attr}>'
f"{trim_url(middle)}</a>"
)
elif middle.startswith("mailto:") and _email_re.match(middle[7:]):
middle = f'<a href="{middle}">{middle[7:]}</a>'
elif (
"@" in middle
and not middle.startswith("www.")
# ignore values like `@a@b`
and not middle.startswith("@")
and ":" not in middle
and _email_re.match(middle)
):
middle = f'<a href="mailto:{middle}">{middle}</a>'
elif extra_schemes is not None:
for scheme in extra_schemes:
if middle != scheme and middle.startswith(scheme):
middle = f'<a href="{middle}"{rel_attr}{target_attr}>{middle}</a>'
words[i] = f"{head}{middle}{tail}"
return "".join(words)
def generate_lorem_ipsum(
n: int = 5, html: bool = True, min: int = 20, max: int = 100
) -> str:
"""Generate some lorem ipsum for the template."""
from .constants import LOREM_IPSUM_WORDS
words = LOREM_IPSUM_WORDS.split()
result = []
for _ in range(n):
next_capitalized = True
last_comma = last_fullstop = 0
word = None
last = None
p = []
# each paragraph contains out of 20 to 100 words.
for idx, _ in enumerate(range(randrange(min, max))):
while True:
word = choice(words)
if word != last:
last = word
break
if next_capitalized:
word = word.capitalize()
next_capitalized = False
# add commas
if idx - randrange(3, 8) > last_comma:
last_comma = idx
last_fullstop += 2
word += ","
# add end of sentences
if idx - randrange(10, 20) > last_fullstop:
last_comma = last_fullstop = idx
word += "."
next_capitalized = True
p.append(word)
# ensure that the paragraph ends with a dot.
p_str = " ".join(p)
if p_str.endswith(","):
p_str = p_str[:-1] + "."
elif not p_str.endswith("."):
p_str += "."
result.append(p_str)
if not html:
return "\n\n".join(result)
return markupsafe.Markup(
"\n".join(f"<p>{markupsafe.escape(x)}</p>" for x in result)
)
def url_quote(obj: t.Any, charset: str = "utf-8", for_qs: bool = False) -> str:
"""Quote a string for use in a URL using the given charset.
:param obj: String or bytes to quote. Other types are converted to
string then encoded to bytes using the given charset.
:param charset: Encode text to bytes using this charset.
:param for_qs: Quote "/" and use "+" for spaces.
"""
if not isinstance(obj, bytes):
if not isinstance(obj, str):
obj = str(obj)
obj = obj.encode(charset)
safe = b"" if for_qs else b"/"
rv = quote_from_bytes(obj, safe)
if for_qs:
rv = rv.replace("%20", "+")
return rv
@abc.MutableMapping.register
| _PassArg |
python | kamyu104__LeetCode-Solutions | Python/employee-importance.py | {
"start": 70,
"end": 419
} | class ____(object):
def __init__(self, id, importance, subordinates):
# It's the unique id of each node.
# unique id of this employee
self.id = id
# the importance value of this employee
self.importance = importance
# the id of direct subordinates
self.subordinates = subordinates
"""
| Employee |
python | huggingface__transformers | tests/causal_lm_tester.py | {
"start": 1299,
"end": 13312
} | class ____:
# If the model follows the standard naming conventions, only `base_model_class` needs to be set (the others are
# inferred from available public classes).
base_model_class = None
# ⚠️ Don't set these unless the model does NOT follow the standard naming conventions ⚠️
config_class = None
causal_lm_class = None
question_answering_class = None
sequence_classification_class = None
token_classification_class = None
# These attributes are required after the initialization phase of the tester.
_required_attributes = ("base_model_class", "config_class", "causal_lm_class")
# Arguments that should be passed to the config class even if not in its signature
forced_config_args = ["pad_token_id"]
@classmethod
def _verify_and_infer_model_attributes(cls):
"""
Verifies that the required tester attributes are set correctly, and infers unset tester attributes.
Intentionally nitpicks the tester class attributes, to prevent human errors.
"""
# `base_model_class` is mandatory, and it must be a valid model class.
base_model_class = getattr(cls, "base_model_class")
if base_model_class is None or "PreTrainedModel" not in str(base_model_class.__mro__):
raise ValueError(
f"You have inherited from `CausalLMModelTester` but did not set the `base_model_class` "
f"attribute to a valid model class. (It's set to `{base_model_class}`)"
)
# Infers other model classes from the base class name and available public classes, if the corresponding
# attributes are not set explicitly. If they are set, they must be set to a valid class (config or model).
model_name = base_model_class.__name__.replace("Model", "")
base_class_module = ".".join(base_model_class.__module__.split(".")[:-1])
for tester_attribute_name, model_class_termination in _COMMON_MODEL_NAMES_MAP.items():
if getattr(cls, tester_attribute_name) is None:
try:
model_class = getattribute_from_module(base_class_module, model_name + model_class_termination)
setattr(cls, tester_attribute_name, model_class)
except ValueError:
pass
else:
if tester_attribute_name == "config_class":
if "PreTrainedConfig" not in str(getattr(cls, tester_attribute_name).__mro__):
raise ValueError(
f"You have inherited from `CausalLMModelTester` but did not set the "
f"`{tester_attribute_name}` attribute to a valid config class. (It's set to "
f"`{getattr(cls, tester_attribute_name)}`). If the config class follows a standard "
f"naming convention, you should unset `{tester_attribute_name}`."
)
else:
if "PreTrainedModel" not in str(getattr(cls, tester_attribute_name).__mro__):
raise ValueError(
f"You have inherited from `CausalLMModelTester` but did not set the "
f"`{tester_attribute_name}` attribute to a valid model class. (It's set to "
f"`{getattr(cls, tester_attribute_name)}`). If the model class follows a standard "
f"naming convention, you should unset `{tester_attribute_name}`."
)
# After inferring, if we don't have the basic classes set, we raise an error.
for required_attribute in cls._required_attributes:
if getattr(cls, required_attribute) is None:
raise ValueError(
f"You have inherited from `CausalLMModelTester` but did not set the `{required_attribute}` "
"attribute. It can't be automatically inferred either -- this means it is not following a "
"standard naming convention. If this is intentional, please set the attribute explicitly."
)
# To prevent issues with typos, no other attributes can be set to a model class
for instance_attribute_name, instance_attribute in cls.__dict__.items():
if (
(
instance_attribute_name not in _COMMON_MODEL_NAMES_MAP
and instance_attribute_name != "base_model_class"
)
and isinstance(instance_attribute, type)
and "PreTrainedModel" in str(instance_attribute.__mro__)
):
raise ValueError(
f"You have inherited from `CausalLMModelTester` but set an unexpected attribute to a model class "
f"(`{instance_attribute_name}` is set to `{instance_attribute}`). "
f"Only the following attributes can be set to model classes: {_COMMON_MODEL_NAMES_MAP.keys()}."
)
@property
def all_model_classes(self):
# Models that set `all_model_classes` in their `XXXModelTest` class must have a new class that doesn't fit
# any of the common classes.
return [
model_class
for model_class in (
self.base_model_class,
self.causal_lm_class,
self.question_answering_class,
self.sequence_classification_class,
self.token_classification_class,
)
if model_class is not None
]
@property
def pipeline_model_mapping(self):
# This is the default pipeline mapping.
mapping = {
"feature-extraction": self.base_model_class,
"text-generation": self.causal_lm_class,
}
if self.question_answering_class is not None:
mapping["question-answering"] = self.question_answering_class
if self.sequence_classification_class is not None:
mapping["text-classification"] = self.sequence_classification_class
if self.token_classification_class is not None:
mapping["token-classification"] = self.token_classification_class
if self.sequence_classification_class is not None:
mapping["zero-shot"] = self.sequence_classification_class
return mapping
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=False,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=2,
num_key_value_heads=2,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
is_decoder=False,
scope=None,
expert_interval=1,
moe_layer_start_index=0,
moe_intermediate_size=12,
shared_expert_intermediate_size=36,
shared_expert_gate=True,
moe_num_shared_experts=2,
num_experts_per_tok=2,
num_experts=8,
mamba_n_groups=1,
mamba_n_heads=16,
mamba_d_state=16,
mamba_d_conv=4,
mamba_expand=2,
mamba_chunk_size=16,
):
self._verify_and_infer_model_attributes()
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.scope = scope
self.head_dim = self.hidden_size // self.num_attention_heads
self.is_decoder = is_decoder
self.expert_interval = expert_interval
self.moe_layer_start_index = moe_layer_start_index
self.moe_intermediate_size = moe_intermediate_size
self.shared_expert_intermediate_size = shared_expert_intermediate_size
self.shared_expert_gate = shared_expert_gate
self.moe_num_shared_experts = moe_num_shared_experts
self.num_experts_per_tok = num_experts_per_tok
self.num_experts = num_experts
self.mamba_n_groups = mamba_n_groups
self.mamba_n_heads = mamba_n_heads
self.mamba_d_state = mamba_d_state
self.mamba_d_conv = mamba_d_conv
self.mamba_expand = mamba_expand
self.mamba_chunk_size = mamba_chunk_size
self.tie_word_embeddings = False
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device))
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
@property
def config_args(self):
return list(signature(self.config_class.__init__).parameters.keys())
def get_config(self):
kwargs = {}
model_name_to_common_name = {v: k for k, v in self.config_class.attribute_map.items()}
for k in self.config_args + self.forced_config_args:
if hasattr(self, k) and k != "self":
kwargs[k] = getattr(self, k)
elif k in model_name_to_common_name and hasattr(self, model_name_to_common_name[k]):
kwargs[k] = getattr(self, model_name_to_common_name[k])
return self.config_class(**kwargs)
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = self.base_model_class(config=config)
model.to(torch_device)
model.eval()
result = model(input_ids, attention_mask=input_mask)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def prepare_config_and_inputs_for_common(self):
config, input_ids, _, input_mask, _, _, _ = self.prepare_config_and_inputs()
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
| CausalLMModelTester |
python | huggingface__transformers | src/transformers/models/lxmert/modeling_lxmert.py | {
"start": 18642,
"end": 19360
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.attention = LxmertSelfAttentionLayer(config)
self.intermediate = LxmertIntermediate(config)
self.output = LxmertOutput(config)
def forward(self, hidden_states, attention_mask=None, output_attentions=False):
outputs = self.attention(hidden_states, attention_mask, output_attentions=output_attentions)
attention_output = outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + outputs[1:] # add attentions if we output them
return outputs
| LxmertLayer |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/strings_ops/string_split_op_test.py | {
"start": 1333,
"end": 9602
} | class ____(test.TestCase, parameterized.TestCase):
def testStringSplit(self):
strings = ["pigs on the wing", "animals"]
with self.cached_session():
tokens = string_ops.string_split(strings)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [1, 0]])
self.assertAllEqual(values, [b"pigs", b"on", b"the", b"wing", b"animals"])
self.assertAllEqual(shape, [2, 4])
@test_util.run_deprecated_v1
def testStringSplitEmptyDelimiter(self):
strings = ["hello", "hola", b"\xF0\x9F\x98\x8E"] # Last string is U+1F60E
with self.cached_session():
tokens = string_ops.string_split(strings, delimiter="")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[1, 0], [1, 1], [1, 2], [1, 3], [2, 0],
[2, 1], [2, 2], [2, 3]])
expected = np.array(
[
"h", "e", "l", "l", "o", "h", "o", "l", "a", b"\xf0", b"\x9f",
b"\x98", b"\x8e"
],
dtype="|S1")
self.assertAllEqual(values.tolist(), expected)
self.assertAllEqual(shape, [3, 5])
def testStringSplitEmptyToken(self):
strings = ["", " a", "b ", " c", " ", " d ", " e", "f ", " g ", " "]
with self.cached_session():
tokens = string_ops.string_split(strings)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(
indices,
[[1, 0], [2, 0], [3, 0], [5, 0], [6, 0], [7, 0], [8, 0]])
self.assertAllEqual(values, [b"a", b"b", b"c", b"d", b"e", b"f", b"g"])
self.assertAllEqual(shape, [10, 1])
def testStringSplitOnSetEmptyToken(self):
strings = ["", " a", "b ", " c", " ", " d ", ". e", "f .", " .g. ", " ."]
with self.cached_session():
tokens = string_ops.string_split(strings, delimiter=" .")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(
indices,
[[1, 0], [2, 0], [3, 0], [5, 0], [6, 0], [7, 0], [8, 0]])
self.assertAllEqual(values, [b"a", b"b", b"c", b"d", b"e", b"f", b"g"])
self.assertAllEqual(shape, [10, 1])
@test_util.run_deprecated_v1
def testStringSplitWithDelimiter(self):
strings = ["hello|world", "hello world"]
with self.cached_session():
self.assertRaises(
ValueError, string_ops.string_split, strings, delimiter=["|", ""])
self.assertRaises(
ValueError, string_ops.string_split, strings, delimiter=["a"])
tokens = string_ops.string_split(strings, delimiter="|")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0]])
self.assertAllEqual(values, [b"hello", b"world", b"hello world"])
self.assertAllEqual(shape, [2, 2])
tokens = string_ops.string_split(strings, delimiter="| ")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0], [1, 1]])
self.assertAllEqual(values, [b"hello", b"world", b"hello", b"world"])
self.assertAllEqual(shape, [2, 2])
@test_util.run_deprecated_v1
def testStringSplitWithDelimiterTensor(self):
strings = ["hello|world", "hello world"]
with self.cached_session() as sess:
delimiter = array_ops.placeholder(dtypes.string)
tokens = string_ops.string_split(strings, delimiter=delimiter)
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a", "b"]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a"]})
indices, values, shape = sess.run(tokens, feed_dict={delimiter: "|"})
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0]])
self.assertAllEqual(values, [b"hello", b"world", b"hello world"])
self.assertAllEqual(shape, [2, 2])
@test_util.run_deprecated_v1
def testStringSplitWithDelimitersTensor(self):
strings = ["hello.cruel,world", "hello cruel world"]
with self.cached_session() as sess:
delimiter = array_ops.placeholder(dtypes.string)
tokens = string_ops.string_split(strings, delimiter=delimiter)
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a", "b"]})
with self.assertRaises(errors_impl.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a"]})
indices, values, shape = sess.run(tokens, feed_dict={delimiter: ".,"})
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [1, 0]])
self.assertAllEqual(values,
[b"hello", b"cruel", b"world", b"hello cruel world"])
self.assertAllEqual(shape, [2, 3])
def testStringSplitWithNoSkipEmpty(self):
strings = ["#a", "b#", "#c#"]
with self.cached_session():
tokens = string_ops.string_split(strings, "#", skip_empty=False)
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1],
[1, 0], [1, 1],
[2, 0], [2, 1], [2, 2]])
self.assertAllEqual(values, [b"", b"a", b"b", b"", b"", b"c", b""])
self.assertAllEqual(shape, [3, 3])
with self.cached_session():
tokens = string_ops.string_split(strings, "#")
indices, values, shape = self.evaluate(tokens)
self.assertAllEqual(values, [b"a", b"b", b"c"])
self.assertAllEqual(indices, [[0, 0], [1, 0], [2, 0]])
self.assertAllEqual(shape, [3, 1])
@parameterized.named_parameters([
dict(
testcase_name="RaggedResultType",
source=[b"pigs on the wing", b"animals"],
result_type="RaggedTensor",
expected=[[b"pigs", b"on", b"the", b"wing"], [b"animals"]]),
dict(
testcase_name="SparseResultType",
source=[b"pigs on the wing", b"animals"],
result_type="SparseTensor",
expected=sparse_tensor.SparseTensorValue(
[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0]],
[b"pigs", b"on", b"the", b"wing", b"animals"], [2, 4])),
dict(
testcase_name="DefaultResultType",
source=[b"pigs on the wing", b"animals"],
expected=sparse_tensor.SparseTensorValue(
[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0]],
[b"pigs", b"on", b"the", b"wing", b"animals"], [2, 4])),
dict(
testcase_name="BadResultType",
source=[b"pigs on the wing", b"animals"],
result_type="BouncyTensor",
error="result_type must be .*"),
dict(
testcase_name="WithSepAndAndSkipEmpty",
source=[b"+hello+++this+is+a+test"],
sep="+",
skip_empty=False,
result_type="RaggedTensor",
expected=[[b"", b"hello", b"", b"", b"this", b"is", b"a", b"test"]]),
dict(
testcase_name="WithDelimiter",
source=[b"hello world"],
delimiter="l",
result_type="RaggedTensor",
expected=[[b"he", b"o wor", b"d"]]),
])
def testRaggedStringSplitWrapper(self,
source,
sep=None,
skip_empty=True,
delimiter=None,
result_type="SparseTensor",
expected=None,
error=None):
if error is not None:
with self.assertRaisesRegex(ValueError, error):
ragged_string_ops.string_split(source, sep, skip_empty, delimiter,
result_type)
if expected is not None:
result = ragged_string_ops.string_split(source, sep, skip_empty,
delimiter, result_type)
if isinstance(expected, sparse_tensor.SparseTensorValue):
self.assertAllEqual(result.indices, expected.indices)
self.assertAllEqual(result.values, expected.values)
self.assertAllEqual(result.dense_shape, expected.dense_shape)
else:
self.assertAllEqual(result, expected)
| StringSplitOpTest |
python | EpistasisLab__tpot | tpot/builtin_modules/feature_set_selector.py | {
"start": 1714,
"end": 4244
} | class ____(SelectorMixin, BaseEstimator):
"""
Select predefined feature subsets.
"""
def __init__(self, sel_subset=None, name=None):
"""Create a FeatureSetSelector object.
Parameters
----------
sel_subset: list or int
If X is a dataframe, items in sel_subset list must correspond to column names
If X is a numpy array, items in sel_subset list must correspond to column indexes
int: index of a single column
Returns
-------
None
"""
self.name = name
self.sel_subset = sel_subset
def fit(self, X, y=None):
"""Fit FeatureSetSelector for feature selection
Parameters
----------
X: array-like of shape (n_samples, n_features)
The training input samples.
y: array-like, shape (n_samples,)
The target values (integers that correspond to classes in classification, real numbers in regression).
Returns
-------
self: object
Returns a copy of the estimator
"""
if isinstance(self.sel_subset, int) or isinstance(self.sel_subset, str):
self.sel_subset = [self.sel_subset]
#generate self.feat_list_idx
if isinstance(X, pd.DataFrame):
self.feature_names_in_ = X.columns.tolist()
self.feat_list_idx = sorted([self.feature_names_in_.index(feat) for feat in self.sel_subset])
elif isinstance(X, np.ndarray):
self.feature_names_in_ = None#list(range(X.shape[1]))
self.feat_list_idx = sorted(self.sel_subset)
n_features = X.shape[1]
self.mask = np.zeros(n_features, dtype=bool)
self.mask[np.asarray(self.feat_list_idx)] = True
return self
#TODO keep returned as dataframe if input is dataframe? may not be consistent with sklearn
# def transform(self, X):
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.input_tags.allow_nan = True
tags.target_tags.required = False # formally requires_y
return tags
def _get_support_mask(self):
"""
Get the boolean mask indicating which features are selected
Returns
-------
support : boolean array of shape [# input features]
An element is True iff its corresponding feature is selected for
retention.
"""
return self.mask
| FeatureSetSelector |
python | ipython__ipython | IPython/core/magic.py | {
"start": 10898,
"end": 19582
} | class ____(Configurable):
"""Object that handles all magic-related functionality for IPython."""
# Non-configurable class attributes
# A two-level dict, first keyed by magic type, then by magic function, and
# holding the actual callable object as value. This is the dict used for
# magic function dispatch
magics = Dict()
lazy_magics = Dict(
help="""
Mapping from magic names to modules to load.
This can be used in IPython/IPykernel configuration to declare lazy magics
that will only be imported/registered on first use.
For example::
c.MagicsManager.lazy_magics = {
"my_magic": "slow.to.import",
"my_other_magic": "also.slow",
}
On first invocation of `%my_magic`, `%%my_magic`, `%%my_other_magic` or
`%%my_other_magic`, the corresponding module will be loaded as an ipython
extensions as if you had previously done `%load_ext ipython`.
Magics names should be without percent(s) as magics can be both cell
and line magics.
Lazy loading happen relatively late in execution process, and
complex extensions that manipulate Python/IPython internal state or global state
might not support lazy loading.
"""
).tag(
config=True,
)
# A registry of the original objects that we've been given holding magics.
registry = Dict()
shell = Instance(
"IPython.core.interactiveshell.InteractiveShellABC", allow_none=True
)
auto_magic = Bool(
True, help="Automatically call line magics without requiring explicit % prefix"
).tag(config=True)
@observe("auto_magic")
def _auto_magic_changed(self, change):
assert self.shell is not None
self.shell.automagic = change["new"]
_auto_status = [
"Automagic is OFF, % prefix IS needed for line magics.",
"Automagic is ON, % prefix IS NOT needed for line magics.",
]
user_magics = Instance("IPython.core.magics.UserMagics", allow_none=True)
def __init__(self, shell=None, config=None, user_magics=None, **traits):
super(MagicsManager, self).__init__(
shell=shell, config=config, user_magics=user_magics, **traits
)
self.magics = dict(line={}, cell={})
# Let's add the user_magics to the registry for uniformity, so *all*
# registered magic containers can be found there.
self.registry[user_magics.__class__.__name__] = user_magics
def auto_status(self):
"""Return descriptive string with automagic status."""
return self._auto_status[self.auto_magic]
def lsmagic(self):
"""Return a dict of currently available magic functions.
The return dict has the keys 'line' and 'cell', corresponding to the
two types of magics we support. Each value is a list of names.
"""
return self.magics
def lsmagic_docs(self, brief=False, missing=""):
"""Return dict of documentation of magic functions.
The return dict has the keys 'line' and 'cell', corresponding to the
two types of magics we support. Each value is a dict keyed by magic
name whose value is the function docstring. If a docstring is
unavailable, the value of `missing` is used instead.
If brief is True, only the first line of each docstring will be returned.
"""
docs = {}
for m_type in self.magics:
m_docs = {}
for m_name, m_func in self.magics[m_type].items():
if m_func.__doc__:
if brief:
m_docs[m_name] = m_func.__doc__.split("\n", 1)[0]
else:
m_docs[m_name] = m_func.__doc__.rstrip()
else:
m_docs[m_name] = missing
docs[m_type] = m_docs
return docs
def register_lazy(self, name: str, fully_qualified_name: str) -> None:
"""
Lazily register a magic via an extension.
Parameters
----------
name : str
Name of the magic you wish to register.
fully_qualified_name :
Fully qualified name of the module/submodule that should be loaded
as an extensions when the magic is first called.
It is assumed that loading this extensions will register the given
magic.
"""
self.lazy_magics[name] = fully_qualified_name
def register(self, *magic_objects):
"""Register one or more instances of Magics.
Take one or more classes or instances of classes that subclass the main
`core.Magic` class, and register them with IPython to use the magic
functions they provide. The registration process will then ensure that
any methods that have decorated to provide line and/or cell magics will
be recognized with the `%x`/`%%x` syntax as a line/cell magic
respectively.
If classes are given, they will be instantiated with the default
constructor. If your classes need a custom constructor, you should
instanitate them first and pass the instance.
The provided arguments can be an arbitrary mix of classes and instances.
Parameters
----------
*magic_objects : one or more classes or instances
"""
# Start by validating them to ensure they have all had their magic
# methods registered at the instance level
for m in magic_objects:
if not m.registered:
raise ValueError(
"Class of magics %r was constructed without "
"the @register_magics class decorator"
)
if isinstance(m, type):
# If we're given an uninstantiated class
m = m(shell=self.shell)
# Now that we have an instance, we can register it and update the
# table of callables
self.registry[m.__class__.__name__] = m
for mtype in magic_kinds:
self.magics[mtype].update(m.magics[mtype])
def register_function(self, func, magic_kind="line", magic_name=None):
"""Expose a standalone function as magic function for IPython.
This will create an IPython magic (line, cell or both) from a
standalone function. The functions should have the following
signatures:
* For line magics: `def f(line)`
* For cell magics: `def f(line, cell)`
* For a function that does both: `def f(line, cell=None)`
In the latter case, the function will be called with `cell==None` when
invoked as `%f`, and with cell as a string when invoked as `%%f`.
Parameters
----------
func : callable
Function to be registered as a magic.
magic_kind : str
Kind of magic, one of 'line', 'cell' or 'line_cell'
magic_name : optional str
If given, the name the magic will have in the IPython namespace. By
default, the name of the function itself is used.
"""
# Create the new method in the user_magics and register it in the
# global table
validate_type(magic_kind)
magic_name = func.__name__ if magic_name is None else magic_name
setattr(self.user_magics, magic_name, func)
record_magic(self.magics, magic_kind, magic_name, func)
def register_alias(
self, alias_name, magic_name, magic_kind="line", magic_params=None
):
"""Register an alias to a magic function.
The alias is an instance of :class:`MagicAlias`, which holds the
name and kind of the magic it should call. Binding is done at
call time, so if the underlying magic function is changed the alias
will call the new function.
Parameters
----------
alias_name : str
The name of the magic to be registered.
magic_name : str
The name of an existing magic.
magic_kind : str
Kind of magic, one of 'line' or 'cell'
"""
# `validate_type` is too permissive, as it allows 'line_cell'
# which we do not handle.
if magic_kind not in magic_kinds:
raise ValueError(
"magic_kind must be one of %s, %s given" % magic_kinds, magic_kind
)
alias = MagicAlias(self.shell, magic_name, magic_kind, magic_params)
setattr(self.user_magics, alias_name, alias)
record_magic(self.magics, magic_kind, alias_name, alias)
# Key base class that provides the central functionality for magics.
| MagicsManager |
python | getsentry__sentry | tests/sentry/seer/explorer/test_custom_tool_utils.py | {
"start": 1709,
"end": 2026
} | class ____(ExplorerTool):
@classmethod
def get_description(cls):
return "Tool that returns wrong type"
@classmethod
def get_params(cls):
return []
@classmethod
def execute(cls, organization, **kwargs):
return 123 # Returns wrong type to test runtime validation
| BadTool |
python | pytorch__pytorch | test/export/test_functionalized_assertions.py | {
"start": 115,
"end": 985
} | class ____(TestCase):
def test_functional_assert_async_msg(self) -> None:
dep_token = torch.ops.aten._make_dep_token()
self.assertEqual(
torch.ops.aten._functional_assert_async.msg(
torch.tensor(1), "test msg", dep_token
),
dep_token,
)
with self.assertRaisesRegex(RuntimeError, "test msg"):
torch.ops.aten._functional_assert_async.msg(
torch.tensor(0), "test msg", dep_token
)
def test_functional_sym_constrain_range(self) -> None:
dep_token = torch.ops.aten._make_dep_token()
self.assertEqual(
torch.ops.aten._functional_sym_constrain_range(
3, min=2, max=5, dep_token=dep_token
),
dep_token,
)
if __name__ == "__main__":
run_tests()
| TestFuntionalAssertions |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/optics/pyoptic.py | {
"start": 9371,
"end": 11368
} | class ____(pg.GraphicsObject, ParamObj):
"""GraphicsObject with two circular or flat surfaces."""
def __init__(self, pen=None, brush=None, **opts):
"""
Arguments for each surface are:
x1,x2 - position of center of _physical surface_
r1,r2 - radius of curvature
d1,d2 - diameter of optic
"""
defaults = dict(x1=-2, r1=100, d1=25.4, x2=2, r2=100, d2=25.4)
defaults.update(opts)
ParamObj.__init__(self)
self.surfaces = [CircleSurface(defaults['r1'], defaults['d1']), CircleSurface(-defaults['r2'], defaults['d2'])]
pg.GraphicsObject.__init__(self)
for s in self.surfaces:
s.setParentItem(self)
if pen is None:
self.pen = pg.mkPen((220,220,255,200), width=1, cosmetic=True)
else:
self.pen = pg.mkPen(pen)
if brush is None:
self.brush = pg.mkBrush((230, 230, 255, 30))
else:
self.brush = pg.mkBrush(brush)
self.setParams(**defaults)
def paramStateChanged(self):
self.updateSurfaces()
def updateSurfaces(self):
self.surfaces[0].setParams(self['r1'], self['d1'])
self.surfaces[1].setParams(-self['r2'], self['d2'])
self.surfaces[0].setPos(self['x1'], 0)
self.surfaces[1].setPos(self['x2'], 0)
self.path = QtGui.QPainterPath()
self.path.connectPath(self.surfaces[0].path.translated(self.surfaces[0].pos()))
self.path.connectPath(self.surfaces[1].path.translated(self.surfaces[1].pos()).toReversed())
self.path.closeSubpath()
def boundingRect(self):
return self.path.boundingRect()
def shape(self):
return self.path
def paint(self, p, *args):
p.setRenderHints(p.renderHints() | p.RenderHint.Antialiasing)
p.setPen(self.pen)
p.fillPath(self.path, self.brush)
p.drawPath(self.path)
| CircularSolid |
python | optuna__optuna | optuna/terminator/erroreval.py | {
"start": 638,
"end": 3536
} | class ____(BaseErrorEvaluator):
"""An error evaluator for objective functions based on cross-validation.
This evaluator evaluates the objective function's statistical error, which comes from the
randomness of dataset. This evaluator assumes that the objective function is the average of
the cross-validation and uses the scaled variance of the cross-validation scores in the best
trial at the moment as the statistical error.
"""
def evaluate(
self,
trials: list[FrozenTrial],
study_direction: StudyDirection,
) -> float:
"""Evaluate the statistical error of the objective function based on cross-validation.
Args:
trials:
A list of trials to consider. The best trial in ``trials`` is used to compute the
statistical error.
study_direction:
The direction of the study.
Returns:
A float representing the statistical error of the objective function.
"""
trials = [trial for trial in trials if trial.state == TrialState.COMPLETE]
assert len(trials) > 0
if study_direction == StudyDirection.MAXIMIZE:
best_trial = max(trials, key=lambda t: cast(float, t.value))
else:
best_trial = min(trials, key=lambda t: cast(float, t.value))
best_trial_attrs = best_trial.system_attrs
if _CROSS_VALIDATION_SCORES_KEY in best_trial_attrs:
cv_scores = best_trial_attrs[_CROSS_VALIDATION_SCORES_KEY]
else:
raise ValueError(
"Cross-validation scores have not been reported. Please call "
"`report_cross_validation_scores(trial, scores)` during a trial and pass the "
"list of scores as `scores`."
)
k = len(cv_scores)
assert k > 1, "Should be guaranteed by `report_cross_validation_scores`."
scale = 1 / k + 1 / (k - 1)
var = scale * np.var(cv_scores)
std = np.sqrt(var)
return float(std)
@experimental_class("3.2.0")
def report_cross_validation_scores(trial: Trial, scores: list[float]) -> None:
"""A function to report cross-validation scores of a trial.
This function should be called within the objective function to report the cross-validation
scores. The reported scores are used to evaluate the statistical error for termination
judgement.
Args:
trial:
A :class:`~optuna.trial.Trial` object to report the cross-validation scores.
scores:
The cross-validation scores of the trial.
"""
if len(scores) <= 1:
raise ValueError("The length of `scores` is expected to be greater than one.")
trial.storage.set_trial_system_attr(trial._trial_id, _CROSS_VALIDATION_SCORES_KEY, scores)
@experimental_class("3.2.0")
| CrossValidationErrorEvaluator |
python | ray-project__ray | python/ray/dashboard/modules/job/job_supervisor.py | {
"start": 1658,
"end": 20874
} | class ____:
"""
Ray actor created by JobManager for each submitted job, responsible to
setup runtime_env, execute given shell command in subprocess, update job
status, persist job logs and manage subprocess group cleaning.
One job supervisor actor maps to one subprocess, for one job_id.
Job supervisor actor should fate share with subprocess it created.
"""
DEFAULT_RAY_JOB_STOP_WAIT_TIME_S = 3
SUBPROCESS_POLL_PERIOD_S = 0.1
VALID_STOP_SIGNALS = ["SIGINT", "SIGTERM"]
def __init__(
self,
job_id: str,
entrypoint: str,
user_metadata: Dict[str, str],
gcs_address: str,
cluster_id_hex: str,
logs_dir: Optional[str] = None,
):
self._job_id = job_id
gcs_client = GcsClient(address=gcs_address, cluster_id=cluster_id_hex)
self._job_info_client = JobInfoStorageClient(gcs_client, logs_dir)
self._log_client = JobLogStorageClient()
self._entrypoint = entrypoint
# Default metadata if not passed by the user.
self._metadata = {JOB_ID_METADATA_KEY: job_id, JOB_NAME_METADATA_KEY: job_id}
self._metadata.update(user_metadata)
# Event used to signal that a job should be stopped.
# Set in the `stop_job` method.
self._stop_event = asyncio.Event()
# Windows Job Object used to handle stopping the child processes.
self._win32_job_object = None
# Logger object to persist JobSupervisor logs in separate file.
self._logger = logging.getLogger(f"{__name__}.supervisor-{job_id}")
self._configure_logger()
def _configure_logger(self) -> None:
"""
Configure self._logger object to write logs to file based on job
submission ID and to console.
"""
supervisor_log_file_name = os.path.join(
ray._private.worker._global_node.get_logs_dir_path(),
f"jobs/supervisor-{self._job_id}.log",
)
os.makedirs(os.path.dirname(supervisor_log_file_name), exist_ok=True)
self._logger.addFilter(CoreContextFilter())
stream_handler = logging.StreamHandler()
file_handler = logging.FileHandler(supervisor_log_file_name)
formatter = TextFormatter()
if ray_constants.env_bool(ray_constants.RAY_BACKEND_LOG_JSON_ENV_VAR, False):
formatter = JSONFormatter()
stream_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
self._logger.addHandler(stream_handler)
self._logger.addHandler(file_handler)
self._logger.propagate = False
def _get_driver_runtime_env(
self, resources_specified: bool = False
) -> Dict[str, Any]:
"""Get the runtime env that should be set in the job driver.
Args:
resources_specified: Whether the user specified resources (CPUs, GPUs,
custom resources) in the submit_job request. If so, we will skip
the workaround for GPU detection introduced in #24546, so that the
behavior matches that of the user specifying resources for any
other actor.
Returns:
The runtime env that should be set in the job driver.
"""
# Get the runtime_env set for the supervisor actor.
curr_runtime_env = dict(ray.get_runtime_context().runtime_env)
if resources_specified:
return curr_runtime_env
# Allow CUDA_VISIBLE_DEVICES to be set normally for the driver's tasks
# & actors.
env_vars = curr_runtime_env.get("env_vars", {})
env_vars.pop(NOSET_CUDA_VISIBLE_DEVICES_ENV_VAR)
env_vars.pop(ray_constants.RAY_WORKER_NICENESS)
curr_runtime_env["env_vars"] = env_vars
return curr_runtime_env
def ping(self):
"""Used to check the health of the actor."""
pass
def _exec_entrypoint(self, env: dict, logs_path: str) -> subprocess.Popen:
"""
Runs the entrypoint command as a child process, streaming stderr &
stdout to given log files.
Unix systems:
Meanwhile we start a demon process and group driver
subprocess in same pgid, such that if job actor dies, entire process
group also fate share with it.
Windows systems:
A jobObject is created to enable fate sharing for the entire process group.
Args:
logs_path: File path on head node's local disk to store driver
command's stdout & stderr.
Returns:
child_process: Child process that runs the driver command. Can be
terminated or killed upon user calling stop().
"""
# Open in append mode to avoid overwriting runtime_env setup logs for the
# supervisor actor, which are also written to the same file.
with open(logs_path, "a") as logs_file:
logs_file.write(
f"Running entrypoint for job {self._job_id}: {self._entrypoint}\n"
)
child_process = subprocess.Popen(
self._entrypoint,
shell=True,
start_new_session=True,
stdout=logs_file,
stderr=subprocess.STDOUT,
env=env,
# Ray intentionally blocks SIGINT in all processes, so if the user wants
# to stop job through SIGINT, we need to unblock it in the child process
preexec_fn=(
(
lambda: signal.pthread_sigmask(
signal.SIG_UNBLOCK, {signal.SIGINT}
)
)
if sys.platform != "win32"
and os.environ.get("RAY_JOB_STOP_SIGNAL") == "SIGINT"
else None
),
)
parent_pid = os.getpid()
child_pid = child_process.pid
# Create new pgid with new subprocess to execute driver command
if sys.platform != "win32":
try:
child_pgid = os.getpgid(child_pid)
except ProcessLookupError:
# Process died before we could get its pgid.
return child_process
# Open a new subprocess to kill the child process when the parent
# process dies kill -s 0 parent_pid will succeed if the parent is
# alive. If it fails, SIGKILL the child process group and exit
subprocess.Popen(
f"while kill -s 0 {parent_pid}; do sleep 1; done; kill -9 -{child_pgid}", # noqa: E501
shell=True,
# Suppress output
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
elif sys.platform == "win32" and win32api:
# Create a JobObject to which the child process (and its children)
# will be connected. This job object can be used to kill the child
# processes explicitly or when the jobObject gets deleted during
# garbage collection.
self._win32_job_object = win32job.CreateJobObject(None, "")
win32_job_info = win32job.QueryInformationJobObject(
self._win32_job_object, win32job.JobObjectExtendedLimitInformation
)
win32_job_info["BasicLimitInformation"][
"LimitFlags"
] = win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
win32job.SetInformationJobObject(
self._win32_job_object,
win32job.JobObjectExtendedLimitInformation,
win32_job_info,
)
child_handle = win32api.OpenProcess(
win32con.PROCESS_TERMINATE | win32con.PROCESS_SET_QUOTA,
False,
child_pid,
)
win32job.AssignProcessToJobObject(self._win32_job_object, child_handle)
return child_process
def _get_driver_env_vars(self, resources_specified: bool) -> Dict[str, str]:
"""Returns environment variables that should be set in the driver."""
# RAY_ADDRESS may be the dashboard URL but not the gcs address,
# so when the environment variable is not empty, we force set RAY_ADDRESS
# to "auto" to avoid function `canonicalize_bootstrap_address_or_die` returning
# the wrong GCS address.
# TODO(Jialing He, Archit Kulkarni): Definition of Specification RAY_ADDRESS
if ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE in os.environ:
os.environ[ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE] = "auto"
ray_addr = ray._private.services.canonicalize_bootstrap_address_or_die(
"auto", ray._private.worker._global_node._ray_params.temp_dir
)
assert ray_addr is not None
return {
# Set JobConfig for the child process (runtime_env, metadata).
RAY_JOB_CONFIG_JSON_ENV_VAR: json.dumps(
{
"runtime_env": self._get_driver_runtime_env(resources_specified),
"metadata": self._metadata,
}
),
# Always set RAY_ADDRESS as find_bootstrap_address address for
# job submission. In case of local development, prevent user from
# re-using http://{address}:{dashboard_port} to interact with
# jobs SDK.
# TODO:(mwtian) Check why "auto" does not work in entrypoint script
ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE: ray_addr,
# Set PYTHONUNBUFFERED=1 to stream logs during the job instead of
# only streaming them upon completion of the job.
"PYTHONUNBUFFERED": "1",
}
async def _polling(self, child_process: subprocess.Popen) -> int:
while child_process is not None:
return_code = child_process.poll()
if return_code is not None:
# subprocess finished with return code
return return_code
else:
# still running, yield control, 0.1s by default
await asyncio.sleep(self.SUBPROCESS_POLL_PERIOD_S)
async def _poll_all(self, processes: List[psutil.Process]):
"""Poll processes until all are completed."""
while True:
(_, alive) = psutil.wait_procs(processes, timeout=0)
if len(alive) == 0:
return
else:
await asyncio.sleep(self.SUBPROCESS_POLL_PERIOD_S)
def _kill_processes(self, processes: List[psutil.Process], sig: signal.Signals):
"""Ensure each process is already finished or send a kill signal."""
for proc in processes:
try:
os.kill(proc.pid, sig)
except ProcessLookupError:
# Process is already dead
pass
async def run(
self,
# Signal actor used in testing to capture PENDING -> RUNNING cases
_start_signal_actor: Optional[ActorHandle] = None,
resources_specified: bool = False,
):
"""
Stop and start both happen asynchronously, coordinated by asyncio event
and coroutine, respectively.
1) Sets job status as running
2) Pass runtime env and metadata to subprocess as serialized env
variables.
3) Handle concurrent events of driver execution and
"""
curr_info = await self._job_info_client.get_info(self._job_id)
if curr_info is None:
raise RuntimeError(f"Status could not be retrieved for job {self._job_id}.")
curr_status = curr_info.status
curr_message = curr_info.message
if curr_status == JobStatus.RUNNING:
raise RuntimeError(
f"Job {self._job_id} is already in RUNNING state. "
f"JobSupervisor.run() should only be called once. "
)
if curr_status != JobStatus.PENDING:
raise RuntimeError(
f"Job {self._job_id} is not in PENDING state. "
f"Current status is {curr_status} with message {curr_message}."
)
if _start_signal_actor:
# Block in PENDING state until start signal received.
await _start_signal_actor.wait.remote()
node = ray._private.worker.global_worker.node
driver_agent_http_address = f"http://{build_address(node.node_ip_address, node.dashboard_agent_listen_port)}"
driver_node_id = ray.get_runtime_context().get_node_id()
await self._job_info_client.put_status(
self._job_id,
JobStatus.RUNNING,
jobinfo_replace_kwargs={
"driver_agent_http_address": driver_agent_http_address,
"driver_node_id": driver_node_id,
},
)
try:
# Configure environment variables for the child process.
env = os.environ.copy()
# Remove internal Ray flags. They present because JobSuperVisor itself is
# a Ray worker process but we don't want to pass them to the driver.
remove_ray_internal_flags_from_env(env)
# These will *not* be set in the runtime_env, so they apply to the driver
# only, not its tasks & actors.
env.update(self._get_driver_env_vars(resources_specified))
self._logger.info(
"Submitting job with RAY_ADDRESS = "
f"{env[ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE]}"
)
log_path = self._log_client.get_log_file_path(self._job_id)
child_process = self._exec_entrypoint(env, log_path)
child_pid = child_process.pid
polling_task = create_task(self._polling(child_process))
finished, _ = await asyncio.wait(
[polling_task, create_task(self._stop_event.wait())],
return_when=FIRST_COMPLETED,
)
if self._stop_event.is_set():
polling_task.cancel()
if sys.platform == "win32" and self._win32_job_object:
win32job.TerminateJobObject(self._win32_job_object, -1)
elif sys.platform != "win32":
stop_signal = os.environ.get("RAY_JOB_STOP_SIGNAL", "SIGTERM")
if stop_signal not in self.VALID_STOP_SIGNALS:
self._logger.warning(
f"{stop_signal} not a valid stop signal. Terminating "
"job with SIGTERM."
)
stop_signal = "SIGTERM"
job_process = psutil.Process(child_pid)
proc_to_kill = [job_process] + job_process.children(recursive=True)
# Send stop signal and wait for job to terminate gracefully,
# otherwise SIGKILL job forcefully after timeout.
self._kill_processes(proc_to_kill, getattr(signal, stop_signal))
try:
stop_job_wait_time = int(
os.environ.get(
"RAY_JOB_STOP_WAIT_TIME_S",
self.DEFAULT_RAY_JOB_STOP_WAIT_TIME_S,
)
)
poll_job_stop_task = create_task(self._poll_all(proc_to_kill))
await asyncio.wait_for(poll_job_stop_task, stop_job_wait_time)
self._logger.info(
f"Job {self._job_id} has been terminated gracefully "
f"with {stop_signal}."
)
except asyncio.TimeoutError:
self._logger.warning(
f"Attempt to gracefully terminate job {self._job_id} "
f"through {stop_signal} has timed out after "
f"{stop_job_wait_time} seconds. Job is now being "
"force-killed with SIGKILL."
)
self._kill_processes(proc_to_kill, signal.SIGKILL)
await self._job_info_client.put_status(self._job_id, JobStatus.STOPPED)
else:
# Child process finished execution and no stop event is set
# at the same time
assert len(finished) == 1, "Should have only one coroutine done"
[child_process_task] = finished
return_code = child_process_task.result()
self._logger.info(
f"Job {self._job_id} entrypoint command "
f"exited with code {return_code}"
)
if return_code == 0:
await self._job_info_client.put_status(
self._job_id,
JobStatus.SUCCEEDED,
driver_exit_code=return_code,
)
else:
log_tail = await self._log_client.get_last_n_log_lines(self._job_id)
if log_tail is not None and log_tail != "":
message = (
"Job entrypoint command "
f"failed with exit code {return_code}, "
"last available logs (truncated to 20,000 chars):\n"
+ log_tail
)
else:
message = (
"Job entrypoint command "
f"failed with exit code {return_code}. No logs available."
)
await self._job_info_client.put_status(
self._job_id,
JobStatus.FAILED,
message=message,
driver_exit_code=return_code,
error_type=JobErrorType.JOB_ENTRYPOINT_COMMAND_ERROR,
)
except Exception:
self._logger.error(
"Got unexpected exception while trying to execute driver "
f"command. {traceback.format_exc()}"
)
try:
await self._job_info_client.put_status(
self._job_id,
JobStatus.FAILED,
message=traceback.format_exc(),
error_type=JobErrorType.JOB_ENTRYPOINT_COMMAND_START_ERROR,
)
except Exception:
self._logger.error(
"Failed to update job status to FAILED. "
f"Exception: {traceback.format_exc()}"
)
finally:
# clean up actor after tasks are finished
ray.actor.exit_actor()
def stop(self):
"""Set step_event and let run() handle the rest in its asyncio.wait()."""
self._stop_event.set()
| JobSupervisor |
python | pexpect__pexpect | tests/test_async.py | {
"start": 233,
"end": 3126
} | class ____(PexpectTestCase.AsyncPexpectTestCase):
async def test_simple_expect(self):
p = pexpect.spawn("cat")
p.sendline("Hello asyncio")
assert await p.expect(["Hello", pexpect.EOF], async_=True) == 0
print("Done")
async def test_timeout(self):
p = pexpect.spawn("cat")
with self.assertRaises(pexpect.TIMEOUT):
await p.expect("foo", timeout=1, async_=True)
p = pexpect.spawn("cat")
assert await p.expect(["foo", pexpect.TIMEOUT], timeout=1, async_=True) == 1
async def test_eof(self):
p = pexpect.spawn("cat")
p.sendline("Hi")
p.sendeof()
assert await p.expect(pexpect.EOF, async_=True) == 0
p = pexpect.spawn("cat")
p.sendeof()
with self.assertRaises(pexpect.EOF):
await p.expect("Blah", async_=True)
async def test_expect_exact(self):
p = pexpect.spawn("%s list100.py" % self.PYTHONBIN)
assert await p.expect_exact(b"5", async_=True) == 0
assert await p.expect_exact(["wpeok", b"11"], async_=True) == 1
assert await p.expect_exact([b"foo", pexpect.EOF], async_=True) == 1
async def test_async_utf8(self):
p = pexpect.spawn("%s list100.py" % self.PYTHONBIN, encoding="utf8")
assert await p.expect_exact("5", async_=True) == 0
assert await p.expect_exact(["wpeok", "11"], async_=True) == 1
assert await p.expect_exact(["foo", pexpect.EOF], async_=True) == 1
async def test_async_and_gc(self):
p = pexpect.spawn("%s sleep_for.py 1" % self.PYTHONBIN, encoding="utf8")
assert await p.expect_exact("READY", async_=True) == 0
gc.collect()
assert await p.expect_exact("END", async_=True) == 0
async def test_async_and_sync(self):
p = pexpect.spawn("echo 1234", encoding="utf8", maxread=1)
assert await p.expect_exact("1", async_=True) == 0
assert p.expect_exact("2") == 0
assert await p.expect_exact("3", async_=True) == 0
async def test_async_replwrap(self):
bash = replwrap.bash()
res = await bash.run_command("time", async_=True)
assert "real" in res, res
async def test_async_replwrap_multiline(self):
bash = replwrap.bash()
res = await bash.run_command("echo '1 2\n3 4'", async_=True)
self.assertEqual(res.strip().splitlines(), ["1 2", "3 4"])
# Should raise ValueError if input is incomplete
try:
await bash.run_command("echo '5 6", async_=True)
except ValueError:
pass
else:
assert False, "Didn't raise ValueError for incomplete input"
# Check that the REPL was reset (SIGINT) after the incomplete input
res = await bash.run_command("echo '1 2\n3 4'", async_=True)
self.assertEqual(res.strip().splitlines(), ["1 2", "3 4"])
| AsyncTests |
python | jazzband__django-model-utils | tests/test_fields/test_monitor_field.py | {
"start": 1780,
"end": 3323
} | class ____(TestCase):
"""
Will record changes only when name is 'Jose' or 'Maria'
"""
def setUp(self) -> None:
with time_machine.travel(datetime(2016, 1, 1, 10, 0, 0, tzinfo=timezone.utc)):
self.instance = MonitorWhen(name='Charlie')
self.created = self.instance.name_changed
def test_save_no_change(self) -> None:
self.instance.save()
self.assertEqual(self.instance.name_changed, self.created)
def test_save_changed_to_Jose(self) -> None:
with time_machine.travel(datetime(2016, 1, 1, 12, 0, 0, tzinfo=timezone.utc)):
self.instance.name = 'Jose'
self.instance.save()
self.assertEqual(self.instance.name_changed, datetime(2016, 1, 1, 12, 0, 0, tzinfo=timezone.utc))
def test_save_changed_to_Maria(self) -> None:
with time_machine.travel(datetime(2016, 1, 1, 12, 0, 0, tzinfo=timezone.utc)):
self.instance.name = 'Maria'
self.instance.save()
self.assertEqual(self.instance.name_changed, datetime(2016, 1, 1, 12, 0, 0, tzinfo=timezone.utc))
def test_save_changed_to_Pedro(self) -> None:
self.instance.name = 'Pedro'
self.instance.save()
self.assertEqual(self.instance.name_changed, self.created)
def test_double_save(self) -> None:
self.instance.name = 'Jose'
self.instance.save()
changed = self.instance.name_changed
self.instance.save()
self.assertEqual(self.instance.name_changed, changed)
| MonitorWhenFieldTests |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_ignore_error05.py | {
"start": 315,
"end": 1112
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("ignore_error05.xlsx")
self.ignore_files = [
"xl/calcChain.xml",
"[Content_Types].xml",
"xl/_rels/workbook.xml.rels",
]
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write_string("A1", "123")
worksheet.write_formula("A2", "=1/0", None, "#DIV/0!")
worksheet.ignore_errors({"number_stored_as_text": "A1", "eval_error": "A2"})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | keras-team__keras | keras/src/wrappers/sklearn_wrapper.py | {
"start": 897,
"end": 6443
} | class ____(BaseEstimator):
"""Base class for scikit-learn wrappers.
Note that there are sources of randomness in model initialization and
training. Refer to [Reproducibility in Keras Models](
https://keras.io/examples/keras_recipes/reproducibility_recipes/) on how to
control randomness.
Args:
model: `Model`.
An instance of `Model`, or a callable returning such an object.
Note that if input is a `Model`, it will be cloned using
`keras.models.clone_model` before being fitted, unless
`warm_start=True`.
The `Model` instance needs to be passed as already compiled.
If callable, it must accept at least `X` and `y` as keyword
arguments. Other arguments must be accepted if passed as
`model_kwargs` by the user.
warm_start: bool, defaults to `False`.
Whether to reuse the model weights from the previous fit. If `True`,
the given model won't be cloned and the weights from the previous
fit will be reused.
model_kwargs: dict, defaults to `None`.
Keyword arguments passed to `model`, if `model` is callable.
fit_kwargs: dict, defaults to `None`.
Keyword arguments passed to `model.fit`. These can also be passed
directly to the `fit` method of the scikit-learn wrapper. The
values passed directly to the `fit` method take precedence over
these.
Attributes:
model_ : `Model`
The fitted model.
history_ : dict
The history of the fit, returned by `model.fit`.
"""
def __init__(
self,
model,
warm_start=False,
model_kwargs=None,
fit_kwargs=None,
):
assert_sklearn_installed(self.__class__.__name__)
self.model = model
self.warm_start = warm_start
self.model_kwargs = model_kwargs
self.fit_kwargs = fit_kwargs
def _more_tags(self):
return {"non_deterministic": True}
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.non_deterministic = True
return tags
def __sklearn_clone__(self):
"""Return a deep copy of the model.
This is used by the `sklearn.base.clone` function.
"""
model = (
self.model if callable(self.model) else copy.deepcopy(self.model)
)
return type(self)(
model=model,
warm_start=self.warm_start,
model_kwargs=self.model_kwargs,
)
@property
def epoch_(self):
"""The current training epoch."""
return getattr(self, "history_", {}).get("epoch", 0)
def set_fit_request(self, **kwargs):
"""Set requested parameters by the fit method.
Please see [scikit-learn's metadata routing](
https://scikit-learn.org/stable/metadata_routing.html) for more
details.
Arguments:
kwargs : dict
Arguments should be of the form `param_name=alias`, and `alias`
can be one of `{True, False, None, str}`.
Returns:
self
"""
if not _routing_enabled():
raise RuntimeError(
"This method is only available when metadata routing is "
"enabled. You can enable it using "
"sklearn.set_config(enable_metadata_routing=True)."
)
self._metadata_request = sklearn.utils.metadata_routing.MetadataRequest(
owner=self.__class__.__name__
)
for param, alias in kwargs.items():
self._metadata_request.score.add_request(param=param, alias=alias)
return self
def _get_model(self, X, y):
if isinstance(self.model, Model):
return clone_model(self.model)
else:
args = self.model_kwargs or {}
return self.model(X=X, y=y, **args)
def fit(self, X, y, **kwargs):
"""Fit the model.
Args:
X: array-like, shape=(n_samples, n_features)
The input samples.
y: array-like, shape=(n_samples,) or (n_samples, n_outputs)
The targets.
**kwargs: keyword arguments passed to `model.fit`
"""
X, y = _validate_data(self, X, y)
y = self._process_target(y, reset=True)
model = self._get_model(X, y)
_check_model(model)
fit_kwargs = self.fit_kwargs or {}
fit_kwargs.update(kwargs)
self.history_ = model.fit(X, y, **fit_kwargs)
self.model_ = model
return self
def predict(self, X):
"""Predict using the model."""
from sklearn.utils.validation import check_is_fitted
check_is_fitted(self)
X = _validate_data(self, X, reset=False)
raw_output = self.model_.predict(X)
return self._reverse_process_target(raw_output)
def _process_target(self, y, reset=False):
"""Regressors are NOOP here, classifiers do OHE."""
# This is here to raise the right error in case of invalid target
type_of_target(y, raise_unknown=True)
if reset:
self._target_encoder = TargetReshaper().fit(y)
return self._target_encoder.transform(y)
def _reverse_process_target(self, y):
"""Regressors are NOOP here, classifiers reverse OHE."""
return self._target_encoder.inverse_transform(y)
@keras_export("keras.wrappers.SKLearnClassifier")
| SKLBase |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 217032,
"end": 217511
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of DeleteProjectV2"""
__schema__ = github_schema
__field_names__ = ("project_id", "client_mutation_id")
project_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="projectId")
"""The ID of the Project to delete."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| DeleteProjectV2Input |
python | huggingface__transformers | src/transformers/models/hubert/modeling_hubert.py | {
"start": 8636,
"end": 10369
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.feat_proj_layer_norm = config.feat_proj_layer_norm
if self.feat_proj_layer_norm:
self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
self.dropout = nn.Dropout(config.feat_proj_dropout)
def forward(self, hidden_states):
# non-projected hidden states are needed for quantization
if self.feat_proj_layer_norm:
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.projection(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| HubertFeatureProjection |
python | sphinx-doc__sphinx | tests/roots/test-ext-autodoc/target/typed_vars.py | {
"start": 226,
"end": 551
} | class ____:
attr1: int = 0
attr2: int
attr3 = 0 # type: int
descr4: int = _Descriptor('descr4')
def __init__(self):
# fmt: off
self.attr4: int = 0 #: attr4
self.attr5: int #: attr5
self.attr6 = 0 # type: int
# fmt: on
"""attr6"""
| Class |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver7.py | {
"start": 427,
"end": 966
} | class ____(Generic[_A, _B]):
def __init__(self, a: _A, b: _B = "hello"):
self._foo_a = a
self._foo_b = b
@property
def value_a(self):
return self._foo_a
@property
def value_b(self):
return self._foo_b
a1 = ClassA(27)
reveal_type(a1.value_a, expected_text="int")
reveal_type(a1.value_b, expected_text="str")
@contextmanager
def func1(default: _A | None = None) -> Iterator[_A | str]:
yield ""
def func2():
with func1() as y:
reveal_type(y, expected_text="str")
| ClassA |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/inputs.py | {
"start": 8171,
"end": 8792
} | class ____(graphene.Enum):
"""The event type of an asset event."""
ASSET_MATERIALIZATION = "ASSET_MATERIALIZATION"
ASSET_OBSERVATION = "ASSET_OBSERVATION"
class Meta:
name = "AssetEventType"
def to_dagster_event_type(self) -> DagsterEventType:
if self == GrapheneRunlessAssetEventType.ASSET_MATERIALIZATION:
return DagsterEventType.ASSET_MATERIALIZATION
elif self == GrapheneRunlessAssetEventType.ASSET_OBSERVATION:
return DagsterEventType.ASSET_OBSERVATION
else:
check.failed(f"unhandled type {self}")
| GrapheneRunlessAssetEventType |
python | neetcode-gh__leetcode | python/0198-house-robber.py | {
"start": 0,
"end": 216
} | class ____:
def rob(self, nums: List[int]) -> int:
rob1, rob2 = 0, 0
for n in nums:
temp = max(n + rob1, rob2)
rob1 = rob2
rob2 = temp
return rob2
| Solution |
python | getsentry__sentry | src/sentry/middleware/integrations/tasks.py | {
"start": 3549,
"end": 4272
} | class ____(_AsyncRegionDispatcher):
@property
def log_code(self) -> str:
return IntegrationProviderSlug.SLACK.value
def unpack_payload(self, response: Response) -> Any:
if response.content:
return orjson.loads(response.content)
return None
@instrumented_task(
name="sentry.middleware.integrations.tasks.convert_to_async_slack_response",
namespace=integrations_control_tasks,
retry=Retry(times=2, delay=5),
silo_mode=SiloMode.CONTROL,
)
def convert_to_async_slack_response(
region_names: list[str],
payload: dict[str, Any],
response_url: str,
) -> None:
_AsyncSlackDispatcher(payload, response_url).dispatch(region_names)
| _AsyncSlackDispatcher |
python | joke2k__faker | tests/providers/test_enum.py | {
"start": 148,
"end": 208
} | class ____(Enum):
Single = auto
| _TestEnumWithSingleElement |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_index.py | {
"start": 17164,
"end": 18127
} | class ____(APITestCase):
endpoint = "sentry-organization-index"
def setup_user(self, is_superuser=False):
self.organization = self.create_organization()
self.user = self.create_user(is_superuser=is_superuser)
self.create_member(
organization=self.organization,
user=self.user,
role="member",
flags=OrganizationMember.flags["member-limit:restricted"],
)
self.login_as(self.user, superuser=is_superuser)
def test_member_limit_redirect(self) -> None:
self.setup_user()
response = self.get_success_response(self.organization.slug, status_code=302)
assert f"/organizations/{self.organization.slug}/disabled-member/" in response.url
def test_member_limit_superuser_no_redirect(self) -> None:
self.setup_user(is_superuser=True)
self.get_success_response(self.organization.slug, status_code=200)
| OrganizationIndexMemberLimitTest |
python | getsentry__sentry | src/sentry/api/bases/organization.py | {
"start": 6583,
"end": 6788
} | class ____(OrganizationPermission):
scope_map = {
"PUT": ["org:read", "org:write", "org:admin"],
"DELETE": ["org:read", "org:write", "org:admin"],
}
| OrganizationPinnedSearchPermission |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-microsoft-sharepoint/source_microsoft_sharepoint/spec.py | {
"start": 311,
"end": 1424
} | class ____(BaseModel):
"""
OAuthCredentials class to hold authentication details for Microsoft OAuth authentication.
This class uses pydantic for data validation and settings management.
"""
class Config:
title = "Authenticate via Microsoft (OAuth)"
# Fields for the OAuth authentication, including tenant_id, client_id, client_secret, and refresh_token
auth_type: Literal["Client"] = Field("Client", const=True)
tenant_id: str = Field(title="Tenant ID", description="Tenant ID of the Microsoft SharePoint user", airbyte_secret=True)
client_id: str = Field(
title="Client ID",
description="Client ID of your Microsoft developer application",
airbyte_secret=True,
)
client_secret: str = Field(
title="Client Secret",
description="Client Secret of your Microsoft developer application",
airbyte_secret=True,
)
refresh_token: Optional[str] = Field(
title="Refresh Token",
description="Refresh Token of your Microsoft developer application",
airbyte_secret=True,
)
| OAuthCredentials |
python | spack__spack | lib/spack/spack/test/tengine.py | {
"start": 236,
"end": 1482
} | class ____:
class A(tengine.Context):
@tengine.context_property
def foo(self):
return 1
class B(tengine.Context):
@tengine.context_property
def bar(self):
return 2
class C(A, B):
@tengine.context_property
def foobar(self):
return 3
@tengine.context_property
def foo(self):
return 10
def test_to_dict(self):
"""Tests that all the context properties in a hierarchy are considered
when building the context dictionary.
"""
# A derives directly from Context
a = TestContext.A()
d = a.to_dict()
assert len(d) == 1
assert "foo" in d
assert d["foo"] == 1
# So does B
b = TestContext.B()
d = b.to_dict()
assert len(d) == 1
assert "bar" in d
assert d["bar"] == 2
# C derives from both and overrides 'foo'
c = TestContext.C()
d = c.to_dict()
assert len(d) == 3
for x in ("foo", "bar", "foobar"):
assert x in d
assert d["foo"] == 10
assert d["bar"] == 2
assert d["foobar"] == 3
@pytest.mark.usefixtures("config")
| TestContext |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 4380,
"end": 5613
} | class ____(test_util.TensorFlowTestCase):
@test_util.run_without_tensor_float_32(
"Calls rgb_to_yiq and yiq_to_rgb, which use matmul")
def testBatch(self):
# Build an arbitrary RGB image
np.random.seed(7)
batch_size = 5
shape = (batch_size, 2, 7, 3)
for nptype in [np.float32, np.float64]:
inp = np.random.rand(*shape).astype(nptype)
# Convert to YIQ and back, as a batch and individually
with self.cached_session():
batch0 = constant_op.constant(inp)
batch1 = image_ops.rgb_to_yiq(batch0)
batch2 = image_ops.yiq_to_rgb(batch1)
split0 = array_ops_stack.unstack(batch0)
split1 = list(map(image_ops.rgb_to_yiq, split0))
split2 = list(map(image_ops.yiq_to_rgb, split1))
join1 = array_ops_stack.stack(split1)
join2 = array_ops_stack.stack(split2)
batch1, batch2, join1, join2 = self.evaluate(
[batch1, batch2, join1, join2])
# Verify that processing batch elements together is the same as separate
self.assertAllClose(batch1, join1, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, join2, rtol=1e-4, atol=1e-4)
self.assertAllClose(batch2, inp, rtol=1e-4, atol=1e-4)
| RGBToYIQTest |
python | networkx__networkx | networkx/algorithms/isomorphism/tests/test_temporalisomorphvf2.py | {
"start": 1768,
"end": 4942
} | class ____:
"""
A test class for the undirected temporal graph matcher.
"""
def provide_g1_topology(self):
G1 = nx.Graph()
G1.add_edges_from(provide_g1_edgelist())
return G1
def provide_g2_path_3edges(self):
G2 = nx.Graph()
G2.add_edges_from([(0, 1), (1, 2), (2, 3)])
return G2
def test_timdelta_zero_timeRespecting_returnsTrue(self):
G1 = self.provide_g1_topology()
temporal_name = "date"
G1 = put_same_time(G1, temporal_name)
G2 = self.provide_g2_path_3edges()
d = timedelta()
gm = iso.TimeRespectingGraphMatcher(G1, G2, temporal_name, d)
assert gm.subgraph_is_isomorphic()
def test_timdelta_zero_datetime_timeRespecting_returnsTrue(self):
G1 = self.provide_g1_topology()
temporal_name = "date"
G1 = put_same_datetime(G1, temporal_name)
G2 = self.provide_g2_path_3edges()
d = timedelta()
gm = iso.TimeRespectingGraphMatcher(G1, G2, temporal_name, d)
assert gm.subgraph_is_isomorphic()
def test_attNameStrange_timdelta_zero_timeRespecting_returnsTrue(self):
G1 = self.provide_g1_topology()
temporal_name = "strange_name"
G1 = put_same_time(G1, temporal_name)
G2 = self.provide_g2_path_3edges()
d = timedelta()
gm = iso.TimeRespectingGraphMatcher(G1, G2, temporal_name, d)
assert gm.subgraph_is_isomorphic()
def test_notTimeRespecting_returnsFalse(self):
G1 = self.provide_g1_topology()
temporal_name = "date"
G1 = put_sequence_time(G1, temporal_name)
G2 = self.provide_g2_path_3edges()
d = timedelta()
gm = iso.TimeRespectingGraphMatcher(G1, G2, temporal_name, d)
assert not gm.subgraph_is_isomorphic()
def test_timdelta_one_config0_returns_no_embeddings(self):
G1 = self.provide_g1_topology()
temporal_name = "date"
G1 = put_time_config_0(G1, temporal_name)
G2 = self.provide_g2_path_3edges()
d = timedelta(days=1)
gm = iso.TimeRespectingGraphMatcher(G1, G2, temporal_name, d)
count_match = len(list(gm.subgraph_isomorphisms_iter()))
assert count_match == 0
def test_timdelta_one_config1_returns_four_embedding(self):
G1 = self.provide_g1_topology()
temporal_name = "date"
G1 = put_time_config_1(G1, temporal_name)
G2 = self.provide_g2_path_3edges()
d = timedelta(days=1)
gm = iso.TimeRespectingGraphMatcher(G1, G2, temporal_name, d)
count_match = len(list(gm.subgraph_isomorphisms_iter()))
assert count_match == 4
def test_timdelta_one_config2_returns_ten_embeddings(self):
G1 = self.provide_g1_topology()
temporal_name = "date"
G1 = put_time_config_2(G1, temporal_name)
G2 = self.provide_g2_path_3edges()
d = timedelta(days=1)
gm = iso.TimeRespectingGraphMatcher(G1, G2, temporal_name, d)
L = list(gm.subgraph_isomorphisms_iter())
count_match = len(list(gm.subgraph_isomorphisms_iter()))
assert count_match == 10
| TestTimeRespectingGraphMatcher |
python | doocs__leetcode | solution/1300-1399/1379.Find a Corresponding Node of a Binary Tree in a Clone of That Tree/Solution.py | {
"start": 164,
"end": 591
} | class ____:
def getTargetCopy(
self, original: TreeNode, cloned: TreeNode, target: TreeNode
) -> TreeNode:
def dfs(root1: TreeNode, root2: TreeNode) -> TreeNode:
if root1 is None:
return None
if root1 == target:
return root2
return dfs(root1.left, root2.left) or dfs(root1.right, root2.right)
return dfs(original, cloned)
| Solution |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_generators.py | {
"start": 10731,
"end": 16876
} | class ____(__TestCase):
# Tests for the issue #23353: check that the currently handled exception
# is correctly saved/restored in PyEval_EvalFrameEx().
def test_except_throw(self):
def store_raise_exc_generator():
try:
self.assertIsNone(sys.exception())
yield
except Exception as exc:
# exception raised by gen.throw(exc)
self.assertIsInstance(sys.exception(), ValueError)
self.assertIsNone(exc.__context__)
yield
# ensure that the exception is not lost
self.assertIsInstance(sys.exception(), ValueError)
yield
# we should be able to raise back the ValueError
raise
make = store_raise_exc_generator()
next(make)
try:
raise ValueError()
except Exception as exc:
try:
make.throw(exc)
except Exception:
pass
next(make)
with self.assertRaises(ValueError) as cm:
next(make)
self.assertIsNone(cm.exception.__context__)
self.assertIsNone(sys.exception())
def test_except_next(self):
def gen():
self.assertIsInstance(sys.exception(), ValueError)
yield "done"
g = gen()
try:
raise ValueError
except Exception:
self.assertEqual(next(g), "done")
self.assertIsNone(sys.exception())
def test_except_gen_except(self):
def gen():
try:
self.assertIsNone(sys.exception())
yield
# we are called from "except ValueError:", TypeError must
# inherit ValueError in its context
raise TypeError()
except TypeError as exc:
self.assertIsInstance(sys.exception(), TypeError)
self.assertEqual(type(exc.__context__), ValueError)
# here we are still called from the "except ValueError:"
self.assertIsInstance(sys.exception(), ValueError)
yield
self.assertIsNone(sys.exception())
yield "done"
g = gen()
next(g)
try:
raise ValueError
except Exception:
next(g)
self.assertEqual(next(g), "done")
self.assertIsNone(sys.exception())
def test_nested_gen_except_loop(self):
def gen():
for i in range(100):
self.assertIsInstance(sys.exception(), TypeError)
yield "doing"
def outer():
try:
raise TypeError
except:
for x in gen():
yield x
try:
raise ValueError
except Exception:
for x in outer():
self.assertEqual(x, "doing")
self.assertEqual(sys.exception(), None)
def test_except_throw_exception_context(self):
def gen():
try:
try:
self.assertIsNone(sys.exception())
yield
except ValueError:
# we are called from "except ValueError:"
self.assertIsInstance(sys.exception(), ValueError)
raise TypeError()
except Exception as exc:
self.assertIsInstance(sys.exception(), TypeError)
self.assertEqual(type(exc.__context__), ValueError)
# we are still called from "except ValueError:"
self.assertIsInstance(sys.exception(), ValueError)
yield
self.assertIsNone(sys.exception())
yield "done"
g = gen()
next(g)
try:
raise ValueError
except Exception as exc:
g.throw(exc)
self.assertEqual(next(g), "done")
self.assertIsNone(sys.exception())
def test_except_throw_bad_exception(self):
class E(Exception):
def __new__(cls, *args, **kwargs):
return cls
def boring_generator():
yield
gen = boring_generator()
err_msg = 'should have returned an instance of BaseException'
with self.assertRaisesRegex(TypeError, err_msg):
gen.throw(E)
self.assertRaises(StopIteration, next, gen)
def generator():
with self.assertRaisesRegex(TypeError, err_msg):
yield
gen = generator()
next(gen)
with self.assertRaises(StopIteration):
gen.throw(E)
def test_gen_3_arg_deprecation_warning(self):
def g():
yield 42
gen = g()
with self.assertWarns(DeprecationWarning):
with self.assertRaises(TypeError):
gen.throw(TypeError, TypeError(24), None)
def test_stopiteration_error(self):
# See also PEP 479.
def gen():
raise StopIteration
yield
with self.assertRaisesRegex(RuntimeError, 'raised StopIteration'):
next(gen())
def test_tutorial_stopiteration(self):
# Raise StopIteration" stops the generator too:
def f():
yield 1
raise StopIteration
yield 2 # never reached
g = f()
self.assertEqual(next(g), 1)
with self.assertRaisesRegex(RuntimeError, 'raised StopIteration'):
next(g)
def test_return_tuple(self):
def g():
return (yield 1)
gen = g()
self.assertEqual(next(gen), 1)
with self.assertRaises(StopIteration) as cm:
gen.send((2,))
self.assertEqual(cm.exception.value, (2,))
def test_return_stopiteration(self):
def g():
return (yield 1)
gen = g()
self.assertEqual(next(gen), 1)
with self.assertRaises(StopIteration) as cm:
gen.send(StopIteration(2))
self.assertIsInstance(cm.exception.value, StopIteration)
self.assertEqual(cm.exception.value.value, 2)
| ExceptionTest |
python | huggingface__transformers | src/transformers/models/video_llama_3/modular_video_llama_3.py | {
"start": 21557,
"end": 22962
} | class ____(ModelOutput):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`)
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(num_images_features, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
video_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(num_video_features, hidden_size)`.
video_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
last_hidden_state: torch.FloatTensor = None
past_key_values: Optional[list[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
video_hidden_states: Optional[torch.FloatTensor] = None
| VideoLlama3ModelOutputWithPast |
python | jina-ai__jina | jina/logging/logger.py | {
"start": 3031,
"end": 9349
} | class ____:
"""
Build a logger for a context.
:param context: The context identifier of the class, module or method.
:param log_config: The configuration file for the logger.
:return:: an executor object.
"""
supported = {'FileHandler', 'StreamHandler', 'SysLogHandler', 'RichHandler', 'TimedRotatingFileHandler', 'RotatingFileHandler'}
def __init__(
self,
context: str,
name: Optional[str] = None,
log_config: Optional[str] = None,
quiet: bool = False,
**kwargs,
):
log_config = os.getenv(
'JINA_LOG_CONFIG',
log_config or 'default',
)
if quiet or os.getenv('JINA_LOG_CONFIG', None) == 'QUIET':
log_config = 'quiet'
if not name:
name = os.getenv('JINA_DEPLOYMENT_NAME', context)
self.logger = logging.getLogger(context)
self.logger.propagate = False
context_vars = {
'name': name,
'uptime': __uptime__,
'context': context,
}
self.add_handlers(log_config, **context_vars)
self.debug = self.logger.debug
self.warning = self.logger.warning
self.critical = self.logger.critical
self.error = self.logger.error
self.info = self.logger.info
self._is_closed = False
self.debug_enabled = self.logger.isEnabledFor(logging.DEBUG)
def success(self, *args):
"""
Provides an API to print success messages
:param args: the args to be forwarded to the log
"""
self.logger.log(LogVerbosity.SUCCESS, *args)
@property
def handlers(self):
"""
Get the handlers of the logger.
:return:: Handlers of logger.
"""
return self.logger.handlers
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def close(self):
"""Close all the handlers."""
if not self._is_closed:
for handler in self.logger.handlers:
handler.close()
self._is_closed = True
def add_handlers(self, config_path: Optional[str] = None, **kwargs):
"""
Add handlers from config file.
:param config_path: Path of config file.
:param kwargs: Extra parameters.
"""
self.logger.handlers = []
if not os.path.exists(config_path):
old_config_path = config_path
if 'logging.' in config_path and '.yml' in config_path:
config_path = os.path.join(__resources_path__, config_path)
else:
config_path = os.path.join(
__resources_path__, f'logging.{config_path}.yml'
)
if not os.path.exists(config_path):
config_path = old_config_path
with open(config_path, encoding='utf-8') as fp:
config = JAML.load(fp)
for h in config['handlers']:
cfg = config['configs'].get(h, None)
fmt = getattr(formatter, cfg.get('formatter', 'Formatter'))
if h not in self.supported or not cfg:
raise ValueError(
f'can not find configs for {h}, maybe it is not supported'
)
handler = None
if h == 'StreamHandler':
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(fmt(cfg['format'].format_map(kwargs)))
elif h == 'RichHandler':
kwargs_handler = copy.deepcopy(cfg)
kwargs_handler.pop('format')
handler = RichHandler(**kwargs_handler)
handler.setFormatter(fmt(cfg['format'].format_map(kwargs)))
elif h == 'SysLogHandler' and not __windows__:
if cfg['host'] and cfg['port']:
handler = SysLogHandlerWrapper(address=(cfg['host'], cfg['port']))
else:
# a UNIX socket is used
if platform.system() == 'Darwin':
handler = SysLogHandlerWrapper(address='/var/run/syslog')
else:
handler = SysLogHandlerWrapper(address='/dev/log')
if handler:
handler.ident = cfg.get('ident', '')
handler.setFormatter(fmt(cfg['format'].format_map(kwargs)))
try:
handler._connect_unixsocket(handler.address)
except OSError:
handler = None
pass
elif h == 'FileHandler':
filename = cfg['output'].format_map(kwargs)
if __windows__:
# colons are not allowed in filenames
filename = filename.replace(':', '.')
handler = logging.FileHandler(filename, delay=True)
handler.setFormatter(fmt(cfg['format'].format_map(kwargs)))
elif h == 'TimedRotatingFileHandler':
filename = cfg['filename'].format_map(kwargs)
handler = logging.handlers.TimedRotatingFileHandler(
filename=filename,
when=cfg['when'],
interval=cfg['interval'],
backupCount=cfg['backupCount'],
encoding=cfg.get('encoding', 'utf-8')
)
handler.setFormatter(fmt(cfg['format'].format_map(kwargs)))
elif h == 'RotatingFileHandler':
filename = cfg['filename'].format_map(kwargs)
handler = logging.handlers.RotatingFileHandler(
filename=filename,
maxBytes=cfg['maxBytes'],
backupCount=cfg['backupCount'],
encoding=cfg.get('encoding', 'utf-8')
)
handler.setFormatter(fmt(cfg['format'].format_map(kwargs)))
if handler:
self.logger.addHandler(handler)
verbose_level = LogVerbosity.from_string(config['level'])
if 'JINA_LOG_LEVEL' in os.environ:
verbose_level = LogVerbosity.from_string(os.environ['JINA_LOG_LEVEL'])
self.logger.setLevel(verbose_level.value)
| JinaLogger |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 36089,
"end": 36945
} | class ____(ASTExpression):
def __init__(self, expr: ASTExpression) -> None:
self.expr = expr
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTSizeofExpr):
return NotImplemented
return self.expr == other.expr
def __hash__(self) -> int:
return hash(self.expr)
def _stringify(self, transform: StringifyTransform) -> str:
return 'sizeof ' + transform(self.expr)
def get_id(self, version: int) -> str:
return 'sz' + self.expr.get_id(version)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
signode += addnodes.desc_sig_keyword('sizeof', 'sizeof')
signode += addnodes.desc_sig_space()
self.expr.describe_signature(signode, mode, env, symbol)
| ASTSizeofExpr |
python | vyperlang__vyper | tests/evm_backends/abi.py | {
"start": 431,
"end": 1984
} | class ____(Encoder):
"""
Custom encoder that converts some types to the expected format.
"""
@classmethod
def visit_BytesNode(cls, node: BytesNode, value: bytes | str) -> bytes:
if isinstance(value, str):
assert value.startswith("0x"), "Sanity check failed: expected hex string"
value = bytes.fromhex(value[2:])
return super().visit_BytesNode(node, value)
@classmethod
def visit_FixedNode(cls, node: FixedNode, value: Decimal | int) -> bytes:
# REVIEW: note coming changes to decimal (PR #3696)
if isinstance(value, int):
value = Decimal(value)
return super().visit_FixedNode(node, value)
@classmethod
def visit_AddressNode(cls, node: AddressNode, value: str | bytes | HexBytes) -> bytes:
if isinstance(value, HexBytes):
value = value.hex()
if isinstance(value, bytes):
value = "0x" + value.hex()
return super().visit_AddressNode(node, value)
def _get_parser(schema: str):
try:
return _parsers[schema]
except KeyError:
_parsers[schema] = (ret := Parser.parse(schema))
return ret
def abi_encode(schema: str, data: Any) -> bytes:
return _Encoder.encode(_get_parser(schema), data)
def abi_decode(schema: str, data: bytes) -> Any:
return Decoder.decode(_get_parser(schema), data)
def is_abi_encodable(abi_type: str, data: Any) -> bool:
try:
abi_encode(abi_type, data)
return True
except ABIError:
return False
| _Encoder |
python | django__django | django/db/backends/oracle/base.py | {
"start": 15183,
"end": 17632
} | class ____:
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and (
isinstance(param, datetime.datetime)
and not isinstance(param, Oracle_datetime)
):
param = Oracle_datetime.from_datetime(param)
string_size = 0
has_boolean_data_type = (
cursor.database.features.supports_boolean_expr_in_select_clause
)
if not has_boolean_data_type:
# Oracle < 23c doesn't recognize True and False correctly.
if param is True:
param = 1
elif param is False:
param = 0
if hasattr(param, "bind_parameter"):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, (bytes, datetime.timedelta)):
self.force_bytes = param
else:
# To transmit to the database, we need Unicode if supported
# To get size right, we must consider bytes.
self.force_bytes = force_str(param, cursor.charset, strings_only)
if isinstance(self.force_bytes, str):
# We could optimize by only converting up to 4000 bytes here
string_size = len(force_bytes(param, cursor.charset, strings_only))
if hasattr(param, "input_size"):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif string_size > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.DB_TYPE_CLOB
elif isinstance(param, datetime.datetime):
self.input_size = Database.DB_TYPE_TIMESTAMP
elif has_boolean_data_type and isinstance(param, bool):
self.input_size = Database.DB_TYPE_BOOLEAN
else:
self.input_size = None
| OracleParam |
python | kamyu104__LeetCode-Solutions | Python/compare-version-numbers.py | {
"start": 742,
"end": 2285
} | class ____(object):
def compareVersion(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
v1, v2 = version1.split("."), version2.split(".")
if len(v1) > len(v2):
v2 += ['0' for _ in xrange(len(v1) - len(v2))]
elif len(v1) < len(v2):
v1 += ['0' for _ in xrange(len(v2) - len(v1))]
i = 0
while i < len(v1):
if int(v1[i]) > int(v2[i]):
return 1
elif int(v1[i]) < int(v2[i]):
return -1
else:
i += 1
return 0
def compareVersion2(self, version1, version2):
"""
:type version1: str
:type version2: str
:rtype: int
"""
v1 = [int(x) for x in version1.split('.')]
v2 = [int(x) for x in version2.split('.')]
while len(v1) != len(v2):
if len(v1) > len(v2):
v2.append(0)
else:
v1.append(0)
return cmp(v1, v2)
def compareVersion3(self, version1, version2):
splits = (map(int, v.split('.')) for v in (version1, version2))
return cmp(*zip(*itertools.izip_longest(*splits, fillvalue=0)))
def compareVersion4(self, version1, version2):
main1, _, rest1 = ('0' + version1).partition('.')
main2, _, rest2 = ('0' + version2).partition('.')
return cmp(int(main1), int(main2)) or len(rest1 + rest2) and self.compareVersion4(rest1, rest2)
| Solution2 |
python | getsentry__sentry | tests/acceptance/test_project_keys.py | {
"start": 1364,
"end": 2624
} | class ____(AcceptanceTestCase, SnubaTestCase):
def setUp(self) -> None:
super().setUp()
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(name="Rowdy Tiger", owner=None)
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.create_member(user=self.user, organization=self.org, role="owner", teams=[self.team])
self.pk = ProjectKey.objects.create(
project=self.project,
label="Default",
public_key="5cc0482a13d248ff99f9717101dd6356",
secret_key="410fd998318844b8894775f36184ec28",
date_added=datetime(2015, 10, 1, 21, 19, 5, 648517, tzinfo=timezone.utc),
)
self.login_as(self.user)
self.path = f"/{self.org.slug}/{self.project.slug}/settings/keys/{self.pk.public_key}/"
def test_simple(self) -> None:
self.browser.get(self.path)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.wait_until_test_id("key-details")
self.browser.wait_until_not('[data-test-id="loading-placeholder"]')
| ProjectKeyDetailsTest |
python | python-openxml__python-docx | src/docx/oxml/shared.py | {
"start": 944,
"end": 1323
} | class ____(BaseOxmlElement):
"""Used for `w:b`, `w:i` elements and others.
Contains a bool-ish string in its `val` attribute, xsd:boolean plus "on" and
"off". Defaults to `True`, so `<w:b>` for example means "bold is turned on".
"""
val: bool = OptionalAttribute( # pyright: ignore[reportAssignmentType]
"w:val", ST_OnOff, default=True
)
| CT_OnOff |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/side_effects.py | {
"start": 1087,
"end": 1141
} | class ____:
def append(self, arg):
pass
| MyList |
python | huggingface__transformers | src/transformers/models/dots1/modeling_dots1.py | {
"start": 2246,
"end": 3016
} | class ____(nn.Module):
def __init__(self, hidden_size, eps: float = 1e-6) -> None:
"""
Dots1RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| Dots1RMSNorm |
python | doocs__leetcode | solution/0900-0999/0924.Minimize Malware Spread/Solution.py | {
"start": 701,
"end": 1328
} | class ____:
def minMalwareSpread(self, graph: List[List[int]], initial: List[int]) -> int:
n = len(graph)
uf = UnionFind(n)
for i in range(n):
for j in range(i + 1, n):
graph[i][j] and uf.union(i, j)
cnt = Counter(uf.find(x) for x in initial)
ans, mx = n, 0
for x in initial:
root = uf.find(x)
if cnt[root] > 1:
continue
sz = uf.get_size(root)
if sz > mx or (sz == mx and x < ans):
ans = x
mx = sz
return min(initial) if ans == n else ans
| Solution |
python | doocs__leetcode | solution/1100-1199/1111.Maximum Nesting Depth of Two Valid Parentheses Strings/Solution.py | {
"start": 0,
"end": 322
} | class ____:
def maxDepthAfterSplit(self, seq: str) -> List[int]:
ans = [0] * len(seq)
x = 0
for i, c in enumerate(seq):
if c == "(":
ans[i] = x & 1
x += 1
else:
x -= 1
ans[i] = x & 1
return ans
| Solution |
python | doocs__leetcode | solution/1600-1699/1629.Slowest Key/Solution.py | {
"start": 0,
"end": 399
} | class ____:
def slowestKey(self, releaseTimes: List[int], keysPressed: str) -> str:
ans = keysPressed[0]
mx = releaseTimes[0]
for i in range(1, len(keysPressed)):
d = releaseTimes[i] - releaseTimes[i - 1]
if d > mx or (d == mx and ord(keysPressed[i]) > ord(ans)):
mx = d
ans = keysPressed[i]
return ans
| Solution |
python | walkccc__LeetCode | solutions/1408. String Matching in an Array/1408-3.py | {
"start": 103,
"end": 531
} | class ____:
def __init__(self):
self.root = TrieNode()
def insert(self, word: str) -> None:
node: TrieNode = self.root
for c in word:
node = node.children.setdefault(c, TrieNode())
node.count += 1
def search(self, word: str) -> bool:
node: TrieNode = self.root
for c in word:
if c not in node.children:
return False
node = node.children[c]
return node.count > 1
| Trie |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/translate.py | {
"start": 17847,
"end": 21535
} | class ____(GoogleCloudBaseOperator):
"""
Create a Google Cloud Translate dataset.
Creates a `native` translation dataset, using API V3.
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:TranslateCreateDatasetOperator`.
:param dataset: The dataset to create. If a dict is provided, it must correspond to
the automl_translation.Dataset type.
:param project_id: ID of the Google Cloud project where dataset is located.
If not provided default project_id is used.
:param location: The location of the project.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"dataset",
"location",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (TranslationNativeDatasetLink(),)
def __init__(
self,
*,
project_id: str = PROVIDE_PROJECT_ID,
location: str,
dataset: dict | automl_translation.Dataset,
metadata: Sequence[tuple[str, str]] = (),
timeout: float | _MethodDefault = DEFAULT,
retry: Retry | _MethodDefault | None = DEFAULT,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.dataset = dataset
self.metadata = metadata
self.timeout = timeout
self.retry = retry
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> str:
hook = TranslateHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Dataset creation started %s...", self.dataset)
result_operation = hook.create_dataset(
dataset=self.dataset,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = hook.wait_for_operation_result(result_operation)
result = type(result).to_dict(result)
dataset_id = hook.extract_object_id(result)
context["ti"].xcom_push(key="dataset_id", value=dataset_id)
self.log.info("Dataset creation complete. The dataset_id: %s.", dataset_id)
project_id = self.project_id or hook.project_id
TranslationNativeDatasetLink.persist(
context=context,
dataset_id=dataset_id,
project_id=project_id,
location=self.location,
)
return result
| TranslateCreateDatasetOperator |
python | PrefectHQ__prefect | tests/cli/deployment/test_deployment_cli.py | {
"start": 36385,
"end": 42249
} | class ____:
@pytest.fixture
async def deployment_name(
self, deployment: DeploymentResponse, prefect_client: PrefectClient
):
flow = await prefect_client.read_flow(deployment.flow_id)
return f"{flow.name}/{deployment.name}"
def test_run_wraps_parameter_stdin_parsing_exception(self, deployment_name: str):
invoke_and_assert(
["deployment", "run", deployment_name, "--params", "-"],
expected_code=1,
expected_output_contains="Failed to parse JSON",
user_input="not-valid-json",
)
def test_run_wraps_parameter_stdin_empty(self, deployment_name: str):
invoke_and_assert(
["deployment", "run", deployment_name, "--params", "-"],
expected_code=1,
expected_output_contains="No data passed to stdin",
)
def test_run_wraps_parameters_parsing_exception(self, deployment_name: str):
invoke_and_assert(
["deployment", "run", deployment_name, "--params", "not-valid-json"],
expected_code=1,
expected_output_contains="Failed to parse JSON",
)
def test_wraps_parameter_json_parsing_exception(self, deployment_name: str):
invoke_and_assert(
["deployment", "run", deployment_name, "--param", 'x="foo"1'],
expected_code=1,
expected_output_contains="Failed to parse JSON for parameter 'x'",
)
def test_validates_parameters_are_in_deployment_schema(
self,
deployment_name: str,
):
invoke_and_assert(
["deployment", "run", deployment_name, "--param", "x=test"],
expected_code=1,
expected_output_contains=[
"parameters were specified but not found on the deployment: 'x'",
"parameters are available on the deployment: 'name'",
],
)
@pytest.mark.parametrize(
"given,expected",
[
("foo", "foo"),
('"foo"', "foo"),
(1, 1),
('["one", "two"]', ["one", "two"]),
('{"key": "val"}', {"key": "val"}),
('["one", 2]', ["one", 2]),
('{"key": 2}', {"key": 2}),
],
)
async def test_passes_parameters_to_flow_run(
self,
deployment: DeploymentResponse,
deployment_name: str,
prefect_client: PrefectClient,
given: Any,
expected: Any,
):
"""
This test ensures the parameters are set on the created flow run and that
data types are cast correctly.
"""
await run_sync_in_worker_thread(
invoke_and_assert,
["deployment", "run", deployment_name, "--param", f"name={given}"],
)
flow_runs = await prefect_client.read_flow_runs(
deployment_filter=DeploymentFilter(
id=DeploymentFilterId(any_=[deployment.id])
)
)
assert len(flow_runs) == 1
flow_run = flow_runs[0]
assert flow_run.parameters == {"name": expected}
async def test_passes_parameters_from_stdin_to_flow_run(
self,
deployment: DeploymentResponse,
deployment_name: str,
prefect_client: PrefectClient,
):
await run_sync_in_worker_thread(
invoke_and_assert,
["deployment", "run", deployment_name, "--params", "-"],
json.dumps({"name": "foo"}), # stdin
)
flow_runs = await prefect_client.read_flow_runs(
deployment_filter=DeploymentFilter(
id=DeploymentFilterId(any_=[deployment.id])
)
)
assert len(flow_runs) == 1
flow_run = flow_runs[0]
assert flow_run.parameters == {"name": "foo"}
async def test_passes_parameters_from_dict_to_flow_run(
self,
deployment: DeploymentResponse,
deployment_name: str,
prefect_client: PrefectClient,
):
await run_sync_in_worker_thread(
invoke_and_assert,
[
"deployment",
"run",
deployment_name,
"--params",
json.dumps({"name": "foo"}),
],
)
flow_runs = await prefect_client.read_flow_runs(
deployment_filter=DeploymentFilter(
id=DeploymentFilterId(any_=[deployment.id])
)
)
assert len(flow_runs) == 1
flow_run = flow_runs[0]
assert flow_run.parameters == {"name": "foo"}
async def test_sets_templated_flow_run_name(
self,
deployment: DeploymentResponse,
deployment_name: str,
prefect_client: PrefectClient,
):
await run_sync_in_worker_thread(
invoke_and_assert,
[
"deployment",
"run",
deployment_name,
"--flow-run-name",
"hello-{name}",
"--param",
"name=tester",
],
expected_code=0,
)
flow_runs = await prefect_client.read_flow_runs(
deployment_filter=DeploymentFilter(
id=DeploymentFilterId(any_=[deployment.id])
)
)
assert len(flow_runs) == 1
assert flow_runs[0].name == "hello-tester"
def test_raises_error_on_missing_template_param(self, deployment_name: str):
run_sync_in_worker_thread(
invoke_and_assert,
[
"deployment",
"run",
deployment_name,
"--flow-run-name",
"hello-{missing}",
],
expected_code=1,
expected_output_contains="Missing parameter for flow run name: 'missing' is undefined",
)
| TestDeploymentRun |
python | walkccc__LeetCode | solutions/1979. Find Greatest Common Divisor of Array/1979.py | {
"start": 0,
"end": 103
} | class ____:
def findGCD(self, nums: list[int]) -> int:
return math.gcd(min(nums), max(nums))
| Solution |
python | ApeWorX__ape | src/ape/api/convert.py | {
"start": 173,
"end": 1842
} | class ____(BaseInterfaceModel, Generic[ConvertedType]):
@abstractmethod
def is_convertible(self, value: Any) -> bool:
"""
Returns ``True`` if string value provided by ``value`` is convertible using
:meth:`ape.api.convert.ConverterAPI.convert`.
Args:
value (Any): The value to check.
Returns:
bool: ``True`` when the given value can be converted.
"""
@abstractmethod
def convert(self, value: Any) -> ConvertedType:
"""
Convert the given value to the type specified as the generic for this class.
Implementations of this API must throw a :class:`~ape.exceptions.ConversionError`
when the item fails to convert properly.
Usage example::
from ape import convert
from ape.types import AddressType
convert("1 gwei", int)
# 1000000000
convert("1 ETH", int)
# 1000000000000000000
convert("0x283Af0B28c62C092C9727F1Ee09c02CA627EB7F5", bytes)
# HexBytes('0x283af0b28c62c092c9727f1ee09c02ca627eb7f5')
convert("vitalik.eth", AddressType) # with ape-ens plugin installed
# '0xd8dA6BF26964aF9D7eEd9e03E53415D37aA96045'
"""
@property
def name(self) -> str:
"""
The calculated name of the converter class.
Typically, it is the lowered prefix of the class without
the "Converter" or "Conversions" suffix.
"""
class_name = self.__class__.__name__
name = class_name.replace("Converter", "").replace("Conversions", "")
return name.lower()
| ConverterAPI |
python | getsentry__sentry | tests/sentry/models/test_group.py | {
"start": 16555,
"end": 19056
} | class ____(TestCase, OccurrenceTestMixin):
def setUp(self) -> None:
super().setUp()
self.min_ago = before_now(minutes=1).isoformat()
self.two_min_ago = before_now(minutes=2).isoformat()
self.just_over_one_min_ago = before_now(seconds=61).isoformat()
def test_get_latest_event_no_events(self) -> None:
project = self.create_project()
group = self.create_group(project=project)
assert group.get_latest_event() is None
def test_get_latest_event(self) -> None:
self.store_event(
data={"event_id": "a" * 32, "fingerprint": ["group-1"], "timestamp": self.two_min_ago},
project_id=self.project.id,
)
self.store_event(
data={"event_id": "b" * 32, "fingerprint": ["group-1"], "timestamp": self.min_ago},
project_id=self.project.id,
)
group = Group.objects.get()
group_event = group.get_latest_event()
assert group_event is not None
assert group_event.event_id == "b" * 32
assert group_event.occurrence is None
def test_get_latest_almost_identical_timestamps(self) -> None:
self.store_event(
data={
"event_id": "a" * 32,
"fingerprint": ["group-1"],
"timestamp": self.just_over_one_min_ago,
},
project_id=self.project.id,
)
self.store_event(
data={"event_id": "b" * 32, "fingerprint": ["group-1"], "timestamp": self.min_ago},
project_id=self.project.id,
)
group = Group.objects.get()
group_event = group.get_latest_event()
assert group_event is not None
assert group_event.event_id == "b" * 32
assert group_event.occurrence is None
def test_get_latest_event_occurrence(self) -> None:
event_id = uuid.uuid4().hex
occurrence, _ = self.process_occurrence(
project_id=self.project.id,
event_id=event_id,
event_data={
"fingerprint": ["group-1"],
"timestamp": before_now(minutes=1).isoformat(),
},
)
group = Group.objects.get()
group.update(type=ProfileFileIOGroupType.type_id)
group_event = group.get_latest_event()
assert group_event is not None
assert group_event.event_id == event_id
self.assert_occurrences_identical(group_event.occurrence, occurrence)
| GroupGetLatestEventTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 58122,
"end": 58511
} | class ____(GenericFunction[int]):
"""The CHAR_LENGTH() SQL function."""
type = sqltypes.Integer()
inherit_cache = True
def __init__(self, arg: _ColumnExpressionArgument[str], **kw: Any) -> None:
# slight hack to limit to just one positional argument
# not sure why this one function has this special treatment
super().__init__(arg, **kw)
| char_length |
python | tensorflow__tensorflow | tensorflow/examples/speech_commands/train_test.py | {
"start": 1305,
"end": 1401
} | class ____(object):
def __init__(self, **entries):
self.__dict__.update(entries)
| DictStruct |
python | celery__celery | t/unit/backends/test_couchbase.py | {
"start": 496,
"end": 4846
} | class ____:
def setup_method(self):
self.backend = CouchbaseBackend(app=self.app)
def test_init_no_couchbase(self):
prev, module.Cluster = module.Cluster, None
try:
with pytest.raises(ImproperlyConfigured):
CouchbaseBackend(app=self.app)
finally:
module.Cluster = prev
def test_init_no_settings(self):
self.app.conf.couchbase_backend_settings = []
with pytest.raises(ImproperlyConfigured):
CouchbaseBackend(app=self.app)
def test_init_settings_is_None(self):
self.app.conf.couchbase_backend_settings = None
CouchbaseBackend(app=self.app)
def test_get_connection_connection_exists(self):
with patch('couchbase.cluster.Cluster') as mock_Cluster:
self.backend._connection = sentinel._connection
connection = self.backend._get_connection()
assert sentinel._connection == connection
mock_Cluster.assert_not_called()
def test_get(self):
self.app.conf.couchbase_backend_settings = {}
x = CouchbaseBackend(app=self.app)
x._connection = Mock()
mocked_get = x._connection.get = Mock()
mocked_get.return_value.content = sentinel.retval
# should return None
assert x.get('1f3fab') == sentinel.retval
x._connection.get.assert_called_once_with('1f3fab')
def test_set_no_expires(self):
self.app.conf.couchbase_backend_settings = None
x = CouchbaseBackend(app=self.app)
x.expires = None
x._connection = MagicMock()
x._connection.set = MagicMock()
# should return None
assert x._set_with_state(sentinel.key, sentinel.value, states.SUCCESS) is None
def test_set_expires(self):
self.app.conf.couchbase_backend_settings = None
x = CouchbaseBackend(app=self.app, expires=30)
assert x.expires == 30
x._connection = MagicMock()
x._connection.set = MagicMock()
# should return None
assert x._set_with_state(sentinel.key, sentinel.value, states.SUCCESS) is None
def test_delete(self):
self.app.conf.couchbase_backend_settings = {}
x = CouchbaseBackend(app=self.app)
x._connection = Mock()
mocked_delete = x._connection.remove = Mock()
mocked_delete.return_value = None
# should return None
assert x.delete('1f3fab') is None
x._connection.remove.assert_called_once_with('1f3fab')
def test_config_params(self):
self.app.conf.couchbase_backend_settings = {
'bucket': 'mycoolbucket',
'host': ['here.host.com', 'there.host.com'],
'username': 'johndoe',
'password': 'mysecret',
'port': '1234',
}
x = CouchbaseBackend(app=self.app)
assert x.bucket == 'mycoolbucket'
assert x.host == ['here.host.com', 'there.host.com']
assert x.username == 'johndoe'
assert x.password == 'mysecret'
assert x.port == 1234
def test_backend_by_url(self, url='couchbase://myhost/mycoolbucket'):
from celery.backends.couchbase import CouchbaseBackend
backend, url_ = backends.by_url(url, self.app.loader)
assert backend is CouchbaseBackend
assert url_ == url
def test_backend_params_by_url(self):
url = 'couchbase://johndoe:mysecret@myhost:123/mycoolbucket'
with self.Celery(backend=url) as app:
x = app.backend
assert x.bucket == 'mycoolbucket'
assert x.host == 'myhost'
assert x.username == 'johndoe'
assert x.password == 'mysecret'
assert x.port == 123
def test_expires_defaults_to_config(self):
self.app.conf.result_expires = 10
b = CouchbaseBackend(expires=None, app=self.app)
assert b.expires == 10
def test_expires_is_int(self):
b = CouchbaseBackend(expires=48, app=self.app)
assert b.expires == 48
def test_expires_is_None(self):
b = CouchbaseBackend(expires=None, app=self.app)
assert b.expires == self.app.conf.result_expires.total_seconds()
def test_expires_is_timedelta(self):
b = CouchbaseBackend(expires=timedelta(minutes=1), app=self.app)
assert b.expires == 60
| test_CouchbaseBackend |
python | great-expectations__great_expectations | great_expectations/core/batch_spec.py | {
"start": 2913,
"end": 3952
} | class ____(BatchSpec, metaclass=ABCMeta):
def __init__(
self,
*args,
path: PathStr = None, # type: ignore[assignment] # error raised if not provided
reader_options: dict[str, Any] | None = None,
**kwargs,
) -> None:
if path:
kwargs["path"] = str(path)
if reader_options:
kwargs["reader_options"] = reader_options
super().__init__(*args, **kwargs)
if "path" not in self:
raise InvalidBatchSpecError("PathBatchSpec requires a path element") # noqa: TRY003 # FIXME CoP
@property
def path(self) -> str:
return self.get("path") # type: ignore[return-value] # FIXME CoP
@property
def reader_method(self) -> str:
return self.get("reader_method") # type: ignore[return-value] # FIXME CoP
@property
def reader_options(self) -> dict:
return self.get("reader_options") or {}
FabricReaderMethods: TypeAlias = Literal["read_table", "evaluate_measure", "evaluate_dax"]
| PathBatchSpec |
python | dask__dask | dask/tests/test_tokenize.py | {
"start": 14451,
"end": 22475
} | class ____:
def f(self):
pass
@classmethod
def g(cls):
pass
_special_callables = [
getattr,
str.join,
"foo".join,
WithClassMethod.__str__,
WithClassMethod().__str__,
WithClassMethod.f,
WithClassMethod().f,
WithClassMethod.g,
]
@pytest.mark.parametrize("func", _local_functions())
def test_tokenize_local_functions(func):
check_tokenize(func)
@pytest.mark.parametrize("func", _special_callables)
def test_tokenize_special_callables(func):
check_tokenize(func)
def test_tokenize_functions_unique_token():
all_funcs = _local_functions() + _special_callables
tokens = [check_tokenize(func) for func in all_funcs]
assert len(set(tokens)) == len(tokens)
@pytest.mark.xfail(reason="https://github.com/cloudpipe/cloudpickle/issues/453")
@pytest.mark.parametrize("instance", [False, True])
def test_tokenize_local_classes_from_different_contexts(instance):
def f():
class C:
pass
return C() if instance else C
assert check_tokenize(f()) == check_tokenize(f())
def test_tokenize_local_functions_from_different_contexts():
def f():
def g():
return 123
return g
assert check_tokenize(f()) == check_tokenize(f())
def f1(a, b, c=1):
pass
def f2(a, b=1, c=2):
pass
def f3(a):
pass
def test_tokenize_callable():
assert check_tokenize(f1) != check_tokenize(f2)
def test_tokenize_composite_functions():
assert check_tokenize(partial(f2, b=2)) != check_tokenize(partial(f2, b=3))
assert check_tokenize(partial(f1, b=2)) != check_tokenize(partial(f2, b=2))
assert check_tokenize(compose(f2, f3)) != check_tokenize(compose(f2, f1))
assert check_tokenize(curry(f2)) != check_tokenize(curry(f1))
assert check_tokenize(curry(f2, b=1)) != check_tokenize(curry(f2, b=2))
@pytest.mark.skipif("not pd")
def test_tokenize_pandas():
a = pd.DataFrame({"x": [1, 2, 3], "y": ["4", "asd", None]}, index=[1, 2, 3])
b = pd.DataFrame({"x": [1, 2, 3], "y": ["4", "asd", None]}, index=[1, 2, 3])
assert check_tokenize(a) == check_tokenize(b)
b.index.name = "foo"
assert check_tokenize(a) != check_tokenize(b)
a = pd.DataFrame({"x": [1, 2, 3], "y": ["a", "b", "a"]})
b = pd.DataFrame({"x": [1, 2, 3], "y": ["a", "b", "a"]})
a["z"] = a.y.astype("category")
# tokenize is currently sensitive to fragmentation of the pyarrow Array
# backing the columns. Create a new one to work around this.
a.columns = pd.Index(["x", "y", "z"])
assert check_tokenize(a) != check_tokenize(b)
b["z"] = a.y.astype("category")
b.columns = pd.Index(["x", "y", "z"])
assert check_tokenize(a) == check_tokenize(b)
@pytest.mark.skipif("not pd")
def test_tokenize_pandas_fragmented_index():
s = pd.concat([pd.Series(1, index=["a", "b"]), pd.Series(2, index=["c", "d"])])
check_tokenize(s)
@pytest.mark.skipif("not pd")
def test_tokenize_pandas_invalid_unicode():
# see https://github.com/dask/dask/issues/2713
df = pd.DataFrame(
{
"x": [1, 2, 3],
"y": pd.Series(["4", "asd\ud83d", None], dtype="object"),
},
index=[1, 2, 3],
)
df.columns = pd.Index(["x\ud83d", "y\ud83d"], dtype="object")
check_tokenize(df)
@pytest.mark.skipif("not pd")
def test_tokenize_pandas_mixed_unicode_bytes():
df = pd.DataFrame(
{"ö".encode(): [1, 2, 3], "ö": ["ö", "ö".encode(), None]},
index=[1, 2, 3],
)
check_tokenize(df)
@pytest.mark.skipif("not pd")
def test_tokenize_pandas_cloudpickle():
class NeedsCloudPickle:
# pickling not supported because it is a local class
pass
df = pd.DataFrame({"x": ["foo", None, NeedsCloudPickle()]})
check_tokenize(df)
@pytest.mark.skipif("not dd")
def test_tokenize_pandas_extension_array():
arrays = [
pd.array([1, 0, None], dtype="Int64"),
pd.array(["2000"], dtype="Period[D]"),
pd.array([1, 0, 0], dtype="Sparse[int]"),
pd.array([pd.Timestamp("2000")], dtype="datetime64[ns]"),
pd.array([pd.Timestamp("2000", tz="CET")], dtype="datetime64[ns, CET]"),
pd.array(
["a", "b"],
dtype=pd.api.types.CategoricalDtype(["a", "b", "c"], ordered=False),
),
]
arrays.extend(
[
pd.array(["a", "b", None], dtype="string"),
pd.array([True, False, None], dtype="boolean"),
]
)
for arr in arrays:
check_tokenize(arr)
@pytest.mark.skipif("not pd")
def test_tokenize_na():
check_tokenize(pd.NA)
@pytest.mark.skipif("not pd")
def test_tokenize_offset():
for offset in [
pd.offsets.Second(1),
pd.offsets.MonthBegin(2),
pd.offsets.Day(1),
pd.offsets.BQuarterEnd(2),
pd.DateOffset(years=1),
pd.DateOffset(months=7),
pd.DateOffset(days=10),
]:
check_tokenize(offset)
@pytest.mark.skipif("not pd")
def test_tokenize_pandas_index():
idx = pd.Index(["a", "b"])
check_tokenize(idx)
idx = pd.MultiIndex.from_product([["a", "b"], [0, 1]])
check_tokenize(idx)
def test_tokenize_kwargs():
check_tokenize(5, x=1)
assert check_tokenize(5) != check_tokenize(5, x=1)
assert check_tokenize(5, x=1) != check_tokenize(5, x=2)
assert check_tokenize(5, x=1) != check_tokenize(5, y=1)
assert check_tokenize(5, foo="bar") != check_tokenize(5, {"foo": "bar"})
def test_tokenize_same_repr():
class Foo:
def __init__(self, x):
self.x = x
def __repr__(self):
return "a foo"
assert check_tokenize(Foo(1)) != check_tokenize(Foo(2))
def test_tokenize_slotted():
class Foo:
__slots__ = ("x",)
def __init__(self, x):
self.x = x
assert check_tokenize(Foo(1)) != check_tokenize(Foo(2))
def test_tokenize_slotted_no_value():
class Foo:
__slots__ = ("x", "y")
def __init__(self, x=None, y=None):
if x is not None:
self.x = x
if y is not None:
self.y = y
assert check_tokenize(Foo(x=1)) != check_tokenize(Foo(y=1))
check_tokenize(Foo())
def test_tokenize_slots_and_dict():
class Foo:
__slots__ = ("x",)
class Bar(Foo):
def __init__(self, x, y):
self.x = x
if y is not None:
self.y = y
assert Bar(1, 2).__dict__ == {"y": 2}
tokens = [
check_tokenize(Bar(1, 2)),
check_tokenize(Bar(1, 3)),
check_tokenize(Bar(1, None)),
check_tokenize(Bar(2, 2)),
]
assert len(set(tokens)) == len(tokens)
def test_tokenize_method():
class Foo:
def __init__(self, x):
self.x = x
def __dask_tokenize__(self):
return self.x
def hello(self):
return "Hello world"
a, b = Foo(1), Foo(2)
assert check_tokenize(a) != check_tokenize(b)
assert check_tokenize(a.hello) != check_tokenize(b.hello)
# dispatch takes precedence
before = check_tokenize(a)
normalize_token.register(Foo, lambda self: self.x + 1)
after = check_tokenize(a)
assert before != after
del normalize_token._lookup[Foo]
def test_tokenize_callable_class():
    """Callable instances tokenize by both their state and their class."""

    class C:
        def __init__(self, x):
            self.x = x

        def __call__(self):
            return self.x

    class D(C):
        pass

    base_one = C(1)
    base_two = C(2)
    sub_one = D(1)
    # Different state -> different token.
    assert check_tokenize(base_one) != check_tokenize(base_two)
    # Same state but different class -> different token.
    assert check_tokenize(base_one) != check_tokenize(sub_one)
def test_tokenize_callable_class_with_tokenize_method():
    """Always use __dask_tokenize__ if present, even on callable classes."""

    class C:
        def __init__(self, x, y):
            self.x = x
            self.y = y

        def __dask_tokenize__(self):
            return self.x

        def __call__(self):
            pass

    # Only ``x`` feeds the token, so instances differing in ``y`` collide...
    assert check_tokenize(C(1, 2)) == check_tokenize(C(1, 3))
    # ...while instances differing in ``x`` do not.
    assert check_tokenize(C(1, 2)) != check_tokenize(C(2, 2))
| WithClassMethod |
python | google__jax | tests/array_test.py | {
"start": 34966,
"end": 62829
} | class ____(jtu.JaxTestCase):
  def test_mesh_pspec_sharding_interface(self):
    # A NamedSharding over a transposed spec must expose a coherent
    # devices->indices map, XLA HLO sharding, and device assignment.
    mesh = jtu.create_mesh((4, 2), ('x', 'y'))
    pspec = P('y', 'x')
    global_shape = (8, 4)
    mp_sharding = jax.sharding.NamedSharding(mesh, pspec)
    di_map = mp_sharding.devices_indices_map(global_shape)
    hlo_sharding = mp_sharding._to_xla_hlo_sharding(len(global_shape))
    device_assignment = mp_sharding._device_assignment

    self.assertEqual(di_map[mesh.devices.flat[0]], (slice(0, 4), slice(0, 1)))
    self.assertArraysEqual(device_assignment, list(mesh.devices.flat),
                           allow_object_dtype=True)
    self.assertTrue(hlo_sharding.is_tiled())
    self.assertListEqual(hlo_sharding.tile_assignment_dimensions(), [2, 4])
    self.assertListEqual(hlo_sharding.tile_assignment_devices(),
                         [0, 2, 4, 6, 1, 3, 5, 7])

  @jtu.thread_unsafe_test()  # cache_info isn't thread-safe
  def test_util_clear_cache(self):
    # jax.clear_caches() must empty common_devices_indices_map; after one
    # re-population the cache holds exactly one entry.
    mesh = jtu.create_mesh((1,), ('x',))
    s = NamedSharding(mesh, P())
    s.devices_indices_map((8,))
    jax.clear_caches()
    s.devices_indices_map((8,))
    c = common_devices_indices_map.cache_info()
    self.assertEqual(c.currsize, 1)

  @parameterized.named_parameters(
      ("mesh_x_y", P("x", "y")),
      ("mesh_x", P("x")),
      ("mesh_y", P("y")),
      ("mesh_none_y", P(None, "y")),
      ("mesh_none_x", P(None, "x")),
      ("mesh_xy", P(("x", "y"))),
      ("mesh_fully_replicated", P()),
  )
  def test_op_sharding_indices(self, pspec):
    # A GSPMDSharding built from a NamedSharding's HLO sharding must produce
    # the same devices->indices map as the NamedSharding itself.
    shape = (8, 4)
    mesh = jtu.create_mesh((4, 2), ('x', 'y'))
    mps = jax.sharding.NamedSharding(mesh, pspec)
    ops = GSPMDSharding(
        list(mesh.devices.flat), mps._to_xla_hlo_sharding(len(shape)))
    self.assertDictEqual(
        ops.devices_indices_map(shape), mps.devices_indices_map(shape))

  @parameterized.named_parameters(
      ("mesh_x_y", P("x", "y"), (2, 2)),
      ("mesh_x", P("x"), (2, 4)),
      ("mesh_y", P("y"), (4, 4)),
      ("mesh_none_y", P(None, "y"), (8, 2)),
      ("mesh_none_x", P(None, "x"), (8, 1)),
      ("mesh_xy", P(("x", "y")), (1, 4)),
      ("mesh_fully_replicated", P(), (8, 4)),
  )
  def test_shard_shape(self, pspec, expected_shard_shape):
    # Per-shard shape of an (8, 4) array on a 4x2 mesh, for each spec.
    shape = (8, 4)
    mesh = jtu.create_mesh((4, 2), ('x', 'y'))
    mps = jax.sharding.NamedSharding(mesh, pspec)
    self.assertEqual(mps.shard_shape(shape), expected_shard_shape)

  def test_uneven_shard_error(self):
    # Axis 1 of size 3 is not divisible by the 2-way partitioning.
    mesh = jtu.create_mesh((4, 2), ('x', 'y'))
    mps = jax.sharding.NamedSharding(mesh, P('x', 'y'))
    with self.assertRaisesRegex(
        ValueError,
        r"Sharding.*implies that array axis 1 is partitioned 2 times, but the "
        r"dimension size is 3 \(full shape: \(8, 3\), per-dimension tiling "
        r"factors: \[4, 2\] should evenly divide the shape\)"):
      mps.shard_shape((8, 3))
  @jtu.ignore_warning(category=DeprecationWarning)
  @jtu.thread_unsafe_test()  # cache_info isn't thread-safe
  def test_pmap_sharding_hash_eq(self):
    # Equal PmapShardings must hash equal, so the second lookup only hits the
    # devices_indices_map cache and adds no misses.
    if jax.device_count() < 2:
      self.skipTest('Test needs >= 2 devices.')
    if config.pmap_shmap_merge.value:
      self.skipTest(
          'There is not an equivalent cache to test when pmap_shmap_merge=True.'
      )

    shape = (2, 2)
    num_elements = math.prod(shape)
    inp_data = np.arange(num_elements).reshape(shape)
    out = jax.pmap(lambda x: x)(inp_data)
    self.assertIsInstance(out.sharding, jax.sharding.PmapSharding)
    # Populate the device_indices_map cache.
    _ = out.sharding.devices_indices_map(shape)
    cache_info1 = pmap_sharding_devices_indices_map.cache_info()

    inp_data2 = np.arange(num_elements, num_elements + num_elements).reshape(shape)
    out2 = jax.pmap(lambda x: x)(inp_data2)
    # Populate the device_indices_map cache.
    _ = out2.sharding.devices_indices_map(shape)
    cache_info2 = pmap_sharding_devices_indices_map.cache_info()

    self.assertGreater(cache_info2.hits, cache_info1.hits + 1)
    self.assertEqual(cache_info2.misses, cache_info1.misses)

  def test_is_compatible_error(self):
    # A rank-4 spec applied to a rank-2 value must raise.
    shape = (8, 2)
    mesh = jtu.create_mesh((1, 1, 2), ('replica', 'data', 'mdl'))
    mps = jax.sharding.NamedSharding(mesh, P(None, ('mdl',), None, None))
    with self.assertRaisesRegex(
        ValueError,
        r"Sharding NamedSharding.*PartitionSpec\(None, 'mdl', None, None\).*\)"
        ' is only valid for values of rank at least 4, but was applied to a'
        ' value of rank 2'):
      mps.check_compatible_aval(shape)

  def test_is_subclass(self):
    # array version of api_test.py::APITest::test_is_subclass
    self.assertTrue(issubclass(array.ArrayImpl, jax.Array))
    self.assertFalse(issubclass(array.ArrayImpl, np.ndarray))

  def test_gspmd_sharding_repr(self):
    # Tiled (with replicated last tile dim) and fully-replicated OpShardings
    # must both render their HLO sharding text in repr().
    op = xc.OpSharding()
    op.type = xc.OpSharding.Type.OTHER
    op.tile_assignment_dimensions = [4, 1, 2]
    op.tile_assignment_devices = [0, 1, 2, 3, 4, 5, 6, 7]
    op.replicate_on_last_tile_dim = True
    s = GSPMDSharding(jax.devices(), op)
    # memory kind also appears in the repr but only for TPU.
    self.assertIn(
        'GSPMDSharding({devices=[4,1,2]0,1,2,3,4,5,6,7 '
        'last_tile_dim_replicate}', repr(s))

    op2 = xc.OpSharding()
    op2.type = xc.OpSharding.Type.REPLICATED
    s2 = GSPMDSharding(jax.devices(), op2)
    # memory kind also appears in the repr but only for TPU.
    self.assertIn('GSPMDSharding({replicated}', repr(s2))

  @parameterized.named_parameters(
      ("2d_mesh_x", (1, 1), P("x", "y")),
      ("2d_mesh_x_y", (4, 2), P("x", "y")),
      ("2d_mesh_empty", (2, 1), P()),
      ("2d_mesh_p_none", (2, 1), P(None)),
      ("2d_mesh_none_none", (2, 1), P(None, None)),
      ("2d_mesh_tuple_empty", (2, 1), P((),)),
      ("2d_mesh_x_none", (2, 1), P(('x',), None)),
      ("2d_mesh_xy_none", (2, 1), P(('x', 'y'), None)),
      ("2d_mesh_x_tuple_empty", (2, 1), P('x', (), (), ())),
      ("2d_mesh_3_tuple_empty", (2, 1), P((), (), ())),
      ("3d_mesh2_x_none_none", (1, 2, 4), P('x', None, None)),
      ("3d_mesh2_x_y_none", (1, 1, 4), P('x', 'y', None)),
      ("3d_mesh2_xy_none", (1, 1, 4), P(('x', 'y'), None)),
  )
  def test_is_fully_replicated_named_sharding(self, mesh_shape, pspec):
    # NamedSharding.is_fully_replicated must agree with the HLO-level check.
    if len(mesh_shape) == 2:
      axis_names = ('x', 'y')
    elif len(mesh_shape) == 3:
      axis_names = ('x', 'y', 'z')
    else:
      axis_names = ('x',)
    mesh = jtu.create_mesh(mesh_shape, axis_names)
    mps = jax.sharding.NamedSharding(mesh, pspec)
    shape = (8, 2, 4)
    mps_op_sharding = mps._to_xla_hlo_sharding(len(shape))
    ops_ifr = op_shardings.is_hlo_sharding_replicated(mps_op_sharding)
    self.assertEqual(mps.is_fully_replicated, ops_ifr)

  def test_pmap_sharding_repr(self):
    if jax.device_count() < 2:
      self.skipTest('Test needs >= 2 devices.')
    out = jax.pmap(lambda x: x)(jnp.arange(2.))
    str(out.sharding)  # doesn't crash
    repr(out.sharding)  # doesn't crash
  def test_pspec_tuple(self):
    # PartitionSpec behaves like a tuple: equality, index(), and hashing
    # (with None and () entries hashing the same).
    pspec = P('x', 'y', 'z')
    self.assertEqual(pspec, ('x', 'y', 'z'))
    self.assertEqual(pspec.index('z'), 2)
    self.assertEqual(hash(P(None, 'x', 'y', 'z')), hash(P((), 'x', 'y', 'z')))

  @parameterized.named_parameters(
      ('sharded_dim_0', (4, 2), 0),
      ('sharded_dim_1_0', (4, 2), 1),
      ('sharded_dim_2', (4, 2, 4), 2),
      ('sharded_dim_1_1', (2, 4), 1)
  )
  @jtu.ignore_warning(category=DeprecationWarning)
  def test_default_pmap_sharding(self, shape, sharded_dim):
    # pmap's input sharding must match the default sharding computed for the
    # same shape/sharded dim, in both pmap implementations.
    if jax.device_count() < 4:
      self.skipTest('Test needs >= 4 devices.')
    inp = jnp.arange(math.prod(shape)).reshape(shape)
    if config.pmap_shmap_merge.value:
      out = jax.pmap(lambda x: x, in_axes=sharded_dim, axis_name='x')(inp)
      actual_sharding = out.sharding
      expected_sharding = jax.sharding.NamedSharding(
          jax.sharding.Mesh(jax.devices()[: shape[sharded_dim]], 'x'),
          jax.P('x'),
      )
      self.assertEqual(actual_sharding.spec, expected_sharding.spec)
      self.assertEqual(actual_sharding._device_assignment, expected_sharding._device_assignment)
    else:
      compiled = jax.pmap(lambda x: x, in_axes=sharded_dim).lower(inp).compile()
      # TODO(dsuo): Investigate why
      # `compiled._executable.unsafe_call.in_handler.in_shardings` is of type
      # `GSPMDSharding` when `pmap_shmap_merge=True`. It should be
      # `NamedSharding`.
      actual_sharding, = compiled._executable.unsafe_call.in_handler.in_shardings
      expected_sharding = jax.sharding.PmapSharding.default(shape, sharded_dim)
      self.assertEqual(actual_sharding.sharding_spec, expected_sharding.sharding_spec)
      self.assertEqual(actual_sharding._device_assignment, expected_sharding._device_assignment)

  @jtu.ignore_warning(category=DeprecationWarning)
  def test_default_pmap_sharding_with_devices(self):
    # PmapSharding.default must honor an explicitly provided device order.
    if jax.device_count() < 4:
      self.skipTest('Test needs >= 4 devices.')
    devs = jax.devices()
    new_order = (devs[0], devs[3], devs[2], devs[1])
    ps = jax.sharding.PmapSharding.default((4, 2), devices=new_order)
    self.assertEqual(ps._device_assignment, new_order)

  @jtu.ignore_warning(category=DeprecationWarning)
  def test_default_pmap_sharding_replicated(self):
    # out_axes=None replicates the output; its sharding must match the
    # default replicated sharding for each pmap implementation.
    x = np.zeros((len(jax.local_devices()), 8), dtype=np.float32)
    x = jax.pmap(lambda x: x, in_axes=0, out_axes=None, axis_name='x')(x)
    if config.pmap_shmap_merge.value:
      expected_sharding = jax.sharding.NamedSharding(
          mesh=jax.sharding.Mesh(jax.local_devices(), 'x'),
          spec=jax.P(),
      )
      self.assertEqual(x.sharding, expected_sharding)
    else:
      ps = jax.sharding.PmapSharding.default(
          shape=(8,), sharded_dim=None,
          devices=jax.local_devices())
      self.assertEqual(x.sharding, ps)

  def test_mesh_repr(self):
    mesh = jtu.create_mesh((1, 1), ('x', 'y'))
    mesh_repr = repr(mesh)
    self.assertIn('axis_sizes', mesh_repr)
    self.assertIn('axis_names', mesh_repr)

  def test_are_shardings_equivalent(self):
    # is_equivalent_to compares shardings semantically (for a given rank),
    # across NamedSharding / SingleDeviceSharding / Pmap / GSPMD kinds.
    mesh = jtu.create_mesh((1,), ('x'))
    mesh2 = jtu.create_mesh((2, 1), ('x', 'y'))

    s1 = jax.sharding.NamedSharding(mesh, P('x'))
    s2 = jax.sharding.SingleDeviceSharding(jax.devices()[0])
    self.assertTrue(s1.is_equivalent_to(s2, 2))

    s3 = jax.pmap(lambda x: x)(jnp.arange(jax.device_count())).sharding
    s4 = jax.pmap(lambda x: x)(jnp.arange(jax.device_count())).sharding
    self.assertTrue(s3.is_equivalent_to(s4, 2))
    self.assertFalse(s1.is_equivalent_to(s3, 2))
    self.assertFalse(s2.is_equivalent_to(s3, 2))

    s5 = jax.sharding.NamedSharding(mesh2, P('x', 'y'))

    op1 = xc.OpSharding()
    op1.type = xc.OpSharding.Type.REPLICATED
    s6 = GSPMDSharding([jax.devices()[0]], op1)
    s7 = GSPMDSharding(jax.devices(), op1)

    # The OpSharding is replicated but the Sharding itself are on different
    # devices.
    self.assertFalse(s6.is_equivalent_to(s7, 2))

    op2 = xc.OpSharding()
    op2.type = xc.OpSharding.Type.OTHER
    op2.tile_assignment_devices = [0, 1]
    op2.tile_assignment_dimensions = [2, 1]
    s8 = GSPMDSharding(list(mesh2.devices.flat), op2)

    self.assertTrue(s1.is_equivalent_to(s6, 2))
    self.assertTrue(s5.is_equivalent_to(s8, 2))
    self.assertFalse(s5.is_equivalent_to(s2, 2))

    s9 = jax.sharding.NamedSharding(mesh2, P('y'))

    op3 = xc.OpSharding()
    op3.type = xc.OpSharding.Type.OTHER
    op3.tile_assignment_devices = [0, 1]
    op3.tile_assignment_dimensions = [1, 1, 2]
    op3.replicate_on_last_tile_dim = True
    s10 = GSPMDSharding(list(mesh2.devices.flat), op3)

    self.assertTrue(s9.is_equivalent_to(s10, 2))
  def test_devices_indices_map_good_error_message(self):
    # Axis 0 of size 1 cannot be 2-way partitioned; the error must say so.
    shape = (1, 2)
    mesh = jtu.create_mesh((2, 2), ('x', 'y'))
    s = jax.sharding.NamedSharding(mesh, P('x', 'y'))
    with self.assertRaisesRegex(
        ValueError,
        "Sharding.*implies that array axis 0 is partitioned 2 times, but the "
        "dimension size is 1"):
      s.devices_indices_map(shape)

  def test_scalar_input_wrong_pspec(self):
    # A non-empty spec applied to a scalar (rank-0) value must raise.
    mesh = jtu.create_mesh((1, ), ('x'))
    shape = ()
    s = jax.sharding.NamedSharding(mesh, P('x'))
    with self.assertRaisesRegex(
        ValueError,
        r"For scalars the PartitionSpec should be P()"):
      s.check_compatible_aval(shape)

  def test_mesh_caching_during_construction(self):
    # Mesh construction is cached: identical args return the same object.
    if jax.device_count() < 2:
      raise unittest.SkipTest("Requires >=2 devices")
    mesh1 = jax.sharding.Mesh(jax.devices(), 'x')
    mesh2 = jax.sharding.Mesh(jax.devices(), 'x')
    self.assertIs(mesh1, mesh2)

  def test_mesh_str(self):
    mesh = jtu.create_mesh((2, 2, 2), ('x', 'y', 'z'))
    self.assertEqual(
        str(mesh), "Mesh('x': 2, 'y': 2, 'z': 2, axis_types=(Auto, Auto, Auto))"
    )

  def test_make_array_from_callback_error(self):
    # make_array_from_callback works eagerly but must reject being traced.
    mesh_shape = (2, 3)
    global_shape = tuple(np.square(mesh_shape))
    mesh = jtu.create_mesh(mesh_shape, ('x', 'y'), iota_order=True)
    pspec = P('x', 'y')
    sharding = jax.sharding.NamedSharding(mesh, pspec)
    n = math.prod(global_shape)
    global_x = jnp.arange(n).astype('uint32').reshape(global_shape)

    def f(arr):
      return array.make_array_from_callback(arr.shape, sharding, lambda i: arr[i])

    out = f(global_x)
    self.assertEqual(out.shape, global_shape)

    msg = "jax.make_array_from_callback cannot be called within a traced context"
    with self.assertRaisesRegex(jax.errors.UnexpectedTracerError, msg):
      jax.jit(f)(global_x)

  def test_make_array_from_single_device_arrays_error(self):
    # Tracers are not concrete arrays, so the traced call must raise.
    x = jnp.arange(10)
    sharding = x.sharding

    def f(x):
      return jax.make_array_from_single_device_arrays(x.shape, sharding, [x])

    msg = "jax.make_array_from_single_device_arrays requires a list of concrete arrays"
    with self.assertRaisesRegex(ValueError, msg):
      jax.jit(f)(x)

  def test_make_array_from_single_device_arrays_nonlist_error(self):
    # Passing a bare array instead of a list/tuple of arrays must raise.
    x = jnp.arange(10)
    sharding = x.sharding

    def f(x):
      return jax.make_array_from_single_device_arrays(x.shape, sharding, x)

    msg = "jax.make_array_from_single_device_arrays `arrays` argument"
    with self.assertRaisesRegex(TypeError, msg):
      jax.jit(f)(x)

  def test_make_array_from_single_device_arrays_tuple(self):
    # A tuple of per-device arrays is accepted, not just a list.
    mesh = jtu.create_mesh((2, 2), ('x', 'y'))
    shape = (8, 8)
    s = jax.sharding.NamedSharding(mesh, P('x', 'y'))
    inp_data = np.arange(math.prod(shape)).reshape(shape)

    arrays = tuple(
        jax.device_put(inp_data[index], d)
        for d, index in s.addressable_devices_indices_map(shape).items())

    jax.make_array_from_single_device_arrays(shape, s, arrays)  # doesn't crash
  def test_make_array_from_single_device_arrays_bad_inputs(self):
    # Each input array must be single-shard; a 2-shard input must raise.
    x = jnp.arange(10)
    mesh = jtu.create_mesh((2,), ('x',))
    s = jax.sharding.NamedSharding(mesh, P('x'))
    x = jax.device_put(x, s)

    msg = ("When making an array from single-device arrays the input arrays "
           "must have one shard each. An argument array had 2 shard\\(s\\).")
    with self.assertRaisesRegex(ValueError, msg):
      jax.make_array_from_single_device_arrays(x.shape, s, [x, x])

  def test_gspmd_sharding_hash_eq(self):
    # On a trivial 1x1x1 mesh the tiled sharding is replicated, so it must
    # compare and hash equal to the canonical replicated GSPMDSharding.
    mesh = jtu.create_mesh((1, 1, 1), ('x', 'y', 'z'))
    ns = NamedSharding(mesh, P('x', 'y', 'z'))

    x1 = GSPMDSharding(mesh._flat_devices_tuple, ns._to_xla_hlo_sharding(3))
    x2 = GSPMDSharding.get_replicated(mesh._flat_devices_tuple)

    self.assertEqual(x1, x2)
    self.assertEqual(hash(x1), hash(x2))

  def test_device_attr(self):
    # For single-device arrays, x.device returns the device
    x = jnp.ones((2, 10))
    self.assertEqual(x.device, list(x.devices())[0])

    # For sharded arrays, x.device returns the sharding
    mesh = jtu.create_mesh((2,), ('x',))
    sharding = jax.sharding.NamedSharding(mesh, P('x'))
    x = jax.device_put(x, sharding)
    self.assertEqual(x.device, sharding)

  def test_to_device(self):
    # to_device accepts either a Device or a Sharding.
    device = jax.devices()[-1]
    mesh = jtu.create_mesh((2,), ('x',))
    sharding = jax.sharding.NamedSharding(mesh, P('x'))

    x = jnp.ones((2, 10))

    x_device = x.to_device(device)
    x_sharding = x.to_device(sharding)

    self.assertEqual(x_device.device, device)
    self.assertEqual(x_sharding.device, sharding)

  def test_mesh_with_axis_name_none(self):
    with self.assertRaisesRegex(ValueError, 'Mesh axis names cannot be None.'):
      jax.sharding.Mesh(jax.devices(), (None, 'x'))

  def test_mesh_axis_types_mismatch(self):
    # axis_types must match axis names in count and be actual AxisType
    # members (not strings), for both Mesh and AbstractMesh.
    with self.assertRaisesRegex(
        ValueError,
        'Number of axis names should match the number of axis_types'):
      jtu.create_mesh((2, 1), ('x', 'y'),
                      axis_types=jax.sharding.AxisType.Auto)

    with self.assertRaisesRegex(
        ValueError,
        'Number of axis names should match the number of axis_types'):
      jax.sharding.AbstractMesh((2, 1), ('x', 'y'),
                                axis_types=jax.sharding.AxisType.Auto)

    with self.assertRaisesRegex(TypeError, "axis_types.*must be of type"):
      AbstractMesh((2,), ('x',), axis_types=("explicit",))

    with self.assertRaisesRegex(TypeError, "axis_types.*must be of type"):
      AbstractMesh((2,), ('x',), axis_types="explicit")

    with self.assertRaisesRegex(TypeError, "axis_types.*must be of type"):
      AbstractMesh((2, 2), ('x', 'y'),
                   axis_types=("explicit", AxisType.Explicit))

  def test_make_mesh_axis_types(self):
    # axis_types participate in equality/hash and are exposed per-kind via
    # explicit_axes / auto_axes / manual_axes.
    Auto, Explicit, Manual = AxisType.Auto, AxisType.Explicit, AxisType.Manual

    mesh1 = jax.sharding.AbstractMesh((2,), 'x', axis_types=Auto)
    mesh2 = jax.sharding.AbstractMesh((2,), 'x', axis_types=Auto)
    self.assertEqual(mesh1, mesh2)

    if deprecations.is_accelerated('jax-make-mesh-default-explicit'):
      mesh = jax.make_mesh((1, 1), ('x', 'y'))
      self.assertTupleEqual(mesh.axis_types, (AxisType.Explicit,) * 2)
    else:
      mesh = jax.make_mesh((1, 1), ('x', 'y'),
                           axis_types=(AxisType.Explicit,) * 2)
      self.assertTupleEqual(mesh.axis_types, (AxisType.Explicit,) * 2)

    mesh = jax.make_mesh((1, 1, 1), ('x', 'y', 'z'),
                         axis_types=(Explicit, Auto, Manual))
    self.assertEqual(mesh.explicit_axes, ('x',))
    self.assertEqual(mesh.auto_axes, ('y',))
    self.assertEqual(mesh.manual_axes, ('z',))

    with self.assertRaisesRegex(
        ValueError,
        'Number of axis names should match the number of axis_types'):
      jax.make_mesh((1, 1), ('data', 'model'), axis_types=Explicit)

    mesh1 = jax.make_mesh((1, 1, 1, 1, 1), ('a', 'b', 'c', 'd', 'e'),
                          axis_types=(Explicit, Auto, Auto, Explicit, Explicit))
    mesh2 = jax.make_mesh((1, 1, 1, 1, 1), ('a', 'b', 'c', 'd', 'e'),
                          axis_types=(Explicit, Auto, Auto, Explicit, Auto))
    self.assertNotEqual(mesh1, mesh2)
    self.assertNotEqual(hash(mesh1), hash(mesh2))
  def test_memory_kind_with_abstract_mesh(self):
    # NamedSharding on an AbstractMesh accepts a valid memory kind, defaults
    # to None, and rejects unknown kinds.
    abstract_mesh = AbstractMesh((2,), ('x',))
    ns = NamedSharding(abstract_mesh, P(), memory_kind='pinned_host')
    self.assertEqual(ns.memory_kind, 'pinned_host')

    ns = NamedSharding(abstract_mesh, P())
    self.assertIsNone(ns.memory_kind)

    with self.assertRaisesRegex(
        ValueError, 'Got invalid memory kind'):
      NamedSharding(abstract_mesh, P(), memory_kind='weird_device')

  def test_pspec_mix_axis_types(self):
    # When a spec mixes axis types, aval.update keeps only the Explicit
    # axes in the resulting spec; Manual cannot share a tuple entry with
    # Auto/Explicit.
    mesh = AbstractMesh(
        (2, 2, 2, 2), ('a', 'b', 'c', 'd'),
        axis_types=(AxisType.Explicit, AxisType.Explicit, AxisType.Auto,
                    AxisType.Manual))
    aval = jax.core.ShapedArray((16, 8, 4, 2), np.float32)

    out = aval.update(sharding=NamedSharding(mesh, P(('a', 'b', 'c'), 'd')))
    self.assertEqual(out.sharding.spec, P(('a', 'b'), None, None, None))

    out = aval.update(sharding=NamedSharding(mesh, P(('a', 'c'), 'b', 'd')))
    self.assertEqual(out.sharding.spec, P('a', 'b', None, None))

    out = aval.update(sharding=NamedSharding(mesh, P(('a', 'b'), 'c', 'd')))
    self.assertEqual(out.sharding.spec, P(('a', 'b'), None, None, None))

    with self.assertRaisesRegex(
        ValueError,
        'Tuple subset of `PartitionSpec` cannot contain `Manual` mixed with'
        ' `Auto` or `Explicit`'):
      aval.update(sharding=NamedSharding(mesh, P(('a', 'd'), 'b', 'c')))

  def test_aval_str_short(self):
    # str_short renders vma (V:), unreduced (U:) and reduced (R:) suffixes,
    # and omits the braces entirely when there is nothing to show.
    mesh = AbstractMesh(
        (2, 2, 2), ('a', 'b', 'c'),
        axis_types=(AxisType.Explicit, AxisType.Explicit, AxisType.Manual))

    s = NamedSharding(mesh, P(unreduced={'a'}, reduced={'b'}))
    aval = jax.core.ShapedArray((1, 1, 1, 1), np.float32, sharding=s,
                                vma=frozenset('c'))
    self.assertEqual(aval.str_short(True), 'f32[1,1,1,1]{V:c, U:a, R:b}')

    s = NamedSharding(mesh, P(unreduced={'a'}))
    aval = jax.core.ShapedArray((1, 1, 1, 1), np.float32, sharding=s,
                                vma=frozenset('c'))
    self.assertEqual(aval.str_short(True), 'f32[1,1,1,1]{V:c, U:a}')

    s = NamedSharding(mesh, P(unreduced={'a'}))
    aval = jax.core.ShapedArray((1, 1, 1, 1), np.float32, sharding=s)
    self.assertEqual(aval.str_short(True), 'f32[1,1,1,1]{U:a}')

    s = NamedSharding(mesh, P())
    aval = jax.core.ShapedArray((1, 1, 1, 1), np.float32, sharding=s,
                                vma=frozenset('c'))
    self.assertEqual(aval.str_short(True), 'f32[1,1,1,1]{V:c}')

    aval = jax.core.ShapedArray((1, 1, 1, 1), np.float32)
    self.assertEqual(aval.str_short(True), 'f32[1,1,1,1]')

  def test_modify_spec_auto_unreduced(self):
    # modify_spec_for_auto_manual drops non-Explicit axes from the
    # unreduced/reduced sets ('c' is Auto here).
    mesh = AbstractMesh(
        (2, 2, 2), ('a', 'b', 'c'),
        axis_types=(AxisType.Explicit, AxisType.Explicit, AxisType.Auto))

    spec = P(unreduced={'a', 'b', 'c'})
    out = core.modify_spec_for_auto_manual(spec, mesh)
    self.assertEqual(out, P(unreduced={'a', 'b'}))

    spec = P(reduced={'a', 'b', 'c'})
    out = core.modify_spec_for_auto_manual(spec, mesh)
    self.assertEqual(out, P(reduced={'a', 'b'}))

    spec = P(unreduced={'a', 'b'}, reduced={'c'})
    out = core.modify_spec_for_auto_manual(spec, mesh)
    self.assertEqual(out, P(unreduced={'a', 'b'}))

    spec = P(unreduced={'a', 'c'}, reduced={'b'})
    out = core.modify_spec_for_auto_manual(spec, mesh)
    self.assertEqual(out, P(unreduced={'a'}, reduced={'b'}))

    spec = P(unreduced={'c'}, reduced={'a', 'b'})
    out = core.modify_spec_for_auto_manual(spec, mesh)
    self.assertEqual(out, P(reduced={'a', 'b'}))

  def test_pspec_unreduced(self):
    # repr/equality/concatenation semantics of unreduced & reduced sets,
    # plus the errors for mixing them with plain tuples or partitions.
    pspec = P('a', 'b', None, unreduced={'c'}, reduced={'d'})
    self.assertEqual(
        repr(pspec),
        "PartitionSpec('a', 'b', None, unreduced={'c'}, reduced={'d'})")

    pspec1 = P('a', 'b', None, unreduced={'c'})
    self.assertEqual(repr(pspec1),
                     "PartitionSpec('a', 'b', None, unreduced={'c'})")

    pspec2 = P('a', 'b', None, unreduced={'c'})
    self.assertEqual(pspec1, pspec2)

    pspec3 = P('a', 'b', None, unreduced={'d'})
    self.assertNotEqual(pspec1, pspec3)

    out = P('x', unreduced={'z'}) + P('a', unreduced={'b'})
    self.assertEqual(out, P('x', 'a', unreduced={'z', 'b'}))

    pspec4 = P('x', unreduced={'y'})
    self.assertEqual(repr(pspec4),
                     "PartitionSpec('x', unreduced={'y'})")

    pspec5 = P(None, None, unreduced={'x'})
    self.assertEqual(repr(pspec5),
                     "PartitionSpec(None, None, unreduced={'x'})")

    pspec6 = P(None, unreduced={'x'})
    self.assertEqual(repr(pspec6), "PartitionSpec(None, unreduced={'x'})")

    pspec7 = P(unreduced={'x'})
    self.assertEqual(repr(pspec7), "PartitionSpec(unreduced={'x'})")

    with self.assertRaisesRegex(
        TypeError, 'unreduced in `__add__` of PartitionSpec'):
      P('x', unreduced={'z'}) + (None,) * 2

    with self.assertRaisesRegex(
        TypeError, "unreduced in `__radd__` of PartitionSpec"):
      (None,) * 2 + P('x', unreduced={'y'})

    with self.assertRaisesRegex(
        ValueError, "partitions cannot overlap with unreduced"):
      P('x', 'y', unreduced={'x'})

    with self.assertRaisesRegex(
        ValueError, "partitions cannot overlap with unreduced"):
      P('x', None, 'y', unreduced={'z', 'y'})

  def test_named_sharding_unreduced_error(self):
    # Unreduced axes must exist in the mesh, be Explicit, and exclude None.
    mesh = jtu.create_mesh((1, 1, 1), ('x', 'y', 'z'))

    with self.assertRaisesRegex(
        ValueError, "Unreduced axes.*not found in mesh.*"):
      NamedSharding(mesh, P('x', unreduced={'a'}))

    with self.assertRaisesRegex(
        ValueError, "Unreduced axes can only refer to mesh axes.*Explicit"):
      NamedSharding(mesh, P('x', unreduced={'y', 'z'}))

    with self.assertRaisesRegex(
        ValueError, "unreduced cannot contain None.*"):
      NamedSharding(mesh, P('x', unreduced={'y', None}))

  def test_hlo_sharding_get_axis_sizes(self):
    # Iota tile assignments expose their factored axis sizes.
    op = xc.OpSharding()
    op.type = xc.OpSharding.Type.OTHER
    op.tile_assignment_dimensions = [6, 35]
    op.iota_reshape_dims = [7, 10, 3]
    op.iota_transpose_perm = [2, 1, 0]
    s = GSPMDSharding(jax.devices(), op)
    self.assertIn('{devices=[6,35]<=[7,10,3]T(2,1,0)}', repr(s))
    self.assertEqual(s._to_xla_hlo_sharding(2).get_axis_sizes(), [7, 2, 5, 3])
@parameterized.named_parameters(
('2d_mesh_x_y', (4, 2), P('x', 'y')),
('2d_mesh_x', (4, 2), P('x')),
('2d_mesh_y', (4, 2), P('y')),
('2d_mesh_none_y', (4, 2), P(None, 'y')),
('2d_mesh_none_x', (4, 2), P(None, 'x')),
('2d_mesh_xy', (4, 2), P(('x', 'y'))),
('2d_mesh_none_xy', (4, 2), P(None, ('x', 'y'))),
('2d_mesh_fully_replicated', (4, 2), P()),
('2d_mesh_x_none', (2, 1), P(('x',), None)),
('3d_mesh_none_none_z', (2, 2, 2), P(None, None, 'z')),
('3d_mesh_none_y_none', (2, 2, 2), P(None, 'y', None)),
('3d_mesh_x_y_none', (2, 2, 2), P('x', 'y', None)),
('3d_mesh_none_yz', (2, 2, 2), P(None, ('y', 'z'))),
('3d_mesh_x_none_yz', (2, 2, 2), P('x', None, ('y', 'z'))),
('3d_mesh_none_x_yz', (2, 2, 2), P(None, 'x', ('y', 'z'))),
('3d_mesh_xy_z', (2, 2, 2), P(('x', 'y'), 'z')),
('3d_mesh_xy_none_z', (2, 2, 2), P(('x', 'y'), None, 'z')),
('3d_mesh_x_y_z', (2, 2, 2), P('x', 'y', 'z')),
('3d_mesh_xz_y', (2, 2, 2), P(('x', 'z'), 'y')),
('3d_mesh_xz_none_y', (2, 2, 2), P(('x', 'z'), None, 'y')),
('3d_mesh_y_none_xz', (2, 2, 2), P('y', None, ('x', 'z'))),
('3d_mesh_none_y_xz', (2, 2, 2), P(None, 'y', ('x', 'z'))),
('3d_mesh2_none_none_z', (1, 2, 4), P(None, None, 'z')),
('3d_mesh2_x_none_none', (1, 2, 4), P('x', None, None)),
('3d_mesh_x_none_none', (2, 1, 1), P('x', None, None)),
)
def test_gspmd_sharding_shardy_lowering(self, mesh_shape, pspec):
ndim = len(mesh_shape)
mesh = jtu.create_mesh(
mesh_shape, ('x', 'y') if ndim == 2 else ('x', 'y', 'z')
)
ns = jax.sharding.NamedSharding(mesh, pspec)
gs = GSPMDSharding(ns._device_assignment, ns._to_xla_hlo_sharding(ndim))
out_sdy_sharding = gs._to_sdy_sharding(ndim)
self.assertTrue(out_sdy_sharding, ns._to_sdy_sharding(ndim))
  def test_nested_tuple_pspec_error(self):
    # Tuples inside a PartitionSpec are one level deep only.
    with self.assertRaisesRegex(
        ValueError,
        "A tuple inside PartitionSpec cannot contain a nested tuple"):
      jax.P('x', 'y', ('z', ('a',)))

    with self.assertRaisesRegex(
        ValueError,
        "A tuple inside PartitionSpec cannot contain a nested tuple"):
      jax.P((('a', 'b'), 'c'))

  def test_make_mesh_accelerate_explicit(self):
    # Once the deprecation is accelerated, make_mesh defaults to Explicit
    # axis types; until then it warns and defaults to Auto.
    if deprecations.is_accelerated('jax-make-mesh-default-explicit'):
      mesh = jax.make_mesh((1,), 'x')
      self.assertTupleEqual(mesh.axis_types, (AxisType.Explicit,))
    else:
      with self.assertWarnsRegex(DeprecationWarning, "The default axis_types"):
        mesh = jax.make_mesh((1,), 'x')
      self.assertTupleEqual(mesh.axis_types, (AxisType.Auto,))
| ShardingTest |
python | spyder-ide__spyder | spyder/plugins/explorer/widgets/fileassociations.py | {
"start": 9900,
"end": 21600
} | class ____(QWidget):
"""Widget to add applications association to file extensions."""
# This allows validating a single extension entry or a list of comma
# separated values (eg `*.json` or `*.json,*.txt,MANIFEST.in`)
_EXTENSIONS_LIST_REGEX = (r'(?:(?:\*{1,1}|\w+)\.\w+)'
r'(?:,(?:\*{1,1}|\w+)\.\w+){0,20}')
sig_data_changed = Signal(dict)
    def __init__(self, parent=None):
        """Widget to add applications association to file extensions."""
        super().__init__(parent=parent)

        # Variables
        self._data = {}  # {'*.ext': [(app_name, fpath), ...]}
        self._dlg_applications = None  # Created lazily in add_application.
        self._dlg_input = None  # Created once in _create_association_dialog.
        self._regex = re.compile(self._EXTENSIONS_LIST_REGEX)

        # Widgets
        self.label = QLabel(
            _("Here you can associate different external applications "
              "to open specific file extensions (e.g. .txt "
              "files with Notepad++ or .csv files with Excel).")
        )
        self.label.setWordWrap(True)
        self.label_extensions = QLabel(_('File types:'))
        self.list_extensions = QListWidget()
        self.button_add = QPushButton(_('Add'))
        self.button_remove = QPushButton(_('Remove'))
        self.button_edit = QPushButton(_('Edit'))
        self.label_applications = QLabel(_('Associated applications:'))
        self.list_applications = QListWidget()
        self.button_add_application = QPushButton(_('Add'))
        self.button_remove_application = QPushButton(_('Remove'))
        self.button_default = QPushButton(_('Set default'))

        # Layout: extensions list (left) with its buttons (right), then the
        # applications list with its buttons below it.
        layout_extensions = QHBoxLayout()
        layout_extensions.addWidget(self.list_extensions, 4)

        layout_buttons_extensions = QVBoxLayout()
        layout_buttons_extensions.addWidget(self.button_add)
        layout_buttons_extensions.addWidget(self.button_remove)
        layout_buttons_extensions.addWidget(self.button_edit)
        layout_buttons_extensions.addStretch()

        layout_applications = QHBoxLayout()
        layout_applications.addWidget(self.list_applications, 4)

        layout_buttons_applications = QVBoxLayout()
        layout_buttons_applications.addWidget(self.button_add_application)
        layout_buttons_applications.addWidget(self.button_remove_application)
        layout_buttons_applications.addWidget(self.button_default)
        layout_buttons_applications.addStretch()

        layout_extensions.addLayout(layout_buttons_extensions, 2)
        layout_applications.addLayout(layout_buttons_applications, 2)

        layout = QVBoxLayout()
        layout.addWidget(self.label)
        layout.addWidget(self.label_extensions)
        layout.addLayout(layout_extensions)
        layout.addSpacing(9)
        layout.addWidget(self.label_applications)
        layout.addLayout(layout_applications)
        layout.addSpacing(9)
        self.setLayout(layout)

        # Signals
        self.button_add.clicked.connect(self.add_association)
        self.button_remove.clicked.connect(self.remove_association)
        self.button_edit.clicked.connect(self.edit_association)
        self.button_add_application.clicked.connect(self.add_application)
        self.button_remove_application.clicked.connect(
            self.remove_application)
        self.button_default.clicked.connect(self.set_default_application)
        self.list_extensions.currentRowChanged.connect(self.update_extensions)
        self.list_extensions.itemDoubleClicked.connect(self.edit_association)
        self.list_applications.currentRowChanged.connect(
            self.update_applications)

        self._refresh()
        self._create_association_dialog()
    def _refresh(self):
        """Refresh the status of buttons on widget."""
        self.setUpdatesEnabled(False)
        # Start from everything disabled; re-enable based on the selection.
        for widget in [self.button_remove, self.button_add_application,
                       self.button_edit,
                       self.button_remove_application, self.button_default]:
            widget.setDisabled(True)

        item = self.list_extensions.currentItem()
        if item:
            for widget in [self.button_remove, self.button_add_application,
                           self.button_remove_application, self.button_edit]:
                widget.setDisabled(False)
        self.update_applications()
        self.setUpdatesEnabled(True)
    def _add_association(self, value):
        """Add association helper."""
        # Check value is not present already
        for row in range(self.list_extensions.count()):
            item = self.list_extensions.item(row)
            if item.text().strip() == value.strip():
                break
        else:
            # for/else: only runs when no duplicate was found above.
            item = QListWidgetItem(value)
            self.list_extensions.addItem(item)
            self.list_extensions.setCurrentItem(item)
        self._refresh()
    def _add_application(self, app_name, fpath):
        """Add application helper."""
        app_not_found_text = _(' (Application not found!)')
        for row in range(self.list_applications.count()):
            item = self.list_applications.item(row)
            # Ensure the actual name is checked without the `app not found`
            # additional text, in case app was not found
            item_text = item.text().replace(app_not_found_text, '').strip()
            if item and item_text == app_name:
                break
        else:
            # for/else: no duplicate found, so add a new entry.
            icon = get_application_icon(fpath)
            if not (os.path.isfile(fpath) or os.path.isdir(fpath)):
                app_name += app_not_found_text

            item = QListWidgetItem(icon, app_name)
            self.list_applications.addItem(item)
            self.list_applications.setCurrentItem(item)

        # Mark the (possibly pre-existing) entry if its path is missing.
        if not (os.path.isfile(fpath) or os.path.isdir(fpath)):
            item.setToolTip(_('Application not found!'))
    def _update_extensions(self):
        """Rebuild the extensions list widget from ``self._data``."""
        self.list_extensions.clear()
        # Keep the list sorted by extension.
        for extension, __ in sorted(self._data.items()):
            self._add_association(extension)

        # Select first item
        self.list_extensions.setCurrentRow(0)
        self.update_extensions()
        self.update_applications()
    def _create_association_dialog(self):
        """Create input extension dialog and save it to for reuse."""
        self._dlg_input = InputTextDialog(
            self,
            title=_('File association'),
            label=(
                _('Enter new file extension. You can add several values '
                  'separated by commas.<br>Examples include:')
                + '<ul><li><code>*.txt</code></li>'
                + '<li><code>*.json,*.csv</code></li>'
                + '<li><code>*.json,README.md</code></li></ul>'
            ),
        )
        # The dialog only accepts text matching the extensions-list regex.
        self._dlg_input.set_regex_validation(self._EXTENSIONS_LIST_REGEX)
def load_values(self, data=None):
"""
Load file associations data.
Format {'*.ext': [['Application Name', '/path/to/app/executable']]}
`/path/to/app/executable` is an executable app on mac and windows and
a .desktop xdg file on linux.
"""
self._data = {} if data is None else data
self._update_extensions()
    @Slot()
    def add_association(self, value=None):
        """Add extension file association."""
        # Qt passes the button's `checked` state (a bool) when triggered from
        # the UI, so both None and bool mean "prompt the user"; a string
        # means a programmatic call with the value to add.
        if value is None or isinstance(value, bool):
            text, ok_pressed = '', False
            self._dlg_input.set_text('')

            if self._dlg_input.exec_():
                text = self._dlg_input.text()
                ok_pressed = True
        else:
            # Programmatic value: validate it against the extensions regex.
            match = self._regex.match(value)
            text, ok_pressed = value, bool(match)

        if ok_pressed:
            if text not in self._data:
                self._data[text] = []
            self._add_association(text)
            self.check_data_changed()
def remove_association(self):
"""Remove extension file association."""
if self._data:
if self.current_extension:
self._data.pop(self.current_extension)
self._update_extensions()
self._refresh()
self.check_data_changed()
def edit_association(self):
"""Edit text of current selected association."""
old_text = self.current_extension
self._dlg_input.set_text(old_text)
if self._dlg_input.exec_():
new_text = self._dlg_input.text()
if old_text != new_text:
values = self._data.pop(self.current_extension)
self._data[new_text] = values
self._update_extensions()
self._refresh()
for row in range(self.list_extensions.count()):
item = self.list_extensions.item(row)
if item.text() == new_text:
self.list_extensions.setCurrentItem(item)
break
self.check_data_changed()
def add_application(self):
"""Remove application to selected extension."""
if self.current_extension:
if self._dlg_applications is None:
self._dlg_applications = ApplicationsDialog(self)
self._dlg_applications.set_extension(self.current_extension)
if self._dlg_applications.exec_():
app_name = self._dlg_applications.application_name
fpath = self._dlg_applications.application_path
self._data[self.current_extension].append((app_name, fpath))
self._add_application(app_name, fpath)
self.check_data_changed()
def remove_application(self):
"""Remove application from selected extension."""
current_row = self.list_applications.currentRow()
values = self._data.get(self.current_extension)
if values and current_row != -1:
values.pop(current_row)
self.update_extensions()
self.update_applications()
self.check_data_changed()
def set_default_application(self):
"""
Set the selected item on the application list as default application.
"""
current_row = self.list_applications.currentRow()
if current_row != -1:
values = self._data[self.current_extension]
value = values.pop(current_row)
values.insert(0, value)
self._data[self.current_extension] = values
self.update_extensions()
self.check_data_changed()
def update_extensions(self, row=None):
"""Update extensiosn list after additions or deletions."""
self.list_applications.clear()
for extension, values in self._data.items():
if extension.strip() == self.current_extension:
for (app_name, fpath) in values:
self._add_application(app_name, fpath)
break
self.list_applications.setCurrentRow(0)
self._refresh()
def update_applications(self, row=None):
"""Update application list after additions or deletions."""
current_row = self.list_applications.currentRow()
self.button_default.setEnabled(current_row != 0)
def check_data_changed(self):
"""Check if data has changed and emit signal as needed."""
self.sig_data_changed.emit(self._data)
@property
def current_extension(self):
"""Return the current selected extension text."""
item = self.list_extensions.currentItem()
if item:
return item.text()
@property
def data(self):
"""Return the current file associations data."""
return self._data.copy()
| FileAssociationsWidget |
python | encode__django-rest-framework | tests/authentication/test_authentication.py | {
"start": 23081,
"end": 23595
} | class ____(TestCase):
def setUp(self):
self.username = 'john'
self.email = 'lennon@thebeatles.com'
self.password = 'password'
self.user = User.objects.create_user(
self.username, self.email, self.password
)
def test_remote_user_works(self):
response = self.client.post('/remote-user/',
REMOTE_USER=self.username)
self.assertEqual(response.status_code, status.HTTP_200_OK)
| RemoteUserAuthenticationUnitTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol20.py | {
"start": 218,
"end": 411
} | class ____:
@classmethod
def test(cls: type[T1]) -> T1:
return cls()
reveal_type(Sample.test(), expected_text="Sample")
reveal_type(Sample().test(), expected_text="Sample")
| Sample |
python | sphinx-doc__sphinx | tests/test_util/test_util_typing.py | {
"start": 1756,
"end": 1790
} | class ____(List[T]):
pass
| MyList |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/auto-table.py | {
"start": 2454,
"end": 3078
} | class ____(LabeledBox):
DEFAULT_CSS = """
#sidebar-status {
height: auto;
border-bottom: dashed #632CA6;
}
#sidebar-options {
height: 1fr;
}
"""
def __init__(self):
self.__status = Label("ok")
self.__options = Vertical()
super().__init__(
"",
Container(self.__status, id="sidebar-status"),
Container(self.__options, id="sidebar-options"),
)
@property
def status(self) -> Label:
return self.__status
@property
def options(self) -> Vertical:
return self.__options
| Sidebar |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 63883,
"end": 64158
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('placementSize', c_uint),
('count', c_uint),
('placementIds', POINTER(c_uint)),
('mode', c_uint),
]
VgpuPlacementList_v2 = 0x2000020
| c_nvmlVgpuPlacementList_v2_t |
python | plotly__plotly.py | plotly/graph_objs/sankey/_domain.py | {
"start": 233,
"end": 4988
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "sankey"
_path_str = "sankey.domain"
_valid_props = {"column", "row", "x", "y"}
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this sankey trace .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this sankey trace .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
@property
def x(self):
"""
Sets the horizontal domain of this sankey trace (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
Sets the vertical domain of this sankey trace (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this sankey trace .
row
If there is a layout grid, use the domain for this row
in the grid for this sankey trace .
x
Sets the horizontal domain of this sankey trace (in
plot fraction).
y
Sets the vertical domain of this sankey trace (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.sankey.Domain`
column
If there is a layout grid, use the domain for this
column in the grid for this sankey trace .
row
If there is a layout grid, use the domain for this row
in the grid for this sankey trace .
x
Sets the horizontal domain of this sankey trace (in
plot fraction).
y
Sets the vertical domain of this sankey trace (in plot
fraction).
Returns
-------
Domain
"""
super().__init__("domain")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.sankey.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.sankey.Domain`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("column", arg, column)
self._set_property("row", arg, row)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Domain |
python | falconry__falcon | tests/test_sinks.py | {
"start": 75,
"end": 149
} | class ____:
def forward(self, req):
return falcon.HTTP_503
| Proxy |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/input/win32.py | {
"start": 4064,
"end": 20476
} | class ____:
"""
:param recognize_paste: When True, try to discover paste actions and turn
the event into a BracketedPaste.
"""
# Keys with character data.
mappings = {
b"\x1b": Keys.Escape,
b"\x00": Keys.ControlSpace, # Control-Space (Also for Ctrl-@)
b"\x01": Keys.ControlA, # Control-A (home)
b"\x02": Keys.ControlB, # Control-B (emacs cursor left)
b"\x03": Keys.ControlC, # Control-C (interrupt)
b"\x04": Keys.ControlD, # Control-D (exit)
b"\x05": Keys.ControlE, # Control-E (end)
b"\x06": Keys.ControlF, # Control-F (cursor forward)
b"\x07": Keys.ControlG, # Control-G
b"\x08": Keys.ControlH, # Control-H (8) (Identical to '\b')
b"\x09": Keys.ControlI, # Control-I (9) (Identical to '\t')
b"\x0a": Keys.ControlJ, # Control-J (10) (Identical to '\n')
b"\x0b": Keys.ControlK, # Control-K (delete until end of line; vertical tab)
b"\x0c": Keys.ControlL, # Control-L (clear; form feed)
b"\x0d": Keys.ControlM, # Control-M (enter)
b"\x0e": Keys.ControlN, # Control-N (14) (history forward)
b"\x0f": Keys.ControlO, # Control-O (15)
b"\x10": Keys.ControlP, # Control-P (16) (history back)
b"\x11": Keys.ControlQ, # Control-Q
b"\x12": Keys.ControlR, # Control-R (18) (reverse search)
b"\x13": Keys.ControlS, # Control-S (19) (forward search)
b"\x14": Keys.ControlT, # Control-T
b"\x15": Keys.ControlU, # Control-U
b"\x16": Keys.ControlV, # Control-V
b"\x17": Keys.ControlW, # Control-W
b"\x18": Keys.ControlX, # Control-X
b"\x19": Keys.ControlY, # Control-Y (25)
b"\x1a": Keys.ControlZ, # Control-Z
b"\x1c": Keys.ControlBackslash, # Both Control-\ and Ctrl-|
b"\x1d": Keys.ControlSquareClose, # Control-]
b"\x1e": Keys.ControlCircumflex, # Control-^
b"\x1f": Keys.ControlUnderscore, # Control-underscore (Also for Ctrl-hyphen.)
b"\x7f": Keys.Backspace, # (127) Backspace (ASCII Delete.)
}
# Keys that don't carry character data.
keycodes = {
# Home/End
33: Keys.PageUp,
34: Keys.PageDown,
35: Keys.End,
36: Keys.Home,
# Arrows
37: Keys.Left,
38: Keys.Up,
39: Keys.Right,
40: Keys.Down,
45: Keys.Insert,
46: Keys.Delete,
# F-keys.
112: Keys.F1,
113: Keys.F2,
114: Keys.F3,
115: Keys.F4,
116: Keys.F5,
117: Keys.F6,
118: Keys.F7,
119: Keys.F8,
120: Keys.F9,
121: Keys.F10,
122: Keys.F11,
123: Keys.F12,
}
LEFT_ALT_PRESSED = 0x0002
RIGHT_ALT_PRESSED = 0x0001
SHIFT_PRESSED = 0x0010
LEFT_CTRL_PRESSED = 0x0008
RIGHT_CTRL_PRESSED = 0x0004
def __init__(self, recognize_paste: bool = True) -> None:
self._fdcon = None
self.recognize_paste = recognize_paste
# When stdin is a tty, use that handle, otherwise, create a handle from
# CONIN$.
self.handle: HANDLE
if sys.stdin.isatty():
self.handle = HANDLE(windll.kernel32.GetStdHandle(STD_INPUT_HANDLE))
else:
self._fdcon = os.open("CONIN$", os.O_RDWR | os.O_BINARY)
self.handle = HANDLE(msvcrt.get_osfhandle(self._fdcon))
def close(self) -> None:
"Close fdcon."
if self._fdcon is not None:
os.close(self._fdcon)
def read(self) -> Iterable[KeyPress]:
"""
Return a list of `KeyPress` instances. It won't return anything when
there was nothing to read. (This function doesn't block.)
http://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx
"""
max_count = 2048 # Max events to read at the same time.
read = DWORD(0)
arrtype = INPUT_RECORD * max_count
input_records = arrtype()
# Check whether there is some input to read. `ReadConsoleInputW` would
# block otherwise.
# (Actually, the event loop is responsible to make sure that this
# function is only called when there is something to read, but for some
# reason this happened in the asyncio_win32 loop, and it's better to be
# safe anyway.)
if not wait_for_handles([self.handle], timeout=0):
return
# Get next batch of input event.
windll.kernel32.ReadConsoleInputW(
self.handle, pointer(input_records), max_count, pointer(read)
)
# First, get all the keys from the input buffer, in order to determine
# whether we should consider this a paste event or not.
all_keys = list(self._get_keys(read, input_records))
# Fill in 'data' for key presses.
all_keys = [self._insert_key_data(key) for key in all_keys]
# Correct non-bmp characters that are passed as separate surrogate codes
all_keys = list(self._merge_paired_surrogates(all_keys))
if self.recognize_paste and self._is_paste(all_keys):
gen = iter(all_keys)
k: KeyPress | None
for k in gen:
# Pasting: if the current key consists of text or \n, turn it
# into a BracketedPaste.
data = []
while k and (
not isinstance(k.key, Keys)
or k.key in {Keys.ControlJ, Keys.ControlM}
):
data.append(k.data)
try:
k = next(gen)
except StopIteration:
k = None
if data:
yield KeyPress(Keys.BracketedPaste, "".join(data))
if k is not None:
yield k
else:
yield from all_keys
def flush_keys(self) -> list[KeyPress]:
# Method only needed for structural compatibility with `Vt100ConsoleInputReader`.
return []
def _insert_key_data(self, key_press: KeyPress) -> KeyPress:
"""
Insert KeyPress data, for vt100 compatibility.
"""
if key_press.data:
return key_press
if isinstance(key_press.key, Keys):
data = REVERSE_ANSI_SEQUENCES.get(key_press.key, "")
else:
data = ""
return KeyPress(key_press.key, data)
def _get_keys(
self, read: DWORD, input_records: Array[INPUT_RECORD]
) -> Iterator[KeyPress]:
"""
Generator that yields `KeyPress` objects from the input records.
"""
for i in range(read.value):
ir = input_records[i]
# Get the right EventType from the EVENT_RECORD.
# (For some reason the Windows console application 'cmder'
# [http://gooseberrycreative.com/cmder/] can return '0' for
# ir.EventType. -- Just ignore that.)
if ir.EventType in EventTypes:
ev = getattr(ir.Event, EventTypes[ir.EventType])
# Process if this is a key event. (We also have mouse, menu and
# focus events.)
if isinstance(ev, KEY_EVENT_RECORD) and ev.KeyDown:
yield from self._event_to_key_presses(ev)
elif isinstance(ev, MOUSE_EVENT_RECORD):
yield from self._handle_mouse(ev)
@staticmethod
def _merge_paired_surrogates(key_presses: list[KeyPress]) -> Iterator[KeyPress]:
"""
Combines consecutive KeyPresses with high and low surrogates into
single characters
"""
buffered_high_surrogate = None
for key in key_presses:
is_text = not isinstance(key.key, Keys)
is_high_surrogate = is_text and "\ud800" <= key.key <= "\udbff"
is_low_surrogate = is_text and "\udc00" <= key.key <= "\udfff"
if buffered_high_surrogate:
if is_low_surrogate:
# convert high surrogate + low surrogate to single character
fullchar = (
(buffered_high_surrogate.key + key.key)
.encode("utf-16-le", "surrogatepass")
.decode("utf-16-le")
)
key = KeyPress(fullchar, fullchar)
else:
yield buffered_high_surrogate
buffered_high_surrogate = None
if is_high_surrogate:
buffered_high_surrogate = key
else:
yield key
if buffered_high_surrogate:
yield buffered_high_surrogate
@staticmethod
def _is_paste(keys: list[KeyPress]) -> bool:
"""
Return `True` when we should consider this list of keys as a paste
event. Pasted text on windows will be turned into a
`Keys.BracketedPaste` event. (It's not 100% correct, but it is probably
the best possible way to detect pasting of text and handle that
correctly.)
"""
# Consider paste when it contains at least one newline and at least one
# other character.
text_count = 0
newline_count = 0
for k in keys:
if not isinstance(k.key, Keys):
text_count += 1
if k.key == Keys.ControlM:
newline_count += 1
return newline_count >= 1 and text_count >= 1
def _event_to_key_presses(self, ev: KEY_EVENT_RECORD) -> list[KeyPress]:
"""
For this `KEY_EVENT_RECORD`, return a list of `KeyPress` instances.
"""
assert isinstance(ev, KEY_EVENT_RECORD) and ev.KeyDown
result: KeyPress | None = None
control_key_state = ev.ControlKeyState
u_char = ev.uChar.UnicodeChar
# Use surrogatepass because u_char may be an unmatched surrogate
ascii_char = u_char.encode("utf-8", "surrogatepass")
# NOTE: We don't use `ev.uChar.AsciiChar`. That appears to be the
# unicode code point truncated to 1 byte. See also:
# https://github.com/ipython/ipython/issues/10004
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/389
if u_char == "\x00":
if ev.VirtualKeyCode in self.keycodes:
result = KeyPress(self.keycodes[ev.VirtualKeyCode], "")
else:
if ascii_char in self.mappings:
if self.mappings[ascii_char] == Keys.ControlJ:
u_char = (
"\n" # Windows sends \n, turn into \r for unix compatibility.
)
result = KeyPress(self.mappings[ascii_char], u_char)
else:
result = KeyPress(u_char, u_char)
# First we handle Shift-Control-Arrow/Home/End (need to do this first)
if (
(
control_key_state & self.LEFT_CTRL_PRESSED
or control_key_state & self.RIGHT_CTRL_PRESSED
)
and control_key_state & self.SHIFT_PRESSED
and result
):
mapping: dict[str, str] = {
Keys.Left: Keys.ControlShiftLeft,
Keys.Right: Keys.ControlShiftRight,
Keys.Up: Keys.ControlShiftUp,
Keys.Down: Keys.ControlShiftDown,
Keys.Home: Keys.ControlShiftHome,
Keys.End: Keys.ControlShiftEnd,
Keys.Insert: Keys.ControlShiftInsert,
Keys.PageUp: Keys.ControlShiftPageUp,
Keys.PageDown: Keys.ControlShiftPageDown,
}
result.key = mapping.get(result.key, result.key)
# Correctly handle Control-Arrow/Home/End and Control-Insert/Delete keys.
if (
control_key_state & self.LEFT_CTRL_PRESSED
or control_key_state & self.RIGHT_CTRL_PRESSED
) and result:
mapping = {
Keys.Left: Keys.ControlLeft,
Keys.Right: Keys.ControlRight,
Keys.Up: Keys.ControlUp,
Keys.Down: Keys.ControlDown,
Keys.Home: Keys.ControlHome,
Keys.End: Keys.ControlEnd,
Keys.Insert: Keys.ControlInsert,
Keys.Delete: Keys.ControlDelete,
Keys.PageUp: Keys.ControlPageUp,
Keys.PageDown: Keys.ControlPageDown,
}
result.key = mapping.get(result.key, result.key)
# Turn 'Tab' into 'BackTab' when shift was pressed.
# Also handle other shift-key combination
if control_key_state & self.SHIFT_PRESSED and result:
mapping = {
Keys.Tab: Keys.BackTab,
Keys.Left: Keys.ShiftLeft,
Keys.Right: Keys.ShiftRight,
Keys.Up: Keys.ShiftUp,
Keys.Down: Keys.ShiftDown,
Keys.Home: Keys.ShiftHome,
Keys.End: Keys.ShiftEnd,
Keys.Insert: Keys.ShiftInsert,
Keys.Delete: Keys.ShiftDelete,
Keys.PageUp: Keys.ShiftPageUp,
Keys.PageDown: Keys.ShiftPageDown,
}
result.key = mapping.get(result.key, result.key)
# Turn 'Space' into 'ControlSpace' when control was pressed.
if (
(
control_key_state & self.LEFT_CTRL_PRESSED
or control_key_state & self.RIGHT_CTRL_PRESSED
)
and result
and result.data == " "
):
result = KeyPress(Keys.ControlSpace, " ")
# Turn Control-Enter into META-Enter. (On a vt100 terminal, we cannot
# detect this combination. But it's really practical on Windows.)
if (
(
control_key_state & self.LEFT_CTRL_PRESSED
or control_key_state & self.RIGHT_CTRL_PRESSED
)
and result
and result.key == Keys.ControlJ
):
return [KeyPress(Keys.Escape, ""), result]
# Return result. If alt was pressed, prefix the result with an
# 'Escape' key, just like unix VT100 terminals do.
# NOTE: Only replace the left alt with escape. The right alt key often
# acts as altgr and is used in many non US keyboard layouts for
# typing some special characters, like a backslash. We don't want
# all backslashes to be prefixed with escape. (Esc-\ has a
# meaning in E-macs, for instance.)
if result:
meta_pressed = control_key_state & self.LEFT_ALT_PRESSED
if meta_pressed:
return [KeyPress(Keys.Escape, ""), result]
else:
return [result]
else:
return []
def _handle_mouse(self, ev: MOUSE_EVENT_RECORD) -> list[KeyPress]:
"""
Handle mouse events. Return a list of KeyPress instances.
"""
event_flags = ev.EventFlags
button_state = ev.ButtonState
event_type: MouseEventType | None = None
button: MouseButton = MouseButton.NONE
# Scroll events.
if event_flags & MOUSE_WHEELED:
if button_state > 0:
event_type = MouseEventType.SCROLL_UP
else:
event_type = MouseEventType.SCROLL_DOWN
else:
# Handle button state for non-scroll events.
if button_state == FROM_LEFT_1ST_BUTTON_PRESSED:
button = MouseButton.LEFT
elif button_state == RIGHTMOST_BUTTON_PRESSED:
button = MouseButton.RIGHT
# Move events.
if event_flags & MOUSE_MOVED:
event_type = MouseEventType.MOUSE_MOVE
# No key pressed anymore: mouse up.
if event_type is None:
if button_state > 0:
# Some button pressed.
event_type = MouseEventType.MOUSE_DOWN
else:
# No button pressed.
event_type = MouseEventType.MOUSE_UP
data = ";".join(
[
button.value,
event_type.value,
str(ev.MousePosition.X),
str(ev.MousePosition.Y),
]
)
return [KeyPress(Keys.WindowsMouseEvent, data)]
| ConsoleInputReader |
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 34148,
"end": 35511
} | class ____(MemoryRefTransform):
swizzle: int
def __post_init__(self):
if self.swizzle not in {32, 64, 128}:
raise ValueError(
f"Swizzle {self.swizzle} is not supported. Only 32, 64 and 128 are"
" accepted."
)
def batch(self, leading_rank: int):
return self
def undo(self, ref: pallas_core.TransformedRef) -> pallas_core.TransformedRef:
return dataclasses.replace(
ref, transforms=(*ref.transforms, UnswizzleRef(self.swizzle))
)
def to_gpu_transform(self) -> mgpu.MemRefTransform:
raise RuntimeError("SwizzleTransform does not have a GPU transform.")
def to_gpu_transform_attr(self) -> ir.Attribute:
return mgpu.dialect.SwizzleTransformAttr.get(self.swizzle)
def undo_to_gpu_transform(self) -> mgpu.MemRefTransform:
# There's no swizzle transform in mgpu right now. It's a separate arg.
raise NotImplementedError
def __call__(self, aval: jax_core.ShapedArray) -> jax_core.ShapedArray:
swizzle_elems = (self.swizzle * 8) // dtypes.itemsize_bits(aval.dtype)
if swizzle_elems != aval.shape[-1]:
raise ValueError(
f"Swizzle {self.swizzle} requires the trailing dimension to be of"
f" size {swizzle_elems}, but got shape: {aval.shape}"
)
return aval
@tree_util.register_dataclass
@dataclasses.dataclass(frozen=True)
| SwizzleTransform |
python | python-poetry__poetry | src/poetry/puzzle/provider.py | {
"start": 3138,
"end": 3795
} | class ____(ProgressIndicator):
CONTEXT: str | None = None
@staticmethod
@contextmanager
def context() -> Iterator[Callable[[str | None], None]]:
def _set_context(context: str | None) -> None:
Indicator.CONTEXT = context
yield _set_context
_set_context(None)
def _formatter_context(self) -> str:
if Indicator.CONTEXT is None:
return " "
else:
return f" <c1>{Indicator.CONTEXT}</> "
def _formatter_elapsed(self) -> str:
assert self._start_time is not None
elapsed = time.time() - self._start_time
return f"{elapsed:.1f}s"
| Indicator |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictReadOnly1.py | {
"start": 857,
"end": 974
} | class ____(F1):
# This should generate an error because it is redefined as not required.
a: NotRequired[int]
| F4 |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_base_classes_4.py | {
"start": 131,
"end": 272
} | class ____(DeclarativeBase):
__tablename__ = "birthday"
id: Mapped[int] = mapped_column(primary_key=True)
day: Mapped[date]
| Birthday |
python | pyca__cryptography | tests/hazmat/primitives/test_dsa.py | {
"start": 17000,
"end": 19818
} | class ____:
def test_dsa_verification(self, backend, subtests):
vectors = load_vectors_from_file(
os.path.join("asymmetric", "DSA", "FIPS_186-3", "SigVer.rsp"),
load_fips_dsa_sig_vectors,
)
for vector in vectors:
with subtests.test():
digest_algorithm = vector["digest_algorithm"].replace("-", "")
algorithm = _ALGORITHMS_DICT[digest_algorithm]
_skip_if_dsa_not_supported(
backend, algorithm, vector["p"], vector["q"], vector["g"]
)
public_key = dsa.DSAPublicNumbers(
parameter_numbers=dsa.DSAParameterNumbers(
vector["p"], vector["q"], vector["g"]
),
y=vector["y"],
).public_key(backend)
sig = encode_dss_signature(vector["r"], vector["s"])
if vector["result"] == "F":
with pytest.raises(InvalidSignature):
public_key.verify(sig, vector["msg"], algorithm)
else:
public_key.verify(sig, vector["msg"], algorithm)
def test_dsa_verify_invalid_asn1(self, backend):
public_key = DSA_KEY_1024.public_numbers.public_key(backend)
with pytest.raises(InvalidSignature):
public_key.verify(b"fakesig", b"fakemsg", hashes.SHA1())
def test_verify(self, backend):
message = b"one little message"
algorithm = hashes.SHA1()
private_key = DSA_KEY_1024.private_key(backend)
signature = private_key.sign(message, algorithm)
public_key = private_key.public_key()
public_key.verify(signature, message, algorithm)
def test_prehashed_verify(self, backend):
private_key = DSA_KEY_1024.private_key(backend)
message = b"one little message"
h = hashes.Hash(hashes.SHA1(), backend)
h.update(message)
digest = h.finalize()
prehashed_alg = Prehashed(hashes.SHA1())
signature = private_key.sign(message, hashes.SHA1())
public_key = private_key.public_key()
public_key.verify(signature, digest, prehashed_alg)
def test_prehashed_digest_mismatch(self, backend):
private_key = DSA_KEY_1024.private_key(backend)
public_key = private_key.public_key()
message = b"one little message"
h = hashes.Hash(hashes.SHA1(), backend)
h.update(message)
digest = h.finalize()
prehashed_alg = Prehashed(hashes.SHA224())
with pytest.raises(ValueError):
public_key.verify(b"\x00" * 128, digest, prehashed_alg)
@pytest.mark.supported(
only_if=lambda backend: backend.dsa_supported(),
skip_message="Does not support DSA.",
)
| TestDSAVerification |
python | getsentry__sentry | src/sentry/integrations/metric_alerts.py | {
"start": 2277,
"end": 13579
} | class ____(TypedDict, total=False):
alert: str
referrer: str
detection_type: str
notification_uuid: str
project_id: int | None
def logo_url() -> str:
return absolute_uri(get_asset_url("sentry", "images/sentry-email-avatar.png"))
def get_metric_count_from_incident(incident: Incident) -> float | None:
"""Returns the current or last count of an incident aggregate."""
# TODO(iamrajjoshi): Hoist FK lookup up
incident_trigger = (
IncidentTrigger.objects.filter(incident=incident).order_by("-date_modified").first()
)
if incident_trigger:
alert_rule_trigger = incident_trigger.alert_rule_trigger
# TODO: If we're relying on this and expecting possible delays between a
# trigger fired and this function running, then this could actually be
# incorrect if they changed the trigger's time window in this time period.
# Should we store it?
start = incident_trigger.date_modified - timedelta(
seconds=alert_rule_trigger.alert_rule.snuba_query.time_window
)
end = incident_trigger.date_modified
else:
start, end = None, None
organization = Organization.objects.get_from_cache(id=incident.organization_id)
project_ids = list(
IncidentProject.objects.filter(incident=incident).values_list("project_id", flat=True)
)
params = GetMetricIssueAggregatesParams(
snuba_query=incident.alert_rule.snuba_query,
date_started=incident.date_started,
current_end_date=incident.current_end_date,
organization=organization,
project_ids=project_ids,
start_arg=start,
end_arg=end,
)
return get_metric_issue_aggregates(params).get("count")
def get_incident_status_text(
snuba_query: SnubaQuery,
threshold_type: AlertRuleThresholdType | AnomalyDetectionThresholdType | None,
comparison_delta: int | None,
metric_value: str,
) -> str:
"""Returns a human readable current status of an incident"""
agg_display_key = snuba_query.aggregate
if CRASH_RATE_ALERT_AGGREGATE_ALIAS in snuba_query.aggregate:
agg_display_key = agg_display_key.split(f"AS {CRASH_RATE_ALERT_AGGREGATE_ALIAS}")[0].strip()
if is_mri_field(agg_display_key):
metric_value = format_mri_field_value(agg_display_key, metric_value)
agg_text = format_mri_field(agg_display_key)
else:
agg_text = QUERY_AGGREGATION_DISPLAY.get(agg_display_key, snuba_query.aggregate)
if agg_text.startswith("%"):
metric_and_agg_text = f"{metric_value}{agg_text}"
else:
metric_and_agg_text = f"{metric_value} {agg_text}"
time_window = snuba_query.time_window // 60
# % change alerts have a comparison delta
if comparison_delta:
metric_and_agg_text = f"{agg_text.capitalize()} {int(float(metric_value))}%"
higher_or_lower = (
"higher"
if (
threshold_type == AlertRuleThresholdType.ABOVE
or threshold_type == AnomalyDetectionThresholdType.ABOVE
)
else "lower"
)
comparison_delta_minutes = comparison_delta // 60
comparison_string = TEXT_COMPARISON_DELTA.get(
comparison_delta_minutes, f"same time {comparison_delta_minutes} minutes ago"
)
return _(
f"{metric_and_agg_text} {higher_or_lower} in the last {format_duration_idiomatic(time_window)} "
f"compared to the {comparison_string}"
)
return _(f"{metric_and_agg_text} in the last {format_duration_idiomatic(time_window)}")
def get_status_text(status: IncidentStatus) -> str:
return INCIDENT_STATUS[status]
def get_title(status: str, name: str) -> str:
return f"{status}: {name}"
def build_title_link_workflow_engine_ui(
identifier_id: int, organization: Organization, project_id: int, params: TitleLinkParams
) -> str:
"""Builds the URL for the metric issue with the given parameters."""
return organization.absolute_url(
reverse(
"sentry-group",
kwargs={
"organization_slug": organization.slug,
"project_id": project_id,
"group_id": identifier_id,
},
),
query=parse.urlencode(params),
)
def build_title_link(
identifier_id: int, organization: Organization, params: TitleLinkParams
) -> str:
"""Builds the URL for an alert rule with the given parameters."""
return organization.absolute_url(
reverse(
"sentry-metric-alert-details",
kwargs={
"organization_slug": organization.slug,
"alert_rule_id": identifier_id,
},
),
query=parse.urlencode(params),
)
def incident_attachment_info(
organization: Organization,
alert_context: AlertContext,
metric_issue_context: MetricIssueContext,
referrer: str = "metric_alert",
notification_uuid: str | None = None,
) -> AttachmentInfo:
from sentry.notifications.notification_action.utils import should_fire_workflow_actions
status = get_status_text(metric_issue_context.new_status)
text = ""
if metric_issue_context.metric_value is not None:
text = get_incident_status_text(
metric_issue_context.snuba_query,
alert_context.threshold_type,
alert_context.comparison_delta,
str(metric_issue_context.metric_value),
)
if features.has("organizations:anomaly-detection-alerts", organization):
text += f"\nThreshold: {alert_context.detection_type.title()}"
title = get_title(status, alert_context.name)
title_link_params: TitleLinkParams = {
"alert": str(metric_issue_context.open_period_identifier),
"referrer": referrer,
"detection_type": alert_context.detection_type.value,
}
if notification_uuid:
title_link_params["notification_uuid"] = notification_uuid
from sentry.incidents.grouptype import MetricIssue
# TODO(iamrajjoshi): This will need to be updated once we plan out Metric Alerts rollout
if should_fire_workflow_actions(organization, MetricIssue.type_id):
try:
alert_rule_id = AlertRuleDetector.objects.values_list("alert_rule_id", flat=True).get(
detector_id=alert_context.action_identifier_id
)
if alert_rule_id is None:
raise ValueError("Alert rule id not found when querying for AlertRuleDetector")
except AlertRuleDetector.DoesNotExist:
# the corresponding metric detector was not dual written
alert_rule_id = get_fake_id_from_object_id(alert_context.action_identifier_id)
workflow_engine_params = title_link_params.copy()
try:
open_period_incident = IncidentGroupOpenPeriod.objects.get(
group_open_period_id=metric_issue_context.open_period_identifier
)
workflow_engine_params["alert"] = str(open_period_incident.incident_identifier)
except IncidentGroupOpenPeriod.DoesNotExist:
# the corresponding metric detector was not dual written
workflow_engine_params["alert"] = str(
get_fake_id_from_object_id(metric_issue_context.open_period_identifier)
)
title_link = build_title_link(alert_rule_id, organization, workflow_engine_params)
elif features.has("organizations:workflow-engine-ui-links", organization):
if metric_issue_context.group is None:
raise ValueError("Group is required for workflow engine UI links")
# We don't need to save the query param the alert rule id here because the link is to the group and not the alert rule
# TODO(iamrajjoshi): This this through and perhaps
workflow_engine_ui_params = title_link_params.copy()
workflow_engine_ui_params.pop("alert", None)
title_link = build_title_link_workflow_engine_ui(
metric_issue_context.group.id,
organization,
metric_issue_context.group.project.id,
workflow_engine_ui_params,
)
else:
title_link = build_title_link(
alert_context.action_identifier_id, organization, title_link_params
)
return AttachmentInfo(
title=title,
text=text,
logo_url=logo_url(),
status=status,
title_link=title_link,
)
def metric_alert_unfurl_attachment_info(
alert_rule: AlertRule,
selected_incident: Incident | None = None,
new_status: IncidentStatus | None = None,
metric_value: float | None = None,
) -> AttachmentInfo:
latest_incident = None
if selected_incident is None:
try:
# Use .get() instead of .first() to avoid sorting table by id
latest_incident = Incident.objects.filter(
id__in=Incident.objects.filter(alert_rule=alert_rule)
.values("alert_rule_id")
.annotate(incident_id=Max("id"))
.values("incident_id")
).get()
except Incident.DoesNotExist:
latest_incident = None
if new_status:
status = get_status_text(new_status)
elif selected_incident:
status = get_status_text(IncidentStatus(selected_incident.status))
elif latest_incident:
status = get_status_text(IncidentStatus(latest_incident.status))
else:
status = get_status_text(IncidentStatus.CLOSED)
title_link_params: TitleLinkParams = {"detection_type": alert_rule.detection_type}
if selected_incident:
title_link_params["alert"] = str(selected_incident.identifier)
title = get_title(status, alert_rule.name)
title_link = build_title_link(alert_rule.id, alert_rule.organization, title_link_params)
if metric_value is None:
if (
selected_incident is None
and latest_incident
and latest_incident.status != IncidentStatus.CLOSED
):
# Without a selected incident, use latest incident if it is not resolved
incident_info: Incident | None = latest_incident
else:
incident_info = selected_incident
if incident_info:
# TODO(iamrajjoshi): Hoist FK lookup up
metric_value = get_metric_count_from_incident(incident_info)
text = ""
if metric_value is not None and status != INCIDENT_STATUS[IncidentStatus.CLOSED]:
text = get_incident_status_text(
alert_rule.snuba_query,
(
AlertRuleThresholdType(alert_rule.threshold_type)
if alert_rule.threshold_type is not None
else None
),
alert_rule.comparison_delta,
str(metric_value),
)
if features.has("organizations:anomaly-detection-alerts", alert_rule.organization):
text += f"\nThreshold: {alert_rule.detection_type.title()}"
date_started = None
if selected_incident:
date_started = selected_incident.date_started
return AttachmentInfo(
title_link=title_link,
title=title,
text=text,
status=status,
logo_url=logo_url(),
date_started=date_started,
)
| TitleLinkParams |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_sentry_app_features.py | {
"start": 275,
"end": 1580
} | class ____(APITestCase):
def setUp(self) -> None:
self.user = self.create_user(email="boop@example.com")
self.sentry_app = self.create_sentry_app(
name="Test", organization=self.create_organization(owner=self.user)
)
self.api_feature = IntegrationFeature.objects.get(
target_id=self.sentry_app.id, target_type=IntegrationTypes.SENTRY_APP.value
)
self.issue_link_feature = self.create_sentry_app_feature(
sentry_app=self.sentry_app, feature=Feature.ISSUE_LINK
)
self.url = reverse("sentry-api-0-sentry-app-features", args=[self.sentry_app.slug])
def test_retrieves_all_features(self) -> None:
self.login_as(user=self.user)
response = self.client.get(self.url, format="json")
assert response.status_code == 200
assert {
"featureId": self.api_feature.feature,
"description": self.api_feature.description,
"featureGate": self.api_feature.feature_str(),
} in response.data
assert {
"featureId": self.issue_link_feature.feature,
"description": self.issue_link_feature.description,
"featureGate": self.issue_link_feature.feature_str(),
} in response.data
| SentryAppFeaturesTest |
python | Netflix__metaflow | test/core/metaflow_test/__init__.py | {
"start": 2075,
"end": 2290
} | class ____(MetaflowException):
headline = "Resume requested"
def __init__(self):
super(ResumeFromHere, self).__init__(
"This is not an error. " "Testing resume..."
)
| ResumeFromHere |
python | tiangolo__fastapi | docs_src/extra_models/tutorial001_py310.py | {
"start": 204,
"end": 303
} | class ____(BaseModel):
username: str
email: EmailStr
full_name: str | None = None
| UserOut |
python | cython__cython | Tools/dataclass_test_data/test_dataclasses.py | {
"start": 68047,
"end": 69296
} | class ____(unittest.TestCase):
def test_field_without_annotation(self):
with self.assertRaisesRegex(TypeError,
"'f' is a field but has no type annotation"):
@dataclass
class C:
f = field()
def test_field_without_annotation_but_annotation_in_base(self):
@dataclass
class B:
f: int
with self.assertRaisesRegex(TypeError,
"'f' is a field but has no type annotation"):
# This is still an error: make sure we don't pick up the
# type annotation in the base class.
@dataclass
class C(B):
f = field()
def test_field_without_annotation_but_annotation_in_base_not_dataclass(self):
# Same test, but with the base class not a dataclass.
class B:
f: int
with self.assertRaisesRegex(TypeError,
"'f' is a field but has no type annotation"):
# This is still an error: make sure we don't pick up the
# type annotation in the base class.
@dataclass
class C(B):
f = field()
| TestFieldNoAnnotation |
python | redis__redis-py | redis/commands/bf/__init__.py | {
"start": 5724,
"end": 6935
} | class ____(TDigestCommands, AbstractBloom):
def __init__(self, client, **kwargs):
"""Create a new RedisBloom client."""
# Set the module commands' callbacks
_MODULE_CALLBACKS = {
TDIGEST_CREATE: bool_ok,
# TDIGEST_RESET: bool_ok,
# TDIGEST_ADD: spaceHolder,
# TDIGEST_MERGE: spaceHolder,
}
_RESP2_MODULE_CALLBACKS = {
TDIGEST_BYRANK: parse_to_list,
TDIGEST_BYREVRANK: parse_to_list,
TDIGEST_CDF: parse_to_list,
TDIGEST_INFO: TDigestInfo,
TDIGEST_MIN: float,
TDIGEST_MAX: float,
TDIGEST_TRIMMED_MEAN: float,
TDIGEST_QUANTILE: parse_to_list,
}
_RESP3_MODULE_CALLBACKS = {}
self.client = client
self.commandmixin = TDigestCommands
self.execute_command = client.execute_command
if get_protocol_version(self.client) in ["3", 3]:
_MODULE_CALLBACKS.update(_RESP3_MODULE_CALLBACKS)
else:
_MODULE_CALLBACKS.update(_RESP2_MODULE_CALLBACKS)
for k, v in _MODULE_CALLBACKS.items():
self.client.set_response_callback(k, v)
| TDigestBloom |
python | pyca__cryptography | src/cryptography/hazmat/decrepit/ciphers/algorithms.py | {
"start": 1942,
"end": 2212
} | class ____(BlockCipherAlgorithm):
name = "IDEA"
block_size = 64
key_sizes = frozenset([128])
def __init__(self, key: bytes):
self.key = _verify_key_size(self, key)
@property
def key_size(self) -> int:
return len(self.key) * 8
| IDEA |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/decl_api.py | {
"start": 4981,
"end": 5100
} | class ____(DeclarativeAttributeIntercept):
"""metaclass that includes @dataclass_transforms"""
| DCTransformDeclarative |
python | matplotlib__matplotlib | lib/matplotlib/backend_tools.py | {
"start": 19957,
"end": 20132
} | class ____(ToolBase):
"""Base tool for the configuration of subplots."""
description = 'Configure subplots'
image = 'mpl-data/images/subplots'
| ConfigureSubplotsBase |
python | gevent__gevent | src/gevent/local.py | {
"start": 5937,
"end": 6424
} | class ____(object):
"""
A weakref callback for when the greenlet
is deleted.
If the greenlet is a `gevent.greenlet.Greenlet` and
supplies ``rawlink``, that will be used instead of a
weakref.
"""
__slots__ = ('idt', 'wrdicts')
def __init__(self, idt, wrdicts):
self.idt = idt
self.wrdicts = wrdicts
def __call__(self, _unused):
dicts = self.wrdicts()
if dicts:
dicts.pop(self.idt, None)
| _greenlet_deleted |
python | psf__black | src/black/debug.py | {
"start": 356,
"end": 1922
} | class ____(Visitor[T]):
tree_depth: int = 0
list_output: list[str] = field(default_factory=list)
print_output: bool = True
def out(self, message: str, *args: Any, **kwargs: Any) -> None:
self.list_output.append(message)
if self.print_output:
out(message, *args, **kwargs)
def visit_default(self, node: LN) -> Iterator[T]:
indent = " " * (2 * self.tree_depth)
if isinstance(node, Node):
_type = type_repr(node.type)
self.out(f"{indent}{_type}", fg="yellow")
self.tree_depth += 1
for child in node.children:
yield from self.visit(child)
self.tree_depth -= 1
self.out(f"{indent}/{_type}", fg="yellow", bold=False)
else:
_type = token.tok_name.get(node.type, str(node.type))
self.out(f"{indent}{_type}", fg="blue", nl=False)
if node.prefix:
# We don't have to handle prefixes for `Node` objects since
# that delegates to the first child anyway.
self.out(f" {node.prefix!r}", fg="green", bold=False, nl=False)
self.out(f" {node.value!r}", fg="blue", bold=False)
@classmethod
def show(cls, code: str | Leaf | Node) -> None:
"""Pretty-print the lib2to3 AST of a given string of `code`.
Convenience method for debugging.
"""
v: DebugVisitor[None] = DebugVisitor()
if isinstance(code, str):
code = lib2to3_parse(code)
list(v.visit(code))
| DebugVisitor |
python | pytorch__pytorch | test/dynamo/cpython/3_13/typinganndata/ann_module.py | {
"start": 325,
"end": 417
} | class ____(type):
__annotations__['123'] = 123
o: type = object
(pars): bool = True
| M |
python | tensorflow__tensorflow | tensorflow/python/debug/wrappers/local_cli_wrapper.py | {
"start": 1501,
"end": 24293
} | class ____(framework.BaseDebugWrapperSession):
"""Concrete subclass of BaseDebugWrapperSession implementing a local CLI.
This class has all the methods that a `session.Session` object has, in order
to support debugging with minimal code changes. Invoking its `run()` method
will launch the command-line interface (CLI) of tfdbg.
"""
def __init__(self,
sess,
dump_root=None,
ui_type="readline",
thread_name_filter=None,
config_file_path=False):
"""Constructor of LocalCLIDebugWrapperSession.
Args:
sess: The TensorFlow `Session` object being wrapped.
dump_root: (`str`) optional path to the dump root directory. Must be a
directory that does not exist or an empty directory. If the directory
does not exist, it will be created by the debugger core during debug
`run()` calls and removed afterwards. If `None`, the debug dumps will
be at tfdbg_<random_string> under the system temp directory.
ui_type: (`str`) requested UI type. Currently supported:
(readline)
thread_name_filter: Regular-expression white list for thread name. See
the doc of `BaseDebugWrapperSession` for details.
config_file_path: Optional override to the default configuration file
path, which is at `${HOME}/.tfdbg_config`.
Raises:
ValueError: If dump_root is an existing and non-empty directory or if
dump_root is a file.
"""
framework.BaseDebugWrapperSession.__init__(
self, sess, thread_name_filter=thread_name_filter)
if not dump_root:
self._dump_root = tempfile.mkdtemp(prefix=_DUMP_ROOT_PREFIX)
else:
dump_root = os.path.expanduser(dump_root)
if os.path.isfile(dump_root):
raise ValueError("dump_root path points to a file: %s" % dump_root)
elif os.path.isdir(dump_root) and os.listdir(dump_root):
raise ValueError("dump_root path points to a non-empty directory: %s" %
dump_root)
self._dump_root = dump_root
self._initialize_argparsers()
# Registered tensor filters.
self._tensor_filters = {}
# Register frequently-used filter(s).
self.add_tensor_filter("has_inf_or_nan", debug_data.has_inf_or_nan)
# Below are the state variables of this wrapper object.
# _active_tensor_filter: what (if any) tensor filter is in effect. If such
# a filter is in effect, this object will call run() method of the
# underlying TensorFlow Session object until the filter passes. This is
# activated by the "-f" flag of the "run" command.
# _run_through_times: keeps track of how many times the wrapper needs to
# run through without stopping at the run-end CLI. It is activated by the
# "-t" option of the "run" command.
# _skip_debug: keeps track of whether the current run should be executed
# without debugging. It is activated by the "-n" option of the "run"
# command.
#
# _run_start_response: keeps track what OnRunStartResponse the wrapper
# should return at the next run-start callback. If this information is
# unavailable (i.e., is None), the run-start CLI will be launched to ask
# the user. This is the case, e.g., right before the first run starts.
self._active_tensor_filter = None
self._active_filter_exclude_node_names = None
self._active_tensor_filter_run_start_response = None
self._run_through_times = 1
self._skip_debug = False
self._run_start_response = None
self._is_run_start = True
self._ui_type = ui_type
self._config = None
if config_file_path:
self._config = cli_config.CLIConfig(config_file_path=config_file_path)
def _is_disk_usage_reset_each_run(self):
# The dumped tensors are all cleaned up after every Session.run
# in a command-line wrapper.
return True
def _initialize_argparsers(self):
self._argparsers = {}
ap = argparse.ArgumentParser(
description="Run through, with or without debug tensor watching.",
usage=argparse.SUPPRESS)
ap.add_argument(
"-t",
"--times",
dest="times",
type=int,
default=1,
help="How many Session.run() calls to proceed with.")
ap.add_argument(
"-n",
"--no_debug",
dest="no_debug",
action="store_true",
help="Run through without debug tensor watching.")
ap.add_argument(
"-f",
"--till_filter_pass",
dest="till_filter_pass",
type=str,
default="",
help="Run until a tensor in the graph passes the specified filter.")
ap.add_argument(
"-fenn",
"--filter_exclude_node_names",
dest="filter_exclude_node_names",
type=str,
default="",
help="When applying the tensor filter, exclude node with names "
"matching the regular expression. Applicable only if --tensor_filter "
"or -f is used.")
ap.add_argument(
"--node_name_filter",
dest="node_name_filter",
type=str,
default="",
help="Regular-expression filter for node names to be watched in the "
"run, e.g., loss, reshape.*")
ap.add_argument(
"--op_type_filter",
dest="op_type_filter",
type=str,
default="",
help="Regular-expression filter for op type to be watched in the run, "
"e.g., (MatMul|Add), Variable.*")
ap.add_argument(
"--tensor_dtype_filter",
dest="tensor_dtype_filter",
type=str,
default="",
help="Regular-expression filter for tensor dtype to be watched in the "
"run, e.g., (float32|float64), int.*")
ap.add_argument(
"-p",
"--profile",
dest="profile",
action="store_true",
help="Run and profile TensorFlow graph execution.")
self._argparsers["run"] = ap
ap = argparse.ArgumentParser(
description="Display information about this Session.run() call.",
usage=argparse.SUPPRESS)
self._argparsers["run_info"] = ap
self._argparsers["print_feed"] = command_parser.get_print_tensor_argparser(
"Print the value of a feed in feed_dict.")
def add_tensor_filter(self, filter_name, tensor_filter):
"""Add a tensor filter.
Args:
filter_name: (`str`) name of the filter.
tensor_filter: (`callable`) the filter callable. See the doc string of
`DebugDumpDir.find()` for more details about its signature.
"""
self._tensor_filters[filter_name] = tensor_filter
def on_session_init(self, request):
"""Overrides on-session-init callback.
Args:
request: An instance of `OnSessionInitRequest`.
Returns:
An instance of `OnSessionInitResponse`.
"""
return framework.OnSessionInitResponse(
framework.OnSessionInitAction.PROCEED)
def on_run_start(self, request):
"""Overrides on-run-start callback.
Args:
request: An instance of `OnRunStartRequest`.
Returns:
An instance of `OnRunStartResponse`.
"""
self._is_run_start = True
self._update_run_calls_state(
request.run_call_count, request.fetches, request.feed_dict,
is_callable_runner=request.is_callable_runner)
if self._active_tensor_filter:
# If we are running until a filter passes, we just need to keep running
# with the previous `OnRunStartResponse`.
return self._active_tensor_filter_run_start_response
self._exit_if_requested_by_user()
if self._run_call_count > 1 and not self._skip_debug:
if self._run_through_times > 0:
# Just run through without debugging.
return framework.OnRunStartResponse(
framework.OnRunStartAction.NON_DEBUG_RUN, [])
elif self._run_through_times == 0:
# It is the run at which the run-end CLI will be launched: activate
# debugging.
return (self._run_start_response or
framework.OnRunStartResponse(
framework.OnRunStartAction.DEBUG_RUN,
self._get_run_debug_urls()))
if self._run_start_response is None:
self._prep_cli_for_run_start()
self._run_start_response = self._launch_cli()
if self._active_tensor_filter:
self._active_tensor_filter_run_start_response = self._run_start_response
if self._run_through_times > 1:
self._run_through_times -= 1
self._exit_if_requested_by_user()
return self._run_start_response
def _exit_if_requested_by_user(self):
if self._run_start_response == debugger_cli_common.EXPLICIT_USER_EXIT:
# Explicit user "exit" command leads to sys.exit(1).
print(
"Note: user exited from debugger CLI: Calling sys.exit(1).",
file=sys.stderr)
sys.exit(1)
def _prep_cli_for_run_start(self):
"""Prepare (but not launch) the CLI for run-start."""
self._run_cli = ui_factory.get_ui(self._ui_type, config=self._config)
help_intro = debugger_cli_common.RichTextLines([])
if self._run_call_count == 1:
# Show logo at the onset of the first run.
help_intro.extend(cli_shared.get_tfdbg_logo())
help_intro.extend(debugger_cli_common.get_tensorflow_version_lines())
help_intro.extend(debugger_cli_common.RichTextLines("Upcoming run:"))
help_intro.extend(self._run_info)
self._run_cli.set_help_intro(help_intro)
# Create initial screen output detailing the run.
self._title = "run-start: " + self._run_description
self._init_command = "run_info"
self._title_color = "blue_on_white"
def on_run_end(self, request):
"""Overrides on-run-end callback.
Actions taken:
1) Load the debug dump.
2) Bring up the Analyzer CLI.
Args:
request: An instance of OnSessionInitRequest.
Returns:
An instance of OnSessionInitResponse.
"""
self._is_run_start = False
if request.performed_action == framework.OnRunStartAction.DEBUG_RUN:
partition_graphs = None
if request.run_metadata and request.run_metadata.partition_graphs:
partition_graphs = request.run_metadata.partition_graphs
elif request.client_graph_def:
partition_graphs = [request.client_graph_def]
if request.tf_error and not os.path.isdir(self._dump_root):
# It is possible that the dump root may not exist due to errors that
# have occurred prior to graph execution (e.g., invalid device
# assignments), in which case we will just raise the exception as the
# unwrapped Session does.
raise request.tf_error
debug_dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=partition_graphs)
debug_dump.set_python_graph(self._sess.graph)
passed_filter = None
passed_filter_exclude_node_names = None
if self._active_tensor_filter:
if not debug_dump.find(
self._tensor_filters[self._active_tensor_filter], first_n=1,
exclude_node_names=self._active_filter_exclude_node_names):
# No dumped tensor passes the filter in this run. Clean up the dump
# directory and move on.
self._remove_dump_root()
return framework.OnRunEndResponse()
else:
# Some dumped tensor(s) from this run passed the filter.
passed_filter = self._active_tensor_filter
passed_filter_exclude_node_names = (
self._active_filter_exclude_node_names)
self._active_tensor_filter = None
self._active_filter_exclude_node_names = None
self._prep_debug_cli_for_run_end(
debug_dump, request.tf_error, passed_filter,
passed_filter_exclude_node_names)
self._run_start_response = self._launch_cli()
# Clean up the dump generated by this run.
self._remove_dump_root()
elif request.performed_action == framework.OnRunStartAction.PROFILE_RUN:
self._prep_profile_cli_for_run_end(self._sess.graph, request.run_metadata)
self._run_start_response = self._launch_cli()
else:
# No debug information to show following a non-debug run() call.
self._run_start_response = None
# Return placeholder response that currently holds no additional
# information.
return framework.OnRunEndResponse()
def _remove_dump_root(self):
if os.path.isdir(self._dump_root):
file_io.delete_recursively(self._dump_root)
def _prep_debug_cli_for_run_end(self,
debug_dump,
tf_error,
passed_filter,
passed_filter_exclude_node_names):
"""Prepare (but not launch) CLI for run-end, with debug dump from the run.
Args:
debug_dump: (debug_data.DebugDumpDir) The debug dump directory from this
run.
tf_error: (None or OpError) OpError that happened during the run() call
(if any).
passed_filter: (None or str) Name of the tensor filter that just passed
and caused the preparation of this run-end CLI (if any).
passed_filter_exclude_node_names: (None or str) Regular expression used
with the tensor filter to exclude ops with names matching the regular
expression.
"""
if tf_error:
help_intro = cli_shared.get_error_intro(tf_error)
self._init_command = "help"
self._title_color = "red_on_white"
else:
help_intro = None
self._init_command = "lt"
self._title_color = "black_on_white"
if passed_filter is not None:
# Some dumped tensor(s) from this run passed the filter.
self._init_command = "lt -f %s" % passed_filter
if passed_filter_exclude_node_names:
self._init_command += (" --filter_exclude_node_names %s" %
passed_filter_exclude_node_names)
self._title_color = "red_on_white"
self._run_cli = analyzer_cli.create_analyzer_ui(
debug_dump,
self._tensor_filters,
ui_type=self._ui_type,
on_ui_exit=self._remove_dump_root,
config=self._config)
# Get names of all dumped tensors.
dumped_tensor_names = []
for datum in debug_dump.dumped_tensor_data:
dumped_tensor_names.append("%s:%d" %
(datum.node_name, datum.output_slot))
# Tab completions for command "print_tensors".
self._run_cli.register_tab_comp_context(["print_tensor", "pt"],
dumped_tensor_names)
# Tab completion for commands "node_info", "list_inputs" and
# "list_outputs". The list comprehension is used below because nodes()
# output can be unicodes and they need to be converted to strs.
self._run_cli.register_tab_comp_context(
["node_info", "ni", "list_inputs", "li", "list_outputs", "lo"],
[str(node_name) for node_name in debug_dump.nodes()])
# TODO(cais): Reduce API surface area for aliases vis-a-vis tab
# completion contexts and registered command handlers.
self._title = "run-end: " + self._run_description
if help_intro:
self._run_cli.set_help_intro(help_intro)
def _prep_profile_cli_for_run_end(self, py_graph, run_metadata):
self._init_command = "lp"
self._run_cli = profile_analyzer_cli.create_profiler_ui(
py_graph, run_metadata, ui_type=self._ui_type,
config=self._run_cli.config)
self._title = "run-end (profiler mode): " + self._run_description
def _launch_cli(self):
"""Launch the interactive command-line interface.
Returns:
The OnRunStartResponse specified by the user using the "run" command.
"""
self._register_this_run_info(self._run_cli)
response = self._run_cli.run_ui(
init_command=self._init_command,
title=self._title,
title_color=self._title_color)
return response
def _run_info_handler(self, args, screen_info=None):
output = debugger_cli_common.RichTextLines([])
if self._run_call_count == 1:
output.extend(cli_shared.get_tfdbg_logo())
output.extend(debugger_cli_common.get_tensorflow_version_lines())
output.extend(self._run_info)
if (not self._is_run_start and
debugger_cli_common.MAIN_MENU_KEY in output.annotations):
menu = output.annotations[debugger_cli_common.MAIN_MENU_KEY]
if "list_tensors" not in menu.captions():
menu.insert(
0, debugger_cli_common.MenuItem("list_tensors", "list_tensors"))
return output
def _print_feed_handler(self, args, screen_info=None):
np_printoptions = cli_shared.numpy_printoptions_from_screen_info(
screen_info)
if not self._feed_dict:
return cli_shared.error(
"The feed_dict of the current run is None or empty.")
parsed = self._argparsers["print_feed"].parse_args(args)
tensor_name, tensor_slicing = (
command_parser.parse_tensor_name_with_slicing(parsed.tensor_name))
feed_key = None
feed_value = None
for key in self._feed_dict:
key_name = common.get_graph_element_name(key)
if key_name == tensor_name:
feed_key = key_name
feed_value = self._feed_dict[key]
break
if feed_key is None:
return cli_shared.error(
"The feed_dict of the current run does not contain the key %s" %
tensor_name)
else:
return cli_shared.format_tensor(
feed_value,
feed_key + " (feed)",
np_printoptions,
print_all=parsed.print_all,
tensor_slicing=tensor_slicing,
highlight_options=cli_shared.parse_ranges_highlight(parsed.ranges),
include_numeric_summary=parsed.numeric_summary)
def _run_handler(self, args, screen_info=None):
"""Command handler for "run" command during on-run-start."""
del screen_info # Currently unused.
parsed = self._argparsers["run"].parse_args(args)
parsed.node_name_filter = parsed.node_name_filter or None
parsed.op_type_filter = parsed.op_type_filter or None
parsed.tensor_dtype_filter = parsed.tensor_dtype_filter or None
if parsed.filter_exclude_node_names and not parsed.till_filter_pass:
raise ValueError(
"The --filter_exclude_node_names (or -feon) flag is valid only if "
"the --till_filter_pass (or -f) flag is used.")
if parsed.profile:
raise debugger_cli_common.CommandLineExit(
exit_token=framework.OnRunStartResponse(
framework.OnRunStartAction.PROFILE_RUN, []))
self._skip_debug = parsed.no_debug
self._run_through_times = parsed.times
if parsed.times > 1 or parsed.no_debug:
# If requested -t times > 1, the very next run will be a non-debug run.
action = framework.OnRunStartAction.NON_DEBUG_RUN
debug_urls = []
else:
action = framework.OnRunStartAction.DEBUG_RUN
debug_urls = self._get_run_debug_urls()
run_start_response = framework.OnRunStartResponse(
action,
debug_urls,
node_name_regex_allowlist=parsed.node_name_filter,
op_type_regex_allowlist=parsed.op_type_filter,
tensor_dtype_regex_allowlist=parsed.tensor_dtype_filter)
if parsed.till_filter_pass:
# For the run-till-filter-pass (run -f) mode, use the DEBUG_RUN
# option to access the intermediate tensors, and set the corresponding
# state flag of the class itself to True.
if parsed.till_filter_pass in self._tensor_filters:
action = framework.OnRunStartAction.DEBUG_RUN
self._active_tensor_filter = parsed.till_filter_pass
self._active_filter_exclude_node_names = (
parsed.filter_exclude_node_names)
self._active_tensor_filter_run_start_response = run_start_response
else:
# Handle invalid filter name.
return debugger_cli_common.RichTextLines(
["ERROR: tensor filter \"%s\" does not exist." %
parsed.till_filter_pass])
# Raise CommandLineExit exception to cause the CLI to exit.
raise debugger_cli_common.CommandLineExit(exit_token=run_start_response)
def _register_this_run_info(self, curses_cli):
curses_cli.register_command_handler(
"run",
self._run_handler,
self._argparsers["run"].format_help(),
prefix_aliases=["r"])
curses_cli.register_command_handler(
"run_info",
self._run_info_handler,
self._argparsers["run_info"].format_help(),
prefix_aliases=["ri"])
curses_cli.register_command_handler(
"print_feed",
self._print_feed_handler,
self._argparsers["print_feed"].format_help(),
prefix_aliases=["pf"])
if self._tensor_filters:
# Register tab completion for the filter names.
curses_cli.register_tab_comp_context(["run", "r"],
list(self._tensor_filters.keys()))
if self._feed_dict and hasattr(self._feed_dict, "keys"):
# Register tab completion for feed_dict keys.
feed_keys = [common.get_graph_element_name(key)
for key in self._feed_dict.keys()]
curses_cli.register_tab_comp_context(["print_feed", "pf"], feed_keys)
def _get_run_debug_urls(self):
"""Get the debug_urls value for the current run() call.
Returns:
debug_urls: (list of str) Debug URLs for the current run() call.
Currently, the list consists of only one URL that is a file:// URL.
"""
return ["file://" + self._dump_root]
def _update_run_calls_state(self,
run_call_count,
fetches,
feed_dict,
is_callable_runner=False):
"""Update the internal state with regard to run() call history.
Args:
run_call_count: (int) Number of run() calls that have occurred.
fetches: a node/tensor or a list of node/tensor that are the fetches of
the run() call. This is the same as the fetches argument to the run()
call.
feed_dict: None of a dict. This is the feed_dict argument to the run()
call.
is_callable_runner: (bool) whether a runner returned by
Session.make_callable is being run.
"""
self._run_call_count = run_call_count
self._feed_dict = feed_dict
self._run_description = cli_shared.get_run_short_description(
run_call_count,
fetches,
feed_dict,
is_callable_runner=is_callable_runner)
self._run_through_times -= 1
self._run_info = cli_shared.get_run_start_intro(
run_call_count,
fetches,
feed_dict,
self._tensor_filters,
is_callable_runner=is_callable_runner)
| LocalCLIDebugWrapperSession |
python | django__django | django/db/models/fields/json.py | {
"start": 12017,
"end": 13615
} | class ____(lookups.Exact):
# RemovedInDjango70Warning: When the deprecation period is over, remove
# the following line.
can_use_none_as_rhs = True
def process_rhs(self, compiler, connection):
if self.rhs is None and not isinstance(self.lhs, KeyTransform):
warnings.warn(
"Using None as the right-hand side of an exact lookup on JSONField to "
"mean JSON scalar 'null' is deprecated. Use JSONNull() instead (or use "
"the __isnull lookup if you meant SQL NULL).",
RemovedInDjango70Warning,
skip_file_prefixes=django_file_prefixes(),
)
rhs, rhs_params = super().process_rhs(compiler, connection)
# RemovedInDjango70Warning: When the deprecation period is over, remove
# The following if-block entirely.
# Treat None lookup values as null.
if rhs == "%s" and (*rhs_params,) == (None,):
rhs_params = ("null",)
if connection.vendor == "mysql" and not isinstance(self.rhs, JSONNull):
func = ["JSON_EXTRACT(%s, '$')"] * len(rhs_params)
rhs %= tuple(func)
return rhs, rhs_params
def as_oracle(self, compiler, connection):
lhs, lhs_params = self.process_lhs(compiler, connection)
rhs, rhs_params = self.process_rhs(compiler, connection)
if connection.features.supports_primitives_in_json_field:
lhs = f"JSON({lhs})"
rhs = f"JSON({rhs})"
return f"JSON_EQUAL({lhs}, {rhs} ERROR ON ERROR)", (*lhs_params, *rhs_params)
| JSONExact |
python | sqlalchemy__sqlalchemy | test/orm/declarative/test_dc_transforms.py | {
"start": 80098,
"end": 82275
} | class ____(fixtures.TestBase, testing.AssertsCompiledSQL):
"""tests related to #9628"""
__dialect__ = "default"
@testing.combinations(
(query_expression,), (column_property,), argnames="construct"
)
def test_default_behavior(
self, dc_decl_base: Type[MappedAsDataclass], construct
):
class MyClass(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str] = mapped_column()
const: Mapped[str] = construct(data + "asdf")
m1 = MyClass(data="foo")
eq_(m1, MyClass(data="foo"))
ne_(m1, MyClass(data="bar"))
eq_regex(
repr(m1),
r".*MyClass\(id=None, data='foo', const=None\)",
)
@testing.combinations(
(query_expression,), (column_property,), argnames="construct"
)
def test_no_repr_behavior(
self, dc_decl_base: Type[MappedAsDataclass], construct
):
class MyClass(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str] = mapped_column()
const: Mapped[str] = construct(data + "asdf", repr=False)
m1 = MyClass(data="foo")
eq_regex(
repr(m1),
r".*MyClass\(id=None, data='foo'\)",
)
@testing.combinations(
(query_expression,), (column_property,), argnames="construct"
)
def test_enable_compare(
self, dc_decl_base: Type[MappedAsDataclass], construct
):
class MyClass(dc_decl_base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True, init=False)
data: Mapped[str] = mapped_column()
const: Mapped[str] = construct(data + "asdf", compare=True)
m1 = MyClass(data="foo")
eq_(m1, MyClass(data="foo"))
ne_(m1, MyClass(data="bar"))
m2 = MyClass(data="foo")
m2.const = "some const"
ne_(m2, MyClass(data="foo"))
m3 = MyClass(data="foo")
m3.const = "some const"
eq_(m2, m3)
| ReadOnlyAttrTest |
python | pypa__virtualenv | src/virtualenv/app_data/base.py | {
"start": 197,
"end": 1618
} | class ____(ABC):
"""Abstract storage interface for the virtualenv application."""
@abstractmethod
def close(self):
"""Called before virtualenv exits."""
@abstractmethod
def reset(self):
"""Called when the user passes in the reset app data."""
@abstractmethod
def py_info(self, path):
raise NotImplementedError
@abstractmethod
def py_info_clear(self):
raise NotImplementedError
@property
def can_update(self):
raise NotImplementedError
@abstractmethod
def embed_update_log(self, distribution, for_py_version):
raise NotImplementedError
@property
def house(self):
raise NotImplementedError
@property
def transient(self):
raise NotImplementedError
@abstractmethod
def wheel_image(self, for_py_version, name):
raise NotImplementedError
@contextmanager
def ensure_extracted(self, path, to_folder=None):
"""Some paths might be within the zipapp, unzip these to a path on the disk."""
if IS_ZIPAPP:
with self.extract(path, to_folder) as result:
yield result
else:
yield path
@abstractmethod
@contextmanager
def extract(self, path, to_folder):
raise NotImplementedError
@abstractmethod
@contextmanager
def locked(self, path):
raise NotImplementedError
| AppData |
python | prabhupant__python-ds | data_structures/binary_trees/check_divide_in_two_halves.py | {
"start": 241,
"end": 947
} | class ____:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def count(root):
if not root:
return 0
return count(root.left) + count(root.right) + 1
def check_util(root, n):
if root == None:
return False
# Check for root
if count(root) == n - count(root):
return True
# Check for all the other nodes
return check_util(root.left, n) or check_util(root.right, n)
def check(root):
n = count(root)
return check_util(root, n)
root = Node(5)
root.left = Node(1)
root.right = Node(6)
root.left.left = Node(3)
root.right.left = Node(7)
root.right.right = Node(4)
print(check(root)) | Node |
python | numpy__numpy | tools/swig/test/testFortran.py | {
"start": 3005,
"end": 3270
} | class ____(FortranTestCase):
def __init__(self, methodName="runTest"):
FortranTestCase.__init__(self, methodName)
self.typeStr = "long"
self.typeCode = "l"
######################################################################
| longTestCase |
python | apache__thrift | lib/py/src/protocol/TProtocol.py | {
"start": 13046,
"end": 13125
} | class ____(object):
def getProtocol(self, trans):
pass
| TProtocolFactory |
python | neetcode-gh__leetcode | python/0785-is-graph-bipartite.py | {
"start": 0,
"end": 1280
} | class ____:
def isBipartiteBFS(self, graph: List[List[int]]) -> bool:
colors = [-1] * len(graph)
for i in range(len(graph)):
if colors[i] == -1:
q = deque([i])
colors[i] = 0
while q:
node = q.popleft()
for nbh in graph[node]:
if colors[nbh] == -1:
colors[nbh] = 1 - colors[node]
q.append(nbh)
elif colors[nbh] == colors[node]:
return False
return True
def isBipartiteDFS(self, graph: List[List[int]]) -> bool:
colors = [-1] * len(graph)
def dfs(node, c):
colors[node] = c
for nbh in graph[node]:
if colors[nbh] == -1:
if not dfs(nbh, 1 - c):
return False
elif colors[nbh] == colors[node]:
return False
return True
for i in range(len(graph)):
if colors[i] == -1:
if not dfs(i, 0):
return False
return True | Solution |
python | eventlet__eventlet | tests/hub_test.py | {
"start": 5326,
"end": 5643
} | class ____(tests.LimitedTestCase):
def test_debug_listeners(self):
hubs.get_hub().set_debug_listeners(True)
hubs.get_hub().set_debug_listeners(False)
def test_timer_exceptions(self):
hubs.get_hub().set_timer_exceptions(True)
hubs.get_hub().set_timer_exceptions(False)
| TestDebug |
python | ipython__ipython | IPython/utils/_process_win32_controller.py | {
"start": 5124,
"end": 6409
} | class ____:
"""A context manager to protect command execution from UNC paths.
In the Win32 API, commands can't be invoked with the cwd being a UNC path.
This context manager temporarily changes directory to the 'C:' drive on
entering, and restores the original working directory on exit.
The context manager returns the starting working directory *if* it made a
change and None otherwise, so that users can apply the necessary adjustment
to their system calls in the event of a change.
Examples
--------
::
cmd = 'dir'
with AvoidUNCPath() as path:
if path is not None:
cmd = '"pushd %s &&"%s' % (path, cmd)
os.system(cmd)
"""
def __enter__(self) -> None:
self.path = os.getcwd()
self.is_unc_path = self.path.startswith(r"\\")
if self.is_unc_path:
# change to c drive (as cmd.exe cannot handle UNC addresses)
os.chdir("C:")
return self.path
else:
# We return None to signal that there was no change in the working
# directory
return None
def __exit__(self, exc_type, exc_value, traceback) -> None:
if self.is_unc_path:
os.chdir(self.path)
| AvoidUNCPath |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_slots/SLOT002.py | {
"start": 157,
"end": 248
} | class ____(NamedTuple("foo", [("x", int, "y", int)])): # SLOT002
pass
| UnusualButStillBad |
python | neetcode-gh__leetcode | python/0605-can-place-flowers.py | {
"start": 0,
"end": 420
} | class ____:
def canPlaceFlowers(self, flowerbed: List[int], n: int) -> bool:
# Solution with O(1) space complexity
empty = 0 if flowerbed[0] else 1
for f in flowerbed:
if f:
n -= int((empty - 1) / 2) # int division, round toward zero
empty = 0
else:
empty += 1
n -= (empty) // 2
return n <= 0
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super2.py | {
"start": 1629,
"end": 1706
} | class ____(F): ...
def func1(cls: type[F | FChild1]):
super(F, cls)
| FChild1 |
python | kubernetes-client__python | kubernetes/client/models/v1_label_selector_requirement.py | {
"start": 383,
"end": 6013
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'key': 'str',
'operator': 'str',
'values': 'list[str]'
}
attribute_map = {
'key': 'key',
'operator': 'operator',
'values': 'values'
}
def __init__(self, key=None, operator=None, values=None, local_vars_configuration=None): # noqa: E501
"""V1LabelSelectorRequirement - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._key = None
self._operator = None
self._values = None
self.discriminator = None
self.key = key
self.operator = operator
if values is not None:
self.values = values
@property
def key(self):
"""Gets the key of this V1LabelSelectorRequirement. # noqa: E501
key is the label key that the selector applies to. # noqa: E501
:return: The key of this V1LabelSelectorRequirement. # noqa: E501
:rtype: str
"""
return self._key
@key.setter
def key(self, key):
"""Sets the key of this V1LabelSelectorRequirement.
key is the label key that the selector applies to. # noqa: E501
:param key: The key of this V1LabelSelectorRequirement. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and key is None: # noqa: E501
raise ValueError("Invalid value for `key`, must not be `None`") # noqa: E501
self._key = key
@property
def operator(self):
"""Gets the operator of this V1LabelSelectorRequirement. # noqa: E501
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. # noqa: E501
:return: The operator of this V1LabelSelectorRequirement. # noqa: E501
:rtype: str
"""
return self._operator
@operator.setter
def operator(self, operator):
"""Sets the operator of this V1LabelSelectorRequirement.
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. # noqa: E501
:param operator: The operator of this V1LabelSelectorRequirement. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and operator is None: # noqa: E501
raise ValueError("Invalid value for `operator`, must not be `None`") # noqa: E501
self._operator = operator
@property
def values(self):
"""Gets the values of this V1LabelSelectorRequirement. # noqa: E501
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. # noqa: E501
:return: The values of this V1LabelSelectorRequirement. # noqa: E501
:rtype: list[str]
"""
return self._values
@values.setter
def values(self, values):
"""Sets the values of this V1LabelSelectorRequirement.
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. # noqa: E501
:param values: The values of this V1LabelSelectorRequirement. # noqa: E501
:type: list[str]
"""
self._values = values
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1LabelSelectorRequirement):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1LabelSelectorRequirement):
return True
return self.to_dict() != other.to_dict()
| V1LabelSelectorRequirement |
python | ansible__ansible | test/lib/ansible_test/_internal/ssh.py | {
"start": 462,
"end": 2633
} | class ____:
"""Information needed to establish an SSH connection to a host."""
name: str
host: str
port: t.Optional[int]
user: str
identity_file: str
python_interpreter: t.Optional[str] = None
shell_type: t.Optional[str] = None
enable_rsa_sha1: bool = False
def __post_init__(self):
self.name = sanitize_host_name(self.name)
@property
def options(self) -> dict[str, str]:
"""OpenSSH config options, which can be passed to the `ssh` CLI with the `-o` argument."""
options: dict[str, str] = {}
if self.enable_rsa_sha1:
# Newer OpenSSH clients connecting to older SSH servers must explicitly enable ssh-rsa support.
# OpenSSH 8.8, released on 2021-09-26, deprecated using RSA with the SHA-1 hash algorithm (ssh-rsa).
# OpenSSH 7.2, released on 2016-02-29, added support for using RSA with SHA-256/512 hash algorithms.
# See: https://www.openssh.com/txt/release-8.8
algorithms = '+ssh-rsa' # append the algorithm to the default list, requires OpenSSH 7.0 or later
options.update(
# Host key signature algorithms that the client wants to use.
# Available options can be found with `ssh -Q HostKeyAlgorithms` or `ssh -Q key` on older clients.
# This option was updated in OpenSSH 7.0, released on 2015-08-11, to support the "+" prefix.
# See: https://www.openssh.com/txt/release-7.0
HostKeyAlgorithms=algorithms,
# Signature algorithms that will be used for public key authentication.
# Available options can be found with `ssh -Q PubkeyAcceptedAlgorithms` or `ssh -Q key` on older clients.
# This option was added in OpenSSH 7.0, released on 2015-08-11.
# See: https://www.openssh.com/txt/release-7.0
# This option is an alias for PubkeyAcceptedAlgorithms, which was added in OpenSSH 8.5.
# See: https://www.openssh.com/txt/release-8.5
PubkeyAcceptedKeyTypes=algorithms,
)
return options
| SshConnectionDetail |
python | cython__cython | Tools/dataclass_test_data/test_dataclasses.py | {
"start": 126877,
"end": 128808
} | class ____(unittest.TestCase):
def test_match_args(self):
@dataclass
class C:
a: int
self.assertEqual(C(42).__match_args__, ('a',))
def test_explicit_match_args(self):
ma = ()
@dataclass
class C:
a: int
__match_args__ = ma
self.assertIs(C(42).__match_args__, ma)
def test_bpo_43764(self):
@dataclass(repr=False, eq=False, init=False)
class X:
a: int
b: int
c: int
self.assertEqual(X.__match_args__, ("a", "b", "c"))
def test_match_args_argument(self):
@dataclass(match_args=False)
class X:
a: int
self.assertNotIn('__match_args__', X.__dict__)
@dataclass(match_args=False)
class Y:
a: int
__match_args__ = ('b',)
self.assertEqual(Y.__match_args__, ('b',))
@dataclass(match_args=False)
class Z(Y):
z: int
self.assertEqual(Z.__match_args__, ('b',))
# Ensure parent dataclass __match_args__ is seen, if child class
# specifies match_args=False.
@dataclass
class A:
a: int
z: int
@dataclass(match_args=False)
class B(A):
b: int
self.assertEqual(B.__match_args__, ('a', 'z'))
def test_make_dataclasses(self):
C = make_dataclass('C', [('x', int), ('y', int)])
self.assertEqual(C.__match_args__, ('x', 'y'))
C = make_dataclass('C', [('x', int), ('y', int)], match_args=True)
self.assertEqual(C.__match_args__, ('x', 'y'))
C = make_dataclass('C', [('x', int), ('y', int)], match_args=False)
self.assertNotIn('__match__args__', C.__dict__)
C = make_dataclass('C', [('x', int), ('y', int)], namespace={'__match_args__': ('z',)})
self.assertEqual(C.__match_args__, ('z',))
| TestMatchArgs |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.