language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | django/utils/regex_helper.py | {
"start": 722,
"end": 822
} | class ____(list):
"""Represent multiple possibilities at this point in a pattern string."""
| Choice |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/call5.py | {
"start": 1439,
"end": 1678
} | class ____(NamedTuple):
a: list[str]
b: list[int]
q6 = Z(["1"], [3])
for a, b in zip(*q6):
reveal_type(a, expected_text="str")
reveal_type(b, expected_text="int")
def func1(a: list[str], c: list[int]): ...
func1(*q6)
| Z |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF009_attrs_auto_attribs.py | {
"start": 731,
"end": 847
} | class ____:
a: str = 0
b = field()
c: int = foo()
d = list()
@mutable # auto_attribs = None => True
| C |
python | django__django | tests/admin_views/models.py | {
"start": 5850,
"end": 6025
} | class ____(models.Model):
username = models.CharField(max_length=100)
last_action = models.DateTimeField()
def __str__(self):
return self.username
| Character |
python | facebook__pyre-check | client/json_rpc.py | {
"start": 2148,
"end": 3400
} | class ____(abc.ABC):
@abc.abstractmethod
def json(self) -> JSON:
raise NotImplementedError()
def serialize(self) -> str:
return json.dumps(self.json())
def _verify_json_rpc_version(json: JSON) -> None:
json_rpc_version = json.get("jsonrpc")
if json_rpc_version is None:
raise InvalidRequestError(f"Required field `jsonrpc` is missing: {json}")
if json_rpc_version != JSONRPC_VERSION:
raise InvalidRequestError(
f"`jsonrpc` is expected to be '2.0' but got '{json_rpc_version}'"
)
def _parse_json_rpc_id(json: JSON) -> Union[int, str, None]:
id = json.get("id")
if id is not None and not isinstance(id, int) and not isinstance(id, str):
raise InvalidRequestError(
f"Request ID must be either an integer or string but got {id}"
)
return id
def _parse_json_rpc_activity_key(json: JSON) -> Optional[JSON]:
activity_key = json.get("activityKey")
if activity_key is None:
return None
elif isinstance(activity_key, dict):
return activity_key
else:
raise InvalidParameterError(
f"Cannot parse request activityKey JSON: {activity_key}"
)
@dataclasses.dataclass(frozen=True)
| JSONRPC |
python | numba__numba | numba/tests/test_withlifting.py | {
"start": 3887,
"end": 4885
} | class ____(TestCase):
def setUp(self):
super(BaseTestWithLifting, self).setUp()
self.typingctx = cpu_target.typing_context
self.targetctx = cpu_target.target_context
self.flags = DEFAULT_FLAGS
def check_extracted_with(self, func, expect_count, expected_stdout):
the_ir = get_func_ir(func)
new_ir, extracted = with_lifting(
the_ir, self.typingctx, self.targetctx, self.flags,
locals={},
)
self.assertEqual(len(extracted), expect_count)
cres = self.compile_ir(new_ir)
with captured_stdout() as out:
cres.entry_point()
self.assertEqual(out.getvalue(), expected_stdout)
def compile_ir(self, the_ir, args=(), return_type=None):
typingctx = self.typingctx
targetctx = self.targetctx
flags = self.flags
return compile_ir(typingctx, targetctx, the_ir, args,
return_type, flags, locals={})
| BaseTestWithLifting |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/clipboard/pyperclip.py | {
"start": 192,
"end": 1160
} | class ____(Clipboard):
"""
Clipboard that synchronizes with the Windows/Mac/Linux system clipboard,
using the pyperclip module.
"""
def __init__(self) -> None:
self._data: ClipboardData | None = None
def set_data(self, data: ClipboardData) -> None:
self._data = data
pyperclip.copy(data.text)
def get_data(self) -> ClipboardData:
text = pyperclip.paste()
# When the clipboard data is equal to what we copied last time, reuse
# the `ClipboardData` instance. That way we're sure to keep the same
# `SelectionType`.
if self._data and self._data.text == text:
return self._data
# Pyperclip returned something else. Create a new `ClipboardData`
# instance.
else:
return ClipboardData(
text=text,
type=SelectionType.LINES if "\n" in text else SelectionType.CHARACTERS,
)
| PyperclipClipboard |
python | sqlalchemy__sqlalchemy | examples/vertical/dictlike-polymorphic.py | {
"start": 1825,
"end": 7353
} | class ____:
"""A key/value pair with polymorphic value storage.
The class which is mapped should indicate typing information
within the "info" dictionary of mapped Column objects; see
the AnimalFact mapping below for an example.
"""
def __init__(self, key, value=None):
self.key = key
self.value = value
@hybrid_property
def value(self):
fieldname, discriminator = self.type_map[self.type]
if fieldname is None:
return None
else:
return getattr(self, fieldname)
@value.setter
def value(self, value):
py_type = type(value)
fieldname, discriminator = self.type_map[py_type]
self.type = discriminator
if fieldname is not None:
setattr(self, fieldname, value)
@value.deleter
def value(self):
self._set_value(None)
@value.comparator
class value(PropComparator):
"""A comparator for .value, builds a polymorphic comparison
via CASE."""
def __init__(self, cls):
self.cls = cls
def _case(self):
pairs = set(self.cls.type_map.values())
whens = [
(
literal_column("'%s'" % discriminator),
cast(getattr(self.cls, attribute), String),
)
for attribute, discriminator in pairs
if attribute is not None
]
return case(*whens, value=self.cls.type, else_=null())
def __eq__(self, other):
return self._case() == cast(other, String)
def __ne__(self, other):
return self._case() != cast(other, String)
def __repr__(self):
return "<%s %r=%r>" % (self.__class__.__name__, self.key, self.value)
@event.listens_for(
PolymorphicVerticalProperty, "mapper_configured", propagate=True
)
def on_new_class(mapper, cls_):
"""Look for Column objects with type info in them, and work up
a lookup table."""
info_dict = {}
info_dict[type(None)] = (None, "none")
info_dict["none"] = (None, "none")
for k in mapper.c.keys():
col = mapper.c[k]
if "type" in col.info:
python_type, discriminator = col.info["type"]
info_dict[python_type] = (k, discriminator)
info_dict[discriminator] = (k, discriminator)
cls_.type_map = info_dict
if __name__ == "__main__":
Base = declarative_base()
class AnimalFact(PolymorphicVerticalProperty, Base):
"""A fact about an animal."""
__tablename__ = "animal_fact"
animal_id = Column(ForeignKey("animal.id"), primary_key=True)
key = Column(Unicode(64), primary_key=True)
type = Column(Unicode(16))
# add information about storage for different types
# in the info dictionary of Columns
int_value = Column(Integer, info={"type": (int, "integer")})
char_value = Column(UnicodeText, info={"type": (str, "string")})
boolean_value = Column(Boolean, info={"type": (bool, "boolean")})
class Animal(ProxiedDictMixin, Base):
"""an Animal"""
__tablename__ = "animal"
id = Column(Integer, primary_key=True)
name = Column(Unicode(100))
facts = relationship(
"AnimalFact", collection_class=attribute_keyed_dict("key")
)
_proxied = association_proxy(
"facts",
"value",
creator=lambda key, value: AnimalFact(key=key, value=value),
)
def __init__(self, name):
self.name = name
def __repr__(self):
return "Animal(%r)" % self.name
@classmethod
def with_characteristic(self, key, value):
return self.facts.any(key=key, value=value)
engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)
session = Session(engine)
stoat = Animal("stoat")
stoat["color"] = "red"
stoat["cuteness"] = 7
stoat["weasel-like"] = True
session.add(stoat)
session.commit()
critter = session.query(Animal).filter(Animal.name == "stoat").one()
print(critter["color"])
print(critter["cuteness"])
print("changing cuteness value and type:")
critter["cuteness"] = "very cute"
session.commit()
marten = Animal("marten")
marten["cuteness"] = 5
marten["weasel-like"] = True
marten["poisonous"] = False
session.add(marten)
shrew = Animal("shrew")
shrew["cuteness"] = 5
shrew["weasel-like"] = False
shrew["poisonous"] = True
session.add(shrew)
session.commit()
q = session.query(Animal).filter(
Animal.facts.any(
and_(AnimalFact.key == "weasel-like", AnimalFact.value == True)
)
)
print("weasel-like animals", q.all())
q = session.query(Animal).filter(
Animal.with_characteristic("weasel-like", True)
)
print("weasel-like animals again", q.all())
q = session.query(Animal).filter(
Animal.with_characteristic("poisonous", False)
)
print("animals with poisonous=False", q.all())
q = session.query(Animal).filter(
or_(
Animal.with_characteristic("poisonous", False),
~Animal.facts.any(AnimalFact.key == "poisonous"),
)
)
print("non-poisonous animals", q.all())
q = session.query(Animal).filter(Animal.facts.any(AnimalFact.value == 5))
print("any animal with a .value of 5", q.all())
| PolymorphicVerticalProperty |
python | django__django | django/core/cache/backends/db.py | {
"start": 865,
"end": 1110
} | class ____(BaseCache):
def __init__(self, table, params):
super().__init__(params)
self._table = table
class CacheEntry:
_meta = Options(table)
self.cache_model_class = CacheEntry
| BaseDatabaseCache |
python | walkccc__LeetCode | solutions/2571. Minimum Operations to Reduce an Integer to 0/2571.py | {
"start": 0,
"end": 630
} | class ____:
def minOperations(self, n: int) -> int:
# The strategy is that when the end of n is
# 1. consecutive 1s, add 1 (2^0).
# 2. single 1, substract 1 (2^0).
# 3. 0, substract 2^k to omit the last 1. Equivalently, n >> 1.
#
# e.g.
#
# n = 0b101
# n -= 2^0 -> 0b100
# n -= 2^2 -> 0b0
# n = 0b1011
# n += 2^0 -> 0b1100
# n -= 2^2 -> 0b1000
# n -= 2^3 -> 0b0
ans = 0
while n > 0:
if (n & 3) == 3:
n += 1
ans += 1
elif n % 2 == 1:
n -= 1
ans += 1
else:
n >>= 1
return ans
| Solution |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 76200,
"end": 76558
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = TwoLayerLinearModel()
self.fc3 = torch.nn.Linear(5, 5).to(dtype=torch.float)
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
| NestedModel |
python | ansible__ansible | lib/ansible/module_utils/compat/version.py | {
"start": 3429,
"end": 10570
} | class ____(Version):
"""Version numbering for anal retentives and software idealists.
Implements the standard interface for version number classes as
described above. A version number consists of two or three
dot-separated numeric components, with an optional "pre-release" tag
on the end. The pre-release tag consists of the letter 'a' or 'b'
followed by a number. If the numeric components of two version
numbers are equal, then one with a pre-release tag will always
be deemed earlier (lesser) than one without.
The following are valid version numbers (shown in the order that
would be obtained by sorting according to the supplied cmp function):
0.4 0.4.0 (these two are equivalent)
0.4.1
0.5a1
0.5b3
0.5
0.9.6
1.0
1.0.4a3
1.0.4b1
1.0.4
The following are examples of invalid version numbers:
1
2.7.2.2
1.3.a4
1.3pl1
1.3c4
The rationale for this version numbering system will be explained
in the distutils documentation.
"""
version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
RE_FLAGS)
def parse(self, vstring):
match = self.version_re.match(vstring)
if not match:
raise ValueError("invalid version number '%s'" % vstring)
(major, minor, patch, prerelease, prerelease_num) = \
match.group(1, 2, 4, 5, 6)
if patch:
self.version = tuple(map(int, [major, minor, patch]))
else:
self.version = tuple(map(int, [major, minor])) + (0,)
if prerelease:
self.prerelease = (prerelease[0], int(prerelease_num))
else:
self.prerelease = None
def __str__(self):
if self.version[2] == 0:
vstring = '.'.join(map(str, self.version[0:2]))
else:
vstring = '.'.join(map(str, self.version))
if self.prerelease:
vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
return vstring
def _cmp(self, other):
if isinstance(other, str):
other = StrictVersion(other)
elif not isinstance(other, StrictVersion):
return NotImplemented
if self.version != other.version:
# numeric versions don't match
# prerelease stuff doesn't matter
if self.version < other.version:
return -1
else:
return 1
# have to compare prerelease
# case 1: neither has prerelease; they're equal
# case 2: self has prerelease, other doesn't; other is greater
# case 3: self doesn't have prerelease, other does: self is greater
# case 4: both have prerelease: must compare them!
if (not self.prerelease and not other.prerelease):
return 0
elif (self.prerelease and not other.prerelease):
return -1
elif (not self.prerelease and other.prerelease):
return 1
elif (self.prerelease and other.prerelease):
if self.prerelease == other.prerelease:
return 0
elif self.prerelease < other.prerelease:
return -1
else:
return 1
else:
raise AssertionError("never get here")
# end class StrictVersion
# The rules according to Greg Stein:
# 1) a version number has 1 or more numbers separated by a period or by
# sequences of letters. If only periods, then these are compared
# left-to-right to determine an ordering.
# 2) sequences of letters are part of the tuple for comparison and are
# compared lexicographically
# 3) recognize the numeric components may have leading zeroes
#
# The LooseVersion class below implements these rules: a version number
# string is split up into a tuple of integer and string components, and
# comparison is a simple tuple comparison. This means that version
# numbers behave in a predictable and obvious way, but a way that might
# not necessarily be how people *want* version numbers to behave. There
# wouldn't be a problem if people could stick to purely numeric version
# numbers: just split on period and compare the numbers as tuples.
# However, people insist on putting letters into their version numbers;
# the most common purpose seems to be:
# - indicating a "pre-release" version
# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
# - indicating a post-release patch ('p', 'pl', 'patch')
# but of course this can't cover all version number schemes, and there's
# no way to know what a programmer means without asking them.
#
# The problem is what to do with letters (and other non-numeric
# characters) in a version number. The current implementation does the
# obvious and predictable thing: keep them as strings and compare
# lexically within a tuple comparison. This has the desired effect if
# an appended letter sequence implies something "post-release":
# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
#
# However, if letters in a version number imply a pre-release version,
# the "obvious" thing isn't correct. Eg. you would expect that
# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
# implemented here, this just isn't so.
#
# Two possible solutions come to mind. The first is to tie the
# comparison algorithm to a particular set of semantic rules, as has
# been done in the StrictVersion class above. This works great as long
# as everyone can go along with bondage and discipline. Hopefully a
# (large) subset of Python module programmers will agree that the
# particular flavour of bondage and discipline provided by StrictVersion
# provides enough benefit to be worth using, and will submit their
# version numbering scheme to its domination. The free-thinking
# anarchists in the lot will never give in, though, and something needs
# to be done to accommodate them.
#
# Perhaps a "moderately strict" version class could be implemented that
# lets almost anything slide (syntactically), and makes some heuristic
# assumptions about non-digits in version number strings. This could
# sink into special-case-hell, though; if I was as talented and
# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
# just as happy dealing with things like "2g6" and "1.13++". I don't
# think I'm smart enough to do it right though.
#
# In any case, I've coded the test suite for this module (see
# ../test/test_version.py) specifically to fail on things like comparing
# "1.2a2" and "1.2". That's not because the *code* is doing anything
# wrong, it's because the simple, obvious design doesn't match my
# complicated, hairy expectations for real-world version numbers. It
# would be a snap to fix the test suite to say, "Yep, LooseVersion does
# the Right Thing" (ie. the code matches the conception). But I'd rather
# have a conception that matches common notions about version numbers.
| StrictVersion |
python | mlflow__mlflow | mlflow/tracing/constant.py | {
"start": 1509,
"end": 1843
} | class ____:
"""Key for the token usage information in the `mlflow.chat.tokenUsage` span attribute."""
INPUT_TOKENS = "input_tokens"
OUTPUT_TOKENS = "output_tokens"
TOTAL_TOKENS = "total_tokens"
@classmethod
def all_keys(cls):
return [cls.INPUT_TOKENS, cls.OUTPUT_TOKENS, cls.TOTAL_TOKENS]
| TokenUsageKey |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/draugiem/tests.py | {
"start": 545,
"end": 5451
} | class ____(TestCase):
def setUp(self):
# workaround to create a session. see:
# https://code.djangoproject.com/ticket/11475
User.objects.create_user(
"anakin", "skywalker@deathstar.example.com", "s1thrul3s"
)
self.client.login(username="anakin", password="s1thrul3s")
app = SocialApp.objects.create(
provider=DraugiemProvider.id,
name=DraugiemProvider.id,
client_id="app123id",
key=DraugiemProvider.id,
secret="dummy",
)
request = RequestFactory().get("/")
self.provider = app.get_provider(request)
if app_settings.SITES_ENABLED:
from django.contrib.sites.models import Site
app.sites.add(Site.objects.get_current())
self.app = app
def get_draugiem_login_response(self):
"""
Sample Draugiem.lv response
"""
return {
"apikey": "12345",
"uid": "42",
"users": {
"42": {
"age": "266",
"imgl": "http://cdn.memegenerator.net/instances/500x/23395689.jpg",
"surname": "Skywalker",
"url": "/user/42/",
"imgi": "http://cdn.memegenerator.net/instances/500x/23395689.jpg",
"nick": "Sky Guy",
"created": "09.11.1812 11:26:15",
"deleted": "false",
"imgm": "http://cdn.memegenerator.net/instances/500x/23395689.jpg",
"sex": "M",
"type": "User_Default",
"uid": "42",
"place": "London",
"emailHash": "3f198f21434gfd2f2b4rs05939shk93f3815bc6aa",
"name": "Anakin",
"adult": "1",
"birthday": "1750-09-13",
"img": "http://cdn.memegenerator.net/instances/500x/23395689.jpg",
}
},
}
def get_socialaccount(self, response, token):
"""
Returns SocialLogin based on the data from the request
"""
request = Mock()
login = self.provider.sociallogin_from_response(request, response)
login.token = token
return login
def mock_socialaccount_state(self):
"""
SocialLogin depends on Session state - a tuple of request
params and a random string
"""
session = self.client.session
session[statekit.STATES_SESSION_KEY] = {
"12345": ({"process": "login", "scope": "", "auth_params": ""}, time.time())
}
session.save()
def test_login_redirect(self):
response = self.client.get(reverse(views.login))
redirect_url = reverse(views.callback)
full_redirect_url = "http://testserver" + redirect_url
secret = self.app.secret + full_redirect_url
redirect_url_hash = md5(secret.encode("utf-8")).hexdigest()
params = {
"app": self.app.client_id,
"hash": redirect_url_hash,
"redirect": full_redirect_url,
}
self.assertRedirects(
response,
"%s?%s" % (views.AUTHORIZE_URL, urlencode(params)),
fetch_redirect_response=False,
)
def test_callback_no_auth_status(self):
response = self.client.get(reverse(views.callback))
self.assertTemplateUsed(response, "socialaccount/authentication_error.html")
def test_callback_invalid_auth_status(self):
response = self.client.get(reverse(views.callback), {"dr_auth_status": "fail"})
self.assertTemplateUsed(response, "socialaccount/authentication_error.html")
def test_callback(self):
with patch(
"allauth.socialaccount.providers.draugiem.views.draugiem_complete_login"
) as draugiem_complete_login:
self.mock_socialaccount_state()
response_json = self.get_draugiem_login_response()
token = SocialToken(app=self.app, token=response_json["apikey"])
login = self.get_socialaccount(response_json, token)
draugiem_complete_login.return_value = login
response = self.client.get(
reverse(views.callback),
{"dr_auth_status": "ok", "dr_auth_code": "42"},
)
self.assertRedirects(
response, "/accounts/profile/", fetch_redirect_response=False
)
socialaccount = SocialAccount.objects.filter(
provider=DraugiemProvider.id
).last()
pacc = socialaccount.get_provider_account()
assert (
pacc.get_avatar_url()
== "http://cdn.memegenerator.net/instances/500x/23395689.jpg"
)
assert pacc.to_str() == "Anakin"
| DraugiemTests |
python | huggingface__transformers | src/transformers/models/conditional_detr/modeling_conditional_detr.py | {
"start": 12779,
"end": 16053
} | class ____(nn.Module):
"""
Convolutional backbone, using either the AutoBackbone API or one from the timm library.
nn.BatchNorm2d layers are replaced by ConditionalDetrFrozenBatchNorm2d as defined above.
"""
def __init__(self, config):
super().__init__()
self.config = config
# For backwards compatibility we have to use the timm library directly instead of the AutoBackbone API
if config.use_timm_backbone:
# We default to values which were previously hard-coded. This enables configurability from the config
# using backbone arguments, while keeping the default behavior the same.
requires_backends(self, ["timm"])
kwargs = getattr(config, "backbone_kwargs", {})
kwargs = {} if kwargs is None else kwargs.copy()
out_indices = kwargs.pop("out_indices", (1, 2, 3, 4))
num_channels = kwargs.pop("in_chans", config.num_channels)
if config.dilation:
kwargs["output_stride"] = kwargs.get("output_stride", 16)
backbone = create_model(
config.backbone,
pretrained=config.use_pretrained_backbone,
features_only=True,
out_indices=out_indices,
in_chans=num_channels,
**kwargs,
)
else:
backbone = load_backbone(config)
# replace batch norm by frozen batch norm
with torch.no_grad():
replace_batch_norm(backbone)
self.model = backbone
self.intermediate_channel_sizes = (
self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels
)
backbone_model_type = None
if config.backbone is not None:
backbone_model_type = config.backbone
elif config.backbone_config is not None:
backbone_model_type = config.backbone_config.model_type
else:
raise ValueError("Either `backbone` or `backbone_config` should be provided in the config")
if "resnet" in backbone_model_type:
for name, parameter in self.model.named_parameters():
if config.use_timm_backbone:
if "layer2" not in name and "layer3" not in name and "layer4" not in name:
parameter.requires_grad_(False)
else:
if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name:
parameter.requires_grad_(False)
def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor):
# send pixel_values through the model to get list of feature maps
features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps
out = []
for feature_map in features:
# downsample pixel_mask to match shape of corresponding feature_map
mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0]
out.append((feature_map, mask))
return out
# Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->ConditionalDetr
| ConditionalDetrConvEncoder |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/check_ops_test.py | {
"start": 12135,
"end": 16201
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_not_equal(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([10, 20], name="small")
with ops.control_dependencies(
[check_ops.assert_none_equal(big, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
@test_util.run_deprecated_v1
def test_raises_when_equal(self):
small = constant_op.constant([3, 1], name="small")
with self.assertRaisesOpError("x != y did not hold"):
with ops.control_dependencies(
[check_ops.assert_none_equal(small, small)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_not_equal_and_broadcastable_shapes(self):
small = constant_op.constant([1, 2], name="small")
big = constant_op.constant([3], name="big")
with ops.control_dependencies(
[check_ops.assert_none_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_raises_when_not_equal_but_non_broadcastable_shapes(self):
small = constant_op.constant([1, 1, 1], name="small")
big = constant_op.constant([10, 10], name="big")
# The exception in eager and non-eager mode is different because
# eager mode relies on shape check done as part of the C++ op, while
# graph mode does shape checks when creating the `Operation` instance.
with self.assertRaisesIncompatibleShapesError(
(ValueError, errors.InvalidArgumentError)):
with ops.control_dependencies(
[check_ops.assert_none_equal(small, big)]):
out = array_ops.identity(small)
self.evaluate(out)
@test_util.run_in_graph_and_eager_modes
def test_doesnt_raise_when_both_empty(self):
larry = constant_op.constant([])
curly = constant_op.constant([])
with ops.control_dependencies(
[check_ops.assert_none_equal(larry, curly)]):
out = array_ops.identity(larry)
self.evaluate(out)
def test_returns_none_with_eager(self):
with context.eager_mode():
t1 = constant_op.constant([1, 2])
t2 = constant_op.constant([3, 4])
x = check_ops.assert_none_equal(t1, t2)
assert x is None
def test_static_check_in_graph_mode(self):
with ops.Graph().as_default():
with self.assertRaisesRegex( # pylint:disable=g-error-prone-assert-raises
errors.InvalidArgumentError, "Custom error message"):
check_ops.assert_none_equal(1, 1, message="Custom error message")
def test_error_message_eager(self):
# Note that the following three strings are regexes
expected_error_msg_full = r"""\[ *0\. +1\. +2\. +3\. +4\. +5\.\]"""
expected_error_msg_default = r"""\[ *0\. +1\. +2\.\]"""
expected_error_msg_short = r"""\[ *0\. +1\.\]"""
with context.eager_mode():
t = constant_op.constant(
np.array(range(6)), shape=[2, 3], dtype=np.float32)
with self.assertRaisesRegex( # pylint:disable=g-error-prone-assert-raises
errors.InvalidArgumentError, expected_error_msg_full):
check_ops.assert_none_equal(
t, t, message="This is the error message.", summarize=10)
with self.assertRaisesRegex( # pylint:disable=g-error-prone-assert-raises
errors.InvalidArgumentError, expected_error_msg_full):
check_ops.assert_none_equal(
t, t, message="This is the error message.", summarize=-1)
with self.assertRaisesRegex( # pylint:disable=g-error-prone-assert-raises
errors.InvalidArgumentError, expected_error_msg_default):
check_ops.assert_none_equal(t, t, message="This is the error message.")
with self.assertRaisesRegex( # pylint:disable=g-error-prone-assert-raises
errors.InvalidArgumentError, expected_error_msg_short):
check_ops.assert_none_equal(
t, t, message="This is the error message.", summarize=2)
| AssertNoneEqualTest |
python | dask__distributed | distributed/dashboard/components/scheduler.py | {
"start": 3306,
"end": 5953
} | class ____(DashboardComponent):
"""Occupancy (in time) per worker"""
@log_errors
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"occupancy": [0, 0],
"worker": ["a", "b"],
"x": [0.0, 0.1],
"y": [1, 2],
"ms": [1, 2],
"color": ["red", "blue"],
"escaped_worker": ["a", "b"],
}
)
self.root = figure(
title="Occupancy",
tools="",
toolbar_location="above",
x_axis_type="datetime",
min_border_bottom=50,
**kwargs,
)
rect = self.root.rect(
source=self.source, x="x", width="ms", y="y", height=0.9, color="color"
)
rect.nonselection_glyph = None
self.root.xaxis.minor_tick_line_alpha = 0
self.root.yaxis.visible = False
self.root.ygrid.visible = False
# fig.xaxis[0].formatter = NumeralTickFormatter(format='0.0s')
self.root.x_range.start = 0
tap = TapTool(callback=OpenURL(url="./info/worker/@escaped_worker.html"))
hover = HoverTool()
hover.tooltips = "@worker : @occupancy s."
hover.point_policy = "follow_mouse"
self.root.add_tools(hover, tap)
@without_property_validation
@log_errors
def update(self):
workers = self.scheduler.workers.values()
y = list(range(len(workers)))
occupancy = [ws.occupancy for ws in workers]
ms = [occ * 1000 for occ in occupancy]
x = [occ / 500 for occ in occupancy]
total = sum(occupancy)
color = []
for ws in workers:
if ws in self.scheduler.idle:
color.append("red")
elif ws in self.scheduler.saturated:
color.append("green")
else:
color.append("blue")
if total:
self.root.title.text = (
f"Occupancy -- total time: {format_time(total)} "
f"wall time: {format_time(total / self.scheduler.total_nthreads)}"
)
else:
self.root.title.text = "Occupancy"
if occupancy:
result = {
"occupancy": occupancy,
"worker": [ws.address for ws in workers],
"ms": ms,
"color": color,
"escaped_worker": [url_escape(ws.address) for ws in workers],
"x": x,
"y": y,
}
update(self.source, result)
| Occupancy |
python | scipy__scipy | scipy/signal/_czt.py | {
"start": 8797,
"end": 19445
} | class ____(CZT):
"""
Create a callable zoom FFT transform function.
This is a specialization of the chirp z-transform (`CZT`) for a set of
equally-spaced frequencies around the unit circle, used to calculate a
section of the FFT more efficiently than calculating the entire FFT and
truncating.
Parameters
----------
n : int
The size of the signal.
fn : array_like
A length-2 sequence [`f1`, `f2`] giving the frequency range, or a
scalar, for which the range [0, `fn`] is assumed.
m : int, optional
The number of points to evaluate. Default is `n`.
fs : float, optional
The sampling frequency. If ``fs=10`` represented 10 kHz, for example,
then `f1` and `f2` would also be given in kHz.
The default sampling frequency is 2, so `f1` and `f2` should be
in the range [0, 1] to keep the transform below the Nyquist
frequency.
endpoint : bool, optional
If True, `f2` is the last sample. Otherwise, it is not included.
Default is False.
Returns
-------
f : ZoomFFT
Callable object ``f(x, axis=-1)`` for computing the zoom FFT on `x`.
See Also
--------
zoom_fft : Convenience function for calculating a zoom FFT.
Notes
-----
The defaults are chosen such that ``f(x, 2)`` is equivalent to
``fft.fft(x)`` and, if ``m > len(x)``, that ``f(x, 2, m)`` is equivalent to
``fft.fft(x, m)``.
Sampling frequency is 1/dt, the time step between samples in the
signal `x`. The unit circle corresponds to frequencies from 0 up
to the sampling frequency. The default sampling frequency of 2
means that `f1`, `f2` values up to the Nyquist frequency are in the
range [0, 1). For `f1`, `f2` values expressed in radians, a sampling
frequency of 2*pi should be used.
Remember that a zoom FFT can only interpolate the points of the existing
FFT. It cannot help to resolve two separate nearby frequencies.
Frequency resolution can only be increased by increasing acquisition
time.
These functions are implemented using Bluestein's algorithm (as is
`scipy.fft`). [2]_
References
----------
.. [1] Steve Alan Shilling, "A study of the chirp z-transform and its
applications", pg 29 (1970)
https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf
.. [2] Leo I. Bluestein, "A linear filtering approach to the computation
of the discrete Fourier transform," Northeast Electronics Research
and Engineering Meeting Record 10, 218-219 (1968).
Examples
--------
To plot the transform results use something like the following:
>>> import numpy as np
>>> from scipy.signal import ZoomFFT
>>> t = np.linspace(0, 1, 1021)
>>> x = np.cos(2*np.pi*15*t) + np.sin(2*np.pi*17*t)
>>> f1, f2 = 5, 27
>>> transform = ZoomFFT(len(x), [f1, f2], len(x), fs=1021)
>>> X = transform(x)
>>> f = np.linspace(f1, f2, len(x))
>>> import matplotlib.pyplot as plt
>>> plt.plot(f, 20*np.log10(np.abs(X)))
>>> plt.show()
"""
def __init__(self, n, fn, m=None, *, fs=2, endpoint=False):
    """Precompute the Bluestein chirp z-transform constants for a zoom FFT.

    Parameters
    ----------
    n : int
        Length of the signals that will be transformed.
    fn : array_like
        A length-2 sequence [`f1`, `f2`] giving the frequency range, or a
        scalar, for which the range [0, `fn`] is assumed.
    m : int, optional
        Number of output points to evaluate; defaults to `n`.
    fs : float, optional
        Sampling frequency.  The default of 2 means `f1`, `f2` should lie
        in [0, 1] to stay below the Nyquist frequency.
    endpoint : bool, optional
        If True, `f2` is the last sample; otherwise it is not included.
    """
    m = _validate_sizes(n, m)

    # Index vector covering both input and output lengths.  The dtype is the
    # smallest integer type that can represent -max(m, n)**2, so that k**2
    # below cannot overflow.
    k = arange(max(m, n), dtype=np.min_scalar_type(-max(m, n)**2))

    if np.size(fn) == 2:
        f1, f2 = fn
    elif np.size(fn) == 1:
        f1, f2 = 0.0, fn
    else:
        raise ValueError('fn must be a scalar or 2-length sequence')

    self.f1, self.f2, self.fs = f1, f2, fs

    # Fraction of the sampling band advanced per output point.  With
    # endpoint=True the step is stretched so the m-th point lands on f2.
    if endpoint:
        scale = ((f2 - f1) * m) / (fs * (m - 1))
    else:
        scale = (f2 - f1) / fs

    # Starting point on the unit circle (angle 2*pi*f1/fs) and the chirp
    # sequence w**(k**2/2) used by Bluestein's algorithm.
    a = cmath.exp(2j * pi * f1/fs)
    wk2 = np.exp(-(1j * pi * scale * k**2) / m)

    self.w = cmath.exp(-2j*pi/m * scale)  # ratio between successive points
    self.a = a
    self.m, self.n = m, n

    # Pre-multiplication chirp applied to the input: a**(-k) * w**(k**2/2)
    # for k in [0, n).
    ak = np.exp(-2j * pi * f1/fs * k[:n])
    self._Awk2 = ak * wk2[:n]

    # FFT of the reciprocal chirp, padded to a fast convolution length, so
    # the transform reduces to one multiply in the frequency domain.
    nfft = next_fast_len(n + m - 1)
    self._nfft = nfft
    self._Fwk2 = fft(1/np.hstack((wk2[n-1:0:-1], wk2[:m])), nfft)
    self._wk2 = wk2[:m]
    self._yidx = slice(n-1, n+m-1)  # valid part of the circular convolution
def czt(x, m=None, w=None, a=1+0j, *, axis=-1):
    """
    Compute the frequency response of `x` around a spiral in the Z plane.

    Parameters
    ----------
    x : array
        The signal to transform.
    m : int, optional
        The number of output points desired.  Default is the length of the
        input data.
    w : complex, optional
        The ratio between points in each step.  This must be precise or the
        accumulated error will degrade the tail of the output sequence.
        Defaults to equally spaced points around the entire unit circle.
    a : complex, optional
        The starting point in the complex plane.  Default is 1+0j.
    axis : int, optional
        Axis over which to compute the FFT.  If not given, the last axis is
        used.

    Returns
    -------
    out : ndarray
        An array of the same dimensions as `x`, but with the length of the
        transformed axis set to `m`.

    See Also
    --------
    CZT : Class that creates a callable chirp z-transform function.
    zoom_fft : Convenience function for partial FFT calculations.

    Notes
    -----
    The defaults are chosen such that ``signal.czt(x)`` is equivalent to
    ``fft.fft(x)`` and, if ``m > len(x)``, that ``signal.czt(x, m)`` is
    equivalent to ``fft.fft(x, m)``.

    If the transform needs to be repeated, use `CZT` to construct a
    specialized transform function which can be reused without
    recomputing constants.

    An example application is in system identification, repeatedly
    evaluating small slices of the z-transform of a system, around where a
    pole is expected to exist, to refine the estimate of the pole's true
    location. [1]_

    References
    ----------
    .. [1] Steve Alan Shilling, "A study of the chirp z-transform and its
           applications", pg 20 (1970)
           https://krex.k-state.edu/dspace/bitstream/handle/2097/7844/LD2668R41972S43.pdf
    """
    # Build a one-shot specialized transform, then apply it along `axis`.
    arr = np.asarray(x)
    return CZT(arr.shape[axis], m=m, w=w, a=a)(arr, axis=axis)
def zoom_fft(x, fn, m=None, *, fs=2, endpoint=False, axis=-1):
    """
    Compute the DFT of `x` only for frequencies in range `fn`.

    Parameters
    ----------
    x : array
        The signal to transform.
    fn : array_like
        A length-2 sequence [`f1`, `f2`] giving the frequency range, or a
        scalar, for which the range [0, `fn`] is assumed.
    m : int, optional
        The number of points to evaluate.  The default is the length of `x`.
    fs : float, optional
        The sampling frequency.  If ``fs=10`` represented 10 kHz, for
        example, then `f1` and `f2` would also be given in kHz.
        The default sampling frequency is 2, so `f1` and `f2` should be in
        the range [0, 1] to keep the transform below the Nyquist frequency.
    endpoint : bool, optional
        If True, `f2` is the last sample.  Otherwise, it is not included.
        Default is False.
    axis : int, optional
        Axis over which to compute the FFT.  If not given, the last axis is
        used.

    Returns
    -------
    out : ndarray
        The transformed signal.  The Fourier transform will be calculated
        at the points f1, f1+df, f1+2df, ..., f2, where df=(f2-f1)/m.

    See Also
    --------
    ZoomFFT : Class that creates a callable partial FFT function.

    Notes
    -----
    The defaults are chosen such that ``signal.zoom_fft(x, 2)`` is
    equivalent to ``fft.fft(x)`` and, if ``m > len(x)``, that
    ``signal.zoom_fft(x, 2, m)`` is equivalent to ``fft.fft(x, m)``.

    To graph the magnitude of the resulting transform, use::

        plot(linspace(f1, f2, m, endpoint=False), abs(zoom_fft(x, [f1, f2], m)))

    If the transform needs to be repeated, use `ZoomFFT` to construct a
    specialized transform function which can be reused without
    recomputing constants.
    """
    # Build a one-shot partial-FFT transform, then apply it along `axis`.
    arr = np.asarray(x)
    return ZoomFFT(arr.shape[axis], fn, m=m, fs=fs, endpoint=endpoint)(arr, axis=axis)
| ZoomFFT |
python | getsentry__sentry | src/sentry/deletions/defaults/repository.py | {
"start": 750,
"end": 1509
} | class ____(ModelDeletionTask[Repository]):
def should_proceed(self, instance: Repository) -> bool:
"""
Only delete repositories that haven't been undeleted.
"""
return instance.status in {ObjectStatus.PENDING_DELETION, ObjectStatus.DELETION_IN_PROGRESS}
def get_child_relations(self, instance: Repository) -> list[BaseRelation]:
return _get_repository_child_relations(instance)
def delete_instance(self, instance: Repository) -> None:
# TODO: child_relations should also send pending_delete so we
# don't have to do this here.
pending_delete.send(sender=type(instance), instance=instance, actor=self.get_actor())
return super().delete_instance(instance)
| RepositoryDeletionTask |
python | google__pytype | pytype/tools/xref/callgraph.py | {
"start": 348,
"end": 425
} | class ____:
name: str
node_type: str
type: Any
@dataclasses.dataclass
| Arg |
python | huggingface__transformers | tests/models/idefics/test_image_processing_idefics.py | {
"start": 1120,
"end": 4405
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
size=None,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
):
size = size if size is not None else {"shortest_edge": 30}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
# self.size = size
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"image_size": self.image_size,
}
def get_expected_values(self, image_inputs, batched=False):
"""
This function computes the expected height and width when providing images to IdeficsImageProcessor,
assuming do_resize is set to True with a scalar size and size_divisor.
"""
if not batched:
size = self.image_size
image = image_inputs[0]
if isinstance(image, Image.Image):
w, h = image.size
elif isinstance(image, np.ndarray):
h, w = image.shape[0], image.shape[1]
else:
h, w = image.shape[1], image.shape[2]
scale = size / min(w, h)
if h < w:
newh, neww = size, scale * w
else:
newh, neww = scale * h, size
max_size = int((1333 / 800) * size)
if max(newh, neww) > max_size:
scale = max_size / max(newh, neww)
newh = newh * scale
neww = neww * scale
newh, neww = int(newh + 0.5), int(neww + 0.5)
expected_height, expected_width = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
expected_values = []
for image in image_inputs:
expected_height, expected_width = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
expected_height = max(expected_values, key=lambda item: item[0])[0]
expected_width = max(expected_values, key=lambda item: item[1])[1]
return expected_height, expected_width
def expected_output_image_shape(self, images):
height, width = self.get_expected_values(images, batched=True)
return (self.num_channels, height, width)
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| IdeficsImageProcessingTester |
python | patrys__httmock | tests.py | {
"start": 7437,
"end": 11249
} | class ____(unittest.TestCase):
content = {'name': 'foo', 'ipv4addr': '127.0.0.1'}
content_list = list(content.keys())
def test_response_auto_json(self):
r = response(0, self.content)
self.assertTrue(isinstance(r.content, binary_type))
self.assertTrue(isinstance(r.text, text_type))
self.assertEqual(r.json(), self.content)
r = response(0, self.content_list)
self.assertEqual(r.json(), self.content_list)
def test_response_status_code(self):
r = response(200)
self.assertEqual(r.status_code, 200)
def test_response_headers(self):
r = response(200, None, {'Content-Type': 'application/json'})
self.assertEqual(r.headers['content-type'], 'application/json')
def test_response_raw_version(self):
r = response(200, None, {'Content-Type': 'application/json'},
http_vsn=10)
self.assertEqual(r.raw.version, 10)
def test_response_cookies(self):
@all_requests
def response_content(url, request):
return response(200, 'Foo', {'Set-Cookie': 'foo=bar;'},
request=request)
with HTTMock(response_content):
r = requests.get('https://example.com/')
self.assertEqual(len(r.cookies), 1)
self.assertTrue('foo' in r.cookies)
self.assertEqual(r.cookies['foo'], 'bar')
def test_response_session_cookies(self):
@all_requests
def response_content(url, request):
return response(200, 'Foo', {'Set-Cookie': 'foo=bar;'},
request=request)
session = requests.Session()
with HTTMock(response_content):
r = session.get('https://foo_bar')
self.assertEqual(len(r.cookies), 1)
self.assertTrue('foo' in r.cookies)
self.assertEqual(r.cookies['foo'], 'bar')
self.assertEqual(len(session.cookies), 1)
self.assertTrue('foo' in session.cookies)
self.assertEqual(session.cookies['foo'], 'bar')
def test_session_persistent_cookies(self):
session = requests.Session()
with HTTMock(lambda u, r: response(200, 'Foo', {'Set-Cookie': 'foo=bar;'}, request=r)):
session.get('https://foo_bar')
with HTTMock(lambda u, r: response(200, 'Baz', {'Set-Cookie': 'baz=qux;'}, request=r)):
session.get('https://baz_qux')
self.assertEqual(len(session.cookies), 2)
self.assertTrue('foo' in session.cookies)
self.assertEqual(session.cookies['foo'], 'bar')
self.assertTrue('baz' in session.cookies)
self.assertEqual(session.cookies['baz'], 'qux')
def test_python_version_encoding_differences(self):
# Previous behavior would result in this test failing in Python3 due
# to how requests checks for utf-8 JSON content in requests.utils with:
#
# TypeError: Can't convert 'bytes' object to str implicitly
@all_requests
def get_mock(url, request):
return {'content': self.content,
'headers': {'content-type': 'application/json'},
'status_code': 200,
'elapsed': 5}
with HTTMock(get_mock):
response = requests.get('http://example.com/')
self.assertEqual(self.content, response.json())
def test_mock_redirect(self):
@urlmatch(netloc='example.com')
def get_mock(url, request):
return {'status_code': 302,
'headers': {'Location': 'http://google.com/'}}
with HTTMock(get_mock, google_mock):
response = requests.get('http://example.com/')
self.assertEqual(len(response.history), 1)
self.assertEqual(response.content, b'Hello from Google')
| ResponseTest |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 21105,
"end": 21296
} | class ____(models.Model):
greeting = models.CharField(max_length=100)
history = HistoricalRecords(history_change_reason_field=models.TextField(null=True))
| UserTextFieldChangeReasonModel |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/types.py | {
"start": 743,
"end": 802
} | class ____(sqltypes.Text):
__visit_name__ = "NCLOB"
| NCLOB |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/fs/test_s3.py | {
"start": 1498,
"end": 3149
} | class ____:
def test_get_s3fs(self):
from airflow.providers.amazon.aws.fs.s3 import get_fs
fs = get_fs(conn_id=TEST_CONN, storage_options={"key": "value"})
fs = cast("s3fs.S3FileSystem", fs)
assert "s3" in fs.protocol
assert fs.config_kwargs["key"] == "value"
@patch("s3fs.S3FileSystem", autospec=True)
def test_get_s3fs_anonymous(self, s3fs, monkeypatch):
from airflow.providers.amazon.aws.fs.s3 import get_fs
# remove all AWS_* env vars
for env_name in os.environ:
if env_name.startswith("AWS"):
monkeypatch.delenv(env_name, raising=False)
get_fs(conn_id=None, storage_options=None)
assert s3fs.call_args.kwargs["anon"] is True
@responses.activate
def test_signer(self):
from airflow.providers.amazon.aws.fs.s3 import s3v4_rest_signer
req = AWSRequest(
method="GET",
url=TEST_REQ_URI,
headers={"x": "y"},
)
req.context = {"client_region": "antarctica"}
responses.add(
responses.POST,
f"{TEST_SIGNER_URL}/v1/aws/s3/sign",
json={
"uri": TEST_SIGNER_RESP_URL,
"headers": {
TEST_HEADER_KEY: [TEST_HEADER_VALUE],
},
},
)
req = s3v4_rest_signer(
{
"uri": TEST_SIGNER_URL,
"token": TEST_SIGNER_TOKEN,
},
req,
)
assert req.url == TEST_SIGNER_RESP_URL
assert req.headers[TEST_HEADER_KEY] == TEST_HEADER_VALUE
| TestFilesystem |
python | matplotlib__matplotlib | lib/matplotlib/backends/_backend_gtk.py | {
"start": 10553,
"end": 10917
} | class ____(backend_tools.RubberbandBase):
def draw_rubberband(self, x0, y0, x1, y1):
_NavigationToolbar2GTK.draw_rubberband(
self._make_classic_style_pseudo_toolbar(), None, x0, y0, x1, y1)
def remove_rubberband(self):
_NavigationToolbar2GTK.remove_rubberband(
self._make_classic_style_pseudo_toolbar())
| RubberbandGTK |
python | cherrypy__cherrypy | cherrypy/_cptools.py | {
"start": 14174,
"end": 14259
} | class ____(HandlerTool):
"""An HTTP session authentication tool."""
| SessionAuthTool |
python | facebook__pyre-check | client/dataclasses_json_extensions.py | {
"start": 706,
"end": 989
} | class ____(DataclassJsonMixinWithCachedSchema):
dataclass_json_config: Mapping[str, object] = dataclasses_json.config(
letter_case=dataclasses_json.LetterCase.CAMEL,
undefined=dataclasses_json.Undefined.EXCLUDE,
)["dataclasses_json"]
| CamlCaseAndExcludeJsonMixin |
python | pypa__warehouse | tests/unit/test_forms.py | {
"start": 2495,
"end": 3938
} | class ____:
@pytest.mark.parametrize(
"inbound_data",
[
"A link https://example.com",
"query string https://example.com?query=string",
"anchor https://example.com#fragment",
"qs and anchor https://example.com?query=string#fragment",
"path, qs, anchor https://example.com/path?query=string#fragment",
"A comment with a > character",
"A comment with a < character",
"A comment with a & character",
"A comment with a ' character",
'A comment with a " character',
],
)
def test_valid(self, inbound_data):
validator = PreventHTMLTagsValidator()
validator(pretend.stub(), pretend.stub(data=inbound_data))
def test_invalid(self):
validator = PreventHTMLTagsValidator()
with pytest.raises(ValidationError) as exc:
validator(
pretend.stub(), pretend.stub(data="<img src='https://example.com'>")
)
assert str(exc.value) == "HTML tags are not allowed"
def test_custom_message(self):
validator = PreventHTMLTagsValidator(message="No HTML allowed")
with pytest.raises(ValidationError) as exc:
validator(
pretend.stub(), pretend.stub(data="<img src='https://example.com'>")
)
assert str(exc.value) == "No HTML allowed"
| TestPreventHTMLTagsValidator |
python | encode__django-rest-framework | tests/test_multitable_inheritance.py | {
"start": 512,
"end": 643
} | class ____(serializers.ModelSerializer):
class Meta:
model = ChildModel
fields = '__all__'
| DerivedModelSerializer |
python | TheAlgorithms__Python | data_structures/linked_list/deque_doubly.py | {
"start": 1649,
"end": 4073
} | class ____(_DoublyLinkedBase):
def first(self):
"""return first element
>>> d = LinkedDeque()
>>> d.add_first('A').first()
'A'
>>> d.add_first('B').first()
'B'
"""
if self.is_empty():
raise Exception("List is empty")
return self._header._next._data
def last(self):
"""return last element
>>> d = LinkedDeque()
>>> d.add_last('A').last()
'A'
>>> d.add_last('B').last()
'B'
"""
if self.is_empty():
raise Exception("List is empty")
return self._trailer._prev._data
# DEque Insert Operations (At the front, At the end)
def add_first(self, element):
"""insertion in the front
>>> LinkedDeque().add_first('AV').first()
'AV'
"""
return self._insert(self._header, element, self._header._next)
def add_last(self, element):
"""insertion in the end
>>> LinkedDeque().add_last('B').last()
'B'
"""
return self._insert(self._trailer._prev, element, self._trailer)
# DEqueu Remove Operations (At the front, At the end)
def remove_first(self):
"""removal from the front
>>> d = LinkedDeque()
>>> d.is_empty()
True
>>> d.remove_first()
Traceback (most recent call last):
...
IndexError: remove_first from empty list
>>> d.add_first('A') # doctest: +ELLIPSIS
<data_structures.linked_list.deque_doubly.LinkedDeque object at ...
>>> d.remove_first()
'A'
>>> d.is_empty()
True
"""
if self.is_empty():
raise IndexError("remove_first from empty list")
return self._delete(self._header._next)
def remove_last(self):
"""removal in the end
>>> d = LinkedDeque()
>>> d.is_empty()
True
>>> d.remove_last()
Traceback (most recent call last):
...
IndexError: remove_first from empty list
>>> d.add_first('A') # doctest: +ELLIPSIS
<data_structures.linked_list.deque_doubly.LinkedDeque object at ...
>>> d.remove_last()
'A'
>>> d.is_empty()
True
"""
if self.is_empty():
raise IndexError("remove_first from empty list")
return self._delete(self._trailer._prev)
| LinkedDeque |
python | nedbat__coveragepy | tests/test_phystokens.py | {
"start": 1569,
"end": 5540
} | class ____(CoverageTest):
"""Tests for coverage.py's improved tokenizer."""
run_in_temp_dir = False
def check_tokenization(self, source: str) -> None:
"""Tokenize `source`, then put it back together, should be the same."""
tokenized = ""
for line in source_token_lines(source):
text = "".join(t for _, t in line)
tokenized += text + "\n"
# source_token_lines doesn't preserve trailing spaces, so trim all that
# before comparing.
source = source.replace("\r\n", "\n")
source = re.sub(r"(?m)[ \t]+$", "", source)
tokenized = re.sub(r"(?m)[ \t]+$", "", tokenized)
assert source == tokenized
def check_file_tokenization(self, fname: str) -> None:
"""Use the contents of `fname` for `check_tokenization`."""
self.check_tokenization(get_python_source(fname))
def test_simple(self) -> None:
assert list(source_token_lines(SIMPLE)) == SIMPLE_TOKENS
self.check_tokenization(SIMPLE)
def test_missing_final_newline(self) -> None:
# We can tokenize source that is missing the final newline.
assert list(source_token_lines(SIMPLE.rstrip())) == SIMPLE_TOKENS
def test_tab_indentation(self) -> None:
# Mixed tabs and spaces...
assert list(source_token_lines(MIXED_WS)) == MIXED_WS_TOKENS
def test_bug_822(self) -> None:
self.check_tokenization(BUG_822)
def test_tokenize_real_file(self) -> None:
# Check the tokenization of a real file (large, btw).
real_file = os.path.join(TESTS_DIR, "test_coverage.py")
self.check_file_tokenization(real_file)
def test_1828(self) -> None:
# https://github.com/coveragepy/coveragepy/pull/1828
tokens = list(
source_token_lines(
textwrap.dedent("""
x = \
1
a = ["aaa",\\
"bbb \\
ccc"]
""")
)
)
assert tokens == [
[],
[("nam", "x"), ("ws", " "), ("op", "="), ("ws", " "), ("num", "1")],
[
("nam", "a"),
("ws", " "),
("op", "="),
("ws", " "),
("op", "["),
("str", '"aaa"'),
("op", ","),
("xx", "\\"),
],
[("ws", " "), ("str", '"bbb \\')],
[("str", ' ccc"'), ("op", "]")],
]
@pytest.mark.parametrize(
"fname",
[
"stress_phystoken.tok",
"stress_phystoken_dos.tok",
],
)
def test_stress(self, fname: str) -> None:
# Check the tokenization of the stress-test files.
# And check that those files haven't been incorrectly "fixed".
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=r".*invalid escape sequence")
stress = os.path.join(TESTS_DIR, fname)
self.check_file_tokenization(stress)
with open(stress, encoding="utf-8") as fstress:
assert re.search(r"(?m) $", fstress.read()), f"{stress} needs a trailing space."
def test_fstring_middle(self) -> None:
tokens = list(
source_token_lines(
textwrap.dedent("""\
f'Look: {x} {{x}}!'
""")
)
)
if env.PYBEHAVIOR.fstring_syntax:
assert tokens == [
[
("fst", "f'"),
("fst", "Look: "),
("op", "{"),
("nam", "x"),
("op", "}"),
("fst", " {{"),
("fst", "x}}"),
("fst", "!"),
("fst", "'"),
],
]
else:
assert tokens == [[("str", "f'Look: {x} {{x}}!'")]]
| PhysTokensTest |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/sql/lambda_stmt.py | {
"start": 702,
"end": 1933
} | class ____(Base):
__tablename__ = "a"
id: Mapped[int] = mapped_column(primary_key=True)
email: Mapped[str]
user_table = Table(
"user_table", MetaData(), Column("id", Integer), Column("email", String)
)
s1 = select(user_table).where(lambda: user_table.c.id == 5)
s2 = select(User).where(lambda: User.id == 5)
s3 = lambda_stmt(lambda: select(user_table).where(user_table.c.id == 5))
s4 = lambda_stmt(lambda: select(User).where(User.id == 5))
s5 = lambda_stmt(lambda: select(user_table)) + (
lambda s: s.where(user_table.c.id == 5)
)
s6 = lambda_stmt(lambda: select(User)) + (lambda s: s.where(User.id == 5))
if TYPE_CHECKING:
assert_type(s5, StatementLambdaElement)
assert_type(s6, StatementLambdaElement)
e = create_engine("sqlite://")
with e.connect() as conn:
result = conn.execute(s6)
if TYPE_CHECKING:
assert_type(result, CursorResult[Unpack[tuple[Any, ...]]])
# we can type these like this
my_result: Result[User] = conn.execute(s6)
if TYPE_CHECKING:
# pyright and mypy disagree on the specific type here,
# mypy sees Result as we said, pyright seems to upgrade it to
# CursorResult
assert_type(my_result, Result[User])
| User |
python | apache__airflow | providers/common/sql/src/airflow/providers/common/sql/dialects/dialect.py | {
"start": 1171,
"end": 7723
} | class ____(LoggingMixin):
"""Generic dialect implementation."""
pattern = re.compile(r"[^\w]")
def __init__(self, hook, **kwargs) -> None:
super().__init__(**kwargs)
from airflow.providers.common.sql.hooks.sql import DbApiHook
if not isinstance(hook, DbApiHook):
raise TypeError(f"hook must be an instance of {DbApiHook.__class__.__name__}")
self.hook: DbApiHook = hook
@property
def placeholder(self) -> str:
return self.hook.placeholder
@property
def inspector(self) -> Inspector:
return self.hook.inspector
@property
def insert_statement_format(self) -> str:
return self.hook.insert_statement_format
@property
def replace_statement_format(self) -> str:
return self.hook.replace_statement_format
@property
def escape_word_format(self) -> str:
return self.hook.escape_word_format
@property
def escape_column_names(self) -> bool:
return self.hook.escape_column_names
def escape_word(self, word: str) -> str:
"""
Escape the word if necessary.
If the word is a reserved word or contains special characters or if the ``escape_column_names``
property is set to True in connection extra field, then the given word will be escaped.
:param word: Name of the column
:return: The escaped word
"""
if word != self.escape_word_format.format(self.unescape_word(word)) and (
self.escape_column_names or word.casefold() in self.reserved_words or self.pattern.search(word)
):
return self.escape_word_format.format(word)
return word
def unescape_word(self, word: str) -> str:
"""
Remove escape characters from each part of a dotted identifier (e.g., schema.table).
:param word: Escaped schema, table, or column name, potentially with multiple segments.
:return: The word without escaped characters.
"""
escape_char_start = self.escape_word_format[0]
escape_char_end = self.escape_word_format[-1]
def unescape_part(part: str) -> str:
if part.startswith(escape_char_start) and part.endswith(escape_char_end):
return part[1:-1]
return part
return ".".join(map(unescape_part, word.split(".")))
@classmethod
def extract_schema_from_table(cls, table: str) -> tuple[str, str | None]:
parts = table.split(".")
return tuple(parts[::-1]) if len(parts) == 2 else (table, None) # type: ignore[return-value]
@lru_cache(maxsize=None)
def get_column_names(
self,
table: str,
schema: str | None = None,
predicate: Callable[[T | ReflectedColumn], bool] = lambda column: True,
) -> list[str] | None:
if schema is None:
table, schema = self.extract_schema_from_table(table)
table_name = self.unescape_word(table)
schema = self.unescape_word(schema) if schema else None
column_names = []
for column in self.inspector.get_columns(table_name=table_name, schema=schema):
if predicate(column):
column_names.append(column["name"])
self.log.debug("Column names for table '%s': %s", table, column_names)
return column_names
@lru_cache(maxsize=None)
def get_target_fields(self, table: str, schema: str | None = None) -> list[str] | None:
target_fields = self.get_column_names(
table,
schema,
lambda column: not column.get("identity", False) and not column.get("autoincrement", False),
)
self.log.debug("Target fields for table '%s': %s", table, target_fields)
return target_fields
@lru_cache(maxsize=None)
def get_primary_keys(self, table: str, schema: str | None = None) -> list[str] | None:
if schema is None:
table, schema = self.extract_schema_from_table(table)
primary_keys = self.inspector.get_pk_constraint(
table_name=self.unescape_word(table),
schema=self.unescape_word(schema) if schema else None,
).get("constrained_columns", [])
self.log.debug("Primary keys for table '%s': %s", table, primary_keys)
return primary_keys
def run(
self,
sql: str | Iterable[str],
autocommit: bool = False,
parameters: Iterable | Mapping[str, Any] | None = None,
handler: Callable[[Any], T] | None = None,
split_statements: bool = False,
return_last: bool = True,
) -> tuple | list | list[tuple] | list[list[tuple] | tuple] | None:
return self.hook.run(sql, autocommit, parameters, handler, split_statements, return_last)
def get_records(
self,
sql: str | list[str],
parameters: Iterable | Mapping[str, Any] | None = None,
) -> Any:
return self.hook.get_records(sql=sql, parameters=parameters)
@property
def reserved_words(self) -> set[str]:
return self.hook.reserved_words
def _joined_placeholders(self, values) -> str:
placeholders = [
self.placeholder,
] * len(values)
return ",".join(placeholders)
def _joined_target_fields(self, target_fields) -> str:
if target_fields:
target_fields = ", ".join(map(self.escape_word, target_fields))
return f"({target_fields})"
return ""
def generate_insert_sql(self, table, values, target_fields, **kwargs) -> str:
"""
Generate the INSERT SQL statement.
:param table: Name of the target table
:param values: The row to insert into the table
:param target_fields: The names of the columns to fill in the table
:return: The generated INSERT SQL statement
"""
return self.insert_statement_format.format(
table, self._joined_target_fields(target_fields), self._joined_placeholders(values)
)
def generate_replace_sql(self, table, values, target_fields, **kwargs) -> str:
"""
Generate the REPLACE SQL statement.
:param table: Name of the target table
:param values: The row to insert into the table
:param target_fields: The names of the columns to fill in the table
:return: The generated REPLACE SQL statement
"""
return self.replace_statement_format.format(
table, self._joined_target_fields(target_fields), self._joined_placeholders(values)
)
| Dialect |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-file/llama_index/readers/file/xml/base.py | {
"start": 1093,
"end": 2960
} | class ____(BaseReader):
"""
XML reader.
Reads XML documents with options to help suss out relationships between nodes.
Args:
tree_level_split (int): From which level in the xml tree we split documents,
the default level is the root which is level 0
"""
def __init__(self, tree_level_split: Optional[int] = 0) -> None:
"""Initialize with arguments."""
super().__init__()
self.tree_level_split = tree_level_split
def _parse_xmlelt_to_document(
self, root: _XmlET.Element, extra_info: Optional[Dict] = None
) -> List[Document]:
"""
Parse the xml object into a list of Documents.
Args:
root: The XML Element to be converted.
extra_info (Optional[Dict]): Additional information. Default is None.
Returns:
Document: The documents.
"""
nodes = _get_leaf_nodes_up_to_level(root, self.tree_level_split)
documents = []
for node in nodes:
content = ET.tostring(node, encoding="utf8").decode("utf-8")
content = re.sub(r"^<\?xml.*", "", content)
content = content.strip()
documents.append(Document(text=content, extra_info=extra_info or {}))
return documents
def load_data(
self,
file: Path,
extra_info: Optional[Dict] = None,
) -> List[Document]:
"""
Load data from the input file.
Args:
file (Path): Path to the input file.
extra_info (Optional[Dict]): Additional information. Default is None.
Returns:
List[Document]: List of documents.
"""
if not isinstance(file, Path):
file = Path(file)
tree = ET.parse(file)
return self._parse_xmlelt_to_document(tree.getroot(), extra_info)
| XMLReader |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/settings.py | {
"start": 3176,
"end": 4130
} | class ____:
@attr.s
class MemorySettings:
sequence_length: int = attr.ib(default=64)
memory_size: int = attr.ib(default=128)
@memory_size.validator
def _check_valid_memory_size(self, attribute, value):
if value <= 0:
raise TrainerConfigError(
"When using a recurrent network, memory size must be greater than 0."
)
elif value % 2 != 0:
raise TrainerConfigError(
"When using a recurrent network, memory size must be divisible by 2."
)
normalize: bool = False
hidden_units: int = 128
num_layers: int = 2
vis_encode_type: EncoderType = EncoderType.SIMPLE
memory: Optional[MemorySettings] = None
goal_conditioning_type: ConditioningType = ConditioningType.HYPER
deterministic: bool = parser.get_default("deterministic")
@attr.s(auto_attribs=True)
| NetworkSettings |
python | tensorflow__tensorflow | tensorflow/python/data/benchmarks/list_files_benchmark.py | {
"start": 897,
"end": 2512
} | class ____(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.Dataset.list_files()`."""
def benchmark_nested_directories(self):
tmp_dir = tempfile.mkdtemp()
width = 1024
depth = 16
for i in range(width):
for j in range(depth):
new_base = os.path.join(tmp_dir, str(i),
*[str(dir_name) for dir_name in range(j)])
os.makedirs(new_base)
child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
for f in child_files:
filename = os.path.join(new_base, f)
open(filename, 'w').close()
patterns = [
os.path.join(tmp_dir, os.path.join(*['**'
for _ in range(depth)]), suffix)
for suffix in ['*.txt', '*.log']
]
# the num_elements depends on the pattern that has been defined above.
# In the current scenario, the num of files are selected based on the
# ['*.txt', '*.log'] patterns. Since the files which match either of these
# patterns are created once per `width`. The num_elements would be:
num_elements = width * 2
dataset = dataset_ops.Dataset.list_files(patterns)
self.run_and_report_benchmark(
dataset=dataset,
iters=3,
num_elements=num_elements,
extras={
'model_name': 'list_files.benchmark.1',
'parameters': '%d.%d' % (width, depth),
},
name='nested_directory(%d*%d)' % (width, depth))
shutil.rmtree(tmp_dir, ignore_errors=True)
if __name__ == '__main__':
benchmark_base.test.main()
| ListFilesBenchmark |
python | google__jax | jax/_src/pallas/pipelining/schedulers.py | {
"start": 6693,
"end": 11589
} | class ____:
"""Helper class for managing the pipeline grid indices.
Attributes:
grid: The size of the grid.
offsets: A mapping from the stage index to the integer offset from the
slowest scheduled stage.
dynamic: Whether grid indices should be calculated dynamically.
indices: A mapping from offset to the grid indices.
"""
grid: Sequence[int]
offsets: Sequence[int]
dynamic: bool
indices: Sequence[Sequence[int | jax.Array]]
@classmethod
def init(cls, grid, offsets, dynamic=False) -> 'GridCarry':
max_offset = max(offsets)
cur_indices = tuple([0] * len(grid))
indices = [cur_indices]
for _ in range(1, max_offset + 1):
next_indices, _ = increment_grid(cur_indices, grid)
indices.append(next_indices)
cur_indices = next_indices
return cls(grid, offsets, dynamic, tuple(indices))
def next(self) -> "GridCarry":
next_indices, _ = increment_grid(
self.indices[-1], self.grid, dynamic=self.dynamic
)
new_indices = (*self.indices[1:], next_indices)
return GridCarry(self.grid, self.offsets, self.dynamic, new_indices)
def get_indices_for_stage(self, stage_idx: int) -> Sequence[int | jax.Array]:
return self.indices[self.offsets[stage_idx]]
def check_args_ready(
stage: internal.PipelineStage,
scoreboard: Scoreboard,
new_scoreboard: Scoreboard,
current_stage_counter: int | jax.Array,
dynamic=False,
) -> bool | jax.Array:
"""Returns whether all arguments to the stage have already been computed."""
all_read_stages = []
for arg_idx in stage.get_read_idxs():
if stage.properties.is_async_start:
# Async start stages can start immediately after the preceding
# stage, so we use new_scoreboard instead of scoreboard.
arg_stage_idx = new_scoreboard.get_writing_stage(arg_idx)
arg_stage_ctr = new_scoreboard.get_stage_counter(arg_stage_idx)
else:
arg_stage_idx = scoreboard.get_writing_stage(arg_idx)
arg_stage_ctr = scoreboard.get_stage_counter(arg_stage_idx)
all_read_stages.append(arg_stage_ctr > current_stage_counter)
op = jnp.logical_and if dynamic else operator.and_
args_ready = functools.reduce(op, all_read_stages, True)
return args_ready
def check_async_done(stage: internal.PipelineStage,
scoreboard: Scoreboard,
num_itrs: int | jax.Array,
current_stage_counter: int | jax.Array,
dynamic=False) -> bool | jax.Array:
"""Returns whether the async done stage can run."""
and_op = jnp.logical_and if dynamic else operator.and_
# For async done stages, we need to insert delays so that they
# happen as late as possible.
# First condition is that there are a full number of async starts
# in flight.
max_in_flight = stage.properties.max_in_flight
can_run = True
token_read_effs = internal.filter_tokens(
internal.filter_read_effects(stage.effects))
read_tokens = {effect.input_index for effect in token_read_effs}
assert len(read_tokens) == 1, stage.effects
read_token = tuple(read_tokens)[0]
async_start_stage_idx = scoreboard.which_stage_writes[read_token]
async_start_counter = scoreboard.get_stage_counter(
async_start_stage_idx)
async_done_counter = current_stage_counter
min_op = jnp.minimum if dynamic else min
start_full = (async_start_counter >=
min_op(async_done_counter + max_in_flight, num_itrs))
can_run = and_op(can_run, start_full)
# Second condition - the consumers of this stage's outputs will
# actually need the results on the next iteration.
for write_idx in stage.get_write_idxs():
which_stages_read = scoreboard.which_stages_read[write_idx]
for read_stage_idx in which_stages_read:
read_itr = scoreboard.stage_counters[read_stage_idx]
can_run = and_op(can_run, (current_stage_counter <= read_itr))
return can_run
def check_async_start(
stage: internal.PipelineStage,
scoreboard: Scoreboard,
current_stage_counter: int | jax.Array,
dynamic=False,
) -> bool | jax.Array:
"""Returns whether the async start stage can run."""
token_write_effs = internal.filter_tokens(
internal.filter_write_effects(stage.effects)
)
assert len(token_write_effs) == 1, stage.effects
token_write_idx = tuple(token_write_effs)[0].input_index
dependent_stages = scoreboard.which_stages_read[token_write_idx]
dependents_ready = []
max_in_flight = stage.properties.max_in_flight
for dependent_stage_idx in dependent_stages:
check_itr = scoreboard.stage_counters[dependent_stage_idx]
# Do not issue more async_starts than max_in_flight.
dependents_ready.append(
current_stage_counter < check_itr + max_in_flight)
op = jnp.logical_and if dynamic else operator.and_
dependents_ready = functools.reduce(op, dependents_ready, True)
return dependents_ready
| GridCarry |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 131538,
"end": 134083
} | class ____(Request):
"""
Delete task hyper parameters
:param task: Task ID
:type task: str
:param hyperparams: List of hyper parameters to delete. In case a parameter
with an empty name is passed all the section will be deleted
:type hyperparams: Sequence[ParamKey]
"""
_service = "tasks"
_action = "delete_hyper_params"
_version = "2.9"
_schema = {
"definitions": {
"param_key": {
"properties": {
"name": {
"description": "Name of the parameter. If the name is ommitted then the corresponding operation is performed on the whole section",
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
},
"type": "object",
}
},
"properties": {
"hyperparams": {
"description": "List of hyper parameters to delete. In case a parameter with an empty name is passed all the section will be deleted",
"items": {"$ref": "#/definitions/param_key"},
"type": "array",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "hyperparams"],
"type": "object",
}
def __init__(self, task: str, hyperparams: List[Any], **kwargs: Any) -> None:
super(DeleteHyperParamsRequest, self).__init__(**kwargs)
self.task = task
self.hyperparams = hyperparams
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("hyperparams")
def hyperparams(self) -> List[Any]:
return self._property_hyperparams
@hyperparams.setter
def hyperparams(self, value: List[Any]) -> None:
if value is None:
self._property_hyperparams = None
return
self.assert_isinstance(value, "hyperparams", (ParamKey, dict), is_array=True)
value = [ParamKey(**v) if isinstance(v, dict) else v for v in value]
self._property_hyperparams = value
| DeleteHyperParamsRequest |
python | PyCQA__pylint | tests/functional/e/enum_subclasses.py | {
"start": 258,
"end": 424
} | class ____(Enum):
"""https://github.com/pylint-dev/pylint/issues/2062"""
FOO = 1
BAR = 2
def __str__(self):
return self.name.lower()
| Issue2062 |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/build_systems/gnu.py | {
"start": 199,
"end": 679
} | class ____(PackageBase):
gnu_mirror_path: Optional[str] = None
base_mirrors = [
"https://ftpmirror.gnu.org/",
"https://ftp.gnu.org/gnu/",
"http://ftpmirror.gnu.org/",
]
@property
def urls(self):
if self.gnu_mirror_path is None:
raise AttributeError(f"{self.__class__.__name__}: `gnu_mirror_path` missing")
return [join_url(m, self.gnu_mirror_path, resolve_href=True) for m in self.base_mirrors]
| GNUMirrorPackage |
python | huggingface__transformers | src/transformers/models/vjepa2/modeling_vjepa2.py | {
"start": 7647,
"end": 13923
} | class ____(nn.Module):
def __init__(
self,
config: VJEPA2Config,
hidden_size: int = 1024,
num_attention_heads: int = 16,
):
super().__init__()
self.config = config
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
if hidden_size % num_attention_heads != 0:
raise ValueError(
f"The hidden size {(hidden_size,)} is not a multiple of the number of attention "
f"heads {num_attention_heads}."
)
self.attention_head_size = int(hidden_size / num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.key = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.value = nn.Linear(hidden_size, self.all_head_size, bias=config.qkv_bias)
self.proj = nn.Linear(hidden_size, hidden_size)
self.dropout_prob = config.attention_probs_dropout_prob
self.dropout = nn.Dropout(self.dropout_prob)
self.grid_size = self.config.crop_size // self.config.patch_size
self.grid_depth = self.config.frames_per_clip // self.config.tubelet_size
self.d_dim = int(2 * ((self.attention_head_size // 3) // 2))
self.h_dim = int(2 * ((self.attention_head_size // 3) // 2))
self.w_dim = int(2 * ((self.attention_head_size // 3) // 2))
self.scaling = self.attention_head_size**-0.5
self.is_causal = False
def _get_frame_pos(self, ids):
tokens_per_frame = int(self.grid_size * self.grid_size)
return ids // tokens_per_frame
def _get_height_pos(self, ids):
# Remove frame component from ids
tokens_per_frame = int(self.grid_size * self.grid_size)
frame_ids = self._get_frame_pos(ids)
ids = ids - tokens_per_frame * frame_ids
# --
tokens_per_row = self.grid_size
return ids // tokens_per_row
def get_position_ids(self, x, masks=None):
device = x.device
token_size = x.size(1)
# Note: when masks is none, we use a 1d id instead of Bxnum_attention_heads mask,
# as 1d vector is broadcasted to the correct shapes.
if masks is not None:
ids = masks.unsqueeze(1).repeat(1, self.num_attention_heads, 1)
else:
ids = torch.arange(token_size, device=device)
# change to allow for extrapolation
tokens_per_frame = int(self.grid_size * self.grid_size)
frame_ids = self._get_frame_pos(ids)
# --
tokens_per_row = self.grid_size
height_ids = self._get_height_pos(ids)
# --
# Remove frame component from ids (1st term) and height component (2nd term)
width_ids = (ids - tokens_per_frame * frame_ids) - tokens_per_row * height_ids
return frame_ids, height_ids, width_ids
def apply_rotary_embeddings(self, qk, pos_ids):
d_mask, h_mask, w_mask = pos_ids
s = 0
qkd = rotate_queries_or_keys(qk[..., s : s + self.d_dim], pos=d_mask)
s += self.d_dim
qkh = rotate_queries_or_keys(qk[..., s : s + self.h_dim], pos=h_mask)
s += self.h_dim
qkw = rotate_queries_or_keys(qk[..., s : s + self.w_dim], pos=w_mask)
s += self.w_dim
# Combine rotated dimension
if s < self.attention_head_size:
qkr = qk[..., s:]
qk = torch.cat([qkd, qkh, qkw, qkr], dim=-1)
else:
qk = torch.cat([qkd, qkh, qkw], dim=-1)
return qk
def forward(
self,
hidden_states,
position_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Union[tuple[torch.Tensor, torch.Tensor], tuple[torch.Tensor]]:
batch_size, seq_length, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
key_layer = (
self.key(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
pos_ids = self.get_position_ids(hidden_states, masks=position_mask)
key_layer = self.apply_rotary_embeddings(key_layer, pos_ids)
query_layer = self.apply_rotary_embeddings(query_layer, pos_ids)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
context_layer, attention_probs = attention_interface(
self,
query_layer,
key_layer,
value_layer,
None,
is_causal=self.is_causal,
scaling=self.scaling,
dropout=0.0 if not self.training else self.dropout_prob,
)
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = self.proj(context_layer.reshape(new_context_layer_shape))
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
# Adapted from transformers.models.beit.modeling_dinov2.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
# Adapted from transformers.models.beit.modeling_beit.BeitDropPath
| VJEPA2RopeAttention |
python | gabrielfalcao__HTTPretty | tests/functional/testserver.py | {
"start": 3756,
"end": 4315
} | class ____(object):
def __init__(self, port):
self.port = int(port)
self.sock = true_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(('localhost', self.port))
def send(self, data):
if isinstance(data, str):
data = data.encode('utf-8')
self.sock.sendall(data)
return self.sock.recv(len(data) + 11)
def close(self):
try:
self.sock.close()
except socket.error:
pass # already closed
def __del__(self):
self.close()
| TCPClient |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/compute_ssh.py | {
"start": 2813,
"end": 15523
} | class ____(SSHHook):
"""
Hook to connect to a remote instance in compute engine.
:param instance_name: The name of the Compute Engine instance
:param zone: The zone of the Compute Engine instance
:param user: The name of the user on which the login attempt will be made
:param project_id: The project ID of the remote instance
:param gcp_conn_id: The connection id to use when fetching connection info
:param hostname: The hostname of the target instance. If it is not passed, it will be detected
automatically.
:param use_iap_tunnel: Whether to connect through IAP tunnel
:param use_internal_ip: Whether to connect using internal IP
:param use_oslogin: Whether to manage keys using OsLogin API. If false,
keys are managed using instance metadata
:param expire_time: The maximum amount of time in seconds before the private key expires
:param gcp_conn_id: The connection id to use when fetching connection information
:param max_retries: Maximum number of retries the process will try to establish connection to instance.
Could be decreased/increased by user based on the amount of parallel SSH connections to the instance.
:param impersonation_chain: Optional. The service account email to impersonate using short-term
credentials. The provided service account must grant the originating account
the Service Account Token Creator IAM role and have the sufficient rights to perform the request
"""
conn_name_attr = "gcp_conn_id"
default_conn_name = "google_cloud_ssh_default"
conn_type = "gcpssh"
hook_name = "Google Cloud SSH"
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
return {
"hidden_fields": ["host", "schema", "login", "password", "port", "extra"],
"relabeling": {},
}
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
instance_name: str | None = None,
zone: str | None = None,
user: str | None = "root",
project_id: str = PROVIDE_PROJECT_ID,
hostname: str | None = None,
use_internal_ip: bool = False,
use_iap_tunnel: bool = False,
use_oslogin: bool = True,
expire_time: int = 300,
cmd_timeout: int | ArgNotSet = NOTSET,
max_retries: int = 10,
impersonation_chain: str | None = None,
**kwargs,
) -> None:
# Ignore original constructor
# super().__init__()
self.gcp_conn_id = gcp_conn_id
self.instance_name = instance_name
self.zone = zone
self.user = user
self.project_id = project_id
self.hostname = hostname
self.use_internal_ip = use_internal_ip
self.use_iap_tunnel = use_iap_tunnel
self.use_oslogin = use_oslogin
self.expire_time = expire_time
self.cmd_timeout = cmd_timeout
self.max_retries = max_retries
self.impersonation_chain = impersonation_chain
self._conn: Any | None = None
@cached_property
def _oslogin_hook(self) -> OSLoginHook:
return OSLoginHook(gcp_conn_id=self.gcp_conn_id)
@cached_property
def _compute_hook(self) -> ComputeEngineHook:
if self.impersonation_chain:
return ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
return ComputeEngineHook(gcp_conn_id=self.gcp_conn_id)
def _load_connection_config(self):
def _boolify(value):
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower() == "false":
return False
if value.lower() == "true":
return True
return False
def intify(key, value, default):
if value is None:
return default
if isinstance(value, str) and value.strip() == "":
return default
try:
return int(value)
except ValueError:
raise AirflowException(
f"The {key} field should be a integer. "
f'Current value: "{value}" (type: {type(value)}). '
f"Please check the connection configuration."
)
conn = self.get_connection(self.gcp_conn_id)
if conn and conn.conn_type == "gcpssh":
self.instance_name = self._compute_hook._get_field("instance_name", self.instance_name)
self.zone = self._compute_hook._get_field("zone", self.zone)
self.user = conn.login if conn.login else self.user
# self.project_id is skipped intentionally
self.hostname = conn.host if conn.host else self.hostname
self.use_internal_ip = _boolify(self._compute_hook._get_field("use_internal_ip"))
self.use_iap_tunnel = _boolify(self._compute_hook._get_field("use_iap_tunnel"))
self.use_oslogin = _boolify(self._compute_hook._get_field("use_oslogin"))
self.expire_time = intify(
"expire_time",
self._compute_hook._get_field("expire_time"),
self.expire_time,
)
if conn.extra is not None:
extra_options = conn.extra_dejson
if "cmd_timeout" in extra_options and self.cmd_timeout is NOTSET:
if extra_options["cmd_timeout"]:
self.cmd_timeout = int(extra_options["cmd_timeout"])
else:
self.cmd_timeout = None
if self.cmd_timeout is NOTSET:
self.cmd_timeout = CMD_TIMEOUT
def get_conn(self) -> paramiko.SSHClient:
"""Return SSH connection."""
self._load_connection_config()
if not self.project_id:
self.project_id = self._compute_hook.project_id
missing_fields = [k for k in ["instance_name", "zone", "project_id"] if not getattr(self, k)]
if not self.instance_name or not self.zone or not self.project_id:
raise AirflowException(
f"Required parameters are missing: {missing_fields}. These parameters be passed either as "
"keyword parameter or as extra field in Airflow connection definition. Both are not set!"
)
self.log.info(
"Connecting to instance: instance_name=%s, user=%s, zone=%s, "
"use_internal_ip=%s, use_iap_tunnel=%s, use_os_login=%s",
self.instance_name,
self.user,
self.zone,
self.use_internal_ip,
self.use_iap_tunnel,
self.use_oslogin,
)
if not self.hostname:
hostname = self._compute_hook.get_instance_address(
zone=self.zone,
resource_id=self.instance_name,
project_id=self.project_id,
use_internal_ip=self.use_internal_ip or self.use_iap_tunnel,
)
else:
hostname = self.hostname
privkey, pubkey = self._generate_ssh_key(self.user)
max_delay = 10
sshclient = None
for retry in range(self.max_retries + 1):
try:
if self.use_oslogin:
user = self._authorize_os_login(pubkey)
else:
user = self.user
self._authorize_compute_engine_instance_metadata(pubkey)
proxy_command = None
if self.use_iap_tunnel:
proxy_command_args = [
"gcloud",
"compute",
"start-iap-tunnel",
str(self.instance_name),
"22",
"--listen-on-stdin",
f"--project={self.project_id}",
f"--zone={self.zone}",
"--verbosity=warning",
]
if self.impersonation_chain:
proxy_command_args.append(f"--impersonate-service-account={self.impersonation_chain}")
proxy_command = " ".join(shlex.quote(arg) for arg in proxy_command_args)
sshclient = self._connect_to_instance(user, hostname, privkey, proxy_command)
break
except (HttpError, AirflowException, SSHException) as exc:
if (isinstance(exc, HttpError) and exc.resp.status == 412) or (
isinstance(exc, AirflowException) and "412 PRECONDITION FAILED" in str(exc)
):
self.log.info("Error occurred when trying to update instance metadata: %s", exc)
elif isinstance(exc, SSHException):
self.log.info("Error occurred when establishing SSH connection using Paramiko: %s", exc)
else:
raise
if retry == self.max_retries:
raise AirflowException("Maximum retries exceeded. Aborting operation.")
delay = random.randint(0, max_delay)
self.log.info("Failed establish SSH connection, waiting %s seconds to retry...", delay)
time.sleep(delay)
if not sshclient:
raise AirflowException("Unable to establish SSH connection.")
return sshclient
def _connect_to_instance(self, user, hostname, pkey, proxy_command) -> paramiko.SSHClient:
self.log.info("Opening remote connection to host: username=%s, hostname=%s", user, hostname)
max_time_to_wait = 5
for time_to_wait in range(max_time_to_wait + 1):
try:
client = _GCloudAuthorizedSSHClient(self._compute_hook)
# Default is RejectPolicy
# No known host checking since we are not storing privatekey
client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) # nosec B507
client.connect(
hostname=hostname,
username=user,
pkey=pkey,
sock=paramiko.ProxyCommand(proxy_command) if proxy_command else None,
look_for_keys=False,
)
return client
except paramiko.SSHException:
if time_to_wait == max_time_to_wait:
raise
self.log.info("Failed to connect. Waiting %ds to retry", time_to_wait)
time.sleep(time_to_wait)
raise AirflowException("Can not connect to instance")
def _authorize_compute_engine_instance_metadata(self, pubkey):
self.log.info("Appending SSH public key to instance metadata")
instance_info = self._compute_hook.get_instance_info(
zone=self.zone, resource_id=self.instance_name, project_id=self.project_id
)
keys = self.user + ":" + pubkey + "\n"
metadata = instance_info["metadata"]
items = metadata.get("items", [])
for item in items:
if item.get("key") == "ssh-keys":
keys += item["value"]
item["value"] = keys
break
else:
new_dict = {"key": "ssh-keys", "value": keys}
metadata["items"] = [*items, new_dict]
self._compute_hook.set_instance_metadata(
zone=self.zone, resource_id=self.instance_name, metadata=metadata, project_id=self.project_id
)
def _authorize_os_login(self, pubkey):
username = self._oslogin_hook._get_credentials_email
self.log.info("Importing SSH public key using OSLogin: user=%s", username)
expiration = int((time.time() + self.expire_time) * 1000000)
ssh_public_key = {"key": pubkey, "expiration_time_usec": expiration}
response = self._oslogin_hook.import_ssh_public_key(
user=username, ssh_public_key=ssh_public_key, project_id=self.project_id
)
profile = response.login_profile
account = profile.posix_accounts[0]
user = account.username
return user
def _generate_ssh_key(self, user):
try:
self.log.info("Generating ssh keys...")
pkey_file = StringIO()
pkey_obj = paramiko.RSAKey.generate(2048)
pkey_obj.write_private_key(pkey_file)
pubkey = f"{pkey_obj.get_name()} {pkey_obj.get_base64()} {user}"
return pkey_obj, pubkey
except (OSError, paramiko.SSHException) as err:
raise AirflowException(f"Error encountered creating ssh keys, {err}")
| ComputeEngineSSHHook |
python | google__pytype | pytype/pyi/parser_test.py | {
"start": 55271,
"end": 59628
} | class ____(parser_test_base.ParserTestBase):
"""Tests that cover _parse_signature_as_property()."""
def test_property_with_type(self):
expected = """
from typing import Annotated
class A:
name: Annotated[str, 'property']
"""
# The return type of @property is used for the property type.
self.check(
"""
class A:
@property
def name(self) -> str:...
""",
expected,
)
self.check(
"""
class A:
@name.setter
def name(self, value: str) -> None: ...
""",
"""
from typing import Annotated, Any
class A:
name: Annotated[Any, 'property']
""",
)
self.check(
"""
class A:
@property
def name(self) -> str:...
@name.setter
def name(self, value: str) -> None: ...
""",
expected,
)
self.check(
"""
class A:
@property
def name(self) -> str:...
@name.setter
def name(self, value) -> None: ...
""",
expected,
)
self.check(
"""
class A:
@property
def name(self) -> str:...
@name.setter
def name(self, value: int) -> None: ...
""",
expected,
)
def test_property_decorator_any_type(self):
expected = """
from typing import Annotated, Any
class A:
name: Annotated[Any, 'property']
"""
self.check(
"""
class A:
@property
def name(self): ...
""",
expected,
)
self.check(
"""
class A:
@name.setter
def name(self, value): ...
""",
expected,
)
self.check(
"""
class A:
@name.deleter
def name(self): ...
""",
expected,
)
self.check(
"""
class A:
@name.setter
def name(self, value): ...
@name.deleter
def name(self): ...
""",
expected,
)
def test_property_decorator_bad_syntax(self):
self.check_error(
"""
class A:
@property
def name(self, bad_arg): ...
""",
1,
"@property must have 1 param(s), but actually has 2",
)
self.check_error(
"""
class A:
@name.setter
def name(self): ...
""",
1,
"@name.setter must have 2 param(s), but actually has 1",
)
self.check(
"""
class A:
@property
def name(self, optional_arg: str = ...): ...
""",
expected=parser_test_base.IGNORE,
)
self.check_error(
"""
class A:
@property
@staticmethod
def name(self): ...
""",
4,
"'name' can be decorated with at most one of",
)
self.check_error(
"""
@property
def name(self): ...
""",
None,
"Module-level functions with property decorators: name",
)
def test_property_setter_with_default_value(self):
self.check(
"""
class A:
@property
def x(self) -> int: ...
@x.setter
def x(self, value=...) -> None: ...
""",
"""
from typing import Annotated
class A:
x: Annotated[int, 'property']
""",
)
def test_property_clash(self):
self.check_error(
"""
class A:
@property
def name(self) -> str: ...
@property
def name(self) -> int: ...
""",
1,
"Invalid property decorators for 'name'",
)
def test_too_many_property_decorators(self):
self.check_error(
"""
class A:
@property
@name.setter
def name(self) -> str: ...
""",
1,
"conflicting decorators property, name.setter",
)
def test_abstract_property(self):
self.check(
"""
class Foo:
@property
@abstractmethod
def x(self) -> int: ...
@x.setter
def x(self, y: int) -> None: ...
""",
"""
from typing import Annotated
class Foo:
x: Annotated[int, 'property']
""",
)
| PropertyDecoratorTest |
python | huggingface__transformers | tests/models/llava/test_modeling_llava.py | {
"start": 5649,
"end": 10553
} | class ____(
ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase
):
"""
Model tester for `LlavaForConditionalGeneration`.
"""
all_model_classes = (
(
LlavaModel,
LlavaForConditionalGeneration,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"image-to-text": LlavaForConditionalGeneration,
"image-text-to-text": LlavaForConditionalGeneration,
"any-to-any": LlavaForConditionalGeneration,
}
if is_torch_available()
else {}
)
_is_composite = True
def setUp(self):
self.model_tester = LlavaVisionText2TextModelTester(self)
common_properties = ["image_token_index", "vision_feature_layer", "image_seq_length"]
self.config_tester = ConfigTester(
self, config_class=LlavaConfig, has_text_modality=False, common_properties=common_properties
)
def test_config(self):
self.config_tester.run_common_tests()
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs through an error with explicit message saying what is wrong
when number of images doesn't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
model.eval()
curr_input_dict = copy.deepcopy(input_dict) # in=place modifications further
_ = model(**curr_input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
curr_input_dict["pixel_values"] = curr_input_dict["pixel_values"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**curr_input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = curr_input_dict["input_ids"][:1]
pixel_values = curr_input_dict["pixel_values"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values)
@parameterized.expand(
[
(-1,),
([-1],),
([-1, -2],),
],
)
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision_feature_layer
num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer)
hidden_size = config.vision_config.hidden_size
expected_features = hidden_size * num_feature_layers
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
# We should have the right number of input features,
# and should be able to run a forward pass without exploding
base_model = getattr(model, "model", model)
assert base_model.multi_modal_projector.linear_1.in_features == expected_features
model(**input_dict)
@unittest.skip(
reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seems to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(
"VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. Can be tested as part of LLM test"
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
pass
@require_torch
@slow
| LlavaForConditionalGenerationModelTest |
python | google__jax | jax/_src/test_util.py | {
"start": 56920,
"end": 66300
} | class ____:
"""A class that unifies lists of supported dtypes.
These could be module-level constants, but device_under_test() is not always
known at import time, so we need to define these lists lazily.
"""
def supported(self, dtypes):
supported = supported_dtypes()
return type(dtypes)(d for d in dtypes if d in supported)
@_cached_property
def custom_floats(self):
float_dtypes = [
_dtypes.bfloat16,
_dtypes.float8_e4m3b11fnuz,
_dtypes.float8_e4m3fn,
_dtypes.float8_e4m3fnuz,
_dtypes.float8_e5m2,
_dtypes.float8_e5m2fnuz,
_dtypes.float8_e3m4,
_dtypes.float8_e4m3,
_dtypes.float8_e8m0fnu,
_dtypes.float4_e2m1fn,
]
return self.supported(float_dtypes)
@_cached_property
def floating(self):
return self.supported([np.float32, np.float64])
@_cached_property
def all_floating(self):
return self.supported([_dtypes.bfloat16, np.float16, np.float32, np.float64])
@_cached_property
def integer(self):
return self.supported([np.int32, np.int64])
@_cached_property
def all_integer(self):
return self.supported([np.int8, np.int16, np.int32, np.int64])
@_cached_property
def unsigned(self):
return self.supported([np.uint32, np.uint64])
@_cached_property
def all_unsigned(self):
return self.supported([np.uint8, np.uint16, np.uint32, np.uint64])
@_cached_property
def complex(self):
return self.supported([np.complex64, np.complex128])
@_cached_property
def boolean(self):
return self.supported([np.bool_])
@_cached_property
def inexact(self):
return self.floating + self.complex
@_cached_property
def all_inexact(self):
return self.all_floating + self.complex
@_cached_property
def numeric(self):
return self.floating + self.integer + self.unsigned + self.complex
@_cached_property
def all(self):
return (self.all_floating + self.all_integer + self.all_unsigned +
self.complex + self.boolean)
dtypes = _LazyDtypes()
def strict_promotion_if_dtypes_match(dtypes):
"""
Context manager to enable strict promotion if all dtypes match,
and enable standard dtype promotion otherwise.
"""
if all(dtype == dtypes[0] for dtype in dtypes):
return config.numpy_dtype_promotion('strict')
return config.numpy_dtype_promotion('standard')
_version_regex = re.compile(r"([0-9]+(?:\.[0-9]+)*)(?:(rc|dev).*)?")
def parse_version(v: str) -> tuple[int, ...]:
m = _version_regex.match(v)
if m is None:
raise ValueError(f"Unable to parse version '{v}'")
return tuple(int(x) for x in m.group(1).split('.'))
def numpy_version():
return parse_version(np.__version__)
def parameterized_filterable(*,
kwargs: Sequence[dict[str, Any]],
testcase_name: Callable[[dict[str, Any]], str] | None = None,
one_containing: str | None = None,
):
"""Decorator for named parameterized tests, with filtering support.
Works like ``parameterized.named_parameters``, except that it sanitizes the test
names so that we can use ``pytest -k`` and ``python test.py -k`` test filtering.
This means, e.g., that many special characters are replaced with `_`.
It also supports the ``one_containing`` arg to select one of the tests, while
leaving the name unchanged, which is useful for IDEs to be able to easily
pick up the enclosing test name.
Usage:
@jtu.parameterized_filterable(
# one_containing="a_4",
[dict(a=4, b=5),
dict(a=5, b=4)])
def test_my_test(self, *, a, b): ...
Args:
kwargs: Each entry is a set of kwargs to be passed to the test function.
testcase_name: Optionally, a function to construct the testcase_name from
one kwargs dict. If not given then ``kwargs`` may contain ``testcase_name`` and
otherwise the test case name is constructed as ``str(kwarg)``.
We sanitize the test names to work with -k test filters. See
``sanitize_test_name``.
one_containing: If given, then leaves the test name unchanged, and use
only one of the ``kwargs`` whose `testcase_name` includes ``one_containing``.
"""
# Ensure that all kwargs contain a testcase_name
kwargs_with_testcase_name: Sequence[dict[str, Any]]
if testcase_name is not None:
kwargs_with_testcase_name = [
dict(testcase_name=sanitize_test_name(str(testcase_name(kw))), **kw)
for kw in kwargs]
else:
for kw in kwargs:
testcase_name = kw.get("testcase_name")
if testcase_name is None:
testcase_name = "_".join(f"{k}={kw[k]}" # type: ignore
for k in sorted(kw.keys()))
kw["testcase_name"] = sanitize_test_name(testcase_name) # type: ignore
kwargs_with_testcase_name = kwargs
if one_containing is not None:
filtered = tuple(kw for kw in kwargs_with_testcase_name
if one_containing in kw["testcase_name"])
assert filtered, (
f"No testcase_name contains '{one_containing}'. "
"The testcase_name values are\n " +
"\n ".join(kw["testcase_name"] for kw in kwargs_with_testcase_name))
kw = filtered[0]
kw["testcase_name"] = ""
return parameterized.named_parameters([kw])
else:
return parameterized.named_parameters(*kwargs_with_testcase_name)
@contextmanager
def register_event_duration_listener(callback):
"""Manages registering/unregistering an event duration listener callback."""
try:
monitoring.register_event_duration_secs_listener(callback)
yield
finally:
monitoring.unregister_event_duration_listener(callback)
@contextmanager
def set_env(**kwargs):
"""Context manager to temporarily set/unset one or more environment variables.
Caution: setting environment variables is not thread-safe. If you use this
utility, you must annotate your test using, e.g., @thread_unsafe_test() or
@thread_unsafe_test_class().
Examples:
>>> import os
>>> os.environ['my_var'] = 'original'
>>> with set_env(my_var=None, other_var='some_value'):
... print("my_var is set:", 'my_var' in os.environ)
... print("other_var =", os.environ['other_var'])
...
my_var is set: False
other_var = some_value
>>> os.environ['my_var']
'original'
>>> 'other_var' in os.environ
False
"""
original = {key: os.environ.pop(key, None) for key in kwargs}
os.environ.update({k: v for k, v in kwargs.items() if v is not None})
try:
yield
finally:
_ = [os.environ.pop(key, None) for key in kwargs]
os.environ.update({k: v for k, v in original.items() if v is not None})
def fwd_bwd_jaxprs(f, *example_args):
fwd_jaxpr, (y_shape, res_shape) = api.make_jaxpr(
lambda *args: api.vjp(f, *args), return_shape=True)(*example_args)
bwd_jaxpr = api.make_jaxpr(lambda res, outs: res(outs))(res_shape, y_shape)
return fwd_jaxpr, bwd_jaxpr
def complex_plane_sample(dtype, size_re=10, size_im=None):
"""Return a 2-D array of complex numbers that covers the complex plane
with a grid of samples.
The size of the grid is (3 + 2 * size_im) x (3 + 2 * size_re)
that includes infinity points, extreme finite points, and the
specified number of points from real and imaginary axis.
For example:
>>> print(complex_plane_sample(np.complex64, 0, 3))
[[-inf -infj 0. -infj inf -infj]
[-inf-3.4028235e+38j 0.-3.4028235e+38j inf-3.4028235e+38j]
[-inf-2.0000000e+00j 0.-2.0000000e+00j inf-2.0000000e+00j]
[-inf-1.1754944e-38j 0.-1.1754944e-38j inf-1.1754944e-38j]
[-inf+0.0000000e+00j 0.+0.0000000e+00j inf+0.0000000e+00j]
[-inf+1.1754944e-38j 0.+1.1754944e-38j inf+1.1754944e-38j]
[-inf+2.0000000e+00j 0.+2.0000000e+00j inf+2.0000000e+00j]
[-inf+3.4028235e+38j 0.+3.4028235e+38j inf+3.4028235e+38j]
[-inf +infj 0. +infj inf +infj]]
"""
if size_im is None:
size_im = size_re
finfo = np.finfo(dtype)
machine = platform.machine()
is_arm_cpu = machine.startswith('aarch') or machine.startswith('arm')
smallest = np.nextafter(finfo.tiny, finfo.max) if is_arm_cpu and platform.system() == 'Darwin' else finfo.tiny
def make_axis_points(size):
prec_dps_ratio = 3.3219280948873626
logmin = logmax = finfo.maxexp / prec_dps_ratio
logtiny = finfo.minexp / prec_dps_ratio
axis_points = np.zeros(3 + 2 * size, dtype=finfo.dtype)
with ignore_warning(category=RuntimeWarning):
# Silence RuntimeWarning: overflow encountered in cast
half_neg_line = -np.logspace(logmin, logtiny, size, dtype=finfo.dtype)
half_line = -half_neg_line[::-1]
axis_points[-size - 1:-1] = half_line
axis_points[1:size + 1] = half_neg_line
if size > 1:
axis_points[1] = finfo.min
axis_points[-2] = finfo.max
if size > 0:
axis_points[size] = -smallest
axis_points[-size - 1] = smallest
axis_points[0] = -np.inf
axis_points[-1] = np.inf
return axis_points
real_axis_points = make_axis_points(size_re)
imag_axis_points = make_axis_points(size_im)
real_part = real_axis_points.reshape((-1, 3 + 2 * size_re)).repeat(3 + 2 * size_im, 0).astype(dtype)
imag_part = imag_axis_points.repeat(2).view(dtype)
imag_part.real[:] = 0
imag_part = imag_part.reshape((3 + 2 * size_im, -1)).repeat(3 + 2 * size_re, 1)
return real_part + imag_part
| _LazyDtypes |
python | django__django | tests/lookup/models.py | {
"start": 1477,
"end": 1842
} | class ____(models.Model):
year = models.PositiveSmallIntegerField()
gt = models.IntegerField(null=True, blank=True)
nulled_text_field = NulledTextField(null=True)
class Meta:
constraints = [
models.UniqueConstraint(fields=["year"], name="season_year_unique"),
]
def __str__(self):
return str(self.year)
| Season |
python | getsentry__sentry | tests/sentry/integrations/repository/issue_alert/test_issue_alert_notification_message_repository.py | {
"start": 1741,
"end": 5385
} | class ____(BaseIssueAlertNotificationMessageRepositoryTest):
def test_returns_parent_notification_message(self) -> None:
instance = self.repository.get_parent_notification_message(
rule_id=self.rule.id,
group_id=self.group.id,
rule_action_uuid=self.action_uuid,
)
assert instance is not None
assert instance == IssueAlertNotificationMessage.from_model(
self.parent_notification_message
)
def test_returns_latest_parent_notification_message(self) -> None:
# this can happen if somebody toggles threads on for the first time
rule_fire_history = RuleFireHistory.objects.create(
project=self.project,
rule=self.rule,
group=self.group,
event_id=self.event_id,
notification_uuid=self.notification_uuid,
)
latest = NotificationMessage.objects.create(
rule_fire_history=rule_fire_history,
rule_action_uuid=self.action_uuid,
message_identifier="abc123",
)
instance = self.repository.get_parent_notification_message(
rule_id=self.rule.id,
group_id=self.group.id,
rule_action_uuid=self.action_uuid,
)
assert instance is not None
assert instance == IssueAlertNotificationMessage.from_model(latest)
def test_returns_none_when_filter_does_not_exist(self) -> None:
instance = self.repository.get_parent_notification_message(
rule_id=9999,
group_id=self.group.id,
rule_action_uuid=self.action_uuid,
)
assert instance is None
def test_when_parent_has_child(self) -> None:
child = NotificationMessage.objects.create(
rule_fire_history=self.rule_fire_history,
rule_action_uuid=self.action_uuid,
message_identifier="456abc",
parent_notification_message=self.parent_notification_message,
)
assert child.id != self.parent_notification_message.id
instance = self.repository.get_parent_notification_message(
rule_id=self.rule.id,
group_id=self.group.id,
rule_action_uuid=self.action_uuid,
)
assert instance is not None
assert instance == IssueAlertNotificationMessage.from_model(
self.parent_notification_message
)
def test_returns_parent_notification_message_with_open_period_start(self) -> None:
open_period_start = timezone.now()
notification_with_period = NotificationMessage.objects.create(
rule_fire_history=self.rule_fire_history,
rule_action_uuid=self.action_uuid,
message_identifier="789xyz",
open_period_start=open_period_start,
)
notification_with_period = NotificationMessage.objects.create(
rule_fire_history=self.rule_fire_history,
rule_action_uuid=self.action_uuid,
message_identifier="789xyz",
open_period_start=open_period_start + timedelta(seconds=1),
)
instance = self.repository.get_parent_notification_message(
rule_id=self.rule.id,
group_id=self.group.id,
rule_action_uuid=self.action_uuid,
open_period_start=open_period_start + timedelta(seconds=1),
)
assert instance is not None
assert instance == IssueAlertNotificationMessage.from_model(notification_with_period)
assert instance.open_period_start == open_period_start + timedelta(seconds=1)
| TestGetParentNotificationMessage |
python | django__django | tests/m2m_through_regress/models.py | {
"start": 1296,
"end": 1493
} | class ____(models.Model):
name = models.CharField(max_length=20, unique=True, null=True)
class Meta:
ordering = ("name",)
def __str__(self):
return str(self.name)
| Driver |
python | kamyu104__LeetCode-Solutions | Python/find-the-level-of-tree-with-minimum-sum.py | {
"start": 165,
"end": 754
} | class ____(object):
def minimumLevel(self, root):
"""
:type root: Optional[TreeNode]
:rtype: int
"""
q = [root]
d = 1
result = ((float("inf"), float("inf")))
while q:
new_q = []
total = 0
for u in q:
if u.left:
new_q.append(u.left)
if u.right:
new_q.append(u.right)
total += u.val
result = min(result, (total, d))
q = new_q
d += 1
return result[-1]
| Solution |
python | getsentry__sentry | src/sentry/monitors/system_incidents.py | {
"start": 21997,
"end": 24647
} | class ____:
key: str
ts: datetime
def _make_backfill(start: datetime, until_not: TickAnomalyDecision) -> Generator[BackfillItem]:
"""
Yields keys and associated timestamps from the `start` tick until the value
of the key is not a `until_not` tick decision.
"""
redis_client = redis.redis_clusters.get(settings.SENTRY_MONITORS_REDIS_CLUSTER)
for chunked_offsets in batched(range(0, BACKFILL_CUTOFF), BACKFILL_CHUNKS):
pipeline = redis_client.pipeline()
keys: list[str] = []
timestamps: list[datetime] = []
for offset in chunked_offsets:
ts = start - timedelta(minutes=offset)
key = MONITOR_TICK_DECISION.format(ts=_make_reference_ts(ts))
pipeline.get(key)
keys.append(key)
timestamps.append(ts)
for key, ts, value in zip(keys, timestamps, pipeline.execute()):
# Edge case, we found a hole gap in decisions
if value is None:
return
# Exit the backfill once we no longer see the `until_not` decision
prev_decision = TickAnomalyDecision.from_str(value)
if prev_decision != until_not:
return
yield BackfillItem(key, ts)
# If we've iterated through the entire BACKFILL_CUTOFF we have a
# "decision runaway" and should report this as an error
logger.error("decision_backfill_runaway")
def _backfill_decisions(
start: datetime,
decision: TickAnomalyDecision,
until_not: TickAnomalyDecision,
) -> datetime | None:
"""
Update historic tick decisions from `start` to `decision` until we no
longer see the `until_not` decision.
If a backfill occurred, returns the timestamp just before
"""
redis_client = redis.redis_clusters.get(settings.SENTRY_MONITORS_REDIS_CLUSTER)
pipeline = redis_client.pipeline()
backfill_items = list(_make_backfill(start, until_not))
for item in backfill_items:
pipeline.set(item.key, decision.value)
pipeline.execute()
# Return the timestamp just before we reached until_not. Note
# backfill_items is in reverse chronological order here.
if backfill_items:
return backfill_items[-1].ts
# In the case that we didn't backfill anything return None
return None
def _make_reference_ts(ts: datetime) -> int:
"""
Produce a timestamp number with the seconds and microsecond removed
"""
return int(ts.replace(second=0, microsecond=0).timestamp())
def _int_or_none(s: str | None) -> int | None:
if s is None:
return None
else:
return int(s)
| BackfillItem |
python | tensorflow__tensorflow | tensorflow/python/distribute/sharded_variable.py | {
"start": 30532,
"end": 37854
} | class ____(ShardedVariableMixin, composite_tensor.CompositeTensor):
"""A container for `Variables` that should be treated as shards.
Variables that are too large to fit on a single device (e.g., large
embeddings)
may need to be sharded over multiple devices. This class maintains a list of
smaller variables that can be independently stored on separate devices (eg,
multiple parameter servers), and saves and restores those variables as if they
were a single larger variable.
Objects of this class can be saved with a given number of shards and then
restored from a checkpoint into a different number of shards.
Objects of this class can be saved to SavedModel format using
`tf.saved_model.save`. The SavedModel can be used by programs like TF serving
APIs. It is not yet supported to load the SavedModel with
`tf.saved_model.load`.
Since `ShardedVariable` can be saved and then restored to different number of
shards depending on the restore environments, for example, TF serving APIs
would restore to one shard for serving efficiency, when using
`ShardedVariable` in a tf.function, one should generally not assume it has the
same number of shards across save and load.
Sharding is only supported along the first dimension.
>>> class Model(tf.Module):
... def __init__(self):
... self.sharded_variable = ShardedVariable([
... tf.Variable([3.0], dtype=tf.float32),
... tf.Variable([2.0], dtype=tf.float32)
... ])
...
... @tf.function(input_signature=[tf.TensorSpec([], dtype=tf.int32)])
... def fn(self, x):
... return tf.nn.embedding_lookup(self.sharded_variable.variables, x)
...
... @tf.function(input_signature=[tf.TensorSpec([], dtype=tf.int32)])
... def serve_fn(self, x):
... return tf.nn.embedding_lookup(self.sharded_variable.variables, x)
>>>
>>> model = Model()
>>> model.fn(1).numpy()
2.0
>>> tf.saved_model.save(model, export_dir='/tmp/saved_model',
... signatures=model.serve_fn)
"""
@property
def _type_spec(self):
return ShardedVariableSpec(
*(resource_variable_ops.VariableSpec(v.shape, v.dtype)
for v in self._variables))
@classmethod
def _overload_all_operators(cls):
"""Register overloads for all operators."""
for operator in tensor_lib.Tensor.OVERLOADABLE_OPERATORS:
if operator == '__getitem__':
continue
cls._overload_operator(operator)
@classmethod
def _overload_operator(cls, operator):
"""Delegate an operator overload to `tensor_lib.Tensor`."""
tensor_operator = getattr(tensor_lib.Tensor, operator)
def _operator(v, *args, **kwargs):
return tensor_operator(_var_to_tensor(v), *args, **kwargs)
setattr(cls, operator, _operator)
def __tf_experimental_restore_capture__(
self, concrete_function, internal_capture
):
# Avoid restoring captures for functions that use ShardedVariable - the
# layer will be recreated during Keras model loading
# TODO(jmullenbach): support loading models with ShardedVariables using
# tf.saved_model.load
return None
def _should_act_as_resource_variable(self):
"""Pass resource_variable_ops.is_resource_variable check."""
return True
def _write_object_proto(self, proto, options):
resource_variable_ops.write_object_proto_for_resource_variable(
self._saving_variable, proto, options, enforce_naming=False
)
def _copy_trackable_to_cpu(self, object_map):
"""For implementing `Trackable` async checkpointing."""
if self in object_map:
# If populated already, simply loop through sub-variables to copy values.
for v in self._variables:
v._copy_trackable_to_cpu(object_map) # pylint: disable=protected-access
else:
# If not populated, populate first, then copy.
copied_vars = []
for v in self._variables:
# This step will both instantiate `v`'s CPU copy and copy its value.
v._copy_trackable_to_cpu(object_map) # pylint: disable=protected-access
copied_vars.append(object_map[v])
new_var = ShardedVariable(copied_vars, name=self.name)
object_map[self] = new_var
def _var_to_tensor(var, dtype=None, name=None, as_ref=False):
"""Converts a `ShardedVariable` to a `Tensor`."""
del name
if dtype is not None and not dtype.is_compatible_with(var.dtype):
raise ValueError(
'Incompatible type conversion requested to type {!r} for variable '
'of type {!r}'.format(dtype.name, var.dtype.name)
)
if as_ref:
raise NotImplementedError(
"ShardedVariable doesn't support being used as a reference."
)
# We use op dispatch mechanism to override embedding_lookup ops when called
# with ShardedVariable. This requires embedding_lookup ops to raise TypeError
# when called with ShardedVariable. However since ShardedVariable can be
# converted to a tensor via concat, embedding_lookup ops would silently
# do the conversion and never raise a TypeError. To be able to properly
# raise a TypeError, namescope is used to detect if this method is called
# within a embedding_lookup op.
# NOTE: This doesn't work in eager mode since op namescope is always cleared
# in eager. This also breaks if user sets the name of embedding_lookup op
# with something that doesn't contain str "embedding_lookup".
#
# TODO(chenkai): Find a more robust way to do this, which should not rely
# on namescope.
if 'embedding_lookup' in ops.get_name_scope():
raise TypeError(
'Converting ShardedVariable to tensor in embedding lookup'
' ops is disallowed.'
)
return array_ops.concat(var.variables, axis=0)
# Register a conversion function which reads the value of the variable,
# allowing instances of the class to be used as tensors.
tensor_conversion_registry.register_tensor_conversion_function(
ShardedVariable, _var_to_tensor
)
ShardedVariable._overload_all_operators() # pylint: disable=protected-access
# Override the behavior of embedding_lookup(sharded_variable, ...)
@dispatch.dispatch_for_types(embedding_ops.embedding_lookup, ShardedVariable)
def embedding_lookup(
params,
ids,
partition_strategy='mod',
name=None,
validate_indices=True,
max_norm=None,
):
if isinstance(params, list):
params = params[0]
return embedding_ops.embedding_lookup(
params.variables,
ids,
partition_strategy,
name,
validate_indices,
max_norm,
)
# Separately override safe_embedding_lookup_sparse, to avoid conversion of
# ShardedVariable to tensor.
@dispatch.dispatch_for_api(embedding_ops.safe_embedding_lookup_sparse)
def safe_embedding_lookup_sparse(
embedding_weights: ShardedVariable,
sparse_ids,
sparse_weights=None,
combiner='mean',
default_id=None,
name=None,
partition_strategy='div',
max_norm=None,
allow_fast_lookup=False,
):
"""Pass the individual shard variables as a list."""
return embedding_ops.safe_embedding_lookup_sparse(
embedding_weights.variables,
sparse_ids,
sparse_weights=sparse_weights,
combiner=combiner,
default_id=default_id,
name=name,
partition_strategy=partition_strategy,
max_norm=max_norm,
allow_fast_lookup=allow_fast_lookup,
)
| ShardedVariable |
python | pytorch__pytorch | torch/_inductor/codecache.py | {
"start": 5460,
"end": 7720
} | class ____:
@staticmethod
@functools.cache
def get_system() -> dict[str, Any]:
from torch._inductor.runtime.triton_compat import HAS_TRITON, triton_key
if HAS_TRITON:
# Use triton_key instead of triton.__version__ as the version
# is not updated with each code change
triton_version = triton_key()
else:
triton_version = None
try:
system: dict[str, Any] = {
"device": {"name": None},
"version": {
"triton": triton_version,
},
}
device_properties = torch.cuda.get_device_properties(
torch.cuda.current_device()
)
if torch.version.cuda is not None:
system["device"]["name"] = device_properties.name
system["version"]["cuda"] = torch.version.cuda
else:
system["device"]["name"] = device_properties.gcnArchName
system["version"]["hip"] = torch.version.hip
except (AssertionError, RuntimeError):
# If cuda is not installed, none of the above config is relevant.
system = {}
system["hash"] = hashlib.sha256(
json.dumps(system, sort_keys=True).encode("utf-8")
).hexdigest()
return system
@staticmethod
@clear_on_fresh_cache
@functools.cache
def get_local_cache_path() -> Path:
return Path(os.path.join(cache_dir(), "cache", CacheBase.get_system()["hash"]))
def __init__(self) -> None:
self.system = CacheBase.get_system()
def get_local_cache(self) -> dict[str, Any]:
local_cache_path = self.get_local_cache_path()
if not local_cache_path.is_file():
return {}
with open(local_cache_path) as local_cache_fp:
local_cache = json.load(local_cache_fp)
return local_cache["cache"]
def update_local_cache(self, local_cache: dict[str, Any]) -> None:
local_cache_path = self.get_local_cache_path()
write_atomic(
str(local_cache_path),
json.dumps({"system": self.system, "cache": local_cache}, indent=4),
make_dirs=True,
)
| CacheBase |
python | pyinstaller__pyinstaller | bootloader/waflib/Utils.py | {
"start": 3629,
"end": 13354
} | class ____(object):
def __init__(self, fun, params):
self.fun = fun
self.params = params
def __iter__(self):
return self
def __next__(self):
try:
it = self.it
except AttributeError:
it = self.it = self.fun(*self.params)
return next(it)
next = __next__
is_win32 = os.sep == '\\' or sys.platform == 'win32' or os.name == 'nt'
def readf(fname, m='r', encoding='latin-1'):
if sys.hexversion > 0x3000000 and not 'b' in m:
m += 'b'
with open(fname, m) as f:
txt = f.read()
if encoding:
txt = txt.decode(encoding)
else:
txt = txt.decode()
else:
with open(fname, m) as f:
txt = f.read()
return txt
def writef(fname, data, m='w', encoding='latin-1'):
if sys.hexversion > 0x3000000 and not 'b' in m:
data = data.encode(encoding)
m += 'b'
with open(fname, m) as f:
f.write(data)
def h_file(fname):
m = md5()
with open(fname, 'rb') as f:
while fname:
fname = f.read(200000)
m.update(fname)
return m.digest()
def readf_win32(f, m='r', encoding='latin-1'):
flags = os.O_NOINHERIT | os.O_RDONLY
if 'b' in m:
flags |= os.O_BINARY
if '+' in m:
flags |= os.O_RDWR
try:
fd = os.open(f, flags)
except OSError:
raise IOError('Cannot read from %r' % f)
if sys.hexversion > 0x3000000 and not 'b' in m:
m += 'b'
with os.fdopen(fd, m) as f:
txt = f.read()
if encoding:
txt = txt.decode(encoding)
else:
txt = txt.decode()
else:
with os.fdopen(fd, m) as f:
txt = f.read()
return txt
def writef_win32(f, data, m='w', encoding='latin-1'):
if sys.hexversion > 0x3000000 and not 'b' in m:
data = data.encode(encoding)
m += 'b'
flags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT
if 'b' in m:
flags |= os.O_BINARY
if '+' in m:
flags |= os.O_RDWR
try:
fd = os.open(f, flags)
except OSError:
raise OSError('Cannot write to %r' % f)
with os.fdopen(fd, m) as f:
f.write(data)
def h_file_win32(fname):
try:
fd = os.open(fname, os.O_BINARY | os.O_RDONLY | os.O_NOINHERIT)
except OSError:
raise OSError('Cannot read from %r' % fname)
m = md5()
with os.fdopen(fd, 'rb') as f:
while fname:
fname = f.read(200000)
m.update(fname)
return m.digest()
readf_unix = readf
writef_unix = writef
h_file_unix = h_file
if hasattr(os, 'O_NOINHERIT') and sys.hexversion < 0x3040000:
readf = readf_win32
writef = writef_win32
h_file = h_file_win32
try:
x = ''.encode('hex')
except LookupError:
import binascii
def to_hex(s):
ret = binascii.hexlify(s)
if not isinstance(ret, str):
ret = ret.decode('utf-8')
return ret
else:
def to_hex(s):
return s.encode('hex')
to_hex.__doc__ = """
Return the hexadecimal representation of a string
:param s: string to convert
:type s: string
"""
def listdir_win32(s):
if not s:
try:
import ctypes
except ImportError:
return [x + ':\\' for x in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ']
else:
dlen = 4
maxdrives = 26
buf = ctypes.create_string_buffer(maxdrives * dlen)
ndrives = ctypes.windll.kernel32.GetLogicalDriveStringsA(maxdrives * dlen, ctypes.byref(buf))
return [str(buf.raw[4 * i:4 * i + 2].decode('ascii')) for i in range(int(ndrives / dlen))]
if len(s) == 2 and s[1] == ":":
s += os.sep
if not os.path.isdir(s):
e = OSError('%s is not a directory' % s)
e.errno = errno.ENOENT
raise e
return os.listdir(s)
listdir = os.listdir
if is_win32:
listdir = listdir_win32
def num2ver(ver):
if isinstance(ver, str):
ver = tuple(ver.split('.'))
if isinstance(ver, tuple):
ret = 0
for i in range(4):
if i < len(ver):
ret += 256**(3 - i) * int(ver[i])
return ret
return ver
def to_list(val):
if isinstance(val, str):
return val.split()
else:
return val
def console_encoding():
try:
import ctypes
except ImportError:
pass
else:
try:
codepage = ctypes.windll.kernel32.GetConsoleCP()
except AttributeError:
pass
else:
if codepage:
return 'cp%d' % codepage
return sys.stdout.encoding or ('cp1252' if is_win32 else 'latin-1')
def split_path_unix(path):
return path.split('/')
def split_path_cygwin(path):
if path.startswith('//'):
ret = path.split('/')[2:]
ret[0] = '/' + ret[0]
return ret
return path.split('/')
re_sp = re.compile('[/\\\\]+')
def split_path_win32(path):
if path.startswith('\\\\'):
ret = re_sp.split(path)[1:]
ret[0] = '\\\\' + ret[0]
if ret[0] == '\\\\?':
return ret[1:]
return ret
return re_sp.split(path)
msysroot = None
def split_path_msys(path):
if path.startswith(('/', '\\')) and not path.startswith(('//', '\\\\')):
global msysroot
if not msysroot:
msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'latin-1')
msysroot = msysroot.strip()
path = os.path.normpath(msysroot + os.sep + path)
return split_path_win32(path)
if sys.platform == 'cygwin':
split_path = split_path_cygwin
elif is_win32:
if os.environ.get('MSYSTEM') and sys.executable.startswith('/'):
split_path = split_path_msys
else:
split_path = split_path_win32
else:
split_path = split_path_unix
split_path.__doc__ = """
Splits a path by / or \\; do not confuse this function with with ``os.path.split``
:type path: string
:param path: path to split
:return: list of string
"""
def check_dir(path):
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError as e:
if not os.path.isdir(path):
raise Errors.WafError('Cannot create the folder %r' % path, ex=e)
def check_exe(name, env=None):
if not name:
raise ValueError('Cannot execute an empty string!')
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(name)
if fpath and is_exe(name):
return os.path.abspath(name)
else:
env = env or os.environ
for path in env['PATH'].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, name)
if is_exe(exe_file):
return os.path.abspath(exe_file)
return None
def def_attrs(cls, **kw):
for k, v in kw.items():
if not hasattr(cls, k):
setattr(cls, k, v)
def quote_define_name(s):
fu = re.sub('[^a-zA-Z0-9]', '_', s)
fu = re.sub('_+', '_', fu)
fu = fu.upper()
return fu
re_sh = re.compile('\\s|\'|"')
def shell_escape(cmd):
if isinstance(cmd, str):
return cmd
return ' '.join(repr(x) if re_sh.search(x) else x for x in cmd)
def h_list(lst):
return md5(repr(lst).encode()).digest()
if sys.hexversion < 0x3000000:
def h_list_python2(lst):
return md5(repr(lst)).digest()
h_list_python2.__doc__ = h_list.__doc__
h_list = h_list_python2
def h_fun(fun):
try:
return fun.code
except AttributeError:
if isinstance(fun, functools.partial):
code = list(fun.args)
code.extend(sorted(fun.keywords.items()))
code.append(h_fun(fun.func))
fun.code = h_list(code)
return fun.code
try:
h = inspect.getsource(fun)
except EnvironmentError:
h = 'nocode'
try:
fun.code = h
except AttributeError:
pass
return h
def h_cmd(ins):
if isinstance(ins, str):
ret = ins
elif isinstance(ins, list) or isinstance(ins, tuple):
ret = str([h_cmd(x) for x in ins])
else:
ret = str(h_fun(ins))
if sys.hexversion > 0x3000000:
ret = ret.encode('latin-1', 'xmlcharrefreplace')
return ret
reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}")
def subst_vars(expr, params):
def repl_var(m):
if m.group(1):
return '\\'
if m.group(2):
return '$'
try:
return params.get_flat(m.group(3))
except AttributeError:
return params[m.group(3)]
return reg_subst.sub(repl_var, expr)
def destos_to_binfmt(key):
if key == 'darwin':
return 'mac-o'
elif key in ('win32', 'cygwin', 'uwin', 'msys'):
return 'pe'
return 'elf'
def unversioned_sys_platform():
s = sys.platform
if s.startswith('java'):
from java.lang import System
s = System.getProperty('os.name')
if s == 'Mac OS X':
return 'darwin'
elif s.startswith('Windows '):
return 'win32'
elif s == 'OS/2':
return 'os2'
elif s == 'HP-UX':
return 'hp-ux'
elif s in ('SunOS', 'Solaris'):
return 'sunos'
else:
s = s.lower()
if s == 'powerpc':
return 'darwin'
if s == 'win32' or s == 'os2':
return s
if s == 'cli' and os.name == 'nt':
return 'win32'
return re.split(r'\d+$', s)[0]
def nada(*k, **kw):
pass
| lazy_generator |
python | pytorch__pytorch | test/quantization/ao_migration/test_quantization.py | {
"start": 152,
"end": 7954
} | class ____(AOMigrationTestCase):
r"""Modules and functions related to the
`torch/quantization` migration to `torch/ao/quantization`.
"""
def test_function_import_quantize(self):
function_list = [
"_convert",
"_observer_forward_hook",
"_propagate_qconfig_helper",
"_remove_activation_post_process",
"_remove_qconfig",
"_add_observer_",
"add_quant_dequant",
"convert",
"_get_observer_dict",
"_get_unique_devices_",
"_is_activation_post_process",
"prepare",
"prepare_qat",
"propagate_qconfig_",
"quantize",
"quantize_dynamic",
"quantize_qat",
"_register_activation_post_process_hook",
"swap_module",
]
self._test_function_import("quantize", function_list)
def test_function_import_stubs(self):
function_list = [
"QuantStub",
"DeQuantStub",
"QuantWrapper",
]
self._test_function_import("stubs", function_list)
def test_function_import_quantize_jit(self):
function_list = [
"_check_is_script_module",
"_check_forward_method",
"script_qconfig",
"script_qconfig_dict",
"fuse_conv_bn_jit",
"_prepare_jit",
"prepare_jit",
"prepare_dynamic_jit",
"_convert_jit",
"convert_jit",
"convert_dynamic_jit",
"_quantize_jit",
"quantize_jit",
"quantize_dynamic_jit",
]
self._test_function_import("quantize_jit", function_list)
def test_function_import_fake_quantize(self):
function_list = [
"_is_per_channel",
"_is_per_tensor",
"_is_symmetric_quant",
"FakeQuantizeBase",
"FakeQuantize",
"FixedQParamsFakeQuantize",
"FusedMovingAvgObsFakeQuantize",
"default_fake_quant",
"default_weight_fake_quant",
"default_fixed_qparams_range_neg1to1_fake_quant",
"default_fixed_qparams_range_0to1_fake_quant",
"default_per_channel_weight_fake_quant",
"default_histogram_fake_quant",
"default_fused_act_fake_quant",
"default_fused_wt_fake_quant",
"default_fused_per_channel_wt_fake_quant",
"_is_fake_quant_script_module",
"disable_fake_quant",
"enable_fake_quant",
"disable_observer",
"enable_observer",
]
self._test_function_import("fake_quantize", function_list)
def test_function_import_fuse_modules(self):
function_list = [
"_fuse_modules",
"_get_module",
"_set_module",
"fuse_conv_bn",
"fuse_conv_bn_relu",
"fuse_known_modules",
"fuse_modules",
"get_fuser_method",
]
self._test_function_import("fuse_modules", function_list)
def test_function_import_quant_type(self):
function_list = [
"QuantType",
"_get_quant_type_to_str",
]
self._test_function_import("quant_type", function_list)
def test_function_import_observer(self):
function_list = [
"_PartialWrapper",
"_with_args",
"_with_callable_args",
"ABC",
"ObserverBase",
"_ObserverBase",
"MinMaxObserver",
"MovingAverageMinMaxObserver",
"PerChannelMinMaxObserver",
"MovingAveragePerChannelMinMaxObserver",
"HistogramObserver",
"PlaceholderObserver",
"RecordingObserver",
"NoopObserver",
"_is_activation_post_process",
"_is_per_channel_script_obs_instance",
"get_observer_state_dict",
"load_observer_state_dict",
"default_observer",
"default_placeholder_observer",
"default_debug_observer",
"default_weight_observer",
"default_histogram_observer",
"default_per_channel_weight_observer",
"default_dynamic_quant_observer",
"default_float_qparams_observer",
]
self._test_function_import("observer", function_list)
def test_function_import_qconfig(self):
function_list = [
"QConfig",
"default_qconfig",
"default_debug_qconfig",
"default_per_channel_qconfig",
"QConfigDynamic",
"default_dynamic_qconfig",
"float16_dynamic_qconfig",
"float16_static_qconfig",
"per_channel_dynamic_qconfig",
"float_qparams_weight_only_qconfig",
"default_qat_qconfig",
"default_weight_only_qconfig",
"default_activation_only_qconfig",
"default_qat_qconfig_v2",
"get_default_qconfig",
"get_default_qat_qconfig",
"_assert_valid_qconfig",
"QConfigAny",
"_add_module_to_qconfig_obs_ctr",
"qconfig_equals",
]
self._test_function_import("qconfig", function_list)
def test_function_import_quantization_mappings(self):
function_list = [
"no_observer_set",
"get_default_static_quant_module_mappings",
"get_static_quant_module_class",
"get_dynamic_quant_module_class",
"get_default_qat_module_mappings",
"get_default_dynamic_quant_module_mappings",
"get_default_qconfig_propagation_list",
"get_default_compare_output_module_list",
"get_default_float_to_quantized_operator_mappings",
"get_quantized_operator",
"_get_special_act_post_process",
"_has_special_act_post_process",
]
dict_list = [
"DEFAULT_REFERENCE_STATIC_QUANT_MODULE_MAPPINGS",
"DEFAULT_STATIC_QUANT_MODULE_MAPPINGS",
"DEFAULT_QAT_MODULE_MAPPINGS",
"DEFAULT_DYNAMIC_QUANT_MODULE_MAPPINGS",
# "_INCLUDE_QCONFIG_PROPAGATE_LIST",
"DEFAULT_FLOAT_TO_QUANTIZED_OPERATOR_MAPPINGS",
"DEFAULT_MODULE_TO_ACT_POST_PROCESS",
]
self._test_function_import("quantization_mappings", function_list)
self._test_dict_import("quantization_mappings", dict_list)
def test_function_import_fuser_method_mappings(self):
function_list = [
"fuse_conv_bn",
"fuse_conv_bn_relu",
"fuse_linear_bn",
"get_fuser_method",
]
dict_list = ["_DEFAULT_OP_LIST_TO_FUSER_METHOD"]
self._test_function_import("fuser_method_mappings", function_list)
self._test_dict_import("fuser_method_mappings", dict_list)
def test_function_import_utils(self):
function_list = [
"activation_dtype",
"activation_is_int8_quantized",
"activation_is_statically_quantized",
"calculate_qmin_qmax",
"check_min_max_valid",
"get_combined_dict",
"get_qconfig_dtypes",
"get_qparam_dict",
"get_quant_type",
"get_swapped_custom_module_class",
"getattr_from_fqn",
"is_per_channel",
"is_per_tensor",
"weight_dtype",
"weight_is_quantized",
"weight_is_statically_quantized",
]
self._test_function_import("utils", function_list)
if __name__ == "__main__":
raise_on_run_directly("test/test_quantization.py")
| TestAOMigrationQuantization |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-oci-data-science/tests/test_oci_data_science_client.py | {
"start": 6501,
"end": 8340
} | class ____:
"""Unit tests for BaseClient class."""
def setup_method(self):
self.endpoint = "https://example.com/api"
self.auth_mock = {"signer": Mock()}
self.retries = 3
self.backoff_factor = 2
self.timeout = 30
self.base_client = BaseClient(
endpoint=self.endpoint,
auth=self.auth_mock,
retries=self.retries,
backoff_factor=self.backoff_factor,
timeout=self.timeout,
)
def test_init(self):
"""Ensures that the client is initialized correctly."""
assert self.base_client.endpoint == self.endpoint
assert self.base_client.retries == self.retries
assert self.base_client.backoff_factor == self.backoff_factor
assert self.base_client.timeout == self.timeout
assert isinstance(self.base_client.auth, OCIAuth)
# def test_init_default_auth(self):
# """Ensures that default auth is used when auth is None."""
# with patch.object(authutil, "default_signer", return_value=self.auth_mock):
# client = BaseClient(endpoint=self.endpoint)
# assert client.auth is not None
def test_init_invalid_auth(self):
"""Ensures that ValueError is raised when auth signer is invalid."""
with pytest.raises(ValueError):
BaseClient(endpoint=self.endpoint, auth={"signer": None})
def test_prepare_headers(self):
"""Ensures that headers are prepared correctly."""
headers = {"Custom-Header": "Value"}
result = self.base_client._prepare_headers(headers=headers)
expected_headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Custom-Header": "Value",
}
assert result == expected_headers
| TestBaseClient |
python | ray-project__ray | rllib/examples/rl_modules/classes/vpg_using_shared_encoder_rlm.py | {
"start": 7851,
"end": 8551
} | class ____(torch.nn.Module):
def __init__(self, observation_space, embedding_dim):
"""
An individual version of SharedEncoder, supporting direct comparison between
the two architectures.
"""
super().__init__()
input_dim = observation_space.shape[0]
# A very simple encoder network.
self._net = torch.nn.Sequential(
torch.nn.Linear(input_dim, embedding_dim),
)
def forward(self, batch, **kwargs):
# Pass observations through the net and return outputs.
return {ENCODER_OUT: self._net(batch[Columns.OBS])}
# __sphinx_doc_ns_encoder_end__
# __sphinx_doc_ns_policy_begin__
| VPGIndividualEncoder |
python | sympy__sympy | sympy/polys/domains/gaussiandomains.py | {
"start": 11126,
"end": 16747
} | class ____(GaussianDomain[GaussianInteger, MPZ], Ring[MPZ]):
r"""Ring of Gaussian integers ``ZZ_I``
The :ref:`ZZ_I` domain represents the `Gaussian integers`_ `\mathbb{Z}[i]`
as a :py:class:`~.Domain` in the domain system (see
:ref:`polys-domainsintro`).
By default a :py:class:`~.Poly` created from an expression with
coefficients that are combinations of integers and ``I`` (`\sqrt{-1}`)
will have the domain :ref:`ZZ_I`.
>>> from sympy import Poly, Symbol, I
>>> x = Symbol('x')
>>> p = Poly(x**2 + I)
>>> p
Poly(x**2 + I, x, domain='ZZ_I')
>>> p.domain
ZZ_I
The :ref:`ZZ_I` domain can be used to factorise polynomials that are
reducible over the Gaussian integers.
>>> from sympy import factor
>>> factor(x**2 + 1)
x**2 + 1
>>> factor(x**2 + 1, domain='ZZ_I')
(x - I)*(x + I)
The corresponding `field of fractions`_ is the domain of the Gaussian
rationals :ref:`QQ_I`. Conversely :ref:`ZZ_I` is the `ring of integers`_
of :ref:`QQ_I`.
>>> from sympy import ZZ_I, QQ_I
>>> ZZ_I.get_field()
QQ_I
>>> QQ_I.get_ring()
ZZ_I
When using the domain directly :ref:`ZZ_I` can be used as a constructor.
>>> ZZ_I(3, 4)
(3 + 4*I)
>>> ZZ_I(5)
(5 + 0*I)
The domain elements of :ref:`ZZ_I` are instances of
:py:class:`~.GaussianInteger` which support the rings operations
``+,-,*,**``.
>>> z1 = ZZ_I(5, 1)
>>> z2 = ZZ_I(2, 3)
>>> z1
(5 + 1*I)
>>> z2
(2 + 3*I)
>>> z1 + z2
(7 + 4*I)
>>> z1 * z2
(7 + 17*I)
>>> z1 ** 2
(24 + 10*I)
Both floor (``//``) and modulo (``%``) division work with
:py:class:`~.GaussianInteger` (see the :py:meth:`~.Domain.div` method).
>>> z3, z4 = ZZ_I(5), ZZ_I(1, 3)
>>> z3 // z4 # floor division
(1 + -1*I)
>>> z3 % z4 # modulo division (remainder)
(1 + -2*I)
>>> (z3//z4)*z4 + z3%z4 == z3
True
True division (``/``) in :ref:`ZZ_I` gives an element of :ref:`QQ_I`. The
:py:meth:`~.Domain.exquo` method can be used to divide in :ref:`ZZ_I` when
exact division is possible.
>>> z1 / z2
(1 + -1*I)
>>> ZZ_I.exquo(z1, z2)
(1 + -1*I)
>>> z3 / z4
(1/2 + -3/2*I)
>>> ZZ_I.exquo(z3, z4)
Traceback (most recent call last):
...
ExactQuotientFailed: (1 + 3*I) does not divide (5 + 0*I) in ZZ_I
The :py:meth:`~.Domain.gcd` method can be used to compute the `gcd`_ of any
two elements.
>>> ZZ_I.gcd(ZZ_I(10), ZZ_I(2))
(2 + 0*I)
>>> ZZ_I.gcd(ZZ_I(5), ZZ_I(2, 1))
(2 + 1*I)
.. _Gaussian integers: https://en.wikipedia.org/wiki/Gaussian_integer
.. _gcd: https://en.wikipedia.org/wiki/Greatest_common_divisor
"""
dom: IntegerRing = ZZ
mod = DMP([ZZ.one, ZZ.zero, ZZ.one], ZZ)
dtype = GaussianInteger
zero = dtype(ZZ(0), ZZ(0))
one = dtype(ZZ(1), ZZ(0))
imag_unit = dtype(ZZ(0), ZZ(1))
units = (one, imag_unit, -one, -imag_unit) # powers of i
rep = 'ZZ_I'
is_GaussianRing = True
is_ZZ_I = True
is_PID = True
def __init__(self): # override Domain.__init__
"""For constructing ZZ_I."""
def __eq__(self, other):
"""Returns ``True`` if two domains are equivalent. """
if isinstance(other, GaussianIntegerRing):
return True
else:
return NotImplemented
def __hash__(self) -> int:
"""Compute hash code of ``self``. """
return hash('ZZ_I')
@property
def has_CharacteristicZero(self): # type: ignore
return True
def characteristic(self) -> int:
return 0
def get_ring(self) -> Self:
"""Returns a ring associated with ``self``. """
return self
def get_field(self) -> GaussianRationalField:
"""Returns a field associated with ``self``. """
return QQ_I
def normalize(self, d: GaussianInteger, *args: GaussianInteger):
"""Return first quadrant element associated with ``d``.
Also multiply the other arguments by the same power of i.
"""
unit = self.canonical_unit(d)
d *= unit
args = tuple(a*unit for a in args)
return (d,) + args if args else d
def gcd(self, a: GaussianInteger, b: GaussianInteger) -> GaussianInteger:
"""Greatest common divisor of a and b over ZZ_I."""
while b:
a, b = b, a % b
unit = self.canonical_unit(a)
return a*unit
def gcdex(
self, a: GaussianInteger, b: GaussianInteger
) -> tuple[GaussianInteger, GaussianInteger, GaussianInteger]:
"""Return x, y, g such that x * a + y * b = g = gcd(a, b)"""
x_a = self.one
x_b = self.zero
y_a = self.zero
y_b = self.one
while b:
q = a // b
a, b = b, a - q * b
x_a, x_b = x_b, x_a - q * x_b
y_a, y_b = y_b, y_a - q * y_b
unit = self.canonical_unit(a)
a, x_a, y_a = a*unit, x_a*unit, y_a*unit
return x_a, y_a, a
def lcm(self, a: GaussianInteger, b: GaussianInteger) -> GaussianInteger:
"""Least common multiple of a and b over ZZ_I."""
return (a * b) // self.gcd(a, b)
def from_GaussianIntegerRing(K1, a, K0) -> GaussianInteger:
"""Convert a ZZ_I element to ZZ_I."""
return a
def from_GaussianRationalField(K1, a, K0) -> GaussianInteger:
"""Convert a QQ_I element to ZZ_I."""
return K1.new(ZZ.convert(a.x), ZZ.convert(a.y))
ZZ_I = GaussianInteger._parent = GaussianIntegerRing() # type: ignore
| GaussianIntegerRing |
python | walkccc__LeetCode | solutions/45. Jump Game II/45.py | {
"start": 0,
"end": 474
} | class ____:
def jump(self, nums: list[int]) -> int:
ans = 0
end = 0
farthest = 0
# Start an implicit BFS.
for i in range(len(nums) - 1):
farthest = max(farthest, i + nums[i])
if farthest >= len(nums) - 1:
ans += 1
break
if i == end: # Visited all the items on the current level.
ans += 1 # Increment the level.
end = farthest # Make the queue size for the next level.
return ans
| Solution |
python | getsentry__sentry | fixtures/safe_migrations_apps/good_flow_delete_field_pending_with_not_null_m2m_app/migrations/0001_initial.py | {
"start": 154,
"end": 1811
} | class ____(CheckedMigration):
initial = True
checked = False
dependencies = []
operations = [
migrations.CreateModel(
name="OtherTable",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name="M2MTable",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False)),
(
"alert_rule",
FlexibleForeignKey(
on_delete=models.deletion.CASCADE,
to="good_flow_delete_field_pending_with_not_null_m2m_app.othertable",
),
),
],
),
migrations.CreateModel(
name="TestTable",
fields=[
("id", models.AutoField(auto_created=True, primary_key=True, serialize=False)),
(
"excluded_projects",
models.ManyToManyField(
through="good_flow_delete_field_pending_with_not_null_m2m_app.M2MTable",
to="good_flow_delete_field_pending_with_not_null_m2m_app.othertable",
),
),
],
),
migrations.AddField(
model_name="m2mtable",
name="test_table",
field=FlexibleForeignKey(
on_delete=models.deletion.CASCADE,
to="good_flow_delete_field_pending_with_not_null_m2m_app.testtable",
),
),
]
| Migration |
python | PrefectHQ__prefect | tests/test_schedules.py | {
"start": 1230,
"end": 2037
} | class ____:
def test_cron_schedule_creation(self):
schedule = Cron("0 0 * * *")
assert schedule.cron == "0 0 * * *"
assert schedule.timezone is None
assert schedule.day_or is True
assert schedule.active is True
assert schedule.parameters == {}
def test_cron_schedule_with_all_parameters(self):
params = {"key": "value"}
schedule = Cron(
"0 0 * * *",
timezone="America/New_York",
day_or=False,
active=False,
parameters=params,
)
assert schedule.cron == "0 0 * * *"
assert schedule.timezone == "America/New_York"
assert schedule.day_or is False
assert schedule.active is False
assert schedule.parameters == params
| TestCronSchedule |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_meta.py | {
"start": 4689,
"end": 7270
} | class ____(OrganizationEventsEndpointBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, organization: Organization) -> Response:
try:
snuba_params = self.get_snuba_params(request, organization)
except NoProjects:
return Response([])
with sentry_sdk.start_span(op="discover.endpoint", name="find_lookup_keys") as span:
possible_keys = ["transaction"]
lookup_keys = {key: request.query_params.get(key) for key in possible_keys}
if not any(lookup_keys.values()):
return Response(
{
"detail": f"Must provide one of {possible_keys} in order to find related events"
},
status=400,
)
with handle_query_errors():
with sentry_sdk.start_span(op="discover.endpoint", name="filter_creation"):
projects = self.get_projects(request, organization)
query_kwargs = build_query_params_from_request(
request, organization, projects, snuba_params.environments
)
query_kwargs["limit"] = 5
try:
# Need to escape quotes in case some "joker" has a transaction with quotes
transaction_name = UNESCAPED_QUOTE_RE.sub('\\"', lookup_keys["transaction"])
parsed_terms = parse_search_query(f'transaction:"{transaction_name}"')
except ParseError:
return Response({"detail": "Invalid transaction search"}, status=400)
if query_kwargs.get("search_filters"):
query_kwargs["search_filters"].extend(parsed_terms)
else:
query_kwargs["search_filters"] = parsed_terms
query_kwargs["actor"] = request.user
with sentry_sdk.start_span(op="discover.endpoint", name="issue_search"):
results_cursor = search.backend.query(**query_kwargs)
with sentry_sdk.start_span(op="discover.endpoint", name="serialize_results") as span:
results = list(results_cursor)
span.set_data("result_length", len(results))
context = serialize(
results,
request.user,
GroupSerializer(environment_func=get_environment_func(request, organization.id)),
)
return Response(context)
@region_silo_endpoint
| OrganizationEventsRelatedIssuesEndpoint |
python | optuna__optuna | optuna/samplers/_tpe/probability_distributions.py | {
"start": 158,
"end": 236
} | class ____(NamedTuple):
weights: np.ndarray
| _BatchedCategoricalDistributions |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_translate.py | {
"start": 3540,
"end": 5414
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.translate.TranslateHook")
def test_minimal_green_path(self, mock_hook):
translation_result_data = {
"translations": [
{"translated_text": "Hello World!", "model": "", "detected_language_code": ""},
{
"translated_text": "Can you get me a cup of coffee, please?",
"model": "",
"detected_language_code": "",
},
],
"glossary_translations": [],
}
mock_hook.return_value.translate_text.return_value = translation_result_data
data_to_translate = ["Ciao mondo!", "Mi puoi prendere una tazza di caffè, per favore?"]
op = TranslateTextOperator(
task_id="task_id",
contents=data_to_translate,
source_language_code="it",
target_language_code="en",
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
timeout=TIMEOUT_VALUE,
retry=None,
model=None,
)
context = mock.MagicMock()
result = op.execute(context=context)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.translate_text.assert_called_once_with(
contents=data_to_translate,
source_language_code="it",
target_language_code="en",
mime_type=None,
location=None,
labels=None,
model=None,
transliteration_config=None,
glossary_config=None,
timeout=TIMEOUT_VALUE,
retry=None,
metadata=(),
)
assert translation_result_data == result
| TestTranslateText |
python | pytorch__pytorch | test/test_schema_check.py | {
"start": 1894,
"end": 3823
} | class ____(torch.Tensor):
ALIAS_ARG_OUT = {"aten::add"}
ALIAS_OUT_OUT = {"aten::aminmax"}
MUTATE_ARGS_OUT = {"aten::sub"}
elem: torch.Tensor
__slots__ = ['elem']
@staticmethod
def __new__(cls, elem, *args, **kwargs):
# The wrapping tensor (IncorrectAliasTensor) shouldn't hold any
# memory for the class in question, but it should still
# advertise the same device as before
r = torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
cls, elem.size(),
strides=elem.stride(), storage_offset=elem.storage_offset(),
# TODO: clone storage aliasing
dtype=elem.dtype, layout=elem.layout,
device=elem.device, requires_grad=kwargs.get("requires_grad", False)
)
# ...the real tensor is held as an element on the tensor.
r.elem = elem.detach() if r.requires_grad else elem
return r
def __repr__(self):
return super().__repr__(tensor_contents=f"{self.elem}")
@classmethod
def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
def unwrap(e):
return e.elem if isinstance(e, cls) else e
def wrap(e):
return cls(e) if isinstance(e, torch.Tensor) else e
unwrapped_args = tree_map(unwrap, args)
out = func(*unwrapped_args, **tree_map(unwrap, kwargs))
if func._schema.name in IncorrectAliasTensor.ALIAS_ARG_OUT:
args[0].elem = out
if func._schema.name in IncorrectAliasTensor.MUTATE_ARGS_OUT:
args[0].elem = torch.rand(args[0].elem.shape)
if func._schema.name in IncorrectAliasTensor.ALIAS_OUT_OUT:
incorrect_out = list(out)
incorrect_out[0] = incorrect_out[1]
return tree_map(wrap, tuple(incorrect_out))
return tree_map(wrap, out)
# Tests various schema checking functionalities.
| IncorrectAliasTensor |
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 16964,
"end": 17833
} | class ____(SingleAggregation):
groupby_chunk = M.size
groupby_aggregate = M.sum
def _simplify_down(self):
if (
self._slice is not None
and not isinstance(self._slice, list)
or self.frame.ndim == 1
):
# Scalar slices influence the result and are allowed, i.e., the name of
# the series is different
return
# We can remove every column since pandas reduces to a Series anyway
by_columns = self._by_columns
by_columns = [c for c in by_columns if c in self.frame.columns]
if set(by_columns) == set(self.frame.columns):
return
slice_idx = self._parameters.index("_slice")
ops = [op if i != slice_idx else None for i, op in enumerate(self.operands)]
return type(self)(self.frame[by_columns], *ops[1:])
| Size |
python | cython__cython | Cython/Compiler/TypeSlots.py | {
"start": 20351,
"end": 20723
} | class ____(MethodSlot):
def slot_code(self, scope):
entry = scope.lookup_here(self.method_name)
if entry and entry.is_special and entry.func_cname:
return entry.func_cname
elif scope.defines_any_special(richcmp_special_methods):
return scope.mangle_internal(self.slot_name)
else:
return "0"
| RichcmpSlot |
python | keras-team__keras | keras/src/callbacks/callback_test.py | {
"start": 145,
"end": 1012
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_model_state_is_current_on_epoch_end(self):
class TestModel(models.Model):
def __init__(self):
super().__init__()
self.iterations = self.add_variable(
shape=(), initializer="zeros", trainable=False
)
def call(self, inputs):
self.iterations.assign(self.iterations + 1)
return inputs
class CBK(Callback):
def on_batch_end(self, batch, logs):
assert np.int32(self.model.iterations) == batch + 1
model = TestModel()
model.compile(optimizer="sgd", loss="mse")
x = np.random.random((8, 1))
y = np.random.random((8, 1))
model.fit(x, y, callbacks=[CBK()], batch_size=2)
| CallbackTest |
python | walkccc__LeetCode | solutions/2470. Number of Subarrays With LCM Equal to K/2470.py | {
"start": 0,
"end": 321
} | class ____:
def subarrayLCM(self, nums: list[int], k: int) -> int:
ans = 0
for i, runningLcm in enumerate(nums):
for j in range(i, len(nums)):
runningLcm = math.lcm(runningLcm, nums[j])
if runningLcm > k:
break
if runningLcm == k:
ans += 1
return ans
| Solution |
python | automl__auto-sklearn | test/test_pipeline/components/feature_preprocessing/test_pca.py | {
"start": 177,
"end": 873
} | class ____(PreprocessingTestCase):
def test_default_configuration(self):
transformations = []
for i in range(2):
transformation, original = _test_preprocessing(PCA)
self.assertEqual(transformation.shape, original.shape)
self.assertFalse((transformation == original).all())
transformations.append(transformation)
if len(transformations) > 1:
np.testing.assert_allclose(
transformations[-1], transformations[-2], rtol=1e-4
)
def test_preprocessing_dtype(self):
super(PCAComponentTest, self)._test_preprocessing_dtype(PCA, test_sparse=False)
| PCAComponentTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/dms.py | {
"start": 11042,
"end": 12702
} | class ____(AwsBaseOperator[DmsHook]):
"""
Stops AWS DMS replication task.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DmsStopTaskOperator`
:param replication_task_arn: Replication task ARN
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
aws_hook_class = DmsHook
template_fields: Sequence[str] = aws_template_fields("replication_task_arn")
def __init__(self, *, replication_task_arn: str | None = None, **kwargs):
super().__init__(**kwargs)
self.replication_task_arn = replication_task_arn
def execute(self, context: Context):
"""Stop AWS DMS replication task from Airflow."""
self.hook.stop_replication_task(replication_task_arn=self.replication_task_arn)
self.log.info("DMS replication task(%s) is stopping.", self.replication_task_arn)
| DmsStopTaskOperator |
python | sphinx-doc__sphinx | sphinx/ext/graphviz.py | {
"start": 6469,
"end": 18061
} | class ____(SphinxDirective):
"""Directive to insert arbitrary dot markup."""
has_content = True
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec: ClassVar[OptionSpec] = {
'alt': directives.unchanged,
'align': align_spec,
'caption': directives.unchanged,
'layout': directives.unchanged,
'graphviz_dot': directives.unchanged, # an old alias of `layout` option
'name': directives.unchanged,
'class': directives.class_option,
}
def run(self) -> list[Node]:
node = graphviz()
dot_code = '\n'.join(self.content)
node['code'] = f'{self.name} {self.arguments[0]} {{\n{dot_code}\n}}\n'
node['options'] = {'docname': self.env.current_document.docname}
if 'graphviz_dot' in self.options:
node['options']['graphviz_dot'] = self.options['graphviz_dot']
if 'layout' in self.options:
node['options']['graphviz_dot'] = self.options['layout']
if 'alt' in self.options:
node['alt'] = self.options['alt']
if 'align' in self.options:
node['align'] = self.options['align']
if 'class' in self.options:
node['classes'] = self.options['class']
if 'caption' not in self.options:
self.add_name(node)
return [node]
else:
figure = figure_wrapper(self, node, self.options['caption'])
self.add_name(figure)
return [figure]
def fix_svg_relative_paths(
self: HTML5Translator | LaTeXTranslator | TexinfoTranslator,
filepath: str | os.PathLike[str],
) -> None:
"""Change relative links in generated svg files to be relative to imgpath."""
env = self.builder.env
tree = ET.parse(filepath) # NoQA: S314
root = tree.getroot()
ns = {'svg': 'http://www.w3.org/2000/svg', 'xlink': 'http://www.w3.org/1999/xlink'}
href_name = '{http://www.w3.org/1999/xlink}href'
modified = False
for element in chain(
root.findall('.//svg:image[@xlink:href]', ns),
root.findall('.//svg:a[@xlink:href]', ns),
):
scheme, hostname, rel_uri, query, fragment = urlsplit(element.attrib[href_name])
if hostname:
# not a relative link
continue
docname = env.path2doc(self.document['source'])
if docname is None:
# This shouldn't happen!
continue
doc_dir = self.builder.outdir.joinpath(docname).resolve().parent
old_path = doc_dir / rel_uri
img_path = doc_dir / self.builder.imgpath
new_path = os.path.relpath(old_path, start=img_path)
modified_url = urlunsplit((scheme, hostname, new_path, query, fragment))
element.set(href_name, modified_url)
modified = True
if modified:
tree.write(filepath)
def render_dot(
self: HTML5Translator | LaTeXTranslator | TexinfoTranslator,
code: str,
options: dict[str, Any],
format: str,
prefix: str = 'graphviz',
filename: str | None = None,
) -> tuple[_StrPath | None, _StrPath | None]:
"""Render graphviz code into a PNG or PDF output file."""
graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
if not graphviz_dot:
raise GraphvizError(
__('graphviz_dot executable path must be set! %r') % graphviz_dot,
)
hashkey = ''.join((
code,
str(options),
str(graphviz_dot),
str(self.builder.config.graphviz_dot_args),
)).encode()
fname = f'{prefix}-{sha1(hashkey, usedforsecurity=False).hexdigest()}.{format}'
relfn = _StrPath(self.builder.imgpath, fname)
outfn = self.builder.outdir / self.builder.imagedir / fname
if outfn.is_file():
return relfn, outfn
if getattr(self.builder, '_graphviz_warned_dot', {}).get(graphviz_dot):
return None, None
outfn.parent.mkdir(parents=True, exist_ok=True)
dot_args = [graphviz_dot]
dot_args.extend(self.builder.config.graphviz_dot_args)
dot_args.extend([f'-T{format}', f'-o{outfn}'])
docname = options.get('docname', 'index')
if filename:
cwd = (self.builder.srcdir / filename).parent
else:
cwd = (self.builder.srcdir / docname).parent
if format == 'png':
dot_args.extend(['-Tcmapx', f'-o{outfn}.map'])
try:
ret = subprocess.run(
dot_args, input=code.encode(), capture_output=True, cwd=cwd, check=True
)
except OSError:
logger.warning(
__(
'dot command %r cannot be run (needed for graphviz '
'output), check the graphviz_dot setting'
),
graphviz_dot,
)
if not hasattr(self.builder, '_graphviz_warned_dot'):
self.builder._graphviz_warned_dot = {} # type: ignore[union-attr]
self.builder._graphviz_warned_dot[graphviz_dot] = True # type: ignore[union-attr]
return None, None
except CalledProcessError as exc:
raise GraphvizError(
__('dot exited with error:\n[stderr]\n%r\n[stdout]\n%r')
% (exc.stderr, exc.stdout)
) from exc
if not outfn.is_file():
raise GraphvizError(
__('dot did not produce an output file:\n[stderr]\n%r\n[stdout]\n%r')
% (ret.stderr, ret.stdout)
)
if format == 'svg':
fix_svg_relative_paths(self, outfn)
return relfn, outfn
def render_dot_html(
self: HTML5Translator,
node: graphviz,
code: str,
options: dict[str, Any],
prefix: str = 'graphviz',
imgcls: str | None = None,
alt: str | None = None,
filename: str | None = None,
) -> tuple[str, str]:
format = self.builder.config.graphviz_output_format
if format not in {'png', 'svg'}:
logger.warning(
__("graphviz_output_format must be either 'png' or 'svg', but is %r"),
format,
)
try:
fname, outfn = render_dot(self, code, options, format, prefix, filename)
except GraphvizError as exc:
logger.warning(__('dot code %r: %s'), code, exc)
raise nodes.SkipNode from exc
classes = [imgcls, 'graphviz', *node.get('classes', [])]
imgcls = ' '.join(filter(None, classes))
if fname is None:
self.body.append(self.encode(code))
else:
src = fname.as_posix()
if alt is None:
alt = node.get('alt', self.encode(code).strip())
if 'align' in node:
align = node['align']
self.body.append(f'<div align="{align}" class="align-{align}">')
if format == 'svg':
self.body.append('<div class="graphviz">')
self.body.append(
f'<object data="{src}" type="image/svg+xml" class="{imgcls}">\n'
)
self.body.append(f'<p class="warning">{alt}</p>')
self.body.append('</object></div>\n')
else:
assert outfn is not None
with open(f'{outfn}.map', encoding='utf-8') as mapfile:
map_content = mapfile.read()
imgmap = ClickableMapDefinition(f'{outfn}.map', map_content, dot=code)
if imgmap.clickable:
# has a map
self.body.append('<div class="graphviz">')
self.body.append(
f'<img src="{src}" alt="{alt}" usemap="#{imgmap.id}" class="{imgcls}" />'
)
self.body.append('</div>\n')
self.body.append(imgmap.generate_clickable_map())
else:
# nothing in image map
self.body.append('<div class="graphviz">')
self.body.append(f'<img src="{src}" alt="{alt}" class="{imgcls}" />')
self.body.append('</div>\n')
if 'align' in node:
self.body.append('</div>\n')
raise nodes.SkipNode
def html_visit_graphviz(self: HTML5Translator, node: graphviz) -> None:
render_dot_html(
self, node, node['code'], node['options'], filename=node.get('filename')
)
def render_dot_latex(
self: LaTeXTranslator,
node: graphviz,
code: str,
options: dict[str, Any],
prefix: str = 'graphviz',
filename: str | None = None,
) -> None:
try:
fname, _outfn = render_dot(self, code, options, 'pdf', prefix, filename)
except GraphvizError as exc:
logger.warning(__('dot code %r: %s'), code, exc)
raise nodes.SkipNode from exc
is_inline = self.is_inline(node)
if not is_inline:
pre = ''
post = ''
if 'align' in node:
if node['align'] == 'left':
pre = '{'
post = r'\hspace*{\fill}}'
elif node['align'] == 'right':
pre = r'{\hspace*{\fill}'
post = '}'
elif node['align'] == 'center':
pre = r'{\hfill'
post = r'\hspace*{\fill}}'
self.body.append(f'\n{pre}')
self.body.append(r'\sphinxincludegraphics[]{%s}' % fname)
if not is_inline:
self.body.append(f'{post}\n')
raise nodes.SkipNode
def latex_visit_graphviz(self: LaTeXTranslator, node: graphviz) -> None:
render_dot_latex(
self, node, node['code'], node['options'], filename=node.get('filename')
)
def render_dot_texinfo(
self: TexinfoTranslator,
node: graphviz,
code: str,
options: dict[str, Any],
prefix: str = 'graphviz',
) -> None:
try:
fname, _outfn = render_dot(self, code, options, 'png', prefix)
except GraphvizError as exc:
logger.warning(__('dot code %r: %s'), code, exc)
raise nodes.SkipNode from exc
if fname is not None:
self.body.append('@image{%s,,,[graphviz],png}\n' % fname[:-4])
raise nodes.SkipNode
def texinfo_visit_graphviz(self: TexinfoTranslator, node: graphviz) -> None:
render_dot_texinfo(self, node, node['code'], node['options'])
def text_visit_graphviz(self: TextTranslator, node: graphviz) -> None:
if 'alt' in node.attributes:
self.add_text(_('[graph: %s]') % node['alt'])
else:
self.add_text(_('[graph]'))
raise nodes.SkipNode
def man_visit_graphviz(self: ManualPageTranslator, node: graphviz) -> None:
if 'alt' in node.attributes:
self.body.append(_('[graph: %s]') % node['alt'])
else:
self.body.append(_('[graph]'))
raise nodes.SkipNode
def on_config_inited(_app: Sphinx, config: Config) -> None:
css_path = sphinx.package_dir.joinpath('templates', 'graphviz', 'graphviz.css')
config.html_static_path.append(str(css_path))
def setup(app: Sphinx) -> ExtensionMetadata:
app.add_node(
graphviz,
html=(html_visit_graphviz, None),
latex=(latex_visit_graphviz, None),
texinfo=(texinfo_visit_graphviz, None),
text=(text_visit_graphviz, None),
man=(man_visit_graphviz, None),
)
app.add_directive('graphviz', Graphviz)
app.add_directive('graph', GraphvizSimple)
app.add_directive('digraph', GraphvizSimple)
app.add_config_value('graphviz_dot', 'dot', 'html', types=frozenset({str}))
app.add_config_value(
'graphviz_dot_args', (), 'html', types=frozenset({list, tuple})
)
app.add_config_value(
'graphviz_output_format', 'png', 'html', types=frozenset({str})
)
app.add_css_file('graphviz.css')
app.connect('config-inited', on_config_inited)
return {
'version': sphinx.__display_version__,
'parallel_read_safe': True,
}
| GraphvizSimple |
python | streamlit__streamlit | lib/tests/streamlit/data_test_cases.py | {
"start": 3308,
"end": 3398
} | class ____(NamedTuple):
name: str
is_widget: bool
usage: float
| ElementNamedTuple |
python | walkccc__LeetCode | solutions/3511. Make a Positive Array/3511.py | {
"start": 0,
"end": 356
} | class ____:
def makeArrayPositive(self, nums: list[int]) -> int:
MAX = 10**18
ans = 0
minSum = nums[0] + nums[1]
for i in range(2, len(nums)):
a, b, c = nums[i - 2], nums[i - 1], nums[i]
minSum = min(minSum + c, a + b + c)
if minSum <= 0:
nums[i] = MAX
minSum = MAX
ans += 1
return ans
| Solution |
python | wandb__wandb | wandb/sdk/data_types/graph.py | {
"start": 547,
"end": 1717
} | class ____(WBValue):
"""Edge used in `Graph`."""
def __init__(self, from_node, to_node):
self._attributes = {}
self.from_node = from_node
self.to_node = to_node
def __repr__(self):
temp_attr = dict(self._attributes)
del temp_attr["from_node"]
del temp_attr["to_node"]
temp_attr["from_id"] = self.from_node.id
temp_attr["to_id"] = self.to_node.id
return str(temp_attr)
def to_json(self, run=None):
return [self.from_node.id, self.to_node.id]
@property
def name(self):
"""Optional, not necessarily unique."""
return self._attributes.get("name")
@name.setter
def name(self, val):
self._attributes["name"] = val
return val
@property
def from_node(self):
return self._attributes.get("from_node")
@from_node.setter
def from_node(self, val):
self._attributes["from_node"] = val
return val
@property
def to_node(self):
return self._attributes.get("to_node")
@to_node.setter
def to_node(self, val):
self._attributes["to_node"] = val
return val
| Edge |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 696165,
"end": 697793
} | class ____(sgqlc.types.Type):
"""Configuration for a MergeQueue"""
__schema__ = github_schema
__field_names__ = (
"check_response_timeout",
"maximum_entries_to_build",
"maximum_entries_to_merge",
"merge_method",
"merging_strategy",
"minimum_entries_to_merge",
"minimum_entries_to_merge_wait_time",
)
check_response_timeout = sgqlc.types.Field(Int, graphql_name="checkResponseTimeout")
"""The amount of time in minutes to wait for a check response before
considering it a failure.
"""
maximum_entries_to_build = sgqlc.types.Field(Int, graphql_name="maximumEntriesToBuild")
"""The maximum number of entries to build at once."""
maximum_entries_to_merge = sgqlc.types.Field(Int, graphql_name="maximumEntriesToMerge")
"""The maximum number of entries to merge at once."""
merge_method = sgqlc.types.Field(PullRequestMergeMethod, graphql_name="mergeMethod")
"""The merge method to use for this queue."""
merging_strategy = sgqlc.types.Field(MergeQueueMergingStrategy, graphql_name="mergingStrategy")
"""The strategy to use when merging entries."""
minimum_entries_to_merge = sgqlc.types.Field(Int, graphql_name="minimumEntriesToMerge")
"""The minimum number of entries required to merge at once."""
minimum_entries_to_merge_wait_time = sgqlc.types.Field(Int, graphql_name="minimumEntriesToMergeWaitTime")
"""The amount of time in minutes to wait before ignoring the minumum
number of entries in the queue requirement and merging a
collection of entries
"""
| MergeQueueConfiguration |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/stateful.py | {
"start": 2818,
"end": 2964
} | class ____:
"""Sentinel class to prevent overlapping overloads in type hints. See comments
above the overloads of @rule."""
| _OmittedArgument |
python | readthedocs__readthedocs.org | readthedocs/builds/migrations/0051_add_addons_field.py | {
"start": 149,
"end": 647
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("builds", "0050_build_readthedocs_yaml_path"),
]
operations = [
migrations.AddField(
model_name="version",
name="addons",
field=models.BooleanField(
blank=True,
default=False,
null=True,
verbose_name="Inject new addons js library for this version",
),
),
]
| Migration |
python | scikit-learn__scikit-learn | sklearn/tests/test_pipeline.py | {
"start": 71196,
"end": 81354
} | class ____(BaseEstimator):
# This class is used in this section for testing routing in the pipeline.
# This class should have every set_{method}_request
def __sklearn_is_fitted__(self):
return True
def fit(self, X, y, sample_weight=None, prop=None):
assert sample_weight is not None, sample_weight
assert prop is not None, prop
return self
def fit_transform(self, X, y, sample_weight=None, prop=None):
assert sample_weight is not None
assert prop is not None
return X + 1
def fit_predict(self, X, y, sample_weight=None, prop=None):
assert sample_weight is not None
assert prop is not None
return np.ones(len(X))
def predict(self, X, sample_weight=None, prop=None):
assert sample_weight is not None
assert prop is not None
return np.ones(len(X))
def predict_proba(self, X, sample_weight=None, prop=None):
assert sample_weight is not None
assert prop is not None
return np.ones(len(X))
def predict_log_proba(self, X, sample_weight=None, prop=None):
assert sample_weight is not None
assert prop is not None
return np.zeros(len(X))
def decision_function(self, X, sample_weight=None, prop=None):
assert sample_weight is not None
assert prop is not None
return np.ones(len(X))
def score(self, X, y, sample_weight=None, prop=None):
assert sample_weight is not None
assert prop is not None
return 1
def transform(self, X, sample_weight=None, prop=None):
assert sample_weight is not None
assert prop is not None
return X + 1
def inverse_transform(self, X, sample_weight=None, prop=None):
assert sample_weight is not None
assert prop is not None
return X - 1
# split and partial_fit not relevant for pipelines
@pytest.mark.parametrize("method", sorted(set(METHODS) - {"split", "partial_fit"}))
@config_context(enable_metadata_routing=True)
def test_metadata_routing_for_pipeline(method):
"""Test that metadata is routed correctly for pipelines."""
def set_request(est, method, **kwarg):
"""Set requests for a given method.
If the given method is a composite method, set the same requests for
all the methods that compose it.
"""
if method in COMPOSITE_METHODS:
methods = COMPOSITE_METHODS[method]
else:
methods = [method]
for method in methods:
getattr(est, f"set_{method}_request")(**kwarg)
return est
X, y = np.array([[1]]), np.array([1])
sample_weight, prop, metadata = [1], "a", "b"
# test that metadata is routed correctly for pipelines when requested
est = SimpleEstimator()
est = set_request(est, method, sample_weight=True, prop=True)
est = set_request(est, "fit", sample_weight=True, prop=True)
trs = (
ConsumingTransformer()
.set_fit_request(sample_weight=True, metadata=True)
.set_transform_request(sample_weight=True, metadata=True)
.set_inverse_transform_request(sample_weight=True, metadata=True)
)
pipeline = Pipeline([("trs", trs), ("estimator", est)])
if "fit" not in method:
pipeline = pipeline.fit(X, y, sample_weight=sample_weight, prop=prop)
try:
getattr(pipeline, method)(
X, y, sample_weight=sample_weight, prop=prop, metadata=metadata
)
except TypeError:
# Some methods don't accept y
getattr(pipeline, method)(
X, sample_weight=sample_weight, prop=prop, metadata=metadata
)
# Make sure the transformer has received the metadata
# For the transformer, always only `fit` and `transform` are called.
check_recorded_metadata(
obj=trs,
method="fit",
parent="fit",
sample_weight=sample_weight,
metadata=metadata,
)
check_recorded_metadata(
obj=trs,
method="transform",
parent="transform",
sample_weight=sample_weight,
metadata=metadata,
)
# split and partial_fit not relevant for pipelines
# sorted is here needed to make `pytest -nX` work. W/o it, tests are collected
# in different orders between workers and that makes it fail.
@pytest.mark.parametrize("method", sorted(set(METHODS) - {"split", "partial_fit"}))
@config_context(enable_metadata_routing=True)
def test_metadata_routing_error_for_pipeline(method):
"""Test that metadata is not routed for pipelines when not requested."""
X, y = [[1]], [1]
sample_weight, prop = [1], "a"
est = SimpleEstimator()
# here not setting sample_weight request and leaving it as None
pipeline = Pipeline([("estimator", est)])
error_message = (
"[sample_weight, prop] are passed but are not explicitly set as requested"
f" or not requested for SimpleEstimator.{method}"
)
with pytest.raises(ValueError, match=re.escape(error_message)):
try:
# passing X, y positional as the first two arguments
getattr(pipeline, method)(X, y, sample_weight=sample_weight, prop=prop)
except TypeError:
# not all methods accept y (like `predict`), so here we only
# pass X as a positional arg.
getattr(pipeline, method)(X, sample_weight=sample_weight, prop=prop)
@pytest.mark.parametrize(
"method", ["decision_function", "transform", "inverse_transform"]
)
def test_routing_passed_metadata_not_supported(method):
"""Test that the right error message is raised when metadata is passed while
not supported when `enable_metadata_routing=False`."""
pipe = Pipeline([("estimator", SimpleEstimator())])
with pytest.raises(
ValueError, match="is only supported if enable_metadata_routing=True"
):
getattr(pipe, method)([[1]], sample_weight=[1], prop="a")
@config_context(enable_metadata_routing=True)
def test_pipeline_with_estimator_with_len():
"""Test that pipeline works with estimators that have a `__len__` method."""
pipe = Pipeline(
[("trs", RandomTreesEmbedding()), ("estimator", RandomForestClassifier())]
)
pipe.fit([[1]], [1])
pipe.predict([[1]])
@pytest.mark.parametrize("last_step", [None, "passthrough"])
@config_context(enable_metadata_routing=True)
def test_pipeline_with_no_last_step(last_step):
"""Test that the pipeline works when there is not last step.
It should just ignore and pass through the data on transform.
"""
pipe = Pipeline([("trs", FunctionTransformer()), ("estimator", last_step)])
assert pipe.fit([[1]], [1]).transform([[1], [2], [3]]) == [[1], [2], [3]]
@config_context(enable_metadata_routing=True)
def test_feature_union_metadata_routing_error():
"""Test that the right error is raised when metadata is not requested."""
X = np.array([[0, 1], [2, 2], [4, 6]])
y = [1, 2, 3]
sample_weight, metadata = [1, 1, 1], "a"
# test lacking set_fit_request
feature_union = FeatureUnion([("sub_transformer", ConsumingTransformer())])
error_message = (
"[sample_weight, metadata] are passed but are not explicitly set as requested"
f" or not requested for {ConsumingTransformer.__name__}.fit"
)
with pytest.raises(UnsetMetadataPassedError, match=re.escape(error_message)):
feature_union.fit(X, y, sample_weight=sample_weight, metadata=metadata)
# test lacking set_transform_request
feature_union = FeatureUnion(
[
(
"sub_transformer",
ConsumingTransformer().set_fit_request(
sample_weight=True, metadata=True
),
)
]
)
error_message = (
"[sample_weight, metadata] are passed but are not explicitly set as requested "
f"or not requested for {ConsumingTransformer.__name__}.transform"
)
with pytest.raises(UnsetMetadataPassedError, match=re.escape(error_message)):
feature_union.fit(
X, y, sample_weight=sample_weight, metadata=metadata
).transform(X, sample_weight=sample_weight, metadata=metadata)
@config_context(enable_metadata_routing=True)
def test_feature_union_get_metadata_routing_without_fit():
"""Test that get_metadata_routing() works regardless of the Child's
consumption of any metadata."""
feature_union = FeatureUnion([("sub_transformer", ConsumingTransformer())])
feature_union.get_metadata_routing()
@config_context(enable_metadata_routing=True)
@pytest.mark.parametrize(
"transformer", [ConsumingTransformer, ConsumingNoFitTransformTransformer]
)
def test_feature_union_metadata_routing(transformer):
"""Test that metadata is routed correctly for FeatureUnion."""
X = np.array([[0, 1], [2, 2], [4, 6]])
y = [1, 2, 3]
sample_weight, metadata = [1, 1, 1], "a"
feature_union = FeatureUnion(
[
(
"sub_trans1",
transformer(registry=_Registry())
.set_fit_request(sample_weight=True, metadata=True)
.set_transform_request(sample_weight=True, metadata=True),
),
(
"sub_trans2",
transformer(registry=_Registry())
.set_fit_request(sample_weight=True, metadata=True)
.set_transform_request(sample_weight=True, metadata=True),
),
]
)
kwargs = {"sample_weight": sample_weight, "metadata": metadata}
feature_union.fit(X, y, **kwargs)
feature_union.fit_transform(X, y, **kwargs)
feature_union.fit(X, y, **kwargs).transform(X, **kwargs)
for transformer in feature_union.transformer_list:
# access sub-transformer in (name, trans) with transformer[1]
registry = transformer[1].registry
assert len(registry)
for sub_trans in registry:
check_recorded_metadata(
obj=sub_trans,
method="fit",
parent="fit",
**kwargs,
)
# End of routing tests
# ====================
| SimpleEstimator |
python | pypa__build | tests/test_projectbuilder.py | {
"start": 2999,
"end": 3374
} | class ____(MockDistribution):
def read_text(self, filename):
if filename == 'METADATA':
return textwrap.dedent(
"""
Metadata-Version: 2.2
Name: circular_dep
Version: 1.0.0
Requires-Dist: nested_circular_dep
"""
).strip()
| CircularMockDistribution |
python | doocs__leetcode | solution/0900-0999/0947.Most Stones Removed with Same Row or Column/Solution2.py | {
"start": 563,
"end": 813
} | class ____:
def removeStones(self, stones: List[List[int]]) -> int:
m = 10001
uf = UnionFind(m << 1)
for x, y in stones:
uf.union(x, y + m)
return len(stones) - len({uf.find(x) for x, _ in stones})
| Solution |
python | ansible__ansible | lib/ansible/_internal/_ansiballz/_builder.py | {
"start": 288,
"end": 4080
} | class ____:
"""AnsiballZ extension manager."""
def __init__(
self,
pydevd: _pydevd.Options | None = None,
debugpy: _debugpy.Options | None = None,
coverage: _coverage.Options | None = None,
) -> None:
options = dict(
_pydevd=pydevd,
_debugpy=debugpy,
_coverage=coverage,
)
self._pydevd = pydevd
self._debugpy = debugpy
self._coverage = coverage
self._extension_names = tuple(name for name, option in options.items() if option)
self._module_names = tuple(f'{_extensions.__name__}.{name}' for name in self._extension_names)
self.source_mapping: dict[str, str] = {}
@property
def debugger_enabled(self) -> bool:
"""Returns True if the debugger extension is enabled, otherwise False."""
return bool(self._pydevd or self._debugpy)
@property
def extension_names(self) -> tuple[str, ...]:
"""Names of extensions to include in the AnsiballZ payload."""
return self._extension_names
@property
def module_names(self) -> tuple[str, ...]:
"""Python module names of extensions to include in the AnsiballZ payload."""
return self._module_names
def get_extensions(self) -> dict[str, dict[str, object]]:
"""Return the configured extensions and their options."""
extension_options: dict[str, t.Any] = {}
if self._debugpy:
extension_options['_debugpy'] = dataclasses.replace(
self._debugpy,
source_mapping=self._get_source_mapping(self._debugpy.source_mapping),
)
if self._pydevd:
extension_options['_pydevd'] = dataclasses.replace(
self._pydevd,
source_mapping=self._get_source_mapping(self._pydevd.source_mapping),
)
if self._coverage:
extension_options['_coverage'] = self._coverage
extensions = {extension: dataclasses.asdict(options) for extension, options in extension_options.items()}
return extensions
def _get_source_mapping(self, debugger_mapping: dict[str, str]) -> dict[str, str]:
"""Get the source mapping, adjusting the source root as needed."""
if debugger_mapping:
source_mapping = {self._translate_path(key, debugger_mapping): value for key, value in self.source_mapping.items()}
else:
source_mapping = self.source_mapping
return source_mapping
@staticmethod
def _translate_path(path: str, debugger_mapping: dict[str, str]) -> str:
"""Translate a local path to a foreign path."""
for replace, match in debugger_mapping.items():
if path.startswith(match):
return replace + path[len(match) :]
return path
@classmethod
def create(cls, task_vars: dict[str, object]) -> t.Self:
"""Create an instance using the provided task vars."""
return cls(
pydevd=cls._get_options('_ANSIBALLZ_PYDEVD_CONFIG', _pydevd.Options, task_vars),
debugpy=cls._get_options('_ANSIBALLZ_DEBUGPY_CONFIG', _debugpy.Options, task_vars),
coverage=cls._get_options('_ANSIBALLZ_COVERAGE_CONFIG', _coverage.Options, task_vars),
)
@classmethod
def _get_options[T](cls, name: str, config_type: type[T], task_vars: dict[str, object]) -> T | None:
"""Parse configuration from the named environment variable as the specified type, or None if not configured."""
if (value := config.get_config_value(name, variables=task_vars)) is None:
return None
data = json.loads(value) if isinstance(value, str) else value
options = config_type(**data)
return options
| ExtensionManager |
python | allegroai__clearml | examples/advanced/model_finetuning/extract.py | {
"start": 3328,
"end": 4760
} | class ____(cst.CSTVisitor):
"""
Collect all function definitions in the module.
"""
def __init__(self):
self.functions = []
def visit_FunctionDef(self, node: cst.FunctionDef):
self.functions.append(node)
def main():
parser = argparse.ArgumentParser(
description="Clone a repo, extract Python functions with LibCST, and prepare them for LLM fine-tuning."
)
parser.add_argument("repo_url", help="Git repository URL to clone")
parser.add_argument(
"-o",
"--output",
default="output",
help="Directory to store cloned repo, functions, and dataset",
)
parser.add_argument(
"--dataset-name",
default="Finetune Example",
help="The name of the ClearML dataset to dump the data to",
)
parser.add_argument(
"--dataset-project",
default="Finetune Example",
help="The name of the ClearML dataset project to dump the data to",
)
args = parser.parse_args()
out_dir = Path(args.output)
repo_dir = clone_repo(args.repo_url, out_dir)
funcs = extract_functions(repo_dir)
organize_functions(funcs, out_dir)
dataset = Dataset.create(
dataset_name=args.dataset_name, dataset_project=args.dataset_project
)
dataset.add_files(args.output, wildcard="*.jsonl")
dataset.finalize(auto_upload=True)
if __name__ == "__main__":
main()
| FunctionCollector |
python | getsentry__sentry | src/sentry/issues/endpoints/group_activities.py | {
"start": 410,
"end": 865
} | class ____(GroupEndpoint):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
def get(self, request: Request, group: Group) -> Response:
"""
Retrieve all the Activities for a Group
"""
activity = Activity.objects.get_activities_for_group(group, num=100)
return Response(
{
"activity": serialize(activity, request.user),
}
)
| GroupActivitiesEndpoint |
python | tensorflow__tensorflow | tensorflow/compiler/tests/scan_ops_test.py | {
"start": 8174,
"end": 11066
} | class ____(xla_test.XLATestCase):
valid_dtypes = [np.float32, np.int32]
def axis_dtypes(self):
return set(self.int_types).intersection([np.int32, np.int64])
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumprod, np.ones_like, x, axis, exclusive,
reverse)
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
prod = math_ops.cumprod(p, axis, exclusive, reverse)
tf_out = prod.eval(feed_dict={p: x})
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in self.axis_dtypes():
with self.session(), self.test_scope():
p = array_ops.placeholder(x.dtype)
axis = constant_op.constant(0, axis_dtype)
math_ops.cumprod(x, axis).eval(feed_dict={p: x})
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
@test_util.disable_mlir_bridge("Error handling")
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
with self.session(), self.test_scope():
input_tensor = ops.convert_to_tensor(x)
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumprod(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumprod(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumprod(input_tensor, [0]).eval()
if __name__ == "__main__":
test.main()
| CumprodTest |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/redshift_datasource.py | {
"start": 1119,
"end": 1580
} | class ____(BaseModel):
"""
Information needed to connect to a Redshift database.
"""
user: str
password: Union[ConfigStr, str]
host: str
port: int
database: str
sslmode: RedshiftSSLModes
schema_: Optional[str] = Field(
default=None, alias="schema", description="`schema` that the Datasource is mapped to."
)
class Config:
allow_population_by_field_name = True
@public_api
| RedshiftConnectionDetails |
python | redis__redis-py | redis/commands/search/commands.py | {
"start": 33863,
"end": 43995
} | class ____(SearchCommands):
async def info(self):
"""
Get info an stats about the the current index, including the number of
documents, memory consumption, etc
For more information see `FT.INFO <https://redis.io/commands/ft.info>`_.
"""
res = await self.execute_command(INFO_CMD, self.index_name)
return self._parse_results(INFO_CMD, res)
async def search(
self,
query: Union[str, Query],
query_params: Optional[Dict[str, Union[str, int, float, bytes]]] = None,
):
"""
Search the index for a given query, and return a result of documents
### Parameters
- **query**: the search query. Either a text for simple queries with
default parameters, or a Query object for complex queries.
See RediSearch's documentation on query format
For more information see `FT.SEARCH <https://redis.io/commands/ft.search>`_.
""" # noqa
args, query = self._mk_query_args(query, query_params=query_params)
st = time.monotonic()
options = {}
if get_protocol_version(self.client) not in ["3", 3]:
options[NEVER_DECODE] = True
res = await self.execute_command(SEARCH_CMD, *args, **options)
if isinstance(res, Pipeline):
return res
return self._parse_results(
SEARCH_CMD, res, query=query, duration=(time.monotonic() - st) * 1000.0
)
async def hybrid_search(
self,
query: HybridQuery,
combine_method: Optional[CombineResultsMethod] = None,
post_processing: Optional[HybridPostProcessingConfig] = None,
params_substitution: Optional[Dict[str, Union[str, int, float, bytes]]] = None,
timeout: Optional[int] = None,
cursor: Optional[HybridCursorQuery] = None,
) -> Union[HybridResult, HybridCursorResult, Pipeline]:
"""
Execute a hybrid search using both text and vector queries
Args:
- **query**: HybridQuery object
Contains the text and vector queries
- **combine_method**: CombineResultsMethod object
Contains the combine method and parameters
- **post_processing**: HybridPostProcessingConfig object
Contains the post processing configuration
- **params_substitution**: Dict[str, Union[str, int, float, bytes]]
Contains the parameters substitution
- **timeout**: int - contains the timeout in milliseconds
- **cursor**: HybridCursorQuery object - contains the cursor configuration
For more information see `FT.SEARCH <https://redis.io/commands/ft.hybrid>`.
"""
index = self.index_name
options = {}
pieces = [HYBRID_CMD, index]
pieces.extend(query.get_args())
if combine_method:
pieces.extend(combine_method.get_args())
if post_processing:
pieces.extend(post_processing.build_args())
if params_substitution:
pieces.extend(self.get_params_args(params_substitution))
if timeout:
pieces.extend(("TIMEOUT", timeout))
if cursor:
options["cursor"] = True
pieces.extend(cursor.build_args())
if get_protocol_version(self.client) not in ["3", 3]:
options[NEVER_DECODE] = True
res = await self.execute_command(*pieces, **options)
if isinstance(res, Pipeline):
return res
return self._parse_results(HYBRID_CMD, res, **options)
async def aggregate(
self,
query: Union[AggregateResult, Cursor],
query_params: Optional[Dict[str, Union[str, int, float, bytes]]] = None,
):
"""
Issue an aggregation query.
### Parameters
**query**: This can be either an `AggregateRequest`, or a `Cursor`
An `AggregateResult` object is returned. You can access the rows from
its `rows` property, which will always yield the rows of the result.
For more information see `FT.AGGREGATE <https://redis.io/commands/ft.aggregate>`_.
""" # noqa
if isinstance(query, AggregateRequest):
has_cursor = bool(query._cursor)
cmd = [AGGREGATE_CMD, self.index_name] + query.build_args()
elif isinstance(query, Cursor):
has_cursor = True
cmd = [CURSOR_CMD, "READ", self.index_name] + query.build_args()
else:
raise ValueError("Bad query", query)
cmd += self.get_params_args(query_params)
raw = await self.execute_command(*cmd)
return self._parse_results(
AGGREGATE_CMD, raw, query=query, has_cursor=has_cursor
)
async def spellcheck(self, query, distance=None, include=None, exclude=None):
"""
Issue a spellcheck query
### Parameters
**query**: search query.
**distance***: the maximal Levenshtein distance for spelling
suggestions (default: 1, max: 4).
**include**: specifies an inclusion custom dictionary.
**exclude**: specifies an exclusion custom dictionary.
For more information see `FT.SPELLCHECK <https://redis.io/commands/ft.spellcheck>`_.
""" # noqa
cmd = [SPELLCHECK_CMD, self.index_name, query]
if distance:
cmd.extend(["DISTANCE", distance])
if include:
cmd.extend(["TERMS", "INCLUDE", include])
if exclude:
cmd.extend(["TERMS", "EXCLUDE", exclude])
res = await self.execute_command(*cmd)
return self._parse_results(SPELLCHECK_CMD, res)
@deprecated_function(
version="8.0.0",
reason="deprecated since Redis 8.0, call config_set from core module instead",
)
async def config_set(self, option: str, value: str) -> bool:
"""Set runtime configuration option.
### Parameters
- **option**: the name of the configuration option.
- **value**: a value for the configuration option.
For more information see `FT.CONFIG SET <https://redis.io/commands/ft.config-set>`_.
""" # noqa
cmd = [CONFIG_CMD, "SET", option, value]
raw = await self.execute_command(*cmd)
return raw == "OK"
@deprecated_function(
version="8.0.0",
reason="deprecated since Redis 8.0, call config_get from core module instead",
)
async def config_get(self, option: str) -> str:
"""Get runtime configuration option value.
### Parameters
- **option**: the name of the configuration option.
For more information see `FT.CONFIG GET <https://redis.io/commands/ft.config-get>`_.
""" # noqa
cmd = [CONFIG_CMD, "GET", option]
res = {}
res = await self.execute_command(*cmd)
return self._parse_results(CONFIG_CMD, res)
async def load_document(self, id):
"""
Load a single document by id
"""
fields = await self.client.hgetall(id)
f2 = {to_string(k): to_string(v) for k, v in fields.items()}
fields = f2
try:
del fields["id"]
except KeyError:
pass
return Document(id=id, **fields)
async def sugadd(self, key, *suggestions, **kwargs):
"""
Add suggestion terms to the AutoCompleter engine. Each suggestion has
a score and string.
If kwargs["increment"] is true and the terms are already in the
server's dictionary, we increment their scores.
For more information see `FT.SUGADD <https://redis.io/commands/ft.sugadd>`_.
""" # noqa
# If Transaction is not False it will MULTI/EXEC which will error
pipe = self.pipeline(transaction=False)
for sug in suggestions:
args = [SUGADD_COMMAND, key, sug.string, sug.score]
if kwargs.get("increment"):
args.append("INCR")
if sug.payload:
args.append("PAYLOAD")
args.append(sug.payload)
pipe.execute_command(*args)
return (await pipe.execute())[-1]
async def sugget(
self,
key: str,
prefix: str,
fuzzy: bool = False,
num: int = 10,
with_scores: bool = False,
with_payloads: bool = False,
) -> List[SuggestionParser]:
"""
Get a list of suggestions from the AutoCompleter, for a given prefix.
Parameters:
prefix : str
The prefix we are searching. **Must be valid ascii or utf-8**
fuzzy : bool
If set to true, the prefix search is done in fuzzy mode.
**NOTE**: Running fuzzy searches on short (<3 letters) prefixes
can be very
slow, and even scan the entire index.
with_scores : bool
If set to true, we also return the (refactored) score of
each suggestion.
This is normally not needed, and is NOT the original score
inserted into the index.
with_payloads : bool
Return suggestion payloads
num : int
The maximum number of results we return. Note that we might
return less. The algorithm trims irrelevant suggestions.
Returns:
list:
A list of Suggestion objects. If with_scores was False, the
score of all suggestions is 1.
For more information see `FT.SUGGET <https://redis.io/commands/ft.sugget>`_.
""" # noqa
args = [SUGGET_COMMAND, key, prefix, "MAX", num]
if fuzzy:
args.append(FUZZY)
if with_scores:
args.append(WITHSCORES)
if with_payloads:
args.append(WITHPAYLOADS)
ret = await self.execute_command(*args)
results = []
if not ret:
return results
parser = SuggestionParser(with_scores, with_payloads, ret)
return [s for s in parser]
| AsyncSearchCommands |
python | Netflix__metaflow | metaflow/includefile.py | {
"start": 15325,
"end": 18936
} | class ____:
file_type = "uploader-v1"
@classmethod
def encode_url(cls, url_type, url, **kwargs):
return_value = {"type": url_type, "url": url}
return_value.update(kwargs)
return return_value
@classmethod
def store(cls, flow_name, path, is_text, encoding, handler, echo):
sz = os.path.getsize(path)
unit = ["B", "KB", "MB", "GB", "TB"]
pos = 0
while pos < len(unit) and sz >= 1024:
sz = sz // 1024
pos += 1
if pos >= 3:
extra = "(this may take a while)"
else:
extra = ""
echo("Including file %s of size %d%s %s" % (path, sz, unit[pos], extra))
try:
input_file = io.open(path, mode="rb").read()
except IOError:
# If we get an error here, since we know that the file exists already,
# it means that read failed which happens with Python 2.7 for large files
raise MetaflowException(
"Cannot read file at %s -- this is likely because it is too "
"large to be properly handled by Python 2.7" % path
)
sha = sha1(input_file).hexdigest()
path = os.path.join(handler.get_root_from_config(echo, True), flow_name, sha)
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode="wb", compresslevel=3) as f:
f.write(input_file)
buf.seek(0)
with handler() as client:
url = client.put(path, buf.getvalue(), overwrite=False)
return cls.encode_url(cls.file_type, url, is_text=is_text, encoding=encoding)
@classmethod
def size(cls, descriptor):
# We never have the size so we look it up
url = descriptor["url"]
handler = cls._get_handler(url)
with handler() as client:
obj = client.info(url, return_missing=True)
if obj.exists:
return obj.size
raise FileNotFoundError("File at '%s' does not exist" % url)
@classmethod
def load(cls, descriptor):
url = descriptor["url"]
handler = cls._get_handler(url)
with handler() as client:
obj = client.get(url, return_missing=True)
if obj.exists:
if descriptor["type"] == cls.file_type:
# We saved this file directly, so we know how to read it out
with gzip.GzipFile(filename=obj.path, mode="rb") as f:
if descriptor["is_text"]:
return io.TextIOWrapper(
f, encoding=descriptor.get("encoding")
).read()
return f.read()
else:
# We open this file according to the is_text and encoding information
if descriptor["is_text"]:
return io.open(
obj.path, mode="rt", encoding=descriptor.get("encoding")
).read()
else:
return io.open(obj.path, mode="rb").read()
raise FileNotFoundError("File at '%s' does not exist" % descriptor["url"])
@staticmethod
def _get_handler(url):
prefix_pos = url.find("://")
if prefix_pos < 0:
raise MetaflowException("Malformed URL: '%s'" % url)
prefix = url[:prefix_pos]
handler = _dict_dataclients.get(prefix)
if handler is None:
raise MetaflowException("Could not find data client for '%s'" % prefix)
return handler
| UploaderV1 |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/translate.py | {
"start": 6118,
"end": 7041
} | class ____(BaseGoogleLink):
"""
Helper class for constructing Translation results Link.
Provides link to gcs destination output translation results, by provided output_config
with gcs destination specified.
"""
name = "Translate Results By Output Config"
key = "translate_results_by_output_config"
format_str = TRANSLATION_TRANSLATE_TEXT_BATCH
@staticmethod
def extract_output_uri_prefix(output_config):
return output_config["gcs_destination"]["output_uri_prefix"].rpartition("gs://")[-1]
@classmethod
def persist(cls, context: Context, **value):
output_config = value.get("output_config")
output_uri_prefix = cls.extract_output_uri_prefix(output_config)
super().persist(
context=context,
project_id=value.get("project_id"),
output_uri_prefix=output_uri_prefix,
)
| TranslateResultByOutputConfigLink |
python | PrefectHQ__prefect | tests/blocks/test_core.py | {
"start": 97069,
"end": 98681
} | class ____:
@pytest.fixture
def NewBlock(self):
# Ignore warning caused by matching key in registry due to block fixture
warnings.filterwarnings("ignore", category=UserWarning)
class NewBlock(Block):
a: str
b: str
_block_type_slug = "new-block"
return NewBlock
async def test_delete_block(self, NewBlock):
new_block = NewBlock(a="foo", b="bar")
new_block_name = "my-block"
await new_block.save(new_block_name)
loaded_new_block = await new_block.load(new_block_name)
assert loaded_new_block._block_document_name == new_block_name
await NewBlock.delete(new_block_name)
with pytest.raises(ValueError) as exception:
await new_block.load(new_block_name)
assert (
f"Unable to find block document named {new_block_name}"
in exception.value
)
async def test_delete_block_from_base_block(self, NewBlock):
new_block = NewBlock(a="foo", b="bar")
new_block_name = "my-block"
await new_block.save(new_block_name)
loaded_new_block = await new_block.load(new_block_name)
assert loaded_new_block._block_document_name == new_block_name
await Block.delete(f"{new_block._block_type_slug}/{new_block_name}")
with pytest.raises(ValueError) as exception:
await new_block.load(new_block_name)
assert (
f"Unable to find block document named {new_block_name}"
in exception.value
)
| TestDeleteBlock |
python | encode__httpx | httpx/_exceptions.py | {
"start": 4033,
"end": 4151
} | class ____(NetworkError):
"""
Failed to close a connection.
"""
# Other transport exceptions...
| CloseError |
python | doocs__leetcode | solution/0300-0399/0336.Palindrome Pairs/Solution.py | {
"start": 0,
"end": 526
} | class ____:
def palindromePairs(self, words: List[str]) -> List[List[int]]:
d = {w: i for i, w in enumerate(words)}
ans = []
for i, w in enumerate(words):
for j in range(len(w) + 1):
a, b = w[:j], w[j:]
ra, rb = a[::-1], b[::-1]
if ra in d and d[ra] != i and b == rb:
ans.append([i, d[ra]])
if j and rb in d and d[rb] != i and a == ra:
ans.append([d[rb], i])
return ans
| Solution |
python | pytest-dev__pytest | src/_pytest/monkeypatch.py | {
"start": 3718,
"end": 15510
} | class ____:
"""Helper to conveniently monkeypatch attributes/items/environment
variables/syspath.
Returned by the :fixture:`monkeypatch` fixture.
.. versionchanged:: 6.2
Can now also be used directly as `pytest.MonkeyPatch()`, for when
the fixture is not available. In this case, use
:meth:`with MonkeyPatch.context() as mp: <context>` or remember to call
:meth:`undo` explicitly.
"""
def __init__(self) -> None:
self._setattr: list[tuple[object, str, object]] = []
self._setitem: list[tuple[Mapping[Any, Any], object, object]] = []
self._cwd: str | None = None
self._savesyspath: list[str] | None = None
@classmethod
@contextmanager
def context(cls) -> Generator[MonkeyPatch]:
"""Context manager that returns a new :class:`MonkeyPatch` object
which undoes any patching done inside the ``with`` block upon exit.
Example:
.. code-block:: python
import functools
def test_partial(monkeypatch):
with monkeypatch.context() as m:
m.setattr(functools, "partial", 3)
Useful in situations where it is desired to undo some patches before the test ends,
such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
of this see :issue:`3290`).
"""
m = cls()
try:
yield m
finally:
m.undo()
@overload
def setattr(
self,
target: str,
name: object,
value: NotSetType = ...,
raising: bool = ...,
) -> None: ...
@overload
def setattr(
self,
target: object,
name: str,
value: object,
raising: bool = ...,
) -> None: ...
def setattr(
self,
target: str | object,
name: object | str,
value: object = NOTSET,
raising: bool = True,
) -> None:
"""
Set attribute value on target, memorizing the old value.
For example:
.. code-block:: python
import os
monkeypatch.setattr(os, "getcwd", lambda: "/")
The code above replaces the :func:`os.getcwd` function by a ``lambda`` which
always returns ``"/"``.
For convenience, you can specify a string as ``target`` which
will be interpreted as a dotted import path, with the last part
being the attribute name:
.. code-block:: python
monkeypatch.setattr("os.getcwd", lambda: "/")
Raises :class:`AttributeError` if the attribute does not exist, unless
``raising`` is set to False.
**Where to patch**
``monkeypatch.setattr`` works by (temporarily) changing the object that a name points to with another one.
There can be many names pointing to any individual object, so for patching to work you must ensure
that you patch the name used by the system under test.
See the section :ref:`Where to patch <python:where-to-patch>` in the :mod:`unittest.mock`
docs for a complete explanation, which is meant for :func:`unittest.mock.patch` but
applies to ``monkeypatch.setattr`` as well.
"""
__tracebackhide__ = True
import inspect
if value is NOTSET:
if not isinstance(target, str):
raise TypeError(
"use setattr(target, name, value) or "
"setattr(target, value) with target being a dotted "
"import string"
)
value = name
name, target = derive_importpath(target, raising)
else:
if not isinstance(name, str):
raise TypeError(
"use setattr(target, name, value) with name being a string or "
"setattr(target, value) with target being a dotted "
"import string"
)
oldval = getattr(target, name, NOTSET)
if raising and oldval is NOTSET:
raise AttributeError(f"{target!r} has no attribute {name!r}")
# avoid class descriptors like staticmethod/classmethod
if inspect.isclass(target):
oldval = target.__dict__.get(name, NOTSET)
self._setattr.append((target, name, oldval))
setattr(target, name, value)
def delattr(
self,
target: object | str,
name: str | NotSetType = NOTSET,
raising: bool = True,
) -> None:
"""Delete attribute ``name`` from ``target``.
If no ``name`` is specified and ``target`` is a string
it will be interpreted as a dotted import path with the
last part being the attribute name.
Raises AttributeError it the attribute does not exist, unless
``raising`` is set to False.
"""
__tracebackhide__ = True
import inspect
if name is NOTSET:
if not isinstance(target, str):
raise TypeError(
"use delattr(target, name) or "
"delattr(target) with target being a dotted "
"import string"
)
name, target = derive_importpath(target, raising)
if not hasattr(target, name):
if raising:
raise AttributeError(name)
else:
oldval = getattr(target, name, NOTSET)
# Avoid class descriptors like staticmethod/classmethod.
if inspect.isclass(target):
oldval = target.__dict__.get(name, NOTSET)
self._setattr.append((target, name, oldval))
delattr(target, name)
def setitem(self, dic: Mapping[K, V], name: K, value: V) -> None:
"""Set dictionary entry ``name`` to value."""
self._setitem.append((dic, name, dic.get(name, NOTSET)))
# Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict
dic[name] = value # type: ignore[index]
def delitem(self, dic: Mapping[K, V], name: K, raising: bool = True) -> None:
"""Delete ``name`` from dict.
Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to
False.
"""
if name not in dic:
if raising:
raise KeyError(name)
else:
self._setitem.append((dic, name, dic.get(name, NOTSET)))
# Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict
del dic[name] # type: ignore[attr-defined]
def setenv(self, name: str, value: str, prepend: str | None = None) -> None:
"""Set environment variable ``name`` to ``value``.
If ``prepend`` is a character, read the current environment variable
value and prepend the ``value`` adjoined with the ``prepend``
character.
"""
if not isinstance(value, str):
warnings.warn( # type: ignore[unreachable]
PytestWarning(
f"Value of environment variable {name} type should be str, but got "
f"{value!r} (type: {type(value).__name__}); converted to str implicitly"
),
stacklevel=2,
)
value = str(value)
if prepend and name in os.environ:
value = value + prepend + os.environ[name]
self.setitem(os.environ, name, value)
def delenv(self, name: str, raising: bool = True) -> None:
"""Delete ``name`` from the environment.
Raises ``KeyError`` if it does not exist, unless ``raising`` is set to
False.
"""
environ: MutableMapping[str, str] = os.environ
self.delitem(environ, name, raising=raising)
def syspath_prepend(self, path) -> None:
"""Prepend ``path`` to ``sys.path`` list of import locations."""
if self._savesyspath is None:
self._savesyspath = sys.path[:]
sys.path.insert(0, str(path))
# https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171
# this is only needed when pkg_resources was already loaded by the namespace package
if "pkg_resources" in sys.modules:
import pkg_resources
from pkg_resources import fixup_namespace_packages
# Only issue deprecation warning if this call would actually have an
# effect for this specific path.
if (
hasattr(pkg_resources, "_namespace_packages")
and pkg_resources._namespace_packages
):
path_obj = Path(str(path))
for ns_pkg in pkg_resources._namespace_packages:
if ns_pkg is None:
continue
ns_pkg_path = path_obj / ns_pkg.replace(".", os.sep)
if ns_pkg_path.is_dir():
warnings.warn(
MONKEYPATCH_LEGACY_NAMESPACE_PACKAGES, stacklevel=2
)
break
fixup_namespace_packages(str(path))
# A call to syspathinsert() usually means that the caller wants to
# import some dynamically created files, thus with python3 we
# invalidate its import caches.
# This is especially important when any namespace package is in use,
# since then the mtime based FileFinder cache (that gets created in
# this case already) gets not invalidated when writing the new files
# quickly afterwards.
from importlib import invalidate_caches
invalidate_caches()
def chdir(self, path: str | os.PathLike[str]) -> None:
"""Change the current working directory to the specified path.
:param path:
The path to change into.
"""
if self._cwd is None:
self._cwd = os.getcwd()
os.chdir(path)
def undo(self) -> None:
"""Undo previous changes.
This call consumes the undo stack. Calling it a second time has no
effect unless you do more monkeypatching after the undo call.
There is generally no need to call `undo()`, since it is
called automatically during tear-down.
.. note::
The same `monkeypatch` fixture is used across a
single test function invocation. If `monkeypatch` is used both by
the test function itself and one of the test fixtures,
calling `undo()` will undo all of the changes made in
both functions.
Prefer to use :meth:`context() <pytest.MonkeyPatch.context>` instead.
"""
for obj, name, value in reversed(self._setattr):
if value is not NOTSET:
setattr(obj, name, value)
else:
delattr(obj, name)
self._setattr[:] = []
for dictionary, key, value in reversed(self._setitem):
if value is NOTSET:
try:
# Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict
del dictionary[key] # type: ignore[attr-defined]
except KeyError:
pass # Was already deleted, so we have the desired state.
else:
# Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict
dictionary[key] = value # type: ignore[index]
self._setitem[:] = []
if self._savesyspath is not None:
sys.path[:] = self._savesyspath
self._savesyspath = None
if self._cwd is not None:
os.chdir(self._cwd)
self._cwd = None
| MonkeyPatch |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 281762,
"end": 282254
} | class ____(sgqlc.types.Input):
"""Ordering options for repository invitation connections."""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(RepositoryInvitationOrderField), graphql_name="field")
"""The field to order repository invitations by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The ordering direction."""
| RepositoryInvitationOrder |
python | getsentry__sentry | src/sentry/backup/crypto.py | {
"start": 9547,
"end": 12167
} | class ____(Decryptor):
"""
Decrypt using a config JSON file that uses remote decryption over Google Cloud Platform's Key
Management Service.
"""
def __init__(self, fp: IO[bytes]):
self.__key = fp.read()
@classmethod
def from_bytes(cls, b: bytes) -> GCPKMSDecryptor:
return cls(io.BytesIO(b))
def decrypt_data_encryption_key(self, unwrapped: UnwrappedEncryptedExportTarball) -> bytes:
gcp_kms_config_bytes = self.__key
# Read the user supplied configuration into the proper format.
gcp_kms_config_json = orjson.loads(gcp_kms_config_bytes)
try:
crypto_key_version = CryptoKeyVersion(**gcp_kms_config_json)
except TypeError:
raise DecryptionError(
"""Your supplied KMS configuration did not have the correct fields - please
ensure that it is a single, top-level object with the fields `project_id`
`location`, `key_ring`, `key`, and `version`, with all values as strings."""
)
kms_client = KeyManagementServiceClient()
key_name = kms_client.crypto_key_version_path(
project=crypto_key_version.project_id,
location=crypto_key_version.location,
key_ring=crypto_key_version.key_ring,
crypto_key=crypto_key_version.key,
crypto_key_version=crypto_key_version.version,
)
ciphertext = unwrapped.encrypted_data_encryption_key
dek_crc32c = crc32c(ciphertext)
decrypt_response = kms_client.asymmetric_decrypt(
request={
"name": key_name,
"ciphertext": ciphertext,
"ciphertext_crc32c": dek_crc32c,
}
)
if not decrypt_response.plaintext_crc32c == crc32c(decrypt_response.plaintext):
raise DecryptionError("The response received from the server was corrupted in-transit.")
return decrypt_response.plaintext
def decrypt_encrypted_tarball(tarball: IO[bytes], decryptor: Decryptor) -> bytes:
"""
A tarball encrypted by a call to `_export` with `encrypt_with` set has some specific properties
(filenames, etc). This method handles all of those, and decrypts using the provided private key
into an in-memory JSON string.
"""
unwrapped = unwrap_encrypted_export_tarball(tarball)
# Decrypt the DEK, then use it to decrypt the underlying JSON.
decrypted_dek = decryptor.decrypt_data_encryption_key(unwrapped)
fernet = Fernet(decrypted_dek)
return fernet.decrypt(unwrapped.encrypted_json_blob)
| GCPKMSDecryptor |
python | kamyu104__LeetCode-Solutions | Python/maximum-xor-for-each-query.py | {
"start": 29,
"end": 390
} | class ____(object):
def getMaximumXor(self, nums, maximumBit):
"""
:type nums: List[int]
:type maximumBit: int
:rtype: List[int]
"""
result = [0]*len(nums)
mask = 2**maximumBit-1
for i in xrange(len(nums)):
mask ^= nums[i]
result[-1-i] = mask
return result
| Solution |
python | Textualize__textual | docs/examples/styles/overflow.py | {
"start": 443,
"end": 788
} | class ____(App):
CSS_PATH = "overflow.tcss"
def compose(self):
yield Horizontal(
VerticalScroll(Static(TEXT), Static(TEXT), Static(TEXT), id="left"),
VerticalScroll(Static(TEXT), Static(TEXT), Static(TEXT), id="right"),
)
if __name__ == "__main__":
app = OverflowApp()
app.run()
| OverflowApp |
python | kamyu104__LeetCode-Solutions | Python/two-out-of-three.py | {
"start": 574,
"end": 1054
} | class ____(object):
def twoOutOfThree(self, nums1, nums2, nums3):
"""
:type nums1: List[int]
:type nums2: List[int]
:type nums3: List[int]
:rtype: List[int]
"""
K = 2
cnt = collections.Counter()
result = []
for nums in nums1, nums2, nums3:
for x in set(nums):
cnt[x] += 1
if cnt[x] == K:
result.append(x)
return result
| Solution2 |
python | kevin1024__vcrpy | vcr/cassette.py | {
"start": 5786,
"end": 14710
} | class ____:
"""A container for recorded requests and responses"""
@classmethod
def load(cls, **kwargs):
"""Instantiate and load the cassette stored at the specified path."""
new_cassette = cls(**kwargs)
new_cassette._load()
return new_cassette
@classmethod
def use_arg_getter(cls, arg_getter):
return CassetteContextDecorator(cls, arg_getter)
@classmethod
def use(cls, **kwargs):
return CassetteContextDecorator.from_args(cls, **kwargs)
def __init__(
self,
path,
serializer=None,
persister=None,
record_mode=RecordMode.ONCE,
match_on=(uri, method),
before_record_request=None,
before_record_response=None,
custom_patches=(),
inject=False,
allow_playback_repeats=False,
drop_unused_requests=False,
):
self._persister = persister or FilesystemPersister
self._path = path
self._serializer = serializer or yamlserializer
self._match_on = match_on
self._before_record_request = before_record_request or (lambda x: x)
log.info(self._before_record_request)
self._before_record_response = before_record_response or (lambda x: x)
self.inject = inject
self.record_mode = record_mode
self.custom_patches = custom_patches
self.allow_playback_repeats = allow_playback_repeats
self.drop_unused_requests = drop_unused_requests
# self.data is the list of (req, resp) tuples
self.data = []
self.play_counts = collections.Counter()
self.dirty = False
self.rewound = False
# Subsets of self.data to store old and played interactions
self._old_interactions = []
self._played_interactions = []
@property
def play_count(self):
return sum(self.play_counts.values())
@property
def all_played(self):
"""Returns True if all responses have been played, False otherwise."""
return len(self.play_counts.values()) == len(self)
@property
def requests(self):
return [request for (request, response) in self.data]
@property
def responses(self):
return [response for (request, response) in self.data]
@property
def write_protected(self):
return (self.rewound and self.record_mode == RecordMode.ONCE) or self.record_mode == RecordMode.NONE
def append(self, request, response):
"""Add a request, response pair to this cassette"""
request = self._before_record_request(request)
if not request:
return
log.info("Appending request %s and response %s", request, response)
# Deepcopy is here because mutation of `response` will corrupt the
# real response.
response = copy.deepcopy(response)
response = self._before_record_response(response)
if response is None:
return
self.data.append((request, response))
self.dirty = True
def filter_request(self, request):
return self._before_record_request(request)
def _responses(self, request):
"""
internal API, returns an iterator with all responses matching
the request.
"""
request = self._before_record_request(request)
for index, (stored_request, response) in enumerate(self.data):
if requests_match(request, stored_request, self._match_on):
yield index, response
def can_play_response_for(self, request):
request = self._before_record_request(request)
return request and request in self and self.record_mode != RecordMode.ALL and self.rewound
def play_response(self, request):
"""
Get the response corresponding to a request, but only if it
hasn't been played back before, and mark it as played
"""
for index, response in self._responses(request):
if self.play_counts[index] == 0 or self.allow_playback_repeats:
self.play_counts[index] += 1
self._played_interactions.append((request, response))
return response
# The cassette doesn't contain the request asked for.
raise UnhandledHTTPRequestError(
f"The cassette ({self._path!r}) doesn't contain the request ({request!r}) asked for",
)
def responses_of(self, request):
"""
Find the responses corresponding to a request.
This function isn't actually used by VCR internally, but is
provided as an external API.
"""
responses = [response for index, response in self._responses(request)]
if responses:
return responses
# The cassette doesn't contain the request asked for.
raise UnhandledHTTPRequestError(
f"The cassette ({self._path!r}) doesn't contain the request ({request!r}) asked for",
)
def rewind(self):
self.play_counts = collections.Counter()
def find_requests_with_most_matches(self, request):
"""
Get the most similar request(s) stored in the cassette
of a given request as a list of tuples like this:
- the request object
- the successful matchers as string
- the failed matchers and the related assertion message with the difference details as strings tuple
This is useful when a request failed to be found,
we can get the similar request(s) in order to know what have changed in the request parts.
"""
best_matches = []
request = self._before_record_request(request)
for _, (stored_request, _) in enumerate(self.data):
successes, fails = get_matchers_results(request, stored_request, self._match_on)
best_matches.append((len(successes), stored_request, successes, fails))
best_matches.sort(key=lambda t: t[0], reverse=True)
# Get the first best matches (multiple if equal matches)
final_best_matches = []
if not best_matches:
return final_best_matches
previous_nb_success = best_matches[0][0]
for best_match in best_matches:
nb_success = best_match[0]
# Do not keep matches that have 0 successes,
# it means that the request is totally different from
# the ones stored in the cassette
if nb_success < 1 or previous_nb_success != nb_success:
break
previous_nb_success = nb_success
final_best_matches.append(best_match[1:])
return final_best_matches
def _new_interactions(self):
"""List of new HTTP interactions (request/response tuples)"""
new_interactions = []
for request, response in self.data:
if all(
not requests_match(request, old_request, self._match_on)
for old_request, _ in self._old_interactions
):
new_interactions.append((request, response))
return new_interactions
def _as_dict(self):
return {"requests": self.requests, "responses": self.responses}
def _build_used_interactions_dict(self):
interactions = self._played_interactions + self._new_interactions()
cassete_dict = {
"requests": [request for request, _ in interactions],
"responses": [response for _, response in interactions],
}
return cassete_dict
def _save(self, force=False):
if self.drop_unused_requests and len(self._played_interactions) < len(self._old_interactions):
cassete_dict = self._build_used_interactions_dict()
force = True
else:
cassete_dict = self._as_dict()
if force or self.dirty:
self._persister.save_cassette(self._path, cassete_dict, serializer=self._serializer)
self.dirty = False
def _load(self):
try:
requests, responses = self._persister.load_cassette(self._path, serializer=self._serializer)
for request, response in zip(requests, responses, strict=False):
self.append(request, response)
self._old_interactions.append((request, response))
self.dirty = False
self.rewound = True
except (CassetteDecodeError, CassetteNotFoundError):
pass
def __str__(self):
return f"<Cassette containing {len(self)} recorded response(s)>"
def __len__(self):
"""Return the number of request,response pairs stored in here"""
return len(self.data)
def __contains__(self, request):
"""Return whether or not a request has been stored"""
for index, _ in self._responses(request):
if self.play_counts[index] == 0 or self.allow_playback_repeats:
return True
return False
| Cassette |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.