language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | tests/models/mistral/test_modeling_mistral.py | {
"start": 16776,
"end": 22632
} | class ____(unittest.TestCase):
model_name = "mistralai/Mistral-7B-v0.1"
model = None
model_dtype = None
@classmethod
def setUpClass(cls):
cleanup(torch_device, gc_collect=True)
if cls.model_dtype is None:
cls.model_dtype = torch.float16
if cls.model is None:
cls.model = MistralForCausalLM.from_pretrained(cls.model_name, dtype=cls.model_dtype).to(torch_device)
@classmethod
def tearDownClass(cls):
del cls.model_dtype
del cls.model
cleanup(torch_device, gc_collect=True)
def setUp(self):
cleanup(torch_device, gc_collect=True)
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name, use_fast=False)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def get_test_data(self):
template = "my favorite {}"
items = ("pet is a", "artist plays a", "name is L") # same number of tokens in each item
batch_separate = [template.format(x) for x in items] # 3 separate lines
batch_shared_prefix = template.format(" ".join(items)) # 1 line with options concatenated
input_ids = self.tokenizer(batch_separate, return_tensors="pt").input_ids.to(torch_device)
input_ids_shared_prefix = self.tokenizer(batch_shared_prefix, return_tensors="pt").input_ids.to(torch_device)
mask_shared_prefix = torch.tensor(
[
[
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1],
]
]
],
device=torch_device,
)
position_ids = torch.arange(input_ids.shape[1]).tile(input_ids.shape[0], 1).to(torch_device)
# building custom positions ids based on custom mask
position_ids_shared_prefix = (mask_shared_prefix.sum(dim=-1) - 1).reshape(1, -1)
# effectively: position_ids_shared_prefix = torch.tensor([[0, 1, 2, 3, 4, 5, 3, 4, 5, 3, 4, 5]]).to(device)
# inverting the mask
min_dtype = torch.finfo(self.model_dtype).min
mask_shared_prefix = (mask_shared_prefix.eq(0.0)).to(dtype=self.model_dtype) * min_dtype
return input_ids, position_ids, input_ids_shared_prefix, mask_shared_prefix, position_ids_shared_prefix
def test_stacked_causal_mask(self):
(
input_ids,
position_ids,
input_ids_shared_prefix,
mask_shared_prefix,
position_ids_shared_prefix,
) = self.get_test_data()
# regular batch
logits = self.model.forward(input_ids, position_ids=position_ids).logits
logits_last = logits[:, -1, :] # last tokens in each batch line
decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)]
# single forward run with 4D custom mask
logits_shared_prefix = self.model.forward(
input_ids_shared_prefix, attention_mask=mask_shared_prefix, position_ids=position_ids_shared_prefix
).logits
logits_shared_prefix_last = logits_shared_prefix[
0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1], :
] # last three tokens
decoded_shared_prefix = [self.tokenizer.decode(t) for t in logits_shared_prefix_last.argmax(dim=-1)]
self.assertEqual(decoded, decoded_shared_prefix)
def test_partial_stacked_causal_mask(self):
# Same as the test above, but the input is passed in two groups. It tests that we can pass partial 4D attention masks
(
input_ids,
position_ids,
input_ids_shared_prefix,
mask_shared_prefix,
position_ids_shared_prefix,
) = self.get_test_data()
# regular batch
logits = self.model.forward(input_ids, position_ids=position_ids).logits
logits_last = logits[:, -1, :] # last tokens in each batch line
decoded = [self.tokenizer.decode(t) for t in logits_last.argmax(dim=-1)]
# 2 forward runs with custom 4D masks
part_a = 3 # split point
input_1a = input_ids_shared_prefix[:, :part_a]
position_ids_1a = position_ids_shared_prefix[:, :part_a]
mask_1a = mask_shared_prefix[:, :, :part_a, :part_a]
outs_1a = self.model.forward(input_1a, attention_mask=mask_1a, position_ids=position_ids_1a)
past_key_values_a = outs_1a["past_key_values"]
# Case 1: we pass a 4D attention mask regarding the current sequence length (i.e. [..., seq_len, full_len])
input_1b = input_ids_shared_prefix[:, part_a:]
position_ids_1b = position_ids_shared_prefix[:, part_a:]
mask_1b = mask_shared_prefix[:, :, part_a:, :]
outs_1b = self.model.forward(
input_1b, attention_mask=mask_1b, position_ids=position_ids_1b, past_key_values=past_key_values_a
)
decoded_1b = [
self.tokenizer.decode(t)
for t in outs_1b.logits.argmax(-1)[
0, torch.where(position_ids_shared_prefix == position_ids_shared_prefix.max())[1] - part_a
]
]
self.assertEqual(decoded, decoded_1b)
| Mask4DTestHard |
python | openai__openai-python | src/openai/types/static_file_chunking_strategy.py | {
"start": 163,
"end": 595
} | class ____(BaseModel):
chunk_overlap_tokens: int
"""The number of tokens that overlap between chunks. The default value is `400`.
Note that the overlap must not exceed half of `max_chunk_size_tokens`.
"""
max_chunk_size_tokens: int
"""The maximum number of tokens in each chunk.
The default value is `800`. The minimum value is `100` and the maximum value is
`4096`.
"""
| StaticFileChunkingStrategy |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclass1.py | {
"start": 1626,
"end": 1830
} | class ____(Generic[T1]):
# This should generate an error.
x: T1 = 1
v7_1 = DC7()
reveal_type(v7_1, expected_text="DC7[int]")
# This should generate an error.
v7_2: DC7[str] = DC7()
@dataclass
| DC7 |
python | tornadoweb__tornado | tornado/test/httputil_test.py | {
"start": 8982,
"end": 16967
} | class ____(unittest.TestCase):
def test_multi_line(self):
# Lines beginning with whitespace are appended to the previous line
# with any leading whitespace replaced by a single space.
# Note that while multi-line headers are a part of the HTTP spec,
# their use is strongly discouraged.
data = """\
Foo: bar
baz
Asdf: qwer
\tzxcv
Foo: even
more
lines
""".replace(
"\n", "\r\n"
)
headers = HTTPHeaders.parse(data)
self.assertEqual(headers["asdf"], "qwer zxcv")
self.assertEqual(headers.get_list("asdf"), ["qwer zxcv"])
self.assertEqual(headers["Foo"], "bar baz,even more lines")
self.assertEqual(headers.get_list("foo"), ["bar baz", "even more lines"])
self.assertEqual(
sorted(list(headers.get_all())),
[("Asdf", "qwer zxcv"), ("Foo", "bar baz"), ("Foo", "even more lines")],
)
def test_continuation(self):
data = "Foo: bar\r\n\tasdf"
headers = HTTPHeaders.parse(data)
self.assertEqual(headers["Foo"], "bar asdf")
# If the first line starts with whitespace, it's a
# continuation line with nothing to continue, so reject it
# (with a proper error).
data = " Foo: bar"
self.assertRaises(HTTPInputError, HTTPHeaders.parse, data)
# \f (formfeed) is whitespace according to str.isspace, but
# not according to the HTTP spec.
data = "Foo: bar\r\n\fasdf"
self.assertRaises(HTTPInputError, HTTPHeaders.parse, data)
def test_forbidden_ascii_characters(self):
# Control characters and ASCII whitespace other than space, tab, and CRLF are not allowed in
# headers.
for c in range(0xFF):
data = f"Foo: bar{chr(c)}baz\r\n"
if c == 0x09 or (c >= 0x20 and c != 0x7F):
headers = HTTPHeaders.parse(data)
self.assertEqual(headers["Foo"], f"bar{chr(c)}baz")
else:
self.assertRaises(HTTPInputError, HTTPHeaders.parse, data)
def test_unicode_newlines(self):
# Ensure that only \r\n is recognized as a header separator, and not
# the other newline-like unicode characters.
# Characters that are likely to be problematic can be found in
# http://unicode.org/standard/reports/tr13/tr13-5.html
# and cpython's unicodeobject.c (which defines the implementation
# of unicode_type.splitlines(), and uses a different list than TR13).
newlines = [
# The following ascii characters are sometimes treated as newline-like,
# but they're disallowed in HTTP headers. This test covers unicode
# characters that are permitted in headers (under the obs-text rule).
# "\u001b", # VERTICAL TAB
# "\u001c", # FILE SEPARATOR
# "\u001d", # GROUP SEPARATOR
# "\u001e", # RECORD SEPARATOR
"\u0085", # NEXT LINE
"\u2028", # LINE SEPARATOR
"\u2029", # PARAGRAPH SEPARATOR
]
for newline in newlines:
# Try the utf8 and latin1 representations of each newline
for encoding in ["utf8", "latin1"]:
try:
try:
encoded = newline.encode(encoding)
except UnicodeEncodeError:
# Some chars cannot be represented in latin1
continue
data = b"Cookie: foo=" + encoded + b"bar"
# parse() wants a native_str, so decode through latin1
# in the same way the real parser does.
headers = HTTPHeaders.parse(native_str(data.decode("latin1")))
expected = [
(
"Cookie",
"foo=" + native_str(encoded.decode("latin1")) + "bar",
)
]
self.assertEqual(expected, list(headers.get_all()))
except Exception:
gen_log.warning("failed while trying %r in %s", newline, encoding)
raise
def test_unicode_whitespace(self):
# Only tabs and spaces are to be stripped according to the HTTP standard.
# Other unicode whitespace is to be left as-is. In the context of headers,
# this specifically means the whitespace characters falling within the
# latin1 charset.
whitespace = [
(" ", True), # SPACE
("\t", True), # TAB
("\u00a0", False), # NON-BREAKING SPACE
("\u0085", False), # NEXT LINE
]
for c, stripped in whitespace:
headers = HTTPHeaders.parse("Transfer-Encoding: %schunked" % c)
if stripped:
expected = [("Transfer-Encoding", "chunked")]
else:
expected = [("Transfer-Encoding", "%schunked" % c)]
self.assertEqual(expected, list(headers.get_all()))
def test_optional_cr(self):
# Bare CR is not a valid line separator
with self.assertRaises(HTTPInputError):
HTTPHeaders.parse("CRLF: crlf\r\nLF: lf\nCR: cr\rMore: more\r\n")
# Both CRLF and LF should be accepted as separators. CR should not be
# part of the data when followed by LF.
headers = HTTPHeaders.parse("CRLF: crlf\r\nLF: lf\nMore: more\r\n")
self.assertEqual(
sorted(headers.get_all()),
[("Crlf", "crlf"), ("Lf", "lf"), ("More", "more")],
)
def test_copy(self):
all_pairs = [("A", "1"), ("A", "2"), ("B", "c")]
h1 = HTTPHeaders()
for k, v in all_pairs:
h1.add(k, v)
h2 = h1.copy()
h3 = copy.copy(h1)
h4 = copy.deepcopy(h1)
for headers in [h1, h2, h3, h4]:
# All the copies are identical, no matter how they were
# constructed.
self.assertEqual(list(sorted(headers.get_all())), all_pairs)
for headers in [h2, h3, h4]:
# Neither the dict or its member lists are reused.
self.assertIsNot(headers, h1)
self.assertIsNot(headers.get_list("A"), h1.get_list("A"))
def test_pickle_roundtrip(self):
headers = HTTPHeaders()
headers.add("Set-Cookie", "a=b")
headers.add("Set-Cookie", "c=d")
headers.add("Content-Type", "text/html")
pickled = pickle.dumps(headers)
unpickled = pickle.loads(pickled)
self.assertEqual(sorted(headers.get_all()), sorted(unpickled.get_all()))
self.assertEqual(sorted(headers.items()), sorted(unpickled.items()))
def test_setdefault(self):
headers = HTTPHeaders()
headers["foo"] = "bar"
# If a value is present, setdefault returns it without changes.
self.assertEqual(headers.setdefault("foo", "baz"), "bar")
self.assertEqual(headers["foo"], "bar")
# If a value is not present, setdefault sets it for future use.
self.assertEqual(headers.setdefault("quux", "xyzzy"), "xyzzy")
self.assertEqual(headers["quux"], "xyzzy")
self.assertEqual(sorted(headers.get_all()), [("Foo", "bar"), ("Quux", "xyzzy")])
def test_string(self):
headers = HTTPHeaders()
headers.add("Foo", "1")
headers.add("Foo", "2")
headers.add("Foo", "3")
headers2 = HTTPHeaders.parse(str(headers))
self.assertEqual(headers, headers2)
def test_invalid_header_names(self):
invalid_names = [
"",
"foo bar",
"foo\tbar",
"foo\nbar",
"foo\x00bar",
"foo ",
" foo",
"é",
]
for name in invalid_names:
headers = HTTPHeaders()
with self.assertRaises(HTTPInputError):
headers.add(name, "bar")
| HTTPHeadersTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/inconsistentConstructor1.py | {
"start": 512,
"end": 721
} | class ____:
def __new__(cls, *args: object, **kwargs: object) -> Self: ...
# This should generate an error if reportInconsistentConstructor is enabled.
def __init__(self, a: int) -> None: ...
| Class3 |
python | astropy__astropy | astropy/units/tests/test_quantity_ufuncs.py | {
"start": 29432,
"end": 31835
} | class ____:
@pytest.mark.parametrize(
"ufunc",
[np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
)
def test_comparison_valid_units(self, ufunc):
q_i1 = np.array([-3.3, 2.1, 10.2]) * u.kg / u.s
q_i2 = np.array([10.0, -5.0, 1.0e6]) * u.g / u.Ms
q_o = ufunc(q_i1, q_i2)
assert not isinstance(q_o, u.Quantity)
assert q_o.dtype == bool
assert np.all(q_o == ufunc(q_i1.value, q_i2.to_value(q_i1.unit)))
q_o2 = ufunc(q_i1 / q_i2, 2.0)
assert not isinstance(q_o2, u.Quantity)
assert q_o2.dtype == bool
assert np.all(
q_o2 == ufunc((q_i1 / q_i2).to_value(u.dimensionless_unscaled), 2.0)
)
# comparison with 0., inf, nan is OK even for dimensional quantities
# (though ignore numpy runtime warnings for comparisons with nan).
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
for arbitrary_unit_value in (0.0, np.inf, np.nan):
ufunc(q_i1, arbitrary_unit_value)
ufunc(q_i1, arbitrary_unit_value * np.ones(len(q_i1)))
# and just for completeness
ufunc(q_i1, np.array([0.0, np.inf, np.nan]))
@pytest.mark.parametrize(
"ufunc",
[np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal, np.equal],
)
def test_comparison_invalid_units(self, ufunc):
q_i1 = 4.7 * u.m
q_i2 = 9.4 * u.s
with pytest.raises(u.UnitsError, match="compatible dimensions"):
ufunc(q_i1, q_i2)
@pytest.mark.parametrize("ufunc", (np.isfinite, np.isinf, np.isnan, np.signbit))
def test_onearg_test_ufuncs(self, ufunc):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
out = ufunc(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == bool
assert np.all(out == ufunc(q.value))
# Ignore RuntimeWarning raised on Windows and s390.
@pytest.mark.filterwarnings("ignore:.*invalid value encountered in sign")
def test_sign(self):
q = [1.0, np.inf, -np.inf, np.nan, -1.0, 0.0] * u.m
out = np.sign(q)
assert not isinstance(out, u.Quantity)
assert out.dtype == q.dtype
assert np.all((out == np.sign(q.value)) | (np.isnan(out) & np.isnan(q.value)))
| TestComparisonUfuncs |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 4411,
"end": 5126
} | class ____(MetaFeature):
"""
Calculate the number of classes.
Calls np.unique on the targets. If the dataset is a multilabel dataset,
does this for each label seperately and returns the mean.
"""
def _calculate(self, X, y, logger, feat_type):
if type_of_target(y) == "multilabel-indicator":
# We have a label binary indicator array:
# each sample is one row of a 2d array of shape (n_samples, n_classes)
return y.shape[1]
if len(y.shape) == 2:
return np.mean([len(np.unique(y[:, i])) for i in range(y.shape[1])])
else:
return float(len(np.unique(y)))
@metafeatures.define("NumberOfFeatures")
| NumberOfClasses |
python | kamyu104__LeetCode-Solutions | Python/multiply-strings.py | {
"start": 653,
"end": 1324
} | class ____(object):
def multiply(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
"""
num1, num2 = num1[::-1], num2[::-1]
result = [0]*(len(num1)+len(num2))
for i in xrange(len(num1)):
for j in xrange(len(num2)):
result[i+j] += int(num1[i])*int(num2[j])
result[i+j+1] += result[i+j]//10
result[i+j] %= 10
for i in reversed(xrange(len(result))):
if result[i]:
break
return "".join(map(str, result[i::-1]))
# Time: O(m * n)
# Space: O(m + n)
# Using built-in bignum solution.
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-visible-points.py | {
"start": 47,
"end": 813
} | class ____(object):
def visiblePoints(self, points, angle, location):
"""
:type points: List[List[int]]
:type angle: int
:type location: List[int]
:rtype: int
"""
arr, extra = [], 0
for p in points:
if p == location:
extra += 1
continue
arr.append(math.atan2(p[1]-location[1], p[0]-location[0]))
arr.sort()
arr.extend([x + 2.0*math.pi for x in arr]) # make it circular
d = 2.0*math.pi * (angle/360.0)
left = result = 0
for right in xrange(len(arr)):
while arr[right]-arr[left] > d:
left += 1
result = max(result, right-left+1)
return result + extra
| Solution |
python | pennersr__django-allauth | allauth/socialaccount/providers/slack/provider.py | {
"start": 406,
"end": 1469
} | class ____(OAuth2Provider):
id = "slack"
name = "Slack"
account_class = SlackAccount
oauth2_adapter_class = SlackOAuth2Adapter
def extract_uid(self, data):
team_id = data.get("https://slack.com/team_id")
user_id = data.get("https://slack.com/user_id")
if not (team_id and user_id):
team_id = data.get("team").get("id")
user_id = data.get("user").get("id")
return "%s_%s" % (
str(team_id),
str(user_id),
)
def extract_common_fields(self, data):
user = data.get("user", {})
return {"name": user.get("name"), "email": user.get("email", None)}
def extract_email_addresses(self, data):
ret = []
email = data.get("email")
if email:
verified = data.get("email_verified")
ret.append(EmailAddress(email=email, verified=verified, primary=True))
return ret
def get_default_scope(self):
return ["openid", "profile", "email"]
provider_classes = [SlackProvider]
| SlackProvider |
python | apache__airflow | airflow-core/src/airflow/timetables/_delta.py | {
"start": 1097,
"end": 1977
} | class ____:
"""Mixin to provide interface to work with timedelta and relativedelta."""
def __init__(self, delta: datetime.timedelta | relativedelta) -> None:
self._delta = delta
@property
def summary(self) -> str:
return str(self._delta)
def validate(self) -> None:
now = datetime.datetime.now()
if (now + self._delta) <= now:
raise AirflowTimetableInvalid(f"schedule interval must be positive, not {self._delta!r}")
def _get_next(self, current: DateTime) -> DateTime:
return convert_to_utc(current + self._delta)
def _get_prev(self, current: DateTime) -> DateTime:
return convert_to_utc(current - self._delta)
def _align_to_next(self, current: DateTime) -> DateTime:
return current
def _align_to_prev(self, current: DateTime) -> DateTime:
return current
| DeltaMixin |
python | mlflow__mlflow | mlflow/utils/autologging_utils/config.py | {
"start": 193,
"end": 1099
} | class ____:
"""
A dataclass to hold common autologging configuration options.
"""
log_input_examples: bool
log_model_signatures: bool
log_traces: bool
extra_tags: dict[str, Any] | None = None
log_models: bool = True
@classmethod
def init(cls, flavor_name: str):
config_dict = AUTOLOGGING_INTEGRATIONS.get(flavor_name, {})
# NB: These defaults are only used when the autolog() function for the
# flavor does not specify the corresponding configuration option
return cls(
log_models=config_dict.get("log_models", False),
log_input_examples=config_dict.get("log_input_examples", False),
log_model_signatures=config_dict.get("log_model_signatures", False),
log_traces=config_dict.get("log_traces", True),
extra_tags=config_dict.get("extra_tags", None),
)
| AutoLoggingConfig |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1283593,
"end": 1286660
} | class ____(sgqlc.types.Type, Node):
"""An Identity Provider configured to provision SAML and SCIM
identities for Organizations. Visible to (1) organization owners,
(2) organization owners' personal access tokens (classic) with
read:org or admin:org scope, (3) GitHub App with an installation
token with read or write access to members.
"""
__schema__ = github_schema
__field_names__ = ("digest_method", "external_identities", "idp_certificate", "issuer", "organization", "signature_method", "sso_url")
digest_method = sgqlc.types.Field(URI, graphql_name="digestMethod")
"""The digest algorithm used to sign SAML requests for the Identity
Provider.
"""
external_identities = sgqlc.types.Field(
sgqlc.types.non_null(ExternalIdentityConnection),
graphql_name="externalIdentities",
args=sgqlc.types.ArgDict(
(
("members_only", sgqlc.types.Arg(Boolean, graphql_name="membersOnly", default=None)),
("login", sgqlc.types.Arg(String, graphql_name="login", default=None)),
("user_name", sgqlc.types.Arg(String, graphql_name="userName", default=None)),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
("before", sgqlc.types.Arg(String, graphql_name="before", default=None)),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
"""External Identities provisioned by this Identity Provider
Arguments:
* `members_only` (`Boolean`): Filter to external identities with
valid org membership only
* `login` (`String`): Filter to external identities with the users
login
* `user_name` (`String`): Filter to external identities with the
users userName/NameID attribute
* `after` (`String`): Returns the elements in the list that come
after the specified cursor.
* `before` (`String`): Returns the elements in the list that come
before the specified cursor.
* `first` (`Int`): Returns the first _n_ elements from the list.
* `last` (`Int`): Returns the last _n_ elements from the list.
"""
idp_certificate = sgqlc.types.Field(X509Certificate, graphql_name="idpCertificate")
"""The x509 certificate used by the Identity Provider to sign
assertions and responses.
"""
issuer = sgqlc.types.Field(String, graphql_name="issuer")
"""The Issuer Entity ID for the SAML Identity Provider"""
organization = sgqlc.types.Field(Organization, graphql_name="organization")
"""Organization this Identity Provider belongs to"""
signature_method = sgqlc.types.Field(URI, graphql_name="signatureMethod")
"""The signature algorithm used to sign SAML requests for the
Identity Provider.
"""
sso_url = sgqlc.types.Field(URI, graphql_name="ssoUrl")
"""The URL endpoint for the Identity Provider's SAML SSO."""
| OrganizationIdentityProvider |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_logging_sink.py | {
"start": 18415,
"end": 28573
} | class ____:
@pytest.mark.parametrize(("sink_config", "update_mask"), update_test_cases, ids=update_test_ids)
def test_template_fields(self, sink_config, update_mask):
operator = CloudLoggingUpdateSinkOperator(
task_id=TASK_ID,
sink_name=SINK_NAME,
sink_config=sink_config,
update_mask=update_mask,
project_id=PROJECT_ID,
)
assert "sink_config" in operator.template_fields
assert "update_mask" in operator.template_fields
assert "sink_name" in operator.template_fields
_assert_common_template_fields(operator.template_fields)
def test_missing_required_params(self):
with pytest.raises(AirflowException) as excinfo:
CloudLoggingDeleteSinkOperator(
task_id=TASK_ID,
sink_name=None,
project_id=None,
).execute(context={})
assert "Required parameters are missing" in str(excinfo.value)
@mock.patch(CLOUD_LOGGING_HOOK_PATH)
@pytest.mark.parametrize(("sink_config", "update_mask"), update_test_cases, ids=update_test_ids)
def test_update_sink_success(self, hook_mock, sink_config, update_mask):
hook_instance = hook_mock.return_value
hook_instance.get_sink.return_value = sink
sink_ = LogSink(**sink_config)
hook_instance.update_sink.return_value = sink_
operator = CloudLoggingUpdateSinkOperator(
task_id=TASK_ID,
sink_name=SINK_NAME,
project_id=PROJECT_ID,
sink_config=sink_config,
update_mask=update_mask,
)
result = operator.execute(context=mock.MagicMock())
hook_instance.get_sink.assert_called_once_with(sink_name=SINK_NAME, project_id=PROJECT_ID)
hook_instance.update_sink.assert_called_once()
assert result == LogSink.to_dict(sink_)
@mock.patch(CLOUD_LOGGING_HOOK_PATH)
@pytest.mark.parametrize(("sink_config", "update_mask"), update_test_cases, ids=update_test_ids)
def test_update_sink_raises_not_found(self, hook_mock, sink_config, update_mask):
hook_instance = hook_mock.return_value
hook_instance.get_sink.side_effect = NotFound("not found")
operator = CloudLoggingUpdateSinkOperator(
task_id=TASK_ID,
sink_name=SINK_NAME,
sink_config=sink_config,
update_mask=update_mask,
project_id=PROJECT_ID,
)
with pytest.raises(NotFound, match="not found"):
operator.execute(context=mock.MagicMock())
hook_instance.get_sink.assert_called_once()
hook_instance.update_sink.assert_not_called()
@mock.patch(CLOUD_LOGGING_HOOK_PATH)
@pytest.mark.parametrize(("sink_config", "update_mask"), update_test_cases, ids=update_test_ids)
def test_update_sink_raises_generic_error(self, hook_mock, sink_config, update_mask):
hook_instance = hook_mock.return_value
hook_instance.get_sink.side_effect = GoogleAPICallError("something went wrong")
operator = CloudLoggingUpdateSinkOperator(
task_id=TASK_ID,
sink_name=SINK_NAME,
sink_config=sink_config,
update_mask=update_mask,
project_id=PROJECT_ID,
)
with pytest.raises(GoogleAPICallError, match="something went wrong"):
operator.execute(context=mock.MagicMock())
hook_instance.get_sink.assert_called_once()
hook_instance.update_sink.assert_not_called()
@mock.patch(CLOUD_LOGGING_HOOK_PATH)
@pytest.mark.parametrize(
"impersonation_chain",
[
["user1@project.iam.gserviceaccount.com", "user2@project.iam.gserviceaccount.com"],
"user2@project.iam.gserviceaccount.com",
],
)
def test_create_with_impersonation_chain(self, hook_mock, impersonation_chain):
hook_instance = hook_mock.return_value
hook_instance.get_sink.return_value = sink
hook_instance.update_sink.return_value = sink
operator = CloudLoggingUpdateSinkOperator(
task_id=TASK_ID,
sink_config=update_test_cases[0][0],
update_mask=update_test_cases[0][1],
sink_name=SINK_NAME,
impersonation_chain=impersonation_chain,
project_id=PROJECT_ID,
)
operator.execute(context=mock.MagicMock())
hook_mock.assert_called_once_with(
gcp_conn_id="google_cloud_default",
impersonation_chain=impersonation_chain,
)
def test_missing_rendered_field_raises(self):
with DAG(
dag_id="test_render_native",
start_date=datetime(1997, 9, 25),
render_template_as_native_obj=True,
) as dag:
operator = CloudLoggingUpdateSinkOperator(
task_id=TASK_ID,
sink_name="{{ var.value.sink_name }}",
sink_config="{{ var.value.sink_config }}",
update_mask="{{ var.value.update_mask }}",
project_id="{{ var.value.project_id }}",
dag=dag,
)
context = {
"var": {
"value": {
"project_id": PROJECT_ID,
"sink_name": None,
"sink_config": None,
"update_mask": None,
}
},
}
operator.render_template_fields(context)
with pytest.raises(
AirflowException,
match=re.escape(
"Required parameters are missing: ['sink_name', 'sink_config', 'update_mask']. These must be passed as keyword parameters."
),
):
operator.execute(context)
@mock.patch(CLOUD_LOGGING_HOOK_PATH)
@pytest.mark.parametrize(("sink_config", "update_mask"), update_test_cases, ids=update_test_ids)
def test_template_rendering(self, hook_mock, sink_config, update_mask):
with DAG(
dag_id="test_render_native",
start_date=datetime(2024, 1, 1),
render_template_as_native_obj=True,
) as dag:
operator = CloudLoggingUpdateSinkOperator(
task_id=TASK_ID,
sink_name="{{ var.value.sink_name }}",
update_mask="{{ var.value.update_mask }}",
sink_config="{{ var.value.sink_config }}",
project_id="{{ var.value.project_id }}",
unique_writer_identity="{{ var.value.unique_writer_identity }}",
dag=dag,
)
context = {
"var": {
"value": {
"project_id": PROJECT_ID,
"sink_config": sink_config,
"sink_name": SINK_NAME,
"update_mask": update_mask,
"unique_writer_identity": UNIQUE_WRITER_IDENTITY,
}
}
}
hook_instance = hook_mock.return_value
hook_instance.get_sink.return_value = LogSink(name=SINK_NAME)
hook_instance.update_sink.return_value = LogSink(**sink_config)
operator.render_template_fields(context)
result = operator.execute(context=mock.MagicMock())
# Assertions
assert isinstance(operator.sink_config, dict)
assert isinstance(operator.update_mask, dict)
assert isinstance(operator.unique_writer_identity, bool)
assert operator.sink_config["name"] == sink_config["name"]
assert result["name"] == sink_config["name"]
assert operator.update_mask == update_mask
hook_instance.update_sink.assert_called_once_with(
project_id=PROJECT_ID,
sink_name=SINK_NAME,
sink=sink_config,
update_mask=update_mask,
unique_writer_identity=UNIQUE_WRITER_IDENTITY,
)
@mock.patch(CLOUD_LOGGING_HOOK_PATH)
@pytest.mark.parametrize(("sink_config", "update_mask"), update_test_cases, ids=update_test_ids)
def test_template_rendering_with_proto(self, hook_mock, sink_config, update_mask):
sink_obj = LogSink(**sink_config)
mask_obj = FieldMask(paths=update_mask["paths"])
with DAG(
dag_id="test_render_native_proto",
start_date=datetime(2024, 1, 1),
render_template_as_native_obj=True,
) as dag:
operator = CloudLoggingUpdateSinkOperator(
task_id=TASK_ID,
sink_name="{{ var.value.sink_name }}",
update_mask="{{ var.value.update_mask }}",
sink_config="{{ var.value.sink_config }}",
project_id="{{ var.value.project_id }}",
unique_writer_identity="{{ var.value.unique_writer_identity }}",
dag=dag,
)
context = {
"var": {
"value": {
"project_id": PROJECT_ID,
"sink_name": SINK_NAME,
"sink_config": sink_obj,
"update_mask": mask_obj,
"unique_writer_identity": UNIQUE_WRITER_IDENTITY,
}
}
}
hook_instance = hook_mock.return_value
hook_instance.get_sink.return_value = LogSink(name=SINK_NAME)
hook_instance.update_sink.return_value = sink_obj
operator.render_template_fields(context)
result = operator.execute(context=mock.MagicMock())
assert isinstance(operator.sink_config, LogSink)
assert isinstance(operator.update_mask, FieldMask)
assert isinstance(operator.unique_writer_identity, bool)
assert operator.sink_config.name == sink_obj.name
assert result["name"] == sink_obj.name
assert operator.update_mask == mask_obj
assert operator.sink_config == sink_obj
hook_instance.update_sink.assert_called_once_with(
project_id=PROJECT_ID,
sink_name=SINK_NAME,
sink=sink_obj,
update_mask=mask_obj,
unique_writer_identity=UNIQUE_WRITER_IDENTITY,
)
| TestCloudLoggingUpdateSinksOperator |
python | getsentry__sentry | src/sentry/web/frontend/debug/debug_error_embed.py | {
"start": 348,
"end": 927
} | class ____(View):
def _get_project_key(self):
return ProjectKey.objects.filter(project=settings.SENTRY_PROJECT)[0]
def get(self, request: HttpRequest) -> HttpResponse:
context = {
"query_params": urlencode(
{
"dsn": self._get_project_key().dsn_public,
"eventId": "342a3d7f690a49f8bd7c4cf0e61a9ded",
**request.GET,
}
)
}
return render_to_response("sentry/debug/error-page-embed.html", context, request)
| DebugErrorPageEmbedView |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/ir.py | {
"start": 91998,
"end": 95230
} | class ____(IR):
"""Produce a new dataframe with distinct rows."""
__slots__ = ("keep", "stable", "subset", "zlice")
_non_child = ("schema", "keep", "subset", "zlice", "stable")
keep: plc.stream_compaction.DuplicateKeepOption
"""Which distinct value to keep."""
subset: frozenset[str] | None
"""Which columns should be used to define distinctness. If None,
then all columns are used."""
zlice: Zlice | None
"""Optional slice to apply to the result."""
stable: bool
"""Should the result maintain ordering."""
def __init__(
self,
schema: Schema,
keep: plc.stream_compaction.DuplicateKeepOption,
subset: frozenset[str] | None,
zlice: Zlice | None,
stable: bool, # noqa: FBT001
df: IR,
):
self.schema = schema
self.keep = keep
self.subset = subset
self.zlice = zlice
self.stable = stable
self._non_child_args = (keep, subset, zlice, stable)
self.children = (df,)
_KEEP_MAP: ClassVar[dict[str, plc.stream_compaction.DuplicateKeepOption]] = {
"first": plc.stream_compaction.DuplicateKeepOption.KEEP_FIRST,
"last": plc.stream_compaction.DuplicateKeepOption.KEEP_LAST,
"none": plc.stream_compaction.DuplicateKeepOption.KEEP_NONE,
"any": plc.stream_compaction.DuplicateKeepOption.KEEP_ANY,
}
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="Distinct")
def do_evaluate(
cls,
keep: plc.stream_compaction.DuplicateKeepOption,
subset: frozenset[str] | None,
zlice: Zlice | None,
stable: bool, # noqa: FBT001
df: DataFrame,
*,
context: IRExecutionContext,
) -> DataFrame:
"""Evaluate and return a dataframe."""
if subset is None:
indices = list(range(df.num_columns))
keys_sorted = all(c.is_sorted for c in df.column_map.values())
else:
indices = [i for i, k in enumerate(df.column_names) if k in subset]
keys_sorted = all(df.column_map[name].is_sorted for name in subset)
if keys_sorted:
table = plc.stream_compaction.unique(
df.table,
indices,
keep,
plc.types.NullEquality.EQUAL,
stream=df.stream,
)
else:
distinct = (
plc.stream_compaction.stable_distinct
if stable
else plc.stream_compaction.distinct
)
table = distinct(
df.table,
indices,
keep,
plc.types.NullEquality.EQUAL,
plc.types.NanEquality.ALL_EQUAL,
df.stream,
)
# TODO: Is this sortedness setting correct
result = DataFrame(
[
Column(new, name=old.name, dtype=old.dtype).sorted_like(old)
for new, old in zip(table.columns(), df.columns, strict=True)
],
stream=df.stream,
)
if keys_sorted or stable:
result = result.sorted_like(df)
return result.slice(zlice)
| Distinct |
python | numpy__numpy | numpy/f2py/tests/test_regression.py | {
"start": 1370,
"end": 1995
} | class ____(util.F2PyTest):
# Check that negative bounds work correctly
sources = [util.getpath("tests", "src", "negative_bounds", "issue_20853.f90")]
@pytest.mark.slow
def test_negbound(self):
xvec = np.arange(12)
xlow = -6
xhigh = 4
# Calculate the upper bound,
# Keeping the 1 index in mind
def ubound(xl, xh):
return xh - xl + 1
rval = self.module.foo(is_=xlow, ie_=xhigh,
arr=xvec[:ubound(xlow, xhigh)])
expval = np.arange(11, dtype=np.float32)
assert np.allclose(rval, expval)
| TestNegativeBounds |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_relationship.py | {
"start": 16113,
"end": 19981
} | class ____(fixtures.MappedTest):
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def define_tables(cls, metadata):
Table(
"organizations",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(50)),
)
Table(
"engineers_to_org",
metadata,
Column("org_id", Integer, ForeignKey("organizations.id")),
Column("engineer_id", Integer, ForeignKey("engineers.person_id")),
)
Table(
"people",
metadata,
Column(
"person_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(50)),
Column("type", String(30)),
)
Table(
"engineers",
metadata,
Column(
"person_id",
Integer,
ForeignKey("people.person_id"),
primary_key=True,
),
Column("primary_language", String(50)),
)
@classmethod
def setup_mappers(cls):
organizations = cls.tables.organizations
people = cls.tables.people
engineers = cls.tables.engineers
engineers_to_org = cls.tables.engineers_to_org
class Organization(cls.Comparable):
pass
cls.mapper_registry.map_imperatively(
Organization,
organizations,
properties={
"engineers": relationship(
Engineer,
secondary=engineers_to_org,
backref="organizations",
)
},
)
cls.mapper_registry.map_imperatively(
Person,
people,
polymorphic_on=people.c.type,
polymorphic_identity="person",
)
cls.mapper_registry.map_imperatively(
Engineer,
engineers,
inherits=Person,
polymorphic_identity="engineer",
)
@classmethod
def insert_data(cls, connection):
Organization = cls.classes.Organization
e1 = Engineer(name="e1")
e2 = Engineer(name="e2")
e3 = Engineer(name="e3")
e4 = Engineer(name="e4")
org1 = Organization(name="org1", engineers=[e1, e2])
org2 = Organization(name="org2", engineers=[e3, e4])
with sessionmaker(connection).begin() as sess:
sess.add(org1)
sess.add(org2)
def test_not_contains(self):
Organization = self.classes.Organization
sess = fixture_session()
e1 = sess.query(Person).filter(Engineer.name == "e1").one()
eq_(
sess.query(Organization)
.filter(~Organization.engineers.of_type(Engineer).contains(e1))
.all(),
[Organization(name="org2")],
)
# this had a bug
eq_(
sess.query(Organization)
.filter(~Organization.engineers.contains(e1))
.all(),
[Organization(name="org2")],
)
def test_any(self):
sess = fixture_session()
Organization = self.classes.Organization
eq_(
sess.query(Organization)
.filter(
Organization.engineers.of_type(Engineer).any(
Engineer.name == "e1"
)
)
.all(),
[Organization(name="org1")],
)
eq_(
sess.query(Organization)
.filter(Organization.engineers.any(Engineer.name == "e1"))
.all(),
[Organization(name="org1")],
)
| M2MFilterTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 142820,
"end": 143217
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(SponsorshipOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
| SponsorshipOrder |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_organization_detector_count.py | {
"start": 403,
"end": 4608
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-detector-count"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.environment = Environment.objects.create(
organization_id=self.organization.id, name="production"
)
def test_simple(self) -> None:
# Create active detectors
self.create_detector(
project=self.project,
name="Active Detector 1",
type=MetricIssue.slug,
enabled=True,
config={"detection_type": AlertRuleDetectionType.STATIC.value},
)
self.create_detector(
project=self.project,
name="Active Detector 2",
type=ErrorGroupType.slug,
enabled=True,
config={},
)
# Create inactive detector
self.create_detector(
project=self.project,
name="Inactive Detector",
type=UptimeDomainCheckFailure.slug,
enabled=False,
config={
"mode": 1,
"environment": "production",
"recovery_threshold": 1,
"downtime_threshold": 3,
},
)
response = self.get_success_response(self.organization.slug)
assert response.data == {
"active": 2,
"deactive": 1,
"total": 3,
}
def test_filtered_by_type(self) -> None:
# Create detectors of different types
self.create_detector(
project=self.project,
name="Metric Detector 1",
type=MetricIssue.slug,
enabled=True,
config={"detection_type": AlertRuleDetectionType.STATIC.value},
)
self.create_detector(
project=self.project,
name="Metric Detector 2",
type=MetricIssue.slug,
enabled=False,
config={"detection_type": AlertRuleDetectionType.STATIC.value},
)
self.create_detector(
project=self.project,
name="Error Detector",
type=ErrorGroupType.slug,
enabled=True,
config={},
)
self.create_detector(
project=self.project,
name="Uptime Detector",
type=UptimeDomainCheckFailure.slug,
enabled=True,
config={
"mode": 1,
"environment": "production",
"recovery_threshold": 1,
"downtime_threshold": 3,
},
)
# Test with single type filter
response = self.get_success_response(
self.organization.slug, qs_params={"type": MetricIssue.slug}
)
assert response.data == {
"active": 1,
"deactive": 1,
"total": 2,
}
# Test with multiple type filters
response = self.get_success_response(
self.organization.slug,
qs_params={"type": [ErrorGroupType.slug, UptimeDomainCheckFailure.slug]},
)
assert response.data == {
"active": 2,
"deactive": 0,
"total": 2,
}
def test_no_detectors(self) -> None:
response = self.get_success_response(self.organization.slug)
assert response.data == {
"active": 0,
"deactive": 0,
"total": 0,
}
def test_no_projects_access(self) -> None:
# Create another organization with detectors
other_org = self.create_organization()
other_project = self.create_project(organization=other_org)
self.create_detector(
project_id=other_project.id,
name="Other Org Detector",
type=MetricIssue.slug,
enabled=True,
config={"detection_type": AlertRuleDetectionType.STATIC.value},
)
# Test with no project access
response = self.get_success_response(self.organization.slug, qs_params={"project": []})
assert response.data == {
"active": 0,
"deactive": 0,
"total": 0,
}
| OrganizationDetectorCountTest |
python | scipy__scipy | scipy/linalg/tests/test_blas.py | {
"start": 3208,
"end": 5732
} | class ____:
@parametrize_blas(fblas, "axpy", "sdcz")
def test_axpy(self, f, dtype):
assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5),
[7, 9, 18])
if dtype in COMPLEX_DTYPES:
assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5),
[7, 10j-1, 18])
@parametrize_blas(fblas, "copy", "sdcz")
def test_copy(self, f, dtype):
assert_array_almost_equal(f([3, 4, 5], [8]*3), [3, 4, 5])
if dtype in COMPLEX_DTYPES:
assert_array_almost_equal(f([3, 4j, 5+3j], [8]*3), [3, 4j, 5+3j])
@parametrize_blas(fblas, "asum", ["s", "d", "sc", "dz"])
def test_asum(self, f, dtype):
assert_almost_equal(f([3, -4, 5]), 12)
if dtype in COMPLEX_DTYPES:
assert_almost_equal(f([3j, -4, 3-4j]), 14)
@parametrize_blas(fblas, "dot", "sd")
def test_dot(self, f, dtype):
assert_almost_equal(f([3, -4, 5], [2, 5, 1]), -9)
@parametrize_blas(fblas, "dotu", "cz")
def test_dotu(self, f, dtype):
assert_almost_equal(f([3j, -4, 3-4j], [2, 3, 1]), -9+2j)
@parametrize_blas(fblas, "dotc", "cz")
def test_dotc(self, f, dtype):
assert_almost_equal(f([3j, -4, 3-4j], [2, 3j, 1]), 3-14j)
@parametrize_blas(fblas, "nrm2", ["s", "d", "sc", "dz"])
def test_nrm2(self, f, dtype):
assert_almost_equal(f([3, -4, 5]), math.sqrt(50))
if dtype in COMPLEX_DTYPES:
assert_almost_equal(f([3j, -4, 3-4j]), math.sqrt(50))
@parametrize_blas(fblas, "scal", ["s", "d", "cs", "zd"])
def test_scal(self, f, dtype):
assert_array_almost_equal(f(2, [3, -4, 5]), [6, -8, 10])
if dtype in COMPLEX_DTYPES:
assert_array_almost_equal(f(3, [3j, -4, 3-4j]), [9j, -12, 9-12j])
@parametrize_blas(fblas, "swap", "sdcz")
def test_swap(self, f, dtype):
x, y = [2, 3, 1], [-2, 3, 7]
x1, y1 = f(x, y)
assert_array_almost_equal(x1, y)
assert_array_almost_equal(y1, x)
if dtype in COMPLEX_DTYPES:
x, y = [2, 3j, 1], [-2, 3, 7-3j]
x1, y1 = f(x, y)
assert_array_almost_equal(x1, y)
assert_array_almost_equal(y1, x)
@parametrize_blas(fblas, "amax", ["is", "id", "ic", "iz"])
def test_amax(self, f, dtype):
assert_equal(f([-2, 4, 3]), 1)
if dtype in COMPLEX_DTYPES:
assert_equal(f([-5, 4+3j, 6]), 1)
# XXX: need tests for rot,rotm,rotg,rotmg
| TestFBLAS1Simple |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/secret.py | {
"start": 161,
"end": 306
} | class ____(str, Enum):
"""Secret scope enum for filtering."""
DEPLOYMENT = "deployment"
ORGANIZATION = "organization"
| DgApiSecretScope |
python | kamyu104__LeetCode-Solutions | Python/beautiful-pairs.py | {
"start": 232,
"end": 1947
} | class ____(object):
def beautifulPair(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
INF = float("inf")
def dist(a, b):
if a[2] > b[2]:
a, b = b, a
return [abs(a[0]-b[0])+abs(a[1]-b[1]), a[2], b[2]]
def cell(point, size):
x, y, _ = point
return math.floor(x/size), math.floor(y/size)
def improve():
lookup = {}
for p in points:
i, j = map(int, cell(p, result[0]/2.0))
for ni in xrange(i-2, (i+2)+1):
for nj in xrange(j-2, (j+2)+1):
if (ni, nj) not in lookup:
continue
d = dist(p, lookup[ni, nj])
if d < result:
result[:] = d
return True
lookup[i, j] = p
return False
points = [(i, j, idx) for idx, (i, j) in enumerate(itertools.izip(nums1, nums2))]
result = [INF]*3
lookup = {}
for i in reversed(xrange(len(points))):
if points[i][:2] in lookup:
result = [0, i, lookup[points[i][:2]]]
lookup[points[i][:2]] = i
if result[0] == 0:
return result[1:]
random.shuffle(points)
result = dist(points[0], points[1])
while improve():
pass
return result[1:]
# Time: O(nlogn)
# Space: O(n)
import itertools
# divide and conquer, merge sort, variant of closest pair
# reference: https://www.baeldung.com/cs/minimal-manhattan-distance
| Solution |
python | PyCQA__pylint | doc/data/messages/a/abstract-method/bad/function_raising_not_implemented_error.py | {
"start": 0,
"end": 73
} | class ____:
def make_sound(self):
raise NotImplementedError
| Pet |
python | astral-sh__uv | scripts/packages/keyring_test_plugin/keyrings/test_keyring.py | {
"start": 77,
"end": 1172
} | class ____(backend.KeyringBackend):
priority = 9
def get_password(self, service, username):
print(f"Keyring request for {username}@{service}", file=sys.stderr)
entries = json.loads(os.environ.get("KEYRING_TEST_CREDENTIALS", "{}"))
return entries.get(service, {}).get(username)
def set_password(self, service, username, password):
raise NotImplementedError()
def delete_password(self, service, username):
raise NotImplementedError()
def get_credential(self, service, username):
print(f"Keyring request for {service}", file=sys.stderr)
entries = json.loads(os.environ.get("KEYRING_TEST_CREDENTIALS", "{}"))
service_entries = entries.get(service, {})
if not service_entries:
return None
if username:
password = service_entries.get(username)
if not password:
return None
return credentials.SimpleCredential(username, password)
else:
return credentials.SimpleCredential(*list(service_entries.items())[0])
| KeyringTest |
python | kamyu104__LeetCode-Solutions | Python/maximum-calories-burnt-from-jumps.py | {
"start": 601,
"end": 1031
} | class ____(object):
def maxCaloriesBurnt(self, heights):
"""
:type heights: List[int]
:rtype: int
"""
heights.sort()
d = 0
left, right = 0, len(heights)-1
result = (0-heights[right])**2
while left != right:
result += (heights[right]-heights[left])**2
left += d
d ^= 1
right -= d
return result
| Solution2 |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_detector.py | {
"start": 21186,
"end": 28098
} | class ____(BaseDetectorHandlerTest):
def test(self) -> None:
handler = self.build_handler()
assert handler.evaluate(DataPacket("1", {"dedupe": 1})) == {}
detector_occurrence, _ = build_mock_occurrence_and_event(
handler, "val1", PriorityLevel.HIGH
)
issue_occurrence, event_data = self.detector_to_issue_occurrence(
detector_occurrence=detector_occurrence,
detector=handler.detector,
group_key="val1",
value=6,
priority=DetectorPriorityLevel.HIGH,
occurrence_id=str(self.mock_uuid4.return_value),
)
assert handler.evaluate(DataPacket("1", {"dedupe": 2, "group_vals": {"val1": 6}})) == {
"val1": DetectorEvaluationResult(
group_key="val1",
is_triggered=True,
priority=DetectorPriorityLevel.HIGH,
result=issue_occurrence,
event_data=event_data,
)
}
self.assert_updates(
handler,
"val1",
2,
{
**handler.test_get_empty_counter_state(),
DetectorPriorityLevel.HIGH: 1,
},
True,
DetectorPriorityLevel.HIGH,
)
def test_above_below_threshold(self) -> None:
handler = self.build_handler()
assert handler.evaluate(DataPacket("1", {"dedupe": 1, "group_vals": {"val1": 0}})) == {}
detector_occurrence, _ = build_mock_occurrence_and_event(
handler, "val1", PriorityLevel.HIGH
)
issue_occurrence, event_data = self.detector_to_issue_occurrence(
detector_occurrence=detector_occurrence,
detector=handler.detector,
group_key="val1",
value=6,
priority=DetectorPriorityLevel.HIGH,
occurrence_id=str(self.mock_uuid4.return_value),
)
assert handler.evaluate(DataPacket("1", {"dedupe": 2, "group_vals": {"val1": 6}})) == {
"val1": DetectorEvaluationResult(
group_key="val1",
is_triggered=True,
priority=DetectorPriorityLevel.HIGH,
result=issue_occurrence,
event_data=event_data,
)
}
assert handler.evaluate(DataPacket("1", {"dedupe": 3, "group_vals": {"val1": 6}})) == {}
assert handler.evaluate(DataPacket("1", {"dedupe": 4, "group_vals": {"val1": 0}})) == {
"val1": DetectorEvaluationResult(
group_key="val1",
is_triggered=False,
result=StatusChangeMessage(
fingerprint=[f"detector:{handler.detector.id}:val1"],
project_id=self.project.id,
new_status=1,
new_substatus=None,
),
priority=DetectorPriorityLevel.OK,
)
}
def test_no_condition_group(self) -> None:
detector = self.create_detector(type=self.handler_type.slug)
handler = MockDetectorStateHandler(detector)
with mock.patch(
"sentry.workflow_engine.handlers.detector.stateful.metrics"
) as mock_metrics:
assert (
handler.evaluate(DataPacket("1", {"dedupe": 2, "group_vals": {"val1": 100}})) == {}
)
mock_metrics.incr.assert_called_once_with(
"workflow_engine.detector.skipping_invalid_condition_group"
)
self.assert_updates(handler, "val1", 2, None, None, None)
def test_results_on_change(self) -> None:
handler = self.build_handler()
detector_occurrence, _ = build_mock_occurrence_and_event(
handler, "val1", PriorityLevel.HIGH
)
issue_occurrence, event_data = self.detector_to_issue_occurrence(
detector_occurrence=detector_occurrence,
detector=handler.detector,
group_key="val1",
value=100,
priority=DetectorPriorityLevel.HIGH,
occurrence_id=str(self.mock_uuid4.return_value),
)
result = handler.evaluate(DataPacket("1", {"dedupe": 2, "group_vals": {"val1": 100}}))
assert result == {
"val1": DetectorEvaluationResult(
group_key="val1",
is_triggered=True,
priority=DetectorPriorityLevel.HIGH,
result=issue_occurrence,
event_data=event_data,
)
}
self.assert_updates(
handler,
"val1",
2,
{
**handler.test_get_empty_counter_state(),
DetectorPriorityLevel.HIGH: 1,
},
True,
DetectorPriorityLevel.HIGH,
)
# This detector is already triggered, so no status change occurred. Should be no result
assert handler.evaluate(DataPacket("1", {"dedupe": 3, "group_vals": {"val1": 200}})) == {}
def test_dedupe(self) -> None:
handler = self.build_handler()
detector_occurrence, _ = build_mock_occurrence_and_event(
handler, "val1", PriorityLevel.HIGH
)
issue_occurrence, event_data = self.detector_to_issue_occurrence(
detector_occurrence=detector_occurrence,
detector=handler.detector,
group_key="val1",
value=8,
priority=DetectorPriorityLevel.HIGH,
occurrence_id=str(self.mock_uuid4.return_value),
)
result = handler.evaluate(DataPacket("1", {"dedupe": 2, "group_vals": {"val1": 8}}))
assert result == {
"val1": DetectorEvaluationResult(
group_key="val1",
is_triggered=True,
priority=DetectorPriorityLevel.HIGH,
result=issue_occurrence,
event_data=event_data,
)
}
self.assert_updates(
handler,
"val1",
2,
{
**handler.test_get_empty_counter_state(),
DetectorPriorityLevel.HIGH: 1,
},
True,
DetectorPriorityLevel.HIGH,
)
with mock.patch(
"sentry.workflow_engine.handlers.detector.stateful.metrics"
) as mock_metrics:
assert handler.evaluate(DataPacket("1", {"dedupe": 2, "group_vals": {"val1": 0}})) == {}
mock_metrics.incr.assert_called_once_with(
"workflow_engine.detector.skipping_already_processed_update"
)
self.assert_updates(
handler,
"val1",
None,
{
**handler.test_get_empty_counter_state(),
DetectorPriorityLevel.HIGH: 1,
},
None,
None,
)
@freeze_time()
| TestEvaluate |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 7291,
"end": 7352
} | class ____(HTTPClientError):
status_code = 404
| HTTPNotFound |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 406391,
"end": 406506
} | class ____(Node):
"""
Base class for cython.parallel constructs.
"""
nogil_check = None
| ParallelNode |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py | {
"start": 35770,
"end": 36850
} | class ____(Benchmark):
r"""
Step objective function.
This class defines the Step 2 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Step}}(x) = \sum_{i=1}^{n} \left ( \lfloor x_i
+ 0.5 \rfloor \right )^2
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-100, 100]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0.5` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-100.0] * self.N,
[100.0] * self.N))
self.custom_bounds = ([-5, 5], [-5, 5])
self.global_optimum = [[0.5 for _ in range(self.N)]]
self.fglob = 0.5
def fun(self, x, *args):
self.nfev += 1
return sum((floor(x) + 0.5) ** 2.0)
| Step2 |
python | apache__airflow | providers/standard/src/airflow/providers/standard/sensors/date_time.py | {
"start": 1773,
"end": 4219
} | class ____(BaseSensorOperator):
"""
Waits until the specified datetime.
A major advantage of this sensor is idempotence for the ``target_time``.
It handles some cases for which ``TimeSensor`` and ``TimeDeltaSensor`` are not suited.
**Example** 1 :
If a task needs to wait for 11am on each ``logical_date``. Using
``TimeSensor`` or ``TimeDeltaSensor``, all backfill tasks started at
1am have to wait for 10 hours. This is unnecessary, e.g. a backfill
task with ``{{ ds }} = '1970-01-01'`` does not need to wait because
``1970-01-01T11:00:00`` has already passed.
**Example** 2 :
If a DAG is scheduled to run at 23:00 daily, but one of the tasks is
required to run at 01:00 next day, using ``TimeSensor`` will return
``True`` immediately because 23:00 > 01:00. Instead, we can do this:
.. code-block:: python
DateTimeSensor(
task_id="wait_for_0100",
target_time="{{ data_interval_end.tomorrow().replace(hour=1) }}",
)
:param target_time: datetime after which the job succeeds. (templated)
"""
template_fields: Sequence[str] = ("target_time",)
def __init__(self, *, target_time: str | datetime.datetime, **kwargs) -> None:
super().__init__(**kwargs)
# self.target_time can't be a datetime object as it is a template_field
if isinstance(target_time, datetime.datetime):
self.target_time = target_time.isoformat()
elif isinstance(target_time, str):
self.target_time = target_time
else:
raise TypeError(
f"Expected str or datetime.datetime type for target_time. Got {type(target_time)}"
)
def poke(self, context: Context) -> bool:
self.log.info("Checking if the time (%s) has come", self.target_time)
return timezone.utcnow() > timezone.parse(self.target_time)
@property
def _moment(self) -> datetime.datetime:
# Note following is reachable code if Jinja is used for redering template fields and
# render_template_as_native_obj=True is used.
# In this case, the target_time is already a datetime object.
if isinstance(self.target_time, datetime.datetime): # type:ignore[unreachable]
return self.target_time # type:ignore[unreachable]
return timezone.parse(self.target_time)
| DateTimeSensor |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_test.py | {
"start": 1357,
"end": 2450
} | class ____(linalg.LinearOperator):
"""LinearOperator that implements the methods ._shape and _shape_tensor."""
def __init__(self,
shape,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None):
parameters = dict(
shape=shape,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square
)
self._stored_shape = shape
super(LinearOperatorShape, self).__init__(
dtype=dtypes.float32,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters)
def _shape(self):
return tensor_shape.TensorShape(self._stored_shape)
def _shape_tensor(self):
return constant_op.constant(self._stored_shape, dtype=dtypes.int32)
def _matmul(self):
raise NotImplementedError("Not needed for this test.")
| LinearOperatorShape |
python | doocs__leetcode | solution/2200-2299/2249.Count Lattice Points Inside a Circle/Solution.py | {
"start": 0,
"end": 476
} | class ____:
def countLatticePoints(self, circles: List[List[int]]) -> int:
ans = 0
mx = max(x + r for x, _, r in circles)
my = max(y + r for _, y, r in circles)
for i in range(mx + 1):
for j in range(my + 1):
for x, y, r in circles:
dx, dy = i - x, j - y
if dx * dx + dy * dy <= r * r:
ans += 1
break
return ans
| Solution |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 126007,
"end": 127300
} | class ____(TestCase):
"""Tests for ``exactly_n()``"""
def test_true(self):
"""Iterable has ``n`` ``True`` elements"""
self.assertTrue(mi.exactly_n([True, False, True], 2))
self.assertTrue(mi.exactly_n([1, 1, 1, 0], 3))
self.assertTrue(mi.exactly_n([False, False], 0))
self.assertTrue(mi.exactly_n(range(100), 10, lambda x: x < 10))
self.assertTrue(mi.exactly_n(repeat(True, 100), 100))
self.assertTrue(mi.exactly_n(repeat(False, 100), 100, predicate=not_))
def test_false(self):
"""Iterable does not have ``n`` ``True`` elements"""
self.assertFalse(mi.exactly_n([True, False, False], 2))
self.assertFalse(mi.exactly_n([True, True, False], 1))
self.assertFalse(mi.exactly_n([False], 1))
self.assertFalse(mi.exactly_n([True], -1))
self.assertFalse(mi.exactly_n([True], -10))
self.assertFalse(mi.exactly_n([], -1))
self.assertFalse(mi.exactly_n([], -10))
self.assertFalse(mi.exactly_n([True], 0))
self.assertFalse(mi.exactly_n(repeat(True), 100))
def test_empty(self):
"""Return ``True`` if the iterable is empty and ``n`` is 0"""
self.assertTrue(mi.exactly_n([], 0))
self.assertFalse(mi.exactly_n([], 1))
| ExactlyNTests |
python | catalyst-team__catalyst | catalyst/contrib/data/sampler.py | {
"start": 9388,
"end": 11986
} | class ____(BatchSampler):
"""
A dynamic batch length data sampler.
Should be used with `catalyst.utils.trim_tensors`.
Adapted from `Dynamic minibatch trimming to improve BERT training speed`_.
Args:
sampler: Base sampler.
batch_size: Size of minibatch.
drop_last: If ``True``, the sampler will drop the last batch
if its size would be less than ``batch_size``.
Usage example:
>>> from torch.utils import data
>>> from catalyst.data import DynamicLenBatchSampler
>>> from catalyst import utils
>>> dataset = data.TensorDataset(
>>> input_ids, input_mask, segment_ids, labels
>>> )
>>> sampler_ = data.RandomSampler(dataset)
>>> sampler = DynamicLenBatchSampler(
>>> sampler_, batch_size=16, drop_last=False
>>> )
>>> loader = data.DataLoader(dataset, batch_sampler=sampler)
>>> for batch in loader:
>>> tensors = utils.trim_tensors(batch)
>>> b_input_ids, b_input_mask, b_segment_ids, b_labels = \
>>> tuple(t.to(device) for t in tensors)
.. _`Dynamic minibatch trimming to improve BERT training speed`:
https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/discussion/94779
"""
def __iter__(self):
"""
Iteration over BatchSampler.
"""
buckets = [[]] * 100
yielded = 0
for idx in self.sampler:
count_zeros = torch.sum(self.sampler.data_source[idx][0] == 0)
count_zeros = int(count_zeros / 64)
if len(buckets[count_zeros]) == 0:
buckets[count_zeros] = []
buckets[count_zeros].append(idx)
if len(buckets[count_zeros]) == self.batch_size:
batch = list(buckets[count_zeros])
yield batch
yielded += 1
buckets[count_zeros] = []
batch = []
leftover = [idx2 for bucket in buckets for idx2 in bucket]
for idx3 in leftover:
batch.append(idx3)
if len(batch) == self.batch_size:
yielded += 1
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yielded += 1
yield batch
assert len(self) == yielded, (
"produced an inccorect number of batches."
f" expected {len(self)}, but yielded {yielded}"
)
__all__ = [
"BalanceBatchSampler",
"DynamicBalanceClassSampler",
]
| DynamicLenBatchSampler |
python | huggingface__transformers | src/transformers/models/vits/modeling_vits.py | {
"start": 35676,
"end": 37303
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
kernel_size = config.duration_predictor_kernel_size
filter_channels = config.duration_predictor_filter_channels
self.dropout = nn.Dropout(config.duration_predictor_dropout)
self.conv_1 = nn.Conv1d(config.hidden_size, filter_channels, kernel_size, padding=kernel_size // 2)
self.norm_1 = nn.LayerNorm(filter_channels, eps=config.layer_norm_eps)
self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2)
self.norm_2 = nn.LayerNorm(filter_channels, eps=config.layer_norm_eps)
self.proj = nn.Conv1d(filter_channels, 1, 1)
if config.speaker_embedding_size != 0:
self.cond = nn.Conv1d(config.speaker_embedding_size, config.hidden_size, 1)
def forward(self, inputs, padding_mask, global_conditioning=None):
inputs = torch.detach(inputs)
if global_conditioning is not None:
global_conditioning = torch.detach(global_conditioning)
inputs = inputs + self.cond(global_conditioning)
inputs = self.conv_1(inputs * padding_mask)
inputs = torch.relu(inputs)
inputs = self.norm_1(inputs.transpose(1, -1)).transpose(1, -1)
inputs = self.dropout(inputs)
inputs = self.conv_2(inputs * padding_mask)
inputs = torch.relu(inputs)
inputs = self.norm_2(inputs.transpose(1, -1)).transpose(1, -1)
inputs = self.dropout(inputs)
inputs = self.proj(inputs * padding_mask)
return inputs * padding_mask
| VitsDurationPredictor |
python | numpy__numpy | numpy/polynomial/tests/test_legendre.py | {
"start": 16539,
"end": 17114
} | class ____:
def test_100(self):
x, w = leg.leggauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = leg.legvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1 / np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = 2.0
assert_almost_equal(w.sum(), tgt)
| TestGauss |
python | lepture__authlib | authlib/oauth2/rfc7662/introspection.py | {
"start": 177,
"end": 5292
} | class ____(TokenEndpoint):
"""Implementation of introspection endpoint which is described in
`RFC7662`_.
.. _RFC7662: https://tools.ietf.org/html/rfc7662
"""
#: Endpoint name to be registered
ENDPOINT_NAME = "introspection"
def authenticate_token(self, request, client):
"""The protected resource calls the introspection endpoint using an HTTP
``POST`` request with parameters sent as
"application/x-www-form-urlencoded" data. The protected resource sends a
parameter representing the token along with optional parameters
representing additional context that is known by the protected resource
to aid the authorization server in its response.
token
**REQUIRED** The string value of the token. For access tokens, this
is the ``access_token`` value returned from the token endpoint
defined in OAuth 2.0. For refresh tokens, this is the
``refresh_token`` value returned from the token endpoint as defined
in OAuth 2.0.
token_type_hint
**OPTIONAL** A hint about the type of the token submitted for
introspection.
"""
self.check_params(request, client)
token = self.query_token(
request.form["token"], request.form.get("token_type_hint")
)
if token and self.check_permission(token, client, request):
return token
def check_params(self, request, client):
params = request.form
if "token" not in params:
raise InvalidRequestError()
hint = params.get("token_type_hint")
if hint and hint not in self.SUPPORTED_TOKEN_TYPES:
raise UnsupportedTokenTypeError()
def create_endpoint_response(self, request):
"""Validate introspection request and create the response.
:returns: (status_code, body, headers)
"""
# The authorization server first validates the client credentials
client = self.authenticate_endpoint_client(request)
# then verifies whether the token was issued to the client making
# the revocation request
token = self.authenticate_token(request, client)
# the authorization server invalidates the token
body = self.create_introspection_payload(token)
return 200, body, default_json_headers
def create_introspection_payload(self, token):
# the token is not active, does not exist on this server, or the
# protected resource is not allowed to introspect this particular
# token, then the authorization server MUST return an introspection
# response with the "active" field set to "false"
if not token:
return {"active": False}
if token.is_expired() or token.is_revoked():
return {"active": False}
payload = self.introspect_token(token)
if "active" not in payload:
payload["active"] = True
return payload
def check_permission(self, token, client, request):
"""Check if the request has permission to introspect the token. Developers
MUST implement this method::
def check_permission(self, token, client, request):
# only allow a special client to introspect the token
return client.client_id == "introspection_client"
:return: bool
"""
raise NotImplementedError()
def query_token(self, token_string, token_type_hint):
"""Get the token from database/storage by the given token string.
Developers should implement this method::
def query_token(self, token_string, token_type_hint):
if token_type_hint == "access_token":
tok = Token.query_by_access_token(token_string)
elif token_type_hint == "refresh_token":
tok = Token.query_by_refresh_token(token_string)
else:
tok = Token.query_by_access_token(token_string)
if not tok:
tok = Token.query_by_refresh_token(token_string)
return tok
"""
raise NotImplementedError()
def introspect_token(self, token):
"""Read given token and return its introspection metadata as a
dictionary following `Section 2.2`_::
def introspect_token(self, token):
return {
"active": True,
"client_id": token.client_id,
"token_type": token.token_type,
"username": get_token_username(token),
"scope": token.get_scope(),
"sub": get_token_user_sub(token),
"aud": token.client_id,
"iss": "https://server.example.com/",
"exp": token.expires_at,
"iat": token.issued_at,
}
.. _`Section 2.2`: https://tools.ietf.org/html/rfc7662#section-2.2
"""
raise NotImplementedError()
| IntrospectionEndpoint |
python | django__django | django/db/migrations/operations/models.py | {
"start": 20028,
"end": 21996
} | class ____(ModelOptionOperation):
"""Rename a model's table."""
def __init__(self, name, table):
self.table = table
super().__init__(name)
def deconstruct(self):
kwargs = {
"name": self.name,
"table": self.table,
}
return (self.__class__.__qualname__, [], kwargs)
def state_forwards(self, app_label, state):
state.alter_model_options(app_label, self.name_lower, {"db_table": self.table})
def database_forwards(self, app_label, schema_editor, from_state, to_state):
new_model = to_state.apps.get_model(app_label, self.name)
if self.allow_migrate_model(schema_editor.connection.alias, new_model):
old_model = from_state.apps.get_model(app_label, self.name)
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
# Rename M2M fields whose name is based on this model's db_table
for old_field, new_field in zip(
old_model._meta.local_many_to_many, new_model._meta.local_many_to_many
):
if new_field.remote_field.through._meta.auto_created:
schema_editor.alter_db_table(
new_field.remote_field.through,
old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Rename table for %s to %s" % (
self.name,
self.table if self.table is not None else "(default)",
)
@property
def migration_name_fragment(self):
return "alter_%s_table" % self.name_lower
| AlterModelTable |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-surveycto/source_surveycto/source.py | {
"start": 3713,
"end": 5024
} | class ____(AbstractSource):
def check_connection(self, logger, config) -> Tuple[bool, Any]:
form_ids = config["form_id"]
try:
for form_id in form_ids:
schema = Helpers.call_survey_cto(config, form_id)
filter_data = Helpers.get_filter_data(schema)
schema_res = Helpers.get_json_schema(filter_data)
stream = SurveyctoStream(config=config, form_id=form_id, schema=schema_res)
next(stream.read_records(sync_mode=SyncMode.full_refresh))
return True, None
except Exception as error:
return False, f"Unable to connect - {(error)}"
def generate_streams(self, config: str) -> List[Stream]:
forms = config.get("form_id", [])
streams = []
for form_id in forms:
schema = Helpers.call_survey_cto(config, form_id)
filter_data = Helpers.get_filter_data(schema)
schema_res = Helpers.get_json_schema(filter_data)
stream = SurveyctoStream(config=config, form_id=form_id, schema=schema_res)
streams.append(stream)
return streams
def streams(self, config: Mapping[str, Any]) -> List[Stream]:
streams = self.generate_streams(config=config)
return streams
| SourceSurveycto |
python | doocs__leetcode | solution/0400-0499/0493.Reverse Pairs/Solution3.py | {
"start": 95,
"end": 1179
} | class ____:
def __init__(self, n):
self.tr = [Node() for _ in range(4 * n)]
self.build(1, 1, n)
def build(self, u, l, r):
self.tr[u].l = l
self.tr[u].r = r
if l == r:
return
mid = (l + r) >> 1
self.build(u << 1, l, mid)
self.build(u << 1 | 1, mid + 1, r)
def modify(self, u, x, v):
if self.tr[u].l == x and self.tr[u].r == x:
self.tr[u].v += 1
return
mid = (self.tr[u].l + self.tr[u].r) >> 1
if x <= mid:
self.modify(u << 1, x, v)
else:
self.modify(u << 1 | 1, x, v)
self.pushup(u)
def pushup(self, u):
self.tr[u].v = self.tr[u << 1].v + self.tr[u << 1 | 1].v
def query(self, u, l, r):
if self.tr[u].l >= l and self.tr[u].r <= r:
return self.tr[u].v
mid = (self.tr[u].l + self.tr[u].r) >> 1
v = 0
if l <= mid:
v += self.query(u << 1, l, r)
if r > mid:
v += self.query(u << 1 | 1, l, r)
return v
| SegmentTree |
python | encode__django-rest-framework | rest_framework/filters.py | {
"start": 8457,
"end": 14912
} | class ____(BaseFilterBackend):
# The URL query parameter used for the ordering.
ordering_param = api_settings.ORDERING_PARAM
ordering_fields = None
ordering_title = _('Ordering')
ordering_description = _('Which field to use when ordering the results.')
template = 'rest_framework/filters/ordering.html'
def get_ordering(self, request, queryset, view):
"""
Ordering is set by a comma delimited ?ordering=... query parameter.
The `ordering` query parameter can be overridden by setting
the `ordering_param` value on the OrderingFilter or by
specifying an `ORDERING_PARAM` value in the API settings.
"""
params = request.query_params.get(self.ordering_param)
if params:
fields = [param.strip() for param in params.split(',')]
ordering = self.remove_invalid_fields(queryset, fields, view, request)
if ordering:
return ordering
# No ordering was included, or all the ordering fields were invalid
return self.get_default_ordering(view)
def get_default_ordering(self, view):
ordering = getattr(view, 'ordering', None)
if isinstance(ordering, str):
return (ordering,)
return ordering
def get_default_valid_fields(self, queryset, view, context=None):
if context is None:
context = {}
# If `ordering_fields` is not specified, then we determine a default
# based on the serializer class, if one exists on the view.
if hasattr(view, 'get_serializer_class'):
try:
serializer_class = view.get_serializer_class()
except AssertionError:
# Raised by the default implementation if
# no serializer_class was found
serializer_class = None
else:
serializer_class = getattr(view, 'serializer_class', None)
if serializer_class is None:
msg = (
"Cannot use %s on a view which does not have either a "
"'serializer_class', an overriding 'get_serializer_class' "
"or 'ordering_fields' attribute."
)
raise ImproperlyConfigured(msg % self.__class__.__name__)
model_class = queryset.model
model_property_names = [
# 'pk' is a property added in Django's Model class, however it is valid for ordering.
attr for attr in dir(model_class) if isinstance(getattr(model_class, attr), property) and attr != 'pk'
]
return [
(field.source.replace('.', '__') or field_name, field.label)
for field_name, field in serializer_class(context=context).fields.items()
if (
not getattr(field, 'write_only', False) and
not field.source == '*' and
field.source not in model_property_names
)
]
def get_valid_fields(self, queryset, view, context=None):
if context is None:
context = {}
valid_fields = getattr(view, 'ordering_fields', self.ordering_fields)
if valid_fields is None:
# Default to allowing filtering on serializer fields
return self.get_default_valid_fields(queryset, view, context)
elif valid_fields == '__all__':
# View explicitly allows filtering on any model field
valid_fields = [
(field.name, field.verbose_name) for field in queryset.model._meta.fields
]
valid_fields += [
(key, key.title().split('__'))
for key in queryset.query.annotations
]
else:
valid_fields = [
(item, item) if isinstance(item, str) else item
for item in valid_fields
]
return valid_fields
def remove_invalid_fields(self, queryset, fields, view, request):
valid_fields = [item[0] for item in self.get_valid_fields(queryset, view, {'request': request})]
def term_valid(term):
if term.startswith("-"):
term = term[1:]
return term in valid_fields
return [term for term in fields if term_valid(term)]
def filter_queryset(self, request, queryset, view):
ordering = self.get_ordering(request, queryset, view)
if ordering:
return queryset.order_by(*ordering)
return queryset
def get_template_context(self, request, queryset, view):
current = self.get_ordering(request, queryset, view)
current = None if not current else current[0]
options = []
context = {
'request': request,
'current': current,
'param': self.ordering_param,
}
for key, label in self.get_valid_fields(queryset, view, context):
options.append((key, '%s - %s' % (label, _('ascending'))))
options.append(('-' + key, '%s - %s' % (label, _('descending'))))
context['options'] = options
return context
def to_html(self, request, queryset, view):
template = loader.get_template(self.template)
context = self.get_template_context(request, queryset, view)
return template.render(context)
def get_schema_fields(self, view):
assert coreapi is not None, 'coreapi must be installed to use `get_schema_fields()`'
if coreapi is not None:
warnings.warn('CoreAPI compatibility is deprecated and will be removed in DRF 3.18', RemovedInDRF318Warning)
assert coreschema is not None, 'coreschema must be installed to use `get_schema_fields()`'
return [
coreapi.Field(
name=self.ordering_param,
required=False,
location='query',
schema=coreschema.String(
title=force_str(self.ordering_title),
description=force_str(self.ordering_description)
)
)
]
def get_schema_operation_parameters(self, view):
return [
{
'name': self.ordering_param,
'required': False,
'in': 'query',
'description': force_str(self.ordering_description),
'schema': {
'type': 'string',
},
},
]
| OrderingFilter |
python | hynek__structlog | src/structlog/exceptions.py | {
"start": 505,
"end": 697
} | class ____(Exception):
"""
A user asked for the current `structlog.dev.ConsoleRenderer` but none is
configured.
.. versionadded:: 25.5.0
"""
| NoConsoleRendererConfiguredError |
python | huggingface__transformers | src/transformers/models/dinov3_vit/modeling_dinov3_vit.py | {
"start": 19331,
"end": 21206
} | class ____(DINOv3ViTPreTrainedModel):
def __init__(self, config: DINOv3ViTConfig):
super().__init__(config)
self.config = config
self.embeddings = DINOv3ViTEmbeddings(config)
self.rope_embeddings = DINOv3ViTRopePositionEmbedding(config)
self.layer = nn.ModuleList([DINOv3ViTLayer(config) for _ in range(config.num_hidden_layers)])
self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
bool_masked_pos: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPooling:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
pre-training.
"""
pixel_values = pixel_values.to(self.embeddings.patch_embeddings.weight.dtype)
hidden_states = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
position_embeddings = self.rope_embeddings(pixel_values)
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states,
position_embeddings=position_embeddings,
)
sequence_output = self.norm(hidden_states)
pooled_output = sequence_output[:, 0, :]
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)
@auto_docstring
| DINOv3ViTModel |
python | ansible__ansible | test/units/module_utils/common/test_dict_transformations.py | {
"start": 3948,
"end": 4724
} | class ____:
def test_recursive_diff(self):
a = {'foo': {'bar': [{'baz': {'qux': 'ham_sandwich'}}]}}
c = {'foo': {'bar': [{'baz': {'qux': 'ham_sandwich'}}]}}
b = {'foo': {'bar': [{'baz': {'qux': 'turkey_sandwich'}}]}}
assert recursive_diff(a, b) is not None
assert len(recursive_diff(a, b)) == 2
assert recursive_diff(a, c) is None
@pytest.mark.parametrize(
'p1, p2', (
([1, 2], [2, 3]),
({1: 2}, [2, 3]),
([1, 2], {2: 3}),
({2: 3}, 'notadict'),
('notadict', {2: 3}),
)
)
def test_recursive_diff_negative(self, p1, p2):
with pytest.raises(TypeError, match="Unable to diff"):
recursive_diff(p1, p2)
| TestCaseRecursiveDiff |
python | ray-project__ray | python/ray/train/_internal/session.py | {
"start": 2663,
"end": 3079
} | class ____:
"""A (checkpoint, metrics) result reported by the user."""
def __init__(self, checkpoint: Optional[Checkpoint], metrics: Dict[str, Any]):
self.checkpoint = checkpoint
self.metrics = metrics
def __repr__(self) -> str:
return f"TrainingResult(checkpoint={self.checkpoint}, metrics={self.metrics})"
# TODO(xwjiang): This needs a better name.
@DeveloperAPI
| _TrainingResult |
python | pytorch__pytorch | test/distributed/pipelining/test_backward.py | {
"start": 491,
"end": 8459
} | class ____(TestCase):
@skipXPUIf(True, "https://github.com/intel/torch-xpu-ops/issues/1682")
def test_stage_backward(self, device):
# MLP as a stage module
mod = MLPModule(d_hid).to(device)
x = torch.randn(batch_size, d_hid, device=device)
# As in a pipeline stage, the inputs to this stage requires gradients
x.requires_grad_(True)
target = torch.randn(batch_size, d_hid, device=device)
loss_fn = torch.nn.MSELoss(reduction="sum")
# Make a copy
ref_mod = copy.deepcopy(mod).to(device)
ref_x = x.detach().requires_grad_(x.requires_grad).to(device)
ref_target = target.detach().to(device)
# Forward and backward in stage manner
out = mod(x)
loss = loss_fn(out, target)
grad_inputs = stage_backward(
stage_output=loss,
output_grads=None,
input_values=(x,),
)
# Run reference
ref_out = ref_mod(ref_x)
ref_loss = loss_fn(ref_out, ref_target)
ref_loss.backward()
torch.testing.assert_close(grad_inputs[0], ref_x.grad)
# Every rank checks gradients
for name, p in mod.named_parameters():
ref_p = ref_mod.get_parameter(name)
try:
torch.testing.assert_close(p.grad, ref_p.grad)
except AssertionError:
print(f"Gradient test failed for {name}: {p.grad} vs {ref_p.grad}")
raise
def test_stage_backward_input(self, device):
# MLP as a stage module
mod = MLPModule(d_hid).to(device)
x = torch.randn(batch_size, d_hid, device=device)
# As in a pipeline stage, the inputs to this stage requires gradients
x.requires_grad_(True)
target = torch.randn(batch_size, d_hid, device=device)
loss_fn = torch.nn.MSELoss(reduction="sum")
# Make a copy
ref_mod = copy.deepcopy(mod).to(device)
ref_x = x.detach().requires_grad_(x.requires_grad).to(device)
ref_target = target.detach().to(device)
# Forward, then backward of loss with respect to inputs
out = mod(x)
loss = loss_fn(out, target)
dinputs, _param_groups = stage_backward_input(
stage_outputs_or_loss=(loss,),
output_grads=None,
input_values=[x],
weights=mod.parameters(),
)
# Run reference
ref_out = ref_mod(ref_x)
ref_loss = loss_fn(ref_out, ref_target)
ref_loss.backward()
torch.testing.assert_close(x.grad, ref_x.grad)
torch.testing.assert_close(dinputs[0], ref_x.grad)
for _, p in mod.named_parameters():
# Check that the weight gradients were not updated
self.assertEqual(p.grad, None)
@skipXPUIf(True, "https://github.com/intel/torch-xpu-ops/issues/1682")
def test_stage_backward_weight(self, device):
# MLP as a stage module
mod = MLPModule(d_hid).to(device)
x = torch.randn(batch_size, d_hid, device=device)
# As in a pipeline stage, the inputs to this stage requires gradients
x.requires_grad_(True)
target = torch.randn(batch_size, d_hid, device=device)
loss_fn = torch.nn.MSELoss(reduction="sum")
# Make a copy
ref_mod = copy.deepcopy(mod).to(device)
ref_x = x.detach().requires_grad_(x.requires_grad).to(device)
ref_target = target.detach().to(device)
# Forward, then backward of loss with respect to inputs
out = mod(x)
loss = loss_fn(out, target)
_dinputs, param_groups = stage_backward_input(
stage_outputs_or_loss=(loss,),
output_grads=None,
input_values=[x],
weights=mod.parameters(),
)
# backward of loss with respect to weights
stage_backward_weight(mod.parameters(), param_groups, retain_graph=True)
# Run reference
ref_out = ref_mod(ref_x)
ref_loss = loss_fn(ref_out, ref_target)
ref_loss.backward()
# Every rank checks gradients
for name, p in mod.named_parameters():
ref_p = ref_mod.get_parameter(name)
try:
torch.testing.assert_close(p.grad, ref_p.grad)
except AssertionError:
print(f"Gradient test failed for {name}: {p.grad} vs {ref_p.grad}")
raise
@skipXPUIf(True, "https://github.com/intel/torch-xpu-ops/issues/1682")
def test_stage_backward_weight_multiple_iters(self, device):
# MLP as a stage module
mod = MLPModule(d_hid).to(device)
inputs = []
for _ in range(10):
x = torch.randn(batch_size, d_hid, device=device)
inputs.append(x)
# As in a pipeline stage, the inputs to this stage requires gradients
x.requires_grad_(True)
target = torch.randn(batch_size, d_hid, device=device)
loss_fn = torch.nn.MSELoss(reduction="sum")
# Make a copy
ref_mod = copy.deepcopy(mod).to(device)
ref_inputs = []
for x in inputs:
ref_x = x.detach().requires_grad_(x.requires_grad).to(device)
ref_inputs.append(ref_x)
ref_target = target.detach().to(device)
# Forward, then backward of loss with respect to inputs
for x in inputs:
out = mod(x)
loss = loss_fn(out, target)
_dinputs, param_groups = stage_backward_input(
stage_outputs_or_loss=(loss,),
output_grads=None,
input_values=[x],
weights=mod.parameters(),
)
# backward of loss with respect to weights
stage_backward_weight(mod.parameters(), param_groups)
# Run reference
for ref_x in ref_inputs:
ref_out = ref_mod(ref_x)
ref_loss = loss_fn(ref_out, ref_target)
ref_loss.backward()
# Every rank checks gradients
for name, p in mod.named_parameters():
ref_p = ref_mod.get_parameter(name)
try:
torch.testing.assert_close(p.grad, ref_p.grad)
except AssertionError:
print(f"Gradient test failed for {name}: {p.grad} vs {ref_p.grad}")
raise
def test_stage_backward_weight_grad_validation(self, device):
test_cases = [
(
"size >= 2",
lambda: [
(
torch.randn(batch_size, d_hid, device=device),
torch.randn(batch_size, d_hid, device=device),
)
],
),
("size = 1", lambda: [(torch.randn(batch_size, d_hid, device=device),)]),
(
"1 grad, 1 None",
lambda: [(torch.randn(batch_size, d_hid, device=device), None)],
),
]
for description, mock_grads_factory in test_cases:
with self.subTest(description=description):
mod = MLPModule(d_hid).to(device)
x = torch.randn(batch_size, d_hid, device=device)
x.requires_grad_(True)
out = mod(x)
loss = torch.sum(out)
dinputs, param_groups = stage_backward_input(
stage_outputs_or_loss=[loss],
output_grads=None,
input_values=[x],
weights=mod.parameters(),
)
# Set up mock grads
for param_group in param_groups:
param_group["grads"] = mock_grads_factory()
stage_backward_weight(mod.parameters(), param_groups)
devices = ["cpu", "cuda", "hpu", "xpu"]
instantiate_device_type_tests(
StageBackwardTests, globals(), only_for=devices, allow_xpu=True
)
if __name__ == "__main__":
run_tests()
| StageBackwardTests |
python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/cloud_v2/sensor_builder.py | {
"start": 1467,
"end": 10446
} | class ____:
"""A cursor that stores the last effective timestamp and offset."""
finished_at_lower_bound: Optional[float] = None
finished_at_upper_bound: Optional[float] = None
offset: Optional[int] = None
def materializations_from_batch_iter(
context: SensorEvaluationContext,
finished_at_lower_bound: float,
finished_at_upper_bound: float,
offset: int,
workspace: DbtCloudWorkspace,
dagster_dbt_translator: DagsterDbtTranslator,
) -> Iterator[Optional[BatchResult]]:
client = workspace.get_client()
workspace_data = workspace.get_or_fetch_workspace_data()
total_processed_runs = 0
while True:
latest_offset = total_processed_runs + offset
runs, total_runs = client.get_runs_batch(
project_id=workspace.project_id,
environment_id=workspace.environment_id,
finished_at_lower_bound=datetime_from_timestamp(finished_at_lower_bound),
finished_at_upper_bound=datetime_from_timestamp(finished_at_upper_bound),
offset=latest_offset,
)
if len(runs) == 0:
yield None
context.log.info("Received no runs. Breaking.")
break
context.log.info(
f"Processing {len(runs)}/{total_runs} runs for dbt Cloud workspace "
f"for project {workspace.project_name} and environment {workspace.environment_name}..."
)
for i, run_details in enumerate(runs):
run = DbtCloudRun.from_run_details(run_details=run_details)
if run.job_definition_id == workspace_data.adhoc_job_id:
context.log.info(f"Run {run.id} was triggered by Dagster. Continuing.")
continue
run_artifacts = client.list_run_artifacts(run_id=run.id)
if "run_results.json" not in run_artifacts:
context.log.info(
f"Run {run.id} does not have a run_results.json artifact. Continuing."
)
continue
run_results = DbtCloudJobRunResults.from_run_results_json(
run_results_json=client.get_run_results_json(run_id=run.id)
)
events = run_results.to_default_asset_events(
client=workspace.get_client(),
manifest=workspace_data.manifest,
dagster_dbt_translator=dagster_dbt_translator,
)
# Currently, only materializations are tracked
mats = [event for event in events if isinstance(event, AssetMaterialization)]
context.log.info(f"Found {len(mats)} materializations for {run.id}")
all_asset_keys_materialized = {mat.asset_key for mat in mats}
yield (
BatchResult(
idx=i + latest_offset,
asset_events=mats,
all_asset_keys_materialized=all_asset_keys_materialized,
)
if mats
else None
)
total_processed_runs += len(runs)
context.log.info(
f"Processed {total_processed_runs}/{total_runs} runs for dbt Cloud workspace "
f"for project {workspace.project_name} and environment {workspace.environment_name}..."
)
if total_processed_runs == total_runs:
yield None
context.log.info("Processed all runs. Breaking.")
break
def sorted_asset_events(
asset_events: Sequence[Union[AssetMaterialization, AssetObservation, AssetCheckEvaluation]],
repository_def: RepositoryDefinition,
) -> list[Union[AssetMaterialization, AssetObservation, AssetCheckEvaluation]]:
"""Sort asset events by end date and toposort order."""
topo_aks = repository_def.asset_graph.toposorted_asset_keys
materializations_and_timestamps = [
(mat.metadata[COMPLETED_AT_TIMESTAMP_METADATA_KEY].value, mat) for mat in asset_events
]
return [
sorted_event[1]
for sorted_event in sorted(
materializations_and_timestamps, key=lambda x: (x[0], topo_aks.index(x[1].asset_key))
)
]
def build_dbt_cloud_polling_sensor(
*,
workspace: DbtCloudWorkspace,
dagster_dbt_translator: Optional[DagsterDbtTranslator] = None,
minimum_interval_seconds: int = DEFAULT_DBT_CLOUD_SENSOR_INTERVAL_SECONDS,
default_sensor_status: Optional[DefaultSensorStatus] = None,
) -> SensorDefinition:
"""The constructed sensor polls the dbt Cloud Workspace for activity, and inserts asset events into Dagster's event log.
Args:
workspace (DbtCloudWorkspace): The dbt Cloud workspace to poll for runs.
dagster_dbt_translator (Optional[DagsterDbtTranslator], optional): The translator to use
to convert dbt Cloud content into :py:class:`dagster.AssetSpec`.
Defaults to :py:class:`DagsterDbtTranslator`.
minimum_interval_seconds (int, optional): The minimum interval in seconds between sensor runs. Defaults to 30.
default_sensor_status (Optional[DefaultSensorStatus], optional): The default status of the sensor.
Returns:
Definitions: A `SensorDefinitions` object.
"""
dagster_dbt_translator = dagster_dbt_translator or DagsterDbtTranslator()
@sensor(
name=clean_name(
f"{workspace.account_name}_{workspace.project_name}_{workspace.environment_name}__run_status_sensor"
),
description=(
f"dbt Cloud polling sensor for dbt Cloud workspace for account {workspace.account_name}, "
f"project {workspace.project_name} and environment {workspace.environment_name}"
),
minimum_interval_seconds=minimum_interval_seconds,
default_status=default_sensor_status or DefaultSensorStatus.RUNNING,
)
def dbt_cloud_run_sensor(context: SensorEvaluationContext) -> SensorResult:
"""Sensor to report materialization events for each asset as new runs come in."""
context.log.info(
f"************"
f"Running sensor for dbt Cloud workspace for account {workspace.account_name}, "
f"project {workspace.project_name} and environment {workspace.environment_name}"
f"***********"
)
try:
cursor = (
deserialize_value(context.cursor, DbtCloudPollingSensorCursor)
if context.cursor
else DbtCloudPollingSensorCursor()
)
except Exception as e:
context.log.info(f"Failed to interpret cursor. Starting from scratch. Error: {e}")
cursor = DbtCloudPollingSensorCursor()
current_date = get_current_datetime()
current_offset = cursor.offset or 0
finished_at_lower_bound = (
cursor.finished_at_lower_bound
or (current_date - timedelta(seconds=START_LOOKBACK_SECONDS)).timestamp()
)
finished_at_upper_bound = cursor.finished_at_upper_bound or current_date.timestamp()
sensor_iter = materializations_from_batch_iter(
context=context,
finished_at_lower_bound=finished_at_lower_bound,
finished_at_upper_bound=finished_at_upper_bound,
offset=current_offset,
workspace=workspace,
dagster_dbt_translator=dagster_dbt_translator,
)
all_asset_events: list[AssetMaterialization] = []
latest_offset = current_offset
repository_def = check.not_none(context.repository_def)
batch_result = None
while get_current_datetime() - current_date < timedelta(seconds=MAIN_LOOP_TIMEOUT_SECONDS):
batch_result = next(sensor_iter, None)
if batch_result is None:
context.log.info("Received no batch result. Breaking.")
break
all_asset_events.extend(batch_result.asset_events)
latest_offset = batch_result.idx
if batch_result is not None:
new_cursor = DbtCloudPollingSensorCursor(
finished_at_lower_bound=finished_at_lower_bound,
finished_at_upper_bound=finished_at_upper_bound,
offset=latest_offset + 1,
)
else:
# We have completed iteration for this range
new_cursor = DbtCloudPollingSensorCursor(
finished_at_lower_bound=finished_at_upper_bound,
finished_at_upper_bound=None,
offset=0,
)
context.update_cursor(serialize_value(new_cursor))
context.log.info(
f"************"
f"Exiting sensor for dbt Cloud workspace for account {workspace.account_name}, "
f"project {workspace.project_name} and environment {workspace.environment_name}"
f"***********"
)
return SensorResult(
asset_events=sorted_asset_events(all_asset_events, repository_def),
)
return dbt_cloud_run_sensor
| DbtCloudPollingSensorCursor |
python | run-llama__llama_index | llama-index-core/llama_index/core/response_synthesizers/simple_summarize.py | {
"start": 563,
"end": 3633
} | class ____(BaseSynthesizer):
def __init__(
self,
llm: Optional[LLM] = None,
callback_manager: Optional[CallbackManager] = None,
prompt_helper: Optional[PromptHelper] = None,
text_qa_template: Optional[BasePromptTemplate] = None,
streaming: bool = False,
) -> None:
super().__init__(
llm=llm,
callback_manager=callback_manager,
prompt_helper=prompt_helper,
streaming=streaming,
)
self._text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT_SEL
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {"text_qa_template": self._text_qa_template}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "text_qa_template" in prompts:
self._text_qa_template = prompts["text_qa_template"]
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
text_qa_template = self._text_qa_template.partial_format(query_str=query_str)
single_text_chunk = "\n".join(text_chunks)
truncated_chunks = self._prompt_helper.truncate(
prompt=text_qa_template,
text_chunks=[single_text_chunk],
llm=self._llm,
)
response: RESPONSE_TEXT_TYPE
if not self._streaming:
response = await self._llm.apredict(
text_qa_template,
context_str=truncated_chunks,
**response_kwargs,
)
else:
response = await self._llm.astream(
text_qa_template,
context_str=truncated_chunks,
**response_kwargs,
)
if isinstance(response, str):
response = response or "Empty Response"
else:
response = cast(Generator, response)
return response
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
text_qa_template = self._text_qa_template.partial_format(query_str=query_str)
single_text_chunk = "\n".join(text_chunks)
truncated_chunks = self._prompt_helper.truncate(
prompt=text_qa_template,
text_chunks=[single_text_chunk],
llm=self._llm,
)
response: RESPONSE_TEXT_TYPE
if not self._streaming:
response = self._llm.predict(
text_qa_template,
context_str=truncated_chunks,
**kwargs,
)
else:
response = self._llm.stream(
text_qa_template,
context_str=truncated_chunks,
**kwargs,
)
if isinstance(response, str):
response = response or "Empty Response"
else:
response = cast(Generator, response)
return response
| SimpleSummarize |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/conjecture/test_provider.py | {
"start": 18398,
"end": 18949
} | class ____(TrivialProvider):
scope = "exhausted"
def __init__(self, conjecturedata: "ConjectureData", /) -> None:
super().__init__(conjecturedata)
self._calls = 0
def draw_integer(self, *args, **constraints):
self._calls += 1
if self._calls > 20:
# This is complete nonsense of course, so we'll see Hypothesis complain
# that we found a problem after the backend reported verification.
raise BackendCannotProceed(self.scope)
return self._calls
| ExhaustibleProvider |
python | django__django | tests/postgres_tests/models.py | {
"start": 6311,
"end": 6379
} | class ____(PostgreSQLModel):
one_off = OffByOneField()
| OffByOneModel |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/errors.py | {
"start": 536,
"end": 644
} | class ____(Exception):
"""Generic parent class for exceptions thrown by Hypothesis."""
| HypothesisException |
python | pennersr__django-allauth | allauth/socialaccount/providers/hubspot/provider.py | {
"start": 403,
"end": 1033
} | class ____(OAuth2Provider):
id = "hubspot"
name = "Hubspot"
account_class = HubspotAccount
oauth2_adapter_class = HubspotOAuth2Adapter
def get_default_scope(self):
return ["oauth"]
def extract_uid(self, data):
return str(data["user_id"])
def extract_common_fields(self, data):
return dict(email=data.get("user"))
def extract_email_addresses(self, data):
ret = []
email = data.get("user")
if email:
ret.append(EmailAddress(email=email, verified=True, primary=True))
return ret
provider_classes = [HubspotProvider]
| HubspotProvider |
python | networkx__networkx | networkx/algorithms/isomorphism/tests/test_isomorphvf2.py | {
"start": 2143,
"end": 4252
} | class ____:
# https://web.archive.org/web/20090303210205/http://amalfi.dis.unina.it/graph/db/
@staticmethod
def create_graph(filename):
"""Creates a Graph instance from the filename."""
# The file is assumed to be in the format from the VF2 graph database.
# Each file is composed of 16-bit numbers (unsigned short int).
# So we will want to read 2 bytes at a time.
# We can read the number as follows:
# number = struct.unpack('<H', file.read(2))
# This says, expect the data in little-endian encoding
# as an unsigned short int and unpack 2 bytes from the file.
fh = open(filename, mode="rb")
# Grab the number of nodes.
# Node numeration is 0-based, so the first node has index 0.
nodes = struct.unpack("<H", fh.read(2))[0]
graph = nx.Graph()
for from_node in range(nodes):
# Get the number of edges.
edges = struct.unpack("<H", fh.read(2))[0]
for edge in range(edges):
# Get the terminal node.
to_node = struct.unpack("<H", fh.read(2))[0]
graph.add_edge(from_node, to_node)
fh.close()
return graph
def test_graph(self):
head = importlib.resources.files("networkx.algorithms.isomorphism.tests")
g1 = self.create_graph(head / "iso_r01_s80.A99")
g2 = self.create_graph(head / "iso_r01_s80.B99")
gm = iso.GraphMatcher(g1, g2)
assert gm.is_isomorphic()
def test_subgraph(self):
# A is the subgraph
# B is the full graph
head = importlib.resources.files("networkx.algorithms.isomorphism.tests")
subgraph = self.create_graph(head / "si2_b06_m200.A99")
graph = self.create_graph(head / "si2_b06_m200.B99")
gm = iso.GraphMatcher(graph, subgraph)
assert gm.subgraph_is_isomorphic()
# Just testing some cases
assert gm.subgraph_is_monomorphic()
# There isn't a similar test implemented for subgraph monomorphism,
# feel free to create one.
| TestVF2GraphDB |
python | apache__airflow | providers/cncf/kubernetes/src/airflow/providers/cncf/kubernetes/operators/resource.py | {
"start": 6179,
"end": 7489
} | class ____(KubernetesResourceBaseOperator):
"""Delete a resource in a kubernetes."""
def delete_custom_from_yaml_object(self, body: dict):
name = body["metadata"]["name"]
group, version, namespace, plural = self.get_crd_fields(body)
if self.namespaced:
self.custom_object_client.delete_namespaced_custom_object(group, version, namespace, plural, name)
else:
self.custom_object_client.delete_cluster_custom_object(group, version, plural, name)
def _delete_objects(self, objects):
if not self.custom_resource_definition:
delete_from_yaml(
k8s_client=self.client,
yaml_objects=objects,
namespace=self.get_namespace(),
)
else:
k8s_resource_iterator(self.delete_custom_from_yaml_object, objects)
def execute(self, context) -> None:
if self.yaml_conf:
self._delete_objects(yaml.safe_load_all(self.yaml_conf))
elif self.yaml_conf_file and os.path.exists(self.yaml_conf_file):
with open(self.yaml_conf_file) as stream:
self._delete_objects(yaml.safe_load_all(stream))
else:
raise AirflowException("File %s not found", self.yaml_conf_file)
| KubernetesDeleteResourceOperator |
python | pytorch__pytorch | torch/distributed/checkpoint/_extension.py | {
"start": 6336,
"end": 7790
} | class ____:
def __init__(self) -> None:
# Populate default registry contents
self.extensions: dict[str, type[Extension]] = {
cls.registry_name(): cls for cls in (ZStandard,)
}
def register(self, cls: type[Extension]) -> None:
self.extensions[cls.registry_name()] = cls
def from_descriptor_list(self, descriptors: Sequence[str]) -> Sequence[Extension]:
"""
Given a seuquence of descriptor strings as returned by
Extension.get_descriptor at save time, creates a sequence of
Extension instances. The name[@local-domain] preceding the
version number is used to look up an implementation class in
the registry, and the version is passed to the class's
from_descriptor static method. If the registry contains no
match, this will throw ValueError. If the from_descriptor
method raises an exception, that will pass through to the
caller.
"""
def from_descriptor(desc: str) -> Extension:
name, _, version = desc.partition("/")
if version is None:
version = 0
ext = self.extensions.get(name)
if not ext:
raise ValueError(f"Unknown extension {name=}")
# pyrefly: ignore [bad-argument-type]
return ext.from_descriptor(version)
return [from_descriptor(desc) for desc in descriptors]
| ExtensionRegistry |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_indexing.py | {
"start": 23026,
"end": 43284
} | class ____(TestCase):
"""
These tests use code to mimic the C-Code indexing for selection.
NOTE:
* This still lacks tests for complex item setting.
* If you change behavior of indexing, you might want to modify
these tests to try more combinations.
* Behavior was written to match numpy version 1.8. (though a
first version matched 1.7.)
* Only tuple indices are supported by the mimicking code.
(and tested as of writing this)
* Error types should match most of the time as long as there
is only one error. For multiple errors, what gets raised
will usually not be the same one. They are *not* tested.
Update 2016-11-30: It is probably not worth maintaining this test
indefinitely and it can be dropped if maintenance becomes a burden.
"""
def setupUp(self):
self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
self.b = np.empty((3, 0, 5, 6))
self.complex_indices = [
"skip",
Ellipsis,
0,
# Boolean indices, up to 3-d for some special cases of eating up
# dimensions, also need to test all False
np.array([True, False, False]),
np.array([[True, False], [False, True]]),
np.array([[[False, False], [False, False]]]),
# Some slices:
slice(-5, 5, 2),
slice(1, 1, 100),
slice(4, -1, -2),
slice(None, None, -3),
# Some Fancy indexes:
np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast
np.array([0, 1, -2]),
np.array([[2], [0], [1]]),
np.array([[0, -1], [0, 1]], dtype=np.dtype("intp")),
np.array([2, -1], dtype=np.int8),
np.zeros([1] * 31, dtype=int), # trigger too large array.
np.array([0.0, 1.0]),
] # invalid datatype
# Some simpler indices that still cover a bit more
self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), "skip"]
# Very simple ones to fill the rest:
self.fill_indices = [slice(None, None), 0]
def _get_multi_index(self, arr, indices):
"""Mimic multi dimensional indexing.
Parameters
----------
arr : ndarray
Array to be indexed.
indices : tuple of index objects
Returns
-------
out : ndarray
An array equivalent to the indexing operation (but always a copy).
`arr[indices]` should be identical.
no_copy : bool
Whether the indexing operation requires a copy. If this is `True`,
`np.may_share_memory(arr, arr[indices])` should be `True` (with
some exceptions for scalars and possibly 0-d arrays).
Notes
-----
While the function may mostly match the errors of normal indexing this
is generally not the case.
"""
in_indices = list(indices)
indices = []
# if False, this is a fancy or boolean index
no_copy = True
# number of fancy/scalar indexes that are not consecutive
num_fancy = 0
# number of dimensions indexed by a "fancy" index
fancy_dim = 0
# NOTE: This is a funny twist (and probably OK to change).
# The boolean array has illegal indexes, but this is
# allowed if the broadcast fancy-indices are 0-sized.
# This variable is to catch that case.
error_unless_broadcast_to_empty = False
# We need to handle Ellipsis and make arrays from indices, also
# check if this is fancy indexing (set no_copy).
ndim = 0
ellipsis_pos = None # define here mostly to replace all but first.
for i, indx in enumerate(in_indices): # codespell:ignore
if indx is None: # codespell:ignore
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool: # codespell:ignore
no_copy = False
if indx.ndim == 0: # codespell:ignore
raise IndexError
# boolean indices can have higher dimensions
ndim += indx.ndim # codespell:ignore
fancy_dim += indx.ndim # codespell:ignore
continue
if indx is Ellipsis: # codespell:ignore
if ellipsis_pos is None:
ellipsis_pos = i
continue # do not increment ndim counter
raise IndexError
if isinstance(indx, slice): # codespell:ignore
ndim += 1
continue
if not isinstance(indx, np.ndarray): # codespell:ignore
# This could be open for changes in numpy.
# numpy should maybe raise an error if casting to intp
# is not safe. It rejects np.array([1., 2.]) but not
# [1., 2.] as index (same for ie. np.take).
# (Note the importance of empty lists if changing this here)
try:
indx = np.array(indx, dtype=np.intp) # codespell:ignore
except ValueError:
raise IndexError from None
in_indices[i] = indx # codespell:ignore
elif indx.dtype.kind != "b" and indx.dtype.kind != "i": # codespell:ignore
raise IndexError(
"arrays used as indices must be of integer (or boolean) type"
)
if indx.ndim != 0: # codespell:ignore
no_copy = False
ndim += 1
fancy_dim += 1
if arr.ndim - ndim < 0:
# we can't take more dimensions then we have, not even for 0-d
# arrays. since a[()] makes sense, but not a[(),]. We will
# raise an error later on, unless a broadcasting error occurs
# first.
raise IndexError
if ndim == 0 and None not in in_indices:
# Well we have no indexes or one Ellipsis. This is legal.
return arr.copy(), no_copy
if ellipsis_pos is not None:
in_indices[ellipsis_pos : ellipsis_pos + 1] = [slice(None, None)] * (
arr.ndim - ndim
)
for ax, indx in enumerate(in_indices): # codespell:ignore
if isinstance(indx, slice): # codespell:ignore
# convert to an index array
indx = np.arange(*indx.indices(arr.shape[ax])) # codespell:ignore
indices.append(["s", indx]) # codespell:ignore
continue
elif indx is None: # codespell:ignore
# this is like taking a slice with one element from a new axis:
indices.append(["n", np.array([0], dtype=np.intp)])
arr = arr.reshape(arr.shape[:ax] + (1,) + arr.shape[ax:])
continue
if isinstance(indx, np.ndarray) and indx.dtype == bool: # codespell:ignore
if indx.shape != arr.shape[ax : ax + indx.ndim]: # codespell:ignore
raise IndexError
try:
flat_indx = np.ravel_multi_index(
np.nonzero(indx), # codespell:ignore
arr.shape[ax : ax + indx.ndim], # codespell:ignore
mode="raise",
)
except Exception:
error_unless_broadcast_to_empty = True
# fill with 0s instead, and raise error later
flat_indx = np.array(
[0] * indx.sum(), # codespell:ignore
dtype=np.intp,
)
# concatenate axis into a single one:
if indx.ndim != 0: # codespell:ignore
arr = arr.reshape(
arr.shape[:ax]
+ (np.prod(arr.shape[ax : ax + indx.ndim]),) # codespell:ignore
+ arr.shape[ax + indx.ndim :] # codespell:ignore
)
indx = flat_indx # codespell:ignore
else:
# This could be changed, a 0-d boolean index can
# make sense (even outside the 0-d indexed array case)
# Note that originally this is could be interpreted as
# integer in the full integer special case.
raise IndexError
else:
# If the index is a singleton, the bounds check is done
# before the broadcasting. This used to be different in <1.9
if indx.ndim == 0: # codespell:ignore
if (
indx >= arr.shape[ax] # codespell:ignore
or indx < -arr.shape[ax] # codespell:ignore
):
raise IndexError
if indx.ndim == 0: # codespell:ignore
# The index is a scalar. This used to be two fold, but if
# fancy indexing was active, the check was done later,
# possibly after broadcasting it away (1.7. or earlier).
# Now it is always done.
if indx >= arr.shape[ax] or indx < -arr.shape[ax]: # codespell:ignore
raise IndexError
if len(indices) > 0 and indices[-1][0] == "f" and ax != ellipsis_pos:
# NOTE: There could still have been a 0-sized Ellipsis
# between them. Checked that with ellipsis_pos.
indices[-1].append(indx) # codespell:ignore
else:
# We have a fancy index that is not after an existing one.
# NOTE: A 0-d array triggers this as well, while one may
# expect it to not trigger it, since a scalar would not be
# considered fancy indexing.
num_fancy += 1
indices.append(["f", indx]) # codespell:ignore
if num_fancy > 1 and not no_copy:
# We have to flush the fancy indexes left
new_indices = indices[:]
axes = list(range(arr.ndim))
fancy_axes = []
new_indices.insert(0, ["f"])
ni = 0
ai = 0
for indx in indices: # codespell:ignore
ni += 1
if indx[0] == "f": # codespell:ignore
new_indices[0].extend(indx[1:]) # codespell:ignore
del new_indices[ni]
ni -= 1
for ax in range(ai, ai + len(indx[1:])): # codespell:ignore
fancy_axes.append(ax)
axes.remove(ax)
ai += len(indx) - 1 # axis we are at # codespell:ignore
indices = new_indices
# and now we need to transpose arr:
arr = arr.transpose(*(fancy_axes + axes))
# We only have one 'f' index now and arr is transposed accordingly.
# Now handle newaxis by reshaping...
ax = 0
for indx in indices: # codespell:ignore
if indx[0] == "f": # codespell:ignore
if len(indx) == 1: # codespell:ignore
continue
# First of all, reshape arr to combine fancy axes into one:
orig_shape = arr.shape
orig_slice = orig_shape[ax : ax + len(indx[1:])] # codespell:ignore
arr = arr.reshape(
arr.shape[:ax]
+ (np.prod(orig_slice).astype(int),)
+ arr.shape[ax + len(indx[1:]) :] # codespell:ignore
)
# Check if broadcasting works
res = np.broadcast(*indx[1:]) # codespell:ignore
# unfortunately the indices might be out of bounds. So check
# that first, and use mode='wrap' then. However only if
# there are any indices...
if res.size != 0:
if error_unless_broadcast_to_empty:
raise IndexError
for _indx, _size in zip(indx[1:], orig_slice): # codespell:ignore
if _indx.size == 0:
continue
if np.any(_indx >= _size) or np.any(_indx < -_size):
raise IndexError
if len(indx[1:]) == len(orig_slice): # codespell:ignore
if np.prod(orig_slice) == 0:
# Work around for a crash or IndexError with 'wrap'
# in some 0-sized cases.
try:
mi = np.ravel_multi_index(
indx[1:], # codespell:ignore
orig_slice,
mode="raise", # codespell:ignore
)
except Exception as exc:
# This happens with 0-sized orig_slice (sometimes?)
# here it is a ValueError, but indexing gives a:
raise IndexError("invalid index into 0-sized") from exc
else:
mi = np.ravel_multi_index(
indx[1:], # codespell:ignore
orig_slice,
mode="wrap",
)
else:
# Maybe never happens...
raise ValueError
arr = arr.take(mi.ravel(), axis=ax)
try:
arr = arr.reshape(arr.shape[:ax] + mi.shape + arr.shape[ax + 1 :])
except ValueError:
# too many dimensions, probably
raise IndexError from None
ax += mi.ndim
continue
# If we are here, we have a 1D array for take:
arr = arr.take(indx[1], axis=ax) # codespell:ignore
ax += 1
return arr, no_copy
def _check_multi_index(self, arr, index):
"""Check a multi index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be a reshaped arange.
index : tuple of indexing objects
Index being tested.
"""
# Test item getting
try:
mimic_get, no_copy = self._get_multi_index(arr, index)
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(type(e), arr.__getitem__, index)
assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _check_single_index(self, arr, index):
"""Check a single index item getting and simple setting.
Parameters
----------
arr : ndarray
Array to be indexed, must be an arange.
index : indexing object
Index being tested. Must be a single index and not a tuple
of indexing objects (see also `_check_multi_index`).
"""
try:
mimic_get, no_copy = self._get_multi_index(arr, (index,))
except Exception as e:
if HAS_REFCOUNT:
prev_refcount = sys.getrefcount(arr)
assert_raises(type(e), arr.__getitem__, index)
assert_raises(type(e), arr.__setitem__, index, 0)
if HAS_REFCOUNT:
assert_equal(prev_refcount, sys.getrefcount(arr))
return
self._compare_index_result(arr, index, mimic_get, no_copy)
def _compare_index_result(self, arr, index, mimic_get, no_copy):
"""Compare mimicked result to indexing result."""
raise SkipTest("torch does not support subclassing")
arr = arr.copy()
indexed_arr = arr[index]
assert_array_equal(indexed_arr, mimic_get)
# Check if we got a view, unless its a 0-sized or 0-d array.
# (then its not a view, and that does not matter)
if indexed_arr.size != 0 and indexed_arr.ndim != 0:
assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
# Check reference count of the original array
if HAS_REFCOUNT:
if no_copy:
# refcount increases by one:
assert_equal(sys.getrefcount(arr), 3)
else:
assert_equal(sys.getrefcount(arr), 2)
# Test non-broadcast setitem:
b = arr.copy()
b[index] = mimic_get + 1000
if b.size == 0:
return # nothing to compare here...
if no_copy and indexed_arr.ndim != 0:
# change indexed_arr in-place to manipulate original:
indexed_arr += 1000
assert_array_equal(arr, b)
return
# Use the fact that the array is originally an arange:
arr.flat[indexed_arr.ravel()] += 1000
assert_array_equal(arr, b)
def test_boolean(self):
a = np.array(5)
assert_equal(a[np.array(True)], 5)
a[np.array(True)] = 1
assert_equal(a, 1)
# NOTE: This is different from normal broadcasting, as
# arr[boolean_array] works like in a multi index. Which means
# it is aligned to the left. This is probably correct for
# consistency with arr[boolean_array,] also no broadcasting
# is done at all
self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool),))
self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
def test_multidim(self):
# Automatically test combinations with complex indexes on 2nd (or 1st)
# spot and the simple ones in one other spot.
with warnings.catch_warnings():
# This is so that np.array(True) is not accepted in a full integer
# index, when running the file separately.
warnings.filterwarnings("error", "", DeprecationWarning)
# np.VisibleDeprecationWarning moved to np.exceptions in numpy>=2.0.0
# np.exceptions only available in numpy>=1.25.0
has_exceptions_ns = hasattr(np, "exceptions")
VisibleDeprecationWarning = ( # noqa: F841
np.exceptions.VisibleDeprecationWarning
if has_exceptions_ns
else np.VisibleDeprecationWarning
)
# FIXME(rec): should this use VisibleDeprecationWarning instead?
warnings.filterwarnings("error", "", np.VisibleDeprecationWarning)
def isskip(idx):
return isinstance(idx, str) and idx == "skip"
for simple_pos in [0, 2, 3]:
tocheck = [
self.fill_indices,
self.complex_indices,
self.fill_indices,
self.fill_indices,
]
tocheck[simple_pos] = self.simple_indices
for index in product(*tocheck):
index = tuple(i for i in index if not isskip(i))
self._check_multi_index(self.a, index)
self._check_multi_index(self.b, index)
# Check very simple item getting:
self._check_multi_index(self.a, (0, 0, 0, 0))
self._check_multi_index(self.b, (0, 0, 0, 0))
# Also check (simple cases of) too many indices:
assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
def test_1d(self):
a = np.arange(10)
for index in self.complex_indices:
self._check_single_index(a, index)
| TestMultiIndexingAutomated |
python | ray-project__ray | python/ray/util/client/common.py | {
"start": 14580,
"end": 18246
} | class ____(ClientStub):
"""Client-side stub for instantiated actor.
A stub created on the Ray Client to represent a remote actor that
has been started on the cluster. This class is allowed to be passed
around between remote functions.
Args:
actor_ref: A reference to the running actor given to the client. This
is a serialized version of the actual handle as an opaque token.
"""
def __init__(
self,
actor_ref: ClientActorRef,
actor_class: Optional[ClientActorClass] = None,
):
self.actor_ref = actor_ref
self._dir: Optional[List[str]] = None
if actor_class is not None:
self._method_num_returns = {}
self._method_signatures = {}
for method_name, method_obj in inspect.getmembers(
actor_class.actor_cls, is_function_or_method
):
self._method_num_returns[method_name] = getattr(
method_obj, "__ray_num_returns__", None
)
self._method_signatures[method_name] = inspect.Signature(
parameters=extract_signature(
method_obj,
ignore_first=(
not (
is_class_method(method_obj)
or is_static_method(actor_class.actor_cls, method_name)
)
),
)
)
else:
self._method_num_returns = None
self._method_signatures = None
def __dir__(self) -> List[str]:
if self._method_num_returns is not None:
return self._method_num_returns.keys()
if ray.is_connected():
self._init_class_info()
return self._method_num_returns.keys()
return super().__dir__()
# For compatibility with core worker ActorHandle._actor_id which returns
# ActorID
@property
def _actor_id(self) -> ClientActorRef:
return self.actor_ref
def __hash__(self) -> int:
return hash(self._actor_id)
def __eq__(self, __value) -> bool:
return hash(self) == hash(__value)
def __getattr__(self, key):
if key == "_method_num_returns":
# We need to explicitly handle this value since it is used below,
# otherwise we may end up infinitely recursing when deserializing.
# This can happen after unpickling an object but before
# _method_num_returns is correctly populated.
raise AttributeError(f"ClientActorRef has no attribute '{key}'")
if self._method_num_returns is None:
self._init_class_info()
if key not in self._method_signatures:
raise AttributeError(f"ClientActorRef has no attribute '{key}'")
return ClientRemoteMethod(
self,
key,
self._method_num_returns.get(key),
self._method_signatures.get(key),
)
def __repr__(self):
return "ClientActorHandle(%s)" % (self.actor_ref.id.hex())
def _init_class_info(self):
# TODO: fetch Ray method decorators
@ray.remote(num_cpus=0)
def get_class_info(x):
return x._ray_method_num_returns, x._ray_method_signatures
self._method_num_returns, method_parameters = ray.get(
get_class_info.remote(self)
)
self._method_signatures = {}
for method, parameters in method_parameters.items():
self._method_signatures[method] = inspect.Signature(parameters=parameters)
| ClientActorHandle |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_async_job.py | {
"start": 1758,
"end": 6040
} | class ____:
"""
Minimal AdReportRun stand-in:
- first api_get() call: RUNNING
- second api_get() call: COMPLETED
- get_result(): returns fake rows with the requested id field
"""
def __init__(self, id_field: str):
self._id_field = id_field
self._calls = 0
self._status = Status.RUNNING
self._percent = 0
def api_get(self):
self._calls += 1
if self._calls >= 2:
self._status = Status.COMPLETED
self._percent = 100
else:
self._status = Status.RUNNING
self._percent = 50
return self
def get(self, key):
if key == "async_status":
return self._status
if key == "async_percent_completion":
return self._percent
raise KeyError(key)
# FB SDK typically supports dict-style indexing too
def __getitem__(self, key):
return self.get(key)
def get_result(self, params=None):
# three rows with the requested PK field
return [{self._id_field: 1}, {self._id_field: 2}, {self._id_field: 3}]
@pytest.fixture(name="api_limit")
def api_limit_fixture():
return DummyAPILimit()
@pytest.fixture(name="adreport")
def adreport_fixture(mocker, api):
ao = AdReportRun(fbid="123", api=api)
ao["report_run_id"] = "123"
mocker.patch.object(ao, "api_get", side_effect=ao.api_get)
mocker.patch.object(ao, "get_result", side_effect=ao.get_result)
return ao
@pytest.fixture(name="account")
def account_fixture(mocker, adreport):
account = mocker.Mock(spec=AdAccount)
account.get_insights.return_value = adreport
return account
@pytest.fixture(name="job")
def job_fixture(api, account):
params = {
"level": "ad",
"action_breakdowns": [],
"breakdowns": [],
"fields": ["field1", "field2"],
"time_increment": 1,
"action_attribution_windows": [],
}
interval = DateInterval(date(2019, 1, 1), date(2019, 1, 1))
return InsightAsyncJob(
edge_object=account,
api=api,
interval=interval,
params=params,
job_timeout=timedelta(minutes=60),
)
@pytest.fixture(name="grouped_jobs")
def grouped_jobs_fixture(mocker):
return [
mocker.Mock(spec=InsightAsyncJob, attempt_number=1, failed=False, completed=False, started=False, elapsed_time=None, new_jobs=[])
for _ in range(10)
]
@pytest.fixture(name="parent_job")
def parent_job_fixture(api, grouped_jobs):
interval = DateInterval(date(2019, 1, 1), date(2019, 1, 1))
return ParentAsyncJob(api=api, jobs=grouped_jobs, interval=interval)
@pytest.fixture(name="started_job")
def started_job_fixture(job, adreport, mocker, api_limit):
adreport["async_status"] = Status.RUNNING.value
adreport["async_percent_completion"] = 0
mocker.patch.object(job, "update_job", wraps=job.update_job)
job.start(api_limit)
return job
@pytest.fixture(name="completed_job")
def completed_job_fixture(started_job, adreport):
adreport["async_status"] = Status.COMPLETED.value
adreport["async_percent_completion"] = 100
started_job.update_job()
started_job._check_status()
return started_job
@pytest.fixture(name="late_job")
def late_job_fixture(started_job, adreport):
adreport["async_status"] = Status.COMPLETED.value
adreport["async_percent_completion"] = 100
started_job.update_job()
return started_job
@pytest.fixture(name="failed_job")
def failed_job_fixture(started_job, adreport, api_limit, mocker):
adreport["async_status"] = Status.FAILED.value
adreport["async_percent_completion"] = 0
started_job._check_status()
started_job.start(api_limit)
mocker.patch.object(started_job, "_split_by_edge_class")
started_job._check_status()
return started_job
@pytest.fixture(name="api")
def api_fixture(mocker):
api = mocker.Mock(spec=MyFacebookAdsApi)
api.call().json.return_value = {}
api.call().error.return_value = False
return api
@pytest.fixture(name="batch")
def batch_fixture(api, mocker):
batch = FacebookAdsApiBatch(api=api)
mocker.patch.object(batch, "execute", wraps=batch.execute)
api.new_batch.return_value = batch
return batch
| DummyRun |
python | scipy__scipy | scipy/interpolate/_rbfinterp.py | {
"start": 1689,
"end": 19502
} | class ____:
"""Radial basis function interpolator in N ≥ 1 dimensions.
Parameters
----------
y : (npoints, ndims) array_like
2-D array of data point coordinates.
d : (npoints, ...) array_like
N-D array of data values at `y`. The length of `d` along the first
axis must be equal to the length of `y`. Unlike some interpolators, the
interpolation axis cannot be changed.
neighbors : int, optional
If specified, the value of the interpolant at each evaluation point
will be computed using only this many nearest data points. All the data
points are used by default.
smoothing : float or (npoints, ) array_like, optional
Smoothing parameter. The interpolant perfectly fits the data when this
is set to 0. For large values, the interpolant approaches a least
squares fit of a polynomial with the specified degree. Default is 0.
kernel : str, optional
Type of RBF. This should be one of
- 'linear' : ``-r``
- 'thin_plate_spline' : ``r**2 * log(r)``
- 'cubic' : ``r**3``
- 'quintic' : ``-r**5``
- 'multiquadric' : ``-sqrt(1 + r**2)``
- 'inverse_multiquadric' : ``1/sqrt(1 + r**2)``
- 'inverse_quadratic' : ``1/(1 + r**2)``
- 'gaussian' : ``exp(-r**2)``
Default is 'thin_plate_spline'.
epsilon : float, optional
Shape parameter that scales the input to the RBF. If `kernel` is
'linear', 'thin_plate_spline', 'cubic', or 'quintic', this defaults to
1 and can be ignored because it has the same effect as scaling the
smoothing parameter. Otherwise, this must be specified.
degree : int, optional
Degree of the added polynomial. For some RBFs the interpolant may not
be well-posed if the polynomial degree is too small. Those RBFs and
their corresponding minimum degrees are
- 'multiquadric' : 0
- 'linear' : 0
- 'thin_plate_spline' : 1
- 'cubic' : 1
- 'quintic' : 2
The default value is the minimum degree for `kernel` or 0 if there is
no minimum degree. Set this to -1 for no added polynomial.
Notes
-----
An RBF is a scalar valued function in N-dimensional space whose value at
:math:`x` can be expressed in terms of :math:`r=||x - c||`, where :math:`c`
is the center of the RBF.
An RBF interpolant for the vector of data values :math:`d`, which are from
locations :math:`y`, is a linear combination of RBFs centered at :math:`y`
plus a polynomial with a specified degree. The RBF interpolant is written
as
.. math::
f(x) = K(x, y) a + P(x) b,
where :math:`K(x, y)` is a matrix of RBFs with centers at :math:`y`
evaluated at the points :math:`x`, and :math:`P(x)` is a matrix of
monomials, which span polynomials with the specified degree, evaluated at
:math:`x`. The coefficients :math:`a` and :math:`b` are the solution to the
linear equations
.. math::
(K(x, y) + \\lambda I) a + P(y) b = d
and
.. math::
P(y)^T a = 0,
where :math:`\\lambda` is a non-negative smoothing parameter that controls
how well we want to fit the data. The data are fit exactly when the
smoothing parameter is 0.
The above system is uniquely solvable if the following requirements are
met:
- :math:`P(y)` must have full column rank. :math:`P(y)` always has full
column rank when `degree` is -1 or 0. When `degree` is 1,
:math:`P(y)` has full column rank if the data point locations are not
all collinear (N=2), coplanar (N=3), etc.
- If `kernel` is 'multiquadric', 'linear', 'thin_plate_spline',
'cubic', or 'quintic', then `degree` must not be lower than the
minimum value listed above.
- If `smoothing` is 0, then each data point location must be distinct.
When using an RBF that is not scale invariant ('multiquadric',
'inverse_multiquadric', 'inverse_quadratic', or 'gaussian'), an appropriate
shape parameter must be chosen (e.g., through cross validation). Smaller
values for the shape parameter correspond to wider RBFs. The problem can
become ill-conditioned or singular when the shape parameter is too small.
The memory required to solve for the RBF interpolation coefficients
increases quadratically with the number of data points, which can become
impractical when interpolating more than about a thousand data points.
To overcome memory limitations for large interpolation problems, the
`neighbors` argument can be specified to compute an RBF interpolant for
each evaluation point using only the nearest data points.
.. versionadded:: 1.7.0
See Also
--------
NearestNDInterpolator
LinearNDInterpolator
CloughTocher2DInterpolator
References
----------
.. [1] Fasshauer, G., 2007. Meshfree Approximation Methods with Matlab.
World Scientific Publishing Co.
.. [2] http://amadeus.math.iit.edu/~fass/603_ch3.pdf
.. [3] Wahba, G., 1990. Spline Models for Observational Data. SIAM.
.. [4] http://pages.stat.wisc.edu/~wahba/stat860public/lect/lect8/lect8.pdf
Examples
--------
Demonstrate interpolating scattered data to a grid in 2-D.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.interpolate import RBFInterpolator
>>> from scipy.stats.qmc import Halton
>>> rng = np.random.default_rng()
>>> xobs = 2*Halton(2, seed=rng).random(100) - 1
>>> yobs = np.sum(xobs, axis=1)*np.exp(-6*np.sum(xobs**2, axis=1))
>>> x1 = np.linspace(-1, 1, 50)
>>> xgrid = np.asarray(np.meshgrid(x1, x1, indexing='ij'))
>>> xflat = xgrid.reshape(2, -1).T # make it a 2-D array
>>> yflat = RBFInterpolator(xobs, yobs)(xflat)
>>> ygrid = yflat.reshape(50, 50)
>>> fig, ax = plt.subplots()
>>> ax.pcolormesh(*xgrid, ygrid, vmin=-0.25, vmax=0.25, shading='gouraud')
>>> p = ax.scatter(*xobs.T, c=yobs, s=50, ec='k', vmin=-0.25, vmax=0.25)
>>> fig.colorbar(p)
>>> plt.show()
"""
# generic type compatibility with scipy-stubs
__class_getitem__ = classmethod(GenericAlias)
def __init__(self, y, d,
neighbors=None,
smoothing=0.0,
kernel="thin_plate_spline",
epsilon=None,
degree=None):
xp = array_namespace(y, d, smoothing)
_backend = _get_backend(xp)
if neighbors is not None:
if not is_numpy(xp):
raise NotImplementedError(
"neighbors not None is numpy-only because it relies on KDTree"
)
y = _asarray(y, dtype=xp.float64, order="C", xp=xp)
if y.ndim != 2:
raise ValueError("`y` must be a 2-dimensional array.")
ny, ndim = y.shape
d = xp.asarray(d)
if xp.isdtype(d.dtype, 'complex floating'):
d_dtype = xp.complex128
else:
d_dtype = xp.float64
d = _asarray(d, dtype=d_dtype, order="C", xp=xp)
if d.shape[0] != ny:
raise ValueError(
f"Expected the first axis of `d` to have length {ny}."
)
d_shape = d.shape[1:]
d = xp.reshape(d, (ny, -1))
# If `d` is complex, convert it to a float array with twice as many
# columns. Otherwise, the LHS matrix would need to be converted to
# complex and take up 2x more memory than necessary.
d = d.view(float) # NB not Array API compliant (and jax copies)
if isinstance(smoothing, int | float) or smoothing.shape == ():
smoothing = xp.full(ny, smoothing, dtype=xp.float64)
else:
smoothing = _asarray(smoothing, dtype=float, order="C", xp=xp)
if smoothing.shape != (ny,):
raise ValueError(
"Expected `smoothing` to be a scalar or have shape "
f"({ny},)."
)
kernel = kernel.lower()
if kernel not in _AVAILABLE:
raise ValueError(f"`kernel` must be one of {_AVAILABLE}.")
if epsilon is None:
if kernel in _SCALE_INVARIANT:
epsilon = 1.0
else:
raise ValueError(
"`epsilon` must be specified if `kernel` is not one of "
f"{_SCALE_INVARIANT}."
)
else:
epsilon = float(epsilon)
min_degree = _NAME_TO_MIN_DEGREE.get(kernel, -1)
if degree is None:
degree = max(min_degree, 0)
else:
degree = int(degree)
if degree < -1:
raise ValueError("`degree` must be at least -1.")
elif -1 < degree < min_degree:
warnings.warn(
f"`degree` should not be below {min_degree} except -1 "
f"when `kernel` is '{kernel}'."
f"The interpolant may not be uniquely "
f"solvable, and the smoothing parameter may have an "
f"unintuitive effect.",
UserWarning, stacklevel=2
)
if neighbors is None:
nobs = ny
else:
# Make sure the number of nearest neighbors used for interpolation
# does not exceed the number of observations.
neighbors = int(min(neighbors, ny))
nobs = neighbors
powers = _backend._monomial_powers(ndim, degree, xp)
# The polynomial matrix must have full column rank in order for the
# interpolant to be well-posed, which is not possible if there are
# fewer observations than monomials.
if powers.shape[0] > nobs:
raise ValueError(
f"At least {powers.shape[0]} data points are required when "
f"`degree` is {degree} and the number of dimensions is {ndim}."
)
if neighbors is None:
shift, scale, coeffs = _backend._build_and_solve_system(
y, d, smoothing, kernel, epsilon, powers,
xp
)
# Make these attributes private since they do not always exist.
self._shift = shift
self._scale = scale
self._coeffs = coeffs
else:
self._tree = KDTree(y)
self.y = y
self.d = d
self.d_shape = d_shape
self.d_dtype = d_dtype
self.neighbors = neighbors
self.smoothing = smoothing
self.kernel = kernel
self.epsilon = epsilon
self.powers = powers
self._xp = xp
def __setstate__(self, state):
tpl1, tpl2 = state
(self.y, self.d, self.d_shape, self.d_dtype, self.neighbors,
self.smoothing, self.kernel, self.epsilon, self.powers) = tpl1
if self.neighbors is None:
self._shift, self._scale, self._coeffs = tpl2
else:
self._tree, = tpl2
self._xp = array_namespace(self.y, self.d, self.smoothing)
def __getstate__(self):
tpl = (self.y, self.d, self.d_shape, self.d_dtype, self.neighbors,
self.smoothing, self.kernel, self.epsilon, self.powers
)
if self.neighbors is None:
tpl2 = (self._shift, self._scale, self._coeffs)
else:
tpl2 = (self._tree,)
return (tpl, tpl2)
def _chunk_evaluator(
self,
x,
y,
shift,
scale,
coeffs,
memory_budget=1000000
):
"""
Evaluate the interpolation while controlling memory consumption.
We chunk the input if we need more memory than specified.
Parameters
----------
x : (Q, N) float ndarray
array of points on which to evaluate
y: (P, N) float ndarray
array of points on which we know function values
shift: (N, ) ndarray
Domain shift used to create the polynomial matrix.
scale : (N,) float ndarray
Domain scaling used to create the polynomial matrix.
coeffs: (P+R, S) float ndarray
Coefficients in front of basis functions
memory_budget: int
Total amount of memory (in units of sizeof(float)) we wish
to devote for storing the array of coefficients for
interpolated points. If we need more memory than that, we
chunk the input.
Returns
-------
(Q, S) float ndarray
Interpolated array
"""
_backend = _get_backend(self._xp)
nx, ndim = x.shape
if self.neighbors is None:
nnei = y.shape[0]
else:
nnei = self.neighbors
# in each chunk we consume the same space we already occupy
chunksize = memory_budget // (self.powers.shape[0] + nnei) + 1
if chunksize <= nx:
out = self._xp.empty((nx, self.d.shape[1]), dtype=self._xp.float64)
for i in range(0, nx, chunksize):
chunk = _backend.compute_interpolation(
x[i:i + chunksize, :],
y,
self.kernel,
self.epsilon,
self.powers,
shift,
scale,
coeffs,
self._xp
)
out = xpx.at(out, (slice(i, i + chunksize), slice(None,))).set(chunk)
else:
out = _backend.compute_interpolation(
x,
y,
self.kernel,
self.epsilon,
self.powers,
shift,
scale,
coeffs,
self._xp
)
return out
def __call__(self, x):
"""Evaluate the interpolant at `x`.
Parameters
----------
x : (npts, ndim) array_like
Evaluation point coordinates.
Returns
-------
ndarray, shape (npts, )
Values of the interpolant at `x`.
"""
x = _asarray(x, dtype=self._xp.float64, order="C", xp=self._xp)
if x.ndim != 2:
raise ValueError("`x` must be a 2-dimensional array.")
nx, ndim = x.shape
if ndim != self.y.shape[1]:
raise ValueError("Expected the second axis of `x` to have length "
f"{self.y.shape[1]}.")
# Our memory budget for storing RBF coefficients is
# based on how many floats in memory we already occupy
# If this number is below 1e6 we just use 1e6
# This memory budget is used to decide how we chunk
# the inputs
memory_budget = max(xp_size(x) + xp_size(self.y) + xp_size(self.d), 1_000_000)
if self.neighbors is None:
out = self._chunk_evaluator(
x,
self.y,
self._shift,
self._scale,
self._coeffs,
memory_budget=memory_budget)
else:
# XXX: this relies on KDTree, hence is numpy-only until KDTree is converted
_build_and_solve_system = _get_backend(np)._build_and_solve_system
# Get the indices of the k nearest observation points to each
# evaluation point.
_, yindices = self._tree.query(x, self.neighbors)
if self.neighbors == 1:
# `KDTree` squeezes the output when neighbors=1.
yindices = yindices[:, None]
# Multiple evaluation points may have the same neighborhood of
# observation points. Make the neighborhoods unique so that we only
# compute the interpolation coefficients once for each
# neighborhood.
yindices = np.sort(yindices, axis=1)
yindices, inv = np.unique(yindices, return_inverse=True, axis=0)
inv = np.reshape(inv, (-1,)) # flatten, we need 1-D indices
# `inv` tells us which neighborhood will be used by each evaluation
# point. Now we find which evaluation points will be using each
# neighborhood.
xindices = [[] for _ in range(len(yindices))]
for i, j in enumerate(inv):
xindices[j].append(i)
out = np.empty((nx, self.d.shape[1]), dtype=float)
for xidx, yidx in zip(xindices, yindices):
# `yidx` are the indices of the observations in this
# neighborhood. `xidx` are the indices of the evaluation points
# that are using this neighborhood.
xnbr = x[xidx]
ynbr = self.y[yidx]
dnbr = self.d[yidx]
snbr = self.smoothing[yidx]
shift, scale, coeffs = _build_and_solve_system(
ynbr,
dnbr,
snbr,
self.kernel,
self.epsilon,
self.powers,
np
)
out[xidx] = self._chunk_evaluator(
xnbr,
ynbr,
shift,
scale,
coeffs,
memory_budget=memory_budget)
out = out.view(self.d_dtype) # NB not Array API compliant (and jax copies)
out = self._xp.reshape(out, (nx, ) + self.d_shape)
return out
| RBFInterpolator |
python | airbytehq__airbyte | airbyte-ci/connectors/live-tests/src/live_tests/commons/json_schema_helper.py | {
"start": 2212,
"end": 13124
} | class ____:
"""Helper class to simplify schema validation and read of records according to their schema."""
def __init__(self, schema):
self._schema = schema
def get_ref(self, path: str) -> Any:
"""Resolve reference
:param path: reference (#/definitions/SomeClass, etc)
:return: part of schema that is definition of the reference
:raises KeyError: in case path can't be followed
"""
node = self._schema
for segment in path.split("/")[1:]:
node = node[segment]
return node
def get_property(self, path: List[str]) -> Mapping[str, Any]:
"""Get any part of schema according to provided path, resolves $refs if necessary
schema = {
"properties": {
"field1": {
"properties": {
"nested_field": {
<inner_object>
}
}
},
"field2": ...
}
}
helper = JsonSchemaHelper(schema)
helper.get_property(["field1", "nested_field"]) == <inner_object>
:param path: list of fields in the order of navigation
:return: discovered part of schema
:raises KeyError: in case path can't be followed
"""
node = self._schema
for segment in path:
if "$ref" in node:
node = self.get_ref(node["$ref"])
node = node["properties"][segment]
return node
def field(self, path: List[str]) -> CatalogField:
"""Get schema property and wrap it into CatalogField.
CatalogField is a helper to ease the read of values from records according to schema definition.
:param path: list of fields in the order of navigation
:return: discovered part of schema wrapped in CatalogField
:raises KeyError: in case path can't be followed
"""
return CatalogField(schema=self.get_property(path), path=path)
def get_node(self, path: List[Union[str, int]]) -> Any:
"""Return part of schema by specified path
:param path: list of fields in the order of navigation
"""
node = self._schema
for segment in path:
if "$ref" in node:
node = self.get_ref(node["$ref"])
node = node[segment]
return node
def get_parent_path(self, path: str, separator="/") -> Any:
"""
Returns the parent path of the supplied path
"""
absolute_path = f"{separator}{path}" if not path.startswith(separator) else path
parent_path, _ = absolute_path.rsplit(sep=separator, maxsplit=1)
return parent_path
def get_parent(self, path: str, separator="/") -> Any:
"""
Returns the parent dict of a given path within the `obj` dict
"""
parent_path = self.get_parent_path(path, separator=separator)
if parent_path == "":
return self._schema
return dpath.util.get(self._schema, parent_path, separator=separator)
def find_nodes(self, keys: List[str]) -> List[List[Union[str, int]]]:
"""Find all paths that lead to nodes with the specified keys.
:param keys: list of keys
:return: list of json object paths
"""
variant_paths = []
def traverse_schema(_schema: Union[Dict[Text, Any], List], path=None):
path = path or []
if path and path[-1] in keys:
variant_paths.append(path)
if isinstance(_schema, dict):
for item in _schema:
traverse_schema(_schema[item], [*path, item])
elif isinstance(_schema, list):
for i, item in enumerate(_schema):
traverse_schema(_schema[i], [*path, i])
traverse_schema(self._schema)
return variant_paths
def get_object_structure(obj: dict) -> List[str]:
"""
Traverse through object structure and compose a list of property keys including nested one.
This list reflects object's structure with list of all obj property key
paths. In case if object is nested inside array we assume that it has same
structure as first element.
:param obj: data object to get its structure
:returns list of object property keys paths
"""
paths = []
def _traverse_obj_and_get_path(obj, path=""):
if path:
paths.append(path)
if isinstance(obj, dict):
return {k: _traverse_obj_and_get_path(v, path + "/" + k) for k, v in obj.items()}
elif isinstance(obj, list) and len(obj) > 0:
return [_traverse_obj_and_get_path(obj[0], path + "/[]")]
_traverse_obj_and_get_path(obj)
return paths
def get_expected_schema_structure(schema: dict, annotate_one_of: bool = False) -> List[str]:
"""
Traverse through json schema and compose list of property keys that object expected to have.
:param annotate_one_of: Generate one_of index in path
:param schema: jsonschema to get expected paths
:returns list of object property keys paths
"""
paths = []
if "$ref" in schema:
"""
JsonRef doesnt work correctly with schemas that has refenreces in root e.g.
{
"$ref": "#/definitions/ref"
"definitions": {
"ref": ...
}
}
Considering this schema already processed by resolver so it should
contain only references to definitions section, replace root reference
manually before processing it with JsonRef library.
"""
ref = schema["$ref"].split("/")[-1]
schema.update(schema["definitions"][ref])
schema.pop("$ref")
# Resolve all references to simplify schema processing.
schema = JsonRef.replace_refs(schema)
def _scan_schema(subschema, path=""):
if "oneOf" in subschema or "anyOf" in subschema:
if annotate_one_of:
return [
_scan_schema({"type": "object", **s}, path + f"({num})")
for num, s in enumerate(subschema.get("oneOf") or subschema.get("anyOf"))
]
return [_scan_schema({"type": "object", **s}, path) for s in subschema.get("oneOf") or subschema.get("anyOf")]
schema_type = subschema.get("type", ["object", "null"])
if not isinstance(schema_type, list):
schema_type = [schema_type]
if "object" in schema_type:
props = subschema.get("properties")
if not props:
# Handle objects with arbitrary properties:
# {"type": "object", "additionalProperties": {"type": "string"}}
if path:
paths.append(path)
return
return {k: _scan_schema(v, path + "/" + k) for k, v in props.items()}
elif "array" in schema_type:
items = subschema.get("items", {})
return [_scan_schema(items, path + "/[]")]
paths.append(path)
_scan_schema(schema)
return paths
def flatten_tuples(to_flatten):
"""Flatten a tuple of tuples into a single tuple."""
types = set()
if not isinstance(to_flatten, tuple):
to_flatten = (to_flatten,)
for thing in to_flatten:
if isinstance(thing, tuple):
types.update(flatten_tuples(thing))
else:
types.add(thing)
return tuple(types)
def get_paths_in_connector_config(schema: dict) -> List[str]:
"""
Traverse through the provided schema's values and extract the path_in_connector_config paths
:param properties: jsonschema containing values which may have path_in_connector_config attributes
:returns list of path_in_connector_config paths
"""
return ["/" + "/".join(value["path_in_connector_config"]) for value in schema.values()]
def conforms_to_schema(record: Mapping[str, Any], schema: Mapping[str, Any]) -> bool:
"""
Return true iff the record conforms to the supplied schema.
The record conforms to the supplied schema iff:
- All columns in the record are in the schema.
- For every column in the record, that column's type is equal to or narrower than the same column's
type in the schema.
"""
schema_columns = set(schema.get("properties", {}).keys())
record_columns = set(record.keys())
if not record_columns.issubset(schema_columns):
return False
for column, definition in schema.get("properties", {}).items():
expected_type = definition.get("type")
value = record.get(column)
if value is not None:
if isinstance(expected_type, list):
return any(_is_equal_or_narrower_type(value, e) for e in expected_type)
elif expected_type == "object":
return isinstance(value, dict)
elif expected_type == "array":
if not isinstance(value, list):
return False
array_type = definition.get("items", {}).get("type")
if not all(_is_equal_or_narrower_type(v, array_type) for v in value):
return False
elif not _is_equal_or_narrower_type(value, expected_type):
return False
return True
def _is_equal_or_narrower_type(value: Any, expected_type: str) -> bool:
if isinstance(value, list):
# We do not compare lists directly; the individual items are compared.
# If we hit this condition, it means that the expected type is not
# compatible with the inferred type.
return False
inferred_type = ComparableType(_get_inferred_type(value))
if inferred_type is None:
return False
return ComparableType(inferred_type) <= ComparableType(_get_comparable_type(expected_type))
def _get_inferred_type(value: Any) -> Optional[ComparableType]:
if value is None:
return ComparableType.NULL
if isinstance(value, bool):
return ComparableType.BOOLEAN
if isinstance(value, int):
return ComparableType.INTEGER
if isinstance(value, float):
return ComparableType.NUMBER
if isinstance(value, str):
return ComparableType.STRING
if isinstance(value, dict):
return ComparableType.OBJECT
else:
return None
def _get_comparable_type(value: Any) -> Optional[ComparableType]:
if value == "null":
return ComparableType.NULL
if value == "boolean":
return ComparableType.BOOLEAN
if value == "integer":
return ComparableType.INTEGER
if value == "number":
return ComparableType.NUMBER
if value == "string":
return ComparableType.STRING
if value == "object":
return ComparableType.OBJECT
else:
return None
| JsonSchemaHelper |
python | redis__redis-py | tests/test_search.py | {
"start": 113799,
"end": 117104
} | class ____(SearchTestsBase):
@pytest.mark.redismod
@skip_if_redis_enterprise()
def test_search_commands_in_pipeline(self, client):
p = client.ft().pipeline()
p.create_index((TextField("txt"),))
p.hset("doc1", mapping={"txt": "foo bar"})
p.hset("doc2", mapping={"txt": "foo bar"})
q = Query("foo bar").with_payloads()
p.search(q)
res = p.execute()
if is_resp2_connection(client):
assert res[:3] == ["OK", True, True]
assert 2 == res[3][0]
assert "doc1" == res[3][1]
assert "doc2" == res[3][4]
assert res[3][5] is None
assert res[3][3] == res[3][6] == ["txt", "foo bar"]
else:
assert res[:3] == ["OK", True, True]
assert 2 == res[3]["total_results"]
assert "doc1" == res[3]["results"][0]["id"]
assert "doc2" == res[3]["results"][1]["id"]
assert res[3]["results"][0]["payload"] is None
assert (
res[3]["results"][0]["extra_attributes"]
== res[3]["results"][1]["extra_attributes"]
== {"txt": "foo bar"}
)
@pytest.mark.redismod
@skip_if_server_version_lt("8.3.224")
def test_hybrid_search_query_with_pipeline(self, client):
p = client.ft().pipeline()
p.create_index(
(
TextField("txt"),
VectorField(
"embedding",
"FLAT",
{"TYPE": "FLOAT32", "DIM": 4, "DISTANCE_METRIC": "L2"},
),
)
)
p.hset(
"doc1",
mapping={
"txt": "foo bar",
"embedding": np.array([1, 2, 3, 4], dtype=np.float32).tobytes(),
},
)
p.hset(
"doc2",
mapping={
"txt": "foo bar",
"embedding": np.array([1, 2, 2, 3], dtype=np.float32).tobytes(),
},
)
# set search query
search_query = HybridSearchQuery("foo")
vsim_query = HybridVsimQuery(
vector_field_name="@embedding",
vector_data=np.array([2, 2, 3, 3], dtype=np.float32).tobytes(),
)
hybrid_query = HybridQuery(search_query, vsim_query)
p.hybrid_search(query=hybrid_query)
res = p.execute()
# the default results count limit is 10
assert res[:3] == ["OK", 2, 2]
hybrid_search_res = res[3]
if is_resp2_connection(client):
# it doesn't get parsed to object in pipeline
assert hybrid_search_res[0] == "total_results"
assert hybrid_search_res[1] == 2
assert hybrid_search_res[2] == "results"
assert len(hybrid_search_res[3]) == 2
assert hybrid_search_res[4] == "warnings"
assert hybrid_search_res[5] == []
assert hybrid_search_res[6] == "execution_time"
assert float(hybrid_search_res[7]) > 0
else:
assert hybrid_search_res["total_results"] == 2
assert len(hybrid_search_res["results"]) == 2
assert hybrid_search_res["warnings"] == []
assert hybrid_search_res["execution_time"] > 0
| TestPipeline |
python | huggingface__transformers | src/transformers/models/dinat/configuration_dinat.py | {
"start": 919,
"end": 7356
} | class ____(BackboneConfigMixin, PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DinatModel`]. It is used to instantiate a Dinat
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Dinat
[shi-labs/dinat-mini-in1k-224](https://huggingface.co/shi-labs/dinat-mini-in1k-224) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
patch_size (`int`, *optional*, defaults to 4):
The size (resolution) of each patch. NOTE: Only patch size of 4 is supported at the moment.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embed_dim (`int`, *optional*, defaults to 64):
Dimensionality of patch embedding.
depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 5]`):
Number of layers in each level of the encoder.
num_heads (`list[int]`, *optional*, defaults to `[2, 4, 8, 16]`):
Number of attention heads in each layer of the Transformer encoder.
kernel_size (`int`, *optional*, defaults to 7):
Neighborhood Attention kernel size.
dilations (`list[list[int]]`, *optional*, defaults to `[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]]`):
Dilation value of each NA layer in the Transformer encoder.
mlp_ratio (`float`, *optional*, defaults to 3.0):
Ratio of MLP hidden dimensionality to embedding dimensionality.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether or not a learnable bias should be added to the queries, keys and values.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings and encoder.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
drop_path_rate (`float`, *optional*, defaults to 0.1):
Stochastic depth rate.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`,
`"selu"` and `"gelu_new"` are supported.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
layer_scale_init_value (`float`, *optional*, defaults to 0.0):
The initial value for the layer scale. Disabled if <=0.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
Example:
```python
>>> from transformers import DinatConfig, DinatModel
>>> # Initializing a Dinat shi-labs/dinat-mini-in1k-224 style configuration
>>> configuration = DinatConfig()
>>> # Initializing a model (with random weights) from the shi-labs/dinat-mini-in1k-224 style configuration
>>> model = DinatModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "dinat"
attribute_map = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__(
self,
patch_size=4,
num_channels=3,
embed_dim=64,
depths=[3, 4, 6, 5],
num_heads=[2, 4, 8, 16],
kernel_size=7,
dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
mlp_ratio=3.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
initializer_range=0.02,
layer_norm_eps=1e-5,
layer_scale_init_value=0.0,
out_features=None,
out_indices=None,
**kwargs,
):
super().__init__(**kwargs)
self.patch_size = patch_size
self.num_channels = num_channels
self.embed_dim = embed_dim
self.depths = depths
self.num_layers = len(depths)
self.num_heads = num_heads
self.kernel_size = kernel_size
self.dilations = dilations
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
self.layer_scale_init_value = layer_scale_init_value
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
__all__ = ["DinatConfig"]
| DinatConfig |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/tree_clearing.py | {
"start": 77,
"end": 805
} | class ____(App[None]):
CSS = """
Screen {
layout: horizontal;
}
"""
@staticmethod
def _populate(tree: Tree) -> Tree:
for n in range(5):
branch = tree.root.add(str(n))
for m in range(5):
branch.add_leaf(f"{n}-{m}")
return tree
def compose(self) -> ComposeResult:
yield self._populate(Tree("Left", id="left"))
yield self._populate(Tree("Right", id="right"))
def on_mount(self) -> None:
self.query_one("#left", Tree).root.expand()
self.query_one("#left", Tree).clear()
self.query_one("#right", Tree).clear()
if __name__ == "__main__":
TreeClearingSnapshotApp().run()
| TreeClearingSnapshotApp |
python | getsentry__sentry | src/sentry/auth/providers/github/provider.py | {
"start": 861,
"end": 3620
} | class ____(OAuth2Provider):
access_token_url = ACCESS_TOKEN_URL
authorize_url = AUTHORIZE_URL
name = "GitHub"
key = IntegrationProviderSlug.GITHUB.value
def get_client_id(self) -> str:
assert isinstance(CLIENT_ID, str)
return CLIENT_ID
def get_client_secret(self) -> str:
assert isinstance(CLIENT_SECRET, str)
return CLIENT_SECRET
def __init__(self, org: RpcOrganization | dict[str, Any] | None = None, **config: Any) -> None:
super().__init__(**config)
self.org = org
def get_configure_view(
self,
) -> Callable[[HttpRequest, RpcOrganization, RpcAuthProvider], DeferredResponse]:
return github_configure_view
def get_auth_pipeline(self) -> list[AuthView]:
return [
OAuth2Login(
authorize_url=self.authorize_url, client_id=self.get_client_id(), scope=SCOPE
),
OAuth2Callback(
access_token_url=self.access_token_url,
client_id=self.get_client_id(),
client_secret=self.get_client_secret(),
),
FetchUser(org=self.org),
ConfirmEmail(),
]
def get_setup_pipeline(self) -> list[AuthView]:
pipeline = self.get_auth_pipeline()
pipeline.append(SelectOrganization())
return pipeline
def get_refresh_token_url(self) -> str:
return ACCESS_TOKEN_URL
def build_config(self, state: Mapping[str, Any]) -> dict[str, dict[str, Any]]:
"""
On configuration, we determine which provider organization to configure SSO for
This configuration is then stored and passed into the pipeline instances during SSO
to determine whether the Auth'd user has the appropriate access to the provider org
"""
return {"org": {"id": state["org"]["id"], "name": state["org"]["login"]}}
def build_identity(self, state: Mapping[str, Any]) -> Mapping[str, Any]:
data = state["data"]
user_data = state["user"]
return {
"id": user_data["id"],
"email": user_data["email"],
"name": user_data["name"],
"data": self.get_oauth_data(data),
}
def refresh_identity(self, auth_identity: AuthIdentity) -> None:
with GitHubClient(auth_identity.data["access_token"]) as client:
try:
if not self.org:
raise IdentityNotValid
org_id = self.org.id if isinstance(self.org, RpcOrganization) else self.org["id"]
if not client.is_org_member(org_id):
raise IdentityNotValid
except GitHubApiError as e:
raise IdentityNotValid(e)
| GitHubOAuth2Provider |
python | django__django | tests/migrations/migrations_test_apps/conflicting_app_with_dependencies/migrations/0002_conflicting_second.py | {
"start": 43,
"end": 354
} | class ____(migrations.Migration):
dependencies = [
("conflicting_app_with_dependencies", "0001_initial"),
]
operations = [
migrations.CreateModel(
"Something",
[
("id", models.AutoField(primary_key=True)),
],
)
]
| Migration |
python | django__django | django/core/serializers/__init__.py | {
"start": 858,
"end": 8785
} | class ____:
"""
Stub serializer to hold exception raised during registration
This allows the serializer registration to cache serializers and if there
is an error raised in the process of creating a serializer it will be
raised and passed along to the caller when the serializer is used.
"""
internal_use_only = False
def __init__(self, exception):
self.exception = exception
def __call__(self, *args, **kwargs):
raise self.exception
def register_serializer(format, serializer_module, serializers=None):
"""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
if serializers is None and not _serializers:
_load_serializers()
try:
module = importlib.import_module(serializer_module)
except ImportError as exc:
bad_serializer = BadSerializer(exc)
module = type(
"BadSerializerModule",
(),
{
"Deserializer": bad_serializer,
"Serializer": bad_serializer,
},
)
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return list(_serializers)
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in _serializers.items() if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
if format not in _serializers:
raise SerializerDoesNotExist(format)
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string, **options):
"""
Deserialize a stream or a string. Return an iterator that yields ``(obj,
m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string, **options)
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(
format, settings.SERIALIZATION_MODULES[format], serializers
)
_serializers = serializers
def sort_dependencies(app_list, allow_cycles=False):
"""Sort a list of (app_config, models) pairs into a single list of models.
The single list of models is sorted so that any model with a natural key
is serialized before a normal model, and any model with a natural key
dependency has it's dependencies serialized first.
If allow_cycles is True, return the best-effort ordering that will respect
most of dependencies but ignore some of them to break the cycles.
"""
# Process the list of models, and get the list of dependencies
model_dependencies = []
models = set()
for app_config, model_list in app_list:
if model_list is None:
model_list = app_config.get_models()
for model in model_list:
models.add(model)
# Add any explicitly defined dependencies
if hasattr(model, "natural_key"):
deps = getattr(model.natural_key, "dependencies", [])
if deps:
deps = [apps.get_model(dep) for dep in deps]
else:
deps = []
# Now add a dependency for any FK relation with a model that
# defines a natural key
for field in model._meta.fields:
if field.remote_field:
rel_model = field.remote_field.model
if hasattr(rel_model, "natural_key") and rel_model != model:
deps.append(rel_model)
# Also add a dependency for any simple M2M relation with a model
# that defines a natural key. M2M relations with explicit through
# models don't count as dependencies.
for field in model._meta.many_to_many:
if field.remote_field.through._meta.auto_created:
rel_model = field.remote_field.model
if hasattr(rel_model, "natural_key") and rel_model != model:
deps.append(rel_model)
model_dependencies.append((model, deps))
model_dependencies.reverse()
# Now sort the models to ensure that dependencies are met. This
# is done by repeatedly iterating over the input list of models.
# If all the dependencies of a given model are in the final list,
# that model is promoted to the end of the final list. This process
# continues until the input list is empty, or we do a full iteration
# over the input models without promoting a model to the final list.
# If we do a full iteration without a promotion, that means there are
# circular dependencies in the list.
model_list = []
while model_dependencies:
skipped = []
changed = False
while model_dependencies:
model, deps = model_dependencies.pop()
# If all of the models in the dependency list are either already
# on the final model list, or not on the original serialization
# list, then we've found another model with all it's dependencies
# satisfied.
if all(d not in models or d in model_list for d in deps):
model_list.append(model)
changed = True
else:
skipped.append((model, deps))
if not changed:
if allow_cycles:
# If cycles are allowed, add the last skipped model and ignore
# its dependencies. This could be improved by some graph
# analysis to ignore as few dependencies as possible.
model, _ = skipped.pop()
model_list.append(model)
else:
raise RuntimeError(
"Can't resolve dependencies for %s in serialized app list."
% ", ".join(
model._meta.label
for model, deps in sorted(
skipped, key=lambda obj: obj[0].__name__
)
),
)
model_dependencies = skipped
return model_list
| BadSerializer |
python | apache__airflow | providers/microsoft/winrm/tests/unit/microsoft/winrm/operators/test_winrm.py | {
"start": 1020,
"end": 4657
} | class ____:
def test_no_winrm_hook_no_ssh_conn_id(self):
op = WinRMOperator(task_id="test_task_id", winrm_hook=None, ssh_conn_id=None)
exception_msg = "Cannot operate without winrm_hook or ssh_conn_id."
with pytest.raises(AirflowException, match=exception_msg):
op.execute(None)
@mock.patch("airflow.providers.microsoft.winrm.operators.winrm.WinRMHook")
def test_no_command(self, mock_hook):
op = WinRMOperator(task_id="test_task_id", winrm_hook=mock_hook, command=None)
exception_msg = "No command specified so nothing to execute here."
with pytest.raises(AirflowException, match=exception_msg):
op.execute(None)
@mock.patch("airflow.providers.microsoft.winrm.operators.winrm.WinRMHook")
def test_default_returning_0_command(self, mock_hook):
stdout = [b"O", b"K"]
command = "not_empty"
working_dir = "c:\\temp"
mock_hook.run.return_value = (0, stdout, [])
op = WinRMOperator(
task_id="test_task_id", winrm_hook=mock_hook, command=command, working_directory=working_dir
)
execute_result = op.execute(None)
assert execute_result == b64encode(b"".join(stdout)).decode("utf-8")
mock_hook.run.assert_called_once_with(
command=command,
ps_path=None,
output_encoding="utf-8",
return_output=True,
working_directory=working_dir,
)
@mock.patch("airflow.providers.microsoft.winrm.operators.winrm.WinRMHook")
def test_default_returning_1_command(self, mock_hook):
stderr = [b"K", b"O"]
command = "not_empty"
mock_hook.run.return_value = (1, [], stderr)
op = WinRMOperator(task_id="test_task_id", winrm_hook=mock_hook, command=command)
exception_msg = f"Error running cmd: {command}, return code: 1, error: KO"
with pytest.raises(AirflowException, match=exception_msg):
op.execute(None)
@mock.patch("airflow.providers.microsoft.winrm.operators.winrm.WinRMHook")
@pytest.mark.parametrize("expected_return_code", [1, [1, 2], range(1, 3)])
@pytest.mark.parametrize("real_return_code", [0, 1, 2])
def test_expected_return_code_command(self, mock_hook, expected_return_code, real_return_code):
stdout = [b"O", b"K"]
stderr = [b"K", b"O"]
command = "not_empty"
mock_hook.run.return_value = (real_return_code, stdout, stderr)
op = WinRMOperator(
task_id="test_task_id",
winrm_hook=mock_hook,
command=command,
expected_return_code=expected_return_code,
)
should_task_succeed = False
if isinstance(expected_return_code, int):
should_task_succeed = real_return_code == expected_return_code
elif isinstance(expected_return_code, list) or isinstance(expected_return_code, range):
should_task_succeed = real_return_code in expected_return_code
if should_task_succeed:
execute_result = op.execute(None)
assert execute_result == b64encode(b"".join(stdout)).decode("utf-8")
mock_hook.run.assert_called_once_with(
command=command,
ps_path=None,
output_encoding="utf-8",
return_output=True,
working_directory=None,
)
else:
exception_msg = f"Error running cmd: {command}, return code: {real_return_code}, error: KO"
with pytest.raises(AirflowException, match=exception_msg):
op.execute(None)
| TestWinRMOperator |
python | hyperopt__hyperopt | hyperopt/pyll/base.py | {
"start": 688,
"end": 5161
} | class ____:
"""
An object whose methods generally allocate Apply nodes.
_impls is a dictionary containing implementations for those nodes.
>>> self.add(a, b) # -- creates a new 'add' Apply node
>>> self._impl['add'](a, b) # -- this computes a + b
"""
def __init__(self):
# -- list and dict are special because they are Python builtins
self._impls = {
"list": list,
"dict": dict,
"range": range,
"len": len,
"int": int,
"float": float,
"map": map,
"max": max,
"min": min,
"getattr": getattr,
}
def _new_apply(self, name, args, kwargs, o_len, pure):
pos_args = [as_apply(a) for a in args]
named_args = [(k, as_apply(v)) for (k, v) in list(kwargs.items())]
named_args.sort()
return Apply(
name, pos_args=pos_args, named_args=named_args, o_len=o_len, pure=pure
)
def dict(self, *args, **kwargs):
# XXX: figure out len
return self._new_apply("dict", args, kwargs, o_len=None, pure=True)
def int(self, arg):
return self._new_apply("int", [as_apply(arg)], {}, o_len=None, pure=True)
def float(self, arg):
return self._new_apply("float", [as_apply(arg)], {}, o_len=None, pure=True)
def len(self, obj):
return self._new_apply("len", [obj], {}, o_len=None, pure=True)
def list(self, init):
return self._new_apply("list", [as_apply(init)], {}, o_len=None, pure=True)
def map(self, fn, seq, pure=False):
"""
pure - True is assertion that fn does not modify seq[i]
"""
return self._new_apply(
"map", [as_apply(fn), as_apply(seq)], {}, o_len=seq.o_len, pure=pure
)
def range(self, *args):
return self._new_apply("range", args, {}, o_len=None, pure=True)
def max(self, *args):
"""return max of args"""
return self._new_apply(
"max", list(map(as_apply, args)), {}, o_len=None, pure=True
)
def min(self, *args):
"""return min of args"""
return self._new_apply(
"min", list(map(as_apply, args)), {}, o_len=None, pure=True
)
def getattr(self, obj, attr, *args):
return self._new_apply(
"getattr",
[as_apply(obj), as_apply(attr)] + list(map(as_apply, args)),
{},
o_len=None,
pure=True,
)
def _define(self, f, o_len, pure):
name = f.__name__
entry = SymbolTableEntry(self, name, o_len, pure)
setattr(self, name, entry)
self._impls[name] = f
return f
def define(self, f, o_len=None, pure=False):
"""Decorator for adding python functions to self"""
name = f.__name__
if hasattr(self, name):
raise ValueError("Cannot override existing symbol", name)
return self._define(f, o_len, pure)
def define_if_new(self, f, o_len=None, pure=False):
"""Pass silently if f matches the current implementation
for f.__name__"""
name = f.__name__
if hasattr(self, name) and self._impls[name] is not f:
raise ValueError("Cannot redefine existing symbol", name)
return self._define(f, o_len, pure)
def undefine(self, f):
if isinstance(f, str):
name = f
else:
name = f.__name__
del self._impls[name]
delattr(self, name)
def define_pure(self, f):
return self.define(f, o_len=None, pure=True)
def define_info(self, o_len=None, pure=False):
def wrapper(f):
return self.define(f, o_len=o_len, pure=pure)
return wrapper
def inject(self, *args, **kwargs):
"""
Add symbols from self into a dictionary and return the dict.
This is used for import-like syntax: see `import_`.
"""
rval = {}
for k in args:
try:
rval[k] = getattr(self, k)
except AttributeError:
raise PyllImportError(k)
for k, origk in list(kwargs.items()):
try:
rval[k] = getattr(self, origk)
except AttributeError:
raise PyllImportError(origk)
return rval
def import_(self, _globals, *args, **kwargs):
_globals.update(self.inject(*args, **kwargs))
| SymbolTable |
python | walkccc__LeetCode | solutions/1189. Maximum Number of Balloons/1189.py | {
"start": 0,
"end": 215
} | class ____:
def maxNumberOfBalloons(self, text: str) -> int:
count = collections.Counter(text)
return min(
count['b'],
count['a'],
count['l'] // 2, count['o'] // 2, count['n'])
| Solution |
python | walkccc__LeetCode | solutions/3233. Find the Count of Numbers Which Are Not Special/3233.py | {
"start": 0,
"end": 592
} | class ____:
def nonSpecialCount(self, l: int, r: int) -> int:
maxRoot = math.isqrt(r)
isPrime = self._sieveEratosthenes(maxRoot + 1)
specialCount = 0
for num in range(2, math.isqrt(r) + 1):
if isPrime[num] and l <= num**2 <= r:
specialCount += 1
return r - l + 1 - specialCount
def _sieveEratosthenes(self, n: int) -> list[bool]:
isPrime = [True] * n
isPrime[0] = False
isPrime[1] = False
for i in range(2, int(n**0.5) + 1):
if isPrime[i]:
for j in range(i * i, n, i):
isPrime[j] = False
return isPrime
| Solution |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 758,
"end": 942
} | class ____(PrefectBaseModel):
"""Filter by `Flow.id`."""
any_: Optional[List[UUID]] = Field(
default=None, description="A list of flow ids to include"
)
| FlowFilterId |
python | getsentry__sentry | tests/sentry/issues/test_utils.py | {
"start": 4310,
"end": 8566
} | class ____(OccurrenceTestMixin):
def store_search_issue(
self,
project_id: int,
user_id: int,
fingerprints: Sequence[str],
environment: str | None = None,
insert_time: datetime | None = None,
tags: Sequence[tuple[str, Any]] | None = None,
release: str | None = None,
user: dict[str, Any] | None = None,
event_data: dict[str, Any] | None = None,
override_occurrence_data: dict[str, Any] | None = None,
) -> tuple[Event, IssueOccurrence, GroupInfo | None]:
from sentry.utils import snuba
insert_timestamp = (insert_time if insert_time else timezone.now()).replace(microsecond=0)
user_id_val = f"user_{user_id}"
event_data = {
"tags": [("sentry:user", user_id_val)],
"timestamp": insert_timestamp.isoformat(),
**(event_data or {}),
}
if tags:
event_data["tags"].extend(tags)
if user:
event_data["user"] = user
if environment:
event_data["environment"] = environment
event_data["tags"].extend([("environment", environment)])
if release:
event_data["release"] = release
event_data["tags"].extend([("release", release)])
event = self.store_event(
data=event_data,
project_id=project_id,
)
occurrence = self.build_occurrence(
event_id=event.event_id, fingerprint=fingerprints, **(override_occurrence_data or {})
)
saved_occurrence, group_info = save_issue_occurrence(occurrence.to_dict(), event)
self.assert_occurrences_identical(occurrence, saved_occurrence)
assert Group.objects.filter(grouphash__hash=saved_occurrence.fingerprint[0]).exists()
result = snuba.raw_query(
dataset=Dataset.IssuePlatform,
start=insert_timestamp - timedelta(days=1),
end=insert_timestamp + timedelta(days=1),
selected_columns=[
"event_id",
"project_id",
"environment",
"group_id",
"tags[sentry:user]",
"timestamp",
],
groupby=None,
filter_keys={"project_id": [project_id], "event_id": [event.event_id]},
referrer="test_utils.store_search_issue",
tenant_ids={"referrer": "test_utils.store_search_issue", "organization_id": 1},
)
assert len(result["data"]) == 1
assert result["data"][0]["project_id"] == project_id
assert result["data"][0]["group_id"] == group_info.group.id if group_info else None
assert (
result["data"][0]["tags[sentry:user]"] == user_id_val if not user else f"id:{user_id}"
)
assert result["data"][0]["environment"] == environment
assert result["data"][0]["timestamp"] == insert_timestamp.isoformat()
return event, saved_occurrence, group_info
def get_mock_groups_past_counts_response(
num_days: int,
num_hours: int,
groups: list[Group],
) -> list[GroupsCountResponse]:
"""
Returns a mocked response of type `GroupsCountResponse` from `query_groups_past_counts`.
Creates event count data for each group in `groups` for `num_days`, for `num_hours`.
`groups`: The groups that data will be generated for
`num_days`: The number of days that data will be generated for
`num_hours`: The number of hours per day that data will be generated for
"""
data = []
now = datetime.now()
for group in groups:
for day in range(num_days, 0, -1):
time = now - timedelta(days=day)
for hour in range(num_hours, 0, -1):
hourly_time = time - timedelta(hours=hour)
data.append(
GroupsCountResponse(
{
"group_id": group.id,
"hourBucket": hourly_time.strftime("%Y-%m-%dT%H:%M:%S%f") + "+00:00",
"count()": 10,
"project_id": group.project.id,
}
)
)
return data
| SearchIssueTestMixin |
python | keon__algorithms | algorithms/tree/fenwick_tree/fenwick_tree.py | {
"start": 1021,
"end": 2756
} | class ____(object):
def __init__(self, freq):
self.arr = freq
self.n = len(freq)
def get_sum(self, bit_tree, i):
"""
Returns sum of arr[0..index]. This function assumes that the array is preprocessed and partial sums of array elements are stored in bit_tree[].
"""
s = 0
# index in bit_tree[] is 1 more than the index in arr[]
i = i+1
# Traverse ancestors of bit_tree[index]
while i > 0:
# Add current element of bit_tree to sum
s += bit_tree[i]
# Move index to parent node in getSum View
i -= i & (-i)
return s
def update_bit(self, bit_tree, i, v):
"""
Updates a node in Binary Index Tree (bit_tree) at given index in bit_tree. The given value 'val' is added to bit_tree[i] and all of its ancestors in tree.
"""
# index in bit_ree[] is 1 more than the index in arr[]
i += 1
# Traverse all ancestors and add 'val'
while i <= self.n:
# Add 'val' to current node of bit_tree
bit_tree[i] += v
# Update index to that of parent in update View
i += i & (-i)
def construct(self):
"""
Constructs and returns a Binary Indexed Tree for given array of size n.
"""
# Create and initialize bit_ree[] as 0
bit_tree = [0]*(self.n+1)
# Store the actual values in bit_ree[] using update()
for i in range(self.n):
self.update_bit(bit_tree, i, self.arr[i])
return bit_tree
| Fenwick_Tree |
python | geekcomputers__Python | Sorting Algorithms/Sorted_Inserted_Linked_List.py | {
"start": 94,
"end": 1339
} | class ____:
def __init__(self):
self.head = None
def Sorted_Insert(self, new_node):
current = self.head
if current is None:
new_node.next = new_node
self.head = new_node
elif current.data >= new_node.data:
while current.next != self.head:
current = current.next
current.next = new_node
new_node.next = self.head
self.head = new_node
else:
while current.next != self.head and current.next.data < new_node.data:
current = current.next
new_node.next = current.next
current.next = new_node
def Display(self):
temp = self.head
if self.head is not None:
while temp:
print(temp.data, "->", end=" ")
temp = temp.next
if temp == self.head:
print(temp.data)
break
if __name__ == "__main__":
L_list = Circular_Linked_List()
Test_list = [12, 56, 2, 11, 1, 90]
for keys in Test_list:
temp = Node(keys)
L_list.Sorted_Insert(temp)
print("Sorted Inserted Circular Linked List: ")
L_list.Display()
| Circular_Linked_List |
python | django__django | tests/queries/test_bulk_update.py | {
"start": 716,
"end": 4247
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.notes = [Note.objects.create(note=str(i), misc=str(i)) for i in range(10)]
def create_tags(self):
self.tags = [Tag.objects.create(name=str(i)) for i in range(10)]
def test_simple(self):
for note in self.notes:
note.note = "test-%s" % note.id
with self.assertNumQueries(1):
Note.objects.bulk_update(self.notes, ["note"])
self.assertCountEqual(
Note.objects.values_list("note", flat=True),
[cat.note for cat in self.notes],
)
def test_multiple_fields(self):
for note in self.notes:
note.note = "test-%s" % note.id
note.misc = "misc-%s" % note.id
with self.assertNumQueries(1):
Note.objects.bulk_update(self.notes, ["note", "misc"])
self.assertCountEqual(
Note.objects.values_list("note", flat=True),
[cat.note for cat in self.notes],
)
self.assertCountEqual(
Note.objects.values_list("misc", flat=True),
[cat.misc for cat in self.notes],
)
def test_batch_size(self):
with self.assertNumQueries(len(self.notes)):
Note.objects.bulk_update(self.notes, fields=["note"], batch_size=1)
def test_max_batch_size(self):
max_batch_size = connection.ops.bulk_batch_size(
# PK is used twice, see comment in bulk_update().
[Note._meta.pk, Note._meta.pk, Note._meta.get_field("note")],
self.notes,
)
with self.assertNumQueries(ceil(len(self.notes) / max_batch_size)):
Note.objects.bulk_update(self.notes, fields=["note"])
def test_unsaved_models(self):
objs = self.notes + [Note(note="test", misc="test")]
msg = "All bulk_update() objects must have a primary key set."
with self.assertRaisesMessage(ValueError, msg):
Note.objects.bulk_update(objs, fields=["note"])
def test_foreign_keys_do_not_lookup(self):
self.create_tags()
for note, tag in zip(self.notes, self.tags):
note.tag = tag
with self.assertNumQueries(1):
Note.objects.bulk_update(self.notes, ["tag"])
self.assertSequenceEqual(Note.objects.filter(tag__isnull=False), self.notes)
def test_set_field_to_null(self):
self.create_tags()
Note.objects.update(tag=self.tags[0])
for note in self.notes:
note.tag = None
Note.objects.bulk_update(self.notes, ["tag"])
self.assertCountEqual(Note.objects.filter(tag__isnull=True), self.notes)
def test_set_mixed_fields_to_null(self):
self.create_tags()
midpoint = len(self.notes) // 2
top, bottom = self.notes[:midpoint], self.notes[midpoint:]
for note in top:
note.tag = None
for note in bottom:
note.tag = self.tags[0]
Note.objects.bulk_update(self.notes, ["tag"])
self.assertCountEqual(Note.objects.filter(tag__isnull=True), top)
self.assertCountEqual(Note.objects.filter(tag__isnull=False), bottom)
def test_functions(self):
Note.objects.update(note="TEST")
for note in self.notes:
note.note = Lower("note")
Note.objects.bulk_update(self.notes, ["note"])
self.assertEqual(set(Note.objects.values_list("note", flat=True)), {"test"})
# Tests that use self.notes go here, otherwise put them in another class.
| BulkUpdateNoteTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-braintree/source_braintree/schemas/cards.py | {
"start": 2648,
"end": 3202
} | class ____(CreditCard):
"""
https://developer.paypal.com/braintree/docs/reference/response/us-bank-account
"""
account_holder_name: str
account_type: str
ach_mandate: str
bank_name: str
business_name: str
last_name: str
owner_id: str
ownership_type: str
plaid_verified_at: datetime
routing_number: str
verifiable: bool
verified: bool
PaymentMethod = Union[
CreditCard, AndroidPayCard, ApplePayCard, SamsungPayCard, USBankAccount, PayPalAccount, VenmoAccount, VisaCheckoutCard
]
| USBankAccount |
python | tornadoweb__tornado | maint/test/redbot/red_test.py | {
"start": 1132,
"end": 7938
} | class ____(object):
def get_handlers(self):
return [
('/hello', HelloHandler),
('/redirect(/.*)', RedirectHandler),
('/post', PostHandler),
('/chunked', ChunkedHandler),
('/cache/(.*)', CacheHandler),
]
def get_app_kwargs(self):
return dict(static_path='.')
def get_allowed_warnings(self):
return [
# We can't set a non-heuristic freshness at the framework level,
# so just ignore this warning
rs.FRESHNESS_HEURISTIC,
# For our small test responses the Content-Encoding header
# wipes out any gains from compression
rs.CONNEG_GZIP_BAD,
]
def get_allowed_errors(self):
return []
def check_url(self, path, method='GET', body=None, headers=None,
expected_status=200, allowed_warnings=None,
allowed_errors=None):
url = self.get_url(path)
red = self.run_redbot(url, method, body, headers)
if not red.response.complete:
if isinstance(red.response.http_error, Exception):
logging.warning((red.response.http_error.desc, vars(red.response.http_error), url))
raise red.response.http_error.res_error
else:
raise Exception("unknown error; incomplete response")
self.assertEqual(int(red.response.status_code), expected_status)
allowed_warnings = (allowed_warnings or []) + self.get_allowed_warnings()
allowed_errors = (allowed_errors or []) + self.get_allowed_errors()
errors = []
warnings = []
for msg in red.response.notes:
if msg.level == 'bad':
logger = logging.error
if not isinstance(msg, tuple(allowed_errors)):
errors.append(msg)
elif msg.level == 'warning':
logger = logging.warning
if not isinstance(msg, tuple(allowed_warnings)):
warnings.append(msg)
elif msg.level in ('good', 'info', 'uri'):
logger = logging.info
else:
raise Exception('unknown level' + msg.level)
logger('%s: %s (%s)', msg.category, msg.show_summary('en'),
msg.__class__.__name__)
logger(msg.show_text('en'))
self.assertEqual(len(warnings) + len(errors), 0,
'Had %d unexpected warnings and %d errors' %
(len(warnings), len(errors)))
def run_redbot(self, url, method, body, headers):
red = HttpResource(url, method=method, req_body=body,
req_hdrs=headers)
def work():
red.run(thor.stop)
thor.run()
self.io_loop.add_callback(self.stop)
thread = threading.Thread(target=work)
thread.start()
self.wait()
thread.join()
return red
def test_hello(self):
self.check_url('/hello')
def test_static(self):
# TODO: 304 responses SHOULD return the same etag that a full
# response would. We currently do for If-None-Match, but not
# for If-Modified-Since (because IMS does not otherwise
# require us to read the file from disk)
self.check_url('/static/red_test.py',
allowed_warnings=[rs.MISSING_HDRS_304])
def test_static_versioned_url(self):
self.check_url('/static/red_test.py?v=1234',
allowed_warnings=[rs.MISSING_HDRS_304])
def test_redirect(self):
self.check_url('/redirect/hello', expected_status=302)
def test_permanent_redirect(self):
self.check_url('/redirect/hello?status=301', expected_status=301)
def test_404(self):
self.check_url('/404', expected_status=404)
def test_post(self):
body = 'foo=bar'
# Without an explicit Content-Length redbot will try to send the
# request chunked.
self.check_url(
'/post', method='POST', body=body,
headers=[('Content-Length', str(len(body))),
('Content-Type', 'application/x-www-form-urlencoded')],
expected_status=303)
def test_chunked(self):
self.check_url('/chunked')
def test_strong_etag_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304)
def test_multiple_strong_etag_match(self):
computed_etag = '"xyzzy1"'
etags = '"xyzzy1", "xyzzy2"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304)
def test_strong_etag_not_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy1"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=200)
def test_multiple_strong_etag_not_match(self):
computed_etag = '"xyzzy"'
etags = '"xyzzy1", "xyzzy2"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=200)
def test_wildcard_etag(self):
computed_etag = '"xyzzy"'
etags = '*'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304,
allowed_warnings=[rs.MISSING_HDRS_304])
def test_weak_etag_match(self):
computed_etag = '"xyzzy1"'
etags = 'W/"xyzzy1"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304)
def test_multiple_weak_etag_match(self):
computed_etag = '"xyzzy2"'
etags = 'W/"xyzzy1", W/"xyzzy2"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=304)
def test_weak_etag_not_match(self):
computed_etag = '"xyzzy2"'
etags = 'W/"xyzzy1"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=200)
def test_multiple_weak_etag_not_match(self):
computed_etag = '"xyzzy3"'
etags = 'W/"xyzzy1", W/"xyzzy2"'
self.check_url(
'/cache/' + computed_etag, method='GET',
headers=[('If-None-Match', etags)],
expected_status=200)
| TestMixin |
python | prabhupant__python-ds | data_structures/linked_list/linked_list.py | {
"start": 0,
"end": 94
} | class ____():
def __init__(self, val):
self.val = val
self.next = None
| Node |
python | pypa__pip | src/pip/_internal/commands/check.py | {
"start": 499,
"end": 2244
} | class ____(Command):
"""Verify installed packages have compatible dependencies."""
ignore_require_venv = True
usage = """
%prog [options]"""
def run(self, options: Values, args: list[str]) -> int:
package_set, parsing_probs = create_package_set_from_installed()
missing, conflicting = check_package_set(package_set)
unsupported = list(
check_unsupported(
get_default_environment().iter_installed_distributions(),
get_supported(),
)
)
for project_name in missing:
version = package_set[project_name].version
for dependency in missing[project_name]:
write_output(
"%s %s requires %s, which is not installed.",
project_name,
version,
dependency[0],
)
for project_name in conflicting:
version = package_set[project_name].version
for dep_name, dep_version, req in conflicting[project_name]:
write_output(
"%s %s has requirement %s, but you have %s %s.",
project_name,
version,
req,
dep_name,
dep_version,
)
for package in unsupported:
write_output(
"%s %s is not supported on this platform",
package.raw_name,
package.version,
)
if missing or conflicting or parsing_probs or unsupported:
return ERROR
else:
write_output("No broken requirements found.")
return SUCCESS
| CheckCommand |
python | pydata__xarray | asv_bench/benchmarks/dataarray_missing.py | {
"start": 396,
"end": 978
} | class ____:
def setup(self, shape, chunks, limit):
if chunks is not None:
requires_dask()
self.da = make_bench_data(shape, 0.1, chunks)
@parameterized(
["shape", "chunks", "limit"],
(
[(365, 75, 75)],
[None, {"x": 25, "y": 25}],
[None, 3],
),
)
def time_interpolate_na(self, shape, chunks, limit):
actual = self.da.interpolate_na(dim="time", method="linear", limit=limit)
if chunks is not None:
actual = actual.compute()
| DataArrayMissingInterpolateNA |
python | pypa__warehouse | tests/unit/api/test_billing.py | {
"start": 19888,
"end": 21642
} | class ____:
def test_billing_webhook(self, pyramid_request, billing_service, monkeypatch):
pyramid_request.body = json.dumps({"type": "mock.webhook.payload"})
pyramid_request.headers = {"Stripe-Signature": "mock-stripe-signature"}
monkeypatch.setattr(
billing_service,
"webhook_received",
lambda p, s: json.loads(p),
)
monkeypatch.setattr(
billing, "handle_billing_webhook_event", lambda *a, **kw: None
)
result = billing.billing_webhook(pyramid_request)
assert isinstance(result, HTTPNoContent)
def test_billing_webhook_value_error(
self, pyramid_request, billing_service, monkeypatch
):
pyramid_request.body = json.dumps({"type": "mock.webhook.payload"})
pyramid_request.headers = {"Stripe-Signature": "mock-stripe-signature"}
def webhook_received(payload, sig_header):
raise ValueError()
monkeypatch.setattr(billing_service, "webhook_received", webhook_received)
with pytest.raises(HTTPBadRequest):
billing.billing_webhook(pyramid_request)
def test_billing_webhook_signature_error(
self, pyramid_request, billing_service, monkeypatch
):
pyramid_request.body = json.dumps({"type": "mock.webhook.payload"})
pyramid_request.headers = {"Stripe-Signature": "mock-stripe-signature"}
def webhook_received(payload, sig_header):
raise stripe.error.SignatureVerificationError("signature error", sig_header)
monkeypatch.setattr(billing_service, "webhook_received", webhook_received)
with pytest.raises(HTTPBadRequest):
billing.billing_webhook(pyramid_request)
| TestBillingWebhook |
python | getsentry__sentry | tests/snuba/rules/conditions/test_event_frequency.py | {
"start": 4784,
"end": 18886
} | class ____(EventFrequencyQueryTestBase):
rule_cls = EventFrequencyCondition
def test_batch_query(self) -> None:
batch_query = self.condition_inst.batch_query_hook(
group_ids=[self.event.group_id, self.event2.group_id, self.perf_event.group_id],
start=self.start,
end=self.end,
environment_id=self.environment.id,
)
assert batch_query == {
self.event.group_id: 1,
self.event2.group_id: 1,
self.perf_event.group_id: 1,
}
batch_query = self.condition_inst2.batch_query_hook(
group_ids=[self.event3.group_id],
start=self.start,
end=self.end,
environment_id=self.environment2.id,
)
assert batch_query == {self.event3.group_id: 1}
@patch("sentry.tsdb.snuba.LIMIT", 3)
def test_batch_query_group_on_time(self) -> None:
"""
Test that if we hit the snuba query limit we get incorrect results when group_on_time is enabled
"""
def _store_events(fingerprint: str, offset=True) -> int:
hours = 1
group_id = None
for i in range(4):
if offset:
hours += 1
event = self.store_event(
data={
"event_id": str(i) * 32,
"timestamp": before_now(hours=hours).isoformat(),
"fingerprint": [fingerprint],
},
project_id=self.project.id,
)
hours += 1
group_id = event.group_id
assert group_id
return group_id
group_1_id = _store_events("group-hb")
group_2_id = _store_events("group-pb", offset=False)
condition_inst = self.get_rule(
data={"interval": "1w", "value": 1},
project=self.project.id,
rule=Rule(),
)
start = before_now(days=7)
end = timezone.now()
batch_query = condition_inst.get_chunked_result(
tsdb_function=condition_inst.tsdb.get_timeseries_sums,
model=get_issue_tsdb_group_model(GroupCategory.ERROR),
group_ids=[group_1_id, group_2_id],
organization_id=self.organization.id,
start=start,
end=end,
environment_id=None,
referrer_suffix="batch_alert_event_frequency",
group_on_time=True,
)
assert batch_query == {
group_1_id: 1,
group_2_id: 2,
}
batch_query = condition_inst.get_chunked_result(
tsdb_function=condition_inst.tsdb.get_sums,
model=get_issue_tsdb_group_model(GroupCategory.ERROR),
group_ids=[group_1_id, group_2_id],
organization_id=self.organization.id,
start=start,
end=end,
environment_id=None,
referrer_suffix="batch_alert_event_frequency",
group_on_time=False,
)
assert batch_query == {
group_1_id: 4,
group_2_id: 4,
}
def test_get_error_and_generic_group_ids(self) -> None:
groups = Group.objects.filter(
id__in=[self.event.group_id, self.event2.group_id, self.perf_event.group_id]
).values("id", "type", "project__organization_id")
error_issue_ids, generic_issue_ids = self.condition_inst.get_error_and_generic_group_ids(
groups
)
assert self.event.group_id in error_issue_ids
assert self.event2.group_id in error_issue_ids
assert self.perf_event.group_id in generic_issue_ids
def test_upsampled_aggregation_with_real_events_error_groups(self) -> None:
"""Test that real events with sample weights produce upsampled counts for error groups"""
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
# Store 2 error events with 0.2 sample rate (5x weight = 10 total count)
sampled_event1 = self.store_event(
data={
"event_id": "d" * 32,
"environment": self.environment.name,
"timestamp": before_now(seconds=30).isoformat(),
"fingerprint": ["sampled-group"],
"user": {"id": uuid4().hex},
"contexts": {"error_sampling": {"client_sample_rate": 0.2}},
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": "e" * 32,
"environment": self.environment.name,
"timestamp": before_now(seconds=20).isoformat(),
"fingerprint": ["sampled-group"],
"user": {"id": uuid4().hex},
"contexts": {"error_sampling": {"client_sample_rate": 0.2}},
},
project_id=self.project.id,
)
# Query using the EventFrequency condition's query_hook for proper integration
group_event = sampled_event1.for_group(sampled_event1.group)
count = self.condition_inst.query_hook(
event=group_event,
start=self.start,
end=self.end,
environment_id=self.environment.id,
)
# Expect upsampled count: 2 events * 5 sample_weight = 10
assert count == 10
def test_regular_aggregation_with_real_events_when_disabled(self) -> None:
"""Test that real events with sample weights produce regular counts when upsampling is disabled"""
# Store 2 error events with 0.2 sample rate
sampled_event1 = self.store_event(
data={
"event_id": "f" * 32,
"environment": self.environment.name,
"timestamp": before_now(seconds=30).isoformat(),
"fingerprint": ["non-upsampled-group"],
"user": {"id": uuid4().hex},
"contexts": {"error_sampling": {"client_sample_rate": 0.2}},
},
project_id=self.project.id,
)
self.store_event(
data={
"event_id": str(uuid4()).replace("-", ""),
"environment": self.environment.name,
"timestamp": before_now(seconds=20).isoformat(),
"fingerprint": ["non-upsampled-group"],
"user": {"id": uuid4().hex},
"contexts": {"error_sampling": {"client_sample_rate": 0.2}},
},
project_id=self.project.id,
)
# Query using EventFrequency condition WITHOUT upsampling enabled - should return raw count
group_event = sampled_event1.for_group(sampled_event1.group)
count = self.condition_inst.query_hook(
event=group_event,
start=self.start,
end=self.end,
environment_id=self.environment.id,
)
# Expect regular count: 2 events (no upsampling)
assert count == 2
def test_regular_aggregation_with_real_events_performance_groups(self) -> None:
"""Test that performance groups use regular counts even when upsampling is enabled"""
with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
# Query using EventFrequency condition for performance groups - should use regular count
count = self.condition_inst.query_hook(
event=self.perf_event, # self.perf_event is already a GroupEvent
start=self.start,
end=self.end,
environment_id=self.environment.id,
)
# Expect regular count: 1 event (no upsampling for performance)
# The perf_event was created in setUp, so we expect count of 1
assert count == 1
def test_upsampled_aggregation_with_real_events_batch_query_error_groups(self) -> None:
    """Test that real events with sample weights produce upsampled counts in batch queries for error groups"""
    # sample_weight = 1 / client_sample_rate, so 0.2 -> 5x and 0.1 -> 10x.
    with self.options({"issues.client_error_sampling.project_allowlist": [self.project.id]}):
        # Store 2 error events with 0.2 sample rate for first group (5x weight = 10 total count)
        batch_event1_1 = self.store_event(
            data={
                "event_id": str(uuid4()).replace("-", ""),
                "environment": self.environment.name,
                "timestamp": before_now(seconds=30).isoformat(),
                "fingerprint": ["batch-group-1"],
                "user": {"id": uuid4().hex},
                "contexts": {"error_sampling": {"client_sample_rate": 0.2}},
            },
            project_id=self.project.id,
        )
        self.store_event(
            data={
                "event_id": str(uuid4()).replace("-", ""),
                "environment": self.environment.name,
                "timestamp": before_now(seconds=25).isoformat(),
                "fingerprint": ["batch-group-1"],
                "user": {"id": uuid4().hex},
                "contexts": {"error_sampling": {"client_sample_rate": 0.2}},
            },
            project_id=self.project.id,
        )
        # Store 3 error events with 0.1 sample rate for second group (10x weight = 30 total count)
        batch_event2_1 = self.store_event(
            data={
                "event_id": str(uuid4()).replace("-", ""),
                "environment": self.environment.name,
                "timestamp": before_now(seconds=20).isoformat(),
                "fingerprint": ["batch-group-2"],
                "user": {"id": uuid4().hex},
                "contexts": {"error_sampling": {"client_sample_rate": 0.1}},
            },
            project_id=self.project.id,
        )
        self.store_event(
            data={
                "event_id": str(uuid4()).replace("-", ""),
                "environment": self.environment.name,
                "timestamp": before_now(seconds=15).isoformat(),
                "fingerprint": ["batch-group-2"],
                "user": {"id": uuid4().hex},
                "contexts": {"error_sampling": {"client_sample_rate": 0.1}},
            },
            project_id=self.project.id,
        )
        self.store_event(
            data={
                "event_id": str(uuid4()).replace("-", ""),
                "environment": self.environment.name,
                "timestamp": before_now(seconds=10).isoformat(),
                "fingerprint": ["batch-group-2"],
                "user": {"id": uuid4().hex},
                "contexts": {"error_sampling": {"client_sample_rate": 0.1}},
            },
            project_id=self.project.id,
        )
        # Query using batch_query_hook - should return upsampled counts
        result = self.condition_inst.batch_query_hook(
            group_ids={batch_event1_1.group_id, batch_event2_1.group_id},
            start=self.start,
            end=self.end,
            environment_id=self.environment.id,
        )
        # Expect upsampled counts:
        # Group 1: 2 events * 5 sample_weight = 10
        # Group 2: 3 events * 10 sample_weight = 30
        expected_results = {
            batch_event1_1.group_id: 10,
            batch_event2_1.group_id: 30,
        }
        assert result == expected_results
def test_regular_aggregation_with_real_events_batch_query_when_disabled(self) -> None:
    """Test that real events with sample weights produce regular counts in batch queries when upsampling is disabled"""
    # No allowlist option set here, so client_sample_rate must be ignored.
    # Store 2 error events with 0.2 sample rate for first group
    batch_event1_1 = self.store_event(
        data={
            "event_id": str(uuid4()).replace("-", ""),
            "environment": self.environment.name,
            "timestamp": before_now(seconds=30).isoformat(),
            "fingerprint": ["batch-non-upsampled-1"],
            "user": {"id": uuid4().hex},
            "contexts": {"error_sampling": {"client_sample_rate": 0.2}},
        },
        project_id=self.project.id,
    )
    self.store_event(
        data={
            "event_id": str(uuid4()).replace("-", ""),
            "environment": self.environment.name,
            "timestamp": before_now(seconds=25).isoformat(),
            "fingerprint": ["batch-non-upsampled-1"],
            "user": {"id": uuid4().hex},
            "contexts": {"error_sampling": {"client_sample_rate": 0.2}},
        },
        project_id=self.project.id,
    )
    # Store 1 error event for second group
    batch_event2_1 = self.store_event(
        data={
            "event_id": str(uuid4()).replace("-", ""),
            "environment": self.environment.name,
            "timestamp": before_now(seconds=20).isoformat(),
            "fingerprint": ["batch-non-upsampled-2"],
            "user": {"id": uuid4().hex},
            "contexts": {"error_sampling": {"client_sample_rate": 0.1}},
        },
        project_id=self.project.id,
    )
    # Query using batch_query_hook WITHOUT upsampling enabled - should return raw counts
    result = self.condition_inst.batch_query_hook(
        group_ids={batch_event1_1.group_id, batch_event2_1.group_id},
        start=self.start,
        end=self.end,
        environment_id=self.environment.id,
    )
    # Expect regular counts: Group 1: 2 events, Group 2: 1 event (no upsampling)
    expected_results = {
        batch_event1_1.group_id: 2,
        batch_event2_1.group_id: 1,
    }
    assert result == expected_results
| EventFrequencyQueryTest |
python | joblib__joblib | joblib/test/test_parallel.py | {
"start": 43156,
"end": 78095
} | class ____(list):
'''MyList is interactively defined by MyList.append is a built-in'''
def __hash__(self):
# XXX: workaround limitation in cloudpickle
return hash(self).__hash__()
l = MyList()
print(Parallel(backend="loky", n_jobs=2)(
delayed(l.append)(i) for i in range(3)
))
""".format(joblib_root_folder=os.path.dirname(os.path.dirname(joblib.__file__)))
@with_multiprocessing
def test_parallel_with_interactively_defined_bound_method_loky(tmpdir):
    """Run a script defining a class in ``__main__`` and dispatching one of
    its bound methods with the loky backend; cloudpickle must be able to
    pickle the interactively defined method."""
    script = tmpdir.join("joblib_interactive_bound_method_script.py")
    script.write(INTERACTIVELY_DEFINED_SUBCLASS_WITH_METHOD_SCRIPT_CONTENT)
    check_subprocess_call(
        [sys.executable, script.strpath],
        stdout_regex=r"\[None, None, None\]",
        stderr_regex=r"LokyProcess",
        timeout=15,
    )
def test_parallel_with_exhausted_iterator():
    """Dispatching from an already-exhausted iterator yields an empty list."""
    assert Parallel(n_jobs=2)(iter([])) == []
def check_memmap(a):
    """Assert that *a* was received as a memory-mapped array.

    Used as a Parallel task to verify that the auto-memmapping feature
    actually converted the input to ``np.memmap`` before it reached the
    worker.

    Parameters
    ----------
    a : np.memmap
        Array expected to have been auto-memmapped by Parallel.

    Returns
    -------
    np.ndarray
        An in-memory copy of *a* (so no reference to the memmapped file
        is kept by the caller).

    Raises
    ------
    TypeError
        If *a* is not an ``np.memmap`` instance.
    """
    if not isinstance(a, np.memmap):
        # Bug fix: the original passed ``type(a)`` as a second exception
        # argument, leaving the %r placeholder unformatted in the message.
        raise TypeError("Expected np.memmap instance, got %r" % type(a))
    return a.copy()  # return a regular array instead of a memmap
@with_numpy
@with_multiprocessing
@parametrize("backend", PROCESS_BACKENDS)
def test_auto_memmap_on_arrays_from_generator(backend):
    """Check auto-memmapping of arrays that are created lazily during
    dispatch and may be garbage-collected by the parent mid-loop."""
    # Non-regression test for a problem with a bad interaction between the
    # GC collecting arrays recently created during iteration inside the
    # parallel dispatch loop and the auto-memmap feature of Parallel.
    # See: https://github.com/joblib/joblib/pull/294
    def generate_arrays(n):
        # Arrays are produced lazily so the parent's GC may reclaim them
        # while the workers still need the memmapped copies.
        for i in range(n):
            yield np.ones(10, dtype=np.float32) * i

    # Use max_nbytes=1 to force the use of memory-mapping even for small
    # arrays
    results = Parallel(n_jobs=2, max_nbytes=1, backend=backend)(
        delayed(check_memmap)(a) for a in generate_arrays(100)
    )
    for result, expected in zip(results, generate_arrays(len(results))):
        np.testing.assert_array_equal(expected, result)

    # Second call to force loky to adapt the executor by growing the number
    # of worker processes. This is a non-regression test for:
    # https://github.com/joblib/joblib/issues/629.
    results = Parallel(n_jobs=4, max_nbytes=1, backend=backend)(
        delayed(check_memmap)(a) for a in generate_arrays(100)
    )
    for result, expected in zip(results, generate_arrays(len(results))):
        np.testing.assert_array_equal(expected, result)
def identity(arg):
    """Return *arg* unchanged — a picklable no-op task for Parallel tests."""
    return arg
@with_numpy
@with_multiprocessing
def test_memmap_with_big_offset(tmpdir):
    """Check that a memmap whose offset exceeds mmap.ALLOCATIONGRANULARITY
    survives a round trip through a Parallel worker unchanged."""
    fname = tmpdir.join("test.mmap").strpath
    size = mmap.ALLOCATIONGRANULARITY
    # Two page-sized arrays so the second one sits past the allocation
    # granularity boundary inside the dumped file.
    obj = [np.zeros(size, dtype="uint8"), np.ones(size, dtype="uint8")]
    dump(obj, fname)
    memmap = load(fname, mmap_mode="r")
    (result,) = Parallel(n_jobs=2)(delayed(identity)(memmap) for _ in [0])
    assert isinstance(memmap[1], np.memmap)
    assert memmap[1].offset > size
    np.testing.assert_array_equal(obj, result)
def test_warning_about_timeout_not_supported_by_backend():
    """Setting ``timeout`` with a backend that cannot enforce it (the
    SequentialBackend picked for n_jobs=1) must emit exactly one
    UserWarning with an explanatory message."""
    with warnings.catch_warnings(record=True) as warninfo:
        Parallel(n_jobs=1, timeout=1)(delayed(square)(i) for i in range(50))
    assert len(warninfo) == 1
    w = warninfo[0]
    assert isinstance(w.message, UserWarning)
    assert str(w.message) == (
        "The backend class 'SequentialBackend' does not support timeout. "
        "You have set 'timeout=1' in Parallel but the 'timeout' parameter "
        "will not be used."
    )
def set_list_value(input_list, index, value):
    """Store *value* at ``input_list[index]`` and echo the value back.

    Used to observe both the side effect on the shared list and the
    returned results of a Parallel call.
    """
    input_list[index] = value
    return value
@pytest.mark.parametrize("n_jobs", [1, 2, 4])
def test_parallel_return_order_with_return_as_generator_parameter(n_jobs):
    """Check that return_as='generator' yields results in submission order."""
    # This test inserts values in a list in some expected order
    # in sequential computing, and then checks that this order has been
    # respected by Parallel output generator.
    input_list = [0] * 5
    result = Parallel(n_jobs=n_jobs, return_as="generator", backend="threading")(
        delayed(set_list_value)(input_list, i, i) for i in range(5)
    )
    # Ensure that all the tasks are completed before checking the result
    result = list(result)
    assert all(v == r for v, r in zip(input_list, result))
def _sqrt_with_delay(e, delay):
if delay:
sleep(30)
return sqrt(e)
# Use a private function so it can also be called for the dask backend in
# test_dask.py without triggering the test twice.
# We isolate the test with the dask backend to simplify optional deps
# management and leaking environment variables.
def _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs):
    """Shared scenario: with return_as='generator_unordered', fast tasks
    must be yielded before a deliberately slow one completes."""
    # This test submits 10 tasks, but the second task is super slow. This test
    # checks that the 9 other tasks return before the slow task is done, when
    # `return_as` parameter is set to `'generator_unordered'`
    result = Parallel(n_jobs=n_jobs, return_as="generator_unordered", backend=backend)(
        delayed(_sqrt_with_delay)(i**2, (i == 1)) for i in range(10)
    )
    quickly_returned = sorted(next(result) for _ in range(9))
    expected_quickly_returned = [0] + list(range(2, 10))
    assert all(v == r for v, r in zip(expected_quickly_returned, quickly_returned))
    # Dropping the generator aborts the still-pending 30s task so the test
    # does not have to wait for it.
    del result
@pytest.mark.parametrize("n_jobs", [2, 4])
# NB: for this test to work, the backend must be allowed to process tasks
# concurrently, so at least two jobs with a non-sequential backend are
# mandatory.
@with_multiprocessing
@parametrize("backend", set(RETURN_GENERATOR_BACKENDS) - {"sequential"})
def test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs):
    """Thin wrapper around the shared scenario so it can also be reused for
    the dask backend in test_dask.py without running twice."""
    _test_parallel_unordered_generator_returns_fastest_first(backend, n_jobs)
@parametrize("backend", ALL_VALID_BACKENDS)
@parametrize("n_jobs", [1, 2, -2, -1])
def test_abort_backend(n_jobs, backend):
    """A failing task (sleep("a") raises TypeError) must abort the whole
    Parallel call quickly instead of draining the remaining 10s tasks."""
    delays = ["a"] + [10] * 100
    with raises(TypeError):
        t_start = time.time()
        Parallel(n_jobs=n_jobs, backend=backend)(delayed(time.sleep)(i) for i in delays)
    dt = time.time() - t_start
    # Far less than the 1000s the queued sleeps would take if not aborted.
    assert dt < 20
def get_large_object(arg):
    """Build a ~500kB boolean array whose first element is False.

    The size makes pickling the result slow enough to exercise race
    conditions in the generator-returning backends; *arg* is accepted
    (and ignored) so the function can be used with delayed().
    """
    big = np.full(int(5 * 1e5), True, dtype=bool)
    big[0] = False
    return big
# Use a private function so it can also be called for the dask backend in
# test_dask.py without triggering the test twice.
# We isolate the test with the dask backend to simplify optional deps
# management and leaking environment variables.
def _test_deadlock_with_generator(backend, return_as, n_jobs):
    """Shared scenario (also reused for dask in test_dask.py): consuming
    only part of a generator of large results must not deadlock."""
    # Non-regression test for a race condition in the backends when the pickler
    # is delayed by a large object.
    with Parallel(n_jobs=n_jobs, backend=backend, return_as=return_as) as parallel:
        result = parallel(delayed(get_large_object)(i) for i in range(10))
        next(result)
        next(result)

    # Dropping the partially-consumed generator must not hang either.
    del result
@with_numpy
@parametrize("backend", RETURN_GENERATOR_BACKENDS)
@parametrize("return_as", ["generator", "generator_unordered"])
@parametrize("n_jobs", [1, 2, -2, -1])
def test_deadlock_with_generator(backend, return_as, n_jobs):
    """Thin wrapper around the shared deadlock scenario for the built-in
    backends (the dask variant lives in test_dask.py)."""
    _test_deadlock_with_generator(backend, return_as, n_jobs)
@parametrize("backend", RETURN_GENERATOR_BACKENDS)
@parametrize("return_as", ["generator", "generator_unordered"])
@parametrize("n_jobs", [1, 2, -2, -1])
def test_multiple_generator_call(backend, return_as, n_jobs):
    """Submitting to a Parallel instance whose previous generator is still
    running must raise immediately (unmanaged instance variant)."""
    # Non-regression test that ensures the dispatch of the tasks starts
    # immediately when Parallel.__call__ is called. This test relies on the
    # assumption that only one generator can be submitted at a time.
    with raises(RuntimeError, match="This Parallel instance is already running"):
        parallel = Parallel(n_jobs, backend=backend, return_as=return_as)
        g = parallel(delayed(sleep)(1) for _ in range(10))  # noqa: F841
        t_start = time.time()
        gen2 = parallel(delayed(id)(i) for i in range(100))  # noqa: F841

    # Make sure that the error is raised quickly
    assert time.time() - t_start < 2, (
        "The error should be raised immediately when submitting a new task "
        "but it took more than 2s."
    )
    del g
@parametrize("backend", RETURN_GENERATOR_BACKENDS)
@parametrize("return_as", ["generator", "generator_unordered"])
@parametrize("n_jobs", [1, 2, -2, -1])
def test_multiple_generator_call_managed(backend, return_as, n_jobs):
    """Same as test_multiple_generator_call but with the Parallel instance
    used as a context manager."""
    # Non-regression test that ensures the dispatch of the tasks starts
    # immediately when Parallel.__call__ is called. This test relies on the
    # assumption that only one generator can be submitted at a time.
    with Parallel(n_jobs, backend=backend, return_as=return_as) as parallel:
        g = parallel(delayed(sleep)(10) for _ in range(10))  # noqa: F841
        t_start = time.time()
        with raises(RuntimeError, match="This Parallel instance is already running"):
            g2 = parallel(delayed(id)(i) for i in range(100))  # noqa: F841

        # Make sure that the error is raised quickly
        assert time.time() - t_start < 2, (
            "The error should be raised immediately when submitting a new task "
            "but it took more than 2s."
        )
    del g
@parametrize("backend", RETURN_GENERATOR_BACKENDS)
@parametrize("return_as_1", ["generator", "generator_unordered"])
@parametrize("return_as_2", ["generator", "generator_unordered"])
@parametrize("n_jobs", [1, 2, -2, -1])
def test_multiple_generator_call_separated(backend, return_as_1, return_as_2, n_jobs):
    # Check that for separated Parallel, both tasks are correctly returned.
    g = Parallel(n_jobs, backend=backend, return_as=return_as_1)(
        delayed(sqrt)(i**2) for i in range(10)
    )
    g2 = Parallel(n_jobs, backend=backend, return_as=return_as_2)(
        delayed(sqrt)(i**2) for i in range(10, 20)
    )

    # Unordered generators give no order guarantee: sort before comparing.
    if return_as_1 == "generator_unordered":
        g = sorted(g)
    if return_as_2 == "generator_unordered":
        g2 = sorted(g2)

    assert all(res == i for res, i in zip(g, range(10)))
    assert all(res == i for res, i in zip(g2, range(10, 20)))
@parametrize(
    "backend, error",
    [
        ("loky", True),
        ("threading", False),
        ("sequential", False),
    ],
)
@parametrize("return_as_1", ["generator", "generator_unordered"])
@parametrize("return_as_2", ["generator", "generator_unordered"])
def test_multiple_generator_call_separated_gc(backend, return_as_1, return_as_2, error):
    """Garbage-collecting a running generator: for loky (shared executor) a
    second call must raise; other backends proceed normally."""
    if (backend == "loky") and (mp is None):
        pytest.skip("Requires multiprocessing")

    # Check that in loky, only one call can be run at a time with
    # a single executor.
    parallel = Parallel(2, backend=backend, return_as=return_as_1)
    g = parallel(delayed(sleep)(10) for i in range(10))
    # Finalizer lets us observe when the first generator is collected.
    g_wr = weakref.finalize(g, lambda: print("Generator collected"))

    ctx = (
        raises(RuntimeError, match="The executor underlying Parallel")
        if error
        else nullcontext()
    )
    with ctx:
        # For loky, this call will raise an error as the gc of the previous
        # generator will shutdown the shared executor.
        # For the other backends, as the worker pools are not shared between
        # the two calls, this should proceed correctly.
        t_start = time.time()
        g = Parallel(2, backend=backend, return_as=return_as_2)(
            delayed(sqrt)(i**2) for i in range(10, 20)
        )
        if return_as_2 == "generator_unordered":
            g = sorted(g)
        assert all(res == i for res, i in zip(g, range(10, 20)))
        assert time.time() - t_start < 5

        # Make sure that the computation are stopped for the gc'ed generator
        retry = 0
        while g_wr.alive and retry < 3:
            retry += 1
            time.sleep(0.5)
        assert time.time() - t_start < 5

    if parallel._effective_n_jobs() != 1:
        # check that the first parallel object is aborting (the final _aborted
        # state might be delayed).
        assert parallel._aborting
@with_numpy
@with_multiprocessing
@parametrize("backend", PROCESS_BACKENDS)
def test_memmapping_leaks(backend, tmpdir):
    """Non-regression test for memmapping backends: the temporary memmap
    folder must be cleaned up both when exiting the Parallel context
    manager and at the end of a plain Parallel call."""
    tmpdir = tmpdir.strpath

    def wait_until_cleaned(folder):
        # Cleanup is asynchronous: poll for up to ~10 seconds before
        # declaring a leak. (Refactor: this loop was duplicated twice.)
        for _ in range(100):
            if not os.listdir(folder):
                return
            sleep(0.1)
        raise AssertionError("temporary directory of Parallel was not removed")

    # Use max_nbytes=1 to force the use of memory-mapping even for small
    # arrays
    with Parallel(n_jobs=2, max_nbytes=1, backend=backend, temp_folder=tmpdir) as p:
        p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
        # The memmap folder should not be clean in the context scope
        assert len(os.listdir(tmpdir)) > 0

    # Make sure that the shared memory is cleaned at the end when we exit
    # the context
    wait_until_cleaned(tmpdir)

    # Make sure that the shared memory is cleaned at the end of a call
    p = Parallel(n_jobs=2, max_nbytes=1, backend=backend)
    p(delayed(check_memmap)(a) for a in [np.random.random(10)] * 2)
    wait_until_cleaned(tmpdir)
@parametrize(
    "backend", ([None, "threading"] if mp is None else [None, "loky", "threading"])
)
def test_lambda_expression(backend):
    """Lambdas are not picklable by the stdlib pickler; check that Parallel
    can still dispatch them on every backend."""
    # cloudpickle is used to pickle delayed callables
    results = Parallel(n_jobs=2, backend=backend)(
        delayed(lambda x: x**2)(i) for i in range(10)
    )
    assert results == [i**2 for i in range(10)]
@with_multiprocessing
@parametrize("backend", PROCESS_BACKENDS)
def test_backend_batch_statistics_reset(backend):
    """Test that a parallel backend correctly resets its batch statistics."""
    n_jobs = 2
    n_inputs = 500
    # Many very short tasks make auto-batching adjust its statistics.
    task_time = 2.0 / n_inputs

    p = Parallel(verbose=10, n_jobs=n_jobs, backend=backend)
    p(delayed(time.sleep)(task_time) for i in range(n_inputs))
    # After the call completes, the adaptive batching state must be back
    # to its defaults so the next call starts from a clean slate.
    assert p._backend._effective_batch_size == p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE
    assert (
        p._backend._smoothed_batch_duration
        == p._backend._DEFAULT_SMOOTHED_BATCH_DURATION
    )

    p(delayed(time.sleep)(task_time) for i in range(n_inputs))
    assert p._backend._effective_batch_size == p._backend._DEFAULT_EFFECTIVE_BATCH_SIZE
    assert (
        p._backend._smoothed_batch_duration
        == p._backend._DEFAULT_SMOOTHED_BATCH_DURATION
    )
@with_multiprocessing
@parametrize("context", [parallel_config, parallel_backend])
def test_backend_hinting_and_constraints(context):
    """Check the precedence rules between backend hints (prefer=...),
    hard constraints (require=...) and explicit backend selection."""
    for n_jobs in [1, 2, -1]:
        assert type(Parallel(n_jobs=n_jobs)._backend) is get_default_backend_instance()

        p = Parallel(n_jobs=n_jobs, prefer="threads")
        assert type(p._backend) is ThreadingBackend

        p = Parallel(n_jobs=n_jobs, prefer="processes")
        assert type(p._backend) is LokyBackend

        p = Parallel(n_jobs=n_jobs, require="sharedmem")
        assert type(p._backend) is ThreadingBackend

    # Explicit backend selection can override backend hinting although it
    # is useless to pass a hint when selecting a backend.
    p = Parallel(n_jobs=2, backend="loky", prefer="threads")
    assert type(p._backend) is LokyBackend

    with context("loky", n_jobs=2):
        # Explicit backend selection by the user with the context manager
        # should be respected when combined with backend hints only.
        p = Parallel(prefer="threads")
        assert type(p._backend) is LokyBackend
        assert p.n_jobs == 2

    with context("loky", n_jobs=2):
        # Locally hard-coded n_jobs value is respected.
        p = Parallel(n_jobs=3, prefer="threads")
        assert type(p._backend) is LokyBackend
        assert p.n_jobs == 3

    with context("loky", n_jobs=2):
        # Explicit backend selection by the user with the context manager
        # should be ignored when the Parallel call has hard constraints.
        # In this case, the default backend that supports shared mem is
        # used an the default number of processes is used.
        p = Parallel(require="sharedmem")
        assert type(p._backend) is ThreadingBackend
        assert p.n_jobs == 1

    with context("loky", n_jobs=2):
        p = Parallel(n_jobs=3, require="sharedmem")
        assert type(p._backend) is ThreadingBackend
        assert p.n_jobs == 3
@parametrize("n_jobs", [1, 2])
@parametrize("prefer", [None, "processes", "threads"])
def test_backend_hinting_always_running(n_jobs, prefer):
    # Check that the backend hinting never results in an error
    # Non-regression test for https://github.com/joblib/joblib/issues/1720
    expected_results = [i**2 for i in range(10)]

    # Hint passed directly to the Parallel call.
    results = Parallel(n_jobs=n_jobs, prefer=prefer)(
        delayed(square)(i) for i in range(10)
    )
    assert results == expected_results

    # Same hint passed through the parallel_config context manager.
    with parallel_config(prefer=prefer, n_jobs=n_jobs):
        results = Parallel()(delayed(square)(i) for i in range(10))
    assert results == expected_results
@parametrize("context", [parallel_config, parallel_backend])
def test_backend_hinting_and_constraints_with_custom_backends(capsys, context):
    """Hints are ignored for user-provided backends, but the sharedmem
    constraint can force a fallback to ThreadingBackend (with a verbose
    notice) or raise when the backend is selected explicitly."""
    # Custom backends can declare that they use threads and have shared memory
    # semantics:
    class MyCustomThreadingBackend(ParallelBackendBase):
        supports_sharedmem = True
        use_threads = True

        def apply_async(self):
            pass

        def effective_n_jobs(self, n_jobs):
            return n_jobs

    with context(MyCustomThreadingBackend()):
        p = Parallel(n_jobs=2, prefer="processes")  # ignored
        assert type(p._backend) is MyCustomThreadingBackend

        p = Parallel(n_jobs=2, require="sharedmem")
        assert type(p._backend) is MyCustomThreadingBackend

    class MyCustomProcessingBackend(ParallelBackendBase):
        supports_sharedmem = False
        use_threads = False

        def apply_async(self):
            pass

        def effective_n_jobs(self, n_jobs):
            return n_jobs

    with context(MyCustomProcessingBackend()):
        p = Parallel(n_jobs=2, prefer="processes")
        assert type(p._backend) is MyCustomProcessingBackend

        out, err = capsys.readouterr()
        assert out == ""
        assert err == ""

        # sharedmem is a hard constraint: fall back to ThreadingBackend
        # and announce the substitution when verbose.
        p = Parallel(n_jobs=2, require="sharedmem", verbose=10)
        assert type(p._backend) is ThreadingBackend

        out, err = capsys.readouterr()
        expected = (
            "Using ThreadingBackend as joblib backend "
            "instead of MyCustomProcessingBackend as the latter "
            "does not provide shared memory semantics."
        )
        assert out.strip() == expected
        assert err == ""

    with raises(ValueError):
        Parallel(backend=MyCustomProcessingBackend(), require="sharedmem")
def test_invalid_backend_hinting_and_constraints():
    """Invalid or contradictory prefer/require combinations must raise."""
    with raises(ValueError):
        Parallel(prefer="invalid")

    with raises(ValueError):
        Parallel(require="invalid")

    with raises(ValueError):
        # It is inconsistent to prefer process-based parallelism while
        # requiring shared memory semantics.
        Parallel(prefer="processes", require="sharedmem")

    if mp is not None:
        # It is inconsistent to ask explicitly for a process-based
        # parallelism while requiring shared memory semantics.
        with raises(ValueError):
            Parallel(backend="loky", require="sharedmem")
        with raises(ValueError):
            Parallel(backend="multiprocessing", require="sharedmem")
def _recursive_backend_info(limit=3, **kwargs):
    """Perform nested parallel calls and introspect the backend on the way

    Returns a list of (backend class name, nesting_level) tuples, one per
    nesting level, up to *limit* levels deep. Extra **kwargs are forwarded
    to the recursive calls.
    """
    with Parallel(n_jobs=2) as p:
        this_level = [(type(p._backend).__name__, p._backend.nesting_level)]
        if limit == 0:
            return this_level
        results = p(
            delayed(_recursive_backend_info)(limit=limit - 1, **kwargs)
            for i in range(1)
        )
        return this_level + results[0]
@with_multiprocessing
@parametrize("backend", ["loky", "threading"])
@parametrize("context", [parallel_config, parallel_backend])
def test_nested_parallelism_limit(context, backend):
    """Nested Parallel calls must degrade gracefully: the chosen backend at
    the top level, threads at nesting level 1, sequential below that."""
    with context(backend, n_jobs=2):
        backend_types_and_levels = _recursive_backend_info()

    top_level_backend_type = backend.title() + "Backend"
    expected_types_and_levels = [
        (top_level_backend_type, 0),
        ("ThreadingBackend", 1),
        ("SequentialBackend", 2),
        ("SequentialBackend", 2),
    ]
    assert backend_types_and_levels == expected_types_and_levels
def _recursive_parallel(nesting_limit=None):
    """A horrible function that does recursive parallel calls"""
    # NOTE(review): nesting_limit is accepted but never used here —
    # presumably kept for signature symmetry with similar helpers; confirm.
    return Parallel()(delayed(_recursive_parallel)() for i in range(2))
@pytest.mark.no_cover
@parametrize("context", [parallel_config, parallel_backend])
@parametrize("backend", (["threading"] if mp is None else ["loky", "threading"]))
def test_thread_bomb_mitigation(context, backend):
    # Test that recursive parallelism raises a recursion rather than
    # saturating the operating system resources by creating a unbounded number
    # of threads.
    with context(backend, n_jobs=2):
        with raises(BaseException) as excinfo:
            _recursive_parallel()

    exc = excinfo.value
    if backend == "loky":
        # Local import because loky may not be importable for lack of
        # multiprocessing
        from joblib.externals.loky.process_executor import TerminatedWorkerError  # noqa

        if isinstance(exc, (TerminatedWorkerError, PicklingError)):
            # The recursion exception can itself cause an error when
            # pickling it to be send back to the parent process. In this
            # case the worker crashes but the original traceback is still
            # printed on stderr. This could be improved but does not seem
            # simple to do and this is not critical for users (as long
            # as there is no process or thread bomb happening).
            pytest.xfail("Loky worker crash when serializing RecursionError")

    assert isinstance(exc, RecursionError)
def _run_parallel_sum():
    """Worker-side probe: capture the threadpool-related environment
    variables seen by this worker and run the compiled OpenMP helper,
    returning both so the parent can inspect the effective limits."""
    monitored_vars = (
        "OMP_NUM_THREADS",
        "OPENBLAS_NUM_THREADS",
        "MKL_NUM_THREADS",
        "VECLIB_MAXIMUM_THREADS",
        "NUMEXPR_NUM_THREADS",
        "NUMBA_NUM_THREADS",
        "ENABLE_IPC",
    )
    env_vars = {name: os.environ.get(name) for name in monitored_vars}
    return env_vars, parallel_sum(100)
@parametrize("backend", ([None, "loky"] if mp is not None else [None]))
@skipif(parallel_sum is None, reason="Need OpenMP helper compiled")
def test_parallel_thread_limit(backend):
    """Check that worker processes cap native threadpools: each worker must
    see *_NUM_THREADS env vars set to cpu_count() // 2 (for 2 jobs)."""
    results = Parallel(n_jobs=2, backend=backend)(
        delayed(_run_parallel_sum)() for _ in range(2)
    )
    expected_num_threads = max(cpu_count() // 2, 1)
    for worker_env_vars, omp_num_threads in results:
        # The OpenMP helper itself must have run with the limited count.
        assert omp_num_threads == expected_num_threads
        for name, value in worker_env_vars.items():
            if name.endswith("_THREADS"):
                assert value == str(expected_num_threads)
            else:
                assert name == "ENABLE_IPC"
                assert value == "1"
@parametrize("context", [parallel_config, parallel_backend])
@skipif(distributed is not None, reason="This test requires dask to not be installed")
def test_dask_backend_when_dask_not_installed(context):
    """Selecting the dask backend without dask installed must raise a
    ValueError with an actionable message.

    Bug fix: the skipif reason previously read "This test requires dask",
    the opposite of the actual condition — the test is skipped when
    dask/distributed IS importable.
    """
    with raises(ValueError, match="Please install dask"):
        context("dask")
@parametrize("context", [parallel_config, parallel_backend])
def test_zero_worker_backend(context):
    # joblib.Parallel should reject with an explicit error message parallel
    # backends that have no worker.
    class ZeroWorkerBackend(ThreadingBackend):
        def configure(self, *args, **kwargs):
            # Report that zero workers could be created.
            return 0

        def apply_async(self, func, callback=None):  # pragma: no cover
            raise TimeoutError("No worker available")

        def effective_n_jobs(self, n_jobs):  # pragma: no cover
            return 0

    expected_msg = "ZeroWorkerBackend has no active worker"
    with context(ZeroWorkerBackend()):
        with pytest.raises(RuntimeError, match=expected_msg):
            Parallel(n_jobs=2)(delayed(id)(i) for i in range(2))
def test_globals_update_at_each_parallel_call():
    # This is a non-regression test related to joblib issues #836 and #833.
    # Cloudpickle versions between 0.5.4 and 0.7 introduced a bug where global
    # variables changes in a parent process between two calls to
    # joblib.Parallel would not be propagated into the workers.
    global MY_GLOBAL_VARIABLE
    MY_GLOBAL_VARIABLE = "original value"

    def check_globals():
        global MY_GLOBAL_VARIABLE
        return MY_GLOBAL_VARIABLE

    assert check_globals() == "original value"

    workers_global_variable = Parallel(n_jobs=2)(
        delayed(check_globals)() for i in range(2)
    )
    assert set(workers_global_variable) == {"original value"}

    # Change the value of MY_GLOBAL_VARIABLE, and make sure this change gets
    # propagated into the workers environment
    MY_GLOBAL_VARIABLE = "changed value"
    assert check_globals() == "changed value"

    workers_global_variable = Parallel(n_jobs=2)(
        delayed(check_globals)() for i in range(2)
    )
    assert set(workers_global_variable) == {"changed value"}
##############################################################################
# Test environment variable in child env, in particular for limiting
# the maximal number of threads in C-library threadpools.
#
def _check_numpy_threadpool_limits():
    """Return ``threadpoolctl.threadpool_info()`` after forcing the BLAS
    threadpool to initialize with a large enough matmul.

    Skips the calling test when threadpoolctl is not installed.
    Improvement: the importorskip now runs BEFORE the BLAS call, so no
    work is wasted when threadpoolctl is absent.
    """
    threadpoolctl = pytest.importorskip("threadpoolctl")

    import numpy as np

    # Let's call BLAS on a Matrix Matrix multiplication with dimensions large
    # enough to ensure that the threadpool managed by the underlying BLAS
    # implementation is actually used so as to force its initialization.
    a = np.random.randn(100, 100)
    np.dot(a, a)
    return threadpoolctl.threadpool_info()
def _parent_max_num_threads_for(child_module, parent_info):
for parent_module in parent_info:
if parent_module["filepath"] == child_module["filepath"]:
return parent_module["num_threads"]
raise ValueError(
"An unexpected module was loaded in child:\n{}".format(child_module)
)
def check_child_num_threads(workers_info, parent_info, num_threads):
    """Assert every threadpool module reported by every worker runs with an
    acceptable thread count: either *num_threads* itself, or the parent's
    own maximum for that module when it is lower than the requested limit.
    """
    for worker_threadpool_info in workers_info:
        for worker_module in worker_threadpool_info:
            parent_max = _parent_max_num_threads_for(worker_module, parent_info)
            acceptable = {num_threads, min(num_threads, parent_max)}
            assert worker_module["num_threads"] in acceptable
@with_numpy
@with_multiprocessing
@parametrize("n_jobs", [2, 4, -2, -1])
def test_threadpool_limitation_in_child_loky(n_jobs):
    # Check that the protection against oversubscription in workers is working
    # using threadpoolctl functionalities.

    # Skip this test if numpy is not linked to a BLAS library
    parent_info = _check_numpy_threadpool_limits()
    if len(parent_info) == 0:
        pytest.skip(reason="Need a version of numpy linked to BLAS")

    workers_threadpool_infos = Parallel(backend="loky", n_jobs=n_jobs)(
        delayed(_check_numpy_threadpool_limits)() for i in range(2)
    )

    n_jobs = effective_n_jobs(n_jobs)
    if n_jobs == 1:
        # A single worker gets the full parent budget.
        expected_child_num_threads = parent_info[0]["num_threads"]
    else:
        # Workers must share the CPUs to avoid oversubscription.
        expected_child_num_threads = max(cpu_count() // n_jobs, 1)

    check_child_num_threads(
        workers_threadpool_infos, parent_info, expected_child_num_threads
    )
@with_numpy
@with_multiprocessing
@parametrize("inner_max_num_threads", [1, 2, 4, None])
@parametrize("n_jobs", [2, -1])
@parametrize("context", [parallel_config, parallel_backend])
def test_threadpool_limitation_in_child_context(context, n_jobs, inner_max_num_threads):
    # Check that the protection against oversubscription in workers is working
    # using threadpoolctl functionalities.

    # Skip this test if numpy is not linked to a BLAS library
    parent_info = _check_numpy_threadpool_limits()
    if len(parent_info) == 0:
        pytest.skip(reason="Need a version of numpy linked to BLAS")

    with context("loky", inner_max_num_threads=inner_max_num_threads):
        workers_threadpool_infos = Parallel(n_jobs=n_jobs)(
            delayed(_check_numpy_threadpool_limits)() for i in range(2)
        )

    n_jobs = effective_n_jobs(n_jobs)
    if n_jobs == 1:
        # A single worker gets the full parent budget.
        expected_child_num_threads = parent_info[0]["num_threads"]
    elif inner_max_num_threads is None:
        # Default: split the CPUs between workers.
        expected_child_num_threads = max(cpu_count() // n_jobs, 1)
    else:
        # An explicit inner limit overrides the automatic split.
        expected_child_num_threads = inner_max_num_threads

    check_child_num_threads(
        workers_threadpool_infos, parent_info, expected_child_num_threads
    )
@with_multiprocessing
@parametrize("n_jobs", [2, -1])
@parametrize("var_name", ["OPENBLAS_NUM_THREADS", "MKL_NUM_THREADS", "OMP_NUM_THREADS"])
@parametrize("context", [parallel_config, parallel_backend])
def test_threadpool_limitation_in_child_override(context, n_jobs, var_name):
    # Check that environment variables set by the user on the main process
    # always have the priority.

    # Skip this test if the process is run sequentially
    if effective_n_jobs(n_jobs) == 1:
        pytest.skip("Skip test when n_jobs == 1")

    # Clean up the existing executor because we change the environment of the
    # parent at runtime and it is not detected in loky intentionally.
    get_reusable_executor(reuse=True).shutdown()

    def _get_env(var_name):
        return os.environ.get(var_name)

    original_var_value = os.environ.get(var_name)
    try:
        os.environ[var_name] = "4"
        # Skip this test if numpy is not linked to a BLAS library
        results = Parallel(n_jobs=n_jobs)(delayed(_get_env)(var_name) for i in range(2))
        assert results == ["4", "4"]

        # An explicit inner_max_num_threads takes over the inherited value.
        with context("loky", inner_max_num_threads=1):
            results = Parallel(n_jobs=n_jobs)(
                delayed(_get_env)(var_name) for i in range(2)
            )
        assert results == ["1", "1"]
    finally:
        # Restore the parent environment whatever happened above.
        if original_var_value is None:
            del os.environ[var_name]
        else:
            os.environ[var_name] = original_var_value
@with_multiprocessing
@parametrize("n_jobs", [2, 4, -1])
def test_loky_reuse_workers(n_jobs):
    # Non-regression test for issue #967 where the workers are not reused when
    # calling multiple Parallel loops.
    def parallel_call(n_jobs):
        x = range(10)
        Parallel(n_jobs=n_jobs)(delayed(sum)(x) for i in range(10))

    # Run a parallel loop and get the workers used for computations
    parallel_call(n_jobs)
    first_executor = get_reusable_executor(reuse=True)

    # Ensure that the workers are reused for the next calls, as the executor is
    # not restarted.
    for _ in range(10):
        parallel_call(n_jobs)
        executor = get_reusable_executor(reuse=True)
        assert executor == first_executor
def _set_initialized(status):
status[os.getpid()] = "initialized"
def _check_status(status, n_jobs, wait_workers=False):
pid = os.getpid()
state = status.get(pid, None)
assert state in ("initialized", "started"), (
f"worker should have been in initialized state, got {state}"
)
if not wait_workers:
return
status[pid] = "started"
# wait up to 30 seconds for the workers to be initialized
deadline = time.time() + 30
n_started = len([pid for pid, v in status.items() if v == "started"])
while time.time() < deadline and n_started < n_jobs:
time.sleep(0.1)
n_started = len([pid for pid, v in status.items() if v == "started"])
if time.time() >= deadline:
raise TimeoutError("Waited more than 30s to start all the workers")
return pid
@with_multiprocessing
@parametrize("n_jobs", [2, 4])
@parametrize("backend", PROCESS_BACKENDS)
@parametrize("context", [parallel_config, parallel_backend])
def test_initializer_context(n_jobs, backend, context):
manager = mp.Manager()
status = manager.dict()
# pass the initializer to the backend context
with context(
backend=backend,
n_jobs=n_jobs,
initializer=_set_initialized,
initargs=(status,),
):
# check_status checks that the initializer is correctly call
Parallel()(delayed(_check_status)(status, n_jobs) for i in range(100))
@with_multiprocessing
@parametrize("n_jobs", [2, 4])
@parametrize("backend", PROCESS_BACKENDS)
def test_initializer_parallel(n_jobs, backend):
manager = mp.Manager()
status = manager.dict()
# pass the initializer directly to the Parallel call
# check_status checks that the initializer is called in all tasks
Parallel(
backend=backend,
n_jobs=n_jobs,
initializer=_set_initialized,
initargs=(status,),
)(delayed(_check_status)(status, n_jobs) for i in range(100))
@with_multiprocessing
@pytest.mark.parametrize("n_jobs", [2, 4])
def test_initializer_reused(n_jobs):
# Check that it is possible to pass initializer config via the `Parallel`
# call directly and the worker are reused when the arguments are the same.
n_repetitions = 3
manager = mp.Manager()
status = manager.dict()
pids = set()
for i in range(n_repetitions):
results = Parallel(
backend="loky",
n_jobs=n_jobs,
initializer=_set_initialized,
initargs=(status,),
)(
delayed(_check_status)(status, n_jobs, wait_workers=True)
for i in range(n_jobs)
)
pids = pids.union(set(results))
assert len(pids) == n_jobs, (
"The workers should be reused when the initializer is the same"
)
@with_multiprocessing
@pytest.mark.parametrize("n_jobs", [2, 4])
def test_initializer_not_reused(n_jobs):
# Check that when changing the initializer arguments, each parallel call uses its
# own initializer args, independently of the previous calls, hence the loky workers
# are not reused.
n_repetitions = 3
manager = mp.Manager()
pids = set()
for i in range(n_repetitions):
status = manager.dict()
results = Parallel(
backend="loky",
n_jobs=n_jobs,
initializer=_set_initialized,
initargs=(status,),
)(
delayed(_check_status)(status, n_jobs, wait_workers=True)
for i in range(n_jobs)
)
pids = pids.union(set(results))
assert len(pids) == n_repetitions * n_jobs, (
"The workers should not be reused when the initializer arguments change"
)
| MyList |
python | facebookresearch__faiss | faiss/gpu/test/test_gpu_index.py | {
"start": 12496,
"end": 16249
} | class ____(unittest.TestCase):
def test_indices_ivfflat(self):
res = faiss.StandardGpuResources()
d = 128
nb = 5000
nlist = 10
rs = np.random.RandomState(567)
xb = rs.rand(nb, d).astype('float32')
xb_indices_base = np.arange(nb, dtype=np.int64)
# Force values to not be representable in int32
xb_indices = (xb_indices_base + 4294967296).astype('int64')
config = faiss.GpuIndexIVFFlatConfig()
idx = faiss.GpuIndexIVFFlat(res, d, nlist, faiss.METRIC_L2, config)
idx.train(xb)
idx.add_with_ids(xb, xb_indices)
_, I = idx.search(xb[10:20], 5)
self.assertTrue(np.array_equal(xb_indices[10:20], I[:, 0]))
# Store values using 32-bit indices instead
config.indicesOptions = faiss.INDICES_32_BIT
config.use_cuvs = False
idx = faiss.GpuIndexIVFFlat(res, d, nlist, faiss.METRIC_L2, config)
idx.train(xb)
idx.add_with_ids(xb, xb_indices)
_, I = idx.search(xb[10:20], 5)
# This will strip the high bit
self.assertTrue(np.array_equal(xb_indices_base[10:20], I[:, 0]))
def test_indices_ivfpq(self):
res = faiss.StandardGpuResources()
d = 128
nb = 5000
nlist = 10
M = 4
nbits = 8
rs = np.random.RandomState(567)
xb = rs.rand(nb, d).astype('float32')
xb_indices_base = np.arange(nb, dtype=np.int64)
# Force values to not be representable in int32
xb_indices = (xb_indices_base + 4294967296).astype('int64')
config = faiss.GpuIndexIVFPQConfig()
idx = faiss.GpuIndexIVFPQ(res, d, nlist, M, nbits,
faiss.METRIC_L2, config)
idx.train(xb)
idx.add_with_ids(xb, xb_indices)
_, I = idx.search(xb[10:20], 5)
self.assertTrue(np.array_equal(xb_indices[10:20], I[:, 0]))
# Store values using 32-bit indices instead
config.indicesOptions = faiss.INDICES_32_BIT
# 32-bit indices are not supported with cuVS
config.use_cuvs = False
idx = faiss.GpuIndexIVFPQ(res, d, nlist, M, nbits,
faiss.METRIC_L2, config)
idx.train(xb)
idx.add_with_ids(xb, xb_indices)
_, I = idx.search(xb[10:20], 5)
# This will strip the high bit
self.assertTrue(np.array_equal(xb_indices_base[10:20], I[:, 0]))
def test_indices_ivfsq(self):
res = faiss.StandardGpuResources()
d = 128
nb = 5000
nlist = 10
qtype = faiss.ScalarQuantizer.QT_4bit
rs = np.random.RandomState(567)
xb = rs.rand(nb, d).astype('float32')
xb_indices_base = np.arange(nb, dtype=np.int64)
# Force values to not be representable in int32
xb_indices = (xb_indices_base + 4294967296).astype('int64')
config = faiss.GpuIndexIVFScalarQuantizerConfig()
idx = faiss.GpuIndexIVFScalarQuantizer(res, d, nlist, qtype,
faiss.METRIC_L2, True, config)
idx.train(xb)
idx.add_with_ids(xb, xb_indices)
_, I = idx.search(xb[10:20], 5)
self.assertTrue(np.array_equal(xb_indices[10:20], I[:, 0]))
# Store values using 32-bit indices instead
config.indicesOptions = faiss.INDICES_32_BIT
idx = faiss.GpuIndexIVFScalarQuantizer(res, d, nlist, qtype,
faiss.METRIC_L2, True, config)
idx.train(xb)
idx.add_with_ids(xb, xb_indices)
_, I = idx.search(xb[10:20], 5)
# This will strip the high bit
self.assertTrue(np.array_equal(xb_indices_base[10:20], I[:, 0]))
| TestIVFIndices |
python | doocs__leetcode | lcof2/剑指 Offer II 048. 序列化与反序列化二叉树/Solution.py | {
"start": 172,
"end": 1219
} | class ____:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if root is None:
return ''
res = []
def preorder(root):
if root is None:
res.append("#,")
return
res.append(str(root.val) + ",")
preorder(root.left)
preorder(root.right)
preorder(root)
return ''.join(res)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
if not data:
return None
vals = data.split(',')
def inner():
first = vals.pop(0)
if first == '#':
return None
return TreeNode(int(first), inner(), inner())
return inner()
# Your Codec object will be instantiated and called as such:
# ser = Codec()
# deser = Codec()
# ans = deser.deserialize(ser.serialize(root))
| Codec |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/enum14.py | {
"start": 212,
"end": 288
} | class ____(Enum):
# This should generate two errors.
x: Literal[A.x]
| A |
python | doocs__leetcode | solution/0200-0299/0224.Basic Calculator/Solution.py | {
"start": 0,
"end": 752
} | class ____:
def calculate(self, s: str) -> int:
stk = []
ans, sign = 0, 1
i, n = 0, len(s)
while i < n:
if s[i].isdigit():
x = 0
j = i
while j < n and s[j].isdigit():
x = x * 10 + int(s[j])
j += 1
ans += sign * x
i = j - 1
elif s[i] == "+":
sign = 1
elif s[i] == "-":
sign = -1
elif s[i] == "(":
stk.append(ans)
stk.append(sign)
ans, sign = 0, 1
elif s[i] == ")":
ans = stk.pop() * ans + stk.pop()
i += 1
return ans
| Solution |
python | openai__openai-python | src/openai/types/audio/translation_verbose.py | {
"start": 247,
"end": 615
} | class ____(BaseModel):
duration: float
"""The duration of the input audio."""
language: str
"""The language of the output translation (always `english`)."""
text: str
"""The translated text."""
segments: Optional[List[TranscriptionSegment]] = None
"""Segments of the translated text and their corresponding details."""
| TranslationVerbose |
python | getsentry__sentry | src/sentry/api/bases/organization.py | {
"start": 7817,
"end": 8346
} | class ____(OrganizationPermission):
scope_map = {
"GET": ["org:read", "org:write", "org:admin", "alerts:read"],
# grant org:read permission, but raise permission denied if the members aren't allowed
# to create alerts and the user isn't a team admin
"POST": ["org:read", "org:write", "org:admin", "alerts:write"],
"PUT": ["org:read", "org:write", "org:admin", "alerts:write"],
"DELETE": ["org:read", "org:write", "org:admin", "alerts:write"],
}
| OrganizationDetectorPermission |
python | python-markdown__markdown | tests/test_apis.py | {
"start": 7440,
"end": 11890
} | class ____(unittest.TestCase):
""" Test the processor registry. """
def testCreateRegistry(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
self.assertEqual(len(r), 1)
self.assertIsInstance(r, markdown.util.Registry)
def testRegisterWithoutPriority(self):
r = markdown.util.Registry()
with self.assertRaises(TypeError):
r.register(Item('a'))
def testSortRegistry(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 21)
r.register(Item('c'), 'c', 20.5)
self.assertEqual(len(r), 3)
self.assertEqual(list(r), ['b', 'c', 'a'])
def testIsSorted(self):
r = markdown.util.Registry()
self.assertIs(r._is_sorted, False)
r.register(Item('a'), 'a', 20)
list(r)
self.assertIs(r._is_sorted, True)
r.register(Item('b'), 'b', 21)
self.assertIs(r._is_sorted, False)
r['a']
self.assertIs(r._is_sorted, True)
r._is_sorted = False
r.get_index_for_name('a')
self.assertIs(r._is_sorted, True)
r._is_sorted = False
repr(r)
self.assertIs(r._is_sorted, True)
def testDeregister(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
r.register(Item('c'), 'c', 40)
self.assertEqual(len(r), 3)
r.deregister('b')
self.assertEqual(len(r), 2)
r.deregister('c', strict=False)
self.assertEqual(len(r), 1)
# deregister non-existent item with `strict=False`
r.deregister('d', strict=False)
self.assertEqual(len(r), 1)
with self.assertRaises(ValueError):
# deregister non-existent item with `strict=True`
r.deregister('e')
self.assertEqual(list(r), ['a'])
def testRegistryContains(self):
r = markdown.util.Registry()
item = Item('a')
r.register(item, 'a', 20)
self.assertIs('a' in r, True)
self.assertIn(item, r)
self.assertNotIn('b', r)
def testRegistryIter(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(list(r), ['b', 'a'])
def testRegistryGetItemByIndex(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r[0], 'b')
self.assertEqual(r[1], 'a')
with self.assertRaises(IndexError):
r[3]
def testRegistryGetItemByItem(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r['a'], 'a')
self.assertEqual(r['b'], 'b')
with self.assertRaises(KeyError):
r['c']
def testRegistrySetItem(self):
r = markdown.util.Registry()
with self.assertRaises(TypeError):
r[0] = 'a'
with self.assertRaises(TypeError):
r['a'] = 'a'
def testRegistryDelItem(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
with self.assertRaises(TypeError):
del r[0]
with self.assertRaises(TypeError):
del r['a']
def testRegistrySlice(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
r.register(Item('c'), 'c', 40)
slc = r[1:]
self.assertEqual(len(slc), 2)
self.assertIsInstance(slc, markdown.util.Registry)
self.assertEqual(list(slc), ['b', 'a'])
def testGetIndexForName(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b'), 'b', 30)
self.assertEqual(r.get_index_for_name('a'), 1)
self.assertEqual(r.get_index_for_name('b'), 0)
with self.assertRaises(ValueError):
r.get_index_for_name('c')
def testRegisterDupplicate(self):
r = markdown.util.Registry()
r.register(Item('a'), 'a', 20)
r.register(Item('b1'), 'b', 10)
self.assertEqual(list(r), ['a', 'b1'])
self.assertEqual(len(r), 2)
r.register(Item('b2'), 'b', 30)
self.assertEqual(len(r), 2)
self.assertEqual(list(r), ['b2', 'a'])
| RegistryTests |
python | pypa__hatch | tests/backend/metadata/test_core.py | {
"start": 6738,
"end": 7034
} | class ____:
@pytest.mark.parametrize("name", ["My--App", "My__App", "My..App"])
def test_normalization(self, isolation, name):
metadata = ProjectMetadata(str(isolation), None, {"project": {"name": name}})
assert metadata.core.name == metadata.core.name == "my-app"
| TestName |
python | getsentry__sentry | tests/sentry/db/models/manager/test_base_query_set.py | {
"start": 389,
"end": 1315
} | class ____(TestCase):
def test(self) -> None:
group_2 = self.create_group()
ids = [self.group.id, group_2.id]
returned = Group.objects.filter(id__in=ids).update_with_returning(
returned_fields=["id"], message="hi"
)
assert {r[0] for r in returned} == set(ids)
returned = Group.objects.filter(id=self.group.id).update_with_returning(
returned_fields=["id"], message="hi"
)
assert [r[0] for r in returned] == [self.group.id]
returned = Group.objects.filter(id__in=ids).update_with_returning(
returned_fields=["id", "message"], message="hi"
)
assert {r for r in returned} == {(id_, "hi") for id_ in ids}
def test_empty_query(self) -> None:
assert [] == Group.objects.filter(id__in=[]).update_with_returning(
returned_fields=["id"], message="hi"
)
| TestUpdateWithReturning |
python | davidhalter__jedi | jedi/inference/filters.py | {
"start": 9516,
"end": 11155
} | class ____(DictFilter):
"""
A filter for methods that are defined in this module on the corresponding
classes like Generator (for __next__, etc).
"""
class SpecialMethodName(AbstractNameDefinition):
api_type = 'function'
def __init__(self, parent_context, string_name, callable_, builtin_value):
self.parent_context = parent_context
self.string_name = string_name
self._callable = callable_
self._builtin_value = builtin_value
def infer(self):
for filter in self._builtin_value.get_filters():
# We can take the first index, because on builtin methods there's
# always only going to be one name. The same is true for the
# inferred values.
for name in filter.get(self.string_name):
builtin_func = next(iter(name.infer()))
break
else:
continue
break
return ValueSet([
_BuiltinMappedMethod(self.parent_context, self._callable, builtin_func)
])
def __init__(self, value, dct, builtin_value):
super().__init__(dct)
self.value = value
self._builtin_value = builtin_value
"""
This value is what will be used to introspect the name, where as the
other value will be used to execute the function.
We distinguish, because we have to.
"""
def _convert(self, name, value):
return self.SpecialMethodName(self.value, name, value, self._builtin_value)
| SpecialMethodFilter |
python | facebookresearch__faiss | tests/test_standalone_codec.py | {
"start": 481,
"end": 2116
} | class ____(unittest.TestCase):
def do_encode_twice(self, factory_key):
d = 96
nb = 1000
nq = 0
nt = 2000
xt, x, _ = get_dataset_2(d, nt, nb, nq)
assert x.size > 0
codec = faiss.index_factory(d, factory_key)
codec.train(xt)
codes = codec.sa_encode(x)
x2 = codec.sa_decode(codes)
codes2 = codec.sa_encode(x2)
if 'IVF' in factory_key or 'RQ' in factory_key:
# some rows are not reconstructed exactly because they
# flip into another quantization cell
nrowdiff = (codes != codes2).any(axis=1).sum()
self.assertTrue(nrowdiff < 10)
else:
self.assertTrue(np.all(codes == codes2))
x3 = codec.sa_decode(codes2)
if 'IVF' in factory_key or 'RQ' in factory_key:
diffs = np.abs(x2 - x3).sum(axis=1)
avg = np.abs(x2).sum(axis=1).mean()
diffs.sort()
assert diffs[-10] < avg * 1e-5
else:
self.assertTrue(np.allclose(x2, x3))
def test_SQ8(self):
self.do_encode_twice('SQ8')
def test_IVFSQ8(self):
self.do_encode_twice('IVF256,SQ8')
def test_PCAIVFSQ8(self):
self.do_encode_twice('PCAR32,IVF256,SQ8')
def test_PQ6x8(self):
self.do_encode_twice('PQ6np')
def test_PQ6x6(self):
self.do_encode_twice('PQ6x6np')
def test_IVFPQ6x8np(self):
self.do_encode_twice('IVF512,PQ6np')
def test_LSH(self):
self.do_encode_twice('LSHrt')
def test_RQ6x8(self):
self.do_encode_twice('RQ6x8')
| TestEncodeDecode |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_animated_formats.py | {
"start": 912,
"end": 3301
} | class ____(TestCase):
def test_scale(self):
no_frames = 20
im = create_animated_image(no_frames=no_frames)
frames_count = im.n_frames
self.assertEqual(frames_count, no_frames)
processed = processors.scale_and_crop(im, (100, 100))
processed_frames_count = processed.n_frames
self.assertEqual(frames_count, processed_frames_count)
self.assertEqual(processed.size, (100, 100))
def test_scale_crop(self):
frames = 9
im = create_animated_image(no_frames=frames)
frames_count = im.n_frames
self.assertEqual(frames_count, frames)
processed = processors.scale_and_crop(im, (900, 950), crop=True)
processed_frames_count = processed.n_frames
self.assertEqual(frames_count, processed_frames_count)
self.assertEqual(processed.size, (900, 950))
def test_colorspace(self):
# to have a color conversion
no_frames = 6
im = create_animated_image(format="png")
frames_count = im.n_frames
self.assertEqual(frames_count, no_frames)
processed = processors.colorspace(im, bw=True)
processed_frames_count = processed.n_frames
# indeed processed?
self.assertEqual(frames_count, processed_frames_count)
self.assertEqual(processed.mode, "L")
self.assertEqual(processed.size, (1000, 1000))
def test_filter(self):
no_frames = 12
im = create_animated_image(format="webp", no_frames=no_frames)
frames_count = im.n_frames
self.assertEqual(frames_count, no_frames)
processed = processors.filters(im, detail=True, sharpen=True)
processed_frames_count = processed.n_frames
# indeed processed?
self.assertEqual(frames_count, processed_frames_count)
self.assertEqual(processed.size, (1000, 1000))
def test_background(self):
no_frames = 9
im = create_animated_image(format="webp", no_frames=no_frames)
frames_count = im.n_frames
self.assertEqual(frames_count, no_frames)
processed = processors.background(im, background="#ff00ff", size=(1000, 1800))
processed_frames_count = processed.n_frames
# indeed processed?
self.assertEqual(frames_count, processed_frames_count)
self.assertEqual(processed.size, (1000, 1800))
| AnimatedFormatProcessorsTests |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol19.py | {
"start": 206,
"end": 271
} | class ____(Protocol):
x: Final[int] = field()
@dataclass
| ProtoA |
python | doocs__leetcode | solution/2100-2199/2184.Number of Ways to Build Sturdy Brick Wall/Solution.py | {
"start": 0,
"end": 1393
} | class ____:
def buildWall(self, height: int, width: int, bricks: List[int]) -> int:
def dfs(v):
if v > width:
return
if v == width:
s.append(t[:])
return
for x in bricks:
t.append(x)
dfs(v + x)
t.pop()
def check(a, b):
s1, s2 = a[0], b[0]
i = j = 1
while i < len(a) and j < len(b):
if s1 == s2:
return False
if s1 < s2:
s1 += a[i]
i += 1
else:
s2 += b[j]
j += 1
return True
mod = 10**9 + 7
s = []
t = []
dfs(0)
g = defaultdict(list)
n = len(s)
for i in range(n):
if check(s[i], s[i]):
g[i].append(i)
for j in range(i + 1, n):
if check(s[i], s[j]):
g[i].append(j)
g[j].append(i)
dp = [[0] * n for _ in range(height)]
for j in range(n):
dp[0][j] = 1
for i in range(1, height):
for j in range(n):
for k in g[j]:
dp[i][j] += dp[i - 1][k]
dp[i][j] %= mod
return sum(dp[-1]) % mod
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/cli/plus/constants.py | {
"start": 24,
"end": 122
} | class ____(Enum):
FULL_DEPLOYMENT = "full"
BRANCH_DEPLOYMENT = "branch"
| DgPlusDeploymentType |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 2587,
"end": 2861
} | class ____(ShowFieldTypeAndContent, PolymorphicModel):
field_base = models.CharField(max_length=30)
fk = models.ForeignKey(
"self", on_delete=models.CASCADE, null=True, related_name="relationbase_set"
)
m2m = models.ManyToManyField("self")
| RelationBase |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.