language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | wandb__wandb | wandb/sdk/artifacts/storage_handlers/http_handler.py | {
"start": 922,
"end": 2232
} | class ____:
"""Partial ArtifactManifestEntry fields parsed from an HTTP response."""
ref: str # The reference URL for the manifest entry, i.e. original URL of the request
extra: Dict[str, Any] # noqa: UP006
digest: Optional[str] # noqa: UP045
size: Optional[int] # noqa: UP045
@classmethod
def from_response(cls, rsp: requests.Response) -> Self:
# NOTE(tonyyli): For continuity with prior behavior, note that:
# - `extra["etag"]` includes leading/trailing quotes, if any, from the ETag
# - `digest` strips leading/trailing quotes, if any, from the ETag
#
# - `digest` apparently falls back to the original reference URL (request URL)
# if no ETag is present. This is weird: wouldn't we hash the response body
# instead of using the URL? For now, at the time of writing/refactoring this,
# we'll maintain the prior behavior to minimize risk of breakage.
headers: CaseInsensitiveDict = rsp.headers
etag = headers.get("etag")
ref_url = rsp.request.url
return cls(
ref=cast(str, ref_url),
extra={"etag": etag} if etag else {},
digest=etag.strip('"') if etag else ref_url,
size=headers.get("content-length"),
)
| _HttpEntryInfo |
python | pypa__warehouse | tests/unit/admin/views/test_users.py | {
"start": 6665,
"end": 10993
} | class ____:
def test_updates_user_emails(self, db_request):
email1 = EmailFactory.create(primary=True)
email2 = EmailFactory.create(primary=False)
user = UserFactory.create(emails=[email1, email2])
db_request.matchdict["username"] = str(user.username)
db_request.method = "POST"
db_request.POST["name"] = "Jane Doe"
db_request.POST["emails-0-email"] = email1.email
db_request.POST["emails-0-primary"] = False
db_request.POST["emails-1-email"] = email2.email
db_request.POST["emails-1-primary"] = True
db_request.POST = MultiDict(db_request.POST)
db_request.route_path = pretend.call_recorder(
lambda route_name, username=None: f"/admin/users/{username}/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
resp = views.user_submit_email(user, db_request)
assert email1.primary is False
assert email2.primary is True
assert isinstance(resp, HTTPSeeOther)
assert resp.headers["Location"] == f"/admin/users/{user.username}/"
assert db_request.session.flash.calls == [
pretend.call(f"User '{user.username}': emails updated", queue="success")
]
def test_updates_user_no_primary_email(self, db_request):
email = EmailFactory.create(primary=True)
user = UserFactory.create(emails=[email])
db_request.matchdict["username"] = str(user.username)
db_request.method = "POST"
db_request.POST["name"] = "Jane Doe"
db_request.POST["emails-0-email"] = email.email
# No primary = checkbox unchecked
db_request.POST = MultiDict(db_request.POST)
db_request.route_path = pretend.call_recorder(
lambda route_name, username=None: f"/admin/users/{username}/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
resp = views.user_submit_email(user, db_request)
assert isinstance(resp, HTTPSeeOther)
assert resp.headers["Location"] == f"/admin/users/{user.username}/"
assert db_request.session.flash.calls == [
pretend.call(
"emails: ['There must be exactly one primary email']", queue="error"
)
]
def test_updates_user_multiple_primary_emails(self, db_request):
email1 = EmailFactory.create(primary=True)
email2 = EmailFactory.create(primary=True)
user = UserFactory.create(emails=[email1, email2])
db_request.matchdict["username"] = str(user.username)
db_request.method = "POST"
db_request.POST["name"] = "Jane Doe"
db_request.POST["emails-0-email"] = email1.email
db_request.POST["emails-0-primary"] = "true"
db_request.POST["emails-1-email"] = email2.email
db_request.POST["emails-1-primary"] = "true"
# No primary = checkbox unchecked
db_request.POST = MultiDict(db_request.POST)
db_request.route_path = pretend.call_recorder(
lambda route_name, username=None: f"/admin/users/{username}/"
)
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
resp = views.user_submit_email(user, db_request)
assert isinstance(resp, HTTPSeeOther)
assert resp.headers["Location"] == f"/admin/users/{user.username}/"
assert db_request.session.flash.calls == [
pretend.call(
"emails: ['There must be exactly one primary email']", queue="error"
)
]
def test_user_detail_redirects_actual_name(self, db_request):
user = UserFactory.create(username="wu-tang")
db_request.matchdict["username"] = "Wu-Tang"
db_request.route_path = pretend.call_recorder(
lambda route_name, username=None: "/user/the-redirect/"
)
result = views.user_submit_email(user, db_request)
assert isinstance(result, HTTPMovedPermanently)
assert result.headers["Location"] == "/user/the-redirect/"
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
| TestUserEmailSubmit |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_math.py | {
"start": 113375,
"end": 125371
} | class ____(__TestCase):
""" Tests for math.fma. """
def test_fma_nan_results(self):
# Selected representative values.
values = [
-math.inf, -1e300, -2.3, -1e-300, -0.0,
0.0, 1e-300, 2.3, 1e300, math.inf, math.nan
]
# If any input is a NaN, the result should be a NaN, too.
for a, b in itertools.product(values, repeat=2):
with self.subTest(a=a, b=b):
self.assertIsNaN(math.fma(math.nan, a, b))
self.assertIsNaN(math.fma(a, math.nan, b))
self.assertIsNaN(math.fma(a, b, math.nan))
def test_fma_infinities(self):
# Cases involving infinite inputs or results.
positives = [1e-300, 2.3, 1e300, math.inf]
finites = [-1e300, -2.3, -1e-300, -0.0, 0.0, 1e-300, 2.3, 1e300]
non_nans = [-math.inf, -2.3, -0.0, 0.0, 2.3, math.inf]
# ValueError due to inf * 0 computation.
for c in non_nans:
for infinity in [math.inf, -math.inf]:
for zero in [0.0, -0.0]:
with self.subTest(c=c, infinity=infinity, zero=zero):
with self.assertRaises(ValueError):
math.fma(infinity, zero, c)
with self.assertRaises(ValueError):
math.fma(zero, infinity, c)
# ValueError when a*b and c both infinite of opposite signs.
for b in positives:
with self.subTest(b=b):
with self.assertRaises(ValueError):
math.fma(math.inf, b, -math.inf)
with self.assertRaises(ValueError):
math.fma(math.inf, -b, math.inf)
with self.assertRaises(ValueError):
math.fma(-math.inf, -b, -math.inf)
with self.assertRaises(ValueError):
math.fma(-math.inf, b, math.inf)
with self.assertRaises(ValueError):
math.fma(b, math.inf, -math.inf)
with self.assertRaises(ValueError):
math.fma(-b, math.inf, math.inf)
with self.assertRaises(ValueError):
math.fma(-b, -math.inf, -math.inf)
with self.assertRaises(ValueError):
math.fma(b, -math.inf, math.inf)
# Infinite result when a*b and c both infinite of the same sign.
for b in positives:
with self.subTest(b=b):
self.assertEqual(math.fma(math.inf, b, math.inf), math.inf)
self.assertEqual(math.fma(math.inf, -b, -math.inf), -math.inf)
self.assertEqual(math.fma(-math.inf, -b, math.inf), math.inf)
self.assertEqual(math.fma(-math.inf, b, -math.inf), -math.inf)
self.assertEqual(math.fma(b, math.inf, math.inf), math.inf)
self.assertEqual(math.fma(-b, math.inf, -math.inf), -math.inf)
self.assertEqual(math.fma(-b, -math.inf, math.inf), math.inf)
self.assertEqual(math.fma(b, -math.inf, -math.inf), -math.inf)
# Infinite result when a*b finite, c infinite.
for a, b in itertools.product(finites, finites):
with self.subTest(b=b):
self.assertEqual(math.fma(a, b, math.inf), math.inf)
self.assertEqual(math.fma(a, b, -math.inf), -math.inf)
# Infinite result when a*b infinite, c finite.
for b, c in itertools.product(positives, finites):
with self.subTest(b=b, c=c):
self.assertEqual(math.fma(math.inf, b, c), math.inf)
self.assertEqual(math.fma(-math.inf, b, c), -math.inf)
self.assertEqual(math.fma(-math.inf, -b, c), math.inf)
self.assertEqual(math.fma(math.inf, -b, c), -math.inf)
self.assertEqual(math.fma(b, math.inf, c), math.inf)
self.assertEqual(math.fma(b, -math.inf, c), -math.inf)
self.assertEqual(math.fma(-b, -math.inf, c), math.inf)
self.assertEqual(math.fma(-b, math.inf, c), -math.inf)
# gh-73468: On some platforms, libc fma() doesn't implement IEE 754-2008
# properly: it doesn't use the right sign when the result is zero.
@unittest.skipIf(
sys.platform.startswith(("freebsd", "wasi", "netbsd", "emscripten"))
or (sys.platform == "android" and platform.machine() == "x86_64"),
f"this platform doesn't implement IEE 754-2008 properly")
def test_fma_zero_result(self):
nonnegative_finites = [0.0, 1e-300, 2.3, 1e300]
# Zero results from exact zero inputs.
for b in nonnegative_finites:
with self.subTest(b=b):
self.assertIsPositiveZero(math.fma(0.0, b, 0.0))
self.assertIsPositiveZero(math.fma(0.0, b, -0.0))
self.assertIsNegativeZero(math.fma(0.0, -b, -0.0))
self.assertIsPositiveZero(math.fma(0.0, -b, 0.0))
self.assertIsPositiveZero(math.fma(-0.0, -b, 0.0))
self.assertIsPositiveZero(math.fma(-0.0, -b, -0.0))
self.assertIsNegativeZero(math.fma(-0.0, b, -0.0))
self.assertIsPositiveZero(math.fma(-0.0, b, 0.0))
self.assertIsPositiveZero(math.fma(b, 0.0, 0.0))
self.assertIsPositiveZero(math.fma(b, 0.0, -0.0))
self.assertIsNegativeZero(math.fma(-b, 0.0, -0.0))
self.assertIsPositiveZero(math.fma(-b, 0.0, 0.0))
self.assertIsPositiveZero(math.fma(-b, -0.0, 0.0))
self.assertIsPositiveZero(math.fma(-b, -0.0, -0.0))
self.assertIsNegativeZero(math.fma(b, -0.0, -0.0))
self.assertIsPositiveZero(math.fma(b, -0.0, 0.0))
# Exact zero result from nonzero inputs.
self.assertIsPositiveZero(math.fma(2.0, 2.0, -4.0))
self.assertIsPositiveZero(math.fma(2.0, -2.0, 4.0))
self.assertIsPositiveZero(math.fma(-2.0, -2.0, -4.0))
self.assertIsPositiveZero(math.fma(-2.0, 2.0, 4.0))
# Underflow to zero.
tiny = 1e-300
self.assertIsPositiveZero(math.fma(tiny, tiny, 0.0))
self.assertIsNegativeZero(math.fma(tiny, -tiny, 0.0))
self.assertIsPositiveZero(math.fma(-tiny, -tiny, 0.0))
self.assertIsNegativeZero(math.fma(-tiny, tiny, 0.0))
self.assertIsPositiveZero(math.fma(tiny, tiny, -0.0))
self.assertIsNegativeZero(math.fma(tiny, -tiny, -0.0))
self.assertIsPositiveZero(math.fma(-tiny, -tiny, -0.0))
self.assertIsNegativeZero(math.fma(-tiny, tiny, -0.0))
# Corner case where rounding the multiplication would
# give the wrong result.
x = float.fromhex('0x1p-500')
y = float.fromhex('0x1p-550')
z = float.fromhex('0x1p-1000')
self.assertIsNegativeZero(math.fma(x-y, x+y, -z))
self.assertIsPositiveZero(math.fma(y-x, x+y, z))
self.assertIsNegativeZero(math.fma(y-x, -(x+y), -z))
self.assertIsPositiveZero(math.fma(x-y, -(x+y), z))
def test_fma_overflow(self):
a = b = float.fromhex('0x1p512')
c = float.fromhex('0x1p1023')
# Overflow from multiplication.
with self.assertRaises(OverflowError):
math.fma(a, b, 0.0)
self.assertEqual(math.fma(a, b/2.0, 0.0), c)
# Overflow from the addition.
with self.assertRaises(OverflowError):
math.fma(a, b/2.0, c)
# No overflow, even though a*b overflows a float.
self.assertEqual(math.fma(a, b, -c), c)
# Extreme case: a * b is exactly at the overflow boundary, so the
# tiniest offset makes a difference between overflow and a finite
# result.
a = float.fromhex('0x1.ffffffc000000p+511')
b = float.fromhex('0x1.0000002000000p+512')
c = float.fromhex('0x0.0000000000001p-1022')
with self.assertRaises(OverflowError):
math.fma(a, b, 0.0)
with self.assertRaises(OverflowError):
math.fma(a, b, c)
self.assertEqual(math.fma(a, b, -c),
float.fromhex('0x1.fffffffffffffp+1023'))
# Another extreme case: here a*b is about as large as possible subject
# to math.fma(a, b, c) being finite.
a = float.fromhex('0x1.ae565943785f9p+512')
b = float.fromhex('0x1.3094665de9db8p+512')
c = float.fromhex('0x1.fffffffffffffp+1023')
self.assertEqual(math.fma(a, b, -c), c)
def test_fma_single_round(self):
a = float.fromhex('0x1p-50')
self.assertEqual(math.fma(a - 1.0, a + 1.0, 1.0), a*a)
def test_random(self):
# A collection of randomly generated inputs for which the naive FMA
# (with two rounds) gives a different result from a singly-rounded FMA.
# tuples (a, b, c, expected)
test_values = [
('0x1.694adde428b44p-1', '0x1.371b0d64caed7p-1',
'0x1.f347e7b8deab8p-4', '0x1.19f10da56c8adp-1'),
('0x1.605401ccc6ad6p-2', '0x1.ce3a40bf56640p-2',
'0x1.96e3bf7bf2e20p-2', '0x1.1af6d8aa83101p-1'),
('0x1.e5abd653a67d4p-2', '0x1.a2e400209b3e6p-1',
'0x1.a90051422ce13p-1', '0x1.37d68cc8c0fbbp+0'),
('0x1.f94e8efd54700p-2', '0x1.123065c812cebp-1',
'0x1.458f86fb6ccd0p-1', '0x1.ccdcee26a3ff3p-1'),
('0x1.bd926f1eedc96p-1', '0x1.eee9ca68c5740p-1',
'0x1.960c703eb3298p-2', '0x1.3cdcfb4fdb007p+0'),
('0x1.27348350fbccdp-1', '0x1.3b073914a53f1p-1',
'0x1.e300da5c2b4cbp-1', '0x1.4c51e9a3c4e29p+0'),
('0x1.2774f00b3497bp-1', '0x1.7038ec336bff0p-2',
'0x1.2f6f2ccc3576bp-1', '0x1.99ad9f9c2688bp-1'),
('0x1.51d5a99300e5cp-1', '0x1.5cd74abd445a1p-1',
'0x1.8880ab0bbe530p-1', '0x1.3756f96b91129p+0'),
('0x1.73cb965b821b8p-2', '0x1.218fd3d8d5371p-1',
'0x1.d1ea966a1f758p-2', '0x1.5217b8fd90119p-1'),
('0x1.4aa98e890b046p-1', '0x1.954d85dff1041p-1',
'0x1.122b59317ebdfp-1', '0x1.0bf644b340cc5p+0'),
('0x1.e28f29e44750fp-1', '0x1.4bcc4fdcd18fep-1',
'0x1.fd47f81298259p-1', '0x1.9b000afbc9995p+0'),
('0x1.d2e850717fe78p-3', '0x1.1dd7531c303afp-1',
'0x1.e0869746a2fc2p-2', '0x1.316df6eb26439p-1'),
('0x1.cf89c75ee6fbap-2', '0x1.b23decdc66825p-1',
'0x1.3d1fe76ac6168p-1', '0x1.00d8ea4c12abbp+0'),
('0x1.3265ae6f05572p-2', '0x1.16d7ec285f7a2p-1',
'0x1.0b8405b3827fbp-1', '0x1.5ef33c118a001p-1'),
('0x1.c4d1bf55ec1a5p-1', '0x1.bc59618459e12p-2',
'0x1.ce5b73dc1773dp-1', '0x1.496cf6164f99bp+0'),
('0x1.d350026ac3946p-1', '0x1.9a234e149a68cp-2',
'0x1.f5467b1911fd6p-2', '0x1.b5cee3225caa5p-1'),
]
for a_hex, b_hex, c_hex, expected_hex in test_values:
with self.subTest(a_hex=a_hex, b_hex=b_hex, c_hex=c_hex,
expected_hex=expected_hex):
a = float.fromhex(a_hex)
b = float.fromhex(b_hex)
c = float.fromhex(c_hex)
expected = float.fromhex(expected_hex)
self.assertEqual(math.fma(a, b, c), expected)
self.assertEqual(math.fma(b, a, c), expected)
# Custom assertions.
def assertIsNaN(self, value):
self.assertTrue(
math.isnan(value),
msg="Expected a NaN, got {!r}".format(value)
)
def assertIsPositiveZero(self, value):
self.assertTrue(
value == 0 and math.copysign(1, value) > 0,
msg="Expected a positive zero, got {!r}".format(value)
)
def assertIsNegativeZero(self, value):
self.assertTrue(
value == 0 and math.copysign(1, value) < 0,
msg="Expected a negative zero, got {!r}".format(value)
)
if __name__ == "__main__":
run_tests()
| FMATests |
python | giampaolo__psutil | tests/test_linux.py | {
"start": 26418,
"end": 27616
} | class ____(PsutilTestCase):
@pytest.mark.skipif(
not shutil.which("lscpu"), reason="lscpu utility not available"
)
def test_against_lscpu(self):
out = sh("lscpu -p")
core_ids = set()
for line in out.split('\n'):
if not line.startswith('#'):
fields = line.split(',')
core_ids.add(fields[1])
assert psutil.cpu_count(logical=False) == len(core_ids)
@pytest.mark.skipif(
platform.machine() not in {"x86_64", "i686"}, reason="x86_64/i686 only"
)
def test_method_2(self):
meth_1 = psutil._pslinux.cpu_count_cores()
with mock.patch('glob.glob', return_value=[]) as m:
meth_2 = psutil._pslinux.cpu_count_cores()
assert m.called
if meth_1 is not None:
assert meth_1 == meth_2
def test_emulate_none(self):
with mock.patch('glob.glob', return_value=[]) as m1:
with mock.patch('psutil._common.open', create=True) as m2:
assert psutil._pslinux.cpu_count_cores() is None
assert m1.called
assert m2.called
@pytest.mark.skipif(not LINUX, reason="LINUX only")
| TestSystemCPUCountCores |
python | huggingface__transformers | tests/models/internvl/test_modeling_internvl.py | {
"start": 7661,
"end": 25862
} | class ____(unittest.TestCase):
def setUp(self):
self.small_model_checkpoint = "OpenGVLab/InternVL3-1B-hf"
self.medium_model_checkpoint = "OpenGVLab/InternVL3-2B-hf"
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@require_deterministic_for_xpu
def test_qwen2_small_model_integration_generate(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, device_map=torch_device, dtype=torch.float16
)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = (
"<|im_start|>user\n<IMG_CONTEXT>\nPlease describe the image explicitly.<|im_end|>\n<|im_start|>assistant\n"
)
inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16)
with torch.no_grad():
generate_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False)
decoded_output = processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
# fmt: off
expected_outputs = Expectations(
{
(None, None): "The image shows two cats lying on a pink surface, which appears to be a bed or couch.",
("xpu", 3): "The image shows two cats lying on a pink blanket. The cat on the left is a tabby",
}
)
# fmt: on
expected_output = expected_outputs.get_expectation()
self.assertEqual(decoded_output, expected_output)
def test_qwen2_small_model_integration_forward(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, device_map=torch_device, dtype=torch.float16
)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
prompt = (
"<|im_start|>user\n<IMG_CONTEXT>\nPlease describe the image explicitly.<|im_end|>\n<|im_start|>assistant\n"
)
inputs = processor(images=image, text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16)
# Forward
with torch.inference_mode():
output = model(**inputs)
actual_logits = output.logits[0, -1, :5].cpu()
expected_logits_all = Expectations(
{
("xpu", 3): torch.tensor([11.9922, 14.7188, 14.3125, 10.6719, 6.9297], dtype=torch.float16),
("cuda", 7): torch.tensor([11.9531, 14.7031, 14.2734, 10.6562, 6.9219], dtype=torch.float16),
("cuda", 8): torch.tensor([11.9609, 14.7188, 14.2734, 10.6484, 6.9141], dtype=torch.float16),
}
) # fmt: skip
expected_logits = expected_logits_all.get_expectation()
self.assertTrue(
torch.allclose(actual_logits, expected_logits, atol=0.1),
f"Actual logits: {actual_logits}"
f"\nExpected logits: {expected_logits}"
f"\nDifference: {torch.abs(actual_logits - expected_logits)}",
)
@require_deterministic_for_xpu
def test_qwen2_small_model_integration_generate_text_only(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, device_map=torch_device, dtype=torch.float16
)
prompt = "<|im_start|>user\nWrite a haiku<|im_end|>\n<|im_start|>assistant\n"
inputs = processor(text=prompt, return_tensors="pt").to(torch_device, dtype=torch.float16)
with torch.no_grad():
generate_ids = model.generate(**inputs, max_new_tokens=200, do_sample=False)
decoded_output = processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_outputs = Expectations(
{
("xpu", 3): "Whispers of dawn,\nSilent whispers of night,\nPeace in the stillness.",
("cuda", 7): 'Whispers of dawn,\nSilent whispers of night,\nPeace in the stillness.',
("cuda", 8): 'Whispers of dawn,\nSilent whispers of night,\nPeace in the stillness.',
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(decoded_output, expected_output)
def test_qwen2_small_model_integration_generate_chat_template(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, device_map=torch_device, dtype=torch.float16
)
messages = [
{
"role": "user",
"content": [
{"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
{"type": "text", "text": "Please describe the image explicitly."},
],
}
]
inputs = processor.apply_chat_template(
messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt"
).to(torch_device, dtype=torch.float16)
with torch.no_grad():
generate_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False)
decoded_output = processor.decode(
generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True
)
expected_output = "The image shows two cats lying on a pink surface, which appears to be a bed or couch."
self.assertEqual(decoded_output, expected_output)
@require_deterministic_for_xpu
def test_qwen2_small_model_integration_batched_generate(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, device_map=torch_device, dtype=torch.float16
)
# Prepare inputs
prompt = [
"<|im_start|>user\n<IMG_CONTEXT>\nWrite a haiku for this image<|im_end|>\n<|im_start|>assistant\n",
"<|im_start|>user\n<IMG_CONTEXT>\nDescribe this image<|im_end|>\n<|im_start|>assistant\n",
]
image1 = Image.open(requests.get("https://llava-vl.github.io/static/images/view.jpg", stream=True).raw)
image2 = Image.open(
requests.get(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
stream=True,
).raw
)
inputs = processor(text=prompt, images=[[image1], [image2]], padding=True, return_tensors="pt").to(
torch_device, dtype=torch.float16
)
output = model.generate(**inputs, do_sample=False, max_new_tokens=25)
# Check first output
decoded_output = processor.decode(output[0], skip_special_tokens=True)
expected_output = "user\n\nWrite a haiku for this image\nassistant\nSilky lake, \nWooden pier, \nNature's peace." # fmt: skip
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
# Check second output
decoded_output = processor.decode(output[1], skip_special_tokens=True)
expected_outputs = Expectations(
{
("xpu", 3): 'user\n\nDescribe this image\nassistant\nThe image shows a street scene with a traditional Chinese archway, known as a "Chinese Gate" or "Chinese Gate of',
("cuda", 7): 'user\n\nDescribe this image\nassistant\nThe image shows a street scene with a traditional Chinese archway, known as a "Chinese Gate" or "Chinese Gate of',
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
def test_qwen2_small_model_integration_batched_generate_multi_image(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, device_map=torch_device, dtype=torch.float16
)
# Prepare inputs
prompt = [
"<|im_start|>user\n<IMG_CONTEXT>\nWrite a haiku for this image<|im_end|>\n<|im_start|>assistant\n",
"<|im_start|>user\n<IMG_CONTEXT><IMG_CONTEXT>\nWhat are the differences between these two images?<|im_end|>\n<|im_start|>assistant\n",
]
image1 = Image.open(requests.get("https://llava-vl.github.io/static/images/view.jpg", stream=True).raw)
image2 = Image.open(
BytesIO(
requests.get(
"https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
).content
)
)
image3 = Image.open(
BytesIO(
requests.get(
"https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg"
).content
)
)
inputs = processor(text=prompt, images=[[image1], [image2, image3]], padding=True, return_tensors="pt").to(
torch_device, dtype=torch.float16
)
output = model.generate(**inputs, do_sample=False, max_new_tokens=25)
# Check first output
decoded_output = processor.decode(output[0], skip_special_tokens=True)
# Batching seems to alter the output slightly, but it is also the case in the original implementation. This seems to be expected: https://github.com/huggingface/transformers/issues/23017#issuecomment-1649630232
expected_output = "user\n\nWrite a haiku for this image\nassistant\nSilky lake, \nWooden pier, \nNature's peace." # fmt: skip
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
# Check second output
decoded_output = processor.decode(output[1], skip_special_tokens=True)
expected_outputs = Expectations(
{
("xpu", 3): "user\n\nWhat are the differences between these two images?\nassistant\nThe images show the Statue of Liberty and the Golden Gate Bridge from different angles. Here are the differences:\n\n1. **Foreground",
("cuda", 7): "user\n\nWhat are the differences between these two images?\nassistant\nThe images show the Statue of Liberty and the Golden Gate Bridge from different angles. Here are the differences:\n\n1. **Foreground",
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
@require_av
@require_bitsandbytes
def test_qwen2_medium_model_integration_video(self):
processor = AutoProcessor.from_pretrained(self.medium_model_checkpoint)
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
model = InternVLForConditionalGeneration.from_pretrained(
self.medium_model_checkpoint, quantization_config=quantization_config
)
# Prepare inputs
messages = [
{
"role": "user",
"content": [
{
"type": "video",
"url": "https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4",
},
{"type": "text", "text": "What type of shot is the man performing?"},
],
}
]
inputs = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
num_frames=8,
).to(torch_device, dtype=torch.float16)
output = model.generate(**inputs, do_sample=False, max_new_tokens=25)
decoded_output = processor.decode(output[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True)
expected_outputs = Expectations(
{
("xpu", 3): "The man is performing a volley.",
("cuda", 7): "The man is performing a forehand shot.",
("rocm", (9, 5)): "The man is performing a volley shot.",
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
@require_av
@require_deterministic_for_xpu
def test_qwen2_small_model_integration_interleaved_images_videos(self):
processor = AutoProcessor.from_pretrained(self.small_model_checkpoint)
model = InternVLForConditionalGeneration.from_pretrained(
self.small_model_checkpoint, dtype=torch.float16, device_map=torch_device
)
messages = [
[
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
},
{
"type": "image",
"url": "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg",
},
{"type": "text", "text": "What are the differences between these two images?"},
],
},
],
[
{
"role": "user",
"content": [
{
"type": "video",
"url": "https://huggingface.co/datasets/hf-internal-testing/fixtures_videos/resolve/main/tennis.mp4",
},
{"type": "text", "text": "What type of shot is the man performing?"},
],
},
],
[
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://llava-vl.github.io/static/images/view.jpg",
},
{"type": "text", "text": "Write a haiku for this image"},
],
}
],
]
inputs = processor.apply_chat_template(
messages,
add_generation_prompt=True,
tokenize=True,
return_dict=True,
return_tensors="pt",
padding=True,
num_frames=8,
).to(torch_device, dtype=torch.float16)
output = model.generate(**inputs, do_sample=False, max_new_tokens=25)
decoded_output = processor.decode(output[0], skip_special_tokens=True)
# Batching seems to alter the output slightly, but it is also the case in the original implementation. This seems to be expected: https://github.com/huggingface/transformers/issues/23017#issuecomment-1649630232
expected_outputs = Expectations(
{
("xpu", 3): "user\n\n\nWhat are the differences between these two images?\nassistant\nThe images depict two distinct scenes:\n\n1. **Left Image:**\n - The Statue of Liberty is prominently featured on an",
("cuda", 7): 'user\n\n\nWhat are the differences between these two images?\nassistant\nThe images depict two distinct scenes:\n\n1. **Left Image:**\n - The Statue of Liberty is prominently featured on an',
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
# Check second output
decoded_output = processor.decode(output[1], skip_special_tokens=True)
expected_outputs = Expectations(
{
("xpu", 3): "user\nFrame1: \nFrame2: \nFrame3: \nFrame4: \nFrame5: \nFrame6: \nFrame7: \nFrame8: \nWhat type of shot is the man performing?\nassistant\nA forehand shot",
("cuda", 7): 'user\nFrame1: \nFrame2: \nFrame3: \nFrame4: \nFrame5: \nFrame6: \nFrame7: \nFrame8: \nWhat type of shot is the man performing?\nassistant\nA forehand shot',
}
) # fmt: skip
expected_output = expected_outputs.get_expectation()
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
# Check third output
decoded_output = processor.decode(output[2], skip_special_tokens=True)
expected_output = (
"user\n\nWrite a haiku for this image\nassistant\nSilky lake, \nWooden pier, \nNature's peace."
)
self.assertEqual(
decoded_output,
expected_output,
f"Decoded output: {decoded_output}\nExpected output: {expected_output}",
)
@slow
@require_torch_accelerator
| InternVLQwen2IntegrationTest |
python | apache__airflow | providers/pagerduty/tests/unit/pagerduty/hooks/test_pagerduty_events.py | {
"start": 1624,
"end": 2698
} | class ____:
def test_prepare_event_data(self):
exp_event_data = {
"action": "trigger",
"dedup_key": "random",
"payload": {
"severity": "error",
"source": "airflow_test",
"summary": "test",
},
}
even_data = prepare_event_data(
summary="test", source="airflow_test", severity="error", dedup_key="random"
)
assert even_data == exp_event_data
def test_prepare_event_data_invalid_action(self):
with pytest.raises(ValueError, match="Event action must be one of: trigger, acknowledge, resolve"):
prepare_event_data(summary="test", severity="error", action="should_raise")
def test_prepare_event_missing_dedup_key(self):
with pytest.raises(
ValueError,
match="The dedup_key property is required for action=acknowledge events, and it must be a string",
):
prepare_event_data(summary="test", severity="error", action="acknowledge")
| TestPrepareEventData |
python | readthedocs__readthedocs.org | readthedocs/builds/tests/test_views.py | {
"start": 608,
"end": 3996
} | class ____(TestCase):
def setUp(self):
self.user = get(User, username="test")
self.project = self._get_project(owners=[self.user])
self.version = get(Version, project=self.project)
self.build = get(
Build,
project=self.project,
version=self.version,
task_id="1234",
state=BUILD_STATE_INSTALLING,
)
def test_cancel_running_build(self, app):
self.build.state = BUILD_STATE_INSTALLING
self.build.save()
self.client.force_login(self.user)
url = reverse("builds_detail", args=[self.project.slug, self.build.pk])
resp = self.client.post(url)
self.assertEqual(resp.status_code, 302)
app.control.revoke.assert_called_once_with(
self.build.task_id, signal=mock.ANY, terminate=True
)
self.build.refresh_from_db()
self.assertEqual(self.build.state, BUILD_STATE_INSTALLING)
def test_cancel_triggered_build(self, app):
self.build.state = BUILD_STATE_TRIGGERED
self.build.save()
self.client.force_login(self.user)
url = reverse("builds_detail", args=[self.project.slug, self.build.pk])
resp = self.client.post(url)
self.assertEqual(resp.status_code, 302)
app.control.revoke.assert_called_once_with(
self.build.task_id, signal=mock.ANY, terminate=False
)
self.build.refresh_from_db()
self.assertEqual(self.build.state, BUILD_STATE_CANCELLED)
def test_cancel_build_anonymous_user(self, app):
url = reverse("builds_detail", args=[self.project.slug, self.build.pk])
self.client.logout()
resp = self.client.post(url)
self.assertEqual(resp.status_code, 302)
app.control.revoke.assert_not_called()
def test_cancel_build_from_another_project(self, app):
another_user = get(User)
another_project = self._get_project(owners=[another_user])
another_build = get(
Build,
project=another_project,
version=another_project.versions.first(),
state=BUILD_STATE_INSTALLING,
)
self.client.force_login(another_user)
url = reverse("builds_detail", args=[self.project.slug, self.build.pk])
resp = self.client.post(url)
self.assertEqual(resp.status_code, 403)
app.control.revoke.assert_not_called()
url = reverse("builds_detail", args=[another_project.slug, self.build.pk])
resp = self.client.post(url)
self.assertEqual(resp.status_code, 404)
app.control.revoke.assert_not_called()
url = reverse("builds_detail", args=[another_project.slug, another_build.pk])
resp = self.client.post(url)
self.assertEqual(resp.status_code, 302)
app.control.revoke.assert_called_once_with(
another_build.task_id, signal=mock.ANY, terminate=True
)
def _get_project(self, owners, **kwargs):
if settings.RTD_ALLOW_ORGANIZATIONS:
# TODO: don't set `users=owners` when using orgs, it's redundant.
project = get(Project, users=owners, **kwargs)
get(Organization, projects=[project], owners=owners)
return project
return get(Project, users=owners, **kwargs)
@override_settings(RTD_ALLOW_ORGANIZATIONS=True)
| CancelBuildViewTests |
python | huggingface__transformers | src/transformers/models/ministral/configuration_ministral.py | {
"start": 671,
"end": 8616
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MinistralModel`]. It is used to instantiate an
Ministral model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Ministral-8B-Instruct-2410.
[mistralai/Ministral-8B-Instruct-2410](https://huggingface.co/mistralai/Ministral-8B-Instruct-2410)
[mistralai/Ministral-8B-Instruct-2410](https://huggingface.co/mistralai/Ministral-8B-Instruct-2410)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Ministral model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MinistralModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 14336):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
The attention head dimension.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
The maximum sequence length that this model might ever be used with. Ministral's sliding window attention
allows sequence of up to 4096*32 tokens.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*):
The id of the padding token.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
sliding_window (`int`, *optional*, defaults to 4096):
Sliding window attention window size. If not specified, will default to `4096`.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
layer_types (`list`, *optional*):
Attention pattern for each layer.
```python
>>> from transformers import MinistralModel, MinistralConfig
>>> # Initializing a Ministral 8B style configuration
>>> configuration = MinistralConfig()
>>> # Initializing a model from the Ministral 8B style configuration
>>> model = MinistralModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "ministral"
keys_to_ignore_at_inference = ["past_key_values"]
# Default tensor parallel plan for base model `MinistralModel`
base_model_tp_plan = {
"layers.*.self_attn.q_proj": "colwise",
"layers.*.self_attn.k_proj": "colwise",
"layers.*.self_attn.v_proj": "colwise",
"layers.*.self_attn.o_proj": "rowwise",
"layers.*.mlp.gate_proj": "colwise",
"layers.*.mlp.up_proj": "colwise",
"layers.*.mlp.down_proj": "rowwise",
}
base_model_pp_plan = {
"embed_tokens": (["input_ids"], ["inputs_embeds"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size: Optional[int] = 32000,
hidden_size: Optional[int] = 4096,
intermediate_size: Optional[int] = 14336,
num_hidden_layers: Optional[int] = 32,
num_attention_heads: Optional[int] = 32,
num_key_value_heads: Optional[int] = 8,
head_dim: Optional[int] = None,
hidden_act: Optional[str] = "silu",
max_position_embeddings: Optional[int] = 4096 * 32,
initializer_range: Optional[float] = 0.02,
rms_norm_eps: Optional[float] = 1e-6,
use_cache: Optional[bool] = True,
pad_token_id: Optional[int] = None,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[bool] = False,
rope_parameters: Optional[RopeParameters] = None,
sliding_window: Optional[int] = 4096,
attention_dropout: Optional[float] = 0.0,
layer_types: Optional[list[str]] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.sliding_window = sliding_window
self.head_dim = head_dim
# for backward compatibility
if num_key_value_heads is None:
num_key_value_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.attention_dropout = attention_dropout
self.layer_types = layer_types
if self.layer_types is None:
self.layer_types = [
"sliding_attention" if self.sliding_window is not None else "full_attention"
] * num_hidden_layers
self.rope_parameters = rope_parameters
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["MinistralConfig"]
| MinistralConfig |
python | django__django | tests/admin_scripts/tests.py | {
"start": 53722,
"end": 58980
} | class ____(AdminScriptTestCase):
def test_nonexistent_app(self):
"""check reports an error on a nonexistent app in INSTALLED_APPS."""
self.write_settings(
"settings.py",
apps=["admin_scriptz.broken_app"],
sdict={"USE_I18N": False},
)
args = ["check"]
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "ModuleNotFoundError")
self.assertOutput(err, "No module named")
self.assertOutput(err, "admin_scriptz")
def test_broken_app(self):
"""manage.py check reports an ImportError if an app's models.py
raises one on import"""
self.write_settings("settings.py", apps=["admin_scripts.broken_app"])
args = ["check"]
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "ImportError")
def test_complex_app(self):
"""manage.py check does not raise an ImportError validating a
complex app with nested calls to load_app"""
self.write_settings(
"settings.py",
apps=[
"admin_scripts.complex_app",
"admin_scripts.simple_app",
"django.contrib.admin.apps.SimpleAdminConfig",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
],
sdict={
"DEBUG": True,
"MIDDLEWARE": [
"django.contrib.messages.middleware.MessageMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
],
"TEMPLATES": [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
],
},
)
args = ["check"]
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertEqual(out, "System check identified no issues (0 silenced).\n")
def test_app_with_import(self):
"""manage.py check does not raise errors when an app imports a base
class that itself has an abstract base."""
self.write_settings(
"settings.py",
apps=[
"admin_scripts.app_with_import",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
],
sdict={"DEBUG": True},
)
args = ["check"]
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertEqual(out, "System check identified no issues (0 silenced).\n")
def test_output_format(self):
"""All errors/warnings should be sorted by level and by message."""
self.write_settings(
"settings.py",
apps=[
"admin_scripts.app_raising_messages",
"django.contrib.auth",
"django.contrib.contenttypes",
],
sdict={"DEBUG": True},
)
args = ["check"]
out, err = self.run_manage(args)
expected_err = (
"SystemCheckError: System check identified some issues:\n"
"\n"
"ERRORS:\n"
"?: An error\n"
"\tHINT: Error hint\n"
"\n"
"WARNINGS:\n"
"a: Second warning\n"
"obj: First warning\n"
"\tHINT: Hint\n"
"\n"
"System check identified 3 issues (0 silenced).\n"
)
self.assertEqual(err, expected_err)
self.assertNoOutput(out)
def test_warning_does_not_halt(self):
"""
When there are only warnings or less serious messages, then Django
should not prevent user from launching their project, so `check`
command should not raise `CommandError` exception.
In this test we also test output format.
"""
self.write_settings(
"settings.py",
apps=[
"admin_scripts.app_raising_warning",
"django.contrib.auth",
"django.contrib.contenttypes",
],
sdict={"DEBUG": True},
)
args = ["check"]
out, err = self.run_manage(args)
expected_err = (
"System check identified some issues:\n" # No "CommandError: "
"\n"
"WARNINGS:\n"
"?: A warning\n"
"\n"
"System check identified 1 issue (0 silenced).\n"
)
self.assertEqual(err, expected_err)
self.assertNoOutput(out)
| ManageCheck |
python | pandas-dev__pandas | pandas/tests/series/indexing/test_setitem.py | {
"start": 42109,
"end": 42417
} | class ____(CoercionTest):
# previously test_setitem_series_int8 in tests.indexing.test_coercion
@pytest.fixture
def obj(self):
return Series([1, 2, 3, 4], dtype=np.int8)
@pytest.mark.parametrize("val", [1, 1.1, 1 + 1j, True])
@pytest.mark.parametrize("exp_dtype", [object])
| TestCoercionInt8 |
python | coleifer__peewee | tests/libs/mock.py | {
"start": 73252,
"end": 74959
} | class ____(object):
def __init__(self, spec, spec_set=False, parent=None,
name=None, ids=None, instance=False):
self.spec = spec
self.ids = ids
self.spec_set = spec_set
self.parent = parent
self.instance = instance
self.name = name
FunctionTypes = (
# python function
type(create_autospec),
# instance method
type(ANY.__eq__),
# unbound method
type(_ANY.__eq__),
)
FunctionAttributes = set([
'func_closure',
'func_code',
'func_defaults',
'func_dict',
'func_doc',
'func_globals',
'func_name',
])
file_spec = None
def mock_open(mock=None, read_data=''):
"""
A helper function to create a mock to replace the use of `open`. It works
for `open` called directly or used as a context manager.
The `mock` argument is the mock object to configure. If `None` (the
default) then a `MagicMock` will be created for you, with the API limited
to methods or attributes available on standard file handles.
`read_data` is a string for the `read` method of the file handle to return.
This is an empty string by default.
"""
global file_spec
if file_spec is None:
# set on first use
if inPy3k:
import _io
file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO))))
else:
file_spec = file
if mock is None:
mock = MagicMock(name='open', spec=open)
handle = MagicMock(spec=file_spec)
handle.write.return_value = None
handle.__enter__.return_value = handle
handle.read.return_value = read_data
mock.return_value = handle
return mock
| _SpecState |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_gtk4.py | {
"start": 20189,
"end": 20436
} | class ____(backend_tools.SaveFigureBase):
def trigger(self, *args, **kwargs):
NavigationToolbar2GTK4.save_figure(
self._make_classic_style_pseudo_toolbar())
@backend_tools._register_tool_class(FigureCanvasGTK4)
| SaveFigureGTK4 |
python | dagster-io__dagster | python_modules/dagster/dagster/components/core/component_tree_state.py | {
"start": 1211,
"end": 5344
} | class ____:
"""Stateful class which tracks the state of the component tree
as it is loaded.
"""
def __init__(self, defs_module_path: Path):
self._defs_module_path = defs_module_path
self._component_load_dependents_dict: dict[ComponentPath, set[ComponentPath]] = defaultdict(
set
)
self._component_defs_dependents_dict: dict[ComponentPath, set[ComponentPath]] = defaultdict(
set
)
self._component_defs_state_key_dict: dict[str, ComponentPath] = {}
self._cache: dict[ComponentPath, _CacheData] = defaultdict(_CacheData)
def _invalidate_path_inner(self, path: ComponentPath, visited: set[ComponentPath]) -> None:
"""Invalidate the cache for a given component path and all of its dependents."""
# we invalidate both the fully-specified path and the generic path (without an instance key)
# because some cache data (e.g. component decl information) is stored on the generic path
# because it is generated before we know the instance key
for p in [path, path.without_instance_key()]:
self._cache.pop(p, None)
visited.add(p)
deps = self.get_direct_defs_dependents(p) | self.get_direct_load_dependents(p)
for d in deps:
if d not in visited:
self._invalidate_path_inner(d, visited)
def invalidate_path(self, path: ResolvableToComponentPath) -> None:
"""Invalidates the cache for a given component path and all of its dependents."""
path = ComponentPath.from_resolvable(self._defs_module_path, path)
self._invalidate_path_inner(path, set())
def get_cache_data(self, path: ResolvableToComponentPath) -> _CacheData:
resolved_path = ComponentPath.from_resolvable(self._defs_module_path, path)
return self._cache[resolved_path]
def set_cache_data(self, path: ResolvableToComponentPath, **kwargs) -> None:
resolved_path = ComponentPath.from_resolvable(self._defs_module_path, path)
current_data = self._cache[resolved_path]
self._cache[resolved_path] = replace(current_data, **kwargs)
def mark_component_load_dependency(
self, from_path: ComponentPath, to_path: ComponentPath
) -> None:
self._component_load_dependents_dict[to_path].add(from_path)
def mark_component_defs_dependency(
self, from_path: ComponentPath, to_path: ComponentPath
) -> None:
self._component_defs_dependents_dict[to_path].add(from_path)
def mark_component_defs_state_key(self, path: ComponentPath, defs_state_key: str) -> None:
# warns if components share the same defs state key
existing_path = self._component_defs_state_key_dict.get(defs_state_key)
if existing_path is not None and existing_path != path:
warnings.warn(
f"Multiple components have the same defs state key: {defs_state_key}\n"
"Component paths: \n"
f" {existing_path}\n"
f" {path}\n"
"Configure or override the `get_defs_state_config` method on one or both components to disambiguate.",
category=DuplicateDefsStateKeyWarning,
stacklevel=2,
)
self._component_defs_state_key_dict[defs_state_key] = path
def get_direct_load_dependents(self, path: ComponentPath) -> set[ComponentPath]:
"""Returns the set of components that directly depend on the given component.
Args:
path: The path to the component to get the direct load dependents of.
"""
return self._component_load_dependents_dict[path]
def get_direct_defs_dependents(self, path: ComponentPath) -> set[ComponentPath]:
"""Returns the set of components that directly depend on the given component's defs.
Args:
defs_module_path: The path to the defs module.
component_path: The path to the component to get the direct defs dependents of.
"""
return self._component_defs_dependents_dict[path]
| ComponentTreeStateTracker |
python | tensorflow__tensorflow | tensorflow/python/eager/polymorphic_function/concrete_function.py | {
"start": 37004,
"end": 39059
} | class ____(_TapeGradientFunctions):
"""Caches tape-friendly functions for first-order gradients."""
def __init__(
self,
func_graph: func_graph_module.FuncGraph,
attrs,
func_graph_deleter,
forwardprop_input_indices,
delayed_rewrite_functions,
need_gradients_for_jvps,
):
super().__init__(
func_graph,
attrs,
func_graph_deleter,
forwardprop_input_indices,
delayed_rewrite_functions,
need_gradients_for_jvps,
)
self._func_graph_deleter = func_graph_deleter
self._forwardprop_input_indices = forwardprop_input_indices
def _forward_and_backward_functions(self, inference_args, input_tangents):
"""Shortcut for when only first-order gradients are required.
The returned backward function does not accept gradients with respect to
side output of forward_function. This is fine as long as the user can't
possibly request second order tape gradients, as when they've used a single
non-persistent GradientTape. Since we don't need the backward function to
take gradients with respect to side outputs, we can skip some potentially
slow graph building.
Args:
inference_args: A flat list of Tensors, arguments to the inference
function.
input_tangents: A flat list of Tensors, jvps associated with
`inference_args`.
Returns:
A tuple of (forward_function, backward_function):
forward_function: Takes the same inputs as the inference function, but
returns side outputs used by backward_function in addition to the
inference function's outputs.
backward_function: Takes side outputs from forward_function and
gradients with respect to the "real" outputs of forward_function and
returns gradients with respect to the inputs.
"""
outputs = self._func_graph.outputs[:self._num_inference_outputs]
return self._build_functions_for_outputs(
outputs, inference_args, input_tangents)
| _FirstOrderTapeGradientFunctions |
python | getsentry__sentry | tests/sentry/workflow_engine/models/test_data_source.py | {
"start": 418,
"end": 3203
} | class ____(BaseWorkflowTest):
def test_invalid_data_source_type(self) -> None:
with pytest.raises(ValueError):
self.create_data_source(type="invalid_type")
def test_data_source_valid_type(self) -> None:
# Make sure the mock was registered in test_base
assert isinstance(data_source_type_registry.get("test"), mock.Mock)
data_source = self.create_data_source(type="test")
assert data_source is not None
assert data_source.type == "test"
def test_normalize_before_relocation_import(self) -> None:
monitor = self.create_monitor(project=self.project)
data_source = self.create_data_source(
type=DATA_SOURCE_CRON_MONITOR,
source_id=str(monitor.id),
organization_id=self.organization.id,
)
old_monitor_pk = monitor.id
new_monitor_pk = 9999
old_data_source_id = data_source.id
old_org_id = data_source.organization_id
# Create a PrimaryKeyMap that maps the old monitor ID to a new one
pk_map = PrimaryKeyMap()
pk_map.insert(
model_name=NormalizedModelName("monitors.monitor"),
old=old_monitor_pk,
new=new_monitor_pk,
kind=ImportKind.Inserted,
)
pk_map.insert(
model_name=NormalizedModelName("sentry.organization"),
old=old_org_id,
new=old_org_id,
kind=ImportKind.Inserted,
)
old_data_source_pk = data_source.normalize_before_relocation_import(
pk_map, ImportScope.Organization, ImportFlags()
)
assert (
old_data_source_pk == old_data_source_id
), f"Expected {old_data_source_id}, got {old_data_source_pk}"
assert data_source.source_id == str(new_monitor_pk)
assert data_source.pk is None
def test_normalize_before_relocation_import_missing_source(self) -> None:
monitor = self.create_monitor(project=self.project)
data_source = self.create_data_source(
type=DATA_SOURCE_CRON_MONITOR,
source_id=str(monitor.id),
organization_id=self.organization.id,
)
old_org_id = data_source.organization_id
# Create a PrimaryKeyMap without the monitor mapping
pk_map = PrimaryKeyMap()
pk_map.insert(
model_name=NormalizedModelName("sentry.organization"),
old=old_org_id,
new=old_org_id,
kind=ImportKind.Inserted,
)
result = data_source.normalize_before_relocation_import(
pk_map, ImportScope.Organization, ImportFlags()
)
# Should return None when the referenced source is not in pk_map
assert result is None
| DataSourceTest |
python | dagster-io__dagster | python_modules/dagster/dagster/components/resolved/core_models.py | {
"start": 4124,
"end": 4228
} | class ____(Resolvable, Model):
type: Literal["single_run"] = "single_run"
| SingleRunBackfillPolicyModel |
python | huggingface__transformers | src/transformers/models/convbert/configuration_convbert.py | {
"start": 775,
"end": 6148
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`ConvBertModel`]. It is used to instantiate an
ConvBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the ConvBERT
[YituTech/conv-bert-base](https://huggingface.co/YituTech/conv-bert-base) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the ConvBERT model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`ConvBertModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`ConvBertModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
head_ratio (`int`, *optional*, defaults to 2):
Ratio gamma to reduce the number of attention heads.
num_groups (`int`, *optional*, defaults to 1):
The number of groups for grouped linear layers for ConvBert model
conv_kernel_size (`int`, *optional*, defaults to 9):
The size of the convolutional kernel.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.
Example:
```python
>>> from transformers import ConvBertConfig, ConvBertModel
>>> # Initializing a ConvBERT convbert-base-uncased style configuration
>>> configuration = ConvBertConfig()
>>> # Initializing a model (with random weights) from the convbert-base-uncased style configuration
>>> model = ConvBertModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "convbert"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
embedding_size=768,
head_ratio=2,
conv_kernel_size=9,
num_groups=1,
classifier_dropout=None,
**kwargs,
):
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
**kwargs,
)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.embedding_size = embedding_size
self.head_ratio = head_ratio
self.conv_kernel_size = conv_kernel_size
self.num_groups = num_groups
self.classifier_dropout = classifier_dropout
__all__ = ["ConvBertConfig"]
| ConvBertConfig |
python | ray-project__ray | python/ray/autoscaler/_private/kuberay/node_provider.py | {
"start": 12425,
"end": 21205
} | class ____(BatchingNodeProvider): # type: ignore
def __init__(
self,
provider_config: Dict[str, Any],
cluster_name: str,
):
logger.info("Creating KubeRayNodeProvider.")
self.namespace = provider_config["namespace"]
self.cluster_name = cluster_name
self.k8s_api_client = KubernetesHttpApiClient(self.namespace)
assert (
provider_config.get(WORKER_LIVENESS_CHECK_KEY, True) is False
), f"To use KubeRayNodeProvider, must set `{WORKER_LIVENESS_CHECK_KEY}:False`."
BatchingNodeProvider.__init__(self, provider_config, cluster_name)
def get_node_data(self) -> Dict[NodeID, NodeData]:
"""Queries K8s for pods in the RayCluster. Converts that pod data into a
map of pod name to Ray NodeData, as required by BatchingNodeProvider.
"""
# Store the raycluster CR
self._raycluster = self._get(f"rayclusters/{self.cluster_name}")
# Get the pods resource version.
# Specifying a resource version in list requests is important for scalability:
# https://kubernetes.io/docs/reference/using-api/api-concepts/#semantics-for-get-and-list
resource_version = self._get_pods_resource_version()
if resource_version:
logger.info(
f"Listing pods for RayCluster {self.cluster_name}"
f" in namespace {self.namespace}"
f" at pods resource version >= {resource_version}."
)
# Filter pods by cluster_name.
label_selector = requests.utils.quote(f"ray.io/cluster={self.cluster_name}")
resource_path = f"pods?labelSelector={label_selector}"
if resource_version:
resource_path += (
f"&resourceVersion={resource_version}"
+ "&resourceVersionMatch=NotOlderThan"
)
pod_list = self._get(resource_path)
fetched_resource_version = pod_list["metadata"]["resourceVersion"]
logger.info(
f"Fetched pod data at resource version" f" {fetched_resource_version}."
)
# Extract node data from the pod list.
node_data_dict = {}
for pod in pod_list["items"]:
# Kubernetes sets metadata.deletionTimestamp immediately after admitting a
# request to delete an object. Full removal of the object may take some time
# after the deletion timestamp is set. See link for details:
# https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-deletion
if "deletionTimestamp" in pod["metadata"]:
# Ignore pods marked for termination.
continue
pod_name = pod["metadata"]["name"]
node_data_dict[pod_name] = node_data_from_pod(pod)
return node_data_dict
def submit_scale_request(self, scale_request: ScaleRequest):
"""Converts the scale request generated by BatchingNodeProvider into
a patch that modifies the RayCluster CR's replicas and/or workersToDelete
fields. Then submits the patch to the K8s API server.
"""
# Transform the scale request into a patch payload.
patch_payload = self._scale_request_to_patch_payload(
scale_request, self._raycluster
)
# Submit the patch to K8s.
logger.info(
"Autoscaler is submitting the following patch to RayCluster "
f"{self.cluster_name} in namespace {self.namespace}."
)
logger.info(patch_payload)
self._submit_raycluster_patch(patch_payload)
def safe_to_scale(self) -> bool:
"""Returns False iff non_terminated_nodes contains any pods in the RayCluster's
workersToDelete lists.
Explanation:
If there are any workersToDelete which are non-terminated,
we should wait for the operator to do its job and delete those
pods. Therefore, we back off the autoscaler update.
If, on the other hand, all of the workersToDelete have already been cleaned up,
then we patch away the workersToDelete lists and return True.
In the future, we may consider having the operator clean up workersToDelete
on it own:
https://github.com/ray-project/kuberay/issues/733
Note (Dmitri):
It is stylistically bad that this function has a side effect.
"""
# Get the list of nodes.
node_set = set(self.node_data_dict.keys())
worker_groups = self._raycluster["spec"].get("workerGroupSpecs", [])
# Accumulates the indices of worker groups with non-empty workersToDelete
non_empty_worker_group_indices = []
for group_index, worker_group in enumerate(worker_groups):
workersToDelete = worker_group.get("scaleStrategy", {}).get(
"workersToDelete", []
)
if workersToDelete:
non_empty_worker_group_indices.append(group_index)
for worker in workersToDelete:
if worker in node_set:
# The operator hasn't removed this worker yet. Abort
# the autoscaler update.
logger.warning(f"Waiting for operator to remove worker {worker}.")
return False
# All required workersToDelete have been removed.
# Clean up the workersToDelete field.
patch_payload = []
for group_index in non_empty_worker_group_indices:
patch = worker_delete_patch(group_index, workers_to_delete=[])
patch_payload.append(patch)
if patch_payload:
logger.info("Cleaning up workers to delete.")
logger.info(f"Submitting patch {patch_payload}.")
self._submit_raycluster_patch(patch_payload)
# It's safe to proceed with the autoscaler update.
return True
def _get_pods_resource_version(self) -> str:
"""
Extract a recent pods resource version by reading the head pod's
metadata.resourceVersion of the response.
"""
if not RAY_HEAD_POD_NAME:
return None
pod_resp = self._get(f"pods/{RAY_HEAD_POD_NAME}")
return pod_resp["metadata"]["resourceVersion"]
def _scale_request_to_patch_payload(
self, scale_request: ScaleRequest, raycluster: Dict[str, Any]
) -> List[Dict[str, Any]]:
"""Converts autoscaler scale request into a RayCluster CR patch payload."""
patch_payload = []
# Collect patches for replica counts.
for node_type, target_replicas in scale_request.desired_num_workers.items():
group_index = _worker_group_index(raycluster, node_type)
group_max_replicas = _worker_group_max_replicas(raycluster, group_index)
# Cap the replica count to maxReplicas.
if group_max_replicas is not None and group_max_replicas < target_replicas:
logger.warning(
"Autoscaler attempted to create "
+ "more than maxReplicas pods of type {}.".format(node_type)
)
target_replicas = group_max_replicas
# Check if we need to change the target count.
if target_replicas == _worker_group_replicas(raycluster, group_index):
# No patch required.
continue
# Need to patch replica count. Format the patch and add it to the payload.
patch = worker_replica_patch(group_index, target_replicas)
patch_payload.append(patch)
# Maps node_type to nodes to delete for that group.
deletion_groups = defaultdict(list)
for worker in scale_request.workers_to_delete:
node_type = self.node_tags(worker)[TAG_RAY_USER_NODE_TYPE]
deletion_groups[node_type].append(worker)
for node_type, workers_to_delete in deletion_groups.items():
group_index = _worker_group_index(raycluster, node_type)
patch = worker_delete_patch(group_index, workers_to_delete)
patch_payload.append(patch)
return patch_payload
def _submit_raycluster_patch(self, patch_payload: List[Dict[str, Any]]):
"""Submits a patch to modify a RayCluster CR."""
path = "rayclusters/{}".format(self.cluster_name)
self._patch(path, patch_payload)
def _get(self, path: str) -> Dict[str, Any]:
"""Wrapper for REST GET of resource with proper headers."""
return self.k8s_api_client.get(path)
def _patch(self, path: str, payload: List[Dict[str, Any]]) -> Dict[str, Any]:
"""Wrapper for REST PATCH of resource with proper headers."""
return self.k8s_api_client.patch(path, payload)
| KubeRayNodeProvider |
python | keon__algorithms | algorithms/queues/queue.py | {
"start": 2470,
"end": 2572
} | class ____:
def __init__(self, value):
self.value = value
self.next = None
| QueueNode |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 43265,
"end": 44738
} | class ____(TestCase):
"""Tests for ``sliced()``"""
def test_even(self):
"""Test when the length of the sequence is divisible by *n*"""
seq = 'ABCDEFGHI'
self.assertEqual(list(mi.sliced(seq, 3)), ['ABC', 'DEF', 'GHI'])
def test_odd(self):
"""Test when the length of the sequence is not divisible by *n*"""
seq = 'ABCDEFGHI'
self.assertEqual(list(mi.sliced(seq, 4)), ['ABCD', 'EFGH', 'I'])
def test_not_sliceable(self):
seq = (x for x in 'ABCDEFGHI')
with self.assertRaises(TypeError):
list(mi.sliced(seq, 3))
def test_odd_and_strict(self):
seq = [x for x in 'ABCDEFGHI']
with self.assertRaises(ValueError):
list(mi.sliced(seq, 4, strict=True))
def test_numpy_like_array(self):
# Numpy arrays don't behave like Python lists - calling bool()
# on them doesn't return False for empty lists and True for non-empty
# ones. Emulate that behavior.
class FalseList(list):
def __getitem__(self, key):
ret = super().__getitem__(key)
if isinstance(key, slice):
return FalseList(ret)
return ret
def __bool__(self):
return False
seq = FalseList(range(9))
actual = list(mi.sliced(seq, 3))
expected = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
self.assertEqual(actual, expected)
| SlicedTests |
python | apache__avro | lang/py/avro/schema.py | {
"start": 36304,
"end": 36801
} | class ____(LogicalSchema, PrimitiveSchema):
def __init__(self, other_props=None):
LogicalSchema.__init__(self, avro.constants.TIME_MILLIS)
PrimitiveSchema.__init__(self, "int", other_props)
def to_json(self, names=None):
return self.props
def validate(self, datum):
"""Return self if datum is a valid representation of this schema, else None."""
return self if isinstance(datum, datetime.time) else None
#
# time-micros Type
#
| TimeMillisSchema |
python | bokeh__bokeh | tests/unit/bokeh/document/test_callbacks__document.py | {
"start": 1846,
"end": 15106
} | class ____:
def test_basic(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
# module manager should only hold a weak ref
assert len(gc.get_referrers(d)) == 0
assert len(cm._message_callbacks) == 1
assert cm._message_callbacks == {"bokeh_event": [cm.trigger_event]}
def test_session_callbacks(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
assert set(cm.session_callbacks) == set()
s1 = SessionCallback(lambda: None, callback_id=ID("1"))
cm._session_callbacks.add(s1)
assert set(cm.session_callbacks) == {s1}
s2 = SessionCallback(lambda: None, callback_id=ID("2"))
cm._session_callbacks.add(s2)
assert set(cm.session_callbacks) == {s1, s2}
def test_session_destroyed_callbacks(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
assert cm.session_destroyed_callbacks == set()
s1 = lambda x: None
cm._session_destroyed_callbacks.add(s1)
assert cm.session_destroyed_callbacks == {s1}
s2 = lambda x: None
cm._session_destroyed_callbacks.add(s2)
assert cm.session_destroyed_callbacks == {s1, s2}
cm.session_destroyed_callbacks = {s2}
assert cm.session_destroyed_callbacks == {s2}
def test_add_session_callback(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
events = []
def listener(event: DocumentChangedEvent) -> None:
events.append(event)
cm.on_change(listener)
assert len(cm.session_callbacks) == 0
assert not events
def cb() -> None: pass
obj = SessionCallback(cb, callback_id=ID("1"))
callback_obj = cm.add_session_callback(obj, cb, one_shot=False)
assert len(cm.session_callbacks) == len(events) == 1
assert isinstance(events[0], SessionCallbackAdded)
assert callback_obj == cm.session_callbacks[0] == events[0].callback
def test_destroy(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
cm._change_callbacks["foo"] = lambda x: None
cm._event_callbacks["bar"] = []
cm._message_callbacks["baz"] = []
assert cm.destroy() is None # type: ignore [func-returns-value]
assert not hasattr(cm, "_change_callbacks")
assert not hasattr(cm, "_event_callbacks")
assert not hasattr(cm, "_messagecallbacks")
@pytest.mark.parametrize('policy', HoldPolicy)
def test_hold(self, policy: HoldPolicyType) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
assert cm.hold_value is None
assert cm._held_events == []
cm.hold(policy)
assert cm.hold_value == policy
def test_hold_bad_policy(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
with pytest.raises(ValueError):
cm.hold("junk") # type: ignore [arg-type]
@pytest.mark.parametrize('first,second', [('combine', 'collect'), ('collect', 'combine')])
def test_hold_rehold(self, first: HoldPolicyType, second: HoldPolicyType, caplog: pytest.LogCaptureFixture) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
with caplog.at_level(logging.WARN):
cm.hold(first)
assert caplog.text == ""
assert len(caplog.records) == 0
cm.hold(first)
assert caplog.text == ""
assert len(caplog.records) == 0
cm.hold(second)
assert caplog.text.strip().endswith(f"hold already active with {first!r}, ignoring {second!r}")
assert len(caplog.records) == 1
cm.unhold()
cm.hold(second)
assert len(caplog.records) == 1
def test_notify_event(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
reported_curdoc = None
reported_foo = None
def invoker() -> None:
nonlocal reported_curdoc
nonlocal reported_foo
reported_curdoc = curdoc()
reported_foo = 10
m = Button()
cm.notify_event(m, ButtonClick(m), invoker)
assert reported_curdoc is d
assert reported_foo == 10
def test_on_change(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
def cb(x: Any) -> None:
pass
cm.on_change(cb)
assert cm._change_callbacks == {cb: cb} # XXX !!!?
def test_on_change_dispatch_to(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
called = None
class recv:
def _document_changed(x: Any) -> None:
nonlocal called
called = x
cm.on_change_dispatch_to(recv)
assert recv in cm._change_callbacks
evt = DocumentChangedEvent(d)
cm._change_callbacks[recv](evt)
assert called == evt
def test_event_callbacks_for_event_name(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
def cb1(event: Any) -> None:
pass
def cb2(event: Any) -> None:
pass
assert cm.event_callbacks_for_event_name("document_ready") == ()
cm.on_event("document_ready", cb1)
assert cm.event_callbacks_for_event_name("junk") == ()
assert cm.event_callbacks_for_event_name("document_ready") == (cb1,)
cm.on_event("document_ready", cb2)
cbs = cm.event_callbacks_for_event_name("document_ready")
assert isinstance(cbs, tuple)
assert len(cbs) == 2
assert cb1 in cbs
assert cb2 in cbs
def test_on_event_good_string(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
def good(event: Any) -> None:
pass
cm.on_event("document_ready", good)
assert cm._event_callbacks == {"document_ready": [good]}
@pytest.mark.parametrize("evt", ("button_click", "junk"))
def test_on_event_bad_string(self, evt: str) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
def good(event: Any) -> None:
pass
with pytest.raises(ValueError):
cm.on_event(evt, good)
assert cm._event_callbacks == {}
def test_on_event_good_event(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
def good(event: Any) -> None:
pass
cm.on_event(DocumentReady, good)
assert cm._event_callbacks == {"document_ready": [good]}
def test_on_event_bad_event(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
def good(event: Any) -> None:
pass
with pytest.raises(ValueError):
cm.on_event(ButtonClick, good)
assert cm._event_callbacks == {}
def test_js_on_event_good_string(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
cb = CustomJS()
cm.js_on_event("document_ready", cb)
assert cm._js_event_callbacks == {"document_ready": [cb]}
@pytest.mark.parametrize("evt", ("button_click", "junk"))
def test_js_on_event_bad_string(self, evt: str) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
cb = CustomJS()
with pytest.raises(ValueError):
cm.js_on_event(evt, cb)
assert cm._js_event_callbacks == {}
def test_js_on_event_good_event(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
cb = CustomJS()
cm.js_on_event(DocumentReady, cb)
assert cm._js_event_callbacks == {"document_ready": [cb]}
def test_js_on_event_bad_event(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
cb = CustomJS()
with pytest.raises(ValueError):
cm.js_on_event(ButtonClick, cb)
assert cm._js_event_callbacks == {}
def test_on_message(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
def cb(x: Any) -> None:
pass
cm.on_message("foo", cb)
assert cm._message_callbacks == {"foo": [cb], "bokeh_event": [cm.trigger_event]}
def test_on_session_destroyed(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
def good(session_context: Any) -> None:
pass
cm.on_session_destroyed(good)
assert cm.session_destroyed_callbacks == {good}
def bad() -> None: # wrong signature
pass
with pytest.raises(ValueError):
cm.on_session_destroyed(bad) # type: ignore [arg-type] # want to test bad param
def test_change_callbacks(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
def cb1(x: Any) -> None:
pass
def cb2(x: Any) -> None:
pass
cm.change_callbacks() == ()
cm.on_change(cb1)
cm.change_callbacks() == (cb1,)
cm.on_change(cb2)
cbs = cm.change_callbacks()
assert isinstance(cbs, tuple)
assert len(cbs) == 2
assert cb1 in cbs
assert cb2 in cbs
def test_remove_on_change(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
def cb(x: Any) -> None:
pass
cm.on_change(cb)
cm.remove_on_change(cb)
assert cm._change_callbacks == {}
def test_remove_on_message(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
def cb(x: Any) -> None:
pass
cm.on_message("foo", cb)
cm.remove_on_message("foo", cb)
assert cm._message_callbacks == {"foo": [], "bokeh_event": [cm.trigger_event]}
def test_remove_session_callback(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
events = []
def listener(event: DocumentChangedEvent) -> None:
events.append(event)
cm.on_change(listener)
assert len(cm.session_callbacks) == 0
assert not events
def cb() -> None: pass
obj = SessionCallback(cb, callback_id=ID("1"))
cm.add_session_callback(obj, cb, one_shot=False)
cm.remove_session_callback(obj)
assert len(cm.session_callbacks) == 0
assert len(events) == 2
assert isinstance(events[0], SessionCallbackAdded)
assert isinstance(events[1], SessionCallbackRemoved)
def test_subscribe(self) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
m = Div()
assert cm._subscribed_models == {}
cm.subscribe("foo", m)
assert "foo" in cm._subscribed_models
assert len(cm._subscribed_models["foo"]) == 1
mref = cm._subscribed_models["foo"].pop()
assert mref() is m
# TODO (bev) def test_trigger_event
# TODO (bev) def test_trigger_on_change
@pytest.mark.parametrize('policy', HoldPolicy)
def test_unhold(self, policy: HoldPolicyType) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
assert cm.hold_value is None
assert cm._held_events == []
cm.hold(policy)
assert cm.hold_value == policy
cm.unhold()
assert cm.hold_value is None
@patch("bokeh.document.callbacks.DocumentCallbackManager.trigger_on_change")
def test_unhold_triggers_events(self, mock_trigger: MagicMock) -> None:
d = Document()
cm = bdc.DocumentCallbackManager(d)
cm.hold('collect')
last = DocumentChangedEvent(d, None)
cm._held_events = [DocumentChangedEvent(d, None), DocumentChangedEvent(d, None), last]
cm.unhold()
assert mock_trigger.call_count == 3
assert mock_trigger.call_args[0] == (last,)
assert mock_trigger.call_args[1] == {}
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def test_invoke_with_curdoc() -> None:
reported_curdoc = None
d = Document()
def f() -> None:
nonlocal reported_curdoc
reported_curdoc = curdoc()
bdc.invoke_with_curdoc(d, f)
assert reported_curdoc == d
def test_invoke_with_curdoc_nolock() -> None:
reported_curdoc: Document | UnlockedDocumentProxy | None = None
d = Document()
def f() -> None:
nonlocal reported_curdoc
reported_curdoc = curdoc()
f.nolock = True # type: ignore [attr-defined]
bdc.invoke_with_curdoc(d, f)
assert isinstance(reported_curdoc, UnlockedDocumentProxy)
assert reported_curdoc._doc == d
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# needed for caplog tests to function
basicConfig()
| TestDocumentCallbackManager |
python | pyinstaller__pyinstaller | PyInstaller/building/build_main.py | {
"start": 55400,
"end": 61235
} | class ____:
"""
Class that constructs the executable.
"""
# TODO wrap the 'main' and 'build' function into this class.
def build(spec, distpath, workpath, clean_build):
"""
Build the executable according to the created SPEC file.
"""
from PyInstaller.config import CONF
# Ensure starting tilde in distpath / workpath is expanded into user's home directory. This is to work around for
# tilde not being expanded when using `--workpath=~/path/abc` instead of `--workpath ~/path/abc` (or when the path
# argument is quoted). See https://github.com/pyinstaller/pyinstaller/issues/696
distpath = os.path.abspath(os.path.expanduser(distpath))
workpath = os.path.abspath(os.path.expanduser(workpath))
CONF['spec'] = os.path.abspath(spec)
CONF['specpath'], CONF['specnm'] = os.path.split(CONF['spec'])
CONF['specnm'] = os.path.splitext(CONF['specnm'])[0]
# Add 'specname' to workpath and distpath if they point to PyInstaller homepath.
if os.path.dirname(distpath) == HOMEPATH:
distpath = os.path.join(HOMEPATH, CONF['specnm'], os.path.basename(distpath))
CONF['distpath'] = distpath
if os.path.dirname(workpath) == HOMEPATH:
workpath = os.path.join(HOMEPATH, CONF['specnm'], os.path.basename(workpath), CONF['specnm'])
else:
workpath = os.path.join(workpath, CONF['specnm'])
CONF['workpath'] = workpath
CONF['warnfile'] = os.path.join(workpath, 'warn-%s.txt' % CONF['specnm'])
CONF['dot-file'] = os.path.join(workpath, 'graph-%s.dot' % CONF['specnm'])
CONF['xref-file'] = os.path.join(workpath, 'xref-%s.html' % CONF['specnm'])
CONF['code_cache'] = dict()
# Clean PyInstaller cache (CONF['cachedir']) and temporary files (workpath) to be able start a clean build.
if clean_build:
logger.info('Removing temporary files and cleaning cache in %s', CONF['cachedir'])
for pth in (CONF['cachedir'], workpath):
if os.path.exists(pth):
# Remove all files in 'pth'.
for f in glob.glob(pth + '/*'):
# Remove dirs recursively.
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.remove(f)
# Create DISTPATH and workpath if they does not exist.
for pth in (CONF['distpath'], CONF['workpath']):
os.makedirs(pth, exist_ok=True)
# Construct NAMESPACE for running the Python code from .SPEC file.
# NOTE: Passing NAMESPACE allows to avoid having global variables in this module and makes isolated environment for
# running tests.
# NOTE: Defining NAMESPACE allows to map any class to a apecific name for .SPEC.
# FIXME: Some symbols might be missing. Add them if there are some failures.
# TODO: What from this .spec API is deprecated and could be removed?
spec_namespace = {
# Set of global variables that can be used while processing .spec file. Some of them act as configuration
# options.
'DISTPATH': CONF['distpath'],
'HOMEPATH': HOMEPATH,
'SPEC': CONF['spec'],
'specnm': CONF['specnm'],
'SPECPATH': CONF['specpath'],
'WARNFILE': CONF['warnfile'],
'workpath': CONF['workpath'],
# PyInstaller classes for .spec.
'TOC': TOC, # Kept for backward compatibility even though `TOC` class is deprecated.
'Analysis': Analysis,
'BUNDLE': BUNDLE,
'COLLECT': COLLECT,
'EXE': EXE,
'MERGE': MERGE,
'PYZ': PYZ,
'Tree': Tree,
'Splash': Splash,
# Python modules available for .spec.
'os': os,
}
# Execute the specfile. Read it as a binary file...
try:
with open(spec, 'rb') as f:
# ... then let Python determine the encoding, since ``compile`` accepts byte strings.
code = compile(f.read(), spec, 'exec')
except FileNotFoundError:
raise SystemExit(f'ERROR: Spec file "{spec}" not found!')
exec(code, spec_namespace)
logger.info("Build complete! The results are available in: %s", CONF['distpath'])
def __add_options(parser):
parser.add_argument(
"--distpath",
metavar="DIR",
default=DEFAULT_DISTPATH,
help="Where to put the bundled app (default: ./dist)",
)
parser.add_argument(
'--workpath',
default=DEFAULT_WORKPATH,
help="Where to put all the temporary work files, .log, .pyz and etc. (default: ./build)",
)
parser.add_argument(
'-y',
'--noconfirm',
action="store_true",
default=False,
help="Replace output directory (default: %s) without asking for confirmation" %
os.path.join('SPECPATH', 'dist', 'SPECNAME'),
)
parser.add_argument(
'--upx-dir',
default=None,
help="Path to UPX utility (default: search the execution path)",
)
parser.add_argument(
'--clean',
dest='clean_build',
action='store_true',
default=False,
help="Clean PyInstaller cache and remove temporary files before building.",
)
def main(
pyi_config,
specfile,
noconfirm=False,
distpath=DEFAULT_DISTPATH,
workpath=DEFAULT_WORKPATH,
upx_dir=None,
clean_build=False,
**kw
):
from PyInstaller.config import CONF
CONF['noconfirm'] = noconfirm
# If configuration dict is supplied - skip configuration step.
if pyi_config is None:
import PyInstaller.configure as configure
CONF.update(configure.get_config(upx_dir=upx_dir))
else:
CONF.update(pyi_config)
CONF['ui_admin'] = kw.get('ui_admin', False)
CONF['ui_access'] = kw.get('ui_uiaccess', False)
build(specfile, distpath, workpath, clean_build)
| ExecutableBuilder |
python | pypa__warehouse | tests/unit/email/test_init.py | {
"start": 156433,
"end": 166904
} | class ____:
def test_send_yanked_project_release_email_to_maintainer(
self, pyramid_request, pyramid_config, monkeypatch
):
stub_user = pretend.stub(
id="id_1",
username="username",
name="",
email="email@example.com",
primary_email=pretend.stub(email="email@example.com", verified=True),
)
stub_submitter_user = pretend.stub(
id="id_2",
username="submitterusername",
name="",
email="submiteremail@example.com",
primary_email=pretend.stub(
email="submiteremail@example.com", verified=True
),
)
subject_renderer = pyramid_config.testing_add_renderer(
"email/yanked-project-release/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
"email/yanked-project-release/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
"email/yanked-project-release/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
ids = [stub_submitter_user.id, stub_user.id]
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=ids.pop())
)
),
)
pyramid_request.user = stub_submitter_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
release = pretend.stub(
version="0.0.0",
project=pretend.stub(name="test_project"),
created=datetime.datetime(2017, 2, 5, 0, 0, 0, 0),
yanked_reason="Yanky Doodle went to town",
)
result = email.send_yanked_project_release_email(
pyramid_request,
[stub_user, stub_submitter_user],
release=release,
submitter_name=stub_submitter_user.username,
submitter_role="Owner",
recipient_role="Maintainer",
)
assert result == {
"project": release.project.name,
"release": release.version,
"release_date": release.created.strftime("%Y-%m-%d"),
"submitter": stub_submitter_user.username,
"submitter_role": "owner",
"recipient_role_descr": "a maintainer",
"yanked_reason": "Yanky Doodle went to town",
}
subject_renderer.assert_(project="test_project")
subject_renderer.assert_(release="0.0.0")
body_renderer.assert_(project="test_project")
body_renderer.assert_(release="0.0.0")
body_renderer.assert_(release_date=release.created.strftime("%Y-%m-%d"))
body_renderer.assert_(submitter=stub_submitter_user.username)
body_renderer.assert_(submitter_role="owner")
body_renderer.assert_(recipient_role_descr="a maintainer")
assert pyramid_request.task.calls == [
pretend.call(send_email),
pretend.call(send_email),
]
assert send_email.delay.calls == [
pretend.call(
"username <email@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "email@example.com",
"subject": "Email Subject",
"redact_ip": True,
},
},
),
pretend.call(
"submitterusername <submiteremail@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_submitter_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "submiteremail@example.com",
"subject": "Email Subject",
"redact_ip": False,
},
},
),
]
def test_send_yanked_project_release_email_to_owner(
self, pyramid_request, pyramid_config, monkeypatch
):
stub_user = pretend.stub(
id="id_1",
username="username",
name="",
email="email@example.com",
primary_email=pretend.stub(email="email@example.com", verified=True),
)
stub_submitter_user = pretend.stub(
id="id_2",
username="submitterusername",
name="",
email="submiteremail@example.com",
primary_email=pretend.stub(
email="submiteremail@example.com", verified=True
),
)
subject_renderer = pyramid_config.testing_add_renderer(
"email/yanked-project-release/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
"email/yanked-project-release/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
"email/yanked-project-release/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
ids = [stub_submitter_user.id, stub_user.id]
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=ids.pop())
)
),
)
pyramid_request.user = stub_submitter_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
release = pretend.stub(
version="0.0.0",
project=pretend.stub(name="test_project"),
created=datetime.datetime(2017, 2, 5, 0, 0, 0, 0),
yanked_reason="Yanky Doodle went to town",
)
result = email.send_yanked_project_release_email(
pyramid_request,
[stub_user, stub_submitter_user],
release=release,
submitter_name=stub_submitter_user.username,
submitter_role="Owner",
recipient_role="Owner",
)
assert result == {
"project": release.project.name,
"release": release.version,
"release_date": release.created.strftime("%Y-%m-%d"),
"submitter": stub_submitter_user.username,
"submitter_role": "owner",
"recipient_role_descr": "an owner",
"yanked_reason": "Yanky Doodle went to town",
}
subject_renderer.assert_(project="test_project")
subject_renderer.assert_(release="0.0.0")
body_renderer.assert_(project="test_project")
body_renderer.assert_(release="0.0.0")
body_renderer.assert_(release_date=release.created.strftime("%Y-%m-%d"))
body_renderer.assert_(submitter=stub_submitter_user.username)
body_renderer.assert_(submitter_role="owner")
body_renderer.assert_(recipient_role_descr="an owner")
assert pyramid_request.task.calls == [
pretend.call(send_email),
pretend.call(send_email),
]
assert send_email.delay.calls == [
pretend.call(
"username <email@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "email@example.com",
"subject": "Email Subject",
"redact_ip": True,
},
},
),
pretend.call(
"submitterusername <submiteremail@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_submitter_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "submiteremail@example.com",
"subject": "Email Subject",
"redact_ip": False,
},
},
),
]
| TestYankedReleaseEmail |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/typing.py | {
"start": 3030,
"end": 18438
} | class ____(Protocol[_KT, _VT_co]):
def keys(self) -> Iterable[_KT]: ...
def __getitem__(self, __k: _KT) -> _VT_co: ...
# work around https://github.com/microsoft/pyright/issues/3025
_LiteralStar = Literal["*"]
def de_stringify_annotation(
cls: Type[Any],
annotation: _AnnotationScanType,
originating_module: str,
locals_: Mapping[str, Any],
*,
str_cleanup_fn: Optional[Callable[[str, str], str]] = None,
include_generic: bool = False,
_already_seen: Optional[Set[Any]] = None,
) -> Type[Any]:
"""Resolve annotations that may be string based into real objects.
This is particularly important if a module defines "from __future__ import
annotations", as everything inside of __annotations__ is a string. We want
to at least have generic containers like ``Mapped``, ``Union``, ``List``,
etc.
"""
# looked at typing.get_type_hints(), looked at pydantic. We need much
# less here, and we here try to not use any private typing internals
# or construct ForwardRef objects which is documented as something
# that should be avoided.
original_annotation = annotation
if is_fwd_ref(annotation):
annotation = annotation.__forward_arg__
if isinstance(annotation, str):
if str_cleanup_fn:
annotation = str_cleanup_fn(annotation, originating_module)
annotation = eval_expression(
annotation, originating_module, locals_=locals_, in_class=cls
)
if (
include_generic
and is_generic(annotation)
and not is_literal(annotation)
):
if _already_seen is None:
_already_seen = set()
if annotation in _already_seen:
# only occurs recursively. outermost return type
# will always be Type.
# the element here will be either ForwardRef or
# Optional[ForwardRef]
return original_annotation # type: ignore
else:
_already_seen.add(annotation)
elements = tuple(
de_stringify_annotation(
cls,
elem,
originating_module,
locals_,
str_cleanup_fn=str_cleanup_fn,
include_generic=include_generic,
_already_seen=_already_seen,
)
for elem in annotation.__args__
)
return _copy_generic_annotation_with(annotation, elements)
return annotation # type: ignore
def fixup_container_fwd_refs(
type_: _AnnotationScanType,
) -> _AnnotationScanType:
"""Correct dict['x', 'y'] into dict[ForwardRef('x'), ForwardRef('y')]
and similar for list, set
"""
if (
is_generic(type_)
and get_origin(type_)
in (
dict,
set,
list,
collections_abc.MutableSet,
collections_abc.MutableMapping,
collections_abc.MutableSequence,
collections_abc.Mapping,
collections_abc.Sequence,
)
# fight, kick and scream to struggle to tell the difference between
# dict[] and typing.Dict[] which DO NOT compare the same and DO NOT
# behave the same yet there is NO WAY to distinguish between which type
# it is using public attributes
and not re.match(
"typing.(?:Dict|List|Set|.*Mapping|.*Sequence|.*Set)", repr(type_)
)
):
# compat with py3.10 and earlier
return get_origin(type_).__class_getitem__( # type: ignore
tuple(
[
ForwardRef(elem) if isinstance(elem, str) else elem
for elem in get_args(type_)
]
)
)
return type_
def _copy_generic_annotation_with(
annotation: GenericProtocol[_T], elements: Tuple[_AnnotationScanType, ...]
) -> Type[_T]:
if hasattr(annotation, "copy_with"):
# List, Dict, etc. real generics
return annotation.copy_with(elements) # type: ignore
else:
# Python builtins list, dict, etc.
return annotation.__origin__[elements] # type: ignore
def eval_expression(
expression: str,
module_name: str,
*,
locals_: Optional[Mapping[str, Any]] = None,
in_class: Optional[Type[Any]] = None,
) -> Any:
try:
base_globals: Dict[str, Any] = sys.modules[module_name].__dict__
except KeyError as ke:
raise NameError(
f"Module {module_name} isn't present in sys.modules; can't "
f"evaluate expression {expression}"
) from ke
try:
if in_class is not None:
cls_namespace = dict(in_class.__dict__)
cls_namespace.setdefault(in_class.__name__, in_class)
# see #10899. We want the locals/globals to take precedence
# over the class namespace in this context, even though this
# is not the usual way variables would resolve.
cls_namespace.update(base_globals)
annotation = eval(expression, cls_namespace, locals_)
else:
annotation = eval(expression, base_globals, locals_)
except Exception as err:
raise NameError(
f"Could not de-stringify annotation {expression!r}"
) from err
else:
return annotation
def eval_name_only(
    name: str,
    module_name: str,
    *,
    locals_: Optional[Mapping[str, Any]] = None,
) -> Any:
    """Resolve a single (non-dotted) name against a module's globals,
    falling back to builtins; dotted names delegate to ``eval_expression``.

    Plain dict lookups are used instead of ``eval()`` for speed, since this
    runs for every ``Mapper[]`` keyword etc. depending on configuration.
    """
    if "." in name:
        return eval_expression(name, module_name, locals_=locals_)

    try:
        module_globals: Dict[str, Any] = sys.modules[module_name].__dict__
    except KeyError as key_err:
        raise NameError(
            f"Module {module_name} isn't present in sys.modules; can't "
            f"resolve name {name}"
        ) from key_err

    try:
        return module_globals[name]
    except KeyError as key_err:
        # check in builtins as well to handle `list`, `set` or `dict`, etc.
        try:
            return builtins.__dict__[name]
        except KeyError:
            pass
        raise NameError(
            f"Could not locate name {name} in module {module_name}"
        ) from key_err
def resolve_name_to_real_class_name(name: str, module_name: str) -> str:
    """Return the ``__name__`` of whatever ``name`` resolves to in
    ``module_name``; fall back to ``name`` itself when resolution fails
    or the resolved object has no ``__name__``."""
    try:
        resolved = eval_name_only(name, module_name)
    except NameError:
        return name
    return getattr(resolved, "__name__", name)
def is_pep593(type_: Optional[Any]) -> bool:
    """True when ``type_`` is a pep-593 ``Annotated[...]`` construct."""
    if type_ is None:
        return False
    return get_origin(type_) in _type_tuples.Annotated
def is_non_string_iterable(obj: Any) -> TypeGuard[Iterable[Any]]:
    """True for any iterable other than ``str`` / ``bytes``."""
    if isinstance(obj, (str, bytes)):
        return False
    return isinstance(obj, collections_abc.Iterable)
def is_literal(type_: Any) -> bool:
    """True when ``type_`` is a ``Literal[...]`` construct."""
    origin = get_origin(type_)
    return origin in _type_tuples.Literal
def is_newtype(type_: Optional[_AnnotationScanType]) -> TypeGuard[NewType]:
    """True when ``type_`` is a ``typing.NewType`` instance."""
    return isinstance(type_, _type_tuples.NewType)
def is_generic(type_: _AnnotationScanType) -> TypeGuard[GenericProtocol[Any]]:
    """True for a parametrized generic, i.e. an object carrying both
    ``__origin__`` and ``__args__``."""
    return hasattr(type_, "__origin__") and hasattr(type_, "__args__")
def is_pep695(type_: _AnnotationScanType) -> TypeGuard[TypeAliasType]:
    """Return True if ``type_`` is (or, for a parametrized alias, wraps)
    a pep-695 ``TypeAliasType``."""
    # NOTE: a generic TAT does not instance check as TypeAliasType outside of
    # python 3.10. For sqlalchemy use cases it's fine to consider it a TAT
    # though.
    # NOTE: things seems to work also without this additional check
    if is_generic(type_):
        # unwrap Alias[int] -> Alias, recursing in case of nesting
        return is_pep695(type_.__origin__)
    return isinstance(type_, _type_instances.TypeAliasType)
def pep695_values(type_: _AnnotationScanType) -> Set[Any]:
    """Extracts the value from a TypeAliasType, recursively exploring unions
    and inner TypeAliasType to flatten them into a single set.

    Forward references are not evaluated, so no recursive exploration happens
    into them.
    """
    # guards against self-referential aliases (see recursive_value below)
    _seen = set()

    def recursive_value(inner_type):
        # Unwrap one level of TypeAliasType; a union value fans out into a
        # (possibly nested) list which is flattened breadth-first below.
        if inner_type in _seen:
            # recursion are not supported (at least it's flagged as
            # an error by pyright). Just avoid infinite loop
            return inner_type
        _seen.add(inner_type)
        if not is_pep695(inner_type):
            return inner_type
        value = inner_type.__value__
        if not is_union(value):
            return value
        return [recursive_value(t) for t in value.__args__]

    res = recursive_value(type_)
    if isinstance(res, list):
        types = set()
        stack = deque(res)
        while stack:
            t = stack.popleft()
            if isinstance(t, list):
                stack.extend(t)
            else:
                # normalize NoneType (and "None" forward refs) to plain None
                types.add(None if t is NoneType or is_fwd_none(t) else t)
        return types
    else:
        return {res}
@overload
def is_fwd_ref(
    type_: _AnnotationScanType,
    check_generic: bool = ...,
    check_for_plain_string: Literal[False] = ...,
) -> TypeGuard[ForwardRef]: ...


@overload
def is_fwd_ref(
    type_: _AnnotationScanType,
    check_generic: bool = ...,
    check_for_plain_string: bool = ...,
) -> TypeGuard[Union[str, ForwardRef]]: ...


def is_fwd_ref(
    type_: _AnnotationScanType,
    check_generic: bool = False,
    check_for_plain_string: bool = False,
) -> TypeGuard[Union[str, ForwardRef]]:
    """True when ``type_`` is a forward reference; optionally also for
    plain strings, and optionally looking inside generic arguments."""
    if check_for_plain_string and isinstance(type_, str):
        return True
    if isinstance(type_, _type_instances.ForwardRef):
        return True
    if check_generic and is_generic(type_):
        return any(
            is_fwd_ref(
                elem, True, check_for_plain_string=check_for_plain_string
            )
            for elem in type_.__args__
        )
    return False
@overload
def de_optionalize_union_types(type_: str) -> str: ...


@overload
def de_optionalize_union_types(type_: Type[Any]) -> Type[Any]: ...


@overload
def de_optionalize_union_types(type_: _MatchedOnType) -> _MatchedOnType: ...


@overload
def de_optionalize_union_types(
    type_: _AnnotationScanType,
) -> _AnnotationScanType: ...


def de_optionalize_union_types(
    type_: _AnnotationScanType,
) -> _AnnotationScanType:
    """Given a type, filter out ``Union`` types that include ``NoneType``
    to not include the ``NoneType``.

    """
    if is_fwd_ref(type_):
        # string/ForwardRef unions are processed textually
        return _de_optionalize_fwd_ref_union_types(type_, False)

    if is_union(type_) and includes_none(type_):
        remaining = {
            member
            for member in type_.__args__
            if member is not NoneType and not is_fwd_none(member)
        }
        return make_union_type(*remaining)

    return type_
@overload
def _de_optionalize_fwd_ref_union_types(
    type_: ForwardRef, return_has_none: Literal[True]
) -> bool: ...


@overload
def _de_optionalize_fwd_ref_union_types(
    type_: ForwardRef, return_has_none: Literal[False]
) -> _AnnotationScanType: ...


def _de_optionalize_fwd_ref_union_types(
    type_: ForwardRef, return_has_none: bool
) -> Union[_AnnotationScanType, bool]:
    """return the non-optional type for Optional[], Union[None, ...], x|None,
    etc. without de-stringifying forward refs.

    With ``return_has_none=True``, instead report whether the string
    annotation admits ``None`` at all.

    unfortunately this seems to require lots of hardcoded heuristics

    """
    annotation = type_.__forward_arg__

    # subscripted form: "Optional[...]" / "Union[...]"
    mm = re.match(r"^(.+?)\[(.+)\]$", annotation)
    if mm:
        # compare only the last dotted component so "typing.Optional"
        # and "Optional" behave alike
        g1 = mm.group(1).split(".")[-1]
        if g1 == "Optional":
            return True if return_has_none else ForwardRef(mm.group(2))
        elif g1 == "Union":
            if "[" in mm.group(2):
                # cases like "Union[Dict[str, int], int, None]" need a
                # bracket-aware split: commas nested inside [] must not
                # separate union members
                elements: list[str] = []
                current: list[str] = []
                ignore_comma = 0
                for char in mm.group(2):
                    if char == "[":
                        ignore_comma += 1
                    elif char == "]":
                        ignore_comma -= 1
                    elif ignore_comma == 0 and char == ",":
                        elements.append("".join(current).strip())
                        current.clear()
                        continue
                    current.append(char)

                # BUGFIX: flush the final member.  Previously the last
                # element (e.g. the trailing "None") was never appended,
                # so Optional-ness was misreported and the rebuilt union
                # silently dropped its last member.
                elements.append("".join(current).strip())
            else:
                elements = re.split(r",\s*", mm.group(2))

            parts = [ForwardRef(elem) for elem in elements if elem != "None"]

            if return_has_none:
                return len(elements) != len(parts)
            else:
                return make_union_type(*parts) if parts else Never  # type: ignore[return-value] # noqa: E501

        else:
            return False if return_has_none else type_

    # pep-604 form: "X | Y | None"
    pipe_tokens = re.split(r"\s*\|\s*", annotation)
    has_none = "None" in pipe_tokens

    if return_has_none:
        return has_none
    if has_none:
        anno_str = "|".join(p for p in pipe_tokens if p != "None")
        return ForwardRef(anno_str) if anno_str else Never  # type: ignore[return-value] # noqa: E501

    return type_
def make_union_type(*types: _AnnotationScanType) -> Type[Any]:
    """Construct a ``typing.Union`` out of the given types."""
    return Union[types]  # type: ignore
def includes_none(type_: Any) -> bool:
    """Returns if the type annotation ``type_`` allows ``None``.

    This function supports:
    * forward refs
    * unions
    * pep593 - Annotated
    * pep695 - TypeAliasType (does not support looking into
      fw reference of other pep695)
    * NewType
    * plain types like ``int``, ``None``, etc
    """
    # dispatch order matters: forward refs are handled textually before
    # any structural inspection
    if is_fwd_ref(type_):
        return _de_optionalize_fwd_ref_union_types(type_, True)
    if is_union(type_):
        return any(includes_none(t) for t in get_args(type_))
    if is_pep593(type_):
        # Annotated[X, ...] delegates to X
        return includes_none(get_args(type_)[0])
    if is_pep695(type_):
        return any(includes_none(t) for t in pep695_values(type_))
    if is_newtype(type_):
        return includes_none(type_.__supertype__)
    try:
        return type_ in (NoneType, None) or is_fwd_none(type_)
    except TypeError:
        # if type_ is Column, mapped_column(), etc. the use of "in"
        # resolves to ``__eq__()`` which then gives us an expression object
        # that can't resolve to boolean. just catch it all via exception
        return False
def is_a_type(type_: Any) -> bool:
    """Heuristic check for "is this object usable as a type annotation":
    a real class, a parametrized generic, or a ``typing`` /
    ``typing_extensions`` construct."""
    if isinstance(type_, type):
        return True
    if get_origin(type_) is not None:
        return True
    if getattr(type_, "__module__", None) in ("typing", "typing_extensions"):
        return True
    return type(type_).__mro__[0].__module__ in ("typing", "typing_extensions")
def is_union(type_: Any) -> TypeGuard[ArgsTypeProtocol]:
    """True for ``Union[...]`` and pep-604 ``X | Y`` constructs."""
    union_names = ("Union", "UnionType")
    return is_origin_of(type_, *union_names)
def is_origin_of_cls(
    type_: Any, class_obj: Union[Tuple[Type[Any], ...], Type[Any]]
) -> bool:
    """return True if the given type has an __origin__ that shares a base
    with the given class"""
    origin = get_origin(type_)
    return (
        origin is not None
        and isinstance(origin, type)
        and issubclass(origin, class_obj)
    )
def is_origin_of(
    type_: Any, *names: str, module: Optional[str] = None
) -> bool:
    """return True if the given type has an __origin__ with the given name
    and optional module."""
    origin = get_origin(type_)
    if origin is None:
        return False
    if origin.__name__ not in names:
        return False
    return module is None or origin.__module__.startswith(module)
| SupportsKeysAndGetItem |
python | realpython__materials | python-inherit-list-userlist/string_list2.py | {
"start": 35,
"end": 556
} | class ____(UserList):
def __init__(self, iterable):
    """Seed the underlying list, coercing every element to ``str``."""
    super().__init__(str(element) for element in iterable)
def __setitem__(self, index, item):
    """Assign ``item`` at ``index``, stored as ``str``."""
    coerced = str(item)
    self.data[index] = coerced
def insert(self, index, item):
    """Insert ``item`` at ``index``, stored as ``str``."""
    coerced = str(item)
    self.data.insert(index, coerced)
def append(self, item):
    """Append ``item``, stored as ``str``."""
    coerced = str(item)
    self.data.append(coerced)
def extend(self, other):
    """Extend the list; items from a foreign iterable are coerced to
    ``str``, while another instance of this class is assumed already
    coerced."""
    if isinstance(other, type(self)):
        items = other
    else:
        items = (str(element) for element in other)
    self.data.extend(items)
| StringList |
python | doocs__leetcode | solution/2900-2999/2904.Shortest and Lexicographically Smallest Beautiful String/Solution.py | {
"start": 0,
"end": 405
} | class ____:
def shortestBeautifulSubstring(self, s: str, k: int) -> str:
    """Return the shortest "beautiful" substring of ``s`` (containing
    exactly ``k`` ones); ties broken lexicographically; "" if none.

    Sliding window replaces the previous brute force: the old version
    re-counted ones for every (i, j) pair, O(n^3) overall, while this
    shrinks a window to the minimal k-ones span ending at each index.
    """
    ans = ""
    ones = 0
    left = 0
    for right, ch in enumerate(s):
        ones += ch == "1"
        # shrink: drop surplus ones, and strip useless leading zeros
        while ones > k or (ones == k and s[left] == "0"):
            ones -= s[left] == "1"
            left += 1
        if ones == k:
            cand = s[left : right + 1]
            if (
                not ans
                or len(cand) < len(ans)
                or (len(cand) == len(ans) and cand < ans)
            ):
                ans = cand
    return ans
| Solution |
python | cherrypy__cherrypy | cherrypy/test/test_iterator.py | {
"start": 1407,
"end": 5747
} | class ____(helper.CPWebCase):
@staticmethod
def setup_server():
    """Mount a minimal CherryPy app exposing, per iterator class name:
    an instance counter, a fully-buffered endpoint, and a streamed one."""
    class Root(object):
        @cherrypy.expose
        def count(self, clsname):
            # number of live instances of the named iterator class
            cherrypy.response.headers['Content-Type'] = 'text/plain'
            return str(globals()[clsname].created)

        @cherrypy.expose
        def getall(self, clsname):
            # buffered: CherryPy consumes the whole iterator up front
            cherrypy.response.headers['Content-Type'] = 'text/plain'
            return globals()[clsname]()

        @cherrypy.expose
        @cherrypy.config(**{'response.stream': True})
        def stream(self, clsname):
            # streamed variant of getall
            return self.getall(clsname)

    cherrypy.tree.mount(Root())
def test_iterator(self):
try:
self._test_iterator()
except Exception:
'Test fails intermittently. See #1419'
def _test_iterator(self):
    """Verify that response iterators are closed (instance counts return
    to zero) for buffered responses always, and for closable iterator
    classes even when the response is streamed and the client disconnects.
    """
    if cherrypy.server.protocol_version != 'HTTP/1.1':
        return self.skip()

    self.PROTOCOL = 'HTTP/1.1'

    # Check the counts of all the classes, they should be zero.
    closables = ['OurClosableIterator', 'OurGenerator']
    unclosables = ['OurUnclosableIterator', 'OurNotClosableIterator']
    all_classes = closables + unclosables

    # shuffle to avoid any accidental ordering dependency between classes
    import random
    random.shuffle(all_classes)

    for clsname in all_classes:
        self.getPage('/count/' + clsname)
        self.assertStatus(200)
        self.assertBody('0')

    # We should also be able to read the entire content body
    # successfully, though we don't need to, we just want to
    # check the header.
    for clsname in all_classes:
        itr_conn = self.get_conn()
        itr_conn.putrequest('GET', '/getall/' + clsname)
        itr_conn.endheaders()
        response = itr_conn.getresponse()
        self.assertEqual(response.status, 200)
        headers = response.getheaders()
        for header_name, header_value in headers:
            if header_name.lower() == 'content-length':
                # each iterator yields 1024 chunks of 16KiB... x 256
                expected = str(1024 * 16 * 256)
                assert header_value == expected, header_value
                break
        else:
            raise AssertionError('No Content-Length header found')

        # As the response should be fully consumed by CherryPy
        # before sending back, the count should still be at zero
        # by the time the response has been sent.
        self.getPage('/count/' + clsname)
        self.assertStatus(200)
        self.assertBody('0')
        itr_conn.close()

    # Now we do the same check with streaming - some classes will
    # be automatically closed, while others cannot.
    stream_counts = {}
    for clsname in all_classes:
        itr_conn = self.get_conn()
        itr_conn.putrequest('GET', '/stream/' + clsname)
        itr_conn.endheaders()
        response = itr_conn.getresponse()
        self.assertEqual(response.status, 200)
        # partially consume the stream so the iterator stays open
        response.fp.read(65536)

        # Let's check the count - this should always be one.
        self.getPage('/count/' + clsname)
        self.assertBody('1')

        # Now if we close the connection, the count should go back
        # to zero.
        itr_conn.close()
        self.getPage('/count/' + clsname)

        # If this is a response which should be easily closed, then
        # we will test to see if the value has gone back down to
        # zero.
        if clsname in closables:
            # Sometimes we try to get the answer too quickly - we
            # will wait for 100 ms before asking again if we didn't
            # get the answer we wanted.
            if self.body != '0':
                import time
                time.sleep(0.1)
                self.getPage('/count/' + clsname)
            stream_counts[clsname] = int(self.body)

    # Check that we closed off the classes which should provide
    # easy mechanisms for doing so.
    for clsname in closables:
        assert stream_counts[clsname] == 0, (
            'did not close off stream response correctly, expected '
            'count of zero for %s: %s' % (clsname, stream_counts)
        )
| IteratorTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1054357,
"end": 1054743
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("WorkflowRun", graphql_name="node")
"""The item at the end of the edge."""
| WorkflowRunEdge |
python | keon__algorithms | algorithms/maths/polynomial.py | {
"start": 175,
"end": 10324
} | class ____:
"""
A simple Monomial class to
record the details of all variables
that a typical monomial is composed of.
"""
def __init__(self, variables: Dict[int, int], coeff: Union[int, float, Fraction, None]= None) -> None:
    '''
    Create a monomial in the given variables:
    Examples:

        Monomial({1:1}) = (a_1)^1

        Monomial({
            1:3,
            2:2,
            4:1,
            5:0
        }, 12) = 12(a_1)^3(a_2)^2(a_4)

        Monomial({}) = 0

        Monomial({2:3, 3:-1}, 1.5) = (3/2)(a_2)^3(a_3)^(-1)
    '''
    self.variables = dict()

    if coeff is None:
        # default coefficient: 0 for the empty monomial, 1 otherwise
        if len(variables) == 0:
            coeff = Fraction(0, 1)
        else:
            coeff = Fraction(1, 1)
    elif coeff == 0:
        # NOTE: an explicit zero coefficient short-circuits to the zero
        # monomial, discarding any supplied variables.
        self.coeff = Fraction(0, 1)
        return

    if len(variables) == 0:
        self.coeff = Monomial._rationalize_if_possible(coeff)
        return

    # keep only non-zero powers
    for i in variables:
        if variables[i] != 0:
            self.variables[i] = variables[i]
    self.coeff = Monomial._rationalize_if_possible(coeff)
@staticmethod
def _rationalize_if_possible(num):
'''
A helper for converting numbers
to Fraction only when possible.
'''
if isinstance(num, Rational):
res = Fraction(num, 1)
return Fraction(res.numerator, res.denominator)
else:
return num
# def equal_upto_scalar(self, other: Monomial) -> bool:
def equal_upto_scalar(self, other) -> bool:
    """
    Return True when ``other`` is a monomial with exactly the same
    variable part as ``self`` (coefficients are ignored).
    """
    if not isinstance(other, Monomial):
        raise ValueError('Can only compare monomials.')
    return self.variables == other.variables
# def __add__(self, other: Union[int, float, Fraction, Monomial]):
def __add__(self, other: Union[int, float, Fraction]):
    """
    Define the addition of two
    monomials or the addition of
    a monomial with an int, float, or a Fraction.

    Raises ValueError when the variable parts differ, since the sum
    would then be a polynomial, not a monomial.
    """
    if isinstance(other, int) or isinstance(other, float) or isinstance(other, Fraction):
        # scalars are wrapped as constant monomials; note this only
        # succeeds when self is itself constant (same variable part)
        return self.__add__(Monomial({}, Monomial._rationalize_if_possible(other)))
    if not isinstance(other, Monomial):
        raise ValueError('Can only add monomials, ints, floats, or Fractions.')

    if self.variables == other.variables:
        mono = {i: self.variables[i] for i in self.variables}
        return Monomial(mono, Monomial._rationalize_if_possible(self.coeff + other.coeff)).clean()

    # If they don't share same variables then by the definition,
    # if they are added, the result becomes a polynomial and not a monomial.
    # Thus, raise ValueError in that case.
    raise ValueError(f'Cannot add {str(other)} to {self.__str__()} because they don\'t have same variables.')
# def __eq__(self, other: Monomial) -> bool:
def __eq__(self, other) -> bool:
    """
    Return True if two monomials have the same variable part and the
    same coefficient.

    NOTE(review): equal_upto_scalar raises ValueError for non-Monomial
    operands, so ``==`` against other types raises instead of returning
    NotImplemented -- confirm whether callers rely on that.
    """
    return self.equal_upto_scalar(other) and self.coeff == other.coeff
# def __mul__(self, other: Union[int, float, Fraction, Monomial]) -> Monomial:
def __mul__(self, other: Union[int, float, Fraction]):
    """
    Multiply two monomials and merge the variables
    in both of them.

    Examples:

        Monomial({1:1}) * Monomial({1: -3, 2: 1}) = (a_1)^(-2)(a_2)

        Monomial({3:2}) * 2.5 = (5/2)(a_3)^2

    """
    if isinstance(other, float) or isinstance(other, int) or isinstance(other, Fraction):
        # scalar multiply: variable part unchanged
        mono = {i: self.variables[i] for i in self.variables}
        return Monomial(mono, Monomial._rationalize_if_possible(self.coeff * other)).clean()
    if not isinstance(other, Monomial):
        raise ValueError('Can only multiply monomials, ints, floats, or Fractions.')
    else:
        # merge variable maps by summing exponents
        mono = {i: self.variables[i] for i in self.variables}
        for i in other.variables:
            if i in mono:
                mono[i] += other.variables[i]
            else:
                mono[i] = other.variables[i]

        # drop exponents that cancelled to zero
        temp = dict()
        for k in mono:
            if mono[k] != 0:
                temp[k] = mono[k]

        return Monomial(temp, Monomial._rationalize_if_possible(self.coeff * other.coeff)).clean()
# def inverse(self) -> Monomial:
def inverse(self):
    """
    Compute the multiplicative inverse of the monomial: the sign of
    every exponent is flipped and the coefficient reciprocated.

    Examples:

        Monomial({1:1, 2:-1, 3:2}, 2.5).inverse() = Monomial({1:-1, 2:1, 3:-2} ,2/5)
    """
    if self.coeff == 0:
        raise ValueError("Coefficient must not be 0.")
    flipped = {
        var: -power
        for var, power in self.variables.items()
        if power != 0
    }
    return Monomial(flipped, Monomial._rationalize_if_possible(1/self.coeff)).clean()
# def __truediv__(self, other: Union[int, float, Fraction, Monomial]) -> Monomial:
def __truediv__(self, other: Union[int, float, Fraction]):
    """
    Compute the division between two monomials
    or a monomial and some other datatype
    like int/float/Fraction.

    Raises ValueError on scalar division by zero.
    """
    if isinstance(other, int) or isinstance(other, float) or isinstance(other, Fraction):
        mono = {i: self.variables[i] for i in self.variables}
        if other == 0:
            raise ValueError('Cannot divide by 0.')
        return Monomial(mono, Monomial._rationalize_if_possible(self.coeff / other)).clean()

    # monomial / monomial == multiply by the inverse
    o = other.inverse()
    return self.__mul__(o)
# def __floordiv__(self, other: Union[int, float, Fraction, Monomial]) -> Monomial:
def __floordiv__(self, other: Union[int, float, Fraction]):
"""
For monomials,
floor div is the same as true div.
"""
return self.__truediv__(other)
# def clone(self) -> Monomial:
def clone(self):
    """
    Return a cleaned copy of this monomial (same variables and
    coefficient, zero powers dropped).
    """
    return Monomial(
        dict(self.variables),
        Monomial._rationalize_if_possible(self.coeff),
    ).clean()
# def clean(self) -> Monomial:
def clean(self):
    """
    Return an equivalent monomial with all zero-power variables
    removed.
    """
    nonzero = {var: power for var, power in self.variables.items() if power != 0}
    return Monomial(nonzero, Monomial._rationalize_if_possible(self.coeff))
# def __sub__(self, other: Union[int, float, Fraction, Monomial]) -> Monomial:
def __sub__(self, other: Union[int, float, Fraction]):
    """
    Compute the subtraction
    of a monomial and a datatype
    such as int, float, Fraction, or Monomial.

    As with addition, subtracting a scalar only succeeds when self is
    a constant monomial (otherwise the result would be a polynomial).
    """
    if isinstance(other, int) or isinstance(other, float) or isinstance(other, Fraction):
        mono = {i: self.variables[i] for i in self.variables if self.variables[i] != 0}
        if len(mono) != 0:
            raise ValueError('Can only subtract like monomials.')
        other_term = Monomial(mono, Monomial._rationalize_if_possible(other))
        return self.__sub__(other_term)
    if not isinstance(other, Monomial):
        raise ValueError('Can only subtract monomials')
    # a - b == a + (-1)*b
    return self.__add__(other.__mul__(Fraction(-1, 1)))
def __hash__(self) -> int:
    """
    Define the hash of a monomial
    by the underlying variables.

    If hashing is implemented in O(v*log(v))
    where v represents the number of
    variables in the monomial,
    then search queries for the
    purposes of simplification of a
    polynomial can be performed in
    O(v*log(v)) as well; much better than
    the length of the polynomial.

    Note: only positive exponents contribute, so monomials differing
    solely in non-positive powers hash identically (a legal collision);
    the coefficient likewise does not participate.
    """
    arr = []
    for i in sorted(self.variables):
        if self.variables[i] > 0:
            # repeat each variable index once per unit of its exponent
            for _ in range(self.variables[i]):
                arr.append(i)
    return hash(tuple(arr))
def all_variables(self) -> Set:
    """
    Return the set of all variable indices present in the monomial.

    The previous implementation sorted the keys before building the
    set; sets are unordered, so the sort was wasted work -- a direct
    ``set()`` over the dict is equivalent.
    """
    return set(self.variables)
def substitute(self, substitutions: Union[int, float, Fraction, Dict[int, Union[int, float, Fraction]]]) -> Fraction:
    """
    Substitute the variables in the
    monomial for values defined by
    the substitutions dictionary.

    A scalar argument substitutes the same value for every variable.
    Raises ValueError if the mapping misses any variable of the monomial.
    """
    if isinstance(substitutions, int) or isinstance(substitutions, float) or isinstance(substitutions, Fraction):
        # broadcast a single value to every variable
        substitutions = {v: Monomial._rationalize_if_possible(substitutions) for v in self.all_variables()}
    else:
        if not self.all_variables().issubset(set(substitutions.keys())):
            raise ValueError('Some variables didn\'t receive their values.')
    if self.coeff == 0:
        return Fraction(0, 1)
    ans = Monomial._rationalize_if_possible(self.coeff)
    for k in self.variables:
        ans *= Monomial._rationalize_if_possible(substitutions[k]**self.variables[k])
    return Monomial._rationalize_if_possible(ans)
def __str__(self) -> str:
    """
    Get a string representation of
    the monomial, e.g. ``12((a_1)**3(a_2))``; a constant monomial
    renders as just its coefficient.
    """
    if len(self.variables) == 0:
        return str(self.coeff)
    result = str(self.coeff)
    result += '('
    for i in self.variables:
        temp = 'a_{}'.format(str(i))
        if self.variables[i] > 1:
            temp = '(' + temp + ')**{}'.format(self.variables[i])
        elif self.variables[i] < 0:
            temp = '(' + temp + ')**(-{})'.format(-self.variables[i])
        elif self.variables[i] == 0:
            # zero powers contribute nothing (normally removed by clean())
            continue
        else:
            temp = '(' + temp + ')'
        result += temp
    return result + ')'
| Monomial |
python | ansible__ansible | lib/ansible/module_utils/_internal/_datatag/__init__.py | {
"start": 16380,
"end": 20371
} | class ____(AnsibleSerializableDataclass, metaclass=abc.ABCMeta):
"""
Base class for data tagging tag types.
New tag types need to be considered very carefully; e.g.: which serialization/runtime contexts they're allowed in, fallback behavior, propagation.
"""
_validation_allow_subclasses = False
def __init_subclass__(cls, **kwargs) -> None:
    """Register each concrete tag subclass in the module-level lookup
    tables keyed by class name."""
    # NOTE: This method is called twice when the datatag type is a dataclass.
    super(AnsibleDatatagBase, cls).__init_subclass__(**kwargs)  # cannot use super() without arguments when using slots

    # DTFIX-FUTURE: "freeze" this after module init has completed to discourage custom external tag subclasses
    # DTFIX-FUTURE: is there a better way to exclude non-abstract types which are base classes?
    if not inspect.isabstract(cls) and not cls.__name__.endswith('Base'):
        existing = _known_tag_type_map.get(cls.__name__)

        if existing:
            # When the datatag type is a dataclass, the first instance will be the non-dataclass type.
            # It must be removed from the known tag types before adding the dataclass version.
            _known_tag_types.remove(existing)

        _known_tag_type_map[cls.__name__] = cls
        _known_tag_types.add(cls)
@classmethod
def is_tagged_on(cls, value: t.Any) -> bool:
    """True when ``value`` carries a tag of this type."""
    tags_mapping = _try_get_internal_tags_mapping(value)
    return cls in tags_mapping
@classmethod
def first_tagged_on(cls, *values: t.Any) -> t.Any | None:
    """Return the first value which is tagged with this type, or None if no match is found."""
    return next((value for value in values if cls.is_tagged_on(value)), None)
@classmethod
def get_tag(cls, value: t.Any) -> t.Optional[t.Self]:
    """Return this type's tag instance from ``value``, or None."""
    tags_mapping = _try_get_internal_tags_mapping(value)
    return tags_mapping.get(cls)
@classmethod
def get_required_tag(cls, value: t.Any) -> t.Self:
    """Return this type's tag instance from ``value``; raise ValueError when absent."""
    tag = cls.get_tag(value)

    if tag is None:
        # DTFIX-FUTURE: we really should have a way to use AnsibleError with obj in module_utils when it's controller-side
        raise ValueError(f'The type {type(value).__name__!r} is not tagged with {cls.__name__!r}.')

    return tag
@classmethod
def untag(cls, value: _T) -> _T:
    """
    If this tag type is present on `value`, return a copy with that tag removed.
    Otherwise, the original `value` is returned.
    """
    # AnsibleTagHelper.untag removes only this tag type, leaving others intact
    return AnsibleTagHelper.untag(value, cls)
def tag(self, value: _T) -> _T:
    """
    Return a copy of `value` with this tag applied, overwriting any existing tag of the same type.
    If `value` is an ignored type, the original `value` will be returned.
    If `value` is not taggable, a `NotTaggableError` exception will be raised.
    """
    # strict variant of try_tag: untaggable values raise instead of passing through
    return AnsibleTagHelper.tag(value, self)
def try_tag(self, value: _T) -> _T:
    """
    Return a copy of `value` with this tag applied, overwriting any existing tag of the same type.
    If `value` is not taggable, the original `value` will be returned.
    """
    # lenient variant of tag(): never raises for untaggable values
    return AnsibleTagHelper.try_tag(value, self)
def _get_tag_to_propagate(self, src: t.Any, value: object, *, value_type: t.Optional[type] = None) -> t.Self | None:
    """
    Called by `AnsibleTagHelper.tag_copy` during tag propagation.
    Returns an instance of this tag appropriate for propagation to `value`, or `None` if the tag should not be propagated.
    Derived implementations may consult the arguments relayed from `tag_copy` to determine if and how the tag should be propagated.
    """
    # base behavior: propagate this tag unchanged
    return self
def __repr__(self) -> str:
    """Delegate to the shared serializable repr helper."""
    return AnsibleSerializable._repr(self, type(self).__name__)
# used by the datatag Ansible/Jinja test plugin to find tags by name
_known_tag_type_map: t.Dict[str, t.Type[AnsibleDatatagBase]] = {}
# set counterpart of the map above; kept in sync by __init_subclass__
_known_tag_types: t.Set[t.Type[AnsibleDatatagBase]] = set()
| AnsibleDatatagBase |
python | skorch-dev__skorch | skorch/probabilistic.py | {
"start": 919,
"end": 13372
} | class ____(NeuralNet):
"""Base class for all Gaussian Process estimators.
Most notably, a GPyTorch compatible criterion and likelihood should be
provided.
"""
def __init__(
    self,
    module,
    *args,
    likelihood,
    criterion,
    train_split=None,
    **kwargs
):
    """Initialize the GP estimator.

    ``likelihood`` (a GPyTorch likelihood class or instance) is stored
    before delegating to the parent so ``initialize_module`` can
    instantiate it; ``criterion`` must be a GPyTorch-compatible
    marginal log likelihood.  ``train_split`` defaults to None.
    """
    self.likelihood = likelihood
    super().__init__(
        module,
        *args,
        criterion=criterion,
        train_split=train_split,
        **kwargs
    )
def initialize_module(self):
    """Instantiate the likelihood first, then the module itself."""
    # pylint: disable=attribute-defined-outside-init
    likelihood_kwargs = self.get_params_for('likelihood')
    self.likelihood_ = self.initialized_instance(
        self.likelihood, likelihood_kwargs
    )
    super().initialize_module()
    return self
def initialize_criterion(self):
    """(Re-)instantiate the criterion.

    Always rebuilt here, since the GPyTorch criterion must receive the
    current likelihood and module instances.
    """
    # pylint: disable=attribute-defined-outside-init
    criterion_kwargs = self.get_params_for('criterion')
    # criterion takes likelihood as first argument
    self.criterion_ = self.criterion(
        likelihood=self.likelihood_,
        model=self.module_,
        **criterion_kwargs
    )
    return self
def check_is_fitted(self, attributes=None, *args, **kwargs):
    """Raise ``NotInitializedError`` unless the GP is initialized.

    Parameters
    ----------
    attributes : iterable of str or None (default=None)
        Attributes strictly required of a fitted net; defaults to
        ``['module_', 'likelihood_']``.  Remaining arguments are passed
        to ``sklearn.utils.validation.check_is_fitted``.
    """
    required = ['module_', 'likelihood_'] if attributes is None else attributes
    check_is_fitted(self, required, *args, **kwargs)
def train_step_single(self, batch, **fit_params):
    """Compute y_pred and loss for one batch and update gradients.

    Returns the parent's ``{'loss': ..., 'y_pred': ...}`` dict, with the
    likelihood applied to ``y_pred`` so that callers see the posterior.
    """
    step = super().train_step_single(batch, **fit_params)
    # GPyTorch criteria apply the likelihood to the raw module output
    # themselves, so it cannot be applied inside the module; the
    # posterior is produced here, after loss computation.
    step['y_pred'] = self.likelihood_(step['y_pred'])
    return step
# pylint: disable=unused-argument
def get_loss(self, y_pred, y_true, X=None, training=False):
    """Return the scalar loss to minimize for this batch.

    The GPyTorch criterion yields a (marginal) log likelihood, which is
    to be *maximized*; its sign is flipped so the optimizer can
    minimize.  Non-scalar criterion output is averaged first.
    """
    raw = super().get_loss(y_pred, y_true, X=X, training=training)
    if raw.dim() != 0:
        raw = raw.mean()
    return -raw
def evaluation_step(self, batch, training=False):
    """Forward step used for prediction and scoring.

    The module is set to evaluation mode unless ``training=True``; the
    likelihood is applied to the (first) module output so the returned
    value is the posterior predictive distribution.
    """
    self.check_is_fitted()
    Xi, _ = unpack_data(batch)
    with torch.set_grad_enabled(training), gpytorch.settings.fast_pred_var():
        self.module_.train(training)
        inferred = self.infer(Xi)

        if isinstance(inferred, tuple):
            # multiple outputs: likelihood applies to the first only
            return (self.likelihood_(inferred[0]),) + inferred[1:]
        return self.likelihood_(inferred)
def forward_iter(self, X, *args, **kwargs):
    """Yield outputs of module forward calls on each batch of data.

    Identical to the parent implementation except that it runs under
    ``gpytorch.settings.fast_pred_var()`` for efficient posterior
    variance computation.

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      By default, you should be able to pass:

        * numpy arrays
        * torch tensors
        * pandas DataFrame or Series
        * scipy sparse CSR matrices
        * a dictionary of the former three
        * a list/tuple of the former three
        * a Dataset

      If this doesn't work with your data, you have to pass a
      ``Dataset`` that can deal with the data.

    training : bool (default=False)
      Whether to set the module to train mode or not.

    device : string (default='cpu')
      The device to store each inference result on; defaults to CPU
      memory since there is generally more memory available there.

    Yields
    ------
    yp : torch tensor
      Result from a forward call on an individual batch.

    """
    # GPyTorch caches a couple things that don't depend on the test points
    # the first time a prediction is made so that the next time a prediction
    # is made, it doesn't have O(n^3) complexity. These caches get deleted
    # in some cases, like if the model gets put back in training mode, but
    # forward_iter doesn't do that, so it's okay.
    with gpytorch.settings.fast_pred_var():
        return super().forward_iter(X, *args, **kwargs)
def forward(self, X, training=False, device='cpu'):
    """Run the module over ``X`` and gather the per-batch results.

    Notes
    -----
    Gaussian-process modules return distribution objects, which cannot
    be concatenated the way tensors are; the results are therefore
    returned as a plain list of
    ``gpytorch.distributions.Distribution``, one entry per batch (a
    single entry if only one batch was used).

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      Same input types as accepted by ``forward_iter``.

    training : bool (default=False)
      Whether to set the module to train mode or not.

    device : string (default='cpu')
      Device to store each inference result on; defaults to CPU.

    Returns
    -------
    y_infer : list of gpytorch.distributions.Distribution
      The per-batch distributions generated by the module.
    """
    return list(self.forward_iter(X, training=training, device=device))
def predict_proba(self, X):
raise AttributeError("'predict_proba' is not implemented for {}".format(
self.__class__.__name__
))
def sample(self, X, n_samples, axis=-1):
"""Return samples conditioned on input data.
The GP doesn't need to be fitted but it must be initialized.
By default, samples all calculated including gradients. If you don't
need the gradients, call ``sample`` inside ``torch.no_grad()``.
If the probability distribution does not support the ``rsample`` method
(i.e. sampling with gradients), try ``sample`` (i.e. without gradients)
instead. One such distribution, at the time of writing, is Bernoulli.
X : input data
The samples where the GP is evaluated.
n_samples : int
The number of samples to return
axis : int (default=-1)
The concatenation axis of the samples. Since samples can come in
batches, they must be concatenated.
Returns
-------
samples : torch.Tensor
Samples from the posterior distribution.
"""
self.check_is_fitted()
samples = []
for p in self.forward_iter(X):
try:
sample = p.rsample(torch.Size([n_samples]))
except NotImplementedError:
# some distributions like Bernoulli have not implemented rsample
# (sampling with gradients), try sample instead.
sample = p.sample(torch.Size([n_samples]))
samples.append(sample)
return torch.cat(samples, axis=axis)
def confidence_region(self, X, sigmas=2):
"""Returns 2 standard deviations above and below the mean.
X : input data
The samples where the GP is evaluated.
sigmas : int (default=2)
The number of standard deviations of the region.
Returns
-------
lower : torch.Tensor
The lower end of the confidence region.
upper : torch.Tensor
The upper end of the confidence region.
"""
nonlin = self._get_predict_nonlinearity()
lower, upper = [], []
for yi in self.forward_iter(X):
posterior = yi[0] if isinstance(yi, tuple) else yi
mean = posterior.mean
std = posterior.stddev
std = std.mul_(sigmas)
lower.append(nonlin(mean.sub(std)))
upper.append(nonlin(mean.add(std)))
lower = torch.cat(lower)
upper = torch.cat(upper)
return lower, upper
def __getstate__(self):
try:
return super().__getstate__()
except pickle.PicklingError as exc:
msg = ("This GPyTorch model cannot be pickled. The reason is probably this:"
" https://github.com/pytorch/pytorch/issues/38137. "
"Try using 'dill' instead of 'pickle'.")
raise pickle.PicklingError(msg) from exc
| GPBase |
python | ethereum__web3.py | web3/types.py | {
"start": 1953,
"end": 2107
} | class ____(TypedDict):
address: HexStr
storageKeys: Sequence[HexStr]
AccessList = NewType("AccessList", Sequence[AccessListEntry])
| AccessListEntry |
python | anthropics__anthropic-sdk-python | src/anthropic/_exceptions.py | {
"start": 1479,
"end": 1964
} | class ____(APIError):
"""Raised when an API response has a status code of 4xx or 5xx."""
response: httpx.Response
status_code: int
request_id: str | None
def __init__(self, message: str, *, response: httpx.Response, body: object | None) -> None:
super().__init__(message, response.request, body=body)
self.response = response
self.status_code = response.status_code
self.request_id = response.headers.get("request-id")
| APIStatusError |
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 12796,
"end": 18067
} | class ____(pallas_core.MemoryRef):
transforms: Sequence[MemoryRefTransform] = ()
layout: tcgen05.TMEMLayout | None = dataclasses.field(default=None, kw_only=True)
collective: bool | None = dataclasses.field(default=None, kw_only=True)
def __post_init__(self):
is_tmem = self.memory_space == MemorySpace.TMEM
assert (self.layout is not None) == is_tmem
assert (self.collective is not None) == is_tmem
assert not (self.transforms and is_tmem)
def get_ref_aval(self) -> _Ref:
aval: Any = jax_core.ShapedArray(self.shape, self.dtype)
for t in self.transforms:
aval = t(aval)
if self.memory_space == MemorySpace.TMEM:
aval = AbstractTMEMRef(
aval, self.memory_space, self.layout, self.collective
)
else:
aval = state.AbstractRef(aval, memory_space=self.memory_space)
ref = pallas_core.TransformedRef(aval, ())
for t in reversed(self.transforms):
ref = t.undo(ref)
if not ref.transforms:
return ref.ref
return ref
def align_to(x: int, alignment: int):
if rem := x % alignment:
return x + alignment - rem
return x
# A tree of `GPUMemoryRef`s.
_GPUMemoryRefTree = Any
def _ref_group_size(refs: _GPUMemoryRefTree) -> int:
size = 0
for ref in jax.tree.leaves(refs):
# Make sure that the start of each ref is aligned with `SMEM_ALIGNMENT`.
size = align_to(size, SMEM_ALIGNMENT)
if jnp.issubdtype(ref.dtype, jnp.integer):
nbits = jnp.iinfo(ref.dtype).bits
elif jnp.issubdtype(ref.dtype, jnp.floating):
nbits = jnp.finfo(ref.dtype).bits
else:
raise NotImplementedError(f"Unsupported dtype: {ref.dtype}")
ref_bits = math.prod(ref.shape) * nbits
if ref_bits % 8:
raise ValueError(
"Only byte-aligned shapes are supported. Got shape:"
f" {ref.dtype}{ref.shape}"
)
size += ref_bits // 8
return size
def _ref_group_tmem_col_size(refs: _GPUMemoryRefTree) -> int:
"""Returns the total number of TMEM columns used by a group of aliased Refs.
"""
ncols = 0
for ref in jax.tree.leaves(refs):
ref_ncols = ref.layout.cols_in_shape(ref.shape,
dtypes.itemsize_bits(ref.dtype))
ncols += align_to(ref_ncols, TMEM_COL_ALIGNMENT)
return ncols
def infer_tmem_layout(
shape: tuple[int, ...],
dtype: jnp.dtype,
*,
packed: bool,
collective: bool) -> tcgen05.TMEMLayout:
"""Infers the number of columns used and layout for allocating TMEM Refs."""
if packed:
packing = 32 // dtypes.itemsize_bits(dtype)
else:
packing = 1
return tcgen05._infer_tmem_layout(shape, collective=collective, packing=packing) # type: ignore
def flatten_ref_union(ref_union: AbstractRefUnion) -> tuple[_Ref, ...]:
"""Flattens a union of trees of references into a tuple of references.
This is the moral equivalent of `jax.tree.leaves` for aliased references.
"""
flat_refs = []
if ref_union.memory_space == SMEM:
union_bytes = 0
for ref_group in ref_union.refs:
byte_offset = 0
for ref in jax.tree.leaves(ref_group):
byte_offset = align_to(byte_offset, SMEM_ALIGNMENT)
assert isinstance(ref, state.AbstractRef) or isinstance(
ref, pallas_core.TransformedRef
)
if not isinstance(ref, pallas_core.TransformedRef):
ref = pallas_core.TransformedRef(ref, transforms=())
transform = ExtractAliasedRef.from_transformed_ref(ref, byte_offset)
flat_refs.append(
pallas_core.TransformedRef(
ref_union, transforms=(transform, *ref.transforms)
)
)
if jnp.issubdtype(ref.dtype, jnp.integer):
nbits = jnp.iinfo(ref.dtype).bits
elif jnp.issubdtype(ref.dtype, jnp.floating):
nbits = jnp.finfo(ref.dtype).bits
else:
raise NotImplementedError(f"Unsupported dtype: {ref.dtype}")
ref_bits = math.prod(ref.shape) * nbits
if ref_bits % 8:
raise ValueError(
"Only byte-aligned shapes are supported. Got shape:"
f" {ref.dtype}{ref.shape}"
)
byte_offset += ref_bits // 8
union_bytes = max(union_bytes, byte_offset)
assert union_bytes == ref_union.shape[0]
elif ref_union.memory_space == TMEM:
union_cols = 0
for ref_group in ref_union.refs:
col_offset = 0
for ref in jax.tree.leaves(ref_group):
col_offset = align_to(col_offset, TMEM_COL_ALIGNMENT)
if not isinstance(ref, pallas_core.TransformedRef):
ref = pallas_core.TransformedRef(ref, transforms=())
ncols = ref.layout.cols_in_shape(ref.shape,
dtypes.itemsize_bits(ref.dtype))
transform = ExtractAliasedRef.from_transformed_ref(
ref, col_offset, layout=ref.layout)
flat_refs.append(
pallas_core.TransformedRef(
ref_union, transforms=(transform, *ref.transforms)
)
)
col_offset += ncols
union_cols = max(union_cols, col_offset)
assert union_cols == ref_union.shape[1], (union_cols, ref_union.shape[1])
else:
raise NotImplementedError("Only SMEM and TMEM refs are supported.")
return tuple(flat_refs)
| GPUMemoryRef |
python | pytest-dev__pytest-mock | src/pytest_mock/plugin.py | {
"start": 879,
"end": 964
} | class ____:
mock: MockType
patch: Optional[Any] = None
@dataclass
| MockCacheItem |
python | streamlit__streamlit | lib/streamlit/runtime/session_manager.py | {
"start": 2974,
"end": 3248
} | class ____(Exception):
"""Exception class for errors raised by SessionStorage.
The original error that causes a SessionStorageError to be (re)raised will generally
be an I/O error specific to the concrete SessionStorage implementation.
"""
| SessionStorageError |
python | getsentry__sentry | src/sentry/issues/endpoints/organization_derive_code_mappings.py | {
"start": 1125,
"end": 5278
} | class ____(OrganizationEndpoint):
"""
In the UI, we have a prompt to derive code mappings from the stacktrace filename.
This endpoint is used to get the possible code mappings for it.
"""
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
"POST": ApiPublishStatus.EXPERIMENTAL,
}
permission_classes = (OrganizationIntegrationsLoosePermission,)
def get(self, request: Request, organization: Organization) -> Response:
"""
Get all files from the customer repositories that match a stack trace frame.
``````````````````
:param organization:
:param string absPath:
:param string module:
:param string stacktraceFilename:
:param string platform:
:auth: required
"""
try:
file_repo_matches = []
resp_status: Literal[200, 204, 400] = status.HTTP_400_BAD_REQUEST
file_repo_matches = get_file_and_repo_matches(request, organization)
if file_repo_matches:
resp_status = status.HTTP_200_OK
else:
resp_status = status.HTTP_204_NO_CONTENT
return self.respond(serialize(file_repo_matches), status=resp_status)
except InstallationCannotGetTreesError:
return self.respond(
{"text": "The integration does not support getting trees"},
status=status.HTTP_404_NOT_FOUND,
)
except InstallationNotFoundError:
return self.respond(
{"text": "Could not find this integration installed on your organization"},
status=status.HTTP_404_NOT_FOUND,
)
except NeedsExtension:
return self.respond({"text": "Needs extension"}, status=status.HTTP_400_BAD_REQUEST)
except KeyError:
return self.respond(
{"text": "Missing required parameters"}, status=status.HTTP_400_BAD_REQUEST
)
except UnsupportedFrameInfo:
return self.respond(
{"text": "Unsupported frame info"}, status=status.HTTP_400_BAD_REQUEST
)
def post(self, request: Request, organization: Organization) -> Response:
"""
Create a new repository project path config
``````````````````
:param organization:
:param int projectId:
:param string repoName:
:param string defaultBranch:
:param string stackRoot:
:param string sourceRoot:
:auth: required
"""
try:
project = Project.objects.get(id=request.data["projectId"])
except (Project.DoesNotExist, KeyError):
return self.respond(
{"text": "Could not find project"}, status=status.HTTP_404_NOT_FOUND
)
if not request.access.has_project_access(project):
return self.respond(status=status.HTTP_403_FORBIDDEN)
try:
installation = get_installation(organization)
# It helps with typing since org_integration can be None
if not installation.org_integration:
raise InstallationNotFoundError
code_mapping = get_code_mapping_from_request(request)
new_code_mapping = create_code_mapping(organization, code_mapping, project)
except KeyError:
return self.respond(
{"text": "Missing required parameters"}, status=status.HTTP_400_BAD_REQUEST
)
except InstallationNotFoundError:
return self.respond(
{"text": "Could not find this integration installed on your organization"},
status=status.HTTP_404_NOT_FOUND,
)
except InstallationCannotGetTreesError:
return self.respond(
{"text": "The integration does not support getting trees"},
status=status.HTTP_404_NOT_FOUND,
)
return self.respond(
serialize(new_code_mapping, request.user), status=status.HTTP_201_CREATED
)
| OrganizationDeriveCodeMappingsEndpoint |
python | google__pytype | pytype/vm_test.py | {
"start": 1585,
"end": 3217
} | class ____(TraceVmTestBase):
"""Tests for opcode tracing in the VM."""
def test_empty_data(self):
"""Test that we can trace values without data."""
op = test_utils.FakeOpcode("foo.py", 123, 123, 0, 0, "foo")
self.ctx.vm.trace_opcode(op, "x", 42)
self.assertEqual(self.ctx.vm.opcode_traces, [(op, "x", (None,))])
def test_const(self):
src = textwrap.dedent("""
x = 1 # line 1
y = x # line 2
""").lstrip()
if self.ctx.python_version >= (3, 12):
# Compiles to:
# 2 LOAD_CONST 0 (1)
# 4 STORE_NAME 0 (x)
#
# 6 LOAD_NAME 0 (x)
# 8 STORE_NAME 1 (y)
# 10 RETURN_CONST 1 (None)
expected = [
# (opcode, line number, symbol)
("LOAD_CONST", 1, 1),
("STORE_NAME", 1, "x"),
("LOAD_NAME", 2, "x"),
("STORE_NAME", 2, "y"),
("RETURN_CONST", 2, None),
]
else:
# Compiles to:
# 0 LOAD_CONST 0 (1)
# 3 STORE_NAME 0 (x)
#
# 6 LOAD_NAME 0 (x)
# 9 STORE_NAME 1 (y)
# 12 LOAD_CONST 1 (None)
# 15 RETURN_VALUE
expected = [
# (opcode, line number, symbol)
("LOAD_CONST", 1, 1),
("STORE_NAME", 1, "x"),
("LOAD_NAME", 2, "x"),
("STORE_NAME", 2, "y"),
("LOAD_CONST", 2, None),
]
self.ctx.vm.run_program(src, "", maximum_depth=10)
actual = [
(op.name, op.line, symbol)
for op, symbol, _ in self.ctx.vm.opcode_traces
]
self.assertEqual(actual, expected)
| TraceTest |
python | ansible__ansible | lib/ansible/_internal/_templating/_jinja_common.py | {
"start": 2077,
"end": 6931
} | class ____(StrictUndefined, Tripwire):
"""
Extends Jinja's `StrictUndefined`, allowing any kind of error occurring during recursive templating operations to be captured and deferred.
Direct or managed access to most `Marker` attributes will raise a `MarkerError`, which usually ends the current innermost templating
operation and converts the `MarkerError` back to the origin Marker instance (subject to the `MarkerBehavior` in effect at the time).
"""
__slots__ = ('_marker_template_source',)
_concrete_subclasses: t.ClassVar[set[type[Marker]]] = set()
def __init__(
self,
hint: t.Optional[str] = None,
obj: t.Any = missing,
name: t.Optional[str] = None,
exc: t.Type[TemplateRuntimeError] = UndefinedError, # Ansible doesn't set this argument or consume the attribute it is stored under.
*args,
_no_template_source=False,
**kwargs,
) -> None:
if not hint and name and obj is not missing:
hint = f"object of type {native_type_name(obj)!r} has no attribute {name!r}"
kwargs.update(
hint=hint,
obj=obj,
name=name,
exc=exc,
)
super().__init__(*args, **kwargs)
if _no_template_source:
self._marker_template_source = None
else:
self._marker_template_source = TemplateContext.current().template_value
def _as_exception(self) -> Exception:
"""Return the exception instance to raise in a top-level templating context."""
return AnsibleUndefinedVariable(self._undefined_message, obj=self._marker_template_source)
def _as_message(self) -> str:
"""Return the error message to show when this marker must be represented as a string, such as for substitutions or warnings."""
return self._undefined_message
def _fail_with_undefined_error(self, *args: t.Any, **kwargs: t.Any) -> t.NoReturn:
"""Ansible-specific replacement for Jinja's _fail_with_undefined_error tripwire on dunder methods."""
self.trip()
def trip(self) -> t.NoReturn:
"""Raise an internal exception which can be converted back to this instance."""
raise MarkerError(self._undefined_message, self)
def __setattr__(self, name: str, value: t.Any) -> None:
"""
Any attempt to set an unknown attribute on a `Marker` should invoke the trip method to propagate the original context.
This does not protect against mutation of known attributes, but the implementation is fairly simple.
"""
try:
super().__setattr__(name, value)
except AttributeError:
pass
else:
return
self.trip()
def __getattr__(self, name: str) -> t.Any:
"""Raises AttributeError for dunder-looking accesses, self-propagates otherwise."""
if name.startswith('__') and name.endswith('__'):
raise AttributeError(name)
return self
def __getitem__(self, key):
"""Self-propagates on all item accesses."""
return self
@classmethod
def __init_subclass__(cls, **kwargs) -> None:
if not inspect.isabstract(cls):
_untaggable_types.add(cls)
cls._concrete_subclasses.add(cls)
@classmethod
def _init_class(cls):
_untaggable_types.add(cls)
# These are the methods StrictUndefined already intercepts.
jinja_method_names = (
'__add__',
'__bool__',
'__call__',
'__complex__',
'__contains__',
'__div__',
'__eq__',
'__float__',
'__floordiv__',
'__ge__',
# '__getitem__', # using a custom implementation that propagates self instead
'__gt__',
'__hash__',
'__int__',
'__iter__',
'__le__',
'__len__',
'__lt__',
'__mod__',
'__mul__',
'__ne__',
'__neg__',
'__pos__',
'__pow__',
'__radd__',
'__rdiv__',
'__rfloordiv__',
'__rmod__',
'__rmul__',
'__rpow__',
'__rsub__',
'__rtruediv__',
'__str__',
'__sub__',
'__truediv__',
)
# These additional methods should be intercepted, even though they are not intercepted by StrictUndefined.
additional_method_names = (
'__aiter__',
'__delattr__',
'__format__',
'__repr__',
'__setitem__',
)
for name in jinja_method_names + additional_method_names:
setattr(cls, name, cls._fail_with_undefined_error)
Marker._init_class()
| Marker |
python | numba__numba | numba/core/typed_passes.py | {
"start": 20795,
"end": 21176
} | class ____(AnalysisPass):
_name = "ir_legalization"
def __init__(self):
AnalysisPass.__init__(self)
def run_pass(self, state):
# NOTE: this function call must go last, it checks and fixes invalid IR!
check_and_legalize_ir(state.func_ir, flags=state.flags)
return True
@register_pass(mutates_CFG=True, analysis_only=False)
| IRLegalization |
python | tensorflow__tensorflow | tensorflow/python/framework/config_test.py | {
"start": 32260,
"end": 34521
} | class ____(test.TestCase):
def tearDown(self):
super(TensorFloat32Test, self).tearDown()
config.enable_tensor_float_32_execution(True)
def test_tensor_float_32_global_variable(self):
self.assertTrue(config.tensor_float_32_execution_enabled())
self.assertTrue(test_ops.is_tensor_float32_enabled())
config.enable_tensor_float_32_execution(False)
self.assertFalse(config.tensor_float_32_execution_enabled())
self.assertFalse(test_ops.is_tensor_float32_enabled())
config.enable_tensor_float_32_execution(True)
self.assertTrue(config.tensor_float_32_execution_enabled())
self.assertTrue(test_ops.is_tensor_float32_enabled())
def _skip_if_tensor_float_32_unsupported(self):
if not test_util.is_gpu_available(
cuda_only=True, min_cuda_compute_capability=(8, 0)):
self.skipTest('TensorFloat-32 requires an NVIDIA GPU with compute '
'capability of at least 8.0')
# Size of each dimension of matrices to test. cuBLAS does not use TF32 for
# small matrices, so we must choose a large enough size to cause TF32 to be
# used.
DIM = 2 ** 10
def test_tensor_float_32_enabled(self):
self._skip_if_tensor_float_32_unsupported()
self.assertTrue(config.tensor_float_32_execution_enabled())
x = array_ops.fill((self.DIM, self.DIM), 1 + 2**-12)
y = array_ops.ones((self.DIM, self.DIM))
out = math_ops.matmul(x, y)
# In TensorFloat-32, each element of x is rounded to 1, so each output
# element should be self.DIM.
expected = array_ops.fill((self.DIM, self.DIM), float(self.DIM))
self.assertAllEqual(out, expected)
def test_tensor_float_32_disabled(self):
self._skip_if_tensor_float_32_unsupported()
self.assertTrue(config.tensor_float_32_execution_enabled())
config.enable_tensor_float_32_execution(False)
self.assertFalse(config.tensor_float_32_execution_enabled())
x = array_ops.fill((self.DIM, self.DIM), 1 + 2**-12)
y = array_ops.ones((self.DIM, self.DIM))
out = math_ops.matmul(x, y)
expected = array_ops.fill((self.DIM, self.DIM), self.DIM * (1 + 2**-12))
self.assertAllClose(out, expected, rtol=2**-13, atol=0)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| TensorFloat32Test |
python | gevent__gevent | src/greentest/3.12/test_signal.py | {
"start": 27906,
"end": 31608
} | class ____(unittest.TestCase):
def setUp(self):
self.hndl_called = False
self.hndl_count = 0
self.itimer = None
self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)
def tearDown(self):
signal.signal(signal.SIGALRM, self.old_alarm)
if self.itimer is not None: # test_itimer_exc doesn't change this attr
# just ensure that itimer is stopped
signal.setitimer(self.itimer, 0)
def sig_alrm(self, *args):
self.hndl_called = True
def sig_vtalrm(self, *args):
self.hndl_called = True
if self.hndl_count > 3:
# it shouldn't be here, because it should have been disabled.
raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
"timer.")
elif self.hndl_count == 3:
# disable ITIMER_VIRTUAL, this function shouldn't be called anymore
signal.setitimer(signal.ITIMER_VIRTUAL, 0)
self.hndl_count += 1
def sig_prof(self, *args):
self.hndl_called = True
signal.setitimer(signal.ITIMER_PROF, 0)
def test_itimer_exc(self):
# XXX I'm assuming -1 is an invalid itimer, but maybe some platform
# defines it ?
self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
# Negative times are treated as zero on some platforms.
if 0:
self.assertRaises(signal.ItimerError,
signal.setitimer, signal.ITIMER_REAL, -1)
def test_itimer_real(self):
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1.0)
signal.pause()
self.assertEqual(self.hndl_called, True)
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform in ('netbsd5',),
'itimer not reliable (does not mix well with threading) on some BSDs.')
def test_itimer_virtual(self):
self.itimer = signal.ITIMER_VIRTUAL
signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
signal.setitimer(self.itimer, 0.001, 0.001)
for _ in support.busy_retry(support.LONG_TIMEOUT):
# use up some virtual time by doing real work
_ = sum(i * i for i in range(10**5))
if signal.getitimer(self.itimer) == (0.0, 0.0):
# sig_vtalrm handler stopped this itimer
break
# virtual itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
def test_itimer_prof(self):
self.itimer = signal.ITIMER_PROF
signal.signal(signal.SIGPROF, self.sig_prof)
signal.setitimer(self.itimer, 0.2, 0.2)
for _ in support.busy_retry(support.LONG_TIMEOUT):
# do some work
_ = sum(i * i for i in range(10**5))
if signal.getitimer(self.itimer) == (0.0, 0.0):
# sig_prof handler stopped this itimer
break
# profiling itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
def test_setitimer_tiny(self):
# bpo-30807: C setitimer() takes a microsecond-resolution interval.
# Check that float -> timeval conversion doesn't round
# the interval down to zero, which would disable the timer.
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1e-6)
time.sleep(1)
self.assertEqual(self.hndl_called, True)
| ItimerTest |
python | pypa__pip | tests/lib/__init__.py | {
"start": 42667,
"end": 42822
} | class ____:
def __init__(self, returncode: int, stdout: str) -> None:
self.returncode = returncode
self.stdout = stdout
| InMemoryPipResult |
python | weaviate__weaviate-python-client | weaviate/debug/types.py | {
"start": 151,
"end": 617
} | class ____(BaseModel):
collection: str = Field(..., alias="class")
creation_time: datetime = Field(..., alias="creationTimeUnix")
last_update_time: datetime = Field(..., alias="lastUpdateTimeUnix")
properties: Dict[str, Any] = Field(...)
tenant: Optional[str] = Field(None)
uuid: uuid_package.UUID = Field(..., alias="id")
vector: Optional[list[float]] = Field(None)
vectors: Optional[Dict[str, list[float]]] = Field(None)
| DebugRESTObject |
python | django__django | tests/model_forms/tests.py | {
"start": 119156,
"end": 121236
} | class ____(SimpleTestCase):
def test_form_subclass_inheritance(self):
class Form(forms.Form):
age = forms.IntegerField()
class ModelForm(forms.ModelForm, Form):
class Meta:
model = Writer
fields = "__all__"
self.assertEqual(list(ModelForm().fields), ["name", "age"])
def test_field_removal(self):
class ModelForm(forms.ModelForm):
class Meta:
model = Writer
fields = "__all__"
class Mixin:
age = None
class Form(forms.Form):
age = forms.IntegerField()
class Form2(forms.Form):
foo = forms.IntegerField()
self.assertEqual(list(ModelForm().fields), ["name"])
self.assertEqual(list(type("NewForm", (Mixin, Form), {})().fields), [])
self.assertEqual(
list(type("NewForm", (Form2, Mixin, Form), {})().fields), ["foo"]
)
self.assertEqual(
list(type("NewForm", (Mixin, ModelForm, Form), {})().fields), ["name"]
)
self.assertEqual(
list(type("NewForm", (ModelForm, Mixin, Form), {})().fields), ["name"]
)
self.assertEqual(
list(type("NewForm", (ModelForm, Form, Mixin), {})().fields),
["name", "age"],
)
self.assertEqual(
list(type("NewForm", (ModelForm, Form), {"age": None})().fields), ["name"]
)
def test_field_removal_name_clashes(self):
"""
Form fields can be removed in subclasses by setting them to None
(#22510).
"""
class MyForm(forms.ModelForm):
media = forms.CharField()
class Meta:
model = Writer
fields = "__all__"
class SubForm(MyForm):
media = None
self.assertIn("media", MyForm().fields)
self.assertNotIn("media", SubForm().fields)
self.assertTrue(hasattr(MyForm, "media"))
self.assertTrue(hasattr(SubForm, "media"))
| ModelFormInheritanceTests |
python | numba__numba | numba/cuda/cudamath.py | {
"start": 1863,
"end": 2246
} | class ____(ConcreteTemplate):
key = math.hypot
cases = [
signature(types.float64, types.int64, types.int64),
signature(types.float64, types.uint64, types.uint64),
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
]
@infer_global(math.copysign)
@infer_global(math.fmod)
| Math_hypot |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 4748,
"end": 6054
} | class ____(AssertsCompiledSQL, fixtures.TestBase):
__dialect__ = postgresql.dialect()
@testing.combinations(
("asyncpg", "x LIKE $1::VARCHAR"),
("psycopg", "x LIKE %(x_1)s::VARCHAR"),
("psycopg2", "x LIKE %(x_1)s"),
("pg8000", "x LIKE %s::VARCHAR"),
)
def test_string_coercion_no_len(self, driver, expected):
"""test #9511.
comparing to string does not include length in the cast for those
dialects that require a cast.
"""
self.assert_compile(
column("x", String(2)).like("%a%"),
expected,
dialect=f"postgresql+{driver}",
)
@testing.combinations(
("sa", sqltypes.Float(), "FLOAT"), # ideally it should render real
("sa", sqltypes.Double(), "DOUBLE PRECISION"),
("sa", sqltypes.FLOAT(), "FLOAT"),
("sa", sqltypes.REAL(), "REAL"),
("sa", sqltypes.DOUBLE(), "DOUBLE"),
("sa", sqltypes.DOUBLE_PRECISION(), "DOUBLE PRECISION"),
("pg", postgresql.FLOAT(), "FLOAT"),
("pg", postgresql.DOUBLE_PRECISION(), "DOUBLE PRECISION"),
("pg", postgresql.REAL(), "REAL"),
id_="ira",
)
def test_float_type_compile(self, type_, sql_text):
self.assert_compile(type_, sql_text)
| MiscTypesTest |
python | getsentry__sentry | src/sentry/api/endpoints/organization_pinned_searches.py | {
"start": 1315,
"end": 4362
} | class ____(OrganizationEndpoint):
owner = ApiOwner.UNOWNED
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
}
permission_classes = (OrganizationPinnedSearchPermission,)
def put(self, request: Request, organization: Organization) -> Response:
if not request.user.is_authenticated:
return Response(status=400)
serializer = OrganizationSearchSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
result = serializer.validated_data
SavedSearch.objects.create_or_update(
organization=organization,
name=PINNED_SEARCH_NAME,
owner_id=request.user.id,
type=result["type"],
visibility=Visibility.OWNER_PINNED,
values={"query": result["query"], "sort": result["sort"]},
)
# This entire endpoint will be removed once custom views are GA'd
first_starred_view = GroupSearchViewStarred.objects.filter(
organization=organization, user_id=request.user.id, position=0
).first()
if first_starred_view:
default_view = first_starred_view.group_search_view
default_view.query = result["query"]
default_view.query_sort = result["sort"]
default_view.save()
else:
new_default_view = GroupSearchView.objects.create(
organization=organization,
user_id=request.user.id,
name="Default Search",
query=result["query"],
query_sort=result["sort"],
visibility=GroupSearchViewVisibility.ORGANIZATION,
)
GroupSearchViewStarred.objects.create(
organization=organization,
user_id=request.user.id,
group_search_view_id=new_default_view.id,
position=0,
)
pinned_search = SavedSearch.objects.get(
organization=organization,
owner_id=request.user.id,
type=result["type"],
visibility=Visibility.OWNER_PINNED,
)
return Response(serialize(pinned_search, request.user), status=201)
def delete(self, request: Request, organization) -> Response:
if not request.user.is_authenticated:
return Response(status=400)
try:
search_type = SearchType(int(request.data.get("type", 0)))
except ValueError as e:
return Response({"detail": "Invalid input for `type`. Error: %s" % str(e)}, status=400)
SavedSearch.objects.filter(
organization=organization,
owner_id=request.user.id,
type=search_type.value,
visibility=Visibility.OWNER_PINNED,
).delete()
GroupSearchView.objects.filter(organization=organization, user_id=request.user.id).delete()
return Response(status=204)
| OrganizationPinnedSearchEndpoint |
python | marshmallow-code__apispec | tests/test_ext_marshmallow_common.py | {
"start": 214,
"end": 1160
} | class ____:
def test_raise_if_schema_class_passed(self):
with pytest.raises(TypeError, match="based on a Schema instance"):
make_schema_key(PetSchema)
def test_same_schemas_instances_equal(self):
assert make_schema_key(PetSchema()) == make_schema_key(PetSchema())
@pytest.mark.parametrize("structure", (list, set))
def test_same_schemas_instances_unhashable_modifiers_equal(self, structure):
modifier = [str(i) for i in range(1000)]
assert make_schema_key(
PetSchema(load_only=structure(modifier))
) == make_schema_key(PetSchema(load_only=structure(modifier[::-1])))
def test_different_schemas_not_equal(self):
assert make_schema_key(PetSchema()) != make_schema_key(SampleSchema())
def test_instances_with_different_modifiers_not_equal(self):
assert make_schema_key(PetSchema()) != make_schema_key(PetSchema(partial=True))
| TestMakeSchemaKey |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 6102,
"end": 6263
} | class ____(Event):
''' Base class for all Bokeh Document events.
This base class is not typically useful to instantiate on its own.
'''
| DocumentEvent |
python | redis__redis-py | redis/commands/core.py | {
"start": 220861,
"end": 226391
} | class ____(CommandsProtocol):
"""
Redis Lua script commands. see:
https://redis.io/ebook/part-3-next-steps/chapter-11-scripting-redis-with-lua/
"""
def _eval(
self,
command: str,
script: str,
numkeys: int,
*keys_and_args: Union[KeyT, EncodableT],
) -> Union[Awaitable[str], str]:
return self.execute_command(command, script, numkeys, *keys_and_args)
def eval(
self, script: str, numkeys: int, *keys_and_args: Union[KeyT, EncodableT]
) -> Union[Awaitable[str], str]:
"""
Execute the Lua ``script``, specifying the ``numkeys`` the script
will touch and the key names and argument values in ``keys_and_args``.
Returns the result of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
For more information, see https://redis.io/commands/eval
"""
return self._eval("EVAL", script, numkeys, *keys_and_args)
def eval_ro(
self, script: str, numkeys: int, *keys_and_args: Union[KeyT, EncodableT]
) -> Union[Awaitable[str], str]:
"""
The read-only variant of the EVAL command
Execute the read-only Lua ``script`` specifying the ``numkeys`` the script
will touch and the key names and argument values in ``keys_and_args``.
Returns the result of the script.
For more information, see https://redis.io/commands/eval_ro
"""
return self._eval("EVAL_RO", script, numkeys, *keys_and_args)
def _evalsha(
self,
command: str,
sha: str,
numkeys: int,
*keys_and_args: Union[KeyT, EncodableT],
) -> Union[Awaitable[str], str]:
return self.execute_command(command, sha, numkeys, *keys_and_args)
def evalsha(
self, sha: str, numkeys: int, *keys_and_args: Union[KeyT, EncodableT]
) -> Union[Awaitable[str], str]:
"""
Use the ``sha`` to execute a Lua script already registered via EVAL
or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the
key names and argument values in ``keys_and_args``. Returns the result
of the script.
In practice, use the object returned by ``register_script``. This
function exists purely for Redis API completion.
For more information, see https://redis.io/commands/evalsha
"""
return self._evalsha("EVALSHA", sha, numkeys, *keys_and_args)
def evalsha_ro(
self, sha: str, numkeys: int, *keys_and_args: Union[KeyT, EncodableT]
) -> Union[Awaitable[str], str]:
"""
The read-only variant of the EVALSHA command
Use the ``sha`` to execute a read-only Lua script already registered via EVAL
or SCRIPT LOAD. Specify the ``numkeys`` the script will touch and the
key names and argument values in ``keys_and_args``. Returns the result
of the script.
For more information, see https://redis.io/commands/evalsha_ro
"""
return self._evalsha("EVALSHA_RO", sha, numkeys, *keys_and_args)
def script_exists(self, *args: str) -> ResponseT:
"""
Check if a script exists in the script cache by specifying the SHAs of
each script as ``args``. Returns a list of boolean values indicating if
if each already script exists in the cache_data.
For more information, see https://redis.io/commands/script-exists
"""
return self.execute_command("SCRIPT EXISTS", *args)
def script_debug(self, *args) -> None:
raise NotImplementedError(
"SCRIPT DEBUG is intentionally not implemented in the client."
)
def script_flush(
self, sync_type: Union[Literal["SYNC"], Literal["ASYNC"]] = None
) -> ResponseT:
"""Flush all scripts from the script cache_data.
``sync_type`` is by default SYNC (synchronous) but it can also be
ASYNC.
For more information, see https://redis.io/commands/script-flush
"""
# Redis pre 6 had no sync_type.
if sync_type not in ["SYNC", "ASYNC", None]:
raise DataError(
"SCRIPT FLUSH defaults to SYNC in redis > 6.2, or "
"accepts SYNC/ASYNC. For older versions, "
"of redis leave as None."
)
if sync_type is None:
pieces = []
else:
pieces = [sync_type]
return self.execute_command("SCRIPT FLUSH", *pieces)
def script_kill(self) -> ResponseT:
"""
Kill the currently executing Lua script
For more information, see https://redis.io/commands/script-kill
"""
return self.execute_command("SCRIPT KILL")
def script_load(self, script: ScriptTextT) -> ResponseT:
"""
Load a Lua ``script`` into the script cache_data. Returns the SHA.
For more information, see https://redis.io/commands/script-load
"""
return self.execute_command("SCRIPT LOAD", script)
def register_script(self: "redis.client.Redis", script: ScriptTextT) -> Script:
"""
Register a Lua ``script`` specifying the ``keys`` it will touch.
Returns a Script object that is callable and hides the complexity of
deal with scripts, keys, and shas. This is the preferred way to work
with Lua scripts.
"""
return Script(self, script)
| ScriptCommands |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/base_frontend_mixin.py | {
"start": 73,
"end": 6295
} | class ____(object):
""" A mix-in class for implementing Qt frontends.
To handle messages of a particular type, frontends need only define an
appropriate handler method. For example, to handle 'stream' messaged, define
a '_handle_stream(msg)' method.
"""
#---------------------------------------------------------------------------
# 'BaseFrontendMixin' concrete interface
#---------------------------------------------------------------------------
_kernel_client = None
_kernel_manager = None
@property
def kernel_client(self):
"""Returns the current kernel client."""
return self._kernel_client
@kernel_client.setter
def kernel_client(self, kernel_client):
"""Disconnect from the current kernel client (if any) and set a new
kernel client.
"""
# Disconnect the old kernel client, if necessary.
old_client = self._kernel_client
if old_client is not None:
old_client.started_channels.disconnect(self._started_channels)
old_client.stopped_channels.disconnect(self._stopped_channels)
# Disconnect the old kernel client's channels.
old_client.iopub_channel.message_received.disconnect(self._dispatch)
old_client.shell_channel.message_received.disconnect(self._dispatch)
old_client.stdin_channel.message_received.disconnect(self._dispatch)
old_client.hb_channel.kernel_died.disconnect(
self._handle_kernel_died)
# Handle the case where the old kernel client is still listening.
if old_client.channels_running:
self._stopped_channels()
# Set the new kernel client.
self._kernel_client = kernel_client
if kernel_client is None:
return
# Connect the new kernel client.
kernel_client.started_channels.connect(self._started_channels)
kernel_client.stopped_channels.connect(self._stopped_channels)
# Connect the new kernel client's channels.
kernel_client.iopub_channel.message_received.connect(self._dispatch)
kernel_client.shell_channel.message_received.connect(self._dispatch)
kernel_client.stdin_channel.message_received.connect(self._dispatch)
# hb_channel
kernel_client.hb_channel.kernel_died.connect(self._handle_kernel_died)
# Handle the case where the kernel client started channels before
# we connected.
if kernel_client.channels_running:
self._started_channels()
@property
def kernel_manager(self):
"""The kernel manager, if any"""
return self._kernel_manager
@kernel_manager.setter
def kernel_manager(self, kernel_manager):
old_man = self._kernel_manager
if old_man is not None:
old_man.kernel_restarted.disconnect(self._handle_kernel_restarted)
self._kernel_manager = kernel_manager
if kernel_manager is None:
return
kernel_manager.kernel_restarted.connect(self._handle_kernel_restarted)
#---------------------------------------------------------------------------
# 'BaseFrontendMixin' abstract interface
#---------------------------------------------------------------------------
def _handle_kernel_died(self, since_last_heartbeat):
""" This is called when the ``kernel_died`` signal is emitted.
This method is called when the kernel heartbeat has not been
active for a certain amount of time.
This is a strictly passive notification -
the kernel is likely being restarted by its KernelManager.
Parameters
----------
since_last_heartbeat : float
The time since the heartbeat was last received.
"""
def _handle_kernel_restarted(self):
""" This is called when the ``kernel_restarted`` signal is emitted.
This method is called when the kernel has been restarted by the
autorestart mechanism.
Parameters
----------
since_last_heartbeat : float
The time since the heartbeat was last received.
"""
def _started_kernel(self):
"""Called when the KernelManager starts (or restarts) the kernel subprocess.
Channels may or may not be running at this point.
"""
def _started_channels(self):
""" Called when the KernelManager channels have started listening or
when the frontend is assigned an already listening KernelManager.
"""
def _stopped_channels(self):
""" Called when the KernelManager channels have stopped listening or
when a listening KernelManager is removed from the frontend.
"""
#---------------------------------------------------------------------------
# 'BaseFrontendMixin' protected interface
#---------------------------------------------------------------------------
def _dispatch(self, msg):
""" Calls the frontend handler associated with the message type of the
given message.
"""
msg_type = msg['header']['msg_type']
handler = getattr(self, '_handle_' + msg_type, None)
if handler:
handler(msg)
def from_here(self, msg):
"""Return whether a message is from this session"""
session_id = self._kernel_client.session.session
return msg['parent_header'].get("session", session_id) == session_id
def include_output(self, msg):
"""Return whether we should include a given output message"""
if msg['parent_header']:
# If parent message is from hidden execution, don't include it.
msg_id = msg['parent_header']['msg_id']
info = self._request_info['execute'].get(msg_id)
if info and info.hidden:
return False
from_here = self.from_here(msg)
if msg['msg_type'] == 'execute_input':
# only echo inputs not from here
return self.include_other_output and not from_here
if self.include_other_output:
return True
else:
return from_here
| BaseFrontendMixin |
python | scipy__scipy | scipy/optimize/tests/test__shgo.py | {
"start": 6321,
"end": 7557
} | class ____(StructTestFunction):
"""
LennardJones objective function. Used to test symmetry constraints
settings.
"""
def f(self, x, *args):
print(f'x = {x}')
self.N = args[0]
k = int(self.N / 3)
s = 0.0
for i in range(k - 1):
for j in range(i + 1, k):
a = 3 * i
b = 3 * j
xd = x[a] - x[b]
yd = x[a + 1] - x[b + 1]
zd = x[a + 2] - x[b + 2]
ed = xd * xd + yd * yd + zd * zd
ud = ed * ed * ed
if ed > 0.0:
s += (1.0 / ud - 2.0) / ud
return s
g = None
cons = wrap_constraints(g)
N = 6
boundsLJ = list(zip([-4.0] * 6, [4.0] * 6))
testLJ = StructTestLJ(bounds=boundsLJ,
expected_fun=[-1.0],
expected_x=None,
# expected_x=[-2.71247337e-08,
# -2.71247337e-08,
# -2.50000222e+00,
# -2.71247337e-08,
# -2.71247337e-08,
# -1.50000222e+00]
)
| StructTestLJ |
python | PyCQA__pylint | tests/functional/a/abstract/abstract_class_instantiated.py | {
"start": 415,
"end": 564
} | class ____(metaclass=abc.ABCMeta):
""" This should not raise the warning. """
def test(self):
raise NotImplementedError()
| ThirdGoodClass |
python | instagram__MonkeyType | monkeytype/typing.py | {
"start": 11725,
"end": 12642
} | class ____(GenericTypeRewriter[type]):
"""TypeRewriter provides a visitor for rewriting parts of types"""
def make_anonymous_typed_dict(self, required_fields, optional_fields):
return make_typed_dict(
required_fields=required_fields, optional_fields=optional_fields
)
def make_builtin_typed_dict(self, name, annotations, total):
return TypedDict(name, annotations, total=total)
def generic_rewrite(self, typ):
return typ
def rewrite_container_type(self, container_type):
return container_type
def rewrite_malformed_container(self, container):
return container
def rewrite_type_variable(self, type_variable):
return type_variable
def make_builtin_tuple(self, elements):
return tuple(elements)
def make_container_type(self, container_type, element):
return container_type[element]
| TypeRewriter |
python | kamyu104__LeetCode-Solutions | Python/find-the-number-of-winning-players.py | {
"start": 102,
"end": 478
} | class ____(object):
def winningPlayerCount(self, n, pick):
"""
:type n: int
:type pick: List[List[int]]
:rtype: int
"""
cnts = collections.defaultdict(lambda: collections.defaultdict(int))
for x, y in pick:
cnts[x][y] += 1
return sum(i < max(cnt.itervalues()) for i, cnt in cnts.iteritems())
| Solution |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_quantile_values.py | {
"start": 1065,
"end": 20252
} | class ____(ColumnAggregateMetricProvider):
metric_name = "column.quantile_values"
value_keys = ("quantiles", "allow_relative_error")
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, quantiles, allow_relative_error, **kwargs):
"""Quantile Function"""
interpolation_options = ("linear", "lower", "higher", "midpoint", "nearest")
if not allow_relative_error:
allow_relative_error = "nearest"
if allow_relative_error not in interpolation_options:
raise ValueError( # noqa: TRY003 # FIXME CoP
f"If specified for pandas, allow_relative_error must be one an allowed value for the 'interpolation'" # noqa: E501 # FIXME CoP
f"parameter of .quantile() (one of {interpolation_options})"
)
return column.quantile(quantiles, interpolation=allow_relative_error).tolist()
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy( # noqa: C901, PLR0911 # FIXME CoP
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: dict[str, Any],
runtime_configuration: dict,
):
(
selectable,
_compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
column_name = accessor_domain_kwargs["column"]
column = sa.column(column_name) # type: ignore[var-annotated] # FIXME CoP
dialect_name = execution_engine.dialect_name
quantiles = metric_value_kwargs["quantiles"]
allow_relative_error = metric_value_kwargs.get("allow_relative_error", False)
table_row_count = metrics.get("table.row_count")
if dialect_name == GXSqlDialect.MSSQL:
return _get_column_quantiles_mssql(
column=column,
quantiles=quantiles,
selectable=selectable,
execution_engine=execution_engine,
)
elif dialect_name == GXSqlDialect.BIGQUERY:
return _get_column_quantiles_bigquery(
column=column,
quantiles=quantiles,
selectable=selectable,
execution_engine=execution_engine,
)
elif dialect_name == GXSqlDialect.MYSQL:
return _get_column_quantiles_mysql(
column=column,
quantiles=quantiles,
selectable=selectable,
execution_engine=execution_engine,
)
elif dialect_name.lower() == GXSqlDialect.CLICKHOUSE:
return _get_column_quantiles_clickhouse(
column=column, # type: ignore[arg-type] # FIXME CoP
quantiles=quantiles,
selectable=selectable,
execution_engine=execution_engine,
)
elif dialect_name == GXSqlDialect.TRINO:
return _get_column_quantiles_trino(
column=column,
quantiles=quantiles,
selectable=selectable,
execution_engine=execution_engine,
)
elif dialect_name == GXSqlDialect.SNOWFLAKE:
# NOTE: 20201216 - JPC - snowflake has a representation/precision limitation
# in its percentile_disc implementation that causes an error when we do
# not round. It is unclear to me *how* the call to round affects the behavior --
# the binary representation should be identical before and after, and I do
# not observe a type difference. However, the issue is replicable in the
# snowflake console and directly observable in side-by-side comparisons with
# and without the call to round()
quantiles = [round(x, 10) for x in quantiles]
return _get_column_quantiles_generic_sqlalchemy(
column=column,
quantiles=quantiles,
allow_relative_error=allow_relative_error,
selectable=selectable,
execution_engine=execution_engine,
)
elif dialect_name == GXSqlDialect.SQLITE:
return _get_column_quantiles_sqlite(
column=column,
quantiles=quantiles,
selectable=selectable,
execution_engine=execution_engine,
table_row_count=table_row_count,
)
elif dialect_name == GXSqlDialect.AWSATHENA:
return _get_column_quantiles_athena(
column=column,
quantiles=quantiles,
selectable=selectable,
execution_engine=execution_engine,
)
else:
return _get_column_quantiles_generic_sqlalchemy(
column=column,
quantiles=quantiles,
allow_relative_error=allow_relative_error,
selectable=selectable,
execution_engine=execution_engine,
)
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: dict,
metric_value_kwargs: dict,
metrics: dict[str, Any],
runtime_configuration: dict,
):
(
df,
_compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
quantiles = metric_value_kwargs["quantiles"]
column = accessor_domain_kwargs["column"]
allow_relative_error = metric_value_kwargs.get("allow_relative_error", False)
if not allow_relative_error:
allow_relative_error = 0.0
if (
not isinstance(allow_relative_error, float)
or allow_relative_error < 0.0
or allow_relative_error > 1.0
):
raise ValueError( # noqa: TRY003 # FIXME CoP
"SparkDFExecutionEngine requires relative error to be False or to be a float between 0 and 1." # noqa: E501 # FIXME CoP
)
return df.approxQuantile(column, list(quantiles), allow_relative_error) # type: ignore[attr-defined] # FIXME CoP
def _get_column_quantiles_mssql(
column, quantiles: Iterable, selectable, execution_engine: SqlAlchemyExecutionEngine
) -> list:
# mssql requires over(), so we add an empty over() clause
selects: list[sqlalchemy.WithinGroup] = [
sa.func.percentile_disc(quantile).within_group(column.asc()).over() # type: ignore[misc] # FIXME CoP
for quantile in quantiles
]
quantiles_query: sqlalchemy.Select = sa.select(*selects).select_from(selectable)
try:
quantiles_results = execution_engine.execute_query(quantiles_query).fetchone()
return list(quantiles_results) # type: ignore[arg-type] # FIXME CoP
except sqlalchemy.ProgrammingError as pe:
exception_message: str = "An SQL syntax Exception occurred."
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(pe).__name__}: "{pe!s}". Traceback: "{exception_traceback}".'
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
raise pe # noqa: TRY201 # FIXME CoP
def _get_column_quantiles_bigquery(
column, quantiles: Iterable, selectable, execution_engine: SqlAlchemyExecutionEngine
) -> list:
# BigQuery does not support "WITHIN", so we need a special case for it
selects: list[sqlalchemy.WithinGroup] = [
sa.func.percentile_disc(column, quantile).over() # type: ignore[misc] # FIXME CoP
for quantile in quantiles
]
quantiles_query: sqlalchemy.Select = sa.select(*selects).select_from(selectable)
try:
quantiles_results = execution_engine.execute_query(quantiles_query).fetchone()
return list(quantiles_results) # type: ignore[arg-type] # FIXME CoP
except sqlalchemy.ProgrammingError as pe:
exception_message: str = "An SQL syntax Exception occurred."
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(pe).__name__}: "{pe!s}". Traceback: "{exception_traceback}".'
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
raise pe # noqa: TRY201 # FIXME CoP
def _get_column_quantiles_mysql(
column, quantiles: Iterable, selectable, execution_engine: SqlAlchemyExecutionEngine
) -> list:
# MySQL does not support "percentile_disc", so we implement it as a compound query.
# Please see https://stackoverflow.com/questions/19770026/calculate-percentile-value-using-mysql for reference. # noqa: E501 # FIXME CoP
percent_rank_query: sqlalchemy.CTE = (
sa.select(
column,
sa.cast(
sa.func.percent_rank().over(order_by=column.asc()),
sa.dialects.mysql.DECIMAL(18, 15),
).label("p"),
)
.order_by(sa.column("p").asc())
.select_from(selectable)
.cte("t")
)
selects: list[sqlalchemy.WithinGroup] = []
for idx, quantile in enumerate(quantiles):
# pymysql cannot handle conversion of numpy float64 to float; convert just in case
if np.issubdtype(type(quantile), np.double):
quantile = float(quantile) # noqa: PLW2901 # FIXME CoP
quantile_column: sqlalchemy.Label = (
sa.func.first_value(column)
.over(
order_by=sa.case(
(
percent_rank_query.columns.p
<= sa.cast(quantile, sa.dialects.mysql.DECIMAL(18, 15)),
percent_rank_query.columns.p,
),
else_=None,
).desc()
)
.label(f"q_{idx}")
)
selects.append(quantile_column) # type: ignore[arg-type] # FIXME CoP
quantiles_query: sqlalchemy.Select = (
sa.select(*selects).distinct().order_by(percent_rank_query.columns.p.desc())
)
try:
quantiles_results = execution_engine.execute_query(quantiles_query).fetchone()
return list(quantiles_results) # type: ignore[arg-type] # FIXME CoP
except sqlalchemy.ProgrammingError as pe:
exception_message: str = "An SQL syntax Exception occurred."
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(pe).__name__}: "{pe!s}". Traceback: "{exception_traceback}".'
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
raise pe # noqa: TRY201 # FIXME CoP
def _get_column_quantiles_trino(
column, quantiles: Iterable, selectable, execution_engine: SqlAlchemyExecutionEngine
) -> list:
# Trino does not have the percentile_disc func, but instead has approx_percentile
sql_approx: str = f"approx_percentile({column}, ARRAY{list(quantiles)})"
selects_approx: list[sqlalchemy.TextClause] = [sa.text(sql_approx)]
quantiles_query: sqlalchemy.Select = sa.select(*selects_approx).select_from(selectable)
try:
quantiles_results = execution_engine.execute_query(quantiles_query).fetchone()
return list(quantiles_results)[0] # type: ignore[arg-type] # FIXME CoP
except (sqlalchemy.ProgrammingError, trino.trinoexceptions.TrinoUserError) as pe:
exception_message: str = "An SQL syntax Exception occurred."
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(pe).__name__}: "{pe!s}". Traceback: "{exception_traceback}".'
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
raise pe # noqa: TRY201 # FIXME CoP
def _get_column_quantiles_clickhouse(
column: str, quantiles: Iterable, selectable, execution_engine
) -> list:
quantiles_list = list(quantiles)
sql_approx: str = f"quantilesExact({', '.join([str(x) for x in quantiles_list])})({column})"
selects_approx: list[sqlalchemy.TextClause] = [sa.text(sql_approx)]
quantiles_query: sqlalchemy.Select = sa.select(selects_approx).select_from(selectable) # type: ignore[call-overload] # FIXME CoP
try:
quantiles_results = execution_engine.execute(quantiles_query).fetchone()[0]
return quantiles_results
except sqlalchemy.ProgrammingError as pe:
exception_message: str = "An SQL syntax Exception occurred."
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(pe).__name__}: "{pe!s}". Traceback: "{exception_traceback}".'
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
raise pe # noqa: TRY201 # FIXME CoP
def _get_column_quantiles_sqlite(
column,
quantiles: Iterable,
selectable,
execution_engine: SqlAlchemyExecutionEngine,
table_row_count,
) -> list:
"""
The present implementation is somewhat inefficient, because it requires as many calls to
"execution_engine.execute_query()" as the number of partitions in the "quantiles" parameter (albeit, typically,
only a few). However, this is the only mechanism available for SQLite at the present time (11/17/2021), because
the analytical processing is not a very strongly represented capability of the SQLite database management system.
""" # noqa: E501 # FIXME CoP
offsets: list[int] = [quantile * table_row_count - 1 for quantile in quantiles]
quantile_queries: list[sqlalchemy.Select] = [
sa.select(column).order_by(column.asc()).offset(offset).limit(1).select_from(selectable)
for offset in offsets
]
try:
quantiles_results = [
execution_engine.execute_query(quantile_query).fetchone()
for quantile_query in quantile_queries
]
return list(
itertools.chain.from_iterable(
[list(quantile_result) for quantile_result in quantiles_results] # type: ignore[arg-type] # FIXME CoP
)
)
except sqlalchemy.ProgrammingError as pe:
exception_message: str = "An SQL syntax Exception occurred."
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(pe).__name__}: "{pe!s}". Traceback: "{exception_traceback}".'
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
raise pe # noqa: TRY201 # FIXME CoP
def _get_column_quantiles_athena(
column,
quantiles: Iterable,
selectable,
execution_engine: SqlAlchemyExecutionEngine,
) -> list:
approx_percentiles = f"approx_percentile({column}, ARRAY{list(quantiles)})"
selects_approx: list[sqlalchemy.TextClause] = [sa.text(approx_percentiles)]
quantiles_query_approx: sqlalchemy.Select = sa.select(*selects_approx).select_from(selectable)
try:
quantiles_results = execution_engine.execute_query(quantiles_query_approx).fetchone()
# the ast literal eval is needed because the method is returning a json string and not a dict # noqa: E501 # FIXME CoP
results = ast.literal_eval(quantiles_results[0]) # type: ignore[index] # FIXME CoP
return results
except sqlalchemy.ProgrammingError as pe:
exception_message: str = "An SQL syntax Exception occurred."
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(pe).__name__}: "{pe!s}". Traceback: "{exception_traceback}".'
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
raise pe # noqa: TRY201 # FIXME CoP
# Support for computing the quantiles column for PostGreSQL and Redshift is included in the same method as that for # noqa: E501 # FIXME CoP
# the generic sqlalchemy compatible DBMS engine, because users often use the postgresql driver to connect to Redshift # noqa: E501 # FIXME CoP
# The key functional difference is that Redshift does not support the aggregate function
# "percentile_disc", but does support the approximate percentile_disc or percentile_cont function version instead.``` # noqa: E501 # FIXME CoP
def _get_column_quantiles_generic_sqlalchemy(
column,
quantiles: Iterable,
allow_relative_error: bool,
selectable,
execution_engine: SqlAlchemyExecutionEngine,
) -> list:
selects: list[sqlalchemy.WithinGroup] = [
sa.func.percentile_disc(quantile).within_group(column.asc()) for quantile in quantiles
]
quantiles_query: sqlalchemy.Select = sa.select(*selects).select_from(selectable)
try:
quantiles_results = execution_engine.execute_query(quantiles_query).fetchone()
return list(quantiles_results) # type: ignore[arg-type] # FIXME CoP
except sqlalchemy.ProgrammingError:
# ProgrammingError: (psycopg2.errors.SyntaxError) Aggregate function "percentile_disc" is not supported; # noqa: E501 # FIXME CoP
# use approximate percentile_disc or percentile_cont instead.
if attempt_allowing_relative_error(execution_engine.dialect):
# Redshift does not have a percentile_disc method, but does support an approximate version. # noqa: E501 # FIXME CoP
sql_approx: str = get_approximate_percentile_disc_sql(
selects=selects, sql_engine_dialect=execution_engine.dialect
)
selects_approx: list[sqlalchemy.TextClause] = [sa.text(sql_approx)]
quantiles_query_approx: sqlalchemy.Select = sa.select(*selects_approx).select_from(
selectable
)
if allow_relative_error or execution_engine.engine.driver == "psycopg2":
try:
quantiles_results = execution_engine.execute_query(
quantiles_query_approx
).fetchone()
return list(quantiles_results) # type: ignore[arg-type] # FIXME CoP
except sqlalchemy.ProgrammingError as pe:
exception_message: str = "An SQL syntax Exception occurred."
exception_traceback: str = traceback.format_exc()
exception_message += (
f'{type(pe).__name__}: "{pe!s}". Traceback: "{exception_traceback}".'
)
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
raise pe # noqa: TRY201 # FIXME CoP
else:
raise ValueError( # noqa: TRY003 # FIXME CoP
f'The SQL engine dialect "{execution_engine.dialect!s}" does not support computing quantiles ' # noqa: E501 # FIXME CoP
"without approximation error; set allow_relative_error to True to allow approximate quantiles." # noqa: E501 # FIXME CoP
)
else:
raise ValueError( # noqa: TRY003 # FIXME CoP
f'The SQL engine dialect "{execution_engine.dialect!s}" does not support computing quantiles with ' # noqa: E501 # FIXME CoP
"approximation error; set allow_relative_error to False to disable approximate quantiles." # noqa: E501 # FIXME CoP
)
| ColumnQuantileValues |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/storage/input_manager.py | {
"start": 6659,
"end": 7485
} | class ____(InputManager):
def __init__(self, load_fn: InputLoadFn):
self._load_fn = load_fn
def load_input(self, context: "InputContext") -> object:
# the @input_manager decorated function (self._load_fn) may return a direct value that
# should be used or an instance of an InputManager. So we call self._load_fn and see if the
# result is an InputManager. If so we call it's load_input method
intermediate = (
# type-ignore because function being used as attribute
self._load_fn(context) if has_at_least_one_parameter(self._load_fn) else self._load_fn() # type: ignore # (strict type guard)
)
if isinstance(intermediate, InputManager):
return intermediate.load_input(context)
return intermediate
| InputManagerWrapper |
python | getsentry__sentry | src/sentry/logging/handlers.py | {
"start": 2407,
"end": 3957
} | class ____(logging.StreamHandler):
def get_log_kwargs(self, record: logging.LogRecord) -> dict[str, Any]:
kwargs = {k: v for k, v in vars(record).items() if k not in throwaways and v is not None}
kwargs.update(
{
"level": record.levelno,
"event": record.msg,
"sentry.trace.trace_id": get_trace_id(),
}
)
if record.args:
# record.args inside of LogRecord.__init__ gets unrolled
# if it's the shape `({},)`, a single item dictionary.
# so we need to check for this, and re-wrap it because
# down the line of structlog, it's expected to be this
# original shape.
if isinstance(record.args, (tuple, list)):
kwargs["positional_args"] = record.args
else:
kwargs["positional_args"] = (record.args,)
return kwargs
def emit(self, record: logging.LogRecord, logger: logging.Logger | None = None) -> None:
# If anyone wants to use the 'extra' kwarg to provide context within
# structlog, we have to strip all of the default attributes from
# a record because the RootLogger will take the 'extra' dictionary
# and just turn them into attributes.
try:
if logger is None:
logger = get_logger()
logger.log(**self.get_log_kwargs(record=record))
except Exception:
if logging.raiseExceptions:
raise
| StructLogHandler |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol9.py | {
"start": 121,
"end": 285
} | class ____(Protocol):
value: int
@property
def left(self) -> "TreeLike | None": ...
@property
def right(self) -> "TreeLike | None": ...
| TreeLike |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/organization_code_mappings.py | {
"start": 5844,
"end": 9983
} | class ____(OrganizationEndpoint, OrganizationIntegrationMixin):
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
permission_classes = (OrganizationIntegrationsLoosePermission,)
def get(self, request: Request, organization: Organization) -> Response:
"""
Get the list of repository project path configs
:pparam string organization_id_or_slug: the id or slug of the organization the
team should be created for.
:qparam int integrationId: the optional integration id.
:qparam int project: Optional. Pass "-1" to filter to 'all projects user has access to'. Omit to filter for 'all projects user is a member of'.
:qparam int per_page: Pagination size.
:qparam string cursor: Pagination cursor.
:auth: required
"""
integration_id = request.GET.get("integrationId")
queryset = RepositoryProjectPathConfig.objects.all()
if integration_id:
# get_organization_integration will raise a 404 if no org_integration is found
org_integration = self.get_organization_integration(organization, integration_id)
queryset = queryset.filter(organization_integration_id=org_integration.id)
else:
# Filter by project
projects = self.get_projects(request, organization)
queryset = queryset.filter(project__in=projects)
return self.paginate(
request=request,
queryset=queryset,
on_results=lambda x: serialize(x, request.user),
paginator_cls=OffsetPaginator,
)
def post(self, request: Request, organization) -> Response:
"""
Create a new repository project path config
``````````````````
:pparam string organization_id_or_slug: the id or slug of the organization the
team should be created for.
:param int repositoryId:
:param int projectId:
:param string stackRoot:
:param string sourceRoot:
:param string defaultBranch:
:param int required integrationId:
:auth: required
"""
integration_id = request.data.get("integrationId")
if not integration_id:
return self.respond("Missing param: integrationId", status=status.HTTP_400_BAD_REQUEST)
try:
project = Project.objects.get(id=request.data["projectId"])
except ValueError as exc:
if "invalid literal for int() with base 10" in str(exc):
return self.respond(
"Invalid projectId param. Expected an integer.",
status=status.HTTP_400_BAD_REQUEST,
)
else:
raise
except (Project.DoesNotExist, KeyError):
return self.respond("Could not find project", status=status.HTTP_404_NOT_FOUND)
if not request.access.has_project_access(project):
return self.respond(status=status.HTTP_403_FORBIDDEN)
try:
# We expect there to exist an org_integration
org_integration = self.get_organization_integration(organization, integration_id)
except Http404:
# Human friendly error response.
return self.respond(
"Could not find this integration installed on your organization",
status=status.HTTP_404_NOT_FOUND,
)
serializer = RepositoryProjectPathConfigSerializer(
context={"organization": organization, "organization_integration": org_integration},
data=request.data,
)
if serializer.is_valid():
repository_project_path_config = serializer.save()
return self.respond(
serialize(repository_project_path_config, request.user),
status=status.HTTP_201_CREATED,
)
return self.respond(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| OrganizationCodeMappingsEndpoint |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_dialect.py | {
"start": 13682,
"end": 15985
} | class ____(fixtures.TestBase):
__backend__ = True
__requires__ = ("default_schema_name_switch",)
def test_control_case(self):
default_schema_name = config.db.dialect.default_schema_name
eng = engines.testing_engine()
with eng.connect():
pass
eq_(eng.dialect.default_schema_name, default_schema_name)
def test_wont_work_wo_insert(self):
default_schema_name = config.db.dialect.default_schema_name
eng = engines.testing_engine()
@event.listens_for(eng, "connect")
def on_connect(dbapi_connection, connection_record):
set_default_schema_on_connection(
config, dbapi_connection, config.test_schema
)
with eng.connect() as conn:
what_it_should_be = eng.dialect._get_default_schema_name(conn)
eq_(what_it_should_be, config.test_schema)
eq_(eng.dialect.default_schema_name, default_schema_name)
def test_schema_change_on_connect(self):
eng = engines.testing_engine()
@event.listens_for(eng, "connect", insert=True)
def on_connect(dbapi_connection, connection_record):
set_default_schema_on_connection(
config, dbapi_connection, config.test_schema
)
with eng.connect() as conn:
what_it_should_be = eng.dialect._get_default_schema_name(conn)
eq_(what_it_should_be, config.test_schema)
eq_(eng.dialect.default_schema_name, config.test_schema)
def test_schema_change_works_w_transactions(self):
eng = engines.testing_engine()
@event.listens_for(eng, "connect", insert=True)
def on_connect(dbapi_connection, *arg):
set_default_schema_on_connection(
config, dbapi_connection, config.test_schema
)
with eng.connect() as conn:
trans = conn.begin()
what_it_should_be = eng.dialect._get_default_schema_name(conn)
eq_(what_it_should_be, config.test_schema)
trans.rollback()
what_it_should_be = eng.dialect._get_default_schema_name(conn)
eq_(what_it_should_be, config.test_schema)
eq_(eng.dialect.default_schema_name, config.test_schema)
| WeCanSetDefaultSchemaWEventsTest |
python | numpy__numpy | numpy/distutils/ccompiler_opt.py | {
"start": 46094,
"end": 62347
} | class ____:
"""A helper class for `CCompilerOpt` that managing CPU features.
Attributes
----------
feature_supported : dict
Dictionary containing all CPU features that supported
by the platform, according to the specified values in attribute
`_Config.conf_features` and `_Config.conf_features_partial()`
feature_min : set
The minimum support of CPU features, according to
the specified values in attribute `_Config.conf_min_features`.
"""
def __init__(self):
if hasattr(self, "feature_is_cached"):
return
self.feature_supported = pfeatures = self.conf_features_partial()
for feature_name in list(pfeatures.keys()):
feature = pfeatures[feature_name]
cfeature = self.conf_features[feature_name]
feature.update({
k:v for k,v in cfeature.items() if k not in feature
})
disabled = feature.get("disable")
if disabled is not None:
pfeatures.pop(feature_name)
self.dist_log(
"feature '%s' is disabled," % feature_name,
disabled, stderr=True
)
continue
# list is used internally for these options
for option in (
"implies", "group", "detect", "headers", "flags", "extra_checks"
) :
oval = feature.get(option)
if isinstance(oval, str):
feature[option] = oval.split()
self.feature_min = set()
min_f = self.conf_min_features.get(self.cc_march, "")
for F in min_f.upper().split():
if F in self.feature_supported:
self.feature_min.add(F)
self.feature_is_cached = True
def feature_names(self, names=None, force_flags=None, macros=[]):
"""
Returns a set of CPU feature names that supported by platform and the **C** compiler.
Parameters
----------
names : sequence or None, optional
Specify certain CPU features to test it against the **C** compiler.
if None(default), it will test all current supported features.
**Note**: feature names must be in upper-case.
force_flags : list or None, optional
If None(default), default compiler flags for every CPU feature will
be used during the test.
macros : list of tuples, optional
A list of C macro definitions.
"""
assert(
names is None or (
not isinstance(names, str) and
hasattr(names, "__iter__")
)
)
assert(force_flags is None or isinstance(force_flags, list))
if names is None:
names = self.feature_supported.keys()
supported_names = set()
for f in names:
if self.feature_is_supported(
f, force_flags=force_flags, macros=macros
):
supported_names.add(f)
return supported_names
def feature_is_exist(self, name):
"""
Returns True if a certain feature is exist and covered within
``_Config.conf_features``.
Parameters
----------
'name': str
feature name in uppercase.
"""
assert(name.isupper())
return name in self.conf_features
def feature_sorted(self, names, reverse=False):
"""
Sort a list of CPU features ordered by the lowest interest.
Parameters
----------
'names': sequence
sequence of supported feature names in uppercase.
'reverse': bool, optional
If true, the sorted features is reversed. (highest interest)
Returns
-------
list, sorted CPU features
"""
def sort_cb(k):
if isinstance(k, str):
return self.feature_supported[k]["interest"]
# multiple features
rank = max([self.feature_supported[f]["interest"] for f in k])
# FIXME: that's not a safe way to increase the rank for
# multi targets
rank += len(k) -1
return rank
return sorted(names, reverse=reverse, key=sort_cb)
def feature_implies(self, names, keep_origins=False):
"""
Return a set of CPU features that implied by 'names'
Parameters
----------
names : str or sequence of str
CPU feature name(s) in uppercase.
keep_origins : bool
if False(default) then the returned set will not contain any
features from 'names'. This case happens only when two features
imply each other.
Examples
--------
>>> self.feature_implies("SSE3")
{'SSE', 'SSE2'}
>>> self.feature_implies("SSE2")
{'SSE'}
>>> self.feature_implies("SSE2", keep_origins=True)
# 'SSE2' found here since 'SSE' and 'SSE2' imply each other
{'SSE', 'SSE2'}
"""
def get_implies(name, _caller=set()):
implies = set()
d = self.feature_supported[name]
for i in d.get("implies", []):
implies.add(i)
if i in _caller:
# infinity recursive guard since
# features can imply each other
continue
_caller.add(name)
implies = implies.union(get_implies(i, _caller))
return implies
if isinstance(names, str):
implies = get_implies(names)
names = [names]
else:
assert(hasattr(names, "__iter__"))
implies = set()
for n in names:
implies = implies.union(get_implies(n))
if not keep_origins:
implies.difference_update(names)
return implies
def feature_implies_c(self, names):
"""same as feature_implies() but combining 'names'"""
if isinstance(names, str):
names = set((names,))
else:
names = set(names)
return names.union(self.feature_implies(names))
def feature_ahead(self, names):
"""
Return list of features in 'names' after remove any
implied features and keep the origins.
Parameters
----------
'names': sequence
sequence of CPU feature names in uppercase.
Returns
-------
list of CPU features sorted as-is 'names'
Examples
--------
>>> self.feature_ahead(["SSE2", "SSE3", "SSE41"])
["SSE41"]
# assume AVX2 and FMA3 implies each other and AVX2
# is the highest interest
>>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
["AVX2"]
# assume AVX2 and FMA3 don't implies each other
>>> self.feature_ahead(["SSE2", "SSE3", "SSE41", "AVX2", "FMA3"])
["AVX2", "FMA3"]
"""
assert(
not isinstance(names, str)
and hasattr(names, '__iter__')
)
implies = self.feature_implies(names, keep_origins=True)
ahead = [n for n in names if n not in implies]
if len(ahead) == 0:
# return the highest interested feature
# if all features imply each other
ahead = self.feature_sorted(names, reverse=True)[:1]
return ahead
def feature_untied(self, names):
"""
same as 'feature_ahead()' but if both features implied each other
and keep the highest interest.
Parameters
----------
'names': sequence
sequence of CPU feature names in uppercase.
Returns
-------
list of CPU features sorted as-is 'names'
Examples
--------
>>> self.feature_untied(["SSE2", "SSE3", "SSE41"])
["SSE2", "SSE3", "SSE41"]
# assume AVX2 and FMA3 implies each other
>>> self.feature_untied(["SSE2", "SSE3", "SSE41", "FMA3", "AVX2"])
["SSE2", "SSE3", "SSE41", "AVX2"]
"""
assert(
not isinstance(names, str)
and hasattr(names, '__iter__')
)
final = []
for n in names:
implies = self.feature_implies(n)
tied = [
nn for nn in final
if nn in implies and n in self.feature_implies(nn)
]
if tied:
tied = self.feature_sorted(tied + [n])
if n not in tied[1:]:
continue
final.remove(tied[:1][0])
final.append(n)
return final
def feature_get_til(self, names, keyisfalse):
"""
same as `feature_implies_c()` but stop collecting implied
features when feature's option that provided through
parameter 'keyisfalse' is False, also sorting the returned
features.
"""
def til(tnames):
# sort from highest to lowest interest then cut if "key" is False
tnames = self.feature_implies_c(tnames)
tnames = self.feature_sorted(tnames, reverse=True)
for i, n in enumerate(tnames):
if not self.feature_supported[n].get(keyisfalse, True):
tnames = tnames[:i+1]
break
return tnames
if isinstance(names, str) or len(names) <= 1:
names = til(names)
# normalize the sort
names.reverse()
return names
names = self.feature_ahead(names)
names = {t for n in names for t in til(n)}
return self.feature_sorted(names)
def feature_detect(self, names):
"""
Return a list of CPU features that required to be detected
sorted from the lowest to highest interest.
"""
names = self.feature_get_til(names, "implies_detect")
detect = []
for n in names:
d = self.feature_supported[n]
detect += d.get("detect", d.get("group", [n]))
return detect
@_Cache.me
def feature_flags(self, names):
"""
Return a list of CPU features flags sorted from the lowest
to highest interest.
"""
names = self.feature_sorted(self.feature_implies_c(names))
flags = []
for n in names:
d = self.feature_supported[n]
f = d.get("flags", [])
if not f or not self.cc_test_flags(f):
continue
flags += f
return self.cc_normalize_flags(flags)
@_Cache.me
def feature_test(self, name, force_flags=None, macros=[]):
"""
Test a certain CPU feature against the compiler through its own
check file.
Parameters
----------
name : str
Supported CPU feature name.
force_flags : list or None, optional
If None(default), the returned flags from `feature_flags()`
will be used.
macros : list of tuples, optional
A list of C macro definitions.
"""
if force_flags is None:
force_flags = self.feature_flags(name)
self.dist_log(
"testing feature '%s' with flags (%s)" % (
name, ' '.join(force_flags)
))
# Each CPU feature must have C source code contains at
# least one intrinsic or instruction related to this feature.
test_path = os.path.join(
self.conf_check_path, "cpu_%s.c" % name.lower()
)
if not os.path.exists(test_path):
self.dist_fatal("feature test file is not exist", test_path)
test = self.dist_test(
test_path, force_flags + self.cc_flags["werror"], macros=macros
)
if not test:
self.dist_log("testing failed", stderr=True)
return test
@_Cache.me
def feature_is_supported(self, name, force_flags=None, macros=[]):
"""
Check if a certain CPU feature is supported by the platform and compiler.
Parameters
----------
name : str
CPU feature name in uppercase.
force_flags : list or None, optional
If None(default), default compiler flags for every CPU feature will
be used during test.
macros : list of tuples, optional
A list of C macro definitions.
"""
assert(name.isupper())
assert(force_flags is None or isinstance(force_flags, list))
supported = name in self.feature_supported
if supported:
for impl in self.feature_implies(name):
if not self.feature_test(impl, force_flags, macros=macros):
return False
if not self.feature_test(name, force_flags, macros=macros):
return False
return supported
@_Cache.me
def feature_can_autovec(self, name):
"""
check if the feature can be auto-vectorized by the compiler
"""
assert(isinstance(name, str))
d = self.feature_supported[name]
can = d.get("autovec", None)
if can is None:
valid_flags = [
self.cc_test_flags([f]) for f in d.get("flags", [])
]
can = valid_flags and any(valid_flags)
return can
@_Cache.me
def feature_extra_checks(self, name):
"""
Return a list of supported extra checks after testing them against
the compiler.
Parameters
----------
names : str
CPU feature name in uppercase.
"""
assert isinstance(name, str)
d = self.feature_supported[name]
extra_checks = d.get("extra_checks", [])
if not extra_checks:
return []
self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks)
flags = self.feature_flags(name)
available = []
not_available = []
for chk in extra_checks:
test_path = os.path.join(
self.conf_check_path, "extra_%s.c" % chk.lower()
)
if not os.path.exists(test_path):
self.dist_fatal("extra check file does not exist", test_path)
is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"])
if is_supported:
available.append(chk)
else:
not_available.append(chk)
if not_available:
self.dist_log("testing failed for checks", not_available, stderr=True)
return available
def feature_c_preprocessor(self, feature_name, tabs=0):
"""
Generate C preprocessor definitions and include headers of a CPU feature.
Parameters
----------
'feature_name': str
CPU feature name in uppercase.
'tabs': int
if > 0, align the generated strings to the right depend on number of tabs.
Returns
-------
str, generated C preprocessor
Examples
--------
>>> self.feature_c_preprocessor("SSE3")
/** SSE3 **/
#define NPY_HAVE_SSE3 1
#include <pmmintrin.h>
"""
assert(feature_name.isupper())
feature = self.feature_supported.get(feature_name)
assert(feature is not None)
prepr = [
"/** %s **/" % feature_name,
"#define %sHAVE_%s 1" % (self.conf_c_prefix, feature_name)
]
prepr += [
"#include <%s>" % h for h in feature.get("headers", [])
]
extra_defs = feature.get("group", [])
extra_defs += self.feature_extra_checks(feature_name)
for edef in extra_defs:
# Guard extra definitions in case of duplicate with
# another feature
prepr += [
"#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef),
"\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef),
"#endif",
]
if tabs > 0:
prepr = [('\t'*tabs) + l for l in prepr]
return '\n'.join(prepr)
| _Feature |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/asyncpg.py | {
"start": 9083,
"end": 9148
} | class ____(REGCONFIG):
render_bind_cast = True
| AsyncpgREGCONFIG |
python | kamyu104__LeetCode-Solutions | Python/minimum-changes-to-make-alternating-binary-string.py | {
"start": 29,
"end": 244
} | class ____(object):
def minOperations(self, s):
"""
:type s: str
:rtype: int
"""
cnt = sum(int(c) == i%2 for i, c in enumerate(s))
return min(cnt, len(s)-cnt)
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1259214,
"end": 1259794
} | class ____(sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData):
"""Audit log entry for a org.update_member event."""
__schema__ = github_schema
__field_names__ = ("permission", "permission_was")
permission = sgqlc.types.Field(OrgUpdateMemberAuditEntryPermission, graphql_name="permission")
"""The new member permission level for the organization."""
permission_was = sgqlc.types.Field(OrgUpdateMemberAuditEntryPermission, graphql_name="permissionWas")
"""The former member permission level for the organization."""
| OrgUpdateMemberAuditEntry |
python | getsentry__sentry | src/sentry/eventtypes/security.py | {
"start": 1471,
"end": 1705
} | class ____(SecurityEvent):
key = "hpkp"
def extract_metadata(self, data):
metadata = SecurityEvent.extract_metadata(self, data)
metadata["origin"] = data["hpkp"].get("hostname")
return metadata
| HpkpEvent |
python | django__django | tests/auth_tests/test_handlers.py | {
"start": 339,
"end": 2897
} | class ____(TransactionTestCase):
"""
Tests for the mod_wsgi authentication handler
"""
available_apps = [
"django.contrib.auth",
"django.contrib.contenttypes",
"auth_tests",
]
def test_check_password(self):
"""
check_password() returns the correct values as per
https://modwsgi.readthedocs.io/en/develop/user-guides/access-control-mechanisms.html#apache-authentication-provider
"""
User.objects.create_user("test", "test@example.com", "test")
# User not in database
self.assertIsNone(check_password({}, "unknown", ""))
# Valid user with correct password
self.assertIs(check_password({}, "test", "test"), True)
# Valid user with incorrect password
self.assertIs(check_password({}, "test", "incorrect"), False)
# correct password, but user is inactive
User.objects.filter(username="test").update(is_active=False)
self.assertIsNone(check_password({}, "test", "test"))
@override_settings(AUTH_USER_MODEL="auth_tests.CustomUser")
def test_check_password_custom_user(self):
"""
check_password() returns the correct values as per
https://modwsgi.readthedocs.io/en/develop/user-guides/access-control-mechanisms.html#apache-authentication-provider
with a custom user installed.
"""
CustomUser._default_manager.create_user(
"test@example.com", "1990-01-01", "test"
)
# User not in database
self.assertIsNone(check_password({}, "unknown", ""))
# Valid user with correct password'
self.assertIs(check_password({}, "test@example.com", "test"), True)
# Valid user with incorrect password
self.assertIs(check_password({}, "test@example.com", "incorrect"), False)
def test_groups_for_user(self):
"""
groups_for_user() returns correct values as per
https://modwsgi.readthedocs.io/en/develop/user-guides/access-control-mechanisms.html#apache-group-authorisation
"""
user1 = User.objects.create_user("test", "test@example.com", "test")
User.objects.create_user("test1", "test1@example.com", "test1")
group = Group.objects.create(name="test_group")
user1.groups.add(group)
# User not in database
self.assertEqual(groups_for_user({}, "unknown"), [])
self.assertEqual(groups_for_user({}, "test"), [b"test_group"])
self.assertEqual(groups_for_user({}, "test1"), [])
| ModWsgiHandlerTestCase |
python | pennersr__django-allauth | allauth/socialaccount/providers/mailchimp/views.py | {
"start": 216,
"end": 1118
} | class ____(OAuth2Adapter):
"""OAuth2Adapter for MailChimp API v3."""
provider_id = "mailchimp"
authorize_url = "https://login.mailchimp.com/oauth2/authorize"
access_token_url = "https://login.mailchimp.com/oauth2/token" # nosec
profile_url = "https://login.mailchimp.com/oauth2/metadata"
def complete_login(self, request, app, token, **kwargs):
"""Complete login, ensuring correct OAuth header."""
headers = {"Authorization": "OAuth {0}".format(token.token)}
metadata = (
get_adapter().get_requests_session().get(self.profile_url, headers=headers)
)
extra_data = metadata.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(MailChimpOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(MailChimpOAuth2Adapter)
| MailChimpOAuth2Adapter |
python | automl__auto-sklearn | test/test_pipeline/components/dummy_components/dummy_component_1.py | {
"start": 344,
"end": 412
} | class ____(AutoSklearnClassificationAlgorithm):
pass
| DummyComponent1 |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E30.py | {
"start": 8023,
"end": 9729
} | class ____():
pass
# comment
# another comment
a = 1
# end
# E305:8:1
def fn():
print()
# comment
# another comment
try:
fn()
except Exception:
pass
# end
# E305:5:1
def a():
print()
# Two spaces before comments, too.
if a():
a()
# end
#: E305:8:1
# Example from https://github.com/PyCQA/pycodestyle/issues/400
import stuff
def main():
blah, blah
if __name__ == '__main__':
main()
# end
# E306:3:5
def a():
x = 1
def b():
pass
# end
#: E306:3:5
async def a():
x = 1
def b():
pass
# end
#: E306:3:5 E306:5:9
def a():
x = 2
def b():
x = 1
def c():
pass
# end
# E306:3:5 E306:6:5
def a():
x = 1
class C:
pass
x = 2
def b():
pass
# end
# E306
def foo():
def bar():
pass
def baz(): pass
# end
# E306:3:5
def foo():
def bar(): pass
def baz():
pass
# end
# E306
def a():
x = 2
@decorator
def b():
pass
# end
# E306
def a():
x = 2
@decorator
async def b():
pass
# end
# E306
def a():
x = 2
async def b():
pass
# end
# no error
@overload
def arrow_strip_whitespace(obj: Table, /, *cols: str) -> Table: ...
@overload
def arrow_strip_whitespace(obj: Array, /, *cols: str) -> Array: ... # type: ignore[misc]
def arrow_strip_whitespace(obj, /, *cols):
...
# end
# E302
def test_update():
pass
# comment
def test_clientmodel():
pass
# end
# E302
def test_update():
pass
# comment
def test_clientmodel():
pass
# end
# E302
def test_update():
pass
# comment
def test_clientmodel():
pass
# end
# E305
| Class |
python | walkccc__LeetCode | solutions/2034. Stock Price Fluctuation/2034.py | {
"start": 42,
"end": 765
} | class ____:
def __init__(self):
self.timestampToPrice = SortedDict()
self.pricesCount = SortedDict()
def update(self, timestamp: int, price: int) -> None:
if timestamp in self.timestampToPrice:
prevPrice = self.timestampToPrice[timestamp]
self.pricesCount[prevPrice] -= 1
if self.pricesCount[prevPrice] == 0:
del self.pricesCount[prevPrice]
self.timestampToPrice[timestamp] = price
self.pricesCount[price] = self.pricesCount.get(price, 0) + 1
def current(self) -> int:
return self.timestampToPrice.peekitem(-1)[1]
def maximum(self) -> int:
return self.pricesCount.peekitem(-1)[0]
def minimum(self) -> int:
return self.pricesCount.peekitem(0)[0]
| StockPrice |
python | pytorch__pytorch | torch/ao/nn/sparse/quantized/utils.py | {
"start": 587,
"end": 2044
} | class ____:
rlock = threading.RLock()
row_block_size: int = 1
col_block_size: int = 4
prev_row_block_size: int = 1
prev_col_block_size: int = 4
def __init__(self, row_block_size: int = 1, col_block_size: int = 4):
assert _is_valid_linear_block_sparse_pattern(row_block_size, col_block_size)
LinearBlockSparsePattern.rlock.acquire()
LinearBlockSparsePattern.prev_row_block_size = (
LinearBlockSparsePattern.row_block_size
)
LinearBlockSparsePattern.prev_col_block_size = (
LinearBlockSparsePattern.col_block_size
)
LinearBlockSparsePattern.row_block_size = row_block_size
LinearBlockSparsePattern.col_block_size = col_block_size
def __enter__(self) -> None:
pass
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
backtrace: object | None,
) -> None:
LinearBlockSparsePattern.row_block_size = (
LinearBlockSparsePattern.prev_row_block_size
)
LinearBlockSparsePattern.col_block_size = (
LinearBlockSparsePattern.prev_col_block_size
)
LinearBlockSparsePattern.rlock.release()
@staticmethod
def block_size() -> tuple[int, int]:
return (
LinearBlockSparsePattern.row_block_size,
LinearBlockSparsePattern.col_block_size,
)
| LinearBlockSparsePattern |
python | numba__numba | numba/cuda/tests/cudapy/test_intrinsics.py | {
"start": 6357,
"end": 34931
} | class ____(CUDATestCase):
def setUp(self):
super().setUp()
np.random.seed(0)
def test_simple_threadidx(self):
compiled = cuda.jit("void(int32[:])")(simple_threadidx)
ary = np.ones(1, dtype=np.int32)
compiled[1, 1](ary)
self.assertTrue(ary[0] == 0)
def test_fill_threadidx(self):
compiled = cuda.jit("void(int32[:])")(fill_threadidx)
N = 10
ary = np.ones(N, dtype=np.int32)
exp = np.arange(N, dtype=np.int32)
compiled[1, N](ary)
self.assertTrue(np.all(ary == exp))
def test_fill3d_threadidx(self):
X, Y, Z = 4, 5, 6
def c_contigous():
compiled = cuda.jit("void(int32[:,:,::1])")(fill3d_threadidx)
ary = np.zeros((X, Y, Z), dtype=np.int32)
compiled[1, (X, Y, Z)](ary)
return ary
def f_contigous():
compiled = cuda.jit("void(int32[::1,:,:])")(fill3d_threadidx)
ary = np.asfortranarray(np.zeros((X, Y, Z), dtype=np.int32))
compiled[1, (X, Y, Z)](ary)
return ary
c_res = c_contigous()
f_res = f_contigous()
self.assertTrue(np.all(c_res == f_res))
@skip_on_cudasim('Cudasim does not check types')
def test_nonliteral_grid_error(self):
with self.assertRaisesRegex(TypingError, 'RequireLiteralValue'):
cuda.jit('void(int32)')(nonliteral_grid)
@skip_on_cudasim('Cudasim does not check types')
def test_nonliteral_gridsize_error(self):
with self.assertRaisesRegex(TypingError, 'RequireLiteralValue'):
cuda.jit('void(int32)')(nonliteral_gridsize)
def test_simple_grid1d(self):
compiled = cuda.jit("void(int32[::1])")(simple_grid1d)
ntid, nctaid = 3, 7
nelem = ntid * nctaid
ary = np.empty(nelem, dtype=np.int32)
compiled[nctaid, ntid](ary)
self.assertTrue(np.all(ary == np.arange(nelem)))
def test_simple_grid2d(self):
compiled = cuda.jit("void(int32[:,::1])")(simple_grid2d)
ntid = (4, 3)
nctaid = (5, 6)
shape = (ntid[0] * nctaid[0], ntid[1] * nctaid[1])
ary = np.empty(shape, dtype=np.int32)
exp = ary.copy()
compiled[nctaid, ntid](ary)
for i in range(ary.shape[0]):
for j in range(ary.shape[1]):
exp[i, j] = i + j
self.assertTrue(np.all(ary == exp))
def test_simple_gridsize1d(self):
compiled = cuda.jit("void(int32[::1])")(simple_gridsize1d)
ntid, nctaid = 3, 7
ary = np.zeros(1, dtype=np.int32)
compiled[nctaid, ntid](ary)
self.assertEqual(ary[0], nctaid * ntid)
@skip_on_cudasim('Requires too many threads')
def test_issue_9229(self):
# Ensure that grid and grid size are correct - #9229 showed that they
# overflowed an int32.
@cuda.jit
def f(grid_error, gridsize_error):
i1 = cuda.grid(1)
i2 = cuda.blockIdx.x * cuda.blockDim.x + cuda.threadIdx.x
gs1 = cuda.gridsize(1)
gs2 = cuda.blockDim.x * cuda.gridDim.x
if i1 != i2:
grid_error[0] = 1
if gs1 != gs2:
gridsize_error[0] = 1
grid_error = np.zeros(1, dtype=np.uint64)
gridsize_error = np.zeros(1, dtype=np.uint64)
# A large enough grid for thread IDs to overflow an int32
# (22121216 * 256 = 5663031296, which is greater than 2 ** 32)
f[22121216, 256](grid_error, gridsize_error)
self.assertEqual(grid_error[0], 0)
self.assertEqual(gridsize_error[0], 0)
@skip_on_cudasim('Tests PTX emission')
def test_selp(self):
sig = (int64[:], int64, int64[:])
cu_branching_with_ifs = cuda.jit(sig)(branching_with_ifs)
cu_branching_with_selps = cuda.jit(sig)(branching_with_selps)
n = 32
b = 6
c = np.full(shape=32, fill_value=17, dtype=np.int64)
expected = c.copy()
expected[:5] = 3
a = np.arange(n, dtype=np.int64)
cu_branching_with_ifs[n, 1](a, b, c)
ptx = cu_branching_with_ifs.inspect_asm(sig)
self.assertEqual(2, len(re.findall(r'\s+bra\s+', ptx)))
np.testing.assert_array_equal(a, expected, err_msg='branching')
a = np.arange(n, dtype=np.int64)
cu_branching_with_selps[n, 1](a, b, c)
ptx = cu_branching_with_selps.inspect_asm(sig)
self.assertEqual(0, len(re.findall(r'\s+bra\s+', ptx)))
np.testing.assert_array_equal(a, expected, err_msg='selp')
def test_simple_gridsize2d(self):
compiled = cuda.jit("void(int32[::1])")(simple_gridsize2d)
ntid = (4, 3)
nctaid = (5, 6)
ary = np.zeros(2, dtype=np.int32)
compiled[nctaid, ntid](ary)
self.assertEqual(ary[0], nctaid[0] * ntid[0])
self.assertEqual(ary[1], nctaid[1] * ntid[1])
def test_intrinsic_forloop_step(self):
compiled = cuda.jit("void(int32[:,::1])")(intrinsic_forloop_step)
ntid = (4, 3)
nctaid = (5, 6)
shape = (ntid[0] * nctaid[0], ntid[1] * nctaid[1])
ary = np.empty(shape, dtype=np.int32)
compiled[nctaid, ntid](ary)
gridX, gridY = shape
height, width = ary.shape
for i, j in zip(range(ntid[0]), range(ntid[1])):
startX, startY = gridX + i, gridY + j
for x in range(startX, width, gridX):
for y in range(startY, height, gridY):
self.assertTrue(ary[y, x] == x + y, (ary[y, x], x + y))
def test_3dgrid(self):
@cuda.jit
def foo(out):
x, y, z = cuda.grid(3)
a, b, c = cuda.gridsize(3)
out[x, y, z] = a * b * c
arr = np.zeros(9 ** 3, dtype=np.int32).reshape(9, 9, 9)
foo[(3, 3, 3), (3, 3, 3)](arr)
np.testing.assert_equal(arr, 9 ** 3)
def test_3dgrid_2(self):
@cuda.jit
def foo(out):
x, y, z = cuda.grid(3)
a, b, c = cuda.gridsize(3)
grid_is_right = (
x == cuda.threadIdx.x + cuda.blockIdx.x * cuda.blockDim.x and
y == cuda.threadIdx.y + cuda.blockIdx.y * cuda.blockDim.y and
z == cuda.threadIdx.z + cuda.blockIdx.z * cuda.blockDim.z
)
gridsize_is_right = (a == cuda.blockDim.x * cuda.gridDim.x and
b == cuda.blockDim.y * cuda.gridDim.y and
c == cuda.blockDim.z * cuda.gridDim.z)
out[x, y, z] = grid_is_right and gridsize_is_right
x, y, z = (4 * 3, 3 * 2, 2 * 4)
arr = np.zeros((x * y * z), dtype=np.bool_).reshape(x, y, z)
foo[(4, 3, 2), (3, 2, 4)](arr)
self.assertTrue(np.all(arr))
def test_popc_u4(self):
compiled = cuda.jit("void(int32[:], uint32)")(simple_popc)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary, 0xF0)
self.assertEqual(ary[0], 4)
def test_popc_u8(self):
compiled = cuda.jit("void(int32[:], uint64)")(simple_popc)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary, 0xF00000000000)
self.assertEqual(ary[0], 4)
def test_fma_f4(self):
compiled = cuda.jit("void(f4[:], f4, f4, f4)")(simple_fma)
ary = np.zeros(1, dtype=np.float32)
compiled[1, 1](ary, 2., 3., 4.)
np.testing.assert_allclose(ary[0], 2 * 3 + 4)
def test_fma_f8(self):
compiled = cuda.jit("void(f8[:], f8, f8, f8)")(simple_fma)
ary = np.zeros(1, dtype=np.float64)
compiled[1, 1](ary, 2., 3., 4.)
np.testing.assert_allclose(ary[0], 2 * 3 + 4)
@skip_unless_cc_53
def test_hadd(self):
compiled = cuda.jit("void(f2[:], f2[:], f2[:])")(simple_hadd)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.array([3.], dtype=np.float16)
arg2 = np.array([4.], dtype=np.float16)
compiled[1, 1](ary, arg1, arg2)
np.testing.assert_allclose(ary[0], arg1 + arg2)
@skip_unless_cc_53
def test_hadd_scalar(self):
compiled = cuda.jit("void(f2[:], f2, f2)")(simple_hadd_scalar)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.float16(3.1415926)
arg2 = np.float16(3.)
compiled[1, 1](ary, arg1, arg2)
ref = arg1 + arg2
np.testing.assert_allclose(ary[0], ref)
@skip_on_cudasim('Compilation unsupported in the simulator')
def test_hadd_ptx(self):
args = (f2[:], f2, f2)
ptx, _ = compile_ptx(simple_hadd_scalar, args, cc=(5, 3))
self.assertIn('add.f16', ptx)
@skip_unless_cc_53
def test_hfma(self):
compiled = cuda.jit("void(f2[:], f2[:], f2[:], f2[:])")(simple_hfma)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.array([2.], dtype=np.float16)
arg2 = np.array([3.], dtype=np.float16)
arg3 = np.array([4.], dtype=np.float16)
compiled[1, 1](ary, arg1, arg2, arg3)
np.testing.assert_allclose(ary[0], arg1 * arg2 + arg3)
@skip_unless_cc_53
def test_hfma_scalar(self):
compiled = cuda.jit("void(f2[:], f2, f2, f2)")(simple_hfma_scalar)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.float16(2.)
arg2 = np.float16(3.)
arg3 = np.float16(4.)
compiled[1, 1](ary, arg1, arg2, arg3)
ref = arg1 * arg2 + arg3
np.testing.assert_allclose(ary[0], ref)
@skip_on_cudasim('Compilation unsupported in the simulator')
def test_hfma_ptx(self):
args = (f2[:], f2, f2, f2)
ptx, _ = compile_ptx(simple_hfma_scalar, args, cc=(5, 3))
self.assertIn('fma.rn.f16', ptx)
@skip_unless_cc_53
def test_hsub(self):
compiled = cuda.jit("void(f2[:], f2[:], f2[:])")(simple_hsub)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.array([3.], dtype=np.float16)
arg2 = np.array([4.], dtype=np.float16)
compiled[1, 1](ary, arg1, arg2)
np.testing.assert_allclose(ary[0], arg1 - arg2)
@skip_unless_cc_53
def test_hsub_scalar(self):
compiled = cuda.jit("void(f2[:], f2, f2)")(simple_hsub_scalar)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.float16(3.1415926)
arg2 = np.float16(1.57)
compiled[1, 1](ary, arg1, arg2)
ref = arg1 - arg2
np.testing.assert_allclose(ary[0], ref)
@skip_on_cudasim('Compilation unsupported in the simulator')
def test_hsub_ptx(self):
args = (f2[:], f2, f2)
ptx, _ = compile_ptx(simple_hsub_scalar, args, cc=(5, 3))
self.assertIn('sub.f16', ptx)
@skip_unless_cc_53
def test_hmul(self):
compiled = cuda.jit()(simple_hmul)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.array([3.], dtype=np.float16)
arg2 = np.array([4.], dtype=np.float16)
compiled[1, 1](ary, arg1, arg2)
np.testing.assert_allclose(ary[0], arg1 * arg2)
@skip_unless_cc_53
def test_hmul_scalar(self):
compiled = cuda.jit("void(f2[:], f2, f2)")(simple_hmul_scalar)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.float16(3.1415926)
arg2 = np.float16(1.57)
compiled[1, 1](ary, arg1, arg2)
ref = arg1 * arg2
np.testing.assert_allclose(ary[0], ref)
@skip_on_cudasim('Compilation unsupported in the simulator')
def test_hmul_ptx(self):
args = (f2[:], f2, f2)
ptx, _ = compile_ptx(simple_hmul_scalar, args, cc=(5, 3))
self.assertIn('mul.f16', ptx)
@skip_unless_cc_53
def test_hdiv_scalar(self):
compiled = cuda.jit("void(f2[:], f2, f2)")(simple_hdiv_scalar)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.float16(3.1415926)
arg2 = np.float16(1.57)
compiled[1, 1](ary, arg1, arg2)
ref = arg1 / arg2
np.testing.assert_allclose(ary[0], ref)
@skip_unless_cc_53
def test_hdiv(self):
compiled = cuda.jit("void(f2[:], f2[:], f2[:])")(simple_hdiv_kernel)
arry1 = np.random.randint(-65504, 65505, size=500).astype(np.float16)
arry2 = np.random.randint(-65504, 65505, size=500).astype(np.float16)
ary = np.zeros_like(arry1, dtype=np.float16)
compiled.forall(ary.size)(ary, arry1, arry2)
ref = arry1 / arry2
np.testing.assert_allclose(ary, ref)
@skip_unless_cc_53
def test_hneg(self):
compiled = cuda.jit("void(f2[:], f2[:])")(simple_hneg)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.array([3.], dtype=np.float16)
compiled[1, 1](ary, arg1)
np.testing.assert_allclose(ary[0], -arg1)
@skip_unless_cc_53
def test_hneg_scalar(self):
compiled = cuda.jit("void(f2[:], f2)")(simple_hneg_scalar)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.float16(3.1415926)
compiled[1, 1](ary, arg1)
ref = -arg1
np.testing.assert_allclose(ary[0], ref)
@skip_on_cudasim('Compilation unsupported in the simulator')
def test_hneg_ptx(self):
args = (f2[:], f2)
ptx, _ = compile_ptx(simple_hneg_scalar, args, cc=(5, 3))
self.assertIn('neg.f16', ptx)
@skip_unless_cc_53
def test_habs(self):
compiled = cuda.jit()(simple_habs)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.array([-3.], dtype=np.float16)
compiled[1, 1](ary, arg1)
np.testing.assert_allclose(ary[0], abs(arg1))
@skip_unless_cc_53
def test_habs_scalar(self):
compiled = cuda.jit("void(f2[:], f2)")(simple_habs_scalar)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.float16(-3.1415926)
compiled[1, 1](ary, arg1)
ref = abs(arg1)
np.testing.assert_allclose(ary[0], ref)
@skip_on_cudasim('Compilation unsupported in the simulator')
def test_habs_ptx(self):
args = (f2[:], f2)
ptx, _ = compile_ptx(simple_habs_scalar, args, cc=(5, 3))
self.assertIn('abs.f16', ptx)
@skip_unless_cc_53
def test_fp16_intrinsics_common(self):
kernels = (simple_hsin, simple_hcos,
simple_hlog, simple_hlog2, simple_hlog10,
simple_hsqrt, simple_hceil, simple_hfloor,
simple_hrcp, simple_htrunc, simple_hrint,
simple_hrsqrt)
exp_kernels = (simple_hexp, simple_hexp2)
expected_functions = (np.sin, np.cos,
np.log, np.log2, np.log10,
np.sqrt, np.ceil, np.floor,
np.reciprocal, np.trunc, np.rint,
numpy_hrsqrt)
expected_exp_functions = (np.exp, np.exp2)
# Generate random data
N = 32
np.random.seed(1)
x = np.random.randint(1, 65505, size=N).astype(np.float16)
r = np.zeros_like(x)
for kernel, fn in zip(kernels, expected_functions):
with self.subTest(fn=fn):
kernel = cuda.jit("void(f2[:], f2[:])")(kernel)
kernel[1,N](r, x)
expected = fn(x, dtype=np.float16)
np.testing.assert_allclose(r, expected)
x2 = np.random.randint(1, 10, size=N).astype(np.float16)
for kernel, fn in zip(exp_kernels, expected_exp_functions):
with self.subTest(fn=fn):
kernel = cuda.jit("void(f2[:], f2[:])")(kernel)
kernel[1,N](r, x2)
expected = fn(x2, dtype=np.float16)
np.testing.assert_allclose(r, expected)
@skip_unless_cc_53
def test_hexp10(self):
@cuda.jit()
def hexp10_vectors(r, x):
i = cuda.grid(1)
if i < len(r):
r[i] = cuda.fp16.hexp10(x[i])
# Generate random data
N = 32
np.random.seed(1)
x = np.random.rand(N).astype(np.float16)
r = np.zeros_like(x)
# Run the kernel
hexp10_vectors[1, N](r, x)
np.testing.assert_allclose(r, 10 ** x)
@skip_unless_cc_53
def test_fp16_comparison(self):
fns = (simple_heq_scalar, simple_hne_scalar, simple_hge_scalar,
simple_hgt_scalar, simple_hle_scalar, simple_hlt_scalar)
ops = (operator.eq, operator.ne, operator.ge,
operator.gt, operator.le, operator.lt)
for fn, op in zip(fns, ops):
with self.subTest(op=op):
kernel = cuda.jit("void(b1[:], f2, f2)")(fn)
expected = np.zeros(1, dtype=np.bool_)
got = np.zeros(1, dtype=np.bool_)
arg2 = np.float16(2)
arg3 = np.float16(3)
arg4 = np.float16(4)
# Check with equal arguments
kernel[1, 1](got, arg3, arg3)
expected = op(arg3, arg3)
self.assertEqual(expected, got[0])
# Check with LHS < RHS
kernel[1, 1](got, arg3, arg4)
expected = op(arg3, arg4)
self.assertEqual(expected, got[0])
# Check with LHS > RHS
kernel[1, 1](got, arg3, arg2)
expected = op(arg3, arg2)
self.assertEqual(expected, got[0])
@skip_unless_cc_53
def test_multiple_float16_comparisons(self):
functions = (test_multiple_hcmp_1,
test_multiple_hcmp_2,
test_multiple_hcmp_3,
test_multiple_hcmp_4,
test_multiple_hcmp_5)
for fn in functions:
with self.subTest(fn=fn):
compiled = cuda.jit("void(b1[:], f2, f2, f2)")(fn)
ary = np.zeros(1, dtype=np.bool_)
arg1 = np.float16(2.)
arg2 = np.float16(3.)
arg3 = np.float16(4.)
compiled[1, 1](ary, arg1, arg2, arg3)
self.assertTrue(ary[0])
@skip_unless_cc_53
def test_hmax(self):
compiled = cuda.jit("void(f2[:], f2, f2)")(simple_hmax_scalar)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.float16(3.)
arg2 = np.float16(4.)
compiled[1, 1](ary, arg1, arg2)
np.testing.assert_allclose(ary[0], arg2)
arg1 = np.float16(5.)
compiled[1, 1](ary, arg1, arg2)
np.testing.assert_allclose(ary[0], arg1)
@skip_unless_cc_53
def test_hmin(self):
compiled = cuda.jit("void(f2[:], f2, f2)")(simple_hmin_scalar)
ary = np.zeros(1, dtype=np.float16)
arg1 = np.float16(3.)
arg2 = np.float16(4.)
compiled[1, 1](ary, arg1, arg2)
np.testing.assert_allclose(ary[0], arg1)
arg1 = np.float16(5.)
compiled[1, 1](ary, arg1, arg2)
np.testing.assert_allclose(ary[0], arg2)
def test_cbrt_f32(self):
compiled = cuda.jit("void(float32[:], float32)")(simple_cbrt)
ary = np.zeros(1, dtype=np.float32)
cbrt_arg = 2.
compiled[1, 1](ary, cbrt_arg)
np.testing.assert_allclose(ary[0], cbrt_arg ** (1 / 3))
def test_cbrt_f64(self):
compiled = cuda.jit("void(float64[:], float64)")(simple_cbrt)
ary = np.zeros(1, dtype=np.float64)
cbrt_arg = 6.
compiled[1, 1](ary, cbrt_arg)
np.testing.assert_allclose(ary[0], cbrt_arg ** (1 / 3))
def test_brev_u4(self):
compiled = cuda.jit("void(uint32[:], uint32)")(simple_brev)
ary = np.zeros(1, dtype=np.uint32)
compiled[1, 1](ary, 0x000030F0)
self.assertEqual(ary[0], 0x0F0C0000)
@skip_on_cudasim('only get given a Python "int", assumes 32 bits')
def test_brev_u8(self):
compiled = cuda.jit("void(uint64[:], uint64)")(simple_brev)
ary = np.zeros(1, dtype=np.uint64)
compiled[1, 1](ary, 0x000030F0000030F0)
self.assertEqual(ary[0], 0x0F0C00000F0C0000)
def test_clz_i4(self):
compiled = cuda.jit("void(int32[:], int32)")(simple_clz)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary, 0x00100000)
self.assertEqual(ary[0], 11)
def test_clz_u4(self):
"""
Although the CUDA Math API
(http://docs.nvidia.com/cuda/cuda-math-api/group__CUDA__MATH__INTRINSIC__INT.html)
only says int32 & int64 arguments are supported in C code, the LLVM
IR input supports i8, i16, i32 & i64 (LLVM doesn't have a concept of
unsigned integers, just unsigned operations on integers).
http://docs.nvidia.com/cuda/nvvm-ir-spec/index.html#bit-manipulations-intrinics
"""
compiled = cuda.jit("void(int32[:], uint32)")(simple_clz)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary, 0x00100000)
self.assertEqual(ary[0], 11)
def test_clz_i4_1s(self):
compiled = cuda.jit("void(int32[:], int32)")(simple_clz)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary, 0xFFFFFFFF)
self.assertEqual(ary[0], 0)
def test_clz_i4_0s(self):
compiled = cuda.jit("void(int32[:], int32)")(simple_clz)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary, 0x0)
self.assertEqual(ary[0], 32, "CUDA semantics")
@skip_on_cudasim('only get given a Python "int", assumes 32 bits')
def test_clz_i8(self):
compiled = cuda.jit("void(int32[:], int64)")(simple_clz)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary, 0x000000000010000)
self.assertEqual(ary[0], 47)
def test_ffs_i4(self):
compiled = cuda.jit("void(int32[:], int32)")(simple_ffs)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary, 0x00100000)
self.assertEqual(ary[0], 21)
compiled[1, 1](ary, 0x80000000)
self.assertEqual(ary[0], 32)
def test_ffs_u4(self):
compiled = cuda.jit("void(int32[:], uint32)")(simple_ffs)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary, 0x00100000)
self.assertEqual(ary[0], 21)
compiled[1, 1](ary, 0x80000000)
self.assertEqual(ary[0], 32)
def test_ffs_i4_1s(self):
compiled = cuda.jit("void(int32[:], int32)")(simple_ffs)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary, 0xFFFFFFFF)
self.assertEqual(ary[0], 1)
def test_ffs_i4_0s(self):
compiled = cuda.jit("void(int32[:], int32)")(simple_ffs)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary, 0x0)
self.assertEqual(ary[0], 0)
@skip_on_cudasim('only get given a Python "int", assumes 32 bits')
def test_ffs_i8(self):
compiled = cuda.jit("void(int32[:], int64)")(simple_ffs)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary, 0x000000000010000)
self.assertEqual(ary[0], 17)
compiled[1, 1](ary, 0x100000000)
self.assertEqual(ary[0], 33)
    def test_simple_laneid(self):
        # Launch `count` warps' worth of threads; each thread writes its
        # lane id, so the expected output is 0..31 repeated `count` times.
        compiled = cuda.jit("void(int32[:])")(simple_laneid)
        count = 2
        ary = np.zeros(count * 32, dtype=np.int32)
        exp = np.tile(np.arange(32, dtype=np.int32), count)
        compiled[1, count * 32](ary)
        self.assertTrue(np.all(ary == exp))
def test_simple_warpsize(self):
compiled = cuda.jit("void(int32[:])")(simple_warpsize)
ary = np.zeros(1, dtype=np.int32)
compiled[1, 1](ary)
self.assertEqual(ary[0], 32, "CUDA semantics")
    def test_round_f4(self):
        # Device round() on float32 must match Python's round(), including
        # the banker's-rounding halfway cases (-2.5 -> -2, 2.5 -> 2).
        compiled = cuda.jit("void(int64[:], float32)")(simple_round)
        ary = np.zeros(1, dtype=np.int64)
        for i in [-3.0, -2.5, -2.25, -1.5, 1.5, 2.25, 2.5, 2.75]:
            compiled[1, 1](ary, i)
            self.assertEqual(ary[0], round(i))
def test_round_f8(self):
compiled = cuda.jit("void(int64[:], float64)")(simple_round)
ary = np.zeros(1, dtype=np.int64)
for i in [-3.0, -2.5, -2.25, -1.5, 1.5, 2.25, 2.5, 2.75]:
compiled[1, 1](ary, i)
self.assertEqual(ary[0], round(i))
def test_round_to_f4(self):
compiled = cuda.jit("void(float32[:], float32, int32)")(simple_round_to)
ary = np.zeros(1, dtype=np.float32)
np.random.seed(123)
vals = np.random.random(32).astype(np.float32)
np.concatenate((vals, np.array([np.inf, -np.inf, np.nan])))
digits = (
# Common case branch of round_to_impl
-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5,
# The algorithm currently implemented can only round to 13 digits
# with single precision. Note that this doesn't trigger the
# "overflow safe" branch of the implementation, which can only be
# hit when using double precision.
13
)
for val, ndigits in itertools.product(vals, digits):
with self.subTest(val=val, ndigits=ndigits):
compiled[1, 1](ary, val, ndigits)
self.assertPreciseEqual(ary[0], round(val, ndigits),
prec='single')
# CPython on most platforms uses rounding based on dtoa.c, whereas the CUDA
# round-to implementation uses CPython's fallback implementation, which has
# slightly different behavior at the edges of the domain. Since the CUDA
# simulator executes using CPython, we need to skip this test when the
# simulator is active.
@skip_on_cudasim('Overflow behavior differs on CPython')
def test_round_to_f4_overflow(self):
# Test that the input value is returned when y in round_ndigits
# overflows.
compiled = cuda.jit("void(float32[:], float32, int32)")(simple_round_to)
ary = np.zeros(1, dtype=np.float32)
val = np.finfo(np.float32).max
# An unusually large number of digits is required to hit the "y
# overflows" branch of the implementation because the typing results in
# the computation of y as float64.
ndigits = 300
compiled[1, 1](ary, val, ndigits)
self.assertEqual(ary[0], val)
def test_round_to_f4_halfway(self):
compiled = cuda.jit("void(float32[:], float32, int32)")(simple_round_to)
ary = np.zeros(1, dtype=np.float32)
# Value chosen to trigger the "round to even" branch of the
# implementation
val = 0.3425
ndigits = 3
compiled[1, 1](ary, val, ndigits)
self.assertPreciseEqual(ary[0], round(val, ndigits), prec='single')
def test_round_to_f8(self):
compiled = cuda.jit("void(float64[:], float64, int32)")(simple_round_to)
ary = np.zeros(1, dtype=np.float64)
np.random.seed(123)
vals = np.random.random(32)
np.concatenate((vals, np.array([np.inf, -np.inf, np.nan])))
digits = (-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5)
for val, ndigits in itertools.product(vals, digits):
with self.subTest(val=val, ndigits=ndigits):
compiled[1, 1](ary, val, ndigits)
self.assertPreciseEqual(ary[0], round(val, ndigits),
prec='exact')
# Trigger the "overflow safe" branch of the implementation
val = 0.12345678987654321 * 10e-15
ndigits = 23
with self.subTest(val=val, ndigits=ndigits):
compiled[1, 1](ary, val, ndigits)
self.assertPreciseEqual(ary[0], round(val, ndigits),
prec='double')
# Skipped on cudasim for the same reasons as test_round_to_f4 above.
@skip_on_cudasim('Overflow behavior differs on CPython')
def test_round_to_f8_overflow(self):
# Test that the input value is returned when y in round_ndigits
# overflows.
compiled = cuda.jit("void(float64[:], float64, int32)")(simple_round_to)
ary = np.zeros(1, dtype=np.float64)
val = np.finfo(np.float64).max
# Unlike test_round_to_f4_overflow, a reasonable number of digits can
# be used for this test to overflow y in round_ndigits.
ndigits = 12
compiled[1, 1](ary, val, ndigits)
self.assertEqual(ary[0], val)
def test_round_to_f8_halfway(self):
compiled = cuda.jit("void(float64[:], float64, int32)")(simple_round_to)
ary = np.zeros(1, dtype=np.float64)
# Value chosen to trigger the "round to even" branch of the
# implementation, with a value that is not exactly representable with a
# float32, but only a float64.
val = 0.5425
ndigits = 3
compiled[1, 1](ary, val, ndigits)
self.assertPreciseEqual(ary[0], round(val, ndigits), prec='double')
if __name__ == '__main__':
unittest.main()
| TestCudaIntrinsic |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 98692,
"end": 99832
} | class ____:
def test_format_timeframe(self):
assert self.locale._format_timeframe("now", 0) == "sekarang"
assert self.locale._format_timeframe("second", 1) == "saat"
assert self.locale._format_timeframe("seconds", 3) == "3 saat"
assert self.locale._format_timeframe("minute", 1) == "minit"
assert self.locale._format_timeframe("minutes", 4) == "4 minit"
assert self.locale._format_timeframe("hour", 1) == "jam"
assert self.locale._format_timeframe("hours", 23) == "23 jam"
assert self.locale._format_timeframe("day", 1) == "hari"
assert self.locale._format_timeframe("days", 12) == "12 hari"
assert self.locale._format_timeframe("month", 1) == "bulan"
assert self.locale._format_timeframe("months", 2) == "2 bulan"
assert self.locale._format_timeframe("year", 1) == "tahun"
assert self.locale._format_timeframe("years", 2) == "2 tahun"
def test_weekday(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
assert self.locale.day_name(dt.isoweekday()) == "Sabtu"
@pytest.mark.usefixtures("lang_locale")
| TestMalayLocale |
python | optuna__optuna | optuna/storages/journal/_file.py | {
"start": 455,
"end": 4818
} | class ____(BaseJournalBackend):
"""File storage class for Journal log backend.
Compared to SQLite3, the benefit of this backend is that it is more suitable for
environments where the file system does not support ``fcntl()`` file locking.
For example, as written in the `SQLite3 FAQ <https://www.sqlite.org/faq.html#q5>`__,
SQLite3 might not work on NFS (Network File System) since ``fcntl()`` file locking
is broken on many NFS implementations. In such scenarios, this backend provides
several workarounds for locking files. For more details, refer to the `Medium blog post`_.
.. _Medium blog post: https://medium.com/optuna/distributed-optimization-via-nfs\
-using-optunas-new-operation-based-logging-storage-9815f9c3f932
It's important to note that, similar to SQLite3, this class doesn't support a high
level of write concurrency, as outlined in the `SQLAlchemy documentation`_. However,
in typical situations where the objective function is computationally expensive, Optuna
users don't need to be concerned about this limitation. The reason being, the write
operations are not the bottleneck as long as the objective function doesn't invoke
:meth:`~optuna.trial.Trial.report` and :meth:`~optuna.trial.Trial.set_user_attr` excessively.
.. _SQLAlchemy documentation: https://docs.sqlalchemy.org/en/20/dialects/sqlite.html\
#database-locking-behavior-concurrency
Args:
file_path:
Path of file to persist the log to.
lock_obj:
Lock object for process exclusivity. An instance of
:class:`~optuna.storages.journal.JournalFileSymlinkLock` and
:class:`~optuna.storages.journal.JournalFileOpenLock` can be passed.
"""
def __init__(self, file_path: str, lock_obj: BaseJournalFileLock | None = None) -> None:
self._file_path: str = file_path
self._lock = lock_obj or JournalFileSymlinkLock(self._file_path)
if not os.path.exists(self._file_path):
open(self._file_path, "ab").close() # Create a file if it does not exist.
self._log_number_offset: dict[int, int] = {0: 0}
def read_logs(self, log_number_from: int) -> Generator[dict[str, Any], None, None]:
with open(self._file_path, "rb") as f:
# Maintain remaining_log_size to allow writing by another process
# while reading the log.
remaining_log_size = os.stat(self._file_path).st_size
log_number_start = 0
if log_number_from in self._log_number_offset:
f.seek(self._log_number_offset[log_number_from])
log_number_start = log_number_from
remaining_log_size -= self._log_number_offset[log_number_from]
last_decode_error = None
for log_number, line in enumerate(f, start=log_number_start):
byte_len = len(line)
remaining_log_size -= byte_len
if remaining_log_size < 0:
break
if last_decode_error is not None:
raise last_decode_error
if log_number + 1 not in self._log_number_offset:
self._log_number_offset[log_number + 1] = (
self._log_number_offset[log_number] + byte_len
)
if log_number < log_number_from:
continue
# Ensure that each line ends with line separators (\n, \r\n).
if not line.endswith(b"\n"):
last_decode_error = ValueError("Invalid log format.")
del self._log_number_offset[log_number + 1]
continue
try:
yield json.loads(line)
except json.JSONDecodeError as err:
last_decode_error = err
del self._log_number_offset[log_number + 1]
def append_logs(self, logs: list[dict[str, Any]]) -> None:
with get_lock_file(self._lock):
what_to_write = (
"\n".join([json.dumps(log, separators=(",", ":")) for log in logs]) + "\n"
)
with open(self._file_path, "ab") as f:
f.write(what_to_write.encode("utf-8"))
f.flush()
os.fsync(f.fileno())
| JournalFileBackend |
python | pypa__warehouse | warehouse/captcha/hcaptcha.py | {
"start": 1074,
"end": 2005
} | class ____(HCaptchaError):
pass
# https://docs.hcaptcha.com/#siteverify-error-codes-table
ERROR_CODE_MAP = {
"missing-input-secret": MissingInputSecretError,
"invalid-input-secret": InvalidInputSecretError,
"missing-input-response": MissingInputResponseError,
"invalid-input-response": InvalidInputResponseError,
"expired-input-response": ExpiredInputResponseError,
"already-seen-response": AlreadySeenResponseError,
"bad-request": BadRequestError,
"missing-remoteip": MissingRemoteIPError,
"invalid-remoteip": InvalidRemoteIPError,
"not-using-dummy-passcode": NotUsingDummyPasscodeError,
"sitekey-secret-mismatch": SitekeySecretMismatchError,
# Maybe legacy?
"invalid-or-already-seen-response": InvalidOrAlreadySeenResponseError,
}
_CSP_ENTRIES = [
"https://hcaptcha.com",
"https://*.hcaptcha.com",
]
@implementer(ICaptchaService)
| InvalidOrAlreadySeenResponseError |
python | encode__django-rest-framework | tests/test_request.py | {
"start": 7549,
"end": 9929
} | class ____(TestCase):
def setUp(self):
# Pass request object through session middleware so session is
# available to login and logout functions
self.wrapped_request = factory.get('/')
self.request = Request(self.wrapped_request)
def dummy_get_response(request): # pragma: no cover
return None
SessionMiddleware(dummy_get_response).process_request(self.wrapped_request)
AuthenticationMiddleware(dummy_get_response).process_request(self.wrapped_request)
User.objects.create_user('ringo', 'starr@thebeatles.com', 'yellow')
self.user = authenticate(username='ringo', password='yellow')
def test_user_can_be_set(self):
self.request.user = self.user
assert self.request.user == self.user
def test_user_can_login(self):
login(self.request, self.user)
assert self.request.user == self.user
def test_user_can_logout(self):
self.request.user = self.user
assert not self.request.user.is_anonymous
logout(self.request)
assert self.request.user.is_anonymous
def test_logged_in_user_is_set_on_wrapped_request(self):
login(self.request, self.user)
assert self.wrapped_request.user == self.user
def test_calling_user_fails_when_attribute_error_is_raised(self):
"""
This proves that when an AttributeError is raised inside of the request.user
property, that we can handle this and report the true, underlying error.
"""
class AuthRaisesAttributeError:
def authenticate(self, request):
self.MISSPELLED_NAME_THAT_DOESNT_EXIST
request = Request(self.wrapped_request, authenticators=(AuthRaisesAttributeError(),))
# The middleware processes the underlying Django request, sets anonymous user
assert self.wrapped_request.user.is_anonymous
# The DRF request object does not have a user and should run authenticators
expected = r"no attribute 'MISSPELLED_NAME_THAT_DOESNT_EXIST'"
with pytest.raises(WrappedAttributeError, match=expected):
request.user
with pytest.raises(WrappedAttributeError, match=expected):
hasattr(request, 'user')
with pytest.raises(WrappedAttributeError, match=expected):
login(request, self.user)
| TestUserSetter |
python | ray-project__ray | rllib/algorithms/dqn/dqn_torch_model.py | {
"start": 463,
"end": 6865
} | class ____(TorchModelV2, nn.Module):
"""Extension of standard TorchModelV2 to provide dueling-Q functionality."""
def __init__(
self,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
num_outputs: int,
model_config: ModelConfigDict,
name: str,
*,
q_hiddens: Sequence[int] = (256,),
dueling: bool = False,
dueling_activation: str = "relu",
num_atoms: int = 1,
use_noisy: bool = False,
v_min: float = -10.0,
v_max: float = 10.0,
sigma0: float = 0.5,
# TODO(sven): Move `add_layer_norm` into ModelCatalog as
# generic option, then error if we use ParameterNoise as
# Exploration type and do not have any LayerNorm layers in
# the net.
add_layer_norm: bool = False
):
"""Initialize variables of this model.
Extra model kwargs:
q_hiddens (Sequence[int]): List of layer-sizes after(!) the
Advantages(A)/Value(V)-split. Hence, each of the A- and V-
branches will have this structure of Dense layers. To define
the NN before this A/V-split, use - as always -
config["model"]["fcnet_hiddens"].
dueling: Whether to build the advantage(A)/value(V) heads
for DDQN. If True, Q-values are calculated as:
Q = (A - mean[A]) + V. If False, raw NN output is interpreted
as Q-values.
dueling_activation: The activation to use for all dueling
layers (A- and V-branch). One of "relu", "tanh", "linear".
num_atoms: If >1, enables distributional DQN.
use_noisy: Use noisy layers.
v_min: Min value support for distributional DQN.
v_max: Max value support for distributional DQN.
sigma0 (float): Initial value of noisy layers.
add_layer_norm: Enable layer norm (for param noise).
"""
nn.Module.__init__(self)
super(DQNTorchModel, self).__init__(
obs_space, action_space, num_outputs, model_config, name
)
self.dueling = dueling
self.num_atoms = num_atoms
self.v_min = v_min
self.v_max = v_max
self.sigma0 = sigma0
ins = num_outputs
advantage_module = nn.Sequential()
value_module = nn.Sequential()
# Dueling case: Build the shared (advantages and value) fc-network.
for i, n in enumerate(q_hiddens):
if use_noisy:
advantage_module.add_module(
"dueling_A_{}".format(i),
NoisyLayer(
ins, n, sigma0=self.sigma0, activation=dueling_activation
),
)
value_module.add_module(
"dueling_V_{}".format(i),
NoisyLayer(
ins, n, sigma0=self.sigma0, activation=dueling_activation
),
)
else:
advantage_module.add_module(
"dueling_A_{}".format(i),
SlimFC(ins, n, activation_fn=dueling_activation),
)
value_module.add_module(
"dueling_V_{}".format(i),
SlimFC(ins, n, activation_fn=dueling_activation),
)
# Add LayerNorm after each Dense.
if add_layer_norm:
advantage_module.add_module(
"LayerNorm_A_{}".format(i), nn.LayerNorm(n)
)
value_module.add_module("LayerNorm_V_{}".format(i), nn.LayerNorm(n))
ins = n
# Actual Advantages layer (nodes=num-actions).
if use_noisy:
advantage_module.add_module(
"A",
NoisyLayer(
ins, self.action_space.n * self.num_atoms, sigma0, activation=None
),
)
elif q_hiddens:
advantage_module.add_module(
"A", SlimFC(ins, action_space.n * self.num_atoms, activation_fn=None)
)
self.advantage_module = advantage_module
# Value layer (nodes=1).
if self.dueling:
if use_noisy:
value_module.add_module(
"V", NoisyLayer(ins, self.num_atoms, sigma0, activation=None)
)
elif q_hiddens:
value_module.add_module(
"V", SlimFC(ins, self.num_atoms, activation_fn=None)
)
self.value_module = value_module
def get_q_value_distributions(self, model_out):
"""Returns distributional values for Q(s, a) given a state embedding.
Override this in your custom model to customize the Q output head.
Args:
model_out: Embedding from the model layers.
Returns:
(action_scores, logits, dist) if num_atoms == 1, otherwise
(action_scores, z, support_logits_per_action, logits, dist)
"""
action_scores = self.advantage_module(model_out)
if self.num_atoms > 1:
# Distributional Q-learning uses a discrete support z
# to represent the action value distribution
z = torch.arange(0.0, self.num_atoms, dtype=torch.float32).to(
action_scores.device
)
z = self.v_min + z * (self.v_max - self.v_min) / float(self.num_atoms - 1)
support_logits_per_action = torch.reshape(
action_scores, shape=(-1, self.action_space.n, self.num_atoms)
)
support_prob_per_action = nn.functional.softmax(
support_logits_per_action, dim=-1
)
action_scores = torch.sum(z * support_prob_per_action, dim=-1)
logits = support_logits_per_action
probs = support_prob_per_action
return action_scores, z, support_logits_per_action, logits, probs
else:
logits = torch.unsqueeze(torch.ones_like(action_scores), -1)
return action_scores, logits, logits
def get_state_value(self, model_out):
"""Returns the state value prediction for the given state embedding."""
return self.value_module(model_out)
| DQNTorchModel |
python | sphinx-doc__sphinx | sphinx/ext/doctest.py | {
"start": 6673,
"end": 7918
class ____:
    """A named group of test blocks collected from one document.

    Holds setup code, the tests themselves, and cleanup code so a runner
    can execute them in order against a shared namespace.
    """

    def __init__(self, name: str) -> None:
        self.name = name
        self.setup: list[TestCode] = []
        # Each entry is either a single-element [doctest] list or a
        # (testcode, output) pair whose output slot starts as None until
        # a matching "testoutput" block arrives.
        self.tests: list[list[TestCode] | tuple[TestCode, None]] = []
        self.cleanup: list[TestCode] = []

    def add_code(self, code: TestCode, prepend: bool = False) -> None:
        """File *code* into the right bucket based on its directive type.

        *prepend* only affects "testsetup" blocks and places them ahead
        of previously collected setup code. Raises RuntimeError for an
        unknown ``code.type``.
        """
        if code.type == 'testsetup':
            if prepend:
                self.setup.insert(0, code)
            else:
                self.setup.append(code)
        elif code.type == 'testcleanup':
            self.cleanup.append(code)
        elif code.type == 'doctest':
            self.tests.append([code])
        elif code.type == 'testcode':
            # "testoutput" may replace the second element
            self.tests.append((code, None))
        elif code.type == 'testoutput':
            if self.tests:
                latest_test = self.tests[-1]
                if len(latest_test) == 2:
                    # Pair the output with the preceding "testcode".
                    # A length-1 entry is a doctest, which carries its
                    # own expected output, so it is left untouched.
                    self.tests[-1] = [latest_test[0], code]
        else:
            raise RuntimeError(__('invalid TestCode type'))

    def __repr__(self) -> str:
        return (
            f'TestGroup(name={self.name!r}, setup={self.setup!r}, '
            f'cleanup={self.cleanup!r}, tests={self.tests!r})'
        )
python | pypa__setuptools | setuptools/errors.py | {
"start": 1313,
"end": 1508
} | class ____(OptionError): # type: ignore[valid-type, misc] # distutils imports are `Any` on python 3.12+
"""Error used for configurations that were deprecated and removed."""
| RemovedConfigError |
python | walkccc__LeetCode | solutions/371. Sum of Two Integers/371.py | {
"start": 0,
"end": 208
} | class ____:
def getSum(self, a: int, b: int) -> int:
mask = 0xFFFFFFFF
MAX = 2000
while b != 0:
a, b = (a ^ b) & mask, ((a & b) << 1) & mask
return a if a < MAX else ~(a ^ mask)
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 31371,
"end": 31565
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("INTERNAL", "PRIVATE", "PUBLIC")
| RepoAccessAuditEntryVisibility |
python | ray-project__ray | python/ray/_private/ray_perf.py | {
"start": 572,
"end": 849
class ____:
    """Async actor exposing trivial methods for the perf harness.

    The methods do no real work; they exist to measure async actor
    call/return overhead.
    """

    async def small_value(self):
        # Minimal constant payload; isolates bare call overhead.
        return b"ok"

    async def small_value_with_arg(self, x):
        # Same as small_value but with one argument, adding argument
        # transfer to the measured path.
        return b"ok"

    async def small_value_batch(self, n):
        # Fan out n calls to the module-level small_value remote task and
        # wait for all of them to finish.
        await asyncio.wait([small_value.remote() for _ in range(n)])
@ray.remote(num_cpus=0)
| AsyncActor |
python | django__django | tests/migrations/test_writer.py | {
"start": 1946,
"end": 2316
class ____(enum.IntFlag):
    # Fixture: IntFlag with two members, used to check that flag values
    # (including combinations) serialize correctly in migration output.
    A = 1
    B = 2


def decorator(f):
    # Fixture decorator; uses functools.wraps so the wrapped function
    # keeps its original name/qualname when referenced in a migration.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)

    return wrapper


# The three functions below exist only to be referenced by tests; their
# bodies are intentionally empty — what matters is how each decorated
# callable round-trips through the serializer.
@decorator
def function_with_decorator():
    pass


@functools.cache
def function_with_cache():
    pass


@functools.lru_cache(maxsize=10)
def function_with_lru_cache():
    pass
python | walkccc__LeetCode | solutions/3418. Maximum Amount of Money Robot Can Earn/3418.py | {
"start": 0,
"end": 941
} | class ____:
def maximumAmount(self, coins: list[list[int]]) -> int:
m = len(coins)
n = len(coins[0])
# dp[i][j][k] := the maximum profit at position (i, j) with k remaining
# neutralizations
dp = [[[-math.inf] * 4 for _ in range(n)] for _ in range(m)]
# Base case: the robot starts at the top-left corner.
dp[0][0][2] = coins[0][0]
if coins[0][0] < 0:
dp[0][0][1] = 0 # Neutralize the robber.
for i in range(m):
for j in range(n):
for k in range(3): # for each number of remaining neutralizations
if i > 0:
dp[i][j][k] = max(dp[i][j][k],
dp[i - 1][j][k] + coins[i][j],
dp[i - 1][j][k + 1])
if j > 0:
dp[i][j][k] = max(dp[i][j][k],
dp[i][j - 1][k] + coins[i][j],
dp[i][j - 1][k + 1])
return max(dp[-1][-1])
| Solution |
python | django__django | django/contrib/gis/utils/layermapping.py | {
"start": 1074,
"end": 1121
class ____(LayerMapError):
    # Marker exception; subclass of LayerMapError so callers can catch
    # layer-mapping failures generically. Presumably raised for string
    # values the mapper cannot handle — confirm at the raise sites in
    # this module.
    pass
python | numpy__numpy | numpy/lib/tests/test_twodim_base.py | {
"start": 3672,
"end": 5096
} | class ____:
def test_vector(self):
vals = (100 * arange(5)).astype('l')
b = zeros((5, 5))
for k in range(5):
b[k, k] = vals[k]
assert_equal(diag(vals), b)
b = zeros((7, 7))
c = b.copy()
for k in range(5):
b[k, k + 2] = vals[k]
c[k + 2, k] = vals[k]
assert_equal(diag(vals, k=2), b)
assert_equal(diag(vals, k=-2), c)
def test_matrix(self, vals=None):
if vals is None:
vals = (100 * get_mat(5) + 1).astype('l')
b = zeros((5,))
for k in range(5):
b[k] = vals[k, k]
assert_equal(diag(vals), b)
b = b * 0
for k in range(3):
b[k] = vals[k, k + 2]
assert_equal(diag(vals, 2), b[:3])
for k in range(3):
b[k] = vals[k + 2, k]
assert_equal(diag(vals, -2), b[:3])
def test_fortran_order(self):
vals = array((100 * get_mat(5) + 1), order='F', dtype='l')
self.test_matrix(vals)
def test_diag_bounds(self):
A = [[1, 2], [3, 4], [5, 6]]
assert_equal(diag(A, k=2), [])
assert_equal(diag(A, k=1), [2])
assert_equal(diag(A, k=0), [1, 4])
assert_equal(diag(A, k=-1), [3, 6])
assert_equal(diag(A, k=-2), [5])
assert_equal(diag(A, k=-3), [])
def test_failure(self):
assert_raises(ValueError, diag, [[[1]]])
| TestDiag |
python | openai__openai-python | src/openai/resources/beta/realtime/realtime.py | {
"start": 25025,
"end": 27368
} | class ____(BaseRealtimeConnectionResource):
def clear(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
"""Send this event to clear the audio bytes in the buffer.
The server will
respond with an `input_audio_buffer.cleared` event.
"""
self._connection.send(
cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.clear", "event_id": event_id}))
)
def commit(self, *, event_id: str | NotGiven = NOT_GIVEN) -> None:
"""
Send this event to commit the user input audio buffer, which will create a
new user message item in the conversation. This event will produce an error
if the input audio buffer is empty. When in Server VAD mode, the client does
not need to send this event, the server will commit the audio buffer
automatically.
Committing the input audio buffer will trigger input audio transcription
(if enabled in session configuration), but it will not create a response
from the model. The server will respond with an `input_audio_buffer.committed`
event.
"""
self._connection.send(
cast(RealtimeClientEventParam, strip_not_given({"type": "input_audio_buffer.commit", "event_id": event_id}))
)
def append(self, *, audio: str, event_id: str | NotGiven = NOT_GIVEN) -> None:
"""Send this event to append audio bytes to the input audio buffer.
The audio
buffer is temporary storage you can write to and later commit. In Server VAD
mode, the audio buffer is used to detect speech and the server will decide
when to commit. When Server VAD is disabled, you must commit the audio buffer
manually.
The client may choose how much audio to place in each event up to a maximum
of 15 MiB, for example streaming smaller chunks from the client may allow the
VAD to be more responsive. Unlike made other client events, the server will
not send a confirmation response to this event.
"""
self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "input_audio_buffer.append", "audio": audio, "event_id": event_id}),
)
)
| RealtimeInputAudioBufferResource |
python | PrefectHQ__prefect | src/prefect/assets/core.py | {
"start": 260,
"end": 921
} | class ____(PrefectBaseModel):
"""
Metadata properties to configure on an Asset
"""
model_config: ClassVar[ConfigDict] = ConfigDict(frozen=True)
name: Optional[str] = Field(
default=None, description="Human readable name of the Asset."
)
url: Optional[str] = Field(
default=None, description="Visitable url to view the Asset."
)
description: Optional[str] = Field(
default=None,
description="Description of the Asset.",
max_length=MAX_ASSET_DESCRIPTION_LENGTH,
)
owners: Optional[list[str]] = Field(
default=None, description="Owners of the Asset."
)
| AssetProperties |
python | sqlalchemy__sqlalchemy | test/orm/test_core_compilation.py | {
"start": 2341,
"end": 14172
} | class ____(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_filter_by(self):
User, Address = self.classes("User", "Address")
stmt = select(User).filter_by(name="ed")
self.assert_compile(
stmt,
"SELECT users.id, users.name FROM users "
"WHERE users.name = :name_1",
)
def test_c_accessor_not_mutated_subq(self):
"""test #6394, ensure all_selected_columns is generated each time"""
User = self.classes.User
s1 = select(User.id)
eq_(s1.subquery().c.keys(), ["id"])
eq_(s1.subquery().c.keys(), ["id"])
def test_integration_w_8285_subc(self):
Address = self.classes.Address
s1 = select(
Address.id, Address.__table__.c["user_id", "email_address"]
)
self.assert_compile(
s1,
"SELECT addresses.id, addresses.user_id, "
"addresses.email_address FROM addresses",
)
subq = s1.subquery()
self.assert_compile(
select(subq.c.user_id, subq.c.id),
"SELECT anon_1.user_id, anon_1.id FROM (SELECT addresses.id AS "
"id, addresses.user_id AS user_id, addresses.email_address "
"AS email_address FROM addresses) AS anon_1",
)
def test_scalar_subquery_from_subq_same_source(self):
"""test #6394, ensure all_selected_columns is generated each time"""
User = self.classes.User
s1 = select(User.id)
for i in range(2):
stmt = s1.subquery().select().scalar_subquery()
self.assert_compile(
stmt,
"(SELECT anon_1.id FROM "
"(SELECT users.id AS id FROM users) AS anon_1)",
)
def test_froms_single_table(self):
User, Address = self.classes("User", "Address")
stmt = select(User).filter_by(name="ed")
eq_(stmt.get_final_froms(), [self.tables.users])
def test_froms_join(self):
User, Address = self.classes("User", "Address")
users, addresses = self.tables("users", "addresses")
stmt = select(User).join(User.addresses)
assert stmt.get_final_froms()[0].compare(users.join(addresses))
@testing.combinations(
(
lambda User: (User,),
lambda User: [
{
"name": "User",
"type": User,
"aliased": False,
"expr": User,
"entity": User,
}
],
),
(
lambda user_alias: (user_alias,),
lambda User, user_alias: [
{
"name": None,
"type": User,
"aliased": True,
"expr": user_alias,
"entity": user_alias,
}
],
),
(
lambda User: (User.id,),
lambda User: [
{
"name": "id",
"type": testing.eq_type_affinity(sqltypes.Integer),
"aliased": False,
"expr": User.id,
"entity": User,
}
],
),
(
lambda User, Address: (User.id, Address),
lambda User, Address: [
{
"name": "id",
"type": testing.eq_type_affinity(sqltypes.Integer),
"aliased": False,
"expr": User.id,
"entity": User,
},
{
"name": "Address",
"type": Address,
"aliased": False,
"expr": Address,
"entity": Address,
},
],
),
(
lambda User, Address: (User.id, text("whatever")),
lambda User, Address: [
{
"name": "id",
"type": testing.eq_type_affinity(sqltypes.Integer),
"aliased": False,
"expr": User.id,
"entity": User,
},
{
"name": None,
"type": testing.eq_type_affinity(sqltypes.NullType),
"aliased": False,
"expr": testing.eq_clause_element(text("whatever")),
"entity": None,
},
],
),
(
lambda user_table: (user_table,),
lambda user_table: [
{
"name": "id",
"type": testing.eq_type_affinity(sqltypes.Integer),
"expr": user_table.c.id,
},
{
"name": "name",
"type": testing.eq_type_affinity(sqltypes.String),
"expr": user_table.c.name,
},
],
),
argnames="cols, expected",
)
def test_column_descriptions(self, cols, expected):
User, Address = self.classes("User", "Address")
ua = aliased(User)
cols = testing.resolve_lambda(
cols,
User=User,
Address=Address,
user_alias=ua,
user_table=inspect(User).local_table,
)
expected = testing.resolve_lambda(
expected,
User=User,
Address=Address,
user_alias=ua,
user_table=inspect(User).local_table,
)
stmt = select(*cols)
eq_(stmt.column_descriptions, expected)
if stmt._propagate_attrs:
stmt = select(*cols).from_statement(stmt)
eq_(stmt.column_descriptions, expected)
@testing.combinations(insert, update, delete, argnames="dml_construct")
@testing.combinations(
(
lambda User: User,
lambda User: (User.id, User.name),
lambda User, user_table: {
"name": "User",
"type": User,
"expr": User,
"entity": User,
"table": user_table,
},
lambda User: [
{
"name": "id",
"type": testing.eq_type_affinity(sqltypes.Integer),
"aliased": False,
"expr": User.id,
"entity": User,
},
{
"name": "name",
"type": testing.eq_type_affinity(sqltypes.String),
"aliased": False,
"expr": User.name,
"entity": User,
},
],
),
argnames="entity, cols, expected_entity, expected_returning",
)
def test_dml_descriptions(
self, dml_construct, entity, cols, expected_entity, expected_returning
):
User, Address = self.classes("User", "Address")
lambda_args = dict(
User=User,
Address=Address,
user_table=inspect(User).local_table,
)
entity = testing.resolve_lambda(entity, **lambda_args)
cols = testing.resolve_lambda(cols, **lambda_args)
expected_entity = testing.resolve_lambda(
expected_entity, **lambda_args
)
expected_returning = testing.resolve_lambda(
expected_returning, **lambda_args
)
stmt = dml_construct(entity)
if cols:
stmt = stmt.returning(*cols)
eq_(stmt.entity_description, expected_entity)
eq_(stmt.returning_column_descriptions, expected_returning)
@testing.combinations(
(
lambda User, Address: select(User.name)
.select_from(User, Address)
.where(User.id == Address.user_id),
"SELECT users.name FROM users, addresses "
"WHERE users.id = addresses.user_id",
),
(
lambda User, Address: select(User.name)
.select_from(Address, User)
.where(User.id == Address.user_id),
"SELECT users.name FROM addresses, users "
"WHERE users.id = addresses.user_id",
),
)
def test_select_from_ordering(self, stmt, expected):
User, Address = self.classes("User", "Address")
lambda_args = dict(
User=User,
Address=Address,
user_table=inspect(User).local_table,
)
stmt = testing.resolve_lambda(stmt, **lambda_args)
self.assert_compile(stmt, expected)
def test_limit_offset_select(self):
User = self.classes.User
stmt = select(User.id).limit(5).offset(6)
self.assert_compile(
stmt,
"SELECT users.id FROM users LIMIT :param_1 OFFSET :param_2",
checkparams={"param_1": 5, "param_2": 6},
)
@testing.combinations(
(None, "ROWS ONLY"),
({"percent": True}, "PERCENT ROWS ONLY"),
({"percent": True, "with_ties": True}, "PERCENT ROWS WITH TIES"),
)
def test_fetch_offset_select(self, options, fetch_clause):
User = self.classes.User
if options is None:
stmt = select(User.id).fetch(5).offset(6)
else:
stmt = select(User.id).fetch(5, **options).offset(6)
self.assert_compile(
stmt,
"SELECT users.id FROM users OFFSET :param_1 "
"ROWS FETCH FIRST :param_2 %s" % (fetch_clause,),
checkparams={"param_1": 6, "param_2": 5},
)
@testing.variation("use_get_params", [True, False])
def test_annotated_cte_params_traverse(self, use_get_params):
"""test #12915
Tests the original issue in #12915 which was a specific issue
involving cloned_traverse with Annotated subclasses, where traversal
would not properly cover a CTE's self-referential structure.
This case still does not work in the general ORM case, so the
implementation of .params() was changed to not rely upon
cloned_traversal.
"""
User = self.classes.User
ids_param = bindparam("ids")
cte = select(User).where(User.id == ids_param).cte("cte")
ca = cte._annotate({"foo": "bar"})
stmt = select(ca)
if use_get_params:
stmt = stmt.params(ids=17)
else:
# test without using params(), in case the implementation
# for params() changes we still want to test cloned_traverse
def visit_bindparam(bind):
if bind.key == "ids":
bind.value = 17
bind.required = False
stmt = visitors.cloned_traverse(
stmt,
{"maintain_key": True, "detect_subquery_cols": True},
{"bindparam": visit_bindparam},
)
self.assert_compile(
stmt,
"WITH cte AS (SELECT users.id AS id, users.name AS name "
"FROM users WHERE users.id = :ids) "
"SELECT cte.id, cte.name FROM cte",
checkparams={"ids": 17},
)
def test_orm_cte_with_params(self, connection):
"""test for #12915's new implementation"""
User = self.classes.User
ids_param = bindparam("ids")
cte = select(User).where(User.id == ids_param).cte("cte")
stmt = select(aliased(User, cte.alias("a1"), adapt_on_names=True))
res = connection.execute(stmt, {"ids": 7}).all()
eq_(res, [(7, "jack")])
with Session(connection) as s:
res = s.scalars(stmt, {"ids": 7}).all()
eq_(res, [User(id=7, name="jack")])
| SelectableTest |
python | huggingface__transformers | src/transformers/generation/candidate_generator.py | {
"start": 31275,
"end": 32071
} | class ____(nn.Module):
"""
A class to prune and reindex the language model head.
This class prunes the language model head to only include the specified token IDs and reindexes the logits
to map back to the original vocabulary.
Args:
original_lm_head (nn.Module): The original language model head.
token_ids (list[int]): The list of token IDs to keep.
"""
def __init__(self, original_lm_head, assistant_overlap_token_ids):
super().__init__()
self.pruned_lm_head = prune_linear_layer(original_lm_head, assistant_overlap_token_ids).to(
original_lm_head.weight.dtype
)
def forward(self, hidden_states):
pruned_logits = self.pruned_lm_head(hidden_states)
return pruned_logits
| _PruneReindexingLMHead |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_organization_coding_agents.py | {
"start": 1919,
"end": 2470
} | class ____(CodingAgentClient):
"""Mock coding agent client for tests."""
base_url = "https://api.mock-agent.com/v1"
def launch(self, webhook_url: str, request: CodingAgentLaunchRequest) -> CodingAgentState:
"""Mock implementation of launch method."""
return CodingAgentState(
id="mock-123",
status=CodingAgentStatus.PENDING,
provider=CodingAgentProviderType.CURSOR_BACKGROUND_AGENT,
name="Mock Agent",
started_at=datetime.now(UTC),
)
| MockCodingAgentClient |
python | scikit-learn__scikit-learn | sklearn/linear_model/_coordinate_descent.py | {
"start": 98126,
"end": 102866
} | class ____(MultiTaskElasticNet):
"""Multi-task Lasso model trained with L1/L2 mixed-norm as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||Y - XW||^2_Fro + alpha * ||W||_21
Where::
||W||_21 = \\sum_i \\sqrt{\\sum_j w_{ij}^2}
i.e. the sum of norm of each row.
Read more in the :ref:`User Guide <multi_task_lasso>`.
Parameters
----------
alpha : float, default=1.0
Constant that multiplies the L1/L2 term. Defaults to 1.0.
fit_intercept : bool, default=True
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be centered).
copy_X : bool, default=True
If ``True``, X will be copied; else, it may be overwritten.
max_iter : int, default=1000
The maximum number of iterations.
tol : float, default=1e-4
The tolerance for the optimization: if the updates are smaller or equal to
``tol``, the optimization code checks the dual gap for optimality and continues
until it is smaller or equal to ``tol``.
warm_start : bool, default=False
When set to ``True``, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
random_state : int, RandomState instance, default=None
The seed of the pseudo random number generator that selects a random
feature to update. Used when ``selection`` == 'random'.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
selection : {'cyclic', 'random'}, default='cyclic'
If set to 'random', a random coefficient is updated every iteration
rather than looping over features sequentially by default. This
(setting to 'random') often leads to significantly faster convergence
especially when tol is higher than 1e-4.
Attributes
----------
coef_ : ndarray of shape (n_targets, n_features)
Parameter vector (W in the cost function formula).
Note that ``coef_`` stores the transpose of ``W``, ``W.T``.
intercept_ : ndarray of shape (n_targets,)
Independent term in decision function.
n_iter_ : int
Number of iterations run by the coordinate descent solver to reach
the specified tolerance.
dual_gap_ : ndarray of shape (n_alphas,)
The dual gaps at the end of the optimization for each alpha.
eps_ : float
The tolerance scaled scaled by the variance of the target `y`.
sparse_coef_ : sparse matrix of shape (n_features,) or \
(n_targets, n_features)
Sparse representation of the `coef_`.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
Lasso: Linear Model trained with L1 prior as regularizer (aka the Lasso).
MultiTaskLassoCV: Multi-task L1 regularized linear model with built-in
cross-validation.
MultiTaskElasticNetCV: Multi-task L1/L2 ElasticNet with built-in cross-validation.
Notes
-----
The algorithm used to fit the model is coordinate descent.
To avoid unnecessary memory duplication the X and y arguments of the fit
method should be directly passed as Fortran-contiguous numpy arrays.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.MultiTaskLasso(alpha=0.1)
>>> clf.fit([[0, 1], [1, 2], [2, 4]], [[0, 0], [1, 1], [2, 3]])
MultiTaskLasso(alpha=0.1)
>>> print(clf.coef_)
[[0. 0.60809415]
[0. 0.94592424]]
>>> print(clf.intercept_)
[-0.41888636 -0.87382323]
"""
_parameter_constraints: dict = {
**MultiTaskElasticNet._parameter_constraints,
}
_parameter_constraints.pop("l1_ratio")
def __init__(
self,
alpha=1.0,
*,
fit_intercept=True,
copy_X=True,
max_iter=1000,
tol=1e-4,
warm_start=False,
random_state=None,
selection="cyclic",
):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.copy_X = copy_X
self.tol = tol
self.warm_start = warm_start
self.l1_ratio = 1.0
self.random_state = random_state
self.selection = selection
| MultiTaskLasso |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/containers/dataframe.py | {
"start": 2218,
"end": 14306
} | class ____:
"""A representation of a dataframe."""
column_map: dict[str, Column]
table: plc.Table
columns: list[NamedColumn]
stream: Stream
def __init__(self, columns: Iterable[Column], stream: Stream) -> None:
columns = list(columns)
if any(c.name is None for c in columns):
raise ValueError("All columns must have a name")
self.columns = [cast(NamedColumn, c) for c in columns]
self.dtypes = [c.dtype for c in self.columns]
self.column_map = {c.name: c for c in self.columns}
self.table = plc.Table([c.obj for c in self.columns])
self.stream = stream
def copy(self) -> Self:
"""Return a shallow copy of self."""
return type(self)((c.copy() for c in self.columns), stream=self.stream)
def to_polars(self) -> pl.DataFrame:
"""Convert to a polars DataFrame."""
# If the arrow table has empty names, from_arrow produces
# column_$i. But here we know there is only one such column
# (by construction) and it should have an empty name.
# https://github.com/pola-rs/polars/issues/11632
# To guarantee we produce correct names, we therefore
# serialise with names we control and rename with that map.
name_map = {f"column_{i}": name for i, name in enumerate(self.column_map)}
metadata = [
_create_polars_column_metadata(name, dtype.polars_type)
for name, dtype in zip(name_map, self.dtypes, strict=True)
]
table_with_metadata = _ObjectWithArrowMetadata(
self.table, metadata, self.stream
)
df = pl.DataFrame(table_with_metadata)
return df.rename(name_map).with_columns(
pl.col(c.name).set_sorted(descending=c.order == plc.types.Order.DESCENDING)
if c.is_sorted
else pl.col(c.name)
for c in self.columns
)
@cached_property
def column_names_set(self) -> frozenset[str]:
"""Return the column names as a set."""
return frozenset(self.column_map)
@cached_property
def column_names(self) -> list[str]:
"""Return a list of the column names."""
return list(self.column_map)
@cached_property
def num_columns(self) -> int:
"""Number of columns."""
return len(self.column_map)
@cached_property
def num_rows(self) -> int:
"""Number of rows."""
return self.table.num_rows() if self.column_map else 0
@classmethod
def from_polars(cls, df: pl.DataFrame, stream: Stream) -> Self:
"""
Create from a polars dataframe.
Parameters
----------
df
Polars dataframe to convert
stream
CUDA stream used for device memory operations and kernel launches
on this dataframe.
Returns
-------
New dataframe representing the input.
"""
plc_table = plc.Table.from_arrow(df, stream=stream)
return cls(
(
Column(d_col, name=name, dtype=DataType(h_col.dtype)).copy_metadata(
h_col
)
for d_col, h_col, name in zip(
plc_table.columns(), df.iter_columns(), df.columns, strict=True
)
),
stream=stream,
)
@classmethod
def from_table(
cls,
table: plc.Table,
names: Sequence[str],
dtypes: Sequence[DataType],
stream: Stream,
) -> Self:
"""
Create from a pylibcudf table.
Parameters
----------
table
Pylibcudf table to obtain columns from
names
Names for the columns
dtypes
Dtypes for the columns
stream
CUDA stream used for device memory operations and kernel launches
on this dataframe. The caller is responsible for ensuring that
the data in ``table`` is valid on ``stream``.
Returns
-------
New dataframe sharing data with the input table.
Raises
------
ValueError
If the number of provided names does not match the
number of columns in the table.
"""
if table.num_columns() != len(names):
raise ValueError("Mismatching name and table length.")
return cls(
(
Column(c, name=name, dtype=dtype)
for c, name, dtype in zip(table.columns(), names, dtypes, strict=True)
),
stream=stream,
)
@classmethod
def deserialize(
cls,
header: DataFrameHeader,
frames: tuple[memoryview[bytes], plc.gpumemoryview],
stream: Stream,
) -> Self:
"""
Create a DataFrame from a serialized representation returned by `.serialize()`.
Parameters
----------
header
The (unpickled) metadata required to reconstruct the object.
frames
Two-tuple of frames (a memoryview and a gpumemoryview).
stream
CUDA stream used for device memory operations and kernel launches
on this dataframe. The caller is responsible for ensuring that
the data in ``frames`` is valid on ``stream``.
Returns
-------
DataFrame
The deserialized DataFrame.
"""
packed_metadata, packed_gpu_data = frames
table = plc.contiguous_split.unpack_from_memoryviews(
packed_metadata,
packed_gpu_data,
stream,
)
return cls(
(
Column(c, **Column.deserialize_ctor_kwargs(kw))
for c, kw in zip(table.columns(), header["columns_kwargs"], strict=True)
),
stream=stream,
)
def serialize(
self,
stream: Stream | None = None,
) -> tuple[DataFrameHeader, tuple[memoryview[bytes], plc.gpumemoryview]]:
"""
Serialize the table into header and frames.
Follows the Dask serialization scheme with a picklable header (dict) and
a tuple of frames (in this case a contiguous host and device buffer).
To enable dask support, dask serializers must be registered
>>> from cudf_polars.experimental.dask_serialize import register
>>> register()
Parameters
----------
stream
CUDA stream used for device memory operations and kernel launches
on this dataframe.
Returns
-------
header
A dict containing any picklable metadata required to reconstruct the object.
frames
Two-tuple of frames suitable for passing to `plc.contiguous_split.unpack_from_memoryviews`
"""
packed = plc.contiguous_split.pack(self.table, stream=stream)
# Keyword arguments for `Column.__init__`.
columns_kwargs: list[ColumnOptions] = [
col.serialize_ctor_kwargs() for col in self.columns
]
header: DataFrameHeader = {
"columns_kwargs": columns_kwargs,
"frame_count": 2,
}
return header, packed.release()
def sorted_like(
self, like: DataFrame, /, *, subset: Set[str] | None = None
) -> Self:
"""
Return a shallow copy with sortedness copied from like.
Parameters
----------
like
The dataframe to copy from
subset
Optional subset of columns from which to copy data.
Returns
-------
Shallow copy of self with metadata set.
Raises
------
ValueError
If there is a name mismatch between self and like.
"""
if like.column_names != self.column_names:
raise ValueError("Can only copy from identically named frame")
subset = self.column_names_set if subset is None else subset
return type(self)(
(
c.sorted_like(other) if c.name in subset else c
for c, other in zip(self.columns, like.columns, strict=True)
),
stream=self.stream,
)
def with_columns(
self,
columns: Iterable[Column],
*,
replace_only: bool = False,
stream: Stream,
) -> Self:
"""
Return a new dataframe with extra columns.
Parameters
----------
columns
Columns to add
replace_only
If true, then only replacements are allowed (matching by name).
stream
CUDA stream used for device memory operations and kernel launches.
The caller is responsible for ensuring that
1. The data in ``columns`` is valid on ``stream``.
2. No additional operations occur on ``self.stream`` with the
original data in ``self``.
Returns
-------
New dataframe
Notes
-----
If column names overlap, newer names replace older ones, and
appear in the same order as the original frame.
"""
new = {c.name: c for c in columns}
if replace_only and not self.column_names_set.issuperset(new.keys()):
raise ValueError("Cannot replace with non-existing names")
return type(self)((self.column_map | new).values(), stream=stream)
def discard_columns(self, names: Set[str]) -> Self:
"""Drop columns by name."""
return type(self)(
(column for column in self.columns if column.name not in names),
stream=self.stream,
)
def select(self, names: Sequence[str] | Mapping[str, Any]) -> Self:
"""Select columns by name returning DataFrame."""
try:
return type(self)(
(self.column_map[name] for name in names), stream=self.stream
)
except KeyError as e:
raise ValueError("Can't select missing names") from e
def rename_columns(self, mapping: Mapping[str, str]) -> Self:
"""Rename some columns."""
return type(self)(
(c.rename(mapping.get(c.name, c.name)) for c in self.columns),
stream=self.stream,
)
def select_columns(self, names: Set[str]) -> list[Column]:
"""Select columns by name."""
return [c for c in self.columns if c.name in names]
def filter(self, mask: Column) -> Self:
"""
Return a filtered table given a mask.
Parameters
----------
mask
Boolean mask to apply to the dataframe. It is the caller's
responsibility to ensure that ``mask`` is valid on ``self.stream``.
A mask that is derived from ``self`` via a computation on ``self.stream``
automatically satisfies this requirement.
Returns
-------
Filtered dataframe
"""
table = plc.stream_compaction.apply_boolean_mask(
self.table, mask.obj, stream=self.stream
)
return (
type(self)
.from_table(table, self.column_names, self.dtypes, self.stream)
.sorted_like(self)
)
def slice(self, zlice: Slice | None) -> Self:
"""
Slice a dataframe.
Parameters
----------
zlice
optional, tuple of start and length, negative values of start
treated as for python indexing. If not provided, returns self.
Returns
-------
New dataframe (if zlice is not None) otherwise self (if it is)
"""
if zlice is None:
return self
(table,) = plc.copying.slice(
self.table,
conversion.from_polars_slice(zlice, num_rows=self.num_rows),
stream=self.stream,
)
return (
type(self)
.from_table(table, self.column_names, self.dtypes, self.stream)
.sorted_like(self)
)
| DataFrame |
python | getsentry__sentry | src/sentry/models/group.py | {
"start": 7550,
"end": 10378
} | class ____(Enum):
LATEST = ["project_id", "-timestamp", "-event_id"]
OLDEST = ["project_id", "timestamp", "event_id"]
RECOMMENDED = [
"-replay.id",
"-trace.sampled",
"num_processing_errors",
"-profile.id",
"-timestamp",
"-event_id",
]
def get_oldest_or_latest_event(
group: Group,
ordering: EventOrdering,
conditions: Sequence[Condition] | None = None,
start: datetime | None = None,
end: datetime | None = None,
) -> GroupEvent | None:
if group.issue_category == GroupCategory.ERROR:
dataset = Dataset.Events
else:
dataset = Dataset.IssuePlatform
all_conditions = [
Condition(Column("project_id"), Op.IN, [group.project.id]),
Condition(Column("group_id"), Op.IN, [group.id]),
]
if conditions:
all_conditions.extend(conditions)
events = eventstore.backend.get_events_snql(
organization_id=group.project.organization_id,
group_id=group.id,
start=start,
end=end,
conditions=all_conditions,
limit=1,
orderby=ordering.value,
referrer="Group.get_latest",
dataset=dataset,
tenant_ids={"organization_id": group.project.organization_id},
)
if events:
return events[0].for_group(group)
return None
def get_recommended_event(
group: Group,
conditions: Sequence[Condition] | None = None,
start: datetime | None = None,
end: datetime | None = None,
) -> GroupEvent | None:
if group.issue_category == GroupCategory.ERROR:
dataset = Dataset.Events
else:
dataset = Dataset.IssuePlatform
all_conditions = [
Condition(Column("project_id"), Op.IN, [group.project.id]),
Condition(Column("group_id"), Op.IN, [group.id]),
]
if conditions:
all_conditions.extend(conditions)
default_end = group.last_seen + timedelta(minutes=1)
default_start = default_end - timedelta(days=7)
expired, _ = outside_retention_with_modified_start(
start=start if start else default_start,
end=end if end else default_end,
organization=Organization(group.project.organization_id),
)
if expired:
return None
events = eventstore.backend.get_events_snql(
organization_id=group.project.organization_id,
group_id=group.id,
start=start if start else default_start,
end=end if end else default_end,
conditions=all_conditions,
limit=1,
orderby=EventOrdering.RECOMMENDED.value,
referrer="Group.get_helpful",
dataset=dataset,
tenant_ids={"organization_id": group.project.organization_id},
inner_limit=1000,
)
if events:
return events[0].for_group(group)
return None
| EventOrdering |
python | ray-project__ray | python/ray/tune/tests/test_tuner.py | {
"start": 1395,
"end": 1530
} | class ____(DummyTrainer):
def training_loop(self) -> None:
raise RuntimeError("There is an error in trainer!")
| FailingTrainer |
python | google__jax | jax/_src/pallas/mosaic_gpu/lowering.py | {
"start": 12954,
"end": 13442
} | class ____:
grid: Sequence[Hashable]
cluster: Sequence[Hashable] = ()
wg: Hashable | None = None
def __iter__(self) -> Iterator[Hashable]:
return itertools.chain(
self.grid, self.cluster, [self.wg] if self.wg is not None else []
)
def reverse(self) -> "_AxisNames":
return _AxisNames(self.grid[::-1], self.cluster[::-1], self.wg)
AnyBarrierRef = (
mgpu.BarrierRef | mgpu.DialectBarrierRef | mgpu.CollectiveBarrierRef
)
@dataclasses.dataclass
| _AxisNames |
python | fluentpython__example-code | 03-dict-set/strkeydict.py | {
"start": 1148,
"end": 1508
} | class ____(collections.UserDict): # <1>
def __missing__(self, key): # <2>
if isinstance(key, str):
raise KeyError(key)
return self[str(key)]
def __contains__(self, key):
return str(key) in self.data # <3>
def __setitem__(self, key, item):
self.data[str(key)] = item # <4>
# END STRKEYDICT
| StrKeyDict |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.