language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 173823,
"end": 181835
} | class ____(TestZarrDirectoryStore):
@contextlib.contextmanager
def temp_dir(self) -> Iterator[tuple[str, str]]:
with tempfile.TemporaryDirectory() as d:
store = os.path.join(d, "test.zarr")
yield d, store
@contextlib.contextmanager
def roundtrip_dir(
self,
data,
store,
save_kwargs=None,
open_kwargs=None,
allow_cleanup_failure=False,
) -> Iterator[Dataset]:
if save_kwargs is None:
save_kwargs = {}
if open_kwargs is None:
open_kwargs = {}
data.to_zarr(store, **save_kwargs, **self.version_kwargs)
with xr.open_dataset(
store, engine="zarr", **open_kwargs, **self.version_kwargs
) as ds:
yield ds
@requires_dask
def test_default_zarr_fill_value(self):
inputs = xr.Dataset({"floats": ("x", [1.0]), "ints": ("x", [1])}).chunk()
expected = xr.Dataset({"floats": ("x", [np.nan]), "ints": ("x", [0])})
with self.temp_dir() as (_d, store):
inputs.to_zarr(store, compute=False)
with open_dataset(store) as on_disk:
assert np.isnan(on_disk.variables["floats"].encoding["_FillValue"])
assert (
"_FillValue" not in on_disk.variables["ints"].encoding
) # use default
if not has_zarr_v3:
# zarr-python v2 interprets fill_value=None inconsistently
del on_disk["ints"]
del expected["ints"]
assert_identical(expected, on_disk)
@pytest.mark.parametrize("consolidated", [True, False, None])
@pytest.mark.parametrize("write_empty", [True, False, None])
def test_write_empty(
self,
consolidated: bool | None,
write_empty: bool | None,
) -> None:
def assert_expected_files(expected: list[str], store: str) -> None:
"""Convenience for comparing with actual files written"""
ls = []
test_root = os.path.join(store, "test")
for root, _, files in os.walk(test_root):
ls.extend(
[
os.path.join(root, f).removeprefix(test_root).lstrip("/")
for f in files
]
)
assert set(expected) == {
file.lstrip("c/")
for file in ls
if (file not in (".zattrs", ".zarray", "zarr.json"))
}
# The zarr format is set by the `default_zarr_format`
# pytest fixture that acts on a superclass
zarr_format_3 = has_zarr_v3 and zarr.config.config["default_zarr_format"] == 3
if (write_empty is False) or (write_empty is None and has_zarr_v3):
expected = ["0.1.0"]
else:
expected = [
"0.0.0",
"0.0.1",
"0.1.0",
"0.1.1",
]
# use nan for default fill_value behaviour
data = np.array([np.nan, np.nan, 1.0, np.nan]).reshape((1, 2, 2))
if zarr_format_3:
# transform to the path style of zarr 3
# e.g. 0/0/1
expected = [e.replace(".", "/") for e in expected]
ds = xr.Dataset(data_vars={"test": (("Z", "Y", "X"), data)})
if has_dask:
ds["test"] = ds["test"].chunk(1)
encoding = None
else:
encoding = {"test": {"chunks": (1, 1, 1)}}
with self.temp_dir() as (_d, store):
ds.to_zarr(
store,
mode="w",
encoding=encoding,
write_empty_chunks=write_empty,
)
# check expected files after a write
assert_expected_files(expected, store)
with self.roundtrip_dir(
ds,
store,
save_kwargs={
"mode": "a",
"append_dim": "Z",
"write_empty_chunks": write_empty,
},
) as a_ds:
expected_ds = xr.concat([ds, ds], dim="Z")
assert_identical(a_ds, expected_ds.compute())
# add the new files we expect to be created by the append
# that was performed by the roundtrip_dir
if (write_empty is False) or (write_empty is None and has_zarr_v3):
expected.append("1.1.0")
elif not has_zarr_v3 or has_zarr_v3_async_oindex:
# this was broken from zarr 3.0.0 until 3.1.2
# async oindex released in 3.1.2 along with a fix
# for write_empty_chunks in append
expected.extend(
[
"1.1.0",
"1.0.0",
"1.0.1",
"1.1.1",
]
)
else:
expected.append("1.1.0")
if zarr_format_3:
expected = [e.replace(".", "/") for e in expected]
assert_expected_files(expected, store)
def test_avoid_excess_metadata_calls(self) -> None:
"""Test that chunk requests do not trigger redundant metadata requests.
This test targets logic in backends.zarr.ZarrArrayWrapper, asserting that calls
to retrieve chunk data after initialization do not trigger additional
metadata requests.
https://github.com/pydata/xarray/issues/8290
"""
ds = xr.Dataset(data_vars={"test": (("Z",), np.array([123]).reshape(1))})
# The call to retrieve metadata performs a group lookup. We patch Group.__getitem__
# so that we can inspect calls to this method - specifically count of calls.
# Use of side_effect means that calls are passed through to the original method
# rather than a mocked method.
Group: Any
if has_zarr_v3:
Group = zarr.AsyncGroup
patched = patch.object(
Group, "getitem", side_effect=Group.getitem, autospec=True
)
else:
Group = zarr.Group
patched = patch.object(
Group, "__getitem__", side_effect=Group.__getitem__, autospec=True
)
with self.create_zarr_target() as store, patched as mock:
ds.to_zarr(store, mode="w")
# We expect this to request array metadata information, so call_count should be == 1,
xrds = xr.open_zarr(store)
call_count = mock.call_count
assert call_count == 1
# compute() requests array data, which should not trigger additional metadata requests
# we assert that the number of calls has not increased after fetchhing the array
xrds.test.compute(scheduler="sync")
assert mock.call_count == call_count
@requires_zarr
@requires_fsspec
@pytest.mark.skipif(has_zarr_v3, reason="Difficult to test.")
def test_zarr_storage_options() -> None:
pytest.importorskip("aiobotocore")
ds = create_test_data()
store_target = "memory://test.zarr"
ds.to_zarr(store_target, storage_options={"test": "zarr_write"})
ds_a = xr.open_zarr(store_target, storage_options={"test": "zarr_read"})
assert_identical(ds, ds_a)
@requires_zarr
def test_zarr_version_deprecated() -> None:
ds = create_test_data()
store: Any
if has_zarr_v3:
store = KVStore()
else:
store = {}
with pytest.warns(FutureWarning, match="zarr_version"):
ds.to_zarr(store=store, zarr_version=2)
with pytest.warns(FutureWarning, match="zarr_version"):
xr.open_zarr(store=store, zarr_version=2)
with pytest.raises(ValueError, match="zarr_format"):
xr.open_zarr(store=store, zarr_version=2, zarr_format=3)
@requires_scipy
| TestZarrWriteEmpty |
python | pytorch__pytorch | test/inductor/test_ordered_set.py | {
"start": 914,
"end": 15507
} | class ____(TestCase):
# Tests common to both OrderedSet and frozenset
thetype = OrderedSet
basetype = OrderedSet
def setUp(self):
super().setUp()
self.word = word = "simsalabim"
self.otherword = "madagascar"
self.letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
self.s = self.thetype(word)
self.d = dict.fromkeys(word)
def test_new_or_init(self):
self.assertRaises(TypeError, self.thetype, [], 2)
self.assertRaises(TypeError, OrderedSet().__init__, a=1)
def test_uniquification(self):
actual = sorted(self.s)
expected = sorted(self.d)
self.assertEqual(actual, expected)
self.assertRaises(PassThru, self.thetype, check_pass_thru())
self.assertRaises(TypeError, self.thetype, [[]])
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
self.assertRaises(TypeError, self.s.__contains__, [[]])
#
# s = self.thetype([frozenset(self.letters)])
# self.assertIn(self.thetype(self.letters), s)
def test_union(self):
u = self.s.union(self.otherword)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(u), self.basetype)
self.assertRaises(PassThru, self.s.union, check_pass_thru())
self.assertRaises(TypeError, self.s.union, [[]])
for C in OrderedSet, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype("abcba").union(C("cdc")), OrderedSet("abcd"))
self.assertEqual(
self.thetype("abcba").union(C("efgfe")), OrderedSet("abcefg")
)
self.assertEqual(self.thetype("abcba").union(C("ccb")), OrderedSet("abc"))
self.assertEqual(self.thetype("abcba").union(C("ef")), OrderedSet("abcef"))
self.assertEqual(
self.thetype("abcba").union(C("ef"), C("fg")), OrderedSet("abcefg")
)
# Issue #6573
x = self.thetype()
self.assertEqual(
x.union(OrderedSet([1]), x, OrderedSet([2])), self.thetype([1, 2])
)
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | OrderedSet(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
try:
self.s | self.otherword
except TypeError:
pass
# else:
# self.fail("s|t did not screen-out general iterables")
def test_intersection(self):
i = self.s.intersection(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
for C in OrderedSet, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(
self.thetype("abcba").intersection(C("cdc")), OrderedSet("cc")
)
self.assertEqual(
self.thetype("abcba").intersection(C("efgfe")), OrderedSet("")
)
self.assertEqual(
self.thetype("abcba").intersection(C("ccb")), OrderedSet("bc")
)
self.assertEqual(
self.thetype("abcba").intersection(C("ef")), OrderedSet("")
)
self.assertEqual(
self.thetype("abcba").intersection(C("cbcf"), C("bag")), OrderedSet("b")
)
s = self.thetype("abcba")
z = s.intersection()
if self.thetype == frozenset():
self.assertEqual(id(s), id(z))
else:
self.assertNotEqual(id(s), id(z))
def test_isdisjoint(self):
def f(s1, s2):
"Pure python equivalent of isdisjoint()"
return not OrderedSet(s1).intersection(s2)
for large in "", "a", "ab", "abc", "ababac", "cdc", "cc", "efgfe", "ccb", "ef":
s1 = self.thetype(large)
for rarg in (
"",
"a",
"ab",
"abc",
"ababac",
"cdc",
"cc",
"efgfe",
"ccb",
"ef",
):
for C in OrderedSet, frozenset, dict.fromkeys, str, list, tuple:
s2 = C(rarg)
actual = s1.isdisjoint(s2)
expected = f(s1, s2)
self.assertEqual(actual, expected)
self.assertTrue(actual is True or actual is False)
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & OrderedSet(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
try:
self.s & self.otherword
except TypeError:
pass
# else:
# self.fail("s&t did not screen-out general iterables")
def test_difference(self):
i = self.s.difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.difference, check_pass_thru())
self.assertRaises(TypeError, self.s.difference, [[]])
for C in OrderedSet, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(
self.thetype("abcba").difference(C("cdc")), OrderedSet("ab")
)
self.assertEqual(
self.thetype("abcba").difference(C("efgfe")), OrderedSet("abc")
)
self.assertEqual(
self.thetype("abcba").difference(C("ccb")), OrderedSet("a")
)
self.assertEqual(
self.thetype("abcba").difference(C("ef")), OrderedSet("abc")
)
self.assertEqual(self.thetype("abcba").difference(), OrderedSet("abc"))
self.assertEqual(
self.thetype("abcba").difference(C("a"), C("b")), OrderedSet("c")
)
def test_sub(self):
i = self.s.difference(self.otherword)
self.assertEqual(self.s - OrderedSet(self.otherword), i)
self.assertEqual(self.s - frozenset(self.otherword), i)
try:
self.s - self.otherword
except TypeError:
pass
# else:
# self.fail("s-t did not screen-out general iterables")
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
for C in OrderedSet, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(
self.thetype("abcba").symmetric_difference(C("cdc")),
OrderedSet("abd"), # codespell:ignore
)
self.assertEqual(
self.thetype("abcba").symmetric_difference(C("efgfe")),
OrderedSet("abcefg"),
)
self.assertEqual(
self.thetype("abcba").symmetric_difference(C("ccb")), OrderedSet("a")
)
self.assertEqual(
self.thetype("abcba").symmetric_difference(C("ef")), OrderedSet("abcef")
)
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ OrderedSet(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
try:
self.s ^ self.otherword
except TypeError:
pass
# else:
# self.fail("s^t did not screen-out general iterables")
def test_equality(self):
self.assertEqual(self.s, OrderedSet(self.word))
self.assertEqual(self.s, frozenset(self.word))
self.assertEqual(self.s == self.word, False)
self.assertNotEqual(self.s, OrderedSet(self.otherword))
self.assertNotEqual(self.s, frozenset(self.otherword))
self.assertEqual(self.s != self.word, True)
def test_setOfFrozensets(self):
t = map(frozenset, ["abcdef", "bcd", "bdcb", "fed", "fedccba"])
s = self.thetype(t)
self.assertEqual(len(s), 3)
def test_sub_and_super(self):
p, q, r = map(self.thetype, ["ab", "abcde", "def"])
self.assertTrue(p < q)
self.assertTrue(p <= q)
self.assertTrue(q <= q)
self.assertTrue(q > p)
self.assertTrue(q >= p)
self.assertFalse(q < r)
self.assertFalse(q <= r)
self.assertFalse(q > r)
self.assertFalse(q >= r)
self.assertTrue(OrderedSet("a").issubset("abc"))
self.assertTrue(OrderedSet("abc").issuperset("a"))
self.assertFalse(OrderedSet("a").issubset("cbs"))
self.assertFalse(OrderedSet("cbs").issuperset("a"))
def test_pickling(self):
for i in range(pickle.HIGHEST_PROTOCOL + 1):
if type(self.s) not in (OrderedSet, frozenset):
self.s.x = ["x"]
self.s.z = ["z"]
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s, dup, "%s != %s" % (self.s, dup)) # noqa: UP031
if type(self.s) not in (OrderedSet, frozenset):
self.assertEqual(self.s.x, dup.x)
self.assertEqual(self.s.z, dup.z)
self.assertFalse(hasattr(self.s, "y"))
del self.s.x, self.s.z
@unittest.skip("Pickling nyi")
def test_iterator_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
itorg = iter(self.s)
data = self.thetype(self.s)
d = pickle.dumps(itorg, proto)
it = pickle.loads(d)
# Set iterators unpickle as list iterators due to the
# undefined order of OrderedSet items.
# self.assertEqual(type(itorg), type(it))
self.assertIsInstance(it, collections.abc.Iterator)
self.assertEqual(self.thetype(it), data)
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
continue
d = pickle.dumps(it, proto)
it = pickle.loads(d)
self.assertEqual(self.thetype(it), data - self.thetype((drop,)))
def test_deepcopy(self):
class Tracer:
def __init__(self, value):
self.value = value
def __hash__(self):
return self.value
def __deepcopy__(self, memo=None):
return Tracer(self.value + 1)
t = Tracer(10)
s = self.thetype([t])
dup = copy.deepcopy(s)
self.assertNotEqual(id(s), id(dup))
for elem in dup:
newt = elem
self.assertNotEqual(id(t), id(newt))
self.assertEqual(t.value + 1, newt.value)
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
class A:
pass
s = OrderedSet(A() for i in range(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.OrderedSet = OrderedSet([elem])
def test_subclass_with_custom_hash(self):
# Bug #1257731
class H(self.thetype):
def __hash__(self):
return int(id(self) & 0x7FFFFFFF)
s = H()
f = OrderedSet()
f.add(s)
self.assertIn(s, f)
f.remove(s)
f.add(s)
f.discard(s)
def test_badcmp(self):
s = self.thetype([BadCmp()])
# Detect comparison errors during insertion and lookup
self.assertRaises(RuntimeError, self.thetype, [BadCmp(), BadCmp()])
self.assertRaises(RuntimeError, s.__contains__, BadCmp())
# Detect errors during mutating operations
if hasattr(s, "add"):
self.assertRaises(RuntimeError, s.add, BadCmp())
self.assertRaises(RuntimeError, s.discard, BadCmp())
self.assertRaises(RuntimeError, s.remove, BadCmp())
@unittest.skip("Different repr")
def test_cyclical_repr(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
if self.thetype == OrderedSet:
self.assertEqual(repr(s), "{OrderedSet(...)}")
else:
name = repr(s).partition("(")[0] # strip class name
self.assertEqual(repr(s), "%s({%s(...)})" % (name, name)) # noqa: UP031
@unittest.skip("Different hashing")
def test_do_not_rehash_dict_keys(self):
n = 10
d = dict.fromkeys(map(HashCountingInt, range(n)))
self.assertEqual(sum(elem.hash_count for elem in d), n)
s = self.thetype(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
s.difference(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
if hasattr(s, "symmetric_difference_update"):
s.symmetric_difference_update(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
d2 = dict.fromkeys(OrderedSet(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d), 123)
self.assertEqual(sum(elem.hash_count for elem in d), n)
self.assertEqual(d3, dict.fromkeys(d, 123))
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for OrderedSet iterator object
class C:
pass
obj = C()
ref = weakref.ref(obj)
container = OrderedSet([obj, 1])
obj.x = iter(container)
del obj, container
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
def test_free_after_iterating(self):
support.check_free_after_iterating(self, iter, self.thetype)
| TestJointOps |
python | getsentry__sentry | src/sentry/sentry_apps/api/endpoints/sentry_app_authorizations.py | {
"start": 1489,
"end": 1764
} | class ____(serializers.Serializer):
client_id = serializers.CharField(required=True, allow_null=False)
grant_type = serializers.CharField(required=True, allow_null=False)
code = serializers.CharField(required=True, allow_null=False)
| SentryAppAuthorizationSerializer |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_health.py | {
"start": 3176,
"end": 3519
} | class ____(graphene.ObjectType):
numFailedPartitions = graphene.NonNull(graphene.Int)
numMissingPartitions = graphene.NonNull(graphene.Int)
totalNumPartitions = graphene.NonNull(graphene.Int)
class Meta:
name = "AssetHealthMaterializationDegradedPartitionedMeta"
| GrapheneAssetHealthMaterializationDegradedPartitionedMeta |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/unit_tests/test_google_ads.py | {
"start": 643,
"end": 10518
} | class ____:
def __init__(self, date: str):
self._mock_date = date
def __getattr__(self, attr):
if attr == "date":
return date.fromisoformat(self._mock_date)
return MockedDateSegment(self._mock_date)
SAMPLE_CONFIG = {
"credentials": {
"developer_token": "developer_token",
"client_id": "client_id",
"client_secret": "client_secret",
"refresh_token": "refresh_token",
}
}
EXPECTED_CRED = {
"developer_token": "developer_token",
"client_id": "client_id",
"client_secret": "client_secret",
"refresh_token": "refresh_token",
"use_proto_plus": True,
}
def test_google_ads_init(mocker):
google_client_mocker = mocker.patch("source_google_ads.google_ads.GoogleAdsClient", return_value=MockGoogleAdsClient)
_ = GoogleAds(**SAMPLE_CONFIG)
assert google_client_mocker.load_from_dict.call_args[0][0] == EXPECTED_CRED
def test_google_ads_wrong_permissions(mocker):
mocker.patch("source_google_ads.google_ads.GoogleAdsClient.load_from_dict", side_effect=exceptions.RefreshError("invalid_grant"))
with pytest.raises(AirbyteTracedException) as e:
GoogleAds(**SAMPLE_CONFIG)
expected_message = "The authentication to Google Ads has expired. Re-authenticate to restore access to Google Ads."
assert e.value.message == expected_message
def test_get_accessible_accounts_retry_on_service_unavailable(mocker):
"""Test that get_accessible_accounts retries on ServiceUnavailable errors"""
from google.api_core.exceptions import ServiceUnavailable
mocker.patch("time.sleep") # Mock sleep to speed up test
mocker.patch("source_google_ads.google_ads.GoogleAdsClient.load_from_dict", return_value=MockGoogleAdsClient(SAMPLE_CONFIG))
google_ads_client = GoogleAds(**SAMPLE_CONFIG)
# Mock the _get_accessible_customers method to fail first, then succeed
mock_customer_service = mocker.Mock()
mock_customer_service.list_accessible_customers.side_effect = [
ServiceUnavailable("Service is currently unavailable"), # First call fails
mocker.Mock(resource_names=["customers/123", "customers/456"]), # Second call succeeds
]
google_ads_client.customer_service = mock_customer_service
# Mock the ga_service to return a mock that can parse customer paths
mock_ga_service = mocker.Mock()
mock_ga_service.parse_customer_path.side_effect = [{"customer_id": "123"}, {"customer_id": "456"}]
google_ads_client.ga_services["default"] = mock_ga_service
# This should retry and eventually succeed
customer_ids = list(google_ads_client.get_accessible_accounts())
# Verify it was called twice (once failed, once succeeded)
assert mock_customer_service.list_accessible_customers.call_count == 2
assert customer_ids == ["123", "456"]
def test_get_accessible_accounts_gives_up_after_max_retries(mocker):
"""Test that get_accessible_accounts gives up after max retries on ServiceUnavailable"""
from google.api_core.exceptions import ServiceUnavailable
from airbyte_cdk.utils import AirbyteTracedException
mocker.patch("time.sleep") # Mock sleep to speed up test
mocker.patch("source_google_ads.google_ads.GoogleAdsClient.load_from_dict", return_value=MockGoogleAdsClient(SAMPLE_CONFIG))
google_ads_client = GoogleAds(**SAMPLE_CONFIG)
# Mock the customer service to always fail with ServiceUnavailable
mock_customer_service = mocker.Mock()
mock_customer_service.list_accessible_customers.side_effect = ServiceUnavailable("Service is currently unavailable")
google_ads_client.customer_service = mock_customer_service
# This should retry 5 times then give up
with pytest.raises(AirbyteTracedException) as e:
list(google_ads_client.get_accessible_accounts())
# Verify it was called 5 times (max retries)
assert mock_customer_service.list_accessible_customers.call_count == 5
assert "Service is currently unavailable" in e.value.message
assert e.value.failure_type == FailureType.transient_error
def test_send_request(mocker, customers):
mocker.patch("source_google_ads.google_ads.GoogleAdsClient.load_from_dict", return_value=MockGoogleAdsClient(SAMPLE_CONFIG))
mocker.patch("source_google_ads.google_ads.GoogleAdsClient.get_service", return_value=MockGoogleAdsService())
google_ads_client = GoogleAds(**SAMPLE_CONFIG)
query = "Query"
customer_id = next(iter(customers)).id
response = list(google_ads_client.send_request(query, customer_id=customer_id))
assert response[0].customer_id == customer_id
assert response[0].query == query
def test_get_fields_from_schema():
response = GoogleAds.get_fields_from_schema(SAMPLE_SCHEMA)
assert response == ["segment.date"]
def test_interval_chunking():
mock_intervals = [
{"start_date": "2021-06-17", "end_date": "2021-06-26"},
{"start_date": "2021-06-27", "end_date": "2021-07-06"},
{"start_date": "2021-07-07", "end_date": "2021-07-16"},
{"start_date": "2021-07-17", "end_date": "2021-07-26"},
{"start_date": "2021-07-27", "end_date": "2021-08-05"},
{"start_date": "2021-08-06", "end_date": "2021-08-10"},
]
intervals = list(
chunk_date_range(
start_date="2021-07-01", end_date="2021-08-10", conversion_window=14, slice_duration=pendulum.Duration(days=9), time_zone="UTC"
)
)
assert mock_intervals == intervals
generic_schema = {"properties": {"ad_group_id": {}, "segments.date": {}, "campaign_id": {}, "account_id": {}}}
@pytest.mark.parametrize(
"fields, table_name, conditions, order_field, limit, expected_sql",
(
# Basic test case
(
["ad_group_id", "segments.date", "campaign_id", "account_id"],
"ad_group_ad",
["segments.date >= '2020-01-01'", "segments.date <= '2020-01-10'"],
"segments.date",
None,
"SELECT ad_group_id, segments.date, campaign_id, account_id FROM ad_group_ad WHERE segments.date >= '2020-01-01' AND segments.date <= '2020-01-10' ORDER BY segments.date ASC",
),
# Test with no conditions
(
["ad_group_id", "segments.date", "campaign_id", "account_id"],
"ad_group_ad",
None,
None,
None,
"SELECT ad_group_id, segments.date, campaign_id, account_id FROM ad_group_ad",
),
# Test order with limit
(
["ad_group_id", "segments.date", "campaign_id", "account_id"],
"click_view",
None,
"ad_group_id",
5,
"SELECT ad_group_id, segments.date, campaign_id, account_id FROM click_view ORDER BY ad_group_id ASC LIMIT 5",
),
),
)
def test_convert_schema_into_query(fields, table_name, conditions, order_field, limit, expected_sql):
query = GoogleAds.convert_schema_into_query(fields, table_name, conditions, order_field, limit)
assert query == expected_sql
def test_get_field_value():
field = "segment.date"
date = "2001-01-01"
response = GoogleAds.get_field_value(MockedDateSegment(date), field, {})
assert response == date
def test_get_field_value_object():
expected_response = [
{"text": "An exciting headline", "policySummaryInfo": {"reviewStatus": "REVIEWED", "approvalStatus": "APPROVED"}},
{"text": "second"},
]
field = "ad_group_ad.ad.responsive_search_ad.headlines"
ads_row = GoogleAdsRow(
ad_group_ad={
"ad": {
"responsive_search_ad": {
"headlines": [
{
"text": "An exciting headline",
"policy_summary_info": {"review_status": "REVIEWED", "approval_status": "APPROVED"},
},
{"text": "second"},
]
}
}
}
)
response = GoogleAds.get_field_value(ads_row, field, {})
assert [json.loads(i) for i in response] == expected_response
def test_get_field_value_strings():
expected_response = [
"http://url_one.com",
"https://url_two.com",
]
ads_row = GoogleAdsRow(
ad_group_ad={
"ad": {
"final_urls": [
"http://url_one.com",
"https://url_two.com",
]
}
}
)
field = "ad_group_ad.ad.final_urls"
response = GoogleAds.get_field_value(ads_row, field, {})
assert response == expected_response
def test_parse_single_result():
date = "2001-01-01"
response = GoogleAds.parse_single_result(SAMPLE_SCHEMA, MockedDateSegment(date))
assert response == response
def test_get_fields_metadata(mocker):
# Mock the GoogleAdsClient to return our mock client
mocker.patch("source_google_ads.google_ads.GoogleAdsClient", MockGoogleAdsClient)
# Instantiate the GoogleAds client
google_ads_client = GoogleAds(**SAMPLE_CONFIG)
# Define the fields we want metadata for
fields = ["field1", "field2", "field3"]
# Call the method to get fields metadata
response = google_ads_client.get_fields_metadata(fields)
# Get the mock service to check the request query
mock_service = google_ads_client.get_client().get_service("GoogleAdsFieldService")
# Assert the constructed request query
expected_query = """
SELECT
name,
data_type,
enum_values,
is_repeated
WHERE name in ('field1','field2','field3')
"""
assert mock_service.request_query.strip() == expected_query.strip()
# Assert the response
assert set(response.keys()) == set(fields)
for field in fields:
assert response[field].name == field
| MockedDateSegment |
python | getlogbook__logbook | src/logbook/handlers.py | {
"start": 2543,
"end": 3565
} | class ____(type):
"""The metaclass of handlers injects a destructor if the class has an
overridden close method. This makes it possible that the default
handler class as well as all subclasses that don't need cleanup to be
collected with less overhead.
"""
def __new__(cls, name, bases, d):
# aha, that thing has a custom close method. We will need a magic
# __del__ for it to be called on cleanup.
if (
bases != (ContextObject,)
and "close" in d
and "__del__" not in d
and not any(hasattr(x, "__del__") for x in bases)
):
def _magic_del(self):
try:
self.close()
except Exception:
# del is also invoked when init fails, so we better just
# ignore any exception that might be raised here
pass
d["__del__"] = _magic_del
return type.__new__(cls, name, bases, d)
| _HandlerType |
python | ethereum__web3.py | web3/types.py | {
"start": 8671,
"end": 9711
} | class ____(TypedDict, total=False):
balance: Wei | None
nonce: int | None
code: bytes | HexStr | None
state: dict[HexStr, HexStr] | None
stateDiff: dict[HexStr, HexStr] | None
StateOverride = dict[Union[str, Address, ChecksumAddress], StateOverrideParams]
GasPriceStrategy = Union[
Callable[["Web3", TxParams], Wei], Callable[["AsyncWeb3[Any]", TxParams], Wei]
]
# syntax b/c "from" keyword not allowed w/ class construction
TxReceipt = TypedDict(
"TxReceipt",
{
"blockHash": HexBytes,
"blockNumber": BlockNumber,
"contractAddress": Optional[ChecksumAddress],
"cumulativeGasUsed": int,
"effectiveGasPrice": Wei,
"gasUsed": int,
"from": ChecksumAddress,
"logs": list[LogReceipt],
"logsBloom": HexBytes,
"root": HexStr,
"status": int,
"to": ChecksumAddress,
"transactionHash": HexBytes,
"transactionIndex": int,
"type": int,
},
)
BlockReceipts = list[TxReceipt]
| StateOverrideParams |
python | openai__openai-python | tests/test_transform.py | {
"start": 10841,
"end": 12061
} | class ____(BaseModel):
foo: str
with_none_default: Union[str, None] = None
with_str_default: str = "foo"
@parametrize
@pytest.mark.asyncio
async def test_pydantic_default_field(use_async: bool) -> None:
# should be excluded when defaults are used
model = ModelWithDefaultField.construct()
assert model.with_none_default is None
assert model.with_str_default == "foo"
assert cast(Any, await transform(model, Any, use_async)) == {}
# should be included when the default value is explicitly given
model = ModelWithDefaultField.construct(with_none_default=None, with_str_default="foo")
assert model.with_none_default is None
assert model.with_str_default == "foo"
assert cast(Any, await transform(model, Any, use_async)) == {"with_none_default": None, "with_str_default": "foo"}
# should be included when a non-default value is explicitly given
model = ModelWithDefaultField.construct(with_none_default="bar", with_str_default="baz")
assert model.with_none_default == "bar"
assert model.with_str_default == "baz"
assert cast(Any, await transform(model, Any, use_async)) == {"with_none_default": "bar", "with_str_default": "baz"}
| ModelWithDefaultField |
python | django__django | tests/file_storage/tests.py | {
"start": 42761,
"end": 42868
} | class ____(ContentFile):
def chunks(self):
time.sleep(1)
return super().chunks()
| SlowFile |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_blank06.py | {
"start": 315,
"end": 1378
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_blank06.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [42270080, 42990208]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.show_hidden_data()
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | django__django | tests/template_tests/test_custom.py | {
"start": 37260,
"end": 38860
} | class ____(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.egg_dir = os.path.join(ROOT, "eggs")
super().setUpClass()
def test_load_error(self):
msg = (
"Invalid template library specified. ImportError raised when "
"trying to load 'template_tests.broken_tag': cannot import name "
"'Xtemplate'"
)
with self.assertRaisesMessage(InvalidTemplateLibrary, msg):
Engine(libraries={"broken_tag": "template_tests.broken_tag"})
def test_load_error_egg(self):
egg_name = "%s/tagsegg.egg" % self.egg_dir
msg = (
"Invalid template library specified. ImportError raised when "
"trying to load 'tagsegg.templatetags.broken_egg': cannot "
"import name 'Xtemplate'"
)
with extend_sys_path(egg_name):
with self.assertRaisesMessage(InvalidTemplateLibrary, msg):
Engine(libraries={"broken_egg": "tagsegg.templatetags.broken_egg"})
def test_load_working_egg(self):
ttext = "{% load working_egg %}"
egg_name = "%s/tagsegg.egg" % self.egg_dir
with extend_sys_path(egg_name):
engine = Engine(
libraries={
"working_egg": "tagsegg.templatetags.working_egg",
}
)
engine.from_string(ttext)
def test_load_annotated_function(self):
Engine(
libraries={
"annotated_tag_function": "template_tests.annotated_tag_function",
}
)
| TemplateTagLoadingTests |
python | readthedocs__readthedocs.org | readthedocs/rtd_tests/tests/test_resolver.py | {
"start": 7242,
"end": 10361
} | class ____(TestCase):
def setUp(self):
self.resolver = Resolver()
def test_project_with_same_translation_and_main_language(self):
proj1 = fixture.get(Project, main_language_project=None)
proj2 = fixture.get(Project, main_language_project=None)
self.assertFalse(proj1.translations.exists())
self.assertIsNone(proj1.main_language_project)
self.assertFalse(proj2.translations.exists())
self.assertIsNone(proj2.main_language_project)
proj1.translations.add(proj2)
proj1.main_language_project = proj2
proj1.save()
self.assertEqual(
proj1.main_language_project.main_language_project,
proj1,
)
# This tests that we aren't going to re-recurse back to resolving proj1
r = Resolver()
self.assertEqual(r._get_canonical_project(proj1), (proj2, None))
def test_project_with_same_superproject_and_translation(self):
proj1 = fixture.get(Project, main_language_project=None)
proj2 = fixture.get(Project, main_language_project=None)
self.assertFalse(proj1.translations.exists())
self.assertIsNone(proj1.main_language_project)
self.assertFalse(proj2.translations.exists())
self.assertIsNone(proj2.main_language_project)
proj2.translations.add(proj1)
proj2.add_subproject(proj1)
self.assertEqual(
proj1.main_language_project,
proj2,
)
self.assertEqual(
proj1.superprojects.first().parent,
proj2,
)
# This tests that we aren't going to re-recurse back to resolving proj1
r = Resolver()
self.assertEqual(r._get_canonical_project(proj1), (proj2, None))
def test_project_with_same_grandchild_project(self):
# Note: we don't disallow this, but we also don't support this in our
# resolution (yet at least)
proj1 = fixture.get(Project, main_language_project=None)
proj2 = fixture.get(Project, main_language_project=None)
proj3 = fixture.get(Project, main_language_project=None)
self.assertFalse(proj1.translations.exists())
self.assertFalse(proj2.translations.exists())
self.assertFalse(proj3.translations.exists())
self.assertIsNone(proj1.main_language_project)
self.assertIsNone(proj2.main_language_project)
self.assertIsNone(proj3.main_language_project)
proj2.add_subproject(proj1)
proj3.add_subproject(proj2)
proj1.add_subproject(proj3)
self.assertEqual(
proj1.superprojects.first().parent,
proj2,
)
self.assertEqual(
proj2.superprojects.first().parent,
proj3,
)
self.assertEqual(
proj3.superprojects.first().parent,
proj1,
)
# This tests that we aren't going to re-recurse back to resolving proj1
r = Resolver()
self.assertEqual(
r._get_canonical_project(proj1), (proj2, proj1.parent_relationship)
)
| ResolverCanonicalProject |
python | Textualize__rich | rich/markdown.py | {
"start": 3480,
"end": 4425
} | class ____(TextElement):
"""A heading."""
@classmethod
def create(cls, markdown: Markdown, token: Token) -> Heading:
return cls(token.tag)
def on_enter(self, context: MarkdownContext) -> None:
self.text = Text()
context.enter_style(self.style_name)
def __init__(self, tag: str) -> None:
self.tag = tag
self.style_name = f"markdown.{tag}"
super().__init__()
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
text = self.text
text.justify = "center"
if self.tag == "h1":
# Draw a border around h1s
yield Panel(
text,
box=box.HEAVY,
style="markdown.h1.border",
)
else:
# Styled text for h2 and beyond
if self.tag == "h2":
yield Text("")
yield text
| Heading |
python | google__pytype | pytype/tests/test_pattern_matching.py | {
"start": 23824,
"end": 38167
} | class ____(test_base.BaseTest):
"""Test exhaustive coverage of enums."""
def test_exhaustive(self):
ty = self.Infer("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(x: Color):
match x:
case Color.RED:
return 10
case (Color.GREEN |
Color.BLUE):
return 'a'
""")
self.assertTypesMatchPytd(
ty,
"""
import enum
from typing import Type
Enum: Type[enum.Enum]
class Color(enum.Enum):
BLUE: Literal[2]
GREEN: Literal[1]
RED: Literal[0]
def f(x: Color) -> int | str: ...
""",
)
def test_default(self):
ty = self.Infer("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(x: Color):
match x:
case Color.RED:
return 10
case _:
return 'a'
""")
self.assertTypesMatchPytd(
ty,
"""
import enum
from typing import Type
Enum: Type[enum.Enum]
class Color(enum.Enum):
BLUE: Literal[2]
GREEN: Literal[1]
RED: Literal[0]
def f(x: Color) -> int | str: ...
""",
)
def test_default_with_capture(self):
ty = self.Infer("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(x: Color):
match x:
case Color.RED:
return 10
case _ as foo:
return foo
""")
self.assertTypesMatchPytd(
ty,
"""
import enum
from typing import Type
Enum: Type[enum.Enum]
class Color(enum.Enum):
BLUE: Literal[2]
GREEN: Literal[1]
RED: Literal[0]
def f(x: Color) -> int | Color: ...
""",
)
def test_nonexhaustive(self):
ty, err = self.InferWithErrors("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(x: Color):
match x: # incomplete-match[e]
case Color.RED:
return 10
case Color.GREEN:
return 'a'
""")
self.assertTypesMatchPytd(
ty,
"""
import enum
from typing import Type, Union
Enum: Type[enum.Enum]
class Color(enum.Enum):
BLUE: Literal[2]
GREEN: Literal[1]
RED: Literal[0]
def f(x: Color) -> int | str | None: ...
""",
)
self.assertErrorSequences(err, {"e": ["missing", "cases", "Color.BLUE"]})
def test_unused_after_exhaustive(self):
ty = self.Infer("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(x: Color):
match x:
case Color.RED:
return 10
case (Color.GREEN |
Color.BLUE):
return 20
case _:
return 'a'
""")
self.assertTypesMatchPytd(
ty,
"""
import enum
from typing import Type
Enum: Type[enum.Enum]
class Color(enum.Enum):
BLUE: Literal[2]
GREEN: Literal[1]
RED: Literal[0]
def f(x: Color) -> int: ...
""",
)
def test_nested(self):
ty = self.Infer("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(x: Color, y: Color):
match x:
case Color.RED:
return 10
case (Color.GREEN |
Color.BLUE):
match y:
case Color.RED:
return 10
case Color.GREEN:
return 'a'
case _:
return None
""")
self.assertTypesMatchPytd(
ty,
"""
import enum
from typing import Type
Enum: Type[enum.Enum]
class Color(enum.Enum):
BLUE: Literal[2]
GREEN: Literal[1]
RED: Literal[0]
def f(x: Color, y: Color) -> int | str | None: ...
""",
)
def test_nested_mixed(self):
self.CheckWithErrors("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(x: Color, y: str):
match x: # incomplete-match
case Color.RED:
return 10
case Color.GREEN:
match y:
case "bold":
return 10
""")
def test_multiple(self):
ty, _ = self.InferWithErrors("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(x: Color, y: Color):
match x: # incomplete-match
case Color.RED:
return 10
case Color.GREEN:
return 20
match y:
case Color.RED:
return 'a'
case Color.GREEN | Color.BLUE:
return 'b'
""")
self.assertTypesMatchPytd(
ty,
"""
import enum
from typing import Type
Enum: Type[enum.Enum]
class Color(enum.Enum):
BLUE: Literal[2]
GREEN: Literal[1]
RED: Literal[0]
def f(x: Color, y: Color) -> int | str: ...
""",
)
def test_enum_with_methods(self):
ty = self.Infer("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def red(self):
return self.RED
def f(x: Color):
match x:
case Color.RED:
return 10
case (Color.GREEN |
Color.BLUE):
return 'a'
""")
self.assertTypesMatchPytd(
ty,
"""
import enum
from typing import Type, TypeVar
Enum: Type[enum.Enum]
_TColor = TypeVar('_TColor', bound=Color)
class Color(enum.Enum):
BLUE: Literal[2]
GREEN: Literal[1]
RED: Literal[0]
def red(self: _TColor) -> _TColor: ...
def f(x: Color) -> int | str: ...
""",
)
def test_redundant(self):
ty, _ = self.InferWithErrors("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(x: Color, y: Color):
match x:
case Color.RED:
return 10
case Color.GREEN:
return 20
case Color.RED: # redundant-match
return '10'
case Color.BLUE:
return 20
""")
self.assertTypesMatchPytd(
ty,
"""
import enum
from typing import Type
Enum: Type[enum.Enum]
class Color(enum.Enum):
BLUE: Literal[2]
GREEN: Literal[1]
RED: Literal[0]
def f(x: Color, y: Color) -> int: ...
""",
)
def test_incomplete_and_redundant(self):
ty, _ = self.InferWithErrors("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(x: Color, y: Color):
match x: # incomplete-match
case Color.RED:
return 10
case Color.GREEN:
return 20
case Color.RED: # redundant-match
return '10'
""")
self.assertTypesMatchPytd(
ty,
"""
import enum
from typing import Type
Enum: Type[enum.Enum]
class Color(enum.Enum):
BLUE: Literal[2]
GREEN: Literal[1]
RED: Literal[0]
def f(x: Color, y: Color) -> int | None: ...
""",
)
def test_partially_redundant(self):
err = self.CheckWithErrors("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(x: Color, y: Color):
match x:
case Color.RED:
return 10
case Color.GREEN:
return 20
case Color.RED | Color.BLUE: # redundant-match[e]
return '10'
""")
self.assertErrorSequences(err, {"e": ["already been covered", "Color.RED"]})
def test_complete_match_no_caching(self):
self.Check(
"""
import enum
@enum.unique
class Coin(str, enum.Enum):
HEADS: str = 'heads'
TAILS: str = 'tails'
class Foo:
def foo(self, c: Coin) -> None:
match c:
case Coin.HEADS:
self.bar()
case Coin.TAILS:
pass
def bar(self) -> None:
pass
""",
skip_repeat_calls=False,
)
def test_multiple_enums(self):
"""Skip tracking if matching several enums at once."""
# Regression test for a crash
self.Check("""
import enum
class A(enum.Enum):
X = 'x'
Y = 'y'
class B(enum.Enum):
XX = 'xx'
YY = 'yy'
def f(a: A, b: B):
match (a, b):
case (A.X, B.XX):
print('bar')
""")
def test_enum_in_tuple(self):
"""Skip tracking if matching an enum in a tuple."""
# Python unpacks the tuple and compiles to a simple enum cmp in some cases.
# Check that we do not track exhaustive or redundant matches for this case.
self.Check("""
import enum
class Side(enum.Enum):
RIGHT = enum.auto()
LEFT = enum.auto()
CUSTOM = enum.auto()
def actuate_phrase(side: Side, assistant: bool):
match (side, assistant):
case (Side.LEFT | Side.RIGHT, _):
return 'preset side'
case (Side.CUSTOM, True):
return 'custom true'
case (Side.CUSTOM, False): # should not be redundant
return 'custom false'
""")
def test_pytd_enum_basic(self):
with self.DepTree([(
"foo.pyi",
"""
import enum
class A(enum.Enum):
BASIC = 1
ADVANCED = 2
""",
)]):
self.Check("""
import foo
def f(v: foo.A):
match v:
case foo.A.BASIC:
return 'basic'
case foo.A.ADVANCED:
return 'control'
case _:
return 'unknown'
""")
def test_pytd_enum_redundant(self):
with self.DepTree([(
"foo.pyi",
"""
import enum
class A(enum.Enum):
BASIC = 1
ADVANCED = 2
""",
)]):
self.CheckWithErrors("""
import foo
def f(v: foo.A):
match v:
case foo.A.BASIC:
return 'basic'
case foo.A.BASIC: # redundant-match
return 'even more basic'
case _:
return 'unknown'
""")
def test_pytd_enum_incomplete(self):
with self.DepTree([(
"foo.pyi",
"""
import enum
class A(enum.Enum):
BASIC = 1
ADVANCED = 2
""",
)]):
self.CheckWithErrors("""
import foo
def f(v: foo.A):
match v: # incomplete-match
case foo.A.BASIC:
return 'basic'
""")
def call_function_with_match(self):
ty = self.Infer("""
from enum import Enum
class Color(Enum):
RED = 0
GREEN = 1
BLUE = 2
def f(x: Color):
match x:
case Color.RED:
return 10
case (Color.GREEN |
Color.BLUE):
return 'a'
a = f(Color.RED)
""")
self.assertTypesMatchPytd(
ty,
"""
import enum
from typing import Type
Enum: Type[enum.Enum]
a: int | str
class Color(enum.Enum):
BLUE: Literal[2]
GREEN: Literal[1]
RED: Literal[0]
def f(x: Color) -> int | str: ...
""",
)
def call_method_from_init(self):
"""Regression test for a crash."""
# Caused a crash when trying to access EnumTracker.default_value before it
# had been set.
self.Check("""
import enum
class A(enum.Enum):
RED = 1
BLUE = 2
GREEN = 3
class Foo:
def __init__(self):
self.a = self.f(A.RED)
def f(self, x: A):
match x:
case A.RED:
return 42
case _:
raise ValueError('foo')
""")
def test_optimized_bytecode_out_of_order_1(self):
"""Regression test for a bug resulting from compiler optimisations."""
# Compier optimisations that inline code can put blocks out of order, which
# could potentially interfere with our checks for the end of a match block.
self.Check("""
import enum
class Color(enum.Enum):
RED = 0
GREEN = 1
BLUE = 2
def test(color: Color):
match color:
case Color.RED:
print("I see red!")
case Color.GREEN:
print("Grass is green")
case Color.BLUE:
print("I'm feeling the blues :(")
# This line compiles to a return statement after every case branch.
return color
""")
def test_optimized_bytecode_out_of_order_2(self):
"""Regression test for a bug resulting from compiler optimisations."""
# See comment in the previous test case.
self.Check("""
import enum
class A(enum.Enum):
RED = 1
GREEN = 2
BLUE = 3
def f(x: A):
ret = True
count = 0
while count < 10:
match x:
case A.RED:
ret = 10
case A.BLUE:
ret = 20
case _:
return ret
if ret:
break
else:
count += 1
""")
def test_optimized_bytecode_out_of_order_3(self):
"""Regression test for a bug resulting from compiler optimisations."""
# See comment in the previous test case.
self.Check("""
import enum
class A(enum.Enum):
RED = 1
GREEN = 2
BLUE = 3
def f(xs: list[A]) -> list[str]:
ret = []
for x in xs:
match x:
case A.RED | A.BLUE:
add = str(x)
case _:
raise ValueError("green is unsupported")
ret.append(add)
return ret
""")
@test_utils.skipBeforePy((3, 10), "New syntax in 3.10")
| EnumMatchCoverageTest |
python | doocs__leetcode | solution/2000-2099/2095.Delete the Middle Node of a Linked List/Solution.py | {
"start": 151,
"end": 470
} | class ____:
def deleteMiddle(self, head: Optional[ListNode]) -> Optional[ListNode]:
dummy = ListNode(next=head)
slow, fast = dummy, head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
slow.next = slow.next.next
return dummy.next
| Solution |
python | gevent__gevent | src/gevent/tests/test__greenlet.py | {
"start": 7086,
"end": 7719
} | class ____(LinksTestCase):
link_method = 'link'
def _test_raise(self, p):
event, queue, callback_flag = self.set_links(p)
xxxxx = self.set_links_timeout(p.link_value)
sleep(DELAY)
self.assertFalse(p, p)
self.assertRaises(ExpectedError, event.get)
self.assertEqual(queue.get(), p)
sleep(DELAY)
self.assertFalse(callback_flag, callback_flag)
self.check_timed_out(*xxxxx)
def test_raise(self):
p = gevent.spawn(lambda: getcurrent().throw(ExpectedError('test_raise')))
for _ in range(3):
self._test_raise(p)
| TestRaise_link |
python | pydantic__pydantic | pydantic/_internal/_generate_schema.py | {
"start": 132016,
"end": 132421
} | class ____:
__slots__ = ('_stack',)
def __init__(self) -> None:
self._stack: list[type] = []
@contextmanager
def push(self, type_obj: type) -> Iterator[None]:
self._stack.append(type_obj)
yield
self._stack.pop()
def get(self) -> type | None:
if self._stack:
return self._stack[-1]
else:
return None
| _ModelTypeStack |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 591125,
"end": 591907
} | class ____(sgqlc.types.relay.Connection):
"""The connection type for Organization."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("EnterpriseOrganizationMembershipEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of("Organization"), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
| EnterpriseOrganizationMembershipConnection |
python | davidhalter__jedi | jedi/inference/param.py | {
"start": 641,
"end": 10450
} | class ____(ParamName):
def __init__(self, function_value, arguments, param_node, lazy_value, is_default=False):
super().__init__(function_value, param_node.name, arguments=arguments)
self._lazy_value = lazy_value
self._is_default = is_default
def infer(self):
return self._lazy_value.infer()
def matches_signature(self):
if self._is_default:
return True
argument_values = self.infer().py__class__()
if self.get_kind() in (Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD):
return True
annotations = self.infer_annotation(execute_annotation=False)
if not annotations:
# If we cannot infer annotations - or there aren't any - pretend
# that the signature matches.
return True
matches = any(c1.is_sub_class_of(c2)
for c1 in argument_values
for c2 in annotations.gather_annotation_classes())
debug.dbg("param compare %s: %s <=> %s",
matches, argument_values, annotations, color='BLUE')
return matches
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.string_name)
def get_executed_param_names_and_issues(function_value, arguments):
"""
Return a tuple of:
- a list of `ExecutedParamName`s corresponding to the arguments of the
function execution `function_value`, containing the inferred value of
those arguments (whether explicit or default)
- a list of the issues encountered while building that list
For example, given:
```
def foo(a, b, c=None, d='d'): ...
foo(42, c='c')
```
Then for the execution of `foo`, this will return a tuple containing:
- a list with entries for each parameter a, b, c & d; the entries for a,
c, & d will have their values (42, 'c' and 'd' respectively) included.
- a list with a single entry about the lack of a value for `b`
"""
def too_many_args(argument):
m = _error_argument_count(funcdef, len(unpacked_va))
# Just report an error for the first param that is not needed (like
# cPython).
if arguments.get_calling_nodes():
# There might not be a valid calling node so check for that first.
issues.append(
_add_argument_issue(
'type-error-too-many-arguments',
argument,
message=m
)
)
else:
issues.append(None)
debug.warning('non-public warning: %s', m)
issues = [] # List[Optional[analysis issue]]
result_params = []
param_dict = {}
funcdef = function_value.tree_node
# Default params are part of the value where the function was defined.
# This means that they might have access on class variables that the
# function itself doesn't have.
default_param_context = function_value.get_default_param_context()
for param in funcdef.get_params():
param_dict[param.name.value] = param
unpacked_va = list(arguments.unpack(funcdef))
var_arg_iterator = PushBackIterator(iter(unpacked_va))
non_matching_keys = defaultdict(lambda: [])
keys_used = {}
keys_only = False
had_multiple_value_error = False
for param in funcdef.get_params():
# The value and key can both be null. There, the defaults apply.
# args / kwargs will just be empty arrays / dicts, respectively.
# Wrong value count is just ignored. If you try to test cases that are
# not allowed in Python, Jedi will maybe not show any completions.
is_default = False
key, argument = next(var_arg_iterator, (None, None))
while key is not None:
keys_only = True
try:
key_param = param_dict[key]
except KeyError:
non_matching_keys[key] = argument
else:
if key in keys_used:
had_multiple_value_error = True
m = ("TypeError: %s() got multiple values for keyword argument '%s'."
% (funcdef.name, key))
for contextualized_node in arguments.get_calling_nodes():
issues.append(
analysis.add(contextualized_node.context,
'type-error-multiple-values',
contextualized_node.node, message=m)
)
else:
keys_used[key] = ExecutedParamName(
function_value, arguments, key_param, argument)
key, argument = next(var_arg_iterator, (None, None))
try:
result_params.append(keys_used[param.name.value])
continue
except KeyError:
pass
if param.star_count == 1:
# *args param
lazy_value_list = []
if argument is not None:
lazy_value_list.append(argument)
for key, argument in var_arg_iterator:
# Iterate until a key argument is found.
if key:
var_arg_iterator.push_back((key, argument))
break
lazy_value_list.append(argument)
seq = iterable.FakeTuple(function_value.inference_state, lazy_value_list)
result_arg = LazyKnownValue(seq)
elif param.star_count == 2:
if argument is not None:
too_many_args(argument)
# **kwargs param
dct = iterable.FakeDict(function_value.inference_state, dict(non_matching_keys))
result_arg = LazyKnownValue(dct)
non_matching_keys = {}
else:
# normal param
if argument is None:
# No value: Return an empty container
if param.default is None:
result_arg = LazyUnknownValue()
if not keys_only:
for contextualized_node in arguments.get_calling_nodes():
m = _error_argument_count(funcdef, len(unpacked_va))
issues.append(
analysis.add(
contextualized_node.context,
'type-error-too-few-arguments',
contextualized_node.node,
message=m,
)
)
else:
result_arg = LazyTreeValue(default_param_context, param.default)
is_default = True
else:
result_arg = argument
result_params.append(ExecutedParamName(
function_value, arguments, param, result_arg, is_default=is_default
))
if not isinstance(result_arg, LazyUnknownValue):
keys_used[param.name.value] = result_params[-1]
if keys_only:
# All arguments should be handed over to the next function. It's not
# about the values inside, it's about the names. Jedi needs to now that
# there's nothing to find for certain names.
for k in set(param_dict) - set(keys_used):
param = param_dict[k]
if not (non_matching_keys or had_multiple_value_error
or param.star_count or param.default):
# add a warning only if there's not another one.
for contextualized_node in arguments.get_calling_nodes():
m = _error_argument_count(funcdef, len(unpacked_va))
issues.append(
analysis.add(contextualized_node.context,
'type-error-too-few-arguments',
contextualized_node.node, message=m)
)
for key, lazy_value in non_matching_keys.items():
m = "TypeError: %s() got an unexpected keyword argument '%s'." \
% (funcdef.name, key)
issues.append(
_add_argument_issue(
'type-error-keyword-argument',
lazy_value,
message=m
)
)
remaining_arguments = list(var_arg_iterator)
if remaining_arguments:
first_key, lazy_value = remaining_arguments[0]
too_many_args(lazy_value)
return result_params, issues
def get_executed_param_names(function_value, arguments):
"""
Return a list of `ExecutedParamName`s corresponding to the arguments of the
function execution `function_value`, containing the inferred value of those
arguments (whether explicit or default). Any issues building this list (for
example required arguments which are missing in the invocation) are ignored.
For example, given:
```
def foo(a, b, c=None, d='d'): ...
foo(42, c='c')
```
Then for the execution of `foo`, this will return a list containing entries
for each parameter a, b, c & d; the entries for a, c, & d will have their
values (42, 'c' and 'd' respectively) included.
"""
return get_executed_param_names_and_issues(function_value, arguments)[0]
def _error_argument_count(funcdef, actual_count):
params = funcdef.get_params()
default_arguments = sum(1 for p in params if p.default or p.star_count)
if default_arguments == 0:
before = 'exactly '
else:
before = 'from %s to ' % (len(params) - default_arguments)
return ('TypeError: %s() takes %s%s arguments (%s given).'
% (funcdef.name, before, len(params), actual_count))
| ExecutedParamName |
python | Pylons__pyramid | src/pyramid/util.py | {
"start": 8525,
"end": 13256
} | class ____:
"""Maintain a set of items.
Each item is stored as a weakref to avoid extending their lifetime.
The values may be iterated over or the last item added may be
accessed via the ``last`` property.
If items are added more than once, the most recent addition will
be remembered in the order:
order = WeakOrderedSet()
order.add('1')
order.add('2')
order.add('1')
list(order) == ['2', '1']
order.last == '1'
"""
def __init__(self):
self._items = {}
self._order = []
def add(self, item):
"""Add an item to the set."""
oid = id(item)
if oid in self._items:
self._order.remove(oid)
self._order.append(oid)
return
ref = weakref.ref(item, lambda x: self._remove_by_id(oid))
self._items[oid] = ref
self._order.append(oid)
def _remove_by_id(self, oid):
"""Remove an item from the set."""
if oid in self._items:
del self._items[oid]
self._order.remove(oid)
def remove(self, item):
"""Remove an item from the set."""
self._remove_by_id(id(item))
def empty(self):
"""Clear all objects from the set."""
self._items = {}
self._order = []
def __len__(self):
return len(self._order)
def __contains__(self, item):
oid = id(item)
return oid in self._items
def __iter__(self):
return (self._items[oid]() for oid in self._order)
@property
def last(self):
if self._order:
oid = self._order[-1]
return self._items[oid]()
def strings_differ(string1, string2):
"""Check whether two strings differ while avoiding timing attacks.
This function returns True if the given strings differ and False
if they are equal. It's careful not to leak information about *where*
they differ as a result of its running time, which can be very important
to avoid certain timing-related crypto attacks:
http://seb.dbzteam.org/crypto/python-oauth-timing-hmac.pdf
.. versionchanged:: 1.6
Support :func:`hmac.compare_digest` if it is available (Python 2.7.7+
and Python 3.3+).
"""
len_eq = len(string1) == len(string2)
if len_eq:
invalid_bits = 0
left = string1
else:
invalid_bits = 1
left = string2
right = string2
invalid_bits += not compare_digest(left, right)
return invalid_bits != 0
def object_description(object):
"""Produce a human-consumable text description of ``object``,
usually involving a Python dotted name. For example:
>>> object_description(None)
'None'
>>> from xml.dom import minidom
>>> object_description(minidom)
'module xml.dom.minidom'
>>> object_description(minidom.Attr)
'class xml.dom.minidom.Attr'
>>> object_description(minidom.Attr.appendChild)
'method appendChild of class xml.dom.minidom.Attr'
If this method cannot identify the type of the object, a generic
description ala ``object <object.__name__>`` will be returned.
If the object passed is already a string, it is simply returned. If it
is a boolean, an integer, a list, a tuple, a set, or ``None``, a
(possibly shortened) string representation is returned.
"""
if isinstance(object, str):
return object
if isinstance(object, int):
return str(object)
if isinstance(object, (bool, float, type(None))):
return str(object)
if isinstance(object, set):
return shortrepr(object, '}')
if isinstance(object, tuple):
return shortrepr(object, ')')
if isinstance(object, list):
return shortrepr(object, ']')
if isinstance(object, dict):
return shortrepr(object, '}')
module = inspect.getmodule(object)
if module is None:
return 'object %s' % str(object)
modulename = module.__name__
if inspect.ismodule(object):
return 'module %s' % modulename
if inspect.ismethod(object):
oself = getattr(object, '__self__', None)
return 'method {} of class {}.{}'.format(
object.__name__,
modulename,
oself.__class__.__name__,
)
if inspect.isclass(object):
dottedname = f'{modulename}.{object.__name__}'
return 'class %s' % dottedname
if inspect.isfunction(object):
dottedname = f'{modulename}.{object.__name__}'
return 'function %s' % dottedname
return 'object %s' % str(object)
def shortrepr(object, closer):
r = str(object)
if len(r) > 100:
r = r[:100] + ' ... %s' % closer
return r
| WeakOrderedSet |
python | scikit-image__scikit-image | benchmarks/benchmark_import_time.py | {
"start": 62,
"end": 599
} | class ____:
"""Benchmark the time it takes to import various modules"""
params = [
'numpy',
'skimage',
'skimage.feature',
'skimage.morphology',
'skimage.color',
'skimage.io',
]
param_names = ["package_name"]
def setup(self, package_name):
pass
def time_import(self, package_name):
run(
executable + ' -c "import ' + package_name + '"',
capture_output=True,
stdin=PIPE,
shell=True,
)
| ImportSuite |
python | spack__spack | lib/spack/spack/tag.py | {
"start": 2229,
"end": 2332
} | class ____(spack.error.SpackError):
"""Raised when there is a problem with a TagIndex."""
| TagIndexError |
python | getsentry__sentry | tests/sentry/releases/endpoints/test_project_releases.py | {
"start": 12638,
"end": 22295
} | class ____(APITestCase):
def test_minimal(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
url = reverse(
"sentry-api-0-project-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
response = self.client.post(
url,
data={"version": "1.2.1"},
HTTP_USER_AGENT="sentry-cli/2.77.4",
)
assert response.status_code == 201, response.content
assert response.data["version"]
release = Release.objects.get(
version=response.data["version"],
user_agent="sentry-cli/2.77.4",
)
assert not release.owner_id
assert release.organization == project.organization
assert release.projects.first() == project
def test_ios_release(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
url = reverse(
"sentry-api-0-project-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
response = self.client.post(url, data={"version": "1.2.1 (123)"})
assert response.status_code == 201, response.content
assert response.data["version"]
release = Release.objects.get(version=response.data["version"])
assert not release.owner_id
assert release.organization == project.organization
assert release.projects.first() == project
def test_duplicate(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
release = Release.objects.create(version="1.2.1", organization_id=project.organization_id)
release.add_project(project)
url = reverse(
"sentry-api-0-project-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
response = self.client.post(url, data={"version": "1.2.1"})
assert response.status_code == 208, response.content
def test_duplicate_across_org(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
release = Release.objects.create(version="1.2.1", organization_id=project.organization_id)
release.add_project(project)
project2 = self.create_project(name="bar", organization=project.organization)
url = reverse(
"sentry-api-0-project-releases",
kwargs={
"organization_id_or_slug": project2.organization.slug,
"project_id_or_slug": project2.slug,
},
)
response = self.client.post(url, data={"version": "1.2.1"})
# since project2 was added, should be 201
assert response.status_code == 201, response.content
assert (
Release.objects.filter(version="1.2.1", organization_id=project.organization_id).count()
== 1
)
assert ReleaseProject.objects.get(release=release, project=project)
assert ReleaseProject.objects.get(release=release, project=project2)
def test_version_whitespace(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
url = reverse(
"sentry-api-0-project-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
response = self.client.post(url, data={"version": "1.2.3\n"})
assert response.status_code == 400, response.content
response = self.client.post(url, data={"version": "\n1.2.3"})
assert response.status_code == 400, response.content
response = self.client.post(url, data={"version": "1.\n2.3"})
assert response.status_code == 400, response.content
response = self.client.post(url, data={"version": "1.2.3\f"})
assert response.status_code == 400, response.content
response = self.client.post(url, data={"version": "1.2.3\t"})
assert response.status_code == 400, response.content
response = self.client.post(url, data={"version": "1.2.3"})
assert response.status_code == 201, response.content
assert response.data["version"] == "1.2.3"
release = Release.objects.get(
organization_id=project.organization_id, version=response.data["version"]
)
assert not release.owner_id
def test_features(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
url = reverse(
"sentry-api-0-project-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
response = self.client.post(url, data={"version": "1.2.1", "owner": self.user.email})
assert response.status_code == 201, response.content
assert response.data["version"]
release = Release.objects.get(
organization_id=project.organization_id, version=response.data["version"]
)
assert release.owner_id == self.user.id
def test_commits(self) -> None:
self.login_as(user=self.user)
project = self.create_project(name="foo")
url = reverse(
"sentry-api-0-project-releases",
kwargs={
"organization_id_or_slug": project.organization.slug,
"project_id_or_slug": project.slug,
},
)
response = self.client.post(
url, data={"version": "1.2.1", "commits": [{"id": "a" * 40}, {"id": "b" * 40}]}
)
assert response.status_code == 201, (response.status_code, response.content)
assert response.data["version"]
release = Release.objects.get(
organization_id=project.organization_id, version=response.data["version"]
)
rc_list = list(
ReleaseCommit.objects.filter(release=release)
.select_related("commit", "commit__author")
.order_by("order")
)
assert len(rc_list) == 2
for rc in rc_list:
assert rc.organization_id
def test_org_auth_token(self) -> None:
org = self.create_organization()
org2 = self.create_organization()
team1 = self.create_team(organization=org)
project1 = self.create_project(teams=[team1], organization=org)
release1 = Release.objects.create(
organization_id=org.id,
version="1",
date_added=datetime(2013, 8, 13, 3, 8, 24, 880386, tzinfo=UTC),
)
release1.add_project(project1)
url = reverse(
"sentry-api-0-project-releases",
kwargs={"organization_id_or_slug": org.slug, "project_id_or_slug": project1.slug},
)
# test right org, wrong permissions level
with assume_test_silo_mode(SiloMode.CONTROL):
bad_token_str = generate_token(org.slug, "")
OrgAuthToken.objects.create(
organization_id=org.id,
name="token 1",
token_hashed=hash_token(bad_token_str),
token_last_characters="ABCD",
scope_list=[],
date_last_used=None,
)
response = self.client.post(
url,
data={"version": "1.2.1"},
HTTP_AUTHORIZATION=f"Bearer {bad_token_str}",
)
assert response.status_code == 403
# test wrong org, right permissions level
with assume_test_silo_mode(SiloMode.CONTROL):
wrong_org_token_str = generate_token(org2.slug, "")
OrgAuthToken.objects.create(
organization_id=org2.id,
name="token 1",
token_hashed=hash_token(wrong_org_token_str),
token_last_characters="ABCD",
scope_list=["org:ci"],
date_last_used=None,
)
response = self.client.post(
url,
data={"version": "1.2.1"},
HTTP_AUTHORIZATION=f"Bearer {wrong_org_token_str}",
)
assert response.status_code == 403
# test right org, right permissions level
with assume_test_silo_mode(SiloMode.CONTROL):
good_token_str = generate_token(org.slug, "")
OrgAuthToken.objects.create(
organization_id=org.id,
name="token 1",
token_hashed=hash_token(good_token_str),
token_last_characters="ABCD",
scope_list=["org:ci"],
date_last_used=None,
)
with outbox_runner():
response = self.client.post(
url,
data={"version": "1.2.1"},
HTTP_AUTHORIZATION=f"Bearer {good_token_str}",
)
assert response.status_code == 201, response.content
# Make sure org token usage was updated
with assume_test_silo_mode(SiloMode.CONTROL):
org_token = OrgAuthToken.objects.get(token_hashed=hash_token(good_token_str))
assert org_token.date_last_used is not None
assert org_token.project_last_used_id == project1.id
| ProjectReleaseCreateTest |
python | pytorch__pytorch | torch/utils/checkpoint.py | {
"start": 33189,
"end": 34795
} | class ____(torch.autograd.Function):
@staticmethod
# pyrefly: ignore [bad-override]
def forward(*args):
return torch.empty((0,))
@staticmethod
def setup_context(ctx: Any, inputs: Tuple[Any, ...], output: Any) -> None:
# Only tensors can be saved with ctx.save_for_backward, everything else
# is captured by get_args, which is saved directly on ctx
tensor_indices, tensors = zip(
*[(i, o) for i, o in enumerate(inputs) if isinstance(o, torch.Tensor)], strict=False
)
idx2saved_idx = {b: a for a, b in enumerate(tensor_indices)}
# args but with tensors replaced with None as placeholders
args = [None if isinstance(o, torch.Tensor) else o for o in inputs]
def get_args(saved_tensors):
# restore the placeholders with the original tensors grabbed from
# ctx.saved_tensors (which may be saved on a parent checkpoint if
# this checkpoint is nested, and that would trigger a recursive
# unpack!)
ret = [
saved_tensors[idx2saved_idx[i]] if i in tensor_indices else o
for i, o in enumerate(args)
]
# grab the tail since we also saved the dummy to avoid having to explicitly
# handle the case where there are no tensor inputs
return ret[1:]
ctx.get_args = get_args
ctx.save_for_backward(*tensors)
@staticmethod
def backward(ctx, *grad_outputs) -> NoReturn:
raise AssertionError("Did not expect to backward on this graph")
| _NoopSaveInputs |
python | wandb__wandb | wandb/_pydantic/base.py | {
"start": 532,
"end": 1616
} | class ____(TypedDict, total=False):
"""Shared keyword arguments for `BaseModel.model_{dump,dump_json}`.
Newer pydantic versions may accept more arguments than are listed here.
Last updated for pydantic v2.12.0.
"""
include: IncEx | None
exclude: IncEx | None
context: Any | None
by_alias: bool | None
exclude_unset: bool
exclude_defaults: bool
exclude_none: bool
exclude_computed_fields: bool
round_trip: bool
warnings: bool | Literal["none", "warn", "error"]
fallback: Callable[[Any], Any] | None
serialize_as_any: bool
# ---------------------------------------------------------------------------
# Base models and mixin classes.
#
# Extra info is provided for devs in inline comments, NOT docstrings. This
# prevents it from showing up in generated docs for subclasses.
# FOR INTERNAL USE ONLY: v1-compatible drop-in replacement for `pydantic.BaseModel`.
# If pydantic v2 is detected, this is just `pydantic.BaseModel`.
#
# Deliberately inherits ALL default configuration from `pydantic.BaseModel`.
| ModelDumpKwargs |
python | ray-project__ray | python/ray/data/_internal/planner/exchange/sort_task_spec.py | {
"start": 3305,
"end": 9034
} | class ____(ExchangeTaskSpec):
"""
The implementation for distributed sort tasks.
The algorithm is similar to [External Merge Sort]
(https://en.wikipedia.org/wiki/External_sorting).
Sorting is done in 3 steps: sampling, sorting individual blocks, and
merging sorted blocks.
Sampling (`sample_boundaries`): we get a number of sample items from each block,
sort them, and use them to compute boundaries that would partition all items into
approximately equal ranges.
Sorting (`map`): each block is sorted locally, then partitioned into smaller
blocks according to the boundaries. Each partitioned block is passed to a merge
task.
Merging (`reduce`): a merge task would receive a block from every worker that
consists of items in a certain range. It then merges the sorted blocks into one
sorted block and becomes part of the new, sorted block.
"""
SORT_SAMPLE_SUB_PROGRESS_BAR_NAME = "Sort Sample"
def __init__(
self,
boundaries: List[T],
sort_key: SortKey,
batch_format: str,
):
super().__init__(
map_args=[boundaries, sort_key],
reduce_args=[sort_key, batch_format],
)
@staticmethod
def map(
idx: int,
block: Block,
output_num_blocks: int,
boundaries: List[T],
sort_key: SortKey,
) -> List[Union[Block, "BlockMetadataWithSchema"]]:
stats = BlockExecStats.builder()
accessor = BlockAccessor.for_block(block)
out = accessor.sort_and_partition(boundaries, sort_key)
from ray.data.block import BlockMetadataWithSchema
meta_with_schema = BlockMetadataWithSchema.from_block(
block, stats=stats.build()
)
return out + [meta_with_schema]
@staticmethod
def reduce(
sort_key: SortKey,
batch_format: str,
*mapper_outputs: List[Block],
partial_reduce: bool = False,
) -> Tuple[Block, "BlockMetadataWithSchema"]:
normalized_blocks = TableBlockAccessor.normalize_block_types(
mapper_outputs,
target_block_type=ExchangeTaskSpec._derive_target_block_type(batch_format),
)
blocks, meta_with_schema = BlockAccessor.for_block(
normalized_blocks[0]
).merge_sorted_blocks(normalized_blocks, sort_key)
return blocks, meta_with_schema
@staticmethod
def sample_boundaries(
blocks: List[ObjectRef[Block]],
sort_key: SortKey,
num_reducers: int,
sample_bar: Optional[ProgressBar] = None,
) -> List[T]:
"""
Return (num_reducers - 1) items in ascending order from the blocks that
partition the domain into ranges with approximately equally many elements.
Each boundary item is a tuple of a form (col1_value, col2_value, ...).
"""
columns = sort_key.get_columns()
n_samples = int(num_reducers * 10 / len(blocks))
sample_block = cached_remote_fn(_sample_block)
sample_results = [
sample_block.remote(block, n_samples, sort_key) for block in blocks
]
if sample_bar is None:
sample_bar = ProgressBar(
SortTaskSpec.SORT_SAMPLE_SUB_PROGRESS_BAR_NAME,
len(blocks) * n_samples,
unit="rows",
)
# TODO(zhilong): Update sort sample bar before finished.
samples = sample_bar.fetch_until_complete(sample_results)
del sample_results
samples: List[Block] = [s for s in samples if len(s) > 0]
# The dataset is empty
if len(samples) == 0:
return [None] * (num_reducers - 1)
# Convert samples to a sorted list[tuple[...]] where each tuple represents a
# sample.
# TODO: Once we deprecate pandas blocks, we can avoid this conversion and
# directly sort the samples.
builder = DelegatingBlockBuilder()
for sample in samples:
builder.add_block(sample)
samples_table = builder.build()
samples_dict = BlockAccessor.for_block(samples_table).to_numpy(columns=columns)
# This zip does the transposition from list of column values to list of tuples.
samples_list = list(zip(*samples_dict.values()))
def is_na(x):
# Check if x is None or NaN. Type casting to np.array first to avoid
# isnan failing on strings and other types.
if x is None:
return True
x = np.asarray(x)
if np.issubdtype(x.dtype, np.number):
return np.isnan(x)
return False
# To allow multi-directional sort, we utilize Python's stable sort: we
# sort several times with different directions. We do this in reverse, so
# that the last key we sort by is the primary sort key passed by the user.
for i, desc in list(enumerate(sort_key.get_descending()))[::-1]:
# Sort the list, but Nones should be NULL_SENTINEL to ensure safe sorting.
samples_list.sort(
key=lambda sample: NULL_SENTINEL if is_na(sample[i]) else sample[i],
reverse=desc,
)
# Each boundary corresponds to a quantile of the data.
quantile_indices = [
int(q * (len(samples_list) - 1))
for q in np.linspace(0, 1, num_reducers + 1)
]
# Exclude the first and last quantiles because they're 0 and 1.
return [samples_list[i] for i in quantile_indices[1:-1]]
def _sample_block(block: Block, n_samples: int, sort_key: SortKey) -> Block:
return BlockAccessor.for_block(block).sample(n_samples, sort_key)
| SortTaskSpec |
python | fluentpython__example-code-2e | 15-more-types/petbox/petbox.py | {
"start": 236,
"end": 276
} | class ____(Pet):
"""Felis catus"""
| Cat |
python | getsentry__sentry | tests/sentry/monitors/logic/test_mark_failed.py | {
"start": 558,
"end": 19189
} | class ____(TestCase):
@mock.patch("sentry.monitors.logic.incidents.dispatch_incident_occurrence")
def test_mark_failed_default_params(
self, mock_dispatch_incident_occurrence: mock.MagicMock
) -> None:
monitor = Monitor.objects.create(
name="test monitor",
organization_id=self.organization.id,
project_id=self.project.id,
config={
"schedule": [1, "month"],
"schedule_type": ScheduleType.INTERVAL,
"max_runtime": None,
"checkin_margin": None,
},
)
monitor_environment = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=self.environment.id,
status=MonitorStatus.OK,
)
last_checkin = timezone.now()
trace_id = uuid.uuid4()
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.ERROR,
trace_id=trace_id,
date_added=last_checkin,
)
assert mark_failed(checkin, failed_at=checkin.date_added)
monitor_environment.refresh_from_db()
assert monitor_environment.status == MonitorStatus.ERROR
monitor_incidents = MonitorIncident.objects.filter(monitor_environment=monitor_environment)
assert len(monitor_incidents) == 1
assert mock_dispatch_incident_occurrence.call_count == 1
assert mock_dispatch_incident_occurrence.call_args == mock.call(
checkin,
[checkin],
monitor_incidents[0],
checkin.date_added,
None,
)
@mock.patch("sentry.monitors.logic.incidents.dispatch_incident_occurrence")
def test_mark_failed_muted(self, mock_dispatch_incident_occurrence: mock.MagicMock) -> None:
monitor = Monitor.objects.create(
name="test monitor",
organization_id=self.organization.id,
project_id=self.project.id,
config={
"schedule": [1, "month"],
"schedule_type": ScheduleType.INTERVAL,
"max_runtime": None,
"checkin_margin": None,
},
)
monitor_environment = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=self.environment.id,
status=MonitorStatus.OK,
is_muted=True,
)
assert monitor_environment.active_incident is None
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.UNKNOWN,
)
assert mark_failed(checkin, failed_at=checkin.date_added)
monitor.refresh_from_db()
monitor_environment.refresh_from_db()
assert is_monitor_muted(monitor)
assert monitor_environment.status == MonitorStatus.ERROR
assert mock_dispatch_incident_occurrence.call_count == 0
assert monitor_environment.active_incident is not None
@mock.patch("sentry.monitors.logic.incidents.dispatch_incident_occurrence")
def test_mark_failed_issue_threshold(
self, mock_dispatch_incident_occurrence: mock.MagicMock
) -> None:
failure_issue_threshold = 8
monitor = Monitor.objects.create(
name="test monitor",
organization_id=self.organization.id,
project_id=self.project.id,
config={
"schedule": [1, "month"],
"schedule_type": ScheduleType.INTERVAL,
"failure_issue_threshold": failure_issue_threshold,
"max_runtime": None,
"checkin_margin": None,
},
)
monitor_environment = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=self.environment.id,
status=MonitorStatus.OK,
)
MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.OK,
)
failure_statuses = cycle([CheckInStatus.ERROR, CheckInStatus.TIMEOUT, CheckInStatus.MISSED])
for _ in range(0, failure_issue_threshold - 1):
status = next(failure_statuses)
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=status,
)
mark_failed(checkin, failed_at=checkin.date_added)
# failure has not hit threshold, monitor should be in an OK status
monitor_environment = MonitorEnvironment.objects.get(id=monitor_environment.id)
assert monitor_environment.status == MonitorStatus.OK
# create another OK check-in to break the chain
MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.OK,
)
for _ in range(0, failure_issue_threshold):
status = next(failure_statuses)
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=status,
)
if _ == 0:
first_checkin = checkin
mark_failed(checkin, failed_at=checkin.date_added)
# failure has hit threshold, monitor should be in a failed state
monitor_environment = MonitorEnvironment.objects.get(id=monitor_environment.id)
assert monitor_environment.status == MonitorStatus.ERROR
# check that an incident has been created correctly
monitor_incident = MonitorIncident.objects.get(monitor_environment=monitor_environment)
assert monitor_incident.starting_checkin == first_checkin
assert monitor_incident.starting_timestamp == first_checkin.date_added
assert monitor_environment.active_incident is not None
assert monitor_incident.grouphash == monitor_environment.active_incident.grouphash
# assert correct number of occurrences was sent
assert mock_dispatch_incident_occurrence.call_count == failure_issue_threshold
# send another check-in to make sure the incident does not change
status = next(failure_statuses)
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=status,
)
mark_failed(checkin, failed_at=checkin.date_added)
monitor_environment = MonitorEnvironment.objects.get(id=monitor_environment.id)
assert monitor_environment.status == MonitorStatus.ERROR
# check that incident has not changed
monitor_incident = MonitorIncident.objects.get(id=monitor_incident.id)
assert monitor_environment.active_incident is not None
assert monitor_incident.grouphash == monitor_environment.active_incident.grouphash
# assert correct number of occurrences was sent
assert mock_dispatch_incident_occurrence.call_count == failure_issue_threshold + 1
# Resolve the incident with an OK check-in
ok_checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=MonitorStatus.OK,
)
monitor_incident.resolving_checkin = ok_checkin
monitor_incident.resolving_timestamp = ok_checkin.date_added
monitor_incident.save()
monitor_environment.status = MonitorStatus.OK
monitor_environment.save()
# Cause a new incident and ensure we create a new incident
for _ in range(0, failure_issue_threshold):
status = next(failure_statuses)
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=status,
)
mark_failed(checkin, failed_at=checkin.date_added)
monitor_incidents = MonitorIncident.objects.filter(monitor_environment=monitor_environment)
assert len(monitor_incidents) == 2
# Test to make sure that timeout mark_failed (which occur in the past)
# correctly create issues once passing the failure_issue_threshold
@mock.patch("sentry.monitors.logic.incidents.dispatch_incident_occurrence")
def test_mark_failed_issue_threshold_timeout(
self, mock_dispatch_incident_occurrence: mock.MagicMock
) -> None:
failure_issue_threshold = 8
monitor = Monitor.objects.create(
name="test monitor",
organization_id=self.organization.id,
project_id=self.project.id,
config={
"schedule": [1, "month"],
"schedule_type": ScheduleType.INTERVAL,
"failure_issue_threshold": failure_issue_threshold,
"max_runtime": None,
"checkin_margin": None,
},
)
monitor_environment = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=self.environment.id,
status=MonitorStatus.OK,
)
MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.OK,
)
# create in-progress check-ins
checkins = []
for i in range(0, failure_issue_threshold + 1):
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.IN_PROGRESS,
)
checkins.append(checkin)
if i == 0:
first_checkin = checkin
# mark check-ins as failed
for _ in range(0, failure_issue_threshold - 1):
checkin = checkins.pop(0)
checkin.update(status=CheckInStatus.TIMEOUT)
mark_failed(checkin, failed_at=checkin.date_added)
# failure has not hit threshold, monitor should be in an OK status
monitor_environment = MonitorEnvironment.objects.get(id=monitor_environment.id)
assert monitor_environment.status == MonitorStatus.OK
checkin = checkins.pop(0)
checkin.update(status=CheckInStatus.TIMEOUT)
mark_failed(checkin, failed_at=checkin.date_added)
# failure has hit threshold, monitor should be in a failed state
monitor_environment = MonitorEnvironment.objects.get(id=monitor_environment.id)
assert monitor_environment.status == MonitorStatus.ERROR
# check that an incident has been created correctly
monitor_incident = MonitorIncident.objects.get(monitor_environment=monitor_environment)
assert monitor_incident.starting_checkin == first_checkin
assert monitor_incident.starting_timestamp == first_checkin.date_added
assert monitor_environment.active_incident is not None
assert monitor_incident.grouphash == monitor_environment.active_incident.grouphash
# assert correct number of occurrences was sent
assert mock_dispatch_incident_occurrence.call_count == failure_issue_threshold
# we are duplicating this test as the code paths are different, for now
@mock.patch("sentry.monitors.logic.incidents.dispatch_incident_occurrence")
def test_mark_failed_issue_threshold_disabled(
self, mock_dispatch_incident_occurrence: mock.MagicMock
) -> None:
failure_issue_threshold = 8
monitor = Monitor.objects.create(
name="test monitor",
organization_id=self.organization.id,
project_id=self.project.id,
config={
"schedule": [1, "month"],
"schedule_type": ScheduleType.INTERVAL,
"failure_issue_threshold": failure_issue_threshold,
"max_runtime": None,
"checkin_margin": None,
},
)
monitor_environment = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=self.environment.id,
status=MonitorStatus.OK,
is_muted=True,
)
assert monitor_environment.active_incident is None
for _ in range(0, failure_issue_threshold):
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.UNKNOWN,
)
mark_failed(checkin, failed_at=checkin.date_added)
monitor.refresh_from_db()
monitor_environment.refresh_from_db()
assert is_monitor_muted(monitor)
assert monitor_environment.status == MonitorStatus.ERROR
assert mock_dispatch_incident_occurrence.call_count == 0
assert monitor_environment.active_incident is not None
def test_mark_failed_issue_assignment(self) -> None:
monitor = Monitor.objects.create(
name="test monitor",
organization_id=self.organization.id,
project_id=self.project.id,
config={
"schedule": [1, "month"],
"schedule_type": ScheduleType.INTERVAL,
"max_runtime": None,
"checkin_margin": None,
},
owner_user_id=self.user.id,
)
monitor_environment = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=self.environment.id,
status=MonitorStatus.OK,
)
MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.OK,
)
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.IN_PROGRESS,
)
mark_failed(checkin, failed_at=checkin.date_added)
# failure has hit threshold, monitor should be in a failed state
monitor_environment = MonitorEnvironment.objects.get(id=monitor_environment.id)
assert monitor_environment.status == MonitorStatus.ERROR
# check that an incident has been created correctly
monitor_incident = MonitorIncident.objects.get(monitor_environment=monitor_environment)
assert monitor_incident.starting_checkin == checkin
assert monitor_incident.starting_timestamp == checkin.date_added
assert monitor_environment.active_incident is not None
assert monitor_incident.grouphash == monitor_environment.active_incident.grouphash
occurrence_data = {"fingerprint": [monitor_environment.active_incident.grouphash]}
process_occurrence_data(occurrence_data)
issue_platform_hash = occurrence_data["fingerprint"][0]
grouphash = GroupHash.objects.get(hash=issue_platform_hash)
group_assignee = GroupAssignee.objects.get(group_id=grouphash.group_id)
assert group_assignee.user_id == monitor.owner_user_id
@mock.patch("sentry.monitors.logic.incidents.dispatch_incident_occurrence")
@mock.patch("sentry.monitors.logic.incident_occurrence.resolve_incident_group")
def test_mark_failed_fingerprint_after_resolution(
self, mock_resolve_incident_group, mock_dispatch_incident_occurrence
):
"""Test that resolving and recreating an incident maintains the same fingerprint"""
monitor = Monitor.objects.create(
name="test monitor",
organization_id=self.organization.id,
project_id=self.project.id,
config={
"schedule": [1, "hour"],
"schedule_type": ScheduleType.INTERVAL,
"failure_issue_threshold": 3,
},
)
monitor_environment = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=self.environment.id,
status=MonitorStatus.OK,
)
expected_fingerprint = f"crons:{monitor_environment.id}"
for i in range(3):
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.ERROR,
)
mark_failed(checkin, failed_at=checkin.date_added)
first_incident = MonitorIncident.objects.get(monitor_environment=monitor_environment)
assert first_incident.grouphash == expected_fingerprint
ok_checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.OK,
)
from sentry.monitors.logic.mark_ok import mark_ok
mark_ok(ok_checkin, ok_checkin.date_added)
monitor_environment.refresh_from_db()
assert monitor_environment.status == MonitorStatus.OK
first_incident.refresh_from_db()
assert first_incident.resolving_checkin == ok_checkin
for i in range(3):
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.ERROR,
)
mark_failed(checkin, failed_at=checkin.date_added)
incidents = MonitorIncident.objects.filter(
monitor_environment=monitor_environment
).order_by("date_added")
assert incidents.count() == 2
second_incident = incidents.last()
assert second_incident
assert second_incident.id != first_incident.id
assert second_incident.grouphash == expected_fingerprint
assert second_incident.grouphash == first_incident.grouphash
| MarkFailedTestCase |
python | doocs__leetcode | solution/2300-2399/2323.Find Minimum Time to Finish All Jobs II/Solution.py | {
"start": 0,
"end": 198
} | class ____:
def minimumTime(self, jobs: List[int], workers: List[int]) -> int:
jobs.sort()
workers.sort()
return max((a + b - 1) // b for a, b in zip(jobs, workers))
| Solution |
python | coleifer__peewee | tests/dataset.py | {
"start": 770,
"end": 20837
} | class ____(ModelTestCase):
database = db
requires = [User, Note, Category]
names = ['charlie', 'huey', 'peewee', 'mickey', 'zaizee']
def setUp(self):
if os.path.exists(self.database.database):
os.unlink(self.database.database)
super(TestDataSet, self).setUp()
self.dataset = DataSet('sqlite:///%s' % self.database.database)
def tearDown(self):
self.dataset.close()
super(TestDataSet, self).tearDown()
def test_create_index(self):
users = self.dataset['users']
users.insert(username='u0')
users.create_index(['username'], True)
with self.assertRaises(IntegrityError):
users.insert(username='u0')
def test_pass_database(self):
db = SqliteDatabase(':memory:')
dataset = DataSet(db)
self.assertEqual(dataset._database_path, ':memory:')
users = dataset['users']
users.insert(username='charlie')
self.assertEqual(list(users), [{'id': 1, 'username': 'charlie'}])
@skip_if(IS_SQLITE_OLD)
def test_with_views(self):
self.dataset.query('CREATE VIEW notes_public AS '
'SELECT content, timestamp FROM note '
'WHERE status = 1 ORDER BY timestamp DESC')
try:
self.assertTrue('notes_public' in self.dataset.views)
self.assertFalse('notes_public' in self.dataset.tables)
users = self.dataset['user']
with self.dataset.transaction():
users.insert(username='charlie')
users.insert(username='huey')
notes = self.dataset['note']
for i, (ct, st) in enumerate([('n1', 1), ('n2', 2), ('n3', 1)]):
notes.insert(content=ct, status=st, user_id='charlie',
timestamp=datetime.datetime(2022, 1, 1 + i))
self.assertFalse('notes_public' in self.dataset)
# Create a new dataset instance with views enabled.
dataset = DataSet(self.dataset._database, include_views=True)
self.assertTrue('notes_public' in dataset)
public = dataset['notes_public']
self.assertEqual(public.columns, ['content', 'timestamp'])
self.assertEqual(list(public), [
{'content': 'n3', 'timestamp': datetime.datetime(2022, 1, 3)},
{'content': 'n1', 'timestamp': datetime.datetime(2022, 1, 1)}])
finally:
self.dataset.query('DROP VIEW notes_public')
def test_item_apis(self):
dataset = DataSet('sqlite:///:memory:')
users = dataset['users']
users.insert(username='charlie')
self.assertEqual(list(users), [{'id': 1, 'username': 'charlie'}])
users[2] = {'username': 'huey', 'color': 'white'}
self.assertEqual(list(users), [
{'id': 1, 'username': 'charlie', 'color': None},
{'id': 2, 'username': 'huey', 'color': 'white'}])
users[2] = {'username': 'huey-x', 'kind': 'cat'}
self.assertEqual(list(users), [
{'id': 1, 'username': 'charlie', 'color': None, 'kind': None},
{'id': 2, 'username': 'huey-x', 'color': 'white', 'kind': 'cat'}])
del users[2]
self.assertEqual(list(users), [
{'id': 1, 'username': 'charlie', 'color': None, 'kind': None}])
users[1] = {'kind': 'person'}
users[2] = {'username': 'zaizee'}
users[2] = {'kind': 'cat'}
self.assertEqual(list(users), [
{'id': 1, 'username': 'charlie', 'color': None, 'kind': 'person'},
{'id': 2, 'username': 'zaizee', 'color': None, 'kind': 'cat'}])
def create_users(self, n=2):
user = self.dataset['user']
for i in range(min(n, len(self.names))):
user.insert(username=self.names[i])
def test_special_char_table(self):
self.database.execute_sql('CREATE TABLE "hello!!world" ("data" TEXT);')
self.database.execute_sql('INSERT INTO "hello!!world" VALUES (?)',
('test',))
ds = DataSet('sqlite:///%s' % self.database.database)
table = ds['hello!!world']
model = table.model_class
self.assertEqual(model._meta.table_name, 'hello!!world')
def test_column_preservation(self):
ds = DataSet('sqlite:///:memory:')
books = ds['books']
books.insert(book_id='BOOK1')
books.insert(bookId='BOOK2')
data = [(row['book_id'] or '', row['bookId'] or '') for row in books]
self.assertEqual(sorted(data), [
('', 'BOOK2'),
('BOOK1', '')])
def test_case_insensitive(self):
db.execute_sql('CREATE TABLE "SomeTable" (data TEXT);')
tables = sorted(self.dataset.tables)
self.assertEqual(tables, ['SomeTable', 'category', 'note', 'user'])
table = self.dataset['HueyMickey']
self.assertEqual(table.model_class._meta.table_name, 'HueyMickey')
tables = sorted(self.dataset.tables)
self.assertEqual(
tables,
['HueyMickey', 'SomeTable', 'category', 'note', 'user'])
# Subsequent lookup succeeds.
self.dataset['HueyMickey']
def test_introspect(self):
tables = sorted(self.dataset.tables)
self.assertEqual(tables, ['category', 'note', 'user'])
user = self.dataset['user']
columns = sorted(user.columns)
self.assertEqual(columns, ['username'])
note = self.dataset['note']
columns = sorted(note.columns)
self.assertEqual(columns, ['content', 'id', 'status', 'timestamp',
'user_id'])
category = self.dataset['category']
columns = sorted(category.columns)
self.assertEqual(columns, ['id', 'name', 'parent_id'])
def test_update_cache(self):
self.assertEqual(sorted(self.dataset.tables),
['category', 'note', 'user'])
db.execute_sql('create table "foo" (id INTEGER, data TEXT)')
Foo = self.dataset['foo']
self.assertEqual(sorted(Foo.columns), ['data', 'id'])
self.assertTrue('foo' in self.dataset._models)
self.dataset._models['foo'].drop_table()
self.dataset.update_cache()
self.assertTrue('foo' not in self.database.get_tables())
# This will create the table again.
Foo = self.dataset['foo']
self.assertTrue('foo' in self.database.get_tables())
self.assertEqual(Foo.columns, ['id'])
def assertQuery(self, query, expected, sort_key='id'):
key = operator.itemgetter(sort_key)
self.assertEqual(
sorted(list(query), key=key),
sorted(expected, key=key))
def test_insert(self):
self.create_users()
user = self.dataset['user']
expected = [
{'username': 'charlie'},
{'username': 'huey'}]
self.assertQuery(user.all(), expected, 'username')
user.insert(username='mickey', age=5)
expected = [
{'username': 'charlie', 'age': None},
{'username': 'huey', 'age': None},
{'username': 'mickey', 'age': 5}]
self.assertQuery(user.all(), expected, 'username')
query = user.find(username='charlie')
expected = [{'username': 'charlie', 'age': None}]
self.assertQuery(query, expected, 'username')
self.assertEqual(
user.find_one(username='mickey'),
{'username': 'mickey', 'age': 5})
self.assertTrue(user.find_one(username='xx') is None)
def test_update(self):
self.create_users()
user = self.dataset['user']
self.assertEqual(user.update(favorite_color='green'), 2)
expected = [
{'username': 'charlie', 'favorite_color': 'green'},
{'username': 'huey', 'favorite_color': 'green'}]
self.assertQuery(user.all(), expected, 'username')
res = user.update(
favorite_color='blue',
username='huey',
columns=['username'])
self.assertEqual(res, 1)
expected[1]['favorite_color'] = 'blue'
self.assertQuery(user.all(), expected, 'username')
def test_delete(self):
self.create_users()
user = self.dataset['user']
self.assertEqual(user.delete(username='huey'), 1)
self.assertEqual(list(user.all()), [{'username': 'charlie'}])
def test_find(self):
self.create_users(5)
user = self.dataset['user']
def assertUsernames(query, expected):
self.assertEqual(
sorted(row['username'] for row in query),
sorted(expected))
assertUsernames(user.all(), self.names)
assertUsernames(user.find(), self.names)
assertUsernames(user.find(username='charlie'), ['charlie'])
assertUsernames(user.find(username='missing'), [])
user.update(favorite_color='green')
for username in ['zaizee', 'huey']:
user.update(
favorite_color='blue',
username=username,
columns=['username'])
assertUsernames(
user.find(favorite_color='green'),
['charlie', 'mickey', 'peewee'])
assertUsernames(
user.find(favorite_color='blue'),
['zaizee', 'huey'])
assertUsernames(
user.find(favorite_color='green', username='peewee'),
['peewee'])
self.assertEqual(
user.find_one(username='charlie'),
{'username': 'charlie', 'favorite_color': 'green'})
def test_magic_methods(self):
self.create_users(5)
user = self.dataset['user']
# __len__()
self.assertEqual(len(user), 5)
# __iter__()
users = sorted([u for u in user], key=operator.itemgetter('username'))
self.assertEqual(users[0], {'username': 'charlie'})
self.assertEqual(users[-1], {'username': 'zaizee'})
# __contains__()
self.assertTrue('user' in self.dataset)
self.assertFalse('missing' in self.dataset)
def test_foreign_keys(self):
user = self.dataset['user']
user.insert(username='charlie')
note = self.dataset['note']
for i in range(1, 4):
note.insert(
content='note %s' % i,
timestamp=datetime.date(2014, 1, i),
status=i,
user_id='charlie')
notes = sorted(note.all(), key=operator.itemgetter('id'))
self.assertEqual(notes[0], {
'content': 'note 1',
'id': 1,
'status': 1,
'timestamp': datetime.datetime(2014, 1, 1),
'user_id': 'charlie'})
self.assertEqual(notes[-1], {
'content': 'note 3',
'id': 3,
'status': 3,
'timestamp': datetime.datetime(2014, 1, 3),
'user_id': 'charlie'})
user.insert(username='mickey')
note.update(user_id='mickey', id=3, columns=['id'])
self.assertEqual(note.find(user_id='charlie').count(), 2)
self.assertEqual(note.find(user_id='mickey').count(), 1)
category = self.dataset['category']
category.insert(name='c1')
c1 = category.find_one(name='c1')
self.assertEqual(c1, {'id': 1, 'name': 'c1', 'parent_id': None})
category.insert(name='c2', parent_id=1)
c2 = category.find_one(parent_id=1)
self.assertEqual(c2, {'id': 2, 'name': 'c2', 'parent_id': 1})
self.assertEqual(category.delete(parent_id=1), 1)
self.assertEqual(list(category.all()), [c1])
def test_transactions(self):
user = self.dataset['user']
with self.dataset.transaction() as txn:
user.insert(username='u1')
with self.dataset.transaction() as txn2:
user.insert(username='u2')
txn2.rollback()
with self.dataset.transaction() as txn3:
user.insert(username='u3')
with self.dataset.transaction() as txn4:
user.insert(username='u4')
txn3.rollback()
with self.dataset.transaction() as txn5:
user.insert(username='u5')
with self.dataset.transaction() as txn6:
with self.dataset.transaction() as txn7:
user.insert(username='u6')
txn7.rollback()
user.insert(username='u7')
user.insert(username='u8')
self.assertQuery(user.all(), [
{'username': 'u1'},
{'username': 'u5'},
{'username': 'u7'},
{'username': 'u8'},
], 'username')
def test_export(self):
self.create_users()
user = self.dataset['user']
buf = StringIO()
self.dataset.freeze(user.all(), 'json', file_obj=buf)
self.assertEqual(buf.getvalue(), (
'[{"username": "charlie"}, {"username": "huey"}]'))
buf = StringIO()
self.dataset.freeze(user.all(), 'csv', file_obj=buf)
self.assertEqual(buf.getvalue().splitlines(), [
'username',
'charlie',
'huey'])
@skip_if(sys.version_info[0] < 3, 'requires python 3.x')
def test_freeze_thaw_csv_utf8(self):
self._test_freeze_thaw_utf8('csv')
def test_freeze_thaw_json_utf8(self):
self._test_freeze_thaw_utf8('json')
def _test_freeze_thaw_utf8(self, fmt):
username_bytes = b'\xd0\x92obby' # Bobby with cyrillic "B".
username_str = username_bytes.decode('utf8')
u = User.create(username=username_str)
# Freeze the data as a the given format.
user = self.dataset['user']
filename = tempfile.mktemp() # Get a filename.
self.dataset.freeze(user.all(), fmt, filename)
# Clear out the table and reload.
User.delete().execute()
self.assertEqual(list(user.all()), [])
# Thaw the frozen data.
n = user.thaw(format=fmt, filename=filename)
self.assertEqual(n, 1)
self.assertEqual(list(user.all()), [{'username': username_str}])
def test_freeze_thaw(self):
user = self.dataset['user']
user.insert(username='charlie')
note = self.dataset['note']
note_ts = datetime.datetime(2017, 1, 2, 3, 4, 5)
note.insert(content='foo', timestamp=note_ts, user_id='charlie',
status=2)
buf = StringIO()
self.dataset.freeze(note.all(), 'json', file_obj=buf)
self.assertEqual(json.loads(buf.getvalue()), [{
'id': 1,
'user_id': 'charlie',
'content': 'foo',
'status': 2,
'timestamp': '2017-01-02 03:04:05'}])
note.delete(id=1)
self.assertEqual(list(note.all()), [])
buf.seek(0)
note.thaw(format='json', file_obj=buf)
self.assertEqual(list(note.all()), [{
'id': 1,
'user_id': 'charlie',
'content': 'foo',
'status': 2,
'timestamp': note_ts}])
def test_table_column_creation(self):
table = self.dataset['people']
table.insert(name='charlie')
self.assertEqual(table.columns, ['id', 'name'])
self.assertEqual(list(table.all()), [{'id': 1, 'name': 'charlie'}])
def test_table_column_creation_field_col(self):
table = self.dataset['people']
table.insert(**{'First Name': 'charlie'})
self.assertEqual(table.columns, ['id', 'First_Name'])
self.assertEqual(list(table.all()), [{'id': 1, 'First_Name': 'charlie'}])
table.insert(**{'First Name': 'huey'})
self.assertEqual(table.columns, ['id', 'First_Name'])
self.assertEqual(list(table.all().order_by(table.model_class.id)), [
{'id': 1, 'First_Name': 'charlie'},
{'id': 2, 'First_Name': 'huey'}])
def test_import_json(self):
table = self.dataset['people']
table.insert(name='charlie')
data = [
{'name': 'zaizee', 'foo': 1},
{'name': 'huey'},
{'name': 'mickey', 'foo': 2},
{'bar': None}]
buf = StringIO()
json.dump(data, buf)
buf.seek(0)
# All rows but the last will be inserted.
count = self.dataset.thaw('people', 'json', file_obj=buf, strict=True)
self.assertEqual(count, 3)
names = [row['name'] for row in self.dataset['people'].all()]
self.assertEqual(
set(names),
set(['charlie', 'huey', 'mickey', 'zaizee']))
# The columns have not changed.
self.assertEqual(table.columns, ['id', 'name'])
# No rows are inserted because no column overlap between `user` and the
# provided data.
buf.seek(0)
count = self.dataset.thaw('user', 'json', file_obj=buf, strict=True)
self.assertEqual(count, 0)
# Create a new table and load all data into it.
table = self.dataset['more_people']
# All rows and columns will be inserted.
buf.seek(0)
count = self.dataset.thaw('more_people', 'json', file_obj=buf)
self.assertEqual(count, 4)
self.assertEqual(
set(table.columns),
set(['id', 'name', 'bar', 'foo']))
self.assertEqual(sorted(table.all(), key=lambda row: row['id']), [
{'id': 1, 'name': 'zaizee', 'foo': 1, 'bar': None},
{'id': 2, 'name': 'huey', 'foo': None, 'bar': None},
{'id': 3, 'name': 'mickey', 'foo': 2, 'bar': None},
{'id': 4, 'name': None, 'foo': None, 'bar': None},
])
def test_import_csv(self):
table = self.dataset['people']
table.insert(name='charlie')
data = [
('zaizee', 1, None),
('huey', 2, 'foo'),
('mickey', 3, 'baze')]
buf = StringIO()
writer = csv.writer(buf)
writer.writerow(['name', 'foo', 'bar'])
writer.writerows(data)
buf.seek(0)
count = self.dataset.thaw('people', 'csv', file_obj=buf, strict=True)
self.assertEqual(count, 3)
names = [row['name'] for row in self.dataset['people'].all()]
self.assertEqual(
set(names),
set(['charlie', 'huey', 'mickey', 'zaizee']))
# The columns have not changed.
self.assertEqual(table.columns, ['id', 'name'])
# No rows are inserted because no column overlap between `user` and the
# provided data.
buf.seek(0)
count = self.dataset.thaw('user', 'csv', file_obj=buf, strict=True)
self.assertEqual(count, 0)
# Create a new table and load all data into it.
table = self.dataset['more_people']
# All rows and columns will be inserted.
buf.seek(0)
count = self.dataset.thaw('more_people', 'csv', file_obj=buf)
self.assertEqual(count, 3)
self.assertEqual(
set(table.columns),
set(['id', 'name', 'bar', 'foo']))
self.assertEqual(sorted(table.all(), key=lambda row: row['id']), [
{'id': 1, 'name': 'zaizee', 'foo': '1', 'bar': ''},
{'id': 2, 'name': 'huey', 'foo': '2', 'bar': 'foo'},
{'id': 3, 'name': 'mickey', 'foo': '3', 'bar': 'baze'},
])
def test_table_thaw(self):
table = self.dataset['people']
data = json.dumps([{'name': 'charlie'}, {'name': 'huey', 'color': 'white'}])
self.assertEqual(table.thaw(file_obj=StringIO(data), format='json'), 2)
self.assertEqual(list(table.all()), [
{'id': 1, 'name': 'charlie', 'color': None},
{'id': 2, 'name': 'huey', 'color': 'white'},
])
def test_creating_tables(self):
new_table = self.dataset['new_table']
new_table.insert(data='foo')
ref2 = self.dataset['new_table']
self.assertEqual(list(ref2.all()), [{'id': 1, 'data': 'foo'}])
| TestDataSet |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_organization_detector_details.py | {
"start": 29955,
"end": 33922
} | class ____(OrganizationDetectorDetailsBaseTest):
method = "DELETE"
@mock.patch(
"sentry.workflow_engine.endpoints.organization_detector_details.schedule_update_project_config"
)
def test_simple(self, mock_schedule_update_project_config: mock.MagicMock) -> None:
with outbox_runner():
self.get_success_response(self.organization.slug, self.detector.id)
assert RegionScheduledDeletion.objects.filter(
model_name="Detector", object_id=self.detector.id
).exists()
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
target_object=self.detector.id,
event=audit_log.get_event_id("DETECTOR_REMOVE"),
actor=self.user,
).exists()
self.detector.refresh_from_db()
assert self.detector.status == ObjectStatus.PENDING_DELETION
mock_schedule_update_project_config.assert_called_once_with(self.detector)
def test_error_group_type(self) -> None:
"""
Test that we do not delete the required error detector
"""
data_condition_group = self.create_data_condition_group()
error_detector = self.create_detector(
project=self.project,
name="Error Monitor",
type=ErrorGroupType.slug,
workflow_condition_group=data_condition_group,
)
with outbox_runner():
self.get_error_response(self.organization.slug, error_detector.id, status_code=403)
assert not RegionScheduledDeletion.objects.filter(
model_name="Detector", object_id=error_detector.id
).exists()
with assume_test_silo_mode(SiloMode.CONTROL):
assert not AuditLogEntry.objects.filter(
target_object=error_detector.id,
event=audit_log.get_event_id("DETECTOR_REMOVE"),
actor=self.user,
).exists()
@with_feature("organizations:anomaly-detection-alerts")
@mock.patch("sentry.seer.anomaly_detection.delete_rule.delete_rule_in_seer")
@mock.patch(
"sentry.workflow_engine.endpoints.organization_detector_details.schedule_update_project_config"
)
def test_anomaly_detection(
self, mock_schedule_update_project_config: mock.MagicMock, mock_seer_request: mock.MagicMock
) -> None:
self.detector.config = {"detection_type": AlertRuleDetectionType.DYNAMIC}
self.detector.save()
mock_seer_request.return_value = True
with outbox_runner():
self.get_success_response(self.organization.slug, self.detector.id)
assert RegionScheduledDeletion.objects.filter(
model_name="Detector", object_id=self.detector.id
).exists()
with assume_test_silo_mode(SiloMode.CONTROL):
assert AuditLogEntry.objects.filter(
target_object=self.detector.id,
event=audit_log.get_event_id("DETECTOR_REMOVE"),
actor=self.user,
).exists()
self.detector.refresh_from_db()
assert self.detector.status == ObjectStatus.PENDING_DELETION
mock_seer_request.assert_called_once_with(
source_id=int(self.data_source.source_id), organization=self.organization
)
def test_cannot_delete_system_created_detector(self) -> None:
error_detector = self.create_detector(
project=self.project,
name="Error Detector",
type=ErrorGroupType.slug,
)
self.get_error_response(self.organization.slug, error_detector.id, status_code=403)
# Verify detector was not deleted
error_detector.refresh_from_db()
assert error_detector.status != ObjectStatus.PENDING_DELETION
assert not RegionScheduledDeletion.objects.filter(
model_name="Detector", object_id=error_detector.id
).exists()
| OrganizationDetectorDetailsDeleteTest |
python | html5lib__html5lib-python | html5lib/html5parser.py | {
"start": 79294,
"end": 83142
} | class ____(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
__slots__ = tuple()
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
startTagHandler = _utils.MethodDispatcher([
("html", Phase.startTagHtml),
("tr", startTagTr),
(("td", "th"), startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
startTagTableOther)
])
startTagHandler.default = startTagOther
endTagHandler = _utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), endTagTableRowGroup),
("table", endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), endTagIgnore)
])
endTagHandler.default = endTagOther
| InTableBodyPhase |
python | tensorflow__tensorflow | tensorflow/python/distribute/cluster_resolver/tfconfig_cluster_resolver_test.py | {
"start": 1166,
"end": 10159
} | class ____(test.TestCase):
def _verifyClusterSpecEquality(self, cluster_spec, expected_proto):
self.assertProtoEquals(expected_proto, cluster_spec.as_cluster_def())
self.assertProtoEquals(
expected_proto, server_lib.ClusterSpec(cluster_spec).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_cluster_def()).as_cluster_def())
self.assertProtoEquals(
expected_proto,
server_lib.ClusterSpec(cluster_spec.as_dict()).as_cluster_def())
def testNormalClusterSpecRead(self):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
},
"task": {
"type": "ps",
"index": 0
}
}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 0 value: 'worker0:2222' }
tasks { key: 1 value: 'worker1:2222' }
tasks { key: 2 value: 'worker2:2222' } }
"""
actual_cluster_spec = cluster_resolver.cluster_spec()
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
def testSparseClusterSpecRead(self):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"ps": ["ps0:2222", "ps1:2222"],
"worker": {"1": "worker1:2222"}
},
"task": {
"type": "worker",
"index": 1
}
}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
expected_proto = """
job { name: 'ps' tasks { key: 0 value: 'ps0:2222' }
tasks { key: 1 value: 'ps1:2222' } }
job { name: 'worker' tasks { key: 1 value: 'worker1:2222' } }
"""
actual_cluster_spec = cluster_resolver.cluster_spec()
self._verifyClusterSpecEquality(actual_cluster_spec, expected_proto)
def testAutomaticMasterRead(self):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
},
"task": {
"type": "ps",
"index": 0
}
}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
self.assertEqual('ps0:2222', cluster_resolver.master())
def testSpecifiedTaskTypeAndIndexMasterRead(self):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
},
"task": {
"type": "ps",
"index": 0
}
}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
self.assertEqual('worker1:2222', cluster_resolver.master('worker', 1))
def testSessionMasterRead(self):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
},
"session_master": "sessionmaster:2222",
"task": {
"type": "ps",
"index": 0
}
}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
self.assertEqual('sessionmaster:2222', cluster_resolver.master())
def testRpcLayerRead(self):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
},
"rpc_layer": "grpc",
"task": {
"type": "ps",
"index": 0
}
}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
self.assertEqual('grpc://ps0:2222', cluster_resolver.master())
def testTaskTypeIndexRpcRead(self):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
},
"rpc_layer": "grpc",
"task": {
"type": "ps",
"index": 0
}
}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
self.assertEqual('ps', cluster_resolver.task_type)
self.assertEqual(0, cluster_resolver.task_id)
self.assertEqual('grpc', cluster_resolver.rpc_layer)
def testParameterOverrides(self):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
},
"rpc_layer": "grpc",
"task": {
"type": "ps",
"index": 1
}
}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver(
task_type='ps', task_id=0)
self.assertEqual('grpc://ps0:2222', cluster_resolver.master())
self.assertEqual('ps', cluster_resolver.task_type)
self.assertEqual(0, cluster_resolver.task_id)
cluster_resolver.task_type = 'worker'
cluster_resolver.task_id = 1
cluster_resolver.rpc_layer = 'test'
self.assertEqual('test://worker1:2222', cluster_resolver.master())
self.assertEqual('worker', cluster_resolver.task_type)
self.assertEqual(1, cluster_resolver.task_id)
self.assertEqual('test', cluster_resolver.rpc_layer)
def testTaskTypeCastToString(self):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"123456": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
},
"rpc_layer": "grpc",
"task": {
"type": 123456,
"index": 0
}
}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
self.assertEqual('123456', cluster_resolver.task_type)
def testTaskIndexCastToInteger(self):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"ps": ["ps0:2222", "ps1:2222"],
"worker": ["worker0:2222", "worker1:2222", "worker2:2222"]
},
"rpc_layer": "grpc",
"task": {
"type": "ps",
"index": "1"
}
}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
self.assertEqual(1, cluster_resolver.task_id)
def testTaskIndexOverride(self):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"worker": ["worker0:2222", "worker1:2222"]
},
"task": {
"type": "worker",
"index": "0"
}
}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver(
task_id=1)
self.assertEqual(1, cluster_resolver.task_id)
def testZeroItemsInClusterSpecMasterRead(self):
os.environ['TF_CONFIG'] = """
{}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
self.assertEqual('', cluster_resolver.master())
def testOneItemInClusterSpecMasterRead(self):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"worker": ["worker0:2222"]
}
}
"""
cluster_resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
self.assertEqual('', cluster_resolver.master())
@mock.patch.object(config, 'list_logical_devices')
@mock.patch.object(session.BaseSession, 'list_devices')
def testNumAcceleratorsFilterTasksByEnvVar(self, mock_list_devices,
mock_eager_list_devices):
os.environ['TF_CONFIG'] = """
{
"cluster": {
"worker1": ["w10:2222"],
"worker2": ["w21:2222", "w22:2222", "w23:2222", "w24:2222"]
},
"rpc_layer": "grpc",
"task": {
"type": "worker1",
"index": "0"
}
}
"""
devices = [
context.LogicalDevice('/job:worker1/task:0/device:TPU:0', 'TPU'),
context.LogicalDevice('/job:worker1/task:0/device:TPU:1', 'TPU'),
context.LogicalDevice('/job:worker1/task:0/device:GPU:0', 'GPU'),
context.LogicalDevice('/job:worker1/task:0/device:GPU:1', 'GPU'),
context.LogicalDevice('/job:worker2/task:1/device:TPU:2', 'TPU'),
context.LogicalDevice('/job:worker2/task:2/device:TPU:3', 'TPU'),
context.LogicalDevice('/job:worker2/task:3/device:GPU:2', 'GPU'),
context.LogicalDevice('/job:worker2/task:4/device:GPU:3', 'GPU'),
]
device_list = [
session._DeviceAttributes(d.name, d.device_type, 1024, 0)
for d in devices
]
mock_eager_list_devices.return_value = devices
mock_list_devices.return_value = device_list
resolver = tfconfig_cluster_resolver.TFConfigClusterResolver()
# By default we read from TF_CONFIG
self.assertEqual(resolver.num_accelerators(), {'TPU': 2, 'GPU': 2})
# Override still works when we want it to
self.assertEqual(resolver.num_accelerators(task_type='worker2', task_id=3),
{'GPU': 1})
if __name__ == '__main__':
test.main()
| TFConfigClusterResolverTest |
python | PrefectHQ__prefect | src/prefect/server/schemas/actions.py | {
"start": 33253,
"end": 33908
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to update a work pool."""
description: Optional[str] = Field(default=None)
is_paused: Optional[bool] = Field(default=None)
base_job_template: Optional[Dict[str, Any]] = Field(default=None)
concurrency_limit: Optional[NonNegativeInteger] = Field(default=None)
storage_configuration: Optional[schemas.core.WorkPoolStorageConfiguration] = Field(
default=None,
description="The storage configuration for the work pool.",
)
_validate_base_job_template = field_validator("base_job_template")(
validate_base_job_template
)
| WorkPoolUpdate |
python | huggingface__transformers | src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py | {
"start": 15519,
"end": 15590
} | class ____(GroundingDinoConvEncoder):
pass
| MMGroundingDinoConvEncoder |
python | django__django | tests/queries/models.py | {
"start": 7648,
"end": 7774
} | class ____(models.Model):
connection = models.ForeignKey(SharedConnection, models.CASCADE)
# Multi-layer ordering
| PointerB |
python | spyder-ide__spyder | spyder/plugins/editor/api/run.py | {
"start": 1522,
"end": 2103
} | class ____(TypedDict):
"""Schema emitted by the editor for the `Cell` run context."""
# File path to the file that contains the selection to execute.
path: str
# Actual cell text to execute.
cell: str
# Name of the cell.
cell_name: str
# Encoding of the text.
encoding: str
# Selection start and end in (line, column) format
line_col_bounds: Tuple[Tuple[int, int], Tuple[int, int]]
# Selection start and end in characters
character_bounds: Tuple[int, int]
# True if the text should be copied over.
copy: bool
| CellRun |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_vision.py | {
"start": 19318,
"end": 19956
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
def test_minimal_green_path(self, mock_hook):
op = CloudVisionDetectImageSafeSearchOperator(image=DETECT_TEST_IMAGE, task_id="id")
op.execute(context=None)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.safe_search_detection.assert_called_once_with(
image=DETECT_TEST_IMAGE, max_results=None, retry=DEFAULT, timeout=None, additional_properties=None
)
| TestCloudVisionDetectImageSafeSearchOperator |
python | pytorch__pytorch | torch/_dynamo/variables/ctx_manager.py | {
"start": 36384,
"end": 37136
} | class ____(ContextWrappingVariable):
"""
This class represents Python contextlib.nullcontext.
"""
def __init__(self, target_values: Optional[Any] = None, **kwargs: Any) -> None:
super().__init__(target_values=target_values, **kwargs)
def enter(self, tx: "InstructionTranslator") -> VariableTracker:
none = variables.ConstantVariable.create(None)
return self.target_values if self.target_values else none
def exit(
self, tx: "InstructionTranslator", *args: VariableTracker
) -> VariableTracker:
return variables.ConstantVariable.create(None)
def module_name(self) -> str:
return "contextlib"
def fn_name(self) -> str:
return "nullcontext"
| NullContextVariable |
python | pytorch__pytorch | torchgen/api/python.py | {
"start": 8984,
"end": 9063
} | class ____:
returns: tuple[Return, ...]
@dataclass(frozen=True)
| PythonReturns |
python | kamyu104__LeetCode-Solutions | Python/maximum-points-after-enemy-battles.py | {
"start": 38,
"end": 358
} | class ____(object):
def maximumPoints(self, enemyEnergies, currentEnergy):
"""
:type enemyEnergies: List[int]
:type currentEnergy: int
:rtype: int
"""
mn = min(enemyEnergies)
return ((currentEnergy-mn)+sum(enemyEnergies))//mn if currentEnergy >= mn else 0
| Solution |
python | django__django | tests/serializers/test_yaml.py | {
"start": 2914,
"end": 5048
} | class ____(SerializersTestBase, TestCase):
serializer_name = "yaml"
pkless_str = """- model: serializers.category
pk: null
fields:
name: Reference
- model: serializers.category
fields:
name: Non-fiction"""
mapping_ordering_str = (
"""- model: serializers.article
pk: %(article_pk)s
fields:
author: %(author_pk)s
headline: Poker has no place on ESPN
pub_date: 2006-06-16 11:00:00
categories:"""
+ (
" [%(first_category_pk)s, %(second_category_pk)s]"
if HAS_YAML and yaml.__version__ < "5.1"
else "\n - %(first_category_pk)s\n - %(second_category_pk)s"
)
+ """
meta_data: []
topics: []
"""
)
@staticmethod
def _validate_output(serial_str):
try:
yaml.safe_load(StringIO(serial_str))
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
ret_list.append(obj_dict["pk"])
return ret_list
@staticmethod
def _get_field_values(serial_str, field_name):
ret_list = []
stream = StringIO(serial_str)
for obj_dict in yaml.safe_load(stream):
if "fields" in obj_dict and field_name in obj_dict["fields"]:
field_value = obj_dict["fields"][field_name]
# yaml.safe_load will return non-string objects for some
# of the fields we are interested in, this ensures that
# everything comes back as a string
if isinstance(field_value, str):
ret_list.append(field_value)
else:
ret_list.append(str(field_value))
return ret_list
def test_yaml_deserializer_exception(self):
with self.assertRaises(DeserializationError):
for obj in serializers.deserialize("yaml", "{"):
pass
@unittest.skipUnless(HAS_YAML, "No yaml library detected")
| YamlSerializerTestCase |
python | scikit-learn__scikit-learn | doc/conf.py | {
"start": 22392,
"end": 39224
} | class ____(ExampleTitleSortKey):
"""Sorts release highlights based on version number."""
def __call__(self, filename):
title = super().__call__(filename)
prefix = "plot_release_highlights_"
# Use title to sort if not a release highlight
if not str(filename).startswith(prefix):
return title
major_minor = filename[len(prefix) :].split("_")[:2]
version_float = float(".".join(major_minor))
# negate to place the newest version highlights first
return -version_float
def notebook_modification_function(notebook_content, notebook_filename):
notebook_content_str = str(notebook_content)
warning_template = "\n".join(
[
"<div class='alert alert-{message_class}'>",
"",
"# JupyterLite warning",
"",
"{message}",
"</div>",
]
)
message_class = "warning"
message = (
"Running the scikit-learn examples in JupyterLite is experimental and you may"
" encounter some unexpected behavior.\n\nThe main difference is that imports"
" will take a lot longer than usual, for example the first `import sklearn` can"
" take roughly 10-20s.\n\nIf you notice problems, feel free to open an"
" [issue](https://github.com/scikit-learn/scikit-learn/issues/new/choose)"
" about it."
)
markdown = warning_template.format(message_class=message_class, message=message)
dummy_notebook_content = {"cells": []}
add_markdown_cell(dummy_notebook_content, markdown)
code_lines = []
if "seaborn" in notebook_content_str:
code_lines.append("%pip install seaborn")
if "plotly.express" in notebook_content_str:
code_lines.append("%pip install plotly nbformat")
if "skimage" in notebook_content_str:
code_lines.append("%pip install scikit-image")
if "polars" in notebook_content_str:
code_lines.append("%pip install polars")
if "fetch_" in notebook_content_str:
code_lines.extend(
[
"%pip install pyodide-http",
"import pyodide_http",
"pyodide_http.patch_all()",
]
)
# always import matplotlib and pandas to avoid Pyodide limitation with
# imports inside functions
code_lines.extend(["import matplotlib", "import pandas"])
# Work around https://github.com/jupyterlite/pyodide-kernel/issues/166
# and https://github.com/pyodide/micropip/issues/223 by installing the
# dependencies first, and then scikit-learn from Anaconda.org.
if "dev" in release:
dev_docs_specific_code = [
"import piplite",
"import joblib",
"import threadpoolctl",
"import scipy",
"await piplite.install(\n"
f" 'scikit-learn=={release}',\n"
" index_urls='https://pypi.anaconda.org/scientific-python-nightly-wheels/simple',\n"
")",
]
code_lines.extend(dev_docs_specific_code)
if code_lines:
code_lines = ["# JupyterLite-specific code"] + code_lines
code = "\n".join(code_lines)
add_code_cell(dummy_notebook_content, code)
notebook_content["cells"] = (
dummy_notebook_content["cells"] + notebook_content["cells"]
)
# Capture scikit-learn's global configuration at import time so that the
# gallery reset hook below can restore a clean state between examples.
default_global_config = sklearn.get_config()


def reset_sklearn_config(gallery_conf, fname):
    """Reset sklearn config to default values."""
    # Registered in sphinx_gallery_conf["reset_modules"]; undoes any
    # set_config() calls an example made (e.g. display="diagram").
    sklearn.set_config(**default_global_config)
# Source and output directories for sphinx-gallery, relative to doc/.
sg_examples_dir = "../examples"
sg_gallery_dir = "auto_examples"
sphinx_gallery_conf = {
    "doc_module": "sklearn",
    # Where per-object "examples using ..." backreference pages are written.
    "backreferences_dir": os.path.join("modules", "generated"),
    "show_memory": False,
    "reference_url": {"sklearn": None},
    "examples_dirs": [sg_examples_dir],
    "gallery_dirs": [sg_gallery_dir],
    "subsection_order": SubSectionTitleOrder(sg_examples_dir),
    "within_subsection_order": SKExampleTitleSortKey,
    "binder": {
        "org": "scikit-learn",
        "repo": "scikit-learn",
        "binderhub_url": "https://mybinder.org",
        "branch": binder_branch,
        "dependencies": "./binder/requirements.txt",
        "use_jupyter_lab": True,
    },
    # avoid generating too many cross links
    "inspect_global_variables": False,
    "remove_config_comments": True,
    "plot_gallery": "True",
    "recommender": {"enable": True, "n_examples": 4, "min_df": 12},
    # Reset hooks run between examples; see reset_sklearn_config above.
    "reset_modules": ("matplotlib", "seaborn", reset_sklearn_config),
}
if with_jupyterlite:
    sphinx_gallery_conf["jupyterlite"] = {
        "notebook_modification_function": notebook_modification_function
    }
# For the index page of the gallery and each nested section, we hide the secondary
# sidebar by specifying an empty list (no components), because there is no meaningful
# in-page toc for these pages, and they are generated so "sourcelink" is not useful
# either.
html_theme_options["secondary_sidebar_items"][f"{sg_gallery_dir}/index"] = []
for sub_sg_dir in (Path(".") / sg_examples_dir).iterdir():
    if sub_sg_dir.is_dir():
        # Each example subdirectory produces its own generated index page.
        html_theme_options["secondary_sidebar_items"][
            f"{sg_gallery_dir}/{sub_sg_dir.name}/index"
        ] = []
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {"sphx_glr_plot_classifier_comparison_001.png": 600}

# enable experimental module so that experimental estimators can be
# discovered properly by sphinx
from sklearn.experimental import (  # noqa: F401
    enable_halving_search_cv,
    enable_iterative_imputer,
)
def make_carousel_thumbs(app, exception):
    """Produce the final resized carousel images (build-finished hook)."""
    if exception is not None:
        # The build failed; there is nothing to post-process.
        return
    print("Preparing carousel images")
    images_root = os.path.join(app.builder.outdir, "_images")
    for source_name, width in carousel_thumbs.items():
        source_path = os.path.join(images_root, source_name)
        if not os.path.exists(source_path):
            continue
        # Replace the ".png" suffix with "_carousel.png" for the resized copy.
        thumb_path = os.path.join(images_root, source_name[:-4] + "_carousel.png")
        sphinx_gallery.gen_rst.scale_image(source_path, thumb_path, width, 190)
def filter_search_index(app, exception):
    """Strip ``__init__``/``__call__`` entries from the HTML search index."""
    if exception is not None:
        return
    # searchindex only exists when generating html
    if app.builder.name != "html":
        return
    print("Removing methods from search index")
    index_file = os.path.join(app.builder.outdir, "searchindex.js")
    with open(index_file) as fh:
        content = fh.read()
    for pattern in (r"{__init__.+?}", r"{__call__.+?}"):
        content = re.sub(pattern, "{}", content)
    with open(index_file, "w") as fh:
        fh.write(content)
# Config for sphinx_issues
# we use the issues path for PRs since the issues URL will forward
issues_github_path = "scikit-learn/scikit-learn"
def disable_plot_gallery_for_linkcheck(app):
    # Running the gallery examples is not needed to check links, so mutate the
    # module-level sphinx_gallery_conf before sphinx-gallery reads it.
    if app.builder.name == "linkcheck":
        sphinx_gallery_conf["plot_gallery"] = "False"
def skip_properties(app, what, name, obj, skip, options):
    """Skip properties that are fitted attributes."""
    # Fitted attributes follow the scikit-learn convention of a trailing
    # underscore (and no leading underscore), e.g. ``coef_``.
    is_fitted_attribute = (
        isinstance(obj, property)
        and name.endswith("_")
        and not name.startswith("_")
    )
    return True if is_fitted_attribute else skip
def setup(app):
    """Register the scikit-learn specific Sphinx event hooks."""
    # do not run the examples when using linkcheck by using a small priority
    # (default priority is 500 and sphinx-gallery using builder-inited event too)
    app.connect("builder-inited", disable_plot_gallery_for_linkcheck, priority=50)

    # triggered just before the HTML for an individual page is created
    app.connect("html-page-context", add_js_css_files)

    # resize the front-page carousel images once the build has finished
    app.connect("build-finished", make_carousel_thumbs)
    # strip __init__/__call__ entries from the HTML search index
    app.connect("build-finished", filter_search_index)

    # hide fitted-attribute properties from the generated API docs
    app.connect("autodoc-skip-member", skip_properties)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve(
    "sklearn",
    (
        "https://github.com/scikit-learn/"
        "scikit-learn/blob/{revision}/"
        "{package}/{path}#L{lineno}"
    ),
)

# Silence the warning matplotlib emits for every figure rendered with the
# non-GUI "agg" backend, to keep the build logs readable.
warnings.filterwarnings(
    "ignore",
    category=UserWarning,
    message=(
        "Matplotlib is currently using agg, which is a"
        " non-GUI backend, so cannot show the figure."
    ),
)

# TODO(1.10): remove PassiveAggressive
warnings.filterwarnings("ignore", category=FutureWarning, message="PassiveAggressive")

# Opt-in strict mode used in CI; see turn_warnings_into_errors for details.
if os.environ.get("SKLEARN_WARNINGS_AS_ERRORS", "0") != "0":
    turn_warnings_into_errors()
# maps functions with a class name that is indistinguishable when case is
# ignored to another filename
autosummary_filename_map = {
    "sklearn.cluster.dbscan": "dbscan-function",
    "sklearn.covariance.oas": "oas-function",
    "sklearn.decomposition.fastica": "fastica-function",
}

# Config for sphinxext.opengraph
# Fix: the site URL was missing the ".org" TLD ("https://scikit-learn/stable/"),
# which is not a valid public hostname and is inconsistent with ogp_image below.
ogp_site_url = "https://scikit-learn.org/stable/"
ogp_image = "https://scikit-learn.org/stable/_static/scikit-learn-logo-notext.png"
ogp_use_first_image = True
ogp_site_name = "scikit-learn"
# Config for linkcheck that checks the documentation for broken links
# ignore all links in 'whats_new' to avoid doing many github requests and
# hitting the github rate threshold that makes linkcheck take a lot of time
linkcheck_exclude_documents = [r"whats_new/.*"]

# default timeout to make some sites links fail faster
linkcheck_timeout = 10

# Allow redirects from doi.org
linkcheck_allowed_redirects = {r"https://doi.org/.+": r".*"}
linkcheck_ignore = [
    # ignore links to local html files e.g. in image directive :target: field
    r"^..?/",
    # ignore links to specific pdf pages because linkcheck does not handle them
    # ('utf-8' codec can't decode byte error)
    r"http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=.*",
    (
        "https://www.fordfoundation.org/media/2976/roads-and-bridges"
        "-the-unseen-labor-behind-our-digital-infrastructure.pdf#page=.*"
    ),
    # links falsely flagged as broken
    (
        "https://www.researchgate.net/publication/"
        "233096619_A_Dendrite_Method_for_Cluster_Analysis"
    ),
    (
        "https://www.researchgate.net/publication/221114584_Random_Fourier"
        "_Approximations_for_Skewed_Multiplicative_Histogram_Kernels"
    ),
    (
        "https://www.researchgate.net/publication/4974606_"
        "Hedonic_housing_prices_and_the_demand_for_clean_air"
    ),
    (
        "https://www.researchgate.net/profile/Anh-Huy-Phan/publication/220241471_Fast_"
        "Local_Algorithms_for_Large_Scale_Nonnegative_Matrix_and_Tensor_Factorizations"
    ),
    "https://doi.org/10.13140/RG.2.2.35280.02565",
    (
        "https://www.microsoft.com/en-us/research/uploads/prod/2006/01/"
        "Bishop-Pattern-Recognition-and-Machine-Learning-2006.pdf"
    ),
    "https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/tr-99-87.pdf",
    "https://microsoft.com/",
    "https://www.jstor.org/stable/2984099",
    "https://stat.uw.edu/sites/default/files/files/reports/2000/tr371.pdf",
    # Broken links from testimonials
    "http://www.bestofmedia.com",
    "http://www.data-publica.com/",
    "https://livelovely.com",
    "https://www.mars.com/global",
    "https://www.yhat.com",
    # Ignore some dynamically created anchors. See
    # https://github.com/sphinx-doc/sphinx/issues/9016 for more details about
    # the github example
    r"https://github.com/conda-forge/miniforge#miniforge",
    r"https://github.com/joblib/threadpoolctl/"
    "#setting-the-maximum-size-of-thread-pools",
    r"https://stackoverflow.com/questions/5836335/"
    "consistently-create-same-random-numpy-array/5837352#comment6712034_5837352",
]

# Use a browser-like user agent to avoid some "403 Client Error: Forbidden for
# url" errors. This is taken from the variable navigator.userAgent inside a
# browser console.
user_agent = (
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:100.0) Gecko/20100101 Firefox/100.0"
)

# Use Github token from environment variable to avoid Github rate limits when
# checking Github links
github_token = os.getenv("GITHUB_TOKEN")

if github_token is None:
    linkcheck_request_headers = {}
else:
    # Only attach the token to requests going to github.com.
    linkcheck_request_headers = {
        "https://github.com/": {"Authorization": f"token {github_token}"},
    }
def infer_next_release_versions():
    """Infer the most likely next release versions to make.

    Returns a dict with keys ``version_full``, ``version_short`` and
    ``previous_tag``, each mapping a release kind ("rc", "final", "bf") to the
    inferred version string.  If fetching or parsing the version switcher JSON
    fails for any reason, the placeholder values below are returned unchanged.
    """
    # Placeholder values, kept as-is when inference fails.
    all_version_full = {"rc": "0.99.0rc1", "final": "0.99.0", "bf": "0.98.1"}
    all_version_short = {"rc": "0.99", "final": "0.99", "bf": "0.98"}
    all_previous_tag = {"rc": "unused", "final": "0.98.33", "bf": "0.97.22"}

    try:
        # Fetch the version switcher JSON; see `html_theme_options` for more details
        versions_json = json.loads(
            urlopen(html_theme_options["switcher"]["json_url"], timeout=10).read()
        )

        # See `build_tools/circle/list_versions.py`, stable is always the second entry
        stable_version = parse(versions_json[1]["version"])
        last_stable_version = parse(versions_json[2]["version"])
        next_major_minor = f"{stable_version.major}.{stable_version.minor + 1}"

        # RC
        all_version_full["rc"] = f"{next_major_minor}.0rc1"
        all_version_short["rc"] = next_major_minor

        # Major/Minor final
        all_version_full["final"] = f"{next_major_minor}.0"
        all_version_short["final"] = next_major_minor
        all_previous_tag["final"] = stable_version.base_version

        # Bug-fix
        all_version_full["bf"] = (
            f"{stable_version.major}.{stable_version.minor}.{stable_version.micro + 1}"
        )
        all_version_short["bf"] = f"{stable_version.major}.{stable_version.minor}"
        all_previous_tag["bf"] = last_stable_version.base_version
    except Exception as e:
        # Best effort only: a network or schema problem must not fail the
        # documentation build, so log and fall through to the placeholders.
        logger.warning(
            "Failed to infer all possible next release versions because of "
            f"{type(e).__name__}: {e}"
        )

    return {
        "version_full": all_version_full,
        "version_short": all_version_short,
        "previous_tag": all_previous_tag,
    }
# -- Convert .rst.template files to .rst ---------------------------------------

from api_reference import API_REFERENCE, DEPRECATED_API_REFERENCE

from sklearn._min_dependencies import dependent_packages

# If development build, link to local page in the top navbar; otherwise link to the
# development version; see https://github.com/scikit-learn/scikit-learn/pull/22550
if parsed_version.is_devrelease:
    development_link = "developers/index"
else:
    development_link = "https://scikit-learn.org/dev/developers/index.html"

# Define the templates and target files for conversion
# Each entry is in the format (template name, file name, kwargs for rendering)
rst_templates = [
    ("index", "index", {"development_link": development_link}),
    (
        "developers/maintainer",
        "developers/maintainer",
        {"inferred": infer_next_release_versions()},
    ),
    (
        "min_dependency_table",
        "min_dependency_table",
        {"dependent_packages": dependent_packages},
    ),
    (
        "min_dependency_substitutions",
        "min_dependency_substitutions",
        {"dependent_packages": dependent_packages},
    ),
    (
        "api/index",
        "api/index",
        {
            # Sorted for a stable, alphabetical page layout.
            "API_REFERENCE": sorted(API_REFERENCE.items(), key=lambda x: x[0]),
            "DEPRECATED_API_REFERENCE": sorted(
                DEPRECATED_API_REFERENCE.items(), key=lambda x: x[0], reverse=True
            ),
        },
    ),
]
# Convert each module API reference page
rst_templates.extend(
    (
        "api/module",
        f"api/{module_name}",
        {"module": module_name, "module_info": module_info},
    )
    for module_name, module_info in API_REFERENCE.items()
)

# Convert the deprecated API reference page (if there exists any)
if DEPRECATED_API_REFERENCE:
    deprecated_sorted = sorted(
        DEPRECATED_API_REFERENCE.items(), key=lambda x: x[0], reverse=True
    )
    rst_templates.append(
        (
            "api/deprecated",
            "api/deprecated",
            {"DEPRECATED_API_REFERENCE": deprecated_sorted},
        )
    )
for template_name, target_name, render_kwargs in rst_templates:
    # Read the corresponding template file into jinja2
    template_text = (Path(".") / f"{template_name}.rst.template").read_text(
        encoding="utf-8"
    )
    template = jinja2.Template(template_text)

    # Render the template and write to the target
    (Path(".") / f"{target_name}.rst").write_text(
        template.render(**render_kwargs), encoding="utf-8"
    )
| SKExampleTitleSortKey |
python | python-pillow__Pillow | src/PIL/ImageCms.py | {
"start": 11501,
"end": 40676
class PyCMSError(Exception):
    """(pyCMS) Exception class.

    This is used for all errors in the pyCMS API: the module's functions wrap
    lower-level OSError/TypeError/ValueError failures in this type.
    """

    # Fix: the class name was a "____" placeholder, but every function in this
    # module raises and documents PyCMSError, which would be a NameError.
    pass
def profileToProfile(
    im: Image.Image,
    inputProfile: _CmsProfileCompatible,
    outputProfile: _CmsProfileCompatible,
    renderingIntent: Intent = Intent.PERCEPTUAL,
    outputMode: str | None = None,
    inPlace: bool = False,
    flags: Flags = Flags.NONE,
) -> Image.Image | None:
    """
    (pyCMS) Applies an ICC transformation to a given image, mapping from
    ``inputProfile`` to ``outputProfile``.

    The image is converted from the input profile's color space to the output
    profile's color space, using ``renderingIntent`` to decide how out-of-gamut
    colors are handled.  ``outputMode`` may additionally request a mode
    conversion (e.g. RGB -> CMYK); the input profile must then handle the
    source mode and the output profile the target mode.

    :param im: An open :py:class:`~PIL.Image.Image` object.
    :param inputProfile: Filename path to an ICC input profile, or a profile
        object.
    :param outputProfile: Filename path to an ICC output profile, or a profile
        object.
    :param renderingIntent: Integer (0-3) specifying the rendering intent:

            ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
            ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
            ImageCms.Intent.SATURATION = 2
            ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3

    :param outputMode: A valid PIL mode for the output image; defaults to
        ``im.mode``.  When ``inPlace`` is true it must equal the input mode.
    :param inPlace: If ``True``, modify ``im`` in place and return ``None``;
        if ``False`` (default), return a new transformed image.
    :param flags: Integer (0-...) specifying additional flags
    :returns: Either ``None`` or a new :py:class:`~PIL.Image.Image` object,
        depending on the value of ``inPlace``.
    :exception PyCMSError: If a profile is invalid, the mode combination is
        unsupported, or applying the transform fails.
    """
    if outputMode is None:
        outputMode = im.mode

    # Validate numeric arguments up front, before building any lcms objects.
    if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
        msg = "renderingIntent must be an integer between 0 and 3"
        raise PyCMSError(msg)
    if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
        msg = f"flags must be an integer between 0 and {_MAX_FLAG}"
        raise PyCMSError(msg)

    try:
        # Coerce filename/file-like arguments into profile objects.
        input_profile = (
            inputProfile
            if isinstance(inputProfile, ImageCmsProfile)
            else ImageCmsProfile(inputProfile)
        )
        output_profile = (
            outputProfile
            if isinstance(outputProfile, ImageCmsProfile)
            else ImageCmsProfile(outputProfile)
        )
        transform = ImageCmsTransform(
            input_profile,
            output_profile,
            im.mode,
            outputMode,
            renderingIntent,
            flags=flags,
        )
        if inPlace:
            transform.apply_in_place(im)
            result = None
        else:
            result = transform.apply(im)
    except (OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
    return result
def getOpenProfile(
    profileFilename: str | SupportsRead[bytes] | core.CmsProfile,
) -> ImageCmsProfile:
    """
    (pyCMS) Opens an ICC profile file.

    The returned profile object can be passed back into pyCMS to build
    transforms (as in :py:func:`buildTransformFromOpenProfiles`).

    :param profileFilename: Filename path to the ICC profile you wish to open,
        or a file-like object.
    :returns: A CmsProfile class object.
    :exception PyCMSError: If the argument is not a valid ICC profile.
    """
    try:
        return ImageCmsProfile(profileFilename)
    except (OSError, TypeError, ValueError) as v:
        # Normalise every failure mode into the module's exception type.
        raise PyCMSError(v) from v
def buildTransform(
    inputProfile: _CmsProfileCompatible,
    outputProfile: _CmsProfileCompatible,
    inMode: str,
    outMode: str,
    renderingIntent: Intent = Intent.PERCEPTUAL,
    flags: Flags = Flags.NONE,
) -> ImageCmsTransform:
    """
    (pyCMS) Builds an ICC transform mapping from ``inputProfile`` to
    ``outputProfile``; apply it with :py:func:`applyTransform`.

    Building the transform is a fair part of the overhead of
    :py:func:`profileToProfile`, so pre-building one and reusing it saves time
    when converting multiple images with the same settings.  The returned
    object remembers the PIL modes it was built for (``inMode``/``outMode``
    attributes) and only converts images whose mode equals ``inMode``,
    producing images in ``outMode``.

    :param inputProfile: Filename path to an ICC input profile, or a profile
        object.
    :param outputProfile: Filename path to an ICC output profile, or a profile
        object.
    :param inMode: A valid PIL mode the input profile supports (e.g. "RGB",
        "RGBA", "CMYK").
    :param outMode: A valid PIL mode the output profile supports.
    :param renderingIntent: Integer (0-3) deciding how out-of-gamut colors are
        handled:

            ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
            ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
            ImageCms.Intent.SATURATION = 2
            ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3

    :param flags: Integer (0-...) specifying additional flags
    :returns: A CmsTransform class object.
    :exception PyCMSError: If a profile is invalid, a mode is unsupported, or
        the transform cannot be created.
    """
    if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
        msg = "renderingIntent must be an integer between 0 and 3"
        raise PyCMSError(msg)
    if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
        msg = f"flags must be an integer between 0 and {_MAX_FLAG}"
        raise PyCMSError(msg)

    try:
        # Coerce filename/file-like arguments into profile objects.
        in_profile = (
            inputProfile
            if isinstance(inputProfile, ImageCmsProfile)
            else ImageCmsProfile(inputProfile)
        )
        out_profile = (
            outputProfile
            if isinstance(outputProfile, ImageCmsProfile)
            else ImageCmsProfile(outputProfile)
        )
        return ImageCmsTransform(
            in_profile, out_profile, inMode, outMode, renderingIntent, flags=flags
        )
    except (OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def buildProofTransform(
    inputProfile: _CmsProfileCompatible,
    outputProfile: _CmsProfileCompatible,
    proofProfile: _CmsProfileCompatible,
    inMode: str,
    outMode: str,
    renderingIntent: Intent = Intent.PERCEPTUAL,
    proofRenderingIntent: Intent = Intent.ABSOLUTE_COLORIMETRIC,
    flags: Flags = Flags.SOFTPROOFING,
) -> ImageCmsTransform:
    """
    (pyCMS) Builds an ICC transform from ``inputProfile`` to
    ``outputProfile``, simulating the result that would be obtained on the
    ``proofProfile`` device ("soft-proofing").

    Soft-proofing adjusts the colors on the output device (typically a
    monitor) to preview what the simulated device (typically a more expensive
    or slower printer) would produce.  ``renderingIntent`` governs the
    input->proof mapping and ``proofRenderingIntent`` the proof->output
    mapping.  Usage of the returned transform is exactly the same as with
    :py:func:`buildTransform`; note that results may be marginal when the
    simulated device has a much wider gamut than the output device.

    :param inputProfile: Filename path to an ICC input profile, or a profile
        object.
    :param outputProfile: Filename path to an ICC output (monitor, usually)
        profile, or a profile object.
    :param proofProfile: Filename path to an ICC proof profile, or a profile
        object.
    :param inMode: A valid PIL mode the input profile supports (e.g. "RGB",
        "RGBA", "CMYK").
    :param outMode: A valid PIL mode the output profile supports.
    :param renderingIntent: Integer (0-3) for the input->proof transform:

            ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
            ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
            ImageCms.Intent.SATURATION = 2
            ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3

    :param proofRenderingIntent: Integer (0-3) for the proof->output
        transform, same values as above.
    :param flags: Integer (0-...) specifying additional flags
    :returns: A CmsTransform class object.
    :exception PyCMSError: If a profile is invalid, a mode is unsupported, or
        the transform cannot be created.
    """
    if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
        msg = "renderingIntent must be an integer between 0 and 3"
        raise PyCMSError(msg)
    if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
        msg = f"flags must be an integer between 0 and {_MAX_FLAG}"
        raise PyCMSError(msg)

    try:
        # Coerce each argument into a profile object, in declaration order.
        profiles = []
        for candidate in (inputProfile, outputProfile, proofProfile):
            if not isinstance(candidate, ImageCmsProfile):
                candidate = ImageCmsProfile(candidate)
            profiles.append(candidate)
        in_profile, out_profile, proof = profiles
        return ImageCmsTransform(
            in_profile,
            out_profile,
            inMode,
            outMode,
            renderingIntent,
            proof,
            proofRenderingIntent,
            flags,
        )
    except (OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
# Backwards-compatible aliases: the build* functions above already accept
# open profile objects as well as filenames, so the *FromOpenProfiles
# variants are plain aliases.
buildTransformFromOpenProfiles = buildTransform
buildProofTransformFromOpenProfiles = buildProofTransform
def applyTransform(
    im: Image.Image, transform: ImageCmsTransform, inPlace: bool = False
) -> Image.Image | None:
    """
    (pyCMS) Applies a pre-calculated transform (from
    :py:func:`buildTransform` or :py:func:`buildTransformFromOpenProfiles`)
    to an image.

    Reusing one transform for many images saves considerable calculation time
    compared to rebuilding it per image.  In-place application is only
    possible when ``transform.input_mode == transform.output_mode``, because
    the mode (and hence the buffer size) cannot change in place.

    :param im: An :py:class:`~PIL.Image.Image` object; ``im.mode`` must match
        the transform's ``input_mode``.
    :param transform: A valid CmsTransform class object
    :param inPlace: If ``True``, ``im`` is modified in place and ``None`` is
        returned; if ``False`` (default), a new image with the transform
        applied is returned and ``im`` is left unchanged.
    :returns: Either ``None`` or a new :py:class:`~PIL.Image.Image` object,
        depending on ``inPlace``.  The profile will be returned in the
        image's ``info['icc_profile']``.
    :exception PyCMSError: If the modes are incompatible or applying the
        transform fails.
    """
    try:
        if inPlace:
            transform.apply_in_place(im)
            return None
        return transform.apply(im)
    except (TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def createProfile(
    colorSpace: Literal["LAB", "XYZ", "sRGB"], colorTemp: SupportsFloat = 0
) -> core.CmsProfile:
    """
    (pyCMS) Creates a common profile on-the-fly, instead of having to supply
    a profile file on disk.

    The result is a normal CmsProfile object that can be passed to
    :py:func:`buildTransformFromOpenProfiles` to create a transform.

    :param colorSpace: The color space of the profile to create; currently
        only "LAB", "XYZ", and "sRGB" are supported.
    :param colorTemp: Positive number, white point in degrees Kelvin (e.g.
        5000, 6500, 9600).  Defaults to the D50 illuminant (5000K) if
        omitted.  Only applied to LAB profiles; ignored for XYZ and sRGB.
    :returns: A CmsProfile class object
    :exception PyCMSError: If ``colorSpace`` is unsupported, ``colorTemp`` is
        not numeric for a LAB profile, or profile creation fails.
    """
    if colorSpace not in ["LAB", "XYZ", "sRGB"]:
        msg = (
            f"Color space not supported for on-the-fly profile creation ({colorSpace})"
        )
        raise PyCMSError(msg)

    if colorSpace == "LAB":
        # LAB is the only space parameterised by a white point; validate it.
        try:
            colorTemp = float(colorTemp)
        except (TypeError, ValueError) as e:
            msg = f'Color temperature must be numeric, "{colorTemp}" not valid'
            raise PyCMSError(msg) from e

    try:
        return core.createProfile(colorSpace, colorTemp)
    except (TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def getProfileName(profile: _CmsProfileCompatible) -> str:
    """
    (pyCMS) Gets the internal product name for the given profile.

    This is the name stored in an ICC tag in the profile itself, usually the
    one used when the profile was created; it may contain extra information
    supplied by the creator.

    :param profile: A valid CmsProfile object, or the filename of an ICC
        profile.
    :returns: The internal profile name, with a trailing newline (kept for
        pyCMS compatibility).
    :exception PyCMSError: If the profile is invalid or the name tag cannot
        be read.
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        # Pure-python port of the 1.x C behaviour: prefer "model -
        # manufacturer", fall back to just the model when the manufacturer is
        # missing or the model name is long, then to the description.
        model = profile.profile.model
        manufacturer = profile.profile.manufacturer

        if not model and not manufacturer:
            # add an extra newline to preserve pyCMS compatibility
            return (profile.profile.profile_description or "") + "\n"
        if not manufacturer or (model and len(model) > 30):
            return f"{model}\n"
        return f"{model} - {manufacturer}\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def getProfileInfo(profile: _CmsProfileCompatible) -> str:
    """
    (pyCMS) Gets the internal product information for the given profile.

    This is the information stored in the profile's info tag, often details
    about the profile and how it was created, as supplied by the creator.

    :param profile: A valid CmsProfile object, or the filename of an ICC
        profile.
    :returns: The profile description and copyright, joined and terminated by
        blank lines (the historical pyCMS format).
    :exception PyCMSError: If the profile is invalid or the info tag cannot
        be read.
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        # Python port of the C behaviour; the original also appended the
        # white point, but that was skipped because it wasn't working well.
        parts = [
            text
            for text in (profile.profile.profile_description, profile.profile.copyright)
            if text
        ]
        return "\r\n\r\n".join(parts) + "\r\n\r\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def getProfileCopyright(profile: _CmsProfileCompatible) -> str:
    """
    (pyCMS) Gets the copyright for the given profile.

    :param profile: A valid CmsProfile object, or the filename of an ICC
        profile.
    :returns: The contents of the profile's copyright tag, with a trailing
        newline (kept for pyCMS compatibility).
    :exception PyCMSError: If the profile is invalid or the copyright tag
        cannot be read.
    """
    try:
        if not isinstance(profile, ImageCmsProfile):
            profile = ImageCmsProfile(profile)
        tag = profile.profile.copyright
        # add an extra newline to preserve pyCMS compatibility
        return (tag or "") + "\n"
    except (AttributeError, OSError, TypeError, ValueError) as v:
        raise PyCMSError(v) from v
def getProfileManufacturer(profile: _CmsProfileCompatible) -> str:
"""
(pyCMS) Gets the manufacturer for the given profile.
If ``profile`` isn't a valid CmsProfile object or filename to a profile, a
:exc:`PyCMSError` is raised.
If an error occurs while trying to obtain the manufacturer tag, a
:exc:`PyCMSError` is raised.
Use this function to obtain the information stored in the profile's
manufacturer tag.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: A string containing the internal profile information stored in
an ICC tag.
:exception PyCMSError:
"""
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return (profile.profile.manufacturer or "") + "\n"
except (AttributeError, OSError, TypeError, ValueError) as v:
raise PyCMSError(v) from v
def getProfileModel(profile: _CmsProfileCompatible) -> str:
"""
(pyCMS) Gets the model for the given profile.
If ``profile`` isn't a valid CmsProfile object or filename to a profile, a
:exc:`PyCMSError` is raised.
If an error occurs while trying to obtain the model tag,
a :exc:`PyCMSError` is raised.
Use this function to obtain the information stored in the profile's
model tag.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: A string containing the internal profile information stored in
an ICC tag.
:exception PyCMSError:
"""
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return (profile.profile.model or "") + "\n"
except (AttributeError, OSError, TypeError, ValueError) as v:
raise PyCMSError(v) from v
def getProfileDescription(profile: _CmsProfileCompatible) -> str:
"""
(pyCMS) Gets the description for the given profile.
If ``profile`` isn't a valid CmsProfile object or filename to a profile, a
:exc:`PyCMSError` is raised.
If an error occurs while trying to obtain the description tag,
a :exc:`PyCMSError` is raised.
Use this function to obtain the information stored in the profile's
description tag.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: A string containing the internal profile information stored in an
ICC tag.
:exception PyCMSError:
"""
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return (profile.profile.profile_description or "") + "\n"
except (AttributeError, OSError, TypeError, ValueError) as v:
raise PyCMSError(v) from v
def getDefaultIntent(profile: _CmsProfileCompatible) -> int:
"""
(pyCMS) Gets the default intent name for the given profile.
If ``profile`` isn't a valid CmsProfile object or filename to a profile, a
:exc:`PyCMSError` is raised.
If an error occurs while trying to obtain the default intent, a
:exc:`PyCMSError` is raised.
Use this function to determine the default (and usually best optimized)
rendering intent for this profile. Most profiles support multiple
rendering intents, but are intended mostly for one type of conversion.
If you wish to use a different intent than returned, use
ImageCms.isIntentSupported() to verify it will work first.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:returns: Integer 0-3 specifying the default rendering intent for this
profile.
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
ImageCms.Intent.SATURATION = 2
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
see the pyCMS documentation for details on rendering intents and what
they do.
:exception PyCMSError:
"""
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.rendering_intent
except (AttributeError, OSError, TypeError, ValueError) as v:
raise PyCMSError(v) from v
def isIntentSupported(
profile: _CmsProfileCompatible, intent: Intent, direction: Direction
) -> Literal[-1, 1]:
"""
(pyCMS) Checks if a given intent is supported.
Use this function to verify that you can use your desired
``intent`` with ``profile``, and that ``profile`` can be used for the
input/output/proof profile as you desire.
Some profiles are created specifically for one "direction", can cannot
be used for others. Some profiles can only be used for certain
rendering intents, so it's best to either verify this before trying
to create a transform with them (using this function), or catch the
potential :exc:`PyCMSError` that will occur if they don't
support the modes you select.
:param profile: EITHER a valid CmsProfile object, OR a string of the
filename of an ICC profile.
:param intent: Integer (0-3) specifying the rendering intent you wish to
use with this profile
ImageCms.Intent.PERCEPTUAL = 0 (DEFAULT)
ImageCms.Intent.RELATIVE_COLORIMETRIC = 1
ImageCms.Intent.SATURATION = 2
ImageCms.Intent.ABSOLUTE_COLORIMETRIC = 3
see the pyCMS documentation for details on rendering intents and what
they do.
:param direction: Integer specifying if the profile is to be used for
input, output, or proof
INPUT = 0 (or use ImageCms.Direction.INPUT)
OUTPUT = 1 (or use ImageCms.Direction.OUTPUT)
PROOF = 2 (or use ImageCms.Direction.PROOF)
:returns: 1 if the intent/direction are supported, -1 if they are not.
:exception PyCMSError:
"""
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# FIXME: I get different results for the same data w. different
# compilers. Bug in LittleCMS or in the binding?
if profile.profile.is_intent_supported(intent, direction):
return 1
else:
return -1
except (AttributeError, OSError, TypeError, ValueError) as v:
raise PyCMSError(v) from v
| PyCMSError |
python | pytorch__pytorch | torch/distributions/multinomial.py | {
"start": 346,
"end": 5714
} | class ____(Distribution):
r"""
Creates a Multinomial distribution parameterized by :attr:`total_count` and
either :attr:`probs` or :attr:`logits` (but not both). The innermost dimension of
:attr:`probs` indexes over categories. All other dimensions index over batches.
Note that :attr:`total_count` need not be specified if only :meth:`log_prob` is
called (see example below)
.. note:: The `probs` argument must be non-negative, finite and have a non-zero sum,
and it will be normalized to sum to 1 along the last dimension. :attr:`probs`
will return this normalized value.
The `logits` argument will be interpreted as unnormalized log probabilities
and can therefore be any real number. It will likewise be normalized so that
the resulting probabilities sum to 1 along the last dimension. :attr:`logits`
will return this normalized value.
- :meth:`sample` requires a single shared `total_count` for all
parameters and samples.
- :meth:`log_prob` allows different `total_count` for each parameter and
sample.
Example::
>>> # xdoctest: +SKIP("FIXME: found invalid values")
>>> m = Multinomial(100, torch.tensor([ 1., 1., 1., 1.]))
>>> x = m.sample() # equal probability of 0, 1, 2, 3
tensor([ 21., 24., 30., 25.])
>>> Multinomial(probs=torch.tensor([1., 1., 1., 1.])).log_prob(x)
tensor([-4.1338])
Args:
total_count (int): number of trials
probs (Tensor): event probabilities
logits (Tensor): event log probabilities (unnormalized)
"""
# pyrefly: ignore [bad-override]
arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector}
total_count: int
@property
def mean(self) -> Tensor:
return self.probs * self.total_count
@property
def variance(self) -> Tensor:
return self.total_count * self.probs * (1 - self.probs)
def __init__(
self,
total_count: int = 1,
probs: Optional[Tensor] = None,
logits: Optional[Tensor] = None,
validate_args: Optional[bool] = None,
) -> None:
if not isinstance(total_count, int):
raise NotImplementedError("inhomogeneous total_count is not supported")
self.total_count = total_count
self._categorical = Categorical(probs=probs, logits=logits)
self._binomial = Binomial(total_count=total_count, probs=self.probs)
batch_shape = self._categorical.batch_shape
event_shape = self._categorical.param_shape[-1:]
super().__init__(batch_shape, event_shape, validate_args=validate_args)
def expand(self, batch_shape, _instance=None):
new = self._get_checked_instance(Multinomial, _instance)
batch_shape = torch.Size(batch_shape)
new.total_count = self.total_count
new._categorical = self._categorical.expand(batch_shape)
super(Multinomial, new).__init__(
batch_shape, self.event_shape, validate_args=False
)
new._validate_args = self._validate_args
return new
def _new(self, *args, **kwargs):
return self._categorical._new(*args, **kwargs)
@constraints.dependent_property(is_discrete=True, event_dim=1)
# pyrefly: ignore [bad-override]
def support(self):
return constraints.multinomial(self.total_count)
@property
def logits(self) -> Tensor:
return self._categorical.logits
@property
def probs(self) -> Tensor:
return self._categorical.probs
@property
def param_shape(self) -> torch.Size:
return self._categorical.param_shape
def sample(self, sample_shape=torch.Size()):
sample_shape = torch.Size(sample_shape)
samples = self._categorical.sample(
torch.Size((self.total_count,)) + sample_shape
)
# samples.shape is (total_count, sample_shape, batch_shape), need to change it to
# (sample_shape, batch_shape, total_count)
shifted_idx = list(range(samples.dim()))
shifted_idx.append(shifted_idx.pop(0))
samples = samples.permute(*shifted_idx)
counts = samples.new(self._extended_shape(sample_shape)).zero_()
counts.scatter_add_(-1, samples, torch.ones_like(samples))
return counts.type_as(self.probs)
def entropy(self):
n = torch.tensor(self.total_count)
cat_entropy = self._categorical.entropy()
term1 = n * cat_entropy - torch.lgamma(n + 1)
support = self._binomial.enumerate_support(expand=False)[1:]
binomial_probs = torch.exp(self._binomial.log_prob(support))
weights = torch.lgamma(support + 1)
term2 = (binomial_probs * weights).sum([0, -1])
return term1 + term2
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
logits, value = broadcast_all(self.logits, value)
logits = logits.clone(memory_format=torch.contiguous_format)
log_factorial_n = torch.lgamma(value.sum(-1) + 1)
log_factorial_xs = torch.lgamma(value + 1).sum(-1)
logits[(value == 0) & (logits == -inf)] = 0
log_powers = (logits * value).sum(-1)
return log_factorial_n - log_factorial_xs + log_powers
| Multinomial |
python | facebookresearch__faiss | tests/test_contrib.py | {
"start": 5635,
"end": 7264
} | class ____(unittest.TestCase):
def test_LinearTransform(self):
# training data
xt = np.random.rand(1000, 20).astype('float32')
# test data
x = np.random.rand(10, 20).astype('float32')
# make the PCA matrix
pca = faiss.PCAMatrix(20, 10)
pca.train(xt)
# apply it to test data
yref = pca.apply_py(x)
A, b = inspect_tools.get_LinearTransform_matrix(pca)
# verify
ynew = x @ A.T + b
np.testing.assert_array_almost_equal(yref, ynew)
def test_IndexFlat(self):
xb = np.random.rand(13, 20).astype('float32')
index = faiss.IndexFlatL2(20)
index.add(xb)
np.testing.assert_array_equal(
xb, inspect_tools.get_flat_data(index)
)
def test_make_LT(self):
rs = np.random.RandomState(123)
X = rs.rand(13, 20).astype('float32')
A = rs.rand(5, 20).astype('float32')
b = rs.rand(5).astype('float32')
Yref = X @ A.T + b
lt = inspect_tools.make_LinearTransform_matrix(A, b)
Ynew = lt.apply(X)
np.testing.assert_allclose(Yref, Ynew, rtol=1e-06)
def test_NSG_neighbors(self):
# FIXME number of elements to add should be >> 100
ds = datasets.SyntheticDataset(32, 0, 200, 10)
index = faiss.index_factory(ds.d, "NSG")
index.add(ds.get_database())
neighbors = inspect_tools.get_NSG_neighbors(index.nsg)
# neighbors should be either valid indexes or -1
np.testing.assert_array_less(-2, neighbors)
np.testing.assert_array_less(neighbors, ds.nb)
| TestInspect |
python | yaml__pyyaml | lib/yaml/tokens.py | {
"start": 1236,
"end": 1307
} | class ____(Token):
id = '<block mapping start>'
| BlockMappingStartToken |
python | ipython__ipython | tests/test_completerlib.py | {
"start": 2995,
"end": 6496
} | class ____(unittest.TestCase):
@onlyif_unicode_paths
def setUp(self):
self.BASETESTDIR = tempfile.mkdtemp()
for fil in ["aaø.py", "a.py", "b.py"]:
with open(join(self.BASETESTDIR, fil), "w", encoding="utf-8") as sfile:
sfile.write("pass\n")
self.oldpath = os.getcwd()
os.chdir(self.BASETESTDIR)
def tearDown(self):
os.chdir(self.oldpath)
shutil.rmtree(self.BASETESTDIR)
@onlyif_unicode_paths
def test_1(self):
"""Test magic_run_completer, should match two alternatives"""
event = MockEvent("%run a")
mockself = None
match = set(magic_run_completer(mockself, event))
self.assertEqual(match, {"a.py", "aaø.py"})
@onlyif_unicode_paths
def test_2(self):
"""Test magic_run_completer, should match one alternative"""
event = MockEvent("%run aa")
mockself = None
match = set(magic_run_completer(mockself, event))
self.assertEqual(match, {"aaø.py"})
@onlyif_unicode_paths
def test_3(self):
"""Test magic_run_completer with unterminated " """
event = MockEvent('%run "a')
mockself = None
match = set(magic_run_completer(mockself, event))
self.assertEqual(match, {"a.py", "aaø.py"})
# module_completer:
def test_import_invalid_module():
"""Testing of issue https://github.com/ipython/ipython/issues/1107"""
invalid_module_names = {"foo-bar", "foo:bar", "10foo"}
valid_module_names = {"foobar"}
with TemporaryDirectory() as tmpdir:
sys.path.insert(0, tmpdir)
for name in invalid_module_names | valid_module_names:
filename = os.path.join(tmpdir, name + ".py")
open(filename, "w", encoding="utf-8").close()
s = set(module_completion("import foo"))
intersection = s.intersection(invalid_module_names)
assert intersection == set()
assert valid_module_names.issubset(s), valid_module_names.intersection(s)
def test_bad_module_all():
"""Test module with invalid __all__
https://github.com/ipython/ipython/issues/9678
"""
testsdir = os.path.dirname(__file__)
sys.path.insert(0, testsdir)
try:
results = module_completion("from bad_all import ")
assert "puppies" in results
for r in results:
assert isinstance(r, str)
# bad_all doesn't contain submodules, but this completion
# should finish without raising an exception:
results = module_completion("import bad_all.")
assert results == []
finally:
sys.path.remove(testsdir)
def test_module_without_init():
"""
Test module without __init__.py.
https://github.com/ipython/ipython/issues/11226
"""
fake_module_name = "foo_xder_134"
with TemporaryDirectory() as tmpdir:
sys.path.insert(0, tmpdir)
try:
os.makedirs(os.path.join(tmpdir, fake_module_name))
s = try_import(mod=fake_module_name)
assert s == [], f"for module {fake_module_name}"
finally:
sys.path.remove(tmpdir)
def test_valid_exported_submodules():
"""
Test checking exported (__all__) objects are submodules
"""
results = module_completion("import os.pa")
# ensure we get a valid submodule:
assert "os.path" in results
# ensure we don't get objects that aren't submodules:
assert "os.pathconf" not in results
| Test_magic_run_completer_nonascii |
python | kamyu104__LeetCode-Solutions | Python/subarray-sum-equals-k.py | {
"start": 50,
"end": 495
} | class ____(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
result = 0
accumulated_sum = 0
lookup = collections.defaultdict(int)
lookup[0] += 1
for num in nums:
accumulated_sum += num
result += lookup[accumulated_sum - k]
lookup[accumulated_sum] += 1
return result
| Solution |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 31018,
"end": 32690
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
import sklearn.tree
if type(y) in ("binary", "multiclass"):
kf = sklearn.model_selection.StratifiedKFold(n_splits=5)
else:
kf = sklearn.model_selection.KFold(n_splits=5)
accuracy = 0.0
for train, test in kf.split(X, y):
random_state = sklearn.utils.check_random_state(42)
node = sklearn.tree.DecisionTreeClassifier(
criterion="entropy",
max_depth=1,
random_state=random_state,
min_samples_split=2,
min_samples_leaf=1,
max_features=None,
)
if len(y.shape) == 1 or y.shape[1] == 1:
node.fit(
X.iloc[train] if hasattr(X, "iloc") else X[train],
y.iloc[train] if hasattr(y, "iloc") else y[train],
)
else:
node = OneVsRestClassifier(node)
node.fit(
X.iloc[train] if hasattr(X, "iloc") else X[train],
y.iloc[train] if hasattr(y, "iloc") else y[train],
)
predictions = node.predict(
X.iloc[test] if hasattr(X, "iloc") else X[test],
)
accuracy += sklearn.metrics.accuracy_score(
predictions,
y.iloc[test] if hasattr(y, "iloc") else y[test],
)
return accuracy / 5
def _calculate_sparse(self, X, y, logger, feat_type):
return np.NaN
@metafeatures.define("LandmarkRandomNodeLearner")
| LandmarkDecisionNodeLearner |
python | huggingface__transformers | src/transformers/models/t5/configuration_t5.py | {
"start": 768,
"end": 6372
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`T5Model`]. It is used to
instantiate a T5 model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the T5
[google-t5/t5-small](https://huggingface.co/google-t5/t5-small) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 32128):
Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`T5Model`].
d_model (`int`, *optional*, defaults to 512):
Size of the encoder layers and the pooler layer.
d_kv (`int`, *optional*, defaults to 64):
Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
be defined as `num_heads * d_kv`.
d_ff (`int`, *optional*, defaults to 2048):
Size of the intermediate feed forward layer in each `T5Block`.
num_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_decoder_layers (`int`, *optional*):
Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
num_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The number of buckets to use for each attention layer.
relative_attention_max_distance (`int`, *optional*, defaults to 128):
The maximum distance of the longer sequences for the bucket separation.
dropout_rate (`float`, *optional*, defaults to 0.1):
The ratio for all dropout layers.
classifier_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for classifier.
layer_norm_eps (`float`, *optional*, defaults to 1e-6):
The epsilon used by the layer normalization layers.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. T5v1.1 uses the
`"gated-gelu"` feed forward projection. Original T5 uses `"relu"`.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
"""
model_type = "t5"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
"head_dim": "d_kv",
}
def __init__(
self,
vocab_size=32128,
d_model=512,
d_kv=64,
d_ff=2048,
num_layers=6,
num_decoder_layers=None,
num_heads=8,
relative_attention_num_buckets=32,
relative_attention_max_distance=128,
dropout_rate=0.1,
layer_norm_epsilon=1e-6,
initializer_factor=1.0,
feed_forward_proj="relu",
is_encoder_decoder=True,
use_cache=True,
pad_token_id=0,
eos_token_id=1,
classifier_dropout=0.0,
**kwargs,
):
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.classifier_dropout = classifier_dropout
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
act_info = self.feed_forward_proj.split("-")
self.dense_act_fn = act_info[-1]
self.is_gated_act = act_info[0] == "gated"
if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'"
)
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
self.dense_act_fn = "gelu_new"
super().__init__(
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
**kwargs,
)
self.tie_encoder_decoder = True # T5 is always tied, has always been like that.
__all__ = ["T5Config"]
| T5Config |
python | pytorch__pytorch | torch/_subclasses/functional_tensor.py | {
"start": 32788,
"end": 34752
} | class ____(BaseFunctionalizeAPI):
def __init__(
self, mode: Optional[FunctionalTensorMode] = None, pre_dispatch: bool = False
) -> None:
super().__init__()
self.mode = mode if mode else FunctionalTensorMode()
self.pre_dispatch = pre_dispatch
def wrap_tensors(self, args: tuple[Any]) -> tuple[Any]:
with self.mode:
return torch.utils._pytree.tree_map_only(
torch.Tensor, FunctionalTensor.to_functional, args
)
def unwrap_tensors(
self, args: Union[torch.Tensor, tuple[torch.Tensor, ...], list[torch.Tensor]]
) -> Any:
return torch.utils._pytree.tree_map_only(
FunctionalTensor, FunctionalTensor.from_functional, args
)
def functionalize(self, inner_f: Callable) -> Callable:
return dispatch_functionalize(inner_f, self.mode)
def redispatch_to_next(self) -> AbstractContextManager:
# [NOTE] We don't do anything here because at the time
# we exercise this path, we would have already popped the
# FunctionalTensorMode from mode stack. Since FunctionalTensorMode
# is now stateful, it is better to explicitly pass in correct mode
# directly instead of globally setting it.
return contextlib.nullcontext()
def replace(self, input_tensor, output_tensor) -> None:
assert isinstance(input_tensor, FunctionalTensor)
assert not isinstance(output_tensor, FunctionalTensor)
input_tensor.replace_(output_tensor)
def commit_update(self, tensor) -> None:
assert isinstance(tensor, FunctionalTensor)
tensor.commit_update()
def sync(self, tensor) -> None:
assert isinstance(tensor, FunctionalTensor)
tensor.sync()
def mark_mutation_hidden_from_autograd(self, tensor) -> None:
assert isinstance(tensor, FunctionalTensor)
tensor.mark_mutation_hidden_from_autograd()
| PythonFunctionalizeAPI |
python | dask__dask | dask/dataframe/io/parquet/utils.py | {
"start": 347,
"end": 26782
} | class ____:
"""The API necessary to provide a new Parquet reader/writer"""
@classmethod
def extract_filesystem(
cls,
urlpath,
filesystem,
dataset_options,
open_file_options,
storage_options,
):
"""Extract filesystem object from urlpath or user arguments
This classmethod should only be overridden for engines that need
to handle filesystem implementations other than ``fsspec``
(e.g. ``pyarrow.fs.S3FileSystem``).
Parameters
----------
urlpath: str or List[str]
Source directory for data, or path(s) to individual parquet files.
filesystem: "fsspec" or fsspec.AbstractFileSystem
Filesystem backend to use. Default is "fsspec"
dataset_options: dict
Engine-specific dataset options.
open_file_options: dict
Options to be used for file-opening at read time.
storage_options: dict
Options to be passed on to the file-system backend.
Returns
-------
fs: Any
A global filesystem object to be used for metadata
processing and file-opening by the engine.
paths: List[str]
List of data-source paths.
dataset_options: dict
Engine-specific dataset options.
open_file_options: dict
Options to be used for file-opening at read time.
"""
# Check if fs was specified as a dataset option
if filesystem is None:
fs = dataset_options.pop("fs", "fsspec")
else:
if "fs" in dataset_options:
raise ValueError(
"Cannot specify a filesystem argument if the "
"'fs' dataset option is also defined."
)
fs = filesystem
if fs in (None, "fsspec"):
# Use fsspec to infer a filesystem by default
fs, _, paths = get_fs_token_paths(
urlpath, mode="rb", storage_options=storage_options
)
return fs, paths, dataset_options, open_file_options
else:
# Check that an initialized filesystem object was provided
if not isinstance(fs, AbstractFileSystem):
raise ValueError(
f"Expected fsspec.AbstractFileSystem or 'fsspec'. Got {fs}"
)
if storage_options:
# The filesystem was already specified. Can't pass in
# any storage options
raise ValueError(
f"Cannot specify storage_options when an explicit "
f"filesystem object is specified. Got: {storage_options}"
)
if isinstance(urlpath, (list, tuple, set)):
if not urlpath:
raise ValueError("empty urlpath sequence")
urlpath = [stringify_path(u) for u in urlpath]
else:
urlpath = [stringify_path(urlpath)]
paths = expand_paths_if_needed(urlpath, "rb", 1, fs, None)
return (
fs,
[fs._strip_protocol(u) for u in paths],
dataset_options,
open_file_options,
)
@classmethod
def default_blocksize(cls):
return "256 MiB"
@classmethod
def read_partition(
cls, fs, piece, columns, index, use_nullable_dtypes=False, **kwargs
):
"""Read a single piece of a Parquet dataset into a Pandas DataFrame
This function is called many times in individual tasks
Parameters
----------
fs: FileSystem
piece: object
This is some token that is returned by Engine.read_metadata.
Typically it represents a row group in a Parquet dataset
columns: List[str]
List of column names to pull out of that row group
index: str, List[str], or False
The index name(s).
use_nullable_dtypes: boolean
Whether to use pandas nullable dtypes (like "string" or "Int64")
where appropriate when reading parquet files.
dtype_backend: {"numpy_nullable", "pyarrow"}
Whether to use pandas nullable dtypes (like "string" or "Int64")
where appropriate when reading parquet files.
convert_string: boolean
Whether to use pyarrow strings when reading parquet files.
**kwargs:
Includes `"kwargs"` values stored within the `parts` output
of `engine.read_metadata`. May also include arguments to be
passed to the backend (if stored under a top-level `"read"` key).
Returns
-------
A Pandas DataFrame
"""
raise NotImplementedError()
@classmethod
def initialize_write(
cls,
df,
fs,
path,
append=False,
partition_on=None,
ignore_divisions=False,
division_info=None,
**kwargs,
):
"""Perform engine-specific initialization steps for this dataset
Parameters
----------
df: dask.dataframe.DataFrame
fs: FileSystem
path: str
Destination directory for data. Prepend with protocol like ``s3://``
or ``hdfs://`` for remote data.
append: bool
If True, may use existing metadata (if any) and perform checks
against the new data being stored.
partition_on: List(str)
Column(s) to use for dataset partitioning in parquet.
ignore_divisions: bool
Whether or not to ignore old divisions when appending. Otherwise,
overlapping divisions will lead to an error being raised.
division_info: dict
Dictionary containing the divisions and corresponding column name.
**kwargs: dict
Other keyword arguments (including `index_cols`)
Returns
-------
tuple:
engine-specific instance
list of filenames, one per partition
"""
raise NotImplementedError
@classmethod
def write_partition(
cls, df, path, fs, filename, partition_on, return_metadata, **kwargs
):
"""
Output a partition of a dask.DataFrame. This will correspond to
one output file, unless partition_on is set, in which case, it will
correspond to up to one file in each sub-directory.
Parameters
----------
df: dask.dataframe.DataFrame
path: str
Destination directory for data. Prepend with protocol like ``s3://``
or ``hdfs://`` for remote data.
fs: FileSystem
filename: str
partition_on: List(str)
Column(s) to use for dataset partitioning in parquet.
return_metadata : bool
Whether to return list of instances from this write, one for each
output file. These will be passed to write_metadata if an output
metadata file is requested.
**kwargs: dict
Other keyword arguments (including `fmd` and `index_cols`)
Returns
-------
List of metadata-containing instances (if `return_metadata` is `True`)
or empty list
"""
raise NotImplementedError
@classmethod
def write_metadata(cls, parts, meta, fs, path, append=False, **kwargs):
"""
Write the shared metadata file for a parquet dataset.
Parameters
----------
parts: List
Contains metadata objects to write, of the type undrestood by the
specific implementation
meta: non-chunk metadata
Details that do not depend on the specifics of each chunk write,
typically the schema and pandas metadata, in a format the writer
can use.
fs: FileSystem
path: str
Output file to write to, usually ``"_metadata"`` in the root of
the output dataset
append: boolean
Whether or not to consolidate new metadata with existing (True)
or start from scratch (False)
**kwargs: dict
Other keyword arguments (including `compression`)
"""
raise NotImplementedError()
@classmethod
def collect_file_metadata(cls, path, fs, file_path):
"""
Collect parquet metadata from a file and set the file_path.
Parameters
----------
path: str
Parquet-file path to extract metadata from.
fs: FileSystem
file_path: str
Relative path to set as `file_path` in the metadata.
Returns
-------
A metadata object. The specific type should be recognized
by the aggregate_metadata method.
"""
raise NotImplementedError()
@classmethod
def aggregate_metadata(cls, meta_list, fs, out_path):
"""
Aggregate a list of metadata objects and optionally
write out the final result as a _metadata file.
Parameters
----------
meta_list: list
List of metadata objects to be aggregated into a single
metadata object, and optionally written to disk. The
specific element type can be engine specific.
fs: FileSystem
out_path: str or None
Directory to write the final _metadata file. If None
is specified, the aggregated metadata will be returned,
and nothing will be written to disk.
Returns
-------
If out_path is None, an aggregate metadata object is returned.
Otherwise, None is returned.
"""
raise NotImplementedError()
def _normalize_index_columns(user_columns, data_columns, user_index, data_index):
"""Normalize user and file-provided column and index names
Parameters
----------
user_columns : None, str or list of str
data_columns : list of str
user_index : None, str, or list of str
data_index : list of str
Returns
-------
column_names : list of str
index_names : list of str
"""
specified_columns = user_columns is not None
specified_index = user_index is not None
if user_columns is None:
user_columns = list(data_columns)
elif isinstance(user_columns, str):
user_columns = [user_columns]
else:
user_columns = list(user_columns)
if user_index is None:
user_index = data_index
elif user_index is False:
# When index is False, use no index and all fields should be treated as
# columns (unless `columns` provided).
user_index = []
data_columns = data_index + data_columns
elif isinstance(user_index, str):
user_index = [user_index]
else:
user_index = list(user_index)
if specified_index and not specified_columns:
# Only `index` provided. Use specified index, and all column fields
# that weren't specified as indices
index_names = user_index
column_names = [x for x in data_columns if x not in index_names]
elif specified_columns and not specified_index:
# Only `columns` provided. Use specified columns, and all index fields
# that weren't specified as columns
column_names = user_columns
index_names = [x for x in data_index if x not in column_names]
elif specified_index and specified_columns:
# Both `index` and `columns` provided. Use as specified, but error if
# they intersect.
column_names = user_columns
index_names = user_index
if set(column_names).intersection(index_names):
raise ValueError("Specified index and column names must not intersect")
else:
# Use default columns and index from the metadata
column_names = data_columns
index_names = data_index
return column_names, index_names
def _sort_and_analyze_paths(file_list, fs, root=False):
    """Sort ``file_list`` naturally, then reduce it to a common base path
    and per-file relative names via ``_analyze_paths``.

    Returns ``(sorted_file_list, base, relative_names)``.
    """
    ordered = sorted(file_list, key=natural_sort_key)
    base, rel_names = _analyze_paths(ordered, fs, root=root)
    return ordered, base, rel_names
def _analyze_paths(file_list, fs, root=False):
    """Consolidate list of file-paths into parquet relative paths

    Returns ``(base, relative_paths)`` where ``base`` is the common
    directory prefix of all paths (or the normalized ``root``, when one is
    given) and ``relative_paths`` are the inputs expressed relative to it.

    Note: This function was mostly copied from dask/fastparquet to
    use in ArrowEngine`."""

    def _join_path(*path):
        # Normalize and join path fragments into a single "/"-separated
        # path, resolving "." and ".." components along the way.
        def _scrub(i, p):
            # Convert path to standard form
            # this means windows path separators are converted to linux
            p = p.replace(fs.sep, "/")
            if p == "":  # empty path is assumed to be a relative path
                return "."
            if p[-1] == "/":  # trailing slashes are not allowed
                p = p[:-1]
            if i > 0 and p[0] == "/":  # only the first path can start with /
                p = p[1:]
            return p

        abs_prefix = ""
        if path and path[0]:
            if path[0][0] == "/":
                abs_prefix = "/"
                path = list(path)
                path[0] = path[0][1:]
            elif fs.sep == "\\" and path[0][1:].startswith(":/"):
                # If windows, then look for the "c:/" prefix
                abs_prefix = path[0][0:3]
                path = list(path)
                path[0] = path[0][3:]

        _scrubbed = []
        for i, p in enumerate(path):
            _scrubbed.extend(_scrub(i, p).split("/"))
        # Resolve "." (drop) and ".." (pop the previous component; keep a
        # run of leading ".." for relative paths; error above a root).
        simpler = []
        for s in _scrubbed:
            if s == ".":
                pass
            elif s == "..":
                if simpler:
                    if simpler[-1] == "..":
                        simpler.append(s)
                    else:
                        simpler.pop()
                elif abs_prefix:
                    raise Exception("can not get parent of root")
                else:
                    simpler.append(s)
            else:
                simpler.append(s)

        if not simpler:
            if abs_prefix:
                joined = abs_prefix
            else:
                joined = "."
        else:
            joined = abs_prefix + ("/".join(simpler))
        return joined

    path_parts_list = [_join_path(fn).split("/") for fn in file_list]
    if root is False:
        # Infer the longest common directory prefix across all paths,
        # starting from the first path's directory components.
        basepath = path_parts_list[0][:-1]
        for path_parts in path_parts_list:
            j = len(path_parts) - 1
            for k, (base_part, path_part) in enumerate(zip(basepath, path_parts)):
                if base_part != path_part:
                    j = k
                    break
            basepath = basepath[:j]
        l = len(basepath)  # noqa: E741 -- number of shared prefix components
    else:
        # An explicit root was given; every path must live under it.
        basepath = _join_path(root).split("/")
        l = len(basepath)
        assert all(
            p[:l] == basepath for p in path_parts_list
        ), "All paths must begin with the given root"
    out_list = []
    for path_parts in path_parts_list:
        out_list.append(
            "/".join(path_parts[l:])
        )  # use '/'.join() instead of _join_path to be consistent with split('/')

    return (
        "/".join(basepath),
        out_list,
    )  # use '/'.join() instead of _join_path to be consistent with split('/')
def _aggregate_stats(
file_path,
file_row_group_stats,
file_row_group_column_stats,
stat_col_indices,
):
"""Utility to aggregate the statistics for N row-groups
into a single dictionary.
Used by `Engine._construct_parts`
"""
if len(file_row_group_stats) < 1:
# Empty statistics
return {}
elif len(file_row_group_column_stats) == 0:
assert len(file_row_group_stats) == 1
return file_row_group_stats[0]
else:
# Note: It would be better to avoid df_rgs and df_cols
# construction altogether. It makes it fast to aggregate
# the statistics for many row groups, but isn't
# worthwhile for a small number of row groups.
if len(file_row_group_stats) > 1:
df_rgs = pd.DataFrame(file_row_group_stats)
s = {
"file_path_0": file_path,
"num-rows": df_rgs["num-rows"].sum(),
"num-row-groups": df_rgs["num-rows"].count(),
"total_byte_size": df_rgs["total_byte_size"].sum(),
"columns": [],
}
else:
s = {
"file_path_0": file_path,
"num-rows": file_row_group_stats[0]["num-rows"],
"num-row-groups": 1,
"total_byte_size": file_row_group_stats[0]["total_byte_size"],
"columns": [],
}
df_cols = None
if len(file_row_group_column_stats) > 1:
df_cols = pd.DataFrame(file_row_group_column_stats)
for ind, name in enumerate(stat_col_indices):
i = ind * 3
if df_cols is None:
minval = file_row_group_column_stats[0][i]
maxval = file_row_group_column_stats[0][i + 1]
null_count = file_row_group_column_stats[0][i + 2]
if minval == maxval and null_count:
# Remove "dangerous" stats (min == max, but null values exist)
s["columns"].append({"null_count": null_count})
else:
s["columns"].append(
{
"name": name,
"min": minval,
"max": maxval,
"null_count": null_count,
}
)
else:
minval = df_cols.iloc[:, i].dropna().min()
maxval = df_cols.iloc[:, i + 1].dropna().max()
null_count = df_cols.iloc[:, i + 2].sum()
if minval == maxval and null_count:
s["columns"].append({"null_count": null_count})
else:
s["columns"].append(
{
"name": name,
"min": minval,
"max": maxval,
"null_count": null_count,
}
)
return s
def _row_groups_to_parts(
gather_statistics,
split_row_groups,
aggregation_depth,
file_row_groups,
file_row_group_stats,
file_row_group_column_stats,
stat_col_indices,
make_part_func,
make_part_kwargs,
):
# Construct `parts` and `stats`
parts = []
stats = []
if split_row_groups:
# Create parts from each file,
# limiting the number of row_groups in each piece
split_row_groups = int(split_row_groups)
residual = 0
for filename, row_groups in file_row_groups.items():
row_group_count = len(row_groups)
if residual:
_rgs = [0] + list(range(residual, row_group_count, split_row_groups))
else:
_rgs = list(range(residual, row_group_count, split_row_groups))
for i in _rgs:
i_end = i + split_row_groups
if aggregation_depth is True:
if residual and i == 0:
i_end = residual
residual = 0
_residual = i_end - row_group_count
if _residual > 0:
residual = _residual
rg_list = row_groups[i:i_end]
part = make_part_func(
filename,
rg_list,
**make_part_kwargs,
)
if part is None:
continue
parts.append(part)
if gather_statistics:
stat = _aggregate_stats(
filename,
file_row_group_stats[filename][i:i_end],
file_row_group_column_stats[filename][i:i_end],
stat_col_indices,
)
stats.append(stat)
else:
for filename, row_groups in file_row_groups.items():
part = make_part_func(
filename,
row_groups,
**make_part_kwargs,
)
if part is None:
continue
parts.append(part)
if gather_statistics:
stat = _aggregate_stats(
filename,
file_row_group_stats[filename],
file_row_group_column_stats[filename],
stat_col_indices,
)
stats.append(stat)
return parts, stats
def _get_aggregation_depth(aggregate_files, partition_names):
# Use `aggregate_files` to set `aggregation_depth`
#
# Note that `partition_names` must be ordered. `True` means that we allow
# aggregation of any two files. `False` means that we will never aggregate
# files. If a string is specified, it must be the name of a partition
# column, and the "partition depth" of that column will be used for
# aggregation. Note that we always convert the string into the partition
# "depth" to simplify the aggregation logic.
# Summary of output `aggregation_depth` settings:
#
# True : Free-for-all aggregation (any two files may be aggregated)
# False : No file aggregation allowed
# <int> : Allow aggregation within this partition-hierarchy depth
aggregation_depth = aggregate_files
if isinstance(aggregate_files, str):
if aggregate_files in partition_names:
# aggregate_files corresponds to a partition column. Reset the
# value of this variable to reflect the partition "depth" (in the
# range of 1 to the total number of partition levels)
aggregation_depth = len(partition_names) - partition_names.index(
aggregate_files
)
else:
raise ValueError(
f"{aggregate_files} is not a recognized directory partition."
)
return aggregation_depth
def _set_metadata_task_size(metadata_task_size, fs):
# Set metadata_task_size using the config file
# if the kwarg value was not specified
if metadata_task_size is None:
# If a default value is not specified in the config file,
# otherwise we use "0"
config_str = "dataframe.parquet.metadata-task-size-" + (
"local" if _is_local_fs(fs) else "remote"
)
return config.get(config_str, 0)
return metadata_task_size
def _process_open_file_options(
open_file_options,
metadata=None,
columns=None,
row_groups=None,
default_engine=None,
default_cache="readahead",
allow_precache=True,
):
# Process `open_file_options`.
# Set default values and extract `precache_options`
open_file_options = (open_file_options or {}).copy()
precache_options = open_file_options.pop("precache_options", {}).copy()
if not allow_precache:
# Precaching not allowed
# (probably because the file system is local)
precache_options = {}
if "open_file_func" not in open_file_options:
if precache_options.get("method") == "parquet":
open_file_options["cache_type"] = open_file_options.get(
"cache_type", "parts"
)
precache_options.update(
{
"metadata": metadata,
"columns": columns,
"row_groups": row_groups,
"engine": precache_options.get("engine", default_engine),
}
)
else:
open_file_options["cache_type"] = open_file_options.get(
"cache_type", default_cache
)
open_file_options["mode"] = open_file_options.get("mode", "rb")
return precache_options, open_file_options
def _split_user_options(**kwargs):
# Check user-defined options.
# Split into "dataset"-specific kwargs
user_kwargs = kwargs.copy()
if "file" in user_kwargs:
# Deprecation warning to move toward a single `dataset` key
warnings.warn(
"Passing user options with the 'file' argument is now deprecated."
" Please use 'dataset' instead.",
FutureWarning,
)
dataset_options = {
**user_kwargs.pop("file", {}).copy(),
**user_kwargs.pop("dataset", {}).copy(),
}
read_options = user_kwargs.pop("read", {}).copy()
open_file_options = user_kwargs.pop("open_file_options", {}).copy()
return (
dataset_options,
read_options,
open_file_options,
user_kwargs,
)
def _set_gather_statistics(
gather_statistics,
blocksize,
split_row_groups,
aggregation_depth,
filter_columns,
stat_columns,
):
# Use available information about the current read options
# and target dataset to decide if we need to gather metadata
# statistics to construct the graph for a `read_parquet` op.
# If the user has specified `calculate_divisions=True`, then
# we will be starting with `gather_statistics=True` here.
if (
(blocksize and split_row_groups is True)
or (int(split_row_groups) > 1 and aggregation_depth)
or filter_columns.intersection(stat_columns)
):
# Need to gather statistics if we are aggregating files
# or filtering
# NOTE: Should avoid gathering statistics when the agg
# does not depend on a row-group statistic
gather_statistics = True
elif not stat_columns:
# Not aggregating files/row-groups.
# We only need to gather statistics if `stat_columns`
# is populated
gather_statistics = False
return bool(gather_statistics)
def _infer_split_row_groups(row_group_sizes, blocksize, aggregate_files=False):
# Use blocksize to choose an appropriate split_row_groups value
if row_group_sizes:
blocksize = parse_bytes(blocksize)
if aggregate_files or np.sum(row_group_sizes) > 2 * blocksize:
# If we are aggregating files, or the file is larger
# than `blocksize`, set split_row_groups to "adaptive"
return "adaptive"
return False
| Engine |
python | getsentry__sentry-python | sentry_sdk/integrations/langchain.py | {
"start": 6045,
"end": 40013
class ____(BaseCallbackHandler): # type: ignore[misc]
    """Callback handler that creates Sentry spans.

    Tracks one ``WatchedSpan`` per LangChain run id in an insertion-ordered
    ``OrderedDict`` (bounded by ``max_span_map_size``): spans are opened in
    the ``on_*_start`` callbacks and closed in the matching end/error
    callbacks.

    NOTE(review): the class name appears masked/placeholder here; the rest
    of this module refers to it as ``SentryLangchainCallback`` -- confirm.
    """
    def __init__(self, max_span_map_size, include_prompts):
        # type: (Optional[int], bool) -> None
        # run_id -> WatchedSpan, oldest first (used as a bounded LRU).
        self.span_map = OrderedDict() # type: OrderedDict[UUID, WatchedSpan]
        # Maximum number of concurrently tracked runs; None means unbounded.
        self.max_span_map_size = max_span_map_size
        # Whether prompts/responses may be attached to spans (PII opt-in).
        self.include_prompts = include_prompts
    def gc_span_map(self):
        # type: () -> None
        """Evict (and close) the oldest watched spans beyond the size cap."""
        if self.max_span_map_size is not None:
            while len(self.span_map) > self.max_span_map_size:
                run_id, watched_span = self.span_map.popitem(last=False)
                self._exit_span(watched_span, run_id)
    def _handle_error(self, run_id, error):
        # type: (UUID, Any) -> None
        """Mark the run's span as errored, report ``error``, and close it."""
        with capture_internal_exceptions():
            if not run_id or run_id not in self.span_map:
                return
            span_data = self.span_map[run_id]
            span = span_data.span
            set_span_errored(span)
            sentry_sdk.capture_exception(error, span.scope)
            span.__exit__(None, None, None)
            del self.span_map[run_id]
    def _normalize_langchain_message(self, message):
        # type: (BaseMessage) -> Any
        """Convert a LangChain message into a {role, content, ...} dict."""
        parsed = {"role": message.type, "content": message.content}
        parsed.update(message.additional_kwargs)
        return parsed
    def _create_span(self, run_id, parent_id, **kwargs):
        # type: (SentryLangchainCallback, UUID, Optional[Any], Any) -> WatchedSpan
        """Start a span for ``run_id``, nesting under ``parent_id``'s span
        when that run is still tracked; otherwise start a top-level span."""
        watched_span = None # type: Optional[WatchedSpan]
        if parent_id:
            parent_span = self.span_map.get(parent_id) # type: Optional[WatchedSpan]
            if parent_span:
                watched_span = WatchedSpan(parent_span.span.start_child(**kwargs))
                parent_span.children.append(watched_span)
        if watched_span is None:
            watched_span = WatchedSpan(sentry_sdk.start_span(**kwargs))
        watched_span.span.__enter__()
        self.span_map[run_id] = watched_span
        # Enforce the span-map size bound after every insertion.
        self.gc_span_map()
        return watched_span
    def _exit_span(self, span_data, run_id):
        # type: (SentryLangchainCallback, WatchedSpan, UUID) -> None
        """Close a watched span and stop tracking its run."""
        if span_data.is_pipeline:
            # Leaving a pipeline span: clear the ambient pipeline name.
            set_ai_pipeline_name(None)
        span_data.span.__exit__(None, None, None)
        del self.span_map[run_id]
    def on_llm_start(
        self,
        serialized,
        prompts,
        *,
        run_id,
        tags=None,
        parent_run_id=None,
        metadata=None,
        **kwargs,
    ):
        # type: (SentryLangchainCallback, Dict[str, Any], List[str], UUID, Optional[List[str]], Optional[UUID], Optional[Dict[str, Any]], Any) -> Any
        """Run when LLM starts running."""
        with capture_internal_exceptions():
            if not run_id:
                return
            # Merge call-time invocation params with serialized kwargs.
            all_params = kwargs.get("invocation_params", {})
            all_params.update(serialized.get("kwargs", {}))
            model = (
                all_params.get("model")
                or all_params.get("model_name")
                or all_params.get("model_id")
                or ""
            )
            watched_span = self._create_span(
                run_id,
                parent_run_id,
                op=OP.GEN_AI_PIPELINE,
                name=kwargs.get("name") or "Langchain LLM call",
                origin=LangchainIntegration.origin,
            )
            span = watched_span.span
            if model:
                span.set_data(
                    SPANDATA.GEN_AI_REQUEST_MODEL,
                    model,
                )
            # Best-effort vendor detection from the serialized "_type".
            ai_type = all_params.get("_type", "")
            if "anthropic" in ai_type:
                span.set_data(SPANDATA.GEN_AI_SYSTEM, "anthropic")
            elif "openai" in ai_type:
                span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai")
            for key, attribute in DATA_FIELDS.items():
                if key in all_params and all_params[key] is not None:
                    set_data_normalized(span, attribute, all_params[key], unpack=False)
            _set_tools_on_span(span, all_params.get("tools"))
            if should_send_default_pii() and self.include_prompts:
                # Plain string prompts are wrapped as user text messages.
                normalized_messages = [
                    {
                        "role": GEN_AI_ALLOWED_MESSAGE_ROLES.USER,
                        "content": {"type": "text", "text": prompt},
                    }
                    for prompt in prompts
                ]
                scope = sentry_sdk.get_current_scope()
                messages_data = truncate_and_annotate_messages(
                    normalized_messages, span, scope
                )
                if messages_data is not None:
                    set_data_normalized(
                        span,
                        SPANDATA.GEN_AI_REQUEST_MESSAGES,
                        messages_data,
                        unpack=False,
                    )
    def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs):
        # type: (SentryLangchainCallback, Dict[str, Any], List[List[BaseMessage]], UUID, Any) -> Any
        """Run when Chat Model starts running."""
        with capture_internal_exceptions():
            if not run_id:
                return
            all_params = kwargs.get("invocation_params", {})
            all_params.update(serialized.get("kwargs", {}))
            model = (
                all_params.get("model")
                or all_params.get("model_name")
                or all_params.get("model_id")
                or ""
            )
            watched_span = self._create_span(
                run_id,
                kwargs.get("parent_run_id"),
                op=OP.GEN_AI_CHAT,
                name=f"chat {model}".strip(),
                origin=LangchainIntegration.origin,
            )
            span = watched_span.span
            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
            if model:
                span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model)
            ai_type = all_params.get("_type", "")
            if "anthropic" in ai_type:
                span.set_data(SPANDATA.GEN_AI_SYSTEM, "anthropic")
            elif "openai" in ai_type:
                span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai")
            # Attach the currently-active agent (if any) to the chat span.
            agent_name = _get_current_agent()
            if agent_name:
                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
            for key, attribute in DATA_FIELDS.items():
                if key in all_params and all_params[key] is not None:
                    set_data_normalized(span, attribute, all_params[key], unpack=False)
            _set_tools_on_span(span, all_params.get("tools"))
            if should_send_default_pii() and self.include_prompts:
                # Flatten the nested message batches into one list.
                normalized_messages = []
                for list_ in messages:
                    for message in list_:
                        normalized_messages.append(
                            self._normalize_langchain_message(message)
                        )
                normalized_messages = normalize_message_roles(normalized_messages)
                scope = sentry_sdk.get_current_scope()
                messages_data = truncate_and_annotate_messages(
                    normalized_messages, span, scope
                )
                if messages_data is not None:
                    set_data_normalized(
                        span,
                        SPANDATA.GEN_AI_REQUEST_MESSAGES,
                        messages_data,
                        unpack=False,
                    )
    def on_chat_model_end(self, response, *, run_id, **kwargs):
        # type: (SentryLangchainCallback, LLMResult, UUID, Any) -> Any
        """Run when Chat Model ends running."""
        with capture_internal_exceptions():
            if not run_id or run_id not in self.span_map:
                return
            span_data = self.span_map[run_id]
            span = span_data.span
            if should_send_default_pii() and self.include_prompts:
                set_data_normalized(
                    span,
                    SPANDATA.GEN_AI_RESPONSE_TEXT,
                    [[x.text for x in list_] for list_ in response.generations],
                )
            _record_token_usage(span, response)
            self._exit_span(span_data, run_id)
    def on_llm_end(self, response, *, run_id, **kwargs):
        # type: (SentryLangchainCallback, LLMResult, UUID, Any) -> Any
        """Run when LLM ends running."""
        with capture_internal_exceptions():
            if not run_id or run_id not in self.span_map:
                return
            span_data = self.span_map[run_id]
            span = span_data.span
            # Use the first generation (if any) for response metadata.
            try:
                generation = response.generations[0][0]
            except IndexError:
                generation = None
            if generation is not None:
                try:
                    response_model = generation.message.response_metadata.get(
                        "model_name"
                    )
                    if response_model is not None:
                        span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)
                except AttributeError:
                    # Generation has no message/response_metadata -- skip.
                    pass
                try:
                    finish_reason = generation.generation_info.get("finish_reason")
                    if finish_reason is not None:
                        span.set_data(
                            SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, finish_reason
                        )
                except AttributeError:
                    pass
                try:
                    if should_send_default_pii() and self.include_prompts:
                        tool_calls = getattr(generation.message, "tool_calls", None)
                        if tool_calls is not None and tool_calls != []:
                            set_data_normalized(
                                span,
                                SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS,
                                tool_calls,
                                unpack=False,
                            )
                except AttributeError:
                    pass
            if should_send_default_pii() and self.include_prompts:
                set_data_normalized(
                    span,
                    SPANDATA.GEN_AI_RESPONSE_TEXT,
                    [[x.text for x in list_] for list_ in response.generations],
                )
            _record_token_usage(span, response)
            self._exit_span(span_data, run_id)
    def on_llm_error(self, error, *, run_id, **kwargs):
        # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any
        """Run when LLM errors."""
        self._handle_error(run_id, error)
    def on_chat_model_error(self, error, *, run_id, **kwargs):
        # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any
        """Run when Chat Model errors."""
        self._handle_error(run_id, error)
    def on_agent_finish(self, finish, *, run_id, **kwargs):
        # type: (SentryLangchainCallback, AgentFinish, UUID, Any) -> Any
        """Close the agent's span, optionally recording its return values."""
        with capture_internal_exceptions():
            if not run_id or run_id not in self.span_map:
                return
            span_data = self.span_map[run_id]
            span = span_data.span
            if should_send_default_pii() and self.include_prompts:
                set_data_normalized(
                    span, SPANDATA.GEN_AI_RESPONSE_TEXT, finish.return_values.items()
                )
            self._exit_span(span_data, run_id)
    def on_tool_start(self, serialized, input_str, *, run_id, **kwargs):
        # type: (SentryLangchainCallback, Dict[str, Any], str, UUID, Any) -> Any
        """Run when tool starts running."""
        with capture_internal_exceptions():
            if not run_id:
                return
            tool_name = serialized.get("name") or kwargs.get("name") or ""
            watched_span = self._create_span(
                run_id,
                kwargs.get("parent_run_id"),
                op=OP.GEN_AI_EXECUTE_TOOL,
                name=f"execute_tool {tool_name}".strip(),
                origin=LangchainIntegration.origin,
            )
            span = watched_span.span
            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "execute_tool")
            span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool_name)
            tool_description = serialized.get("description")
            if tool_description is not None:
                span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool_description)
            agent_name = _get_current_agent()
            if agent_name:
                span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
            if should_send_default_pii() and self.include_prompts:
                # Prefer structured "inputs" when provided; fall back to the
                # raw input string.
                set_data_normalized(
                    span,
                    SPANDATA.GEN_AI_TOOL_INPUT,
                    kwargs.get("inputs", [input_str]),
                )
    def on_tool_end(self, output, *, run_id, **kwargs):
        # type: (SentryLangchainCallback, str, UUID, Any) -> Any
        """Run when tool ends running."""
        with capture_internal_exceptions():
            if not run_id or run_id not in self.span_map:
                return
            span_data = self.span_map[run_id]
            span = span_data.span
            if should_send_default_pii() and self.include_prompts:
                set_data_normalized(span, SPANDATA.GEN_AI_TOOL_OUTPUT, output)
            self._exit_span(span_data, run_id)
    def on_tool_error(self, error, *args, run_id, **kwargs):
        # type: (SentryLangchainCallback, Union[Exception, KeyboardInterrupt], UUID, Any) -> Any
        """Run when tool errors."""
        self._handle_error(run_id, error)
def _extract_tokens(token_usage):
# type: (Any) -> tuple[Optional[int], Optional[int], Optional[int]]
if not token_usage:
return None, None, None
input_tokens = _get_value(token_usage, "prompt_tokens") or _get_value(
token_usage, "input_tokens"
)
output_tokens = _get_value(token_usage, "completion_tokens") or _get_value(
token_usage, "output_tokens"
)
total_tokens = _get_value(token_usage, "total_tokens")
return input_tokens, output_tokens, total_tokens
def _extract_tokens_from_generations(generations):
# type: (Any) -> tuple[Optional[int], Optional[int], Optional[int]]
"""Extract token usage from response.generations structure."""
if not generations:
return None, None, None
total_input = 0
total_output = 0
total_total = 0
for gen_list in generations:
for gen in gen_list:
token_usage = _get_token_usage(gen)
input_tokens, output_tokens, total_tokens = _extract_tokens(token_usage)
total_input += input_tokens if input_tokens is not None else 0
total_output += output_tokens if output_tokens is not None else 0
total_total += total_tokens if total_tokens is not None else 0
return (
total_input if total_input > 0 else None,
total_output if total_output > 0 else None,
total_total if total_total > 0 else None,
)
def _get_token_usage(obj):
    # type: (Any) -> Optional[Dict[str, Any]]
    """
    Locate a token-usage mapping related to ``obj``.

    Checks, in order: the object's ``message``, its ``llm_output``, and
    finally the object itself, each under the usual key spellings.
    Returns ``None`` when nothing is found.
    """
    usage_keys = ("usage", "token_usage", "usage_metadata")
    for attr in ("message", "llm_output"):
        container = _get_value(obj, attr)
        if container is None:
            continue
        for key in usage_keys:
            found = _get_value(container, key)
            if found is not None:
                return found
    for key in usage_keys:
        found = _get_value(obj, key)
        if found is not None:
            return found
    return None
def _record_token_usage(span, response):
    # type: (Span, Any) -> None
    """Attach gen-AI token-usage data points to ``span``.

    Prefers an explicit usage object found on the response; otherwise
    falls back to summing usage across ``response.generations``.
    """
    usage = _get_token_usage(response)
    if usage:
        counts = _extract_tokens(usage)
    else:
        counts = _extract_tokens_from_generations(response.generations)
    input_tokens, output_tokens, total_tokens = counts
    if input_tokens is not None:
        span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens)
    if output_tokens is not None:
        span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens)
    if total_tokens is not None:
        span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens)
def _get_request_data(obj, args, kwargs):
# type: (Any, Any, Any) -> tuple[Optional[str], Optional[List[Any]]]
"""
Get the agent name and available tools for the agent.
"""
agent = getattr(obj, "agent", None)
runnable = getattr(agent, "runnable", None)
runnable_config = getattr(runnable, "config", {})
tools = (
getattr(obj, "tools", None)
or getattr(agent, "tools", None)
or runnable_config.get("tools")
or runnable_config.get("available_tools")
)
tools = tools if tools and len(tools) > 0 else None
try:
agent_name = None
if len(args) > 1:
agent_name = args[1].get("run_name")
if agent_name is None:
agent_name = runnable_config.get("run_name")
except Exception:
pass
return (agent_name, tools)
def _simplify_langchain_tools(tools):
# type: (Any) -> Optional[List[Any]]
"""Parse and simplify tools into a cleaner format."""
if not tools:
return None
if not isinstance(tools, (list, tuple)):
return None
simplified_tools = []
for tool in tools:
try:
if isinstance(tool, dict):
if "function" in tool and isinstance(tool["function"], dict):
func = tool["function"]
simplified_tool = {
"name": func.get("name"),
"description": func.get("description"),
}
if simplified_tool["name"]:
simplified_tools.append(simplified_tool)
elif "name" in tool:
simplified_tool = {
"name": tool.get("name"),
"description": tool.get("description"),
}
simplified_tools.append(simplified_tool)
else:
name = (
tool.get("name")
or tool.get("tool_name")
or tool.get("function_name")
)
if name:
simplified_tools.append(
{
"name": name,
"description": tool.get("description")
or tool.get("desc"),
}
)
elif hasattr(tool, "name"):
simplified_tool = {
"name": getattr(tool, "name", None),
"description": getattr(tool, "description", None)
or getattr(tool, "desc", None),
}
if simplified_tool["name"]:
simplified_tools.append(simplified_tool)
elif hasattr(tool, "__name__"):
simplified_tools.append(
{
"name": tool.__name__,
"description": getattr(tool, "__doc__", None),
}
)
else:
tool_str = str(tool)
if tool_str and tool_str != "":
simplified_tools.append({"name": tool_str, "description": None})
except Exception:
continue
return simplified_tools if simplified_tools else None
def _set_tools_on_span(span, tools):
# type: (Span, Any) -> None
"""Set available tools data on a span if tools are provided."""
if tools is not None:
simplified_tools = _simplify_langchain_tools(tools)
if simplified_tools:
set_data_normalized(
span,
SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS,
simplified_tools,
unpack=False,
)
def _wrap_configure(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap callback-manager ``configure`` so that every configuration gets
    a ``SentryLangchainCallback`` added to the local callbacks exactly once
    (whatever shape the caller passed them in)."""
    @wraps(f)
    def new_configure(
        callback_manager_cls, # type: type
        inheritable_callbacks=None, # type: Callbacks
        local_callbacks=None, # type: Callbacks
        *args, # type: Any
        **kwargs, # type: Any
    ):
        # type: (...) -> Any
        integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
        if integration is None:
            # Integration disabled: call through unchanged.
            return f(
                callback_manager_cls,
                inheritable_callbacks,
                local_callbacks,
                *args,
                **kwargs,
            )
        local_callbacks = local_callbacks or []
        # Handle each possible type of local_callbacks. For each type, we
        # extract the list of callbacks to check for SentryLangchainCallback,
        # and define a function that would add the SentryLangchainCallback
        # to the existing callbacks list.
        if isinstance(local_callbacks, BaseCallbackManager):
            callbacks_list = local_callbacks.handlers
        elif isinstance(local_callbacks, BaseCallbackHandler):
            callbacks_list = [local_callbacks]
        elif isinstance(local_callbacks, list):
            callbacks_list = local_callbacks
        else:
            logger.debug("Unknown callback type: %s", local_callbacks)
            # Just proceed with original function call
            return f(
                callback_manager_cls,
                inheritable_callbacks,
                local_callbacks,
                *args,
                **kwargs,
            )
        # Handle each possible type of inheritable_callbacks.
        if isinstance(inheritable_callbacks, BaseCallbackManager):
            inheritable_callbacks_list = inheritable_callbacks.handlers
        elif isinstance(inheritable_callbacks, list):
            inheritable_callbacks_list = inheritable_callbacks
        else:
            inheritable_callbacks_list = []
        # Only inject when neither local nor inherited callbacks already
        # contain a Sentry handler (avoids double instrumentation).
        if not any(
            isinstance(cb, SentryLangchainCallback)
            for cb in itertools.chain(callbacks_list, inheritable_callbacks_list)
        ):
            sentry_handler = SentryLangchainCallback(
                integration.max_spans,
                integration.include_prompts,
            )
            if isinstance(local_callbacks, BaseCallbackManager):
                # Copy the manager so the caller's instance is not mutated.
                local_callbacks = local_callbacks.copy()
                local_callbacks.handlers = [
                    *local_callbacks.handlers,
                    sentry_handler,
                ]
            elif isinstance(local_callbacks, BaseCallbackHandler):
                local_callbacks = [local_callbacks, sentry_handler]
            else:
                local_callbacks = [*local_callbacks, sentry_handler]
        return f(
            callback_manager_cls,
            inheritable_callbacks,
            local_callbacks,
            *args,
            **kwargs,
        )
    return new_configure
def _wrap_agent_executor_invoke(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap ``AgentExecutor.invoke`` in a ``gen_ai.invoke_agent`` span and
    track the active agent name for nested callbacks."""
    @wraps(f)
    def new_invoke(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
        if integration is None:
            return f(self, *args, **kwargs)
        agent_name, tools = _get_request_data(self, args, kwargs)
        start_span_function = get_start_span_function()
        with start_span_function(
            op=OP.GEN_AI_INVOKE_AGENT,
            name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
            origin=LangchainIntegration.origin,
        ) as span:
            # Make the agent name visible to nested callbacks for the
            # duration of the run.
            _push_agent(agent_name)
            try:
                if agent_name:
                    span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
                span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
                span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
                _set_tools_on_span(span, tools)
                # Run the agent
                result = f(self, *args, **kwargs)
                # NOTE(review): the request messages are read off the result
                # dict ("input" key) only after the run completes.
                input = result.get("input")
                if (
                    input is not None
                    and should_send_default_pii()
                    and integration.include_prompts
                ):
                    normalized_messages = normalize_message_roles([input])
                    scope = sentry_sdk.get_current_scope()
                    messages_data = truncate_and_annotate_messages(
                        normalized_messages, span, scope
                    )
                    if messages_data is not None:
                        set_data_normalized(
                            span,
                            SPANDATA.GEN_AI_REQUEST_MESSAGES,
                            messages_data,
                            unpack=False,
                        )
                output = result.get("output")
                if (
                    output is not None
                    and should_send_default_pii()
                    and integration.include_prompts
                ):
                    set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
                return result
            finally:
                # Ensure agent is popped even if an exception occurs
                _pop_agent()
    return new_invoke
def _wrap_agent_executor_stream(f):
    # type: (Callable[..., Any]) -> Callable[..., Any]
    """Wrap ``AgentExecutor.stream`` in a ``gen_ai.invoke_agent`` span.

    The span is entered manually and only closed when the returned (sync
    or async) iterator is exhausted or fails, so it covers the whole
    streamed run."""
    @wraps(f)
    def new_stream(self, *args, **kwargs):
        # type: (Any, Any, Any) -> Any
        integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
        if integration is None:
            return f(self, *args, **kwargs)
        agent_name, tools = _get_request_data(self, args, kwargs)
        start_span_function = get_start_span_function()
        span = start_span_function(
            op=OP.GEN_AI_INVOKE_AGENT,
            name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
            origin=LangchainIntegration.origin,
        )
        # Entered manually; closed by the wrapping iterators below.
        span.__enter__()
        _push_agent(agent_name)
        if agent_name:
            span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)
        span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
        span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
        _set_tools_on_span(span, tools)
        input = args[0].get("input") if len(args) >= 1 else None
        if (
            input is not None
            and should_send_default_pii()
            and integration.include_prompts
        ):
            normalized_messages = normalize_message_roles([input])
            scope = sentry_sdk.get_current_scope()
            messages_data = truncate_and_annotate_messages(
                normalized_messages, span, scope
            )
            if messages_data is not None:
                set_data_normalized(
                    span,
                    SPANDATA.GEN_AI_REQUEST_MESSAGES,
                    messages_data,
                    unpack=False,
                )
        # Run the agent
        result = f(self, *args, **kwargs)
        old_iterator = result
        def new_iterator():
            # type: () -> Iterator[Any]
            exc_info = (None, None, None) # type: tuple[Any, Any, Any]
            try:
                for event in old_iterator:
                    yield event
                # `event` here is the last yielded item; a NameError from an
                # empty iterator is swallowed by the inner except below.
                try:
                    output = event.get("output")
                except Exception:
                    output = None
                if (
                    output is not None
                    and should_send_default_pii()
                    and integration.include_prompts
                ):
                    set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
            except Exception:
                exc_info = sys.exc_info()
                set_span_errored(span)
                raise
            finally:
                # Ensure cleanup happens even if iterator is abandoned or fails
                _pop_agent()
                span.__exit__(*exc_info)
        async def new_iterator_async():
            # type: () -> AsyncIterator[Any]
            exc_info = (None, None, None) # type: tuple[Any, Any, Any]
            try:
                async for event in old_iterator:
                    yield event
                try:
                    output = event.get("output")
                except Exception:
                    output = None
                if (
                    output is not None
                    and should_send_default_pii()
                    and integration.include_prompts
                ):
                    set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output)
            except Exception:
                exc_info = sys.exc_info()
                set_span_errored(span)
                raise
            finally:
                # Ensure cleanup happens even if iterator is abandoned or fails
                _pop_agent()
                span.__exit__(*exc_info)
        if str(type(result)) == "<class 'async_generator'>":
            # NOTE(review): string-based async-generator detection looks
            # fragile; `inspect.isasyncgen(result)` would be the robust
            # equivalent -- confirm before changing.
            result = new_iterator_async()
        else:
            result = new_iterator()
        return result
    return new_stream
def _patch_embeddings_provider(provider_class):
# type: (Any) -> None
"""Patch an embeddings provider class with monitoring wrappers."""
if provider_class is None:
return
if hasattr(provider_class, "embed_documents"):
provider_class.embed_documents = _wrap_embedding_method(
provider_class.embed_documents
)
if hasattr(provider_class, "embed_query"):
provider_class.embed_query = _wrap_embedding_method(provider_class.embed_query)
if hasattr(provider_class, "aembed_documents"):
provider_class.aembed_documents = _wrap_async_embedding_method(
provider_class.aembed_documents
)
if hasattr(provider_class, "aembed_query"):
provider_class.aembed_query = _wrap_async_embedding_method(
provider_class.aembed_query
)
def _wrap_embedding_method(f):
# type: (Callable[..., Any]) -> Callable[..., Any]
"""Wrap sync embedding methods (embed_documents and embed_query)."""
@wraps(f)
def new_embedding_method(self, *args, **kwargs):
# type: (Any, Any, Any) -> Any
integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
if integration is None:
return f(self, *args, **kwargs)
model_name = getattr(self, "model", None) or getattr(self, "model_name", None)
with sentry_sdk.start_span(
op=OP.GEN_AI_EMBEDDINGS,
name=f"embeddings {model_name}" if model_name else "embeddings",
origin=LangchainIntegration.origin,
) as span:
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "embeddings")
if model_name:
span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
# Capture input if PII is allowed
if (
should_send_default_pii()
and integration.include_prompts
and len(args) > 0
):
input_data = args[0]
# Normalize to list format
texts = input_data if isinstance(input_data, list) else [input_data]
set_data_normalized(
span, SPANDATA.GEN_AI_EMBEDDINGS_INPUT, texts, unpack=False
)
result = f(self, *args, **kwargs)
return result
return new_embedding_method
def _wrap_async_embedding_method(f):
# type: (Callable[..., Any]) -> Callable[..., Any]
"""Wrap async embedding methods (aembed_documents and aembed_query)."""
@wraps(f)
async def new_async_embedding_method(self, *args, **kwargs):
# type: (Any, Any, Any) -> Any
integration = sentry_sdk.get_client().get_integration(LangchainIntegration)
if integration is None:
return await f(self, *args, **kwargs)
model_name = getattr(self, "model", None) or getattr(self, "model_name", None)
with sentry_sdk.start_span(
op=OP.GEN_AI_EMBEDDINGS,
name=f"embeddings {model_name}" if model_name else "embeddings",
origin=LangchainIntegration.origin,
) as span:
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "embeddings")
if model_name:
span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
# Capture input if PII is allowed
if (
should_send_default_pii()
and integration.include_prompts
and len(args) > 0
):
input_data = args[0]
# Normalize to list format
texts = input_data if isinstance(input_data, list) else [input_data]
set_data_normalized(
span, SPANDATA.GEN_AI_EMBEDDINGS_INPUT, texts, unpack=False
)
result = await f(self, *args, **kwargs)
return result
return new_async_embedding_method
| SentryLangchainCallback |
python | pytorch__pytorch | torch/distributions/constraints.py | {
"start": 13599,
"end": 14034
} | class ____(Constraint):
"""
Constrain to a real half line `[-inf, upper_bound)`.
"""
def __init__(self, upper_bound):
self.upper_bound = upper_bound
super().__init__()
def check(self, value):
return value < self.upper_bound
def __repr__(self):
fmt_string = self.__class__.__name__[1:]
fmt_string += f"(upper_bound={self.upper_bound})"
return fmt_string
| _LessThan |
python | huggingface__transformers | src/transformers/models/mm_grounding_dino/modular_mm_grounding_dino.py | {
"start": 18236,
"end": 19597
} | class ____(GroundingDinoForObjectDetection, MMGroundingDinoPreTrainedModel):
_tied_weights_keys = {
r"bbox_embed.(?![0])\d+": r"bbox_embed.0",
r"class_embed.(?![0])\d+": r"^class_embed.0",
"model.decoder.bbox_embed": "bbox_embed",
"model.decoder.class_embed": "class_embed",
}
def __init__(self, config: MMGroundingDinoConfig):
MMGroundingDinoPreTrainedModel.__init__(self, config)
self.model = MMGroundingDinoModel(config)
self.class_embed = nn.ModuleList(
[MMGroundingDinoContrastiveEmbedding(config) for _ in range(config.decoder_layers)]
)
self.bbox_embed = nn.ModuleList(
[
MMGroundingDinoMLPPredictionHead(
input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3
)
for _ in range(config.decoder_layers)
]
)
# Initialize weights and apply final processing
self.model.decoder.class_embed = self.class_embed # class embed has no weights so nothing to tie
self.model.decoder.bbox_embed = self.bbox_embed
self.post_init()
__all__ = [
"MMGroundingDinoConfig",
"MMGroundingDinoForObjectDetection",
"MMGroundingDinoModel",
"MMGroundingDinoPreTrainedModel",
]
| MMGroundingDinoForObjectDetection |
python | pypa__warehouse | tests/unit/manage/views/test_teams.py | {
"start": 5820,
"end": 6644
} | class ____:
@pytest.mark.usefixtures("_enable_organizations")
def test_manage_team_projects(
self,
db_request,
pyramid_user,
organization_service,
monkeypatch,
):
team = TeamFactory.create()
project = ProjectFactory.create()
TeamProjectRoleFactory.create(
project=project, team=team, role_name=TeamProjectRoleType.Owner
)
view = team_views.ManageTeamProjectsViews(team, db_request)
result = view.manage_team_projects()
assert view.team == team
assert view.request == db_request
assert result == {
"team": team,
"active_projects": view.active_projects,
"projects_owned": set(),
"projects_sole_owned": set(),
}
| TestManageTeamProjects |
python | charliermarsh__ruff | crates/ruff_python_parser/resources/inline/err/star_index_py310.py | {
"start": 72,
"end": 293
} | class ____(Generic[DType, *Shape]): ... # motivating example from the PEP
lst[a, *b, c] # different positions
lst[a, b, *c] # different positions
lst[*a, *b] # multiple unpacks
array[3:5, *idxs] # mixed with slices
| Array |
python | Netflix__metaflow | test/core/tests/constants.py | {
"start": 67,
"end": 1923
} | class ____(MetaflowTest):
"""
Test that an artifact defined in the first step
is available in all steps downstream.
"""
PRIORITY = 0
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
CLASS_VARS = {
"str_const": '"this is a constant"',
"int_const": 123,
"obj_const": "[]",
}
PARAMETERS = {
"int_param": {"default": 456},
"str_param": {"default": "'foobar'"},
}
@steps(0, ["all"])
def step_all(self):
# make sure class attributes are available in all steps
# through joins etc
assert_equals("this is a constant", self.str_const)
assert_equals(123, self.int_const)
# obj_const is mutable. Not much that can be done about it
assert_equals([], self.obj_const)
assert_equals(456, self.int_param)
assert_equals("foobar", self.str_param)
# make sure class variables are not listed as parameters
from metaflow import current
assert_equals({"int_param", "str_param"}, set(current.parameter_names))
try:
self.int_param = 5
except AttributeError:
pass
else:
raise Exception("It shouldn't be possible to modify parameters")
try:
self.int_const = 122
except AttributeError:
pass
else:
raise Exception("It shouldn't be possible to modify constants")
def check_results(self, flow, checker):
for step in flow:
checker.assert_artifact(step.name, "int_param", 456)
checker.assert_artifact(step.name, "int_const", 123)
| ConstantsTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 373448,
"end": 374244
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of
UpdateRepositoryWebCommitSignoffSetting
"""
__schema__ = github_schema
__field_names__ = ("repository_id", "web_commit_signoff_required", "client_mutation_id")
repository_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryId")
"""The ID of the repository to update."""
web_commit_signoff_required = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="webCommitSignoffRequired")
"""Indicates if the repository should require signoff on web-based
commits.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateRepositoryWebCommitSignoffSettingInput |
python | pytorch__pytorch | torch/utils/hipify/hipify_python.py | {
"start": 1690,
"end": 1750
} | class ____(Enum):
INITIALIZED = 1
DONE = 2
| CurrentState |
python | kamyu104__LeetCode-Solutions | Python/minimize-the-maximum-adjacent-element-difference.py | {
"start": 49,
"end": 1519
} | class ____(object):
def minDifference(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
def binary_search(left, right, check):
while left <= right:
mid = left+(right-left)//2
if check(mid):
right = mid-1
else:
left = mid+1
return left
def check(d):
prev = cnt = 0
for i in xrange(len(nums)):
if nums[i] == -1:
cnt += 1
continue
if prev and cnt and min(max(abs(prev-x), abs(nums[i]-x)) for x in (left+d, right-d)) > d and (cnt == 1 or (right-d)-(left+d) > d):
return False
prev = nums[i]
cnt = 0
return True
max_diff, left, right = 0, float("inf"), 0
for i in xrange(len(nums)):
if nums[i] != -1:
if i+1 < len(nums) and nums[i+1] != -1:
max_diff = max(max_diff, abs(nums[i]-nums[i+1]))
continue
if i-1 < len(nums) and nums[i-1] != -1:
left = min(left, nums[i-1])
right = max(right, nums[i-1])
if i+1 < len(nums) and nums[i+1] != -1:
left = min(left, nums[i+1])
right = max(right, nums[i+1])
return binary_search(max_diff, (right-left)//2, check)
| Solution |
python | spack__spack | lib/spack/spack/test/jobserver.py | {
"start": 484,
"end": 2042
} | class ____:
"""Test parsing of MAKEFLAGS for jobserver configuration."""
def test_empty_makeflags(self):
"""Empty MAKEFLAGS should return None."""
assert get_jobserver_config("") is None
def test_no_jobserver_flag(self):
"""MAKEFLAGS without jobserver flag should return None."""
assert get_jobserver_config(" -j4 --silent") is None
def test_fifo_format_new(self):
"""Parse new FIFO format"""
assert get_jobserver_config(" -j4 --jobserver-auth=fifo:/tmp/my_fifo") == "/tmp/my_fifo"
def test_pipe_format_new(self):
"""Parse new pipe format"""
assert get_jobserver_config(" -j4 --jobserver-auth=3,4") == (3, 4)
def test_pipe_format_old(self):
"""Parse old pipe format (on old versions of gmake this was not publicized)"""
assert get_jobserver_config(" -j4 --jobserver-fds=5,6") == (5, 6)
def test_multiple_flags_last_wins(self):
"""When multiple jobserver flags exist, last one wins."""
makeflags = " --jobserver-fds=3,4 --jobserver-auth=fifo:/tmp/fifo --jobserver-auth=7,8"
assert get_jobserver_config(makeflags) == (7, 8)
def test_invalid_format(self):
assert get_jobserver_config(" --jobserver-auth=3") is None
assert get_jobserver_config(" --jobserver-auth=a,b") is None
assert get_jobserver_config(" --jobserver-auth=3,b") is None
assert get_jobserver_config(" --jobserver-auth=3,4,5") is None
assert get_jobserver_config(" --jobserver-auth=") is None
| TestGetJobserverConfig |
python | spack__spack | lib/spack/spack/vendor/macholib/mach_o.py | {
"start": 11436,
"end": 11598
} | class ____(p_uint32):
def __str__(self):
return time.ctime(self)
def read_struct(f, s, **kw):
return s.from_fileobj(f, **kw)
| mach_timestamp_helper |
python | ray-project__ray | rllib/utils/replay_buffers/tests/test_segment_tree_replay_buffer_api.py | {
"start": 255,
"end": 5325
} | class ____(unittest.TestCase):
def test_tree_set(self):
tree = SumSegmentTree(4)
tree[2] = 1.0
tree[3] = 3.0
assert np.isclose(tree.sum(), 4.0)
assert np.isclose(tree.sum(0, 2), 0.0)
assert np.isclose(tree.sum(0, 3), 1.0)
assert np.isclose(tree.sum(2, 3), 1.0)
assert np.isclose(tree.sum(2, -1), 1.0)
assert np.isclose(tree.sum(2, 4), 4.0)
assert np.isclose(tree.sum(2), 4.0)
def test_tree_set_overlap(self):
tree = SumSegmentTree(4)
tree[2] = 1.0
tree[2] = 3.0
assert np.isclose(tree.sum(), 3.0)
assert np.isclose(tree.sum(2, 3), 3.0)
assert np.isclose(tree.sum(2, -1), 3.0)
assert np.isclose(tree.sum(2, 4), 3.0)
assert np.isclose(tree.sum(2), 3.0)
assert np.isclose(tree.sum(1, 2), 0.0)
def test_prefixsum_idx(self):
tree = SumSegmentTree(4)
tree[2] = 1.0
tree[3] = 3.0
assert tree.find_prefixsum_idx(0.0) == 2
assert tree.find_prefixsum_idx(0.5) == 2
assert tree.find_prefixsum_idx(0.99) == 2
assert tree.find_prefixsum_idx(1.01) == 3
assert tree.find_prefixsum_idx(3.00) == 3
assert tree.find_prefixsum_idx(4.00) == 3
def test_prefixsum_idx2(self):
tree = SumSegmentTree(4)
tree[0] = 0.5
tree[1] = 1.0
tree[2] = 1.0
tree[3] = 3.0
assert tree.find_prefixsum_idx(0.00) == 0
assert tree.find_prefixsum_idx(0.55) == 1
assert tree.find_prefixsum_idx(0.99) == 1
assert tree.find_prefixsum_idx(1.51) == 2
assert tree.find_prefixsum_idx(3.00) == 3
assert tree.find_prefixsum_idx(5.50) == 3
def test_max_interval_tree(self):
tree = MinSegmentTree(4)
tree[0] = 1.0
tree[2] = 0.5
tree[3] = 3.0
assert np.isclose(tree.min(), 0.5)
assert np.isclose(tree.min(0, 2), 1.0)
assert np.isclose(tree.min(0, 3), 0.5)
assert np.isclose(tree.min(0, -1), 0.5)
assert np.isclose(tree.min(2, 4), 0.5)
assert np.isclose(tree.min(3, 4), 3.0)
tree[2] = 0.7
assert np.isclose(tree.min(), 0.7)
assert np.isclose(tree.min(0, 2), 1.0)
assert np.isclose(tree.min(0, 3), 0.7)
assert np.isclose(tree.min(0, -1), 0.7)
assert np.isclose(tree.min(2, 4), 0.7)
assert np.isclose(tree.min(3, 4), 3.0)
tree[2] = 4.0
assert np.isclose(tree.min(), 1.0)
assert np.isclose(tree.min(0, 2), 1.0)
assert np.isclose(tree.min(0, 3), 1.0)
assert np.isclose(tree.min(0, -1), 1.0)
assert np.isclose(tree.min(2, 4), 3.0)
assert np.isclose(tree.min(2, 3), 4.0)
assert np.isclose(tree.min(2, -1), 4.0)
assert np.isclose(tree.min(3, 4), 3.0)
@staticmethod
def _get_episode(episode_len=None, id_=None, with_extra_model_outs=False):
eps = SingleAgentEpisode(id_=id_, observations=[0.0], infos=[{}])
ts = np.random.randint(1, 200) if episode_len is None else episode_len
for t in range(ts):
eps.add_env_step(
observation=float(t + 1),
action=int(t),
reward=0.1 * (t + 1),
infos={},
extra_model_outputs=(
{k: k for k in range(2)} if with_extra_model_outs else None
),
)
eps.is_terminated = np.random.random() > 0.5
eps.is_truncated = False if eps.is_terminated else np.random.random() > 0.8
return eps
def test_find_prefixsum_idx(self, buffer_size=80):
"""Fix edge case related to https://github.com/ray-project/ray/issues/54284"""
replay_buffer = PrioritizedEpisodeReplayBuffer(capacity=buffer_size)
sum_segment = replay_buffer._sum_segment
for i in range(10):
replay_buffer.add(self._get_episode(id_=str(i), episode_len=10))
self.assertTrue(sum_segment.capacity >= buffer_size)
# standard cases
for sample in np.linspace(0, sum_segment.sum(), 50):
prefixsum_idx = sum_segment.find_prefixsum_idx(sample)
self.assertTrue(
prefixsum_idx in replay_buffer._tree_idx_to_sample_idx,
f"{sum_segment.sum()=}, {sample=}, {prefixsum_idx=}",
)
# Edge cases (at the boundary then the binary tree can "clip" into invalid regions)
# Therefore, testing using values close to or above the max valid number
for sample in [
sum_segment.sum() - 0.00001,
sum_segment.sum(),
sum_segment.sum() + 0.00001,
]:
prefixsum_idx = sum_segment.find_prefixsum_idx(sample)
self.assertTrue(
prefixsum_idx in replay_buffer._tree_idx_to_sample_idx,
f"{sum_segment.sum()=}, {sample=}, {prefixsum_idx=}",
)
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
| TestSegmentTree |
python | getsentry__sentry | src/sentry/relay/types/rule_condition.py | {
"start": 1538,
"end": 1712
} | class ____(TypedDict):
"""Condition for iterating over a list and applying a nested condition"""
op: Literal["any", "all"]
inner: "RuleCondition"
| IterableCondition |
python | pyinstaller__pyinstaller | tests/unit/test_hookutils.py | {
"start": 4319,
"end": 16400
} | class ____(object):
# A message should be emitted if a module, not a package, was passed.
# The module's name should be in the returned list, nevetheless.
def test_collect_submod_module(self, caplog):
with caplog.at_level(logging.DEBUG, logger='PyInstaller.utils.hooks'):
assert hookutils.collect_submodules('os') == ['os']
assert "collect_submodules - os is not a package." in caplog.records[-1].getMessage()
# A TypeError should be raised if given something other than a str.
def test_not_a_string(self):
with pytest.raises(TypeError, match="package must be a str"):
hookutils.collect_submodules(os)
# The package name itself should be in the returned list.
def test_collect_submod_itself(self, mod_list):
assert TEST_MOD in mod_list
# Python extension is included in the list.
@incompatible_with_cygwin
def test_collect_submod_pyextension(self, mod_list):
assert TEST_MOD + '.pyextension' in mod_list
# Check that all packages get included
# NOTE: the new behavior (see #6846 and #6850) is that un-importable subpackages are not included.
@incompatible_with_cygwin
def test_collect_submod_all_included(self, mod_list):
mod_list.sort()
assert mod_list == [
TEST_MOD,
# Python extensions end with '.pyd' on Windows and with '.so' on Linux, macOS, and other OSes.
# Under Cygwin, '.dll' suffix is used for extensions; therefore, the premise and the test data used by
# this test is inherently incompatible with Cygwin.
TEST_MOD + '.pyextension',
#TEST_MOD + '.raises_error_on_import_1',
#TEST_MOD + '.raises_error_on_import_2',
TEST_MOD + '.subpkg',
TEST_MOD + '.subpkg.twelve',
TEST_MOD + '.two'
]
# Dynamic libraries (.dll, .dylib) are not included in the list.
@incompatible_with_cygwin
def test_collect_submod_no_dynamiclib(self, mod_list):
assert TEST_MOD + '.dynamiclib' not in mod_list
# Subpackages without an __init__.py should not be included.
def test_collect_submod_subpkg_init(self, mod_list):
assert TEST_MOD + '.py_files_not_in_package.sub_pkg.three' not in mod_list
# Test with a subpackage.
def test_collect_submod_subpkg(self, mod_list):
# Note: Even though mod_list is overwritten, it is still needed as a fixture, so that the path to the
# TEST_MOD is set correctly.
mod_list = hookutils.collect_submodules(TEST_MOD + '.subpkg')
mod_list.sort()
assert mod_list == [TEST_MOD + '.subpkg', TEST_MOD + '.subpkg.twelve']
# Test in an ``.egg`` file.
@incompatible_with_cygwin # calls `test_collect_submod_all_included`
def test_collect_submod_egg(self, tmp_path, monkeypatch):
# Copy files to a tmpdir for egg building.
dest_path = tmp_path / 'hookutils_package'
shutil.copytree(TEST_MOD_PATH, dest_path)
monkeypatch.chdir(dest_path)
# Create an egg from the test package. For debug, show the output of the egg build.
print(exec_python('setup.py', 'bdist_egg'))
# Obtain the name of the egg, which depends on the Python version.
dist_path = dest_path / 'dist'
fl = os.listdir(dist_path)
assert len(fl) == 1
egg_name = fl[0]
assert egg_name.endswith('.egg')
# Add the egg to Python's path.
pth = str(dist_path / egg_name)
monkeypatch.setattr('PyInstaller.config.CONF', {'pathex': [pth]})
monkeypatch.syspath_prepend(pth)
# Verify its contents.
ml = hookutils.collect_submodules(TEST_MOD)
self.test_collect_submod_all_included(ml)
# Messages printed to stdout by modules during collect_submodules() should not affect the collected modules list.
def test_collect_submod_stdout_interference(self, monkeypatch):
TEST_MOD = 'foo'
TEST_MOD_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'hookutils_files2')
monkeypatch.setattr('PyInstaller.config.CONF', {'pathex': [TEST_MOD_PATH]})
monkeypatch.syspath_prepend(TEST_MOD_PATH)
ml = hookutils.collect_submodules(TEST_MOD)
ml = sorted(ml)
assert ml == ['foo', 'foo.bar']
# Test each possible value for the **on_error** parameter to collect_submodules().
def test_error_propagation(self, capfd, monkeypatch):
monkeypatch.setattr('PyInstaller.config.CONF', {'pathex': [TEST_MOD_PATH]})
monkeypatch.syspath_prepend(TEST_MOD_PATH)
# Test the default of warning only for the 1st error.
hookutils.collect_submodules(TEST_MOD)
error = capfd.readouterr().err
# Note that there is no guarantee which submodule will be collected first so we don't know exactly what the
# error will be from raises_error_on_import_1 or raises_error_on_import_2.
assert re.match(
".*Failed .* for 'hookutils_package.raises_error_on_import_[12]' because .* "
"raised: AssertionError: I cannot be imported", error
)
# Make sure that only one warning was issued.
assert error.count("Failed") == 1
# Test ignore everything.
hookutils.collect_submodules(TEST_MOD, on_error="ignore")
assert capfd.readouterr().err == ''
# Test warning for all errors. There should be two in total.
hookutils.collect_submodules(TEST_MOD, on_error="warn")
error = capfd.readouterr().err
assert "raises_error_on_import_1" in error
assert "raises_error_on_import_2" in error
assert error.count("Failed") == 2
# Test treating errors as errors.
with pytest.raises(RuntimeError) as ex_info:
hookutils.collect_submodules(TEST_MOD, on_error="raise")
# The traceback should include the cause of the error...
assert ex_info.match('(?s).* assert 0, "I cannot be imported!"')
# ... and the name of the offending submodule in an easy to spot format.
assert ex_info.match("Unable to load submodule 'hookutils_package.raises_error_on_import_[12]'")
def test_is_module_or_submodule():
assert hookutils.is_module_or_submodule('foo.bar', 'foo.bar')
assert hookutils.is_module_or_submodule('foo.bar.baz', 'foo.bar')
assert not hookutils.is_module_or_submodule('foo.bard', 'foo.bar')
assert not hookutils.is_module_or_submodule('foo', 'foo.bar')
def test_check_requirement_package_not_installed():
assert hookutils.check_requirement('pytest')
assert not hookutils.check_requirement('magnumopus-no-package-test-case')
# An error should be raised if a module, not a package, was passed.
def test_collect_data_module():
# 'os' is a module, not a package.
with pytest.raises(TypeError):
hookutils.collect_data_files(__import__('os'))
# This fixtures runs ``collect_data_files`` through the test cases in ``_DATA_PARAMS``.
@pytest.fixture(
params=[
# This is used to invoke ``collect_data_files(*args, **kwargs)`` and provide the expected results
# for validation. The order is: args, kwargs, expected_results_sequence
(
[TEST_MOD],
{},
(
'dynamiclib.dll',
'dynamiclib.dylib',
'nine.dat',
os.path.join('py_files_not_in_package', 'data', 'eleven.dat'),
os.path.join('py_files_not_in_package', 'ten.dat'),
# Not backwards! On Windows, ``.so`` files are just data and vice versa.
'pyextension.so' if is_win else 'pyextension.pyd',
os.path.join('subpkg', 'thirteen.txt'),
),
),
# Test collecting from a subpackage.
(
[TEST_MOD + '.subpkg'],
{},
(os.path.join('subpkg', 'thirteen.txt'),),
),
(
[TEST_MOD],
dict(include_py_files=True, excludes=['**/__pycache__']),
(
'__init__.py',
'dynamiclib.dll',
'dynamiclib.dylib',
'nine.dat',
os.path.join('py_files_not_in_package', 'data', 'eleven.dat'),
os.path.join('py_files_not_in_package', 'one.py'),
os.path.join('py_files_not_in_package', 'sub_pkg', '__init__.py'),
os.path.join('py_files_not_in_package', 'sub_pkg', 'three.py'),
os.path.join('py_files_not_in_package', 'ten.dat'),
# Not backwards! On Windows, ``.so`` files are just data and vice versa.
'pyextension.so' if is_win else 'pyextension.pyd',
os.path.join('raises_error_on_import_1', '__init__.py'),
os.path.join('raises_error_on_import_1', 'foo.py'),
os.path.join('raises_error_on_import_2', '__init__.py'),
os.path.join('raises_error_on_import_2', 'foo.py'),
os.path.join('subpkg', '__init__.py'),
os.path.join('subpkg', 'thirteen.txt'),
os.path.join('subpkg', 'twelve.py'),
'two.py',
),
),
(
[TEST_MOD],
dict(excludes=['py_files_not_in_package', '**/__pycache__']),
(
'dynamiclib.dll',
'dynamiclib.dylib',
'nine.dat',
'pyextension.so' if is_win else 'pyextension.pyd',
os.path.join('subpkg', 'thirteen.txt'),
),
),
(
[TEST_MOD],
dict(includes=['**/*.dat', '**/*.txt']),
(
'nine.dat',
os.path.join('py_files_not_in_package', 'data', 'eleven.dat'),
os.path.join('py_files_not_in_package', 'ten.dat'),
os.path.join('subpkg', 'thirteen.txt'),
),
),
(
[TEST_MOD],
dict(includes=['*.dat']),
('nine.dat',),
),
(
[TEST_MOD],
dict(subdir="py_files_not_in_package", excludes=['**/__pycache__']),
(
os.path.join('py_files_not_in_package', 'data', 'eleven.dat'),
os.path.join('py_files_not_in_package', 'ten.dat'),
),
),
],
ids=['package', 'subpackage', 'package with py files', 'excludes', '** includes', 'includes', 'subdir']
)
def data_lists(monkeypatch, request):
def _sort(sequence):
sorted_list = sorted(list(sequence))
return tuple(sorted_list)
# Add path with 'hookutils_files' module to ``sys.path`` (so analysis in the main process can find it),
# and to ``pathex`` (so subprocess-isolated code can find it).
monkeypatch.setattr('PyInstaller.config.CONF', {'pathex': [TEST_MOD_PATH]})
monkeypatch.syspath_prepend(TEST_MOD_PATH)
# Use the hookutils_test_files package for testing.
args, kwargs, subfiles = request.param
data = hookutils.collect_data_files(*args, **kwargs)
# Break list of (source, dest) into source and dest lists.
src = [item[0] for item in data]
dst = [item[1] for item in data]
return subfiles, _sort(src), _sort(dst)
# Make sure the correct files are found.
@incompatible_with_cygwin
def test_collect_data_all_included(data_lists):
subfiles, src, dst = data_lists
# Check the source and dest lists against the correct values in subfiles.
src_compare = tuple([os.path.join(TEST_MOD_PATH, TEST_MOD, subpath) for subpath in subfiles])
dst_compare = [os.path.dirname(os.path.join(TEST_MOD, subpath)) for subpath in subfiles]
dst_compare.sort()
dst_compare = tuple(dst_compare)
assert src == src_compare
assert dst == dst_compare
# An ImportError should be raised if the module is not found.
def test_get_module_file_attribute_non_exist_module():
with pytest.raises(ImportError):
hookutils.get_module_file_attribute('pyinst_nonexisting_module_name')
| TestCollectSubmodules |
python | apache__airflow | providers/google/src/airflow/providers/google/marketing_platform/hooks/display_video.py | {
"start": 1072,
"end": 4237
} | class ____(GoogleBaseHook):
"""Hook for Google Display & Video 360."""
_conn: Resource | None = None
def __init__(
self,
api_version: str = "v4",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self.api_version = api_version
def get_conn_to_display_video(self) -> Resource:
"""Retrieve connection to DisplayVideo."""
if not self._conn:
http_authorized = self._authorize()
self._conn = build(
"displayvideo",
self.api_version,
http=http_authorized,
cache_discovery=False,
)
return self._conn
@staticmethod
def erf_uri(partner_id, entity_type) -> list[str]:
"""
Return URI for all Entity Read Files in bucket.
For example, if you were generating a file name to retrieve the entity read file
for partner 123 accessing the line_item table from April 2, 2013, your filename
would look something like this:
gdbm-123/entity/20130402.0.LineItem.json
More information:
https://developers.google.com/bid-manager/guides/entity-read/overview
:param partner_id The numeric ID of your Partner.
:param entity_type: The type of file Partner, Advertiser, InsertionOrder,
LineItem, Creative, Pixel, InventorySource, UserList, UniversalChannel, and summary.
"""
return [f"gdbm-{partner_id}/entity/{{{{ ds_nodash }}}}.*.{entity_type}.json"]
def create_sdf_download_operation(self, body_request: dict[str, Any]) -> dict[str, Any]:
"""
Create an SDF Download Task and Returns an Operation.
:param body_request: Body request.
More information about body request can be found here:
https://developers.google.com/display-video/api/reference/rest/v1/sdfdownloadtasks/create
"""
result = (
self.get_conn_to_display_video()
.sdfdownloadtasks()
.create(body=body_request)
.execute(num_retries=self.num_retries)
)
return result
def get_sdf_download_operation(self, operation_name: str):
"""
Get the latest state of an asynchronous SDF download task operation.
:param operation_name: The name of the operation resource.
"""
result = (
self.get_conn_to_display_video()
.sdfdownloadtasks()
.operations()
.get(name=operation_name)
.execute(num_retries=self.num_retries)
)
return result
def download_media(self, resource_name: str):
"""
Download media.
:param resource_name: The resource name of the media that is being downloaded.
"""
request = self.get_conn_to_display_video().media().download_media(resourceName=resource_name)
return request
| GoogleDisplayVideo360Hook |
python | gevent__gevent | src/gevent/_monitor.py | {
"start": 966,
"end": 1046
} | class ____(RuntimeWarning):
"""The type of warnings we emit."""
| MonitorWarning |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 18485,
"end": 18601
} | class ____(BaseModel):
collections: List["CollectionDescription"] = Field(..., description="")
| CollectionsResponse |
python | pola-rs__polars | py-polars/src/polars/interchange/protocol.py | {
"start": 2879,
"end": 3311
} | class ____(TypedDict):
"""Description of a categorical column."""
# whether the ordering of dictionary indices is semantically meaningful
is_ordered: bool
# whether a dictionary-style mapping of categorical values to other objects exists
is_dictionary: Literal[True]
# Python-level only (e.g. `{int: str}`).
# None if not a dictionary-style categorical.
categories: PolarsColumn
| CategoricalDescription |
python | python-markdown__markdown | markdown/blockprocessors.py | {
"start": 2422,
"end": 6508
} | class ____:
""" Base class for block processors.
Each subclass will provide the methods below to work with the source and
tree. Each processor will need to define it's own `test` and `run`
methods. The `test` method should return True or False, to indicate
whether the current block should be processed by this processor. If the
test passes, the parser will call the processors `run` method.
Attributes:
BlockProcessor.parser (BlockParser): The `BlockParser` instance this is attached to.
BlockProcessor.tab_length (int): The tab length set on the `Markdown` instance.
"""
def __init__(self, parser: BlockParser):
self.parser = parser
self.tab_length = parser.md.tab_length
def lastChild(self, parent: etree.Element) -> etree.Element | None:
""" Return the last child of an `etree` element. """
if len(parent):
return parent[-1]
else:
return None
def detab(self, text: str, length: int | None = None) -> tuple[str, str]:
""" Remove a tab from the front of each line of the given text. """
if length is None:
length = self.tab_length
newtext = []
lines = text.split('\n')
for line in lines:
if line.startswith(' ' * length):
newtext.append(line[length:])
elif not line.strip():
newtext.append('')
else:
break
return '\n'.join(newtext), '\n'.join(lines[len(newtext):])
def looseDetab(self, text: str, level: int = 1) -> str:
""" Remove a tab from front of lines but allowing dedented lines. """
lines = text.split('\n')
for i in range(len(lines)):
if lines[i].startswith(' '*self.tab_length*level):
lines[i] = lines[i][self.tab_length*level:]
return '\n'.join(lines)
def test(self, parent: etree.Element, block: str) -> bool:
""" Test for block type. Must be overridden by subclasses.
As the parser loops through processors, it will call the `test`
method on each to determine if the given block of text is of that
type. This method must return a boolean `True` or `False`. The
actual method of testing is left to the needs of that particular
block type. It could be as simple as `block.startswith(some_string)`
or a complex regular expression. As the block type may be different
depending on the parent of the block (i.e. inside a list), the parent
`etree` element is also provided and may be used as part of the test.
Keyword arguments:
parent: An `etree` element which will be the parent of the block.
block: A block of text from the source which has been split at blank lines.
"""
pass # pragma: no cover
def run(self, parent: etree.Element, blocks: list[str]) -> bool | None:
""" Run processor. Must be overridden by subclasses.
When the parser determines the appropriate type of a block, the parser
will call the corresponding processor's `run` method. This method
should parse the individual lines of the block and append them to
the `etree`.
Note that both the `parent` and `etree` keywords are pointers
to instances of the objects which should be edited in place. Each
processor must make changes to the existing objects as there is no
mechanism to return new/different objects to replace them.
This means that this method should be adding `SubElements` or adding text
to the parent, and should remove (`pop`) or add (`insert`) items to
the list of blocks.
If `False` is returned, this will have the same effect as returning `False`
from the `test` method.
Keyword arguments:
parent: An `etree` element which is the parent of the current block.
blocks: A list of all remaining blocks of the document.
"""
pass # pragma: no cover
| BlockProcessor |
python | ray-project__ray | python/ray/util/dask/callbacks.py | {
"start": 6056,
"end": 7732
} | class ____:
def __init__(self, *callbacks):
self.callbacks = [normalize_ray_callback(c) for c in callbacks]
RayDaskCallback.ray_active.update(self.callbacks)
def __enter__(self):
return self
def __exit__(self, *args):
for c in self.callbacks:
RayDaskCallback.ray_active.discard(c)
def normalize_ray_callback(cb):
if isinstance(cb, RayDaskCallback):
return cb._ray_callback
elif isinstance(cb, RayCallback):
return cb
else:
raise TypeError(
"Callbacks must be either 'RayDaskCallback' or 'RayCallback' namedtuple"
)
def unpack_ray_callbacks(cbs):
"""Take an iterable of callbacks, return a list of each callback."""
if cbs:
# Only drop callback methods that aren't in CBS_DONT_DROP.
return RayCallbacks(
*(
[cb for cb in cbs_ if cb or CBS[idx] in CBS_DONT_DROP] or None
for idx, cbs_ in enumerate(zip(*cbs))
)
)
else:
return RayCallbacks(*([()] * len(CBS)))
@contextlib.contextmanager
def local_ray_callbacks(callbacks=None):
"""
Allows Dask-Ray callbacks to work with nested schedulers.
Callbacks will only be used by the first started scheduler they encounter.
This means that only the outermost scheduler will use global callbacks.
"""
global_callbacks = callbacks is None
if global_callbacks:
callbacks, RayDaskCallback.ray_active = (RayDaskCallback.ray_active, set())
try:
yield callbacks or ()
finally:
if global_callbacks:
RayDaskCallback.ray_active = callbacks
| add_ray_callbacks |
python | GoogleCloudPlatform__python-docs-samples | people-and-planet-ai/weather-forecasting/serving/weather-model/weather/model.py | {
"start": 4721,
"end": 5129
} | class ____(torch.nn.Module):
"""Preprocessing normalization layer with z-score."""
def __init__(self, mean: AnyType, std: AnyType) -> None:
super().__init__()
self.mean = torch.nn.Parameter(torch.as_tensor(mean))
self.std = torch.nn.Parameter(torch.as_tensor(std))
def forward(self, x: torch.Tensor) -> torch.Tensor:
return (x - self.mean) / self.std
| Normalization |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 12215,
"end": 12598
} | class ____(ChainedSource):
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen(self.base)
codegen.extend_output(codegen.create_load_attrs("__mro__"))
def guard_source(self) -> GuardSource:
return self.base.guard_source()
def name(self) -> str:
return f"{self.base.name()}.__mro__"
@dataclasses.dataclass(frozen=True)
| TypeMROSource |
python | readthedocs__readthedocs.org | readthedocs/gold/views.py | {
"start": 2279,
"end": 3209
} | class ____(GoldProjectsMixin, FormView):
"""Gold Project list view and form view."""
form_class = GoldProjectForm
template_name = "gold/projects.html"
def form_valid(self, form):
to_add = Project.objects.get(slug=form.cleaned_data["project"])
gold_user = self.get_gold_user()
gold_user.projects.add(to_add)
return HttpResponseRedirect(self.get_success_url())
def get_form(self, data=None, files=None, **kwargs):
kwargs["user"] = self.get_gold_user()
kwargs["projects"] = self.get_gold_projects()
return self.form_class(self.request.user, data, files, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["gold_user"] = self.get_gold_user()
context["user"] = self.request.user
context["projects"] = self.get_gold_projects()
return context
| GoldProjectsListCreate |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataprep.py | {
"start": 8933,
"end": 10374
} | class ____(GoogleCloudBaseOperator):
"""
Runs the flow with the provided id copy of the provided flow id.
:param dataprep_conn_id: The Dataprep connection ID
:param flow_id: ID of the flow to be copied
:param body_request: Body of the POST request to be sent.
"""
template_fields: Sequence[str] = (
"flow_id",
"project_id",
)
operator_extra_links = (DataprepJobGroupLink(),)
def __init__(
self,
*,
project_id: str = PROVIDE_PROJECT_ID,
flow_id: int | str,
body_request: dict,
dataprep_conn_id: str = "dataprep_default",
**kwargs,
):
super().__init__(**kwargs)
self.project_id = project_id
self.flow_id = flow_id
self.body_request = body_request
self.dataprep_conn_id = dataprep_conn_id
def execute(self, context: Context) -> dict:
self.log.info("Running the flow with id: %d...", self.flow_id)
hooks = GoogleDataprepHook(dataprep_conn_id=self.dataprep_conn_id)
response = hooks.run_flow(flow_id=int(self.flow_id), body_request=self.body_request)
if self.project_id:
job_group_id = response["data"][0]["id"]
DataprepJobGroupLink.persist(
context=context,
project_id=self.project_id,
job_group_id=int(job_group_id),
)
return response
| DataprepRunFlowOperator |
python | ansible__ansible | lib/ansible/executor/play_iterator.py | {
"start": 1283,
"end": 1430
} | class ____(IntFlag):
NONE = 0
SETUP = 1
TASKS = 2
RESCUE = 4
ALWAYS = 8
HANDLERS = 16 # NOTE not in use anymore
| FailedStates |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/alloy_db.py | {
"start": 33462,
"end": 37777
} | class ____(AlloyDBWriteBaseOperator):
"""
Delete an Alloy DB instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AlloyDBDeleteInstanceOperator`
:param instance_id: Required. ID of the instance to delete.
:param cluster_id: Required. ID of the cluster.
:param request_id: Optional. An optional request ID to identify requests. Specify a unique request ID
so that if you must retry your request, the server ignores the request if it has already been
completed. The server guarantees that for at least 60 minutes since the first request.
For example, consider a situation where you make an initial request and the request times out.
If you make the request again with the same request ID, the server can check if the original operation
with the same request ID was received, and if so, ignores the second request.
This prevents clients from accidentally creating duplicate commitments.
The request ID must be a valid UUID with the exception that zero UUID is not supported
(00000000-0000-0000-0000-000000000000).
:param validate_request: Optional. If set, performs request validation, but does not actually
execute the request.
:param etag: Optional. The current etag of the Instance. If an etag is provided and does not match the
current etag of the Instance, deletion will be blocked and an ABORTED error will be returned.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the service is used.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"instance_id", "cluster_id", "etag"} | set(AlloyDBWriteBaseOperator.template_fields)
)
def __init__(
self,
instance_id: str,
cluster_id: str,
etag: str | None = None,
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.instance_id = instance_id
self.cluster_id = cluster_id
self.etag = etag
def execute(self, context: Context) -> None:
if self.validate_request:
self.log.info("Validating a Delete AlloyDB instance request.")
else:
self.log.info("Deleting an AlloyDB instance.")
try:
operation = self.hook.delete_instance(
instance_id=self.instance_id,
cluster_id=self.cluster_id,
project_id=self.project_id,
location=self.location,
etag=self.etag,
request_id=self.request_id,
validate_only=self.validate_request,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except Exception as ex:
raise AirflowException(ex) from ex
else:
self.get_operation_result(operation)
if not self.validate_request:
self.log.info("AlloyDB instance %s was successfully removed.", self.instance_id)
| AlloyDBDeleteInstanceOperator |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_api_key_details.py | {
"start": 1132,
"end": 2780
} | class ____(OrganizationApiKeyDetailsBase):
method = "put"
def test_update_api_key_details(self) -> None:
data = {
"label": "New Label",
"allowed_origins": "sentry.io",
"scope_list": ["a", "b", "c", "d"],
}
self.get_success_response(self.organization.slug, self.api_key.id, **data)
api_key = ApiKey.objects.get(id=self.api_key.id, organization_id=self.organization.id)
assert api_key.label == "New Label"
assert api_key.allowed_origins == "sentry.io"
assert api_key.get_scopes() == ["a", "b", "c", "d"]
def test_update_api_key_details_legacy_data(self) -> None:
# Some old api keys have this psql special format string
with connections[ApiKey.objects.db].cursor() as cur:
cur.execute(
"update sentry_apikey set scope_list = %s where id = %s",
("{event:read,member:read,org:read,project:read,team:read}", self.api_key.id),
)
with assume_test_silo_mode(SiloMode.REGION):
assert ApiKeyReplica.objects.get(apikey_id=self.api_key.id).get_scopes() == [
"event:read",
"member:read",
"org:read",
"project:read",
"team:read",
]
data = {"scope_list": ["a", "b", "c", "d"]}
self.get_success_response(self.organization.slug, self.api_key.id, **data)
api_key = ApiKey.objects.get(id=self.api_key.id, organization_id=self.organization.id)
assert api_key.get_scopes() == ["a", "b", "c", "d"]
@control_silo_test
| OrganizationApiKeyDetailsPut |
python | kubernetes-client__python | kubernetes/client/models/v1_resource_slice.py | {
"start": 383,
"end": 6757
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1ResourceSliceSpec'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, local_vars_configuration=None): # noqa: E501
"""V1ResourceSlice - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
self.spec = spec
@property
def api_version(self):
"""Gets the api_version of this V1ResourceSlice. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ResourceSlice. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ResourceSlice.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ResourceSlice. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1ResourceSlice. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ResourceSlice. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ResourceSlice.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ResourceSlice. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ResourceSlice. # noqa: E501
:return: The metadata of this V1ResourceSlice. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ResourceSlice.
:param metadata: The metadata of this V1ResourceSlice. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1ResourceSlice. # noqa: E501
:return: The spec of this V1ResourceSlice. # noqa: E501
:rtype: V1ResourceSliceSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1ResourceSlice.
:param spec: The spec of this V1ResourceSlice. # noqa: E501
:type: V1ResourceSliceSpec
"""
if self.local_vars_configuration.client_side_validation and spec is None: # noqa: E501
raise ValueError("Invalid value for `spec`, must not be `None`") # noqa: E501
self._spec = spec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ResourceSlice):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ResourceSlice):
return True
return self.to_dict() != other.to_dict()
| V1ResourceSlice |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_assets.py | {
"start": 118268,
"end": 121572
} | class ____(ExecutingGraphQLContextTestMatrix):
def test_asset_wipe(self, graphql_context: WorkspaceRequestContext):
# run to create materialization
_create_partitioned_run(graphql_context, "integers_asset_job", "0")
asset_keys = graphql_context.instance.all_asset_keys()
assert AssetKey("integers_asset") in asset_keys
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_MATERIALIZATION_WITH_PARTITION,
variables={"assetKey": {"path": ["integers_asset"]}},
)
mat = result.data["assetOrError"]["assetMaterializations"][0]
assert mat["partition"] == "0"
# wipe
result = execute_dagster_graphql(
graphql_context,
WIPE_ASSETS,
variables={"assetPartitionRanges": [{"assetKey": {"path": ["integers_asset"]}}]},
)
assert result.data
assert result.data["wipeAssets"]
assert result.data["wipeAssets"]["__typename"] == "AssetWipeSuccess"
assert result.data["wipeAssets"]["assetPartitionRanges"][0]["assetKey"]["path"] == [
"integers_asset"
]
assert result.data["wipeAssets"]["assetPartitionRanges"][0]["partitionRange"] is None
asset_keys = graphql_context.instance.all_asset_keys()
assert AssetKey("integers_asset") not in asset_keys
# run again to create another materialization
_create_partitioned_run(graphql_context, "integers_asset_job", "0")
asset_keys = graphql_context.instance.all_asset_keys()
assert AssetKey("integers_asset") in asset_keys
result = execute_dagster_graphql(
graphql_context,
GET_ASSET_MATERIALIZATION_WITH_PARTITION,
variables={"assetKey": {"path": ["integers_asset"]}},
)
mat = result.data["assetOrError"]["assetMaterializations"][0]
assert mat["partition"] == "0"
# wipe with range
result = execute_dagster_graphql(
graphql_context,
WIPE_ASSETS,
variables={
"assetPartitionRanges": [
{
"assetKey": {"path": ["integers_asset"]},
"partitions": {"range": {"start": "0", "end": "0"}},
}
]
},
)
assert result.data
assert result.data["wipeAssets"]
assert result.data["wipeAssets"]["__typename"] == "UnsupportedOperationError"
assert "Partitioned asset wipe is not supported yet" in result.data["wipeAssets"]["message"]
# wipe for non-existant asset
result = execute_dagster_graphql(
graphql_context,
WIPE_ASSETS,
variables={
"assetPartitionRanges": [
{
"assetKey": {"path": ["does_not_exist"]},
"partitions": {"range": {"start": "0", "end": "0"}},
}
]
},
)
assert result.data
assert result.data["wipeAssets"]
assert result.data["wipeAssets"]["__typename"] == "AssetNotFoundError"
assert 'Asset key ["does_not_exist"] not found' in result.data["wipeAssets"]["message"]
| TestAssetWipe |
python | coleifer__peewee | peewee.py | {
"start": 71602,
"end": 78269
} | class ____(SelectBase):
def __init__(self, from_list=None, columns=None, group_by=None,
having=None, distinct=None, windows=None, for_update=None,
for_update_of=None, nowait=None, lateral=None, **kwargs):
super(Select, self).__init__(**kwargs)
self._from_list = (list(from_list) if isinstance(from_list, tuple)
else from_list) or []
self._returning = columns
self._group_by = group_by
self._having = having
self._windows = None
self._for_update = for_update # XXX: consider reorganizing.
self._for_update_of = for_update_of
self._for_update_nowait = nowait
self._lateral = lateral
self._distinct = self._simple_distinct = None
if distinct:
if isinstance(distinct, bool):
self._simple_distinct = distinct
else:
self._distinct = distinct
self._cursor_wrapper = None
def clone(self):
clone = super(Select, self).clone()
if clone._from_list:
clone._from_list = list(clone._from_list)
return clone
@Node.copy
def columns(self, *columns, **kwargs):
self._returning = columns
select = columns
@Node.copy
def select_extend(self, *columns):
self._returning = tuple(self._returning) + columns
@property
def selected_columns(self):
return self._returning
@selected_columns.setter
def selected_columns(self, value):
self._returning = value
@Node.copy
def from_(self, *sources):
self._from_list = list(sources)
@Node.copy
def join(self, dest, join_type=JOIN.INNER, on=None):
if not self._from_list:
raise ValueError('No sources to join on.')
item = self._from_list.pop()
self._from_list.append(Join(item, dest, join_type, on))
def left_outer_join(self, dest, on=None):
return self.join(dest, JOIN.LEFT_OUTER, on)
@Node.copy
def group_by(self, *columns):
grouping = []
for column in columns:
if isinstance(column, Table):
if not column._columns:
raise ValueError('Cannot pass a table to group_by() that '
'does not have columns explicitly '
'declared.')
grouping.extend([getattr(column, col_name)
for col_name in column._columns])
else:
grouping.append(column)
self._group_by = grouping
def group_by_extend(self, *values):
"""@Node.copy used from group_by() call"""
group_by = tuple(self._group_by or ()) + values
return self.group_by(*group_by)
@Node.copy
def having(self, *expressions):
if self._having is not None:
expressions = (self._having,) + expressions
self._having = reduce(operator.and_, expressions)
@Node.copy
def distinct(self, *columns):
if len(columns) == 1 and (columns[0] is True or columns[0] is False):
self._simple_distinct = columns[0]
else:
self._simple_distinct = False
self._distinct = columns
@Node.copy
def window(self, *windows):
self._windows = windows if windows else None
@Node.copy
def for_update(self, for_update=True, of=None, nowait=None):
if not for_update and (of is not None or nowait):
for_update = True
self._for_update = for_update
self._for_update_of = of
self._for_update_nowait = nowait
@Node.copy
def lateral(self, lateral=True):
self._lateral = lateral
def _get_query_key(self):
return self._alias
def __sql_selection__(self, ctx, is_subquery=False):
return ctx.sql(CommaNodeList(self._returning))
def __sql__(self, ctx):
if ctx.scope == SCOPE_COLUMN:
return self.apply_column(ctx)
if self._lateral and ctx.scope == SCOPE_SOURCE:
ctx.literal('LATERAL ')
is_subquery = ctx.subquery
state = {
'converter': None,
'in_function': False,
'parentheses': is_subquery or (ctx.scope == SCOPE_SOURCE),
'subquery': True,
}
if ctx.state.in_function and ctx.state.function_arg_count == 1:
state['parentheses'] = False
with ctx.scope_normal(**state):
# Defer calling parent SQL until here. This ensures that any CTEs
# for this query will be properly nested if this query is a
# sub-select or is used in an expression. See GH#1809 for example.
super(Select, self).__sql__(ctx)
ctx.literal('SELECT ')
if self._simple_distinct or self._distinct is not None:
ctx.literal('DISTINCT ')
if self._distinct:
(ctx
.literal('ON ')
.sql(EnclosedNodeList(self._distinct))
.literal(' '))
with ctx.scope_source():
ctx = self.__sql_selection__(ctx, is_subquery)
if self._from_list:
with ctx.scope_source(parentheses=False):
ctx.literal(' FROM ').sql(CommaNodeList(self._from_list))
if self._where is not None:
ctx.literal(' WHERE ').sql(self._where)
if self._group_by:
ctx.literal(' GROUP BY ').sql(CommaNodeList(self._group_by))
if self._having is not None:
ctx.literal(' HAVING ').sql(self._having)
if self._windows is not None:
ctx.literal(' WINDOW ')
ctx.sql(CommaNodeList(self._windows))
# Apply ORDER BY, LIMIT, OFFSET.
self._apply_ordering(ctx)
if self._for_update:
if not ctx.state.for_update:
raise ValueError('FOR UPDATE specified but not supported '
'by database.')
ctx.literal(' ')
ctx.sql(ForUpdate(self._for_update, self._for_update_of,
self._for_update_nowait))
# If the subquery is inside a function -or- we are evaluating a
# subquery on either side of an expression w/o an explicit alias, do
# not generate an alias + AS clause.
if ctx.state.in_function or (ctx.state.in_expr and
self._alias is None):
return ctx
return self.apply_alias(ctx)
| Select |
python | numba__numba | numba/tests/test_casting.py | {
"start": 423,
"end": 3913
} | class ____(unittest.TestCase):
def test_float_to_int(self):
pyfunc = float_to_int
cfunc = njit((types.float32,))(pyfunc)
self.assertEqual(cfunc.nopython_signatures[0].return_type, types.int32)
self.assertEqual(cfunc(12.3), pyfunc(12.3))
self.assertEqual(cfunc(12.3), int(12.3))
self.assertEqual(cfunc(-12.3), pyfunc(-12.3))
self.assertEqual(cfunc(-12.3), int(-12.3))
def test_int_to_float(self):
pyfunc = int_to_float
cfunc = njit((types.int64,))(pyfunc)
self.assertEqual(cfunc.nopython_signatures[0].return_type,
types.float64)
self.assertEqual(cfunc(321), pyfunc(321))
self.assertEqual(cfunc(321), 321. / 2)
def test_float_to_unsigned(self):
pyfunc = float_to_unsigned
cfunc = njit((types.float32,))(pyfunc)
self.assertEqual(cfunc.nopython_signatures[0].return_type, types.uint32)
self.assertEqual(cfunc(3.21), pyfunc(3.21))
self.assertEqual(cfunc(3.21), struct.unpack('I', struct.pack('i',
3))[0])
def test_float_to_complex(self):
pyfunc = float_to_complex
cfunc = njit((types.float64,))(pyfunc)
self.assertEqual(cfunc.nopython_signatures[0].return_type,
types.complex128)
self.assertEqual(cfunc(-3.21), pyfunc(-3.21))
self.assertEqual(cfunc(-3.21), -3.21 + 0j)
def test_array_to_array(self):
"""Make sure this compiles.
Cast C to A array
"""
@njit("f8(f8[:])")
def inner(x):
return x[0]
inner.disable_compile()
@njit("f8(f8[::1])")
def driver(x):
return inner(x)
x = np.array([1234], dtype=np.float64)
self.assertEqual(driver(x), x[0])
self.assertEqual(len(inner.overloads), 1)
def test_0darrayT_to_T(self):
@njit
def inner(x):
return x.dtype.type(x)
inputs = [
(np.bool_, True),
(np.float32, 12.3),
(np.float64, 12.3),
(np.int64, 12),
(np.complex64, 2j+3),
(np.complex128, 2j+3),
(np.timedelta64, np.timedelta64(3, 'h')),
(np.datetime64, np.datetime64('2016-01-01')),
('<U3', 'ABC'),
]
for (T, inp) in inputs:
x = np.array(inp, dtype=T)
self.assertEqual(inner(x), x[()])
def test_array_to_scalar(self):
"""
Ensure that a TypingError exception is raised if
user tries to convert numpy array to scalar
"""
with self.assertRaises(TypingError) as raises:
njit(())(numpy_scalar_cast_error)
self.assertIn("Casting array(float64, 1d, C) to int32 directly is unsupported.",
str(raises.exception))
def test_optional_to_optional(self):
"""
Test error due mishandling of Optional to Optional casting
Related issue: https://github.com/numba/numba/issues/1718
"""
# Attempt to cast optional(intp) to optional(float64)
opt_int = types.Optional(types.intp)
opt_flt = types.Optional(types.float64)
sig = opt_flt(opt_int)
@njit(sig)
def foo(a):
return a
self.assertEqual(foo(2), 2)
self.assertIsNone(foo(None))
if __name__ == '__main__':
unittest.main()
| TestCasting |
python | pytorch__pytorch | torch/_inductor/ops_handler.py | {
"start": 28625,
"end": 31124
} | class ____(DefaultHandler):
def __init__(self, parent_handler: OpsHandler[Any]):
self.parent_handler = parent_handler
self._output = IndentedBuffer(1)
self.var_counter = itertools.count()
@staticmethod
def ir_to_string(ir_fn, index, rindex=None) -> str:
from .ir import FlexibleLayout
from .virtualized import V
args = [index, rindex] if rindex is not None else [index]
names = ["index", "rindex"] if rindex is not None else ["index"]
formatter = KernelFormatterHandler(MockHandler())
with formatter._output.indent(-1):
formatter._output.writeline(f"def inner_fn({', '.join(names)}):")
for name, arg in zip(names, args):
if arg:
lhs = ", ".join(
[
str("_" if isinstance(v, (int, sympy.Integer)) else v)
for v in arg
]
)
formatter._output.writeline(f"{lhs} = {name}")
with (
V.set_ops_handler(formatter),
patch.object(FlexibleLayout, "allow_indexing", True),
):
result = ir_fn(*args)
return formatter.getvalue(result)
def indirect_indexing(self, *args, **kwargs) -> sympy.Symbol:
return self.parent_handler.indirect_indexing(*args, **kwargs)
def _write(self, line):
# replace line with a new variable name
varname = f"tmp{next(self.var_counter)}"
self._output.writeline(f"{varname} = {line}")
return varname
def _default(self, name: str, args: tuple[Any, ...], kwargs: dict[str, Any]) -> Any:
return pytree.tree_map(
self._write, getattr(self.parent_handler, name)(*args, **kwargs)
)
def reduction(
self,
dtype: torch.dtype,
src_dtype: torch.dtype,
reduction_type: ReductionType,
value: Union[str, tuple[str, ...]],
) -> Union[str, tuple[str, ...]]:
line = self.parent_handler.reduction(dtype, src_dtype, reduction_type, value)
num_values = reduction_num_outputs(reduction_type)
varnames = [f"tmp{next(self.var_counter)}" for _ in range(num_values)]
self._output.writeline(f"{','.join(varnames)} = {line}")
return tuple(varnames) if num_values > 1 else varnames[0]
def getvalue(self, result):
self._output.writeline(f"return {result}")
return self._output.getvalue()
| KernelFormatterHandler |
python | django__django | django/utils/functional.py | {
"start": 1413,
"end": 1799
} | class ____:
"""
Decorator that converts a method with a single cls argument into a property
that can be accessed directly from the class.
"""
def __init__(self, method=None):
self.fget = method
def __get__(self, instance, cls=None):
return self.fget(cls)
def getter(self, method):
self.fget = method
return self
| classproperty |
python | encode__django-rest-framework | rest_framework/fields.py | {
"start": 27862,
"end": 28607
} | class ____(CharField):
default_error_messages = {
'invalid': _('Enter a valid "slug" consisting of letters, numbers, underscores or hyphens.'),
'invalid_unicode': _('Enter a valid "slug" consisting of Unicode letters, numbers, underscores, or hyphens.')
}
def __init__(self, allow_unicode=False, **kwargs):
super().__init__(**kwargs)
self.allow_unicode = allow_unicode
if self.allow_unicode:
validator = RegexValidator(re.compile(r'^[-\w]+\Z', re.UNICODE), message=self.error_messages['invalid_unicode'])
else:
validator = RegexValidator(re.compile(r'^[-a-zA-Z0-9_]+$'), message=self.error_messages['invalid'])
self.validators.append(validator)
| SlugField |
python | getsentry__sentry | src/sentry/codecov/endpoints/repository_token_regenerate/repository_token_regenerate.py | {
"start": 1150,
"end": 2449
} | class ____(CodecovEndpoint):
owner = ApiOwner.CODECOV
publish_status = {
"POST": ApiPublishStatus.PUBLIC,
}
permission_classes = (RepositoryTokenRegeneratePermission,)
@extend_schema(
operation_id="Regenerates a repository upload token and returns the new token",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
PreventParams.OWNER,
PreventParams.REPOSITORY,
],
request=None,
responses={
200: RepositoryTokenRegenerateSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def post(self, request: Request, owner: RpcIntegration, repository: str, **kwargs) -> Response:
"""
Regenerates a repository upload token and returns the new token.
"""
owner_slug = owner.name
variables = {
"owner": owner_slug,
"repoName": repository,
}
client = CodecovApiClient(git_provider_org=owner_slug)
graphql_response = client.query(query=query, variables=variables)
token = RepositoryTokenRegenerateSerializer().to_representation(graphql_response.json())
return Response(token)
| RepositoryTokenRegenerateEndpoint |
python | docker__docker-py | docker/tls.py | {
"start": 34,
"end": 2320
} | class ____:
"""
TLS configuration.
Args:
client_cert (tuple of str): Path to client cert, path to client key.
ca_cert (str): Path to CA cert file.
verify (bool or str): This can be a bool or a path to a CA cert
file to verify against. If ``True``, verify using ca_cert;
if ``False`` or not specified, do not verify.
"""
cert = None
ca_cert = None
verify = None
def __init__(self, client_cert=None, ca_cert=None, verify=None):
# Argument compatibility/mapping with
# https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
# leaving verify=False
# "client_cert" must have both or neither cert/key files. In
# either case, Alert the user when both are expected, but any are
# missing.
if client_cert:
try:
tls_cert, tls_key = client_cert
except ValueError:
raise errors.TLSParameterError(
'client_cert must be a tuple of'
' (client certificate, key file)'
) from None
if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
not os.path.isfile(tls_key)):
raise errors.TLSParameterError(
'Path to a certificate and key files must be provided'
' through the client_cert param'
)
self.cert = (tls_cert, tls_key)
# If verify is set, make sure the cert exists
self.verify = verify
self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError(
'Invalid CA certificate provided for `ca_cert`.'
)
def configure_client(self, client):
"""
Configure a client with these TLS options.
"""
if self.verify and self.ca_cert:
client.verify = self.ca_cert
else:
client.verify = self.verify
if self.cert:
client.cert = self.cert
| TLSConfig |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol17.py | {
"start": 2167,
"end": 2357
} | class ____(Protocol[_T1_contra]):
def m1(self: "Protocol12[_T1_contra]", x: _T1_contra) -> None: ...
@classmethod
def m2(cls: "type[Protocol12[_T1_contra]]") -> None: ...
| Protocol12 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchLiteral1.py | {
"start": 3167,
"end": 3941
} | class ____(str): ...
def test_subclass1(a: A):
match a:
case "TEST" as m:
reveal_type(m, expected_text="A")
case x:
reveal_type(x, expected_text="A")
def test_subclass2(subj: int):
match subj:
case 1.0e4:
reveal_type(subj, expected_text="int")
def test_subclass3(subj: Literal[1]):
match subj:
case 1.0:
reveal_type(subj, expected_text="Literal[1]")
T1 = TypeVar("T1", Literal["A"], Literal["B"])
def test_constrained_typevar(subj: T1):
match subj:
case "A":
reveal_type(subj, expected_text="Literal['A']")
case "B":
reveal_type(subj, expected_text="Literal['B']")
case x:
reveal_type(x, expected_text="Never")
| A |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 1681,
"end": 1873
} | class ____(B3):
def m0(self, x):
pass
def m2(self, x):
pass # Issue or not?
def sink_in_subclass(b: B3):
b.m0(_test_source())
"""
A4
/ \
B4 C4
|
D4
"""
| D3 |
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/_inspect.py | {
"start": 597,
"end": 9655
} | class ____(JupyterMixin):
"""A renderable to inspect any Python Object.
Args:
obj (Any): An object to inspect.
title (str, optional): Title to display over inspect result, or None use type. Defaults to None.
help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
methods (bool, optional): Enable inspection of callables. Defaults to False.
docs (bool, optional): Also render doc strings. Defaults to True.
private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
sort (bool, optional): Sort attributes alphabetically. Defaults to True.
all (bool, optional): Show all attributes. Defaults to False.
value (bool, optional): Pretty print value of object. Defaults to True.
"""
def __init__(
self,
obj: Any,
*,
title: Optional[TextType] = None,
help: bool = False,
methods: bool = False,
docs: bool = True,
private: bool = False,
dunder: bool = False,
sort: bool = True,
all: bool = True,
value: bool = True,
) -> None:
self.highlighter = ReprHighlighter()
self.obj = obj
self.title = title or self._make_title(obj)
if all:
methods = private = dunder = True
self.help = help
self.methods = methods
self.docs = docs or help
self.private = private or dunder
self.dunder = dunder
self.sort = sort
self.value = value
def _make_title(self, obj: Any) -> Text:
"""Make a default title."""
title_str = (
str(obj)
if (isclass(obj) or callable(obj) or ismodule(obj))
else str(type(obj))
)
title_text = self.highlighter(title_str)
return title_text
def __rich__(self) -> Panel:
return Panel.fit(
Group(*self._render()),
title=self.title,
border_style="scope.border",
padding=(0, 1),
)
def _get_signature(self, name: str, obj: Any) -> Optional[Text]:
"""Get a signature for a callable."""
try:
_signature = str(signature(obj)) + ":"
except ValueError:
_signature = "(...)"
except TypeError:
return None
source_filename: Optional[str] = None
try:
source_filename = getfile(obj)
except (OSError, TypeError):
# OSError is raised if obj has no source file, e.g. when defined in REPL.
pass
callable_name = Text(name, style="inspect.callable")
if source_filename:
callable_name.stylize(f"link file://{source_filename}")
signature_text = self.highlighter(_signature)
qualname = name or getattr(obj, "__qualname__", name)
# If obj is a module, there may be classes (which are callable) to display
if inspect.isclass(obj):
prefix = "class"
elif inspect.iscoroutinefunction(obj):
prefix = "async def"
else:
prefix = "def"
qual_signature = Text.assemble(
(f"{prefix} ", f"inspect.{prefix.replace(' ', '_')}"),
(qualname, "inspect.callable"),
signature_text,
)
return qual_signature
def _render(self) -> Iterable[RenderableType]:
"""Render object."""
def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
key, (_error, value) = item
return (callable(value), key.strip("_").lower())
def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
"""Get attribute or any exception."""
try:
return (None, getattr(obj, attr_name))
except Exception as error:
return (error, None)
obj = self.obj
keys = dir(obj)
total_items = len(keys)
if not self.dunder:
keys = [key for key in keys if not key.startswith("__")]
if not self.private:
keys = [key for key in keys if not key.startswith("_")]
not_shown_count = total_items - len(keys)
items = [(key, safe_getattr(key)) for key in keys]
if self.sort:
items.sort(key=sort_items)
items_table = Table.grid(padding=(0, 1), expand=False)
items_table.add_column(justify="right")
add_row = items_table.add_row
highlighter = self.highlighter
if callable(obj):
signature = self._get_signature("", obj)
if signature is not None:
yield signature
yield ""
if self.docs:
_doc = self._get_formatted_doc(obj)
if _doc is not None:
doc_text = Text(_doc, style="inspect.help")
doc_text = highlighter(doc_text)
yield doc_text
yield ""
if self.value and not (isclass(obj) or callable(obj) or ismodule(obj)):
yield Panel(
Pretty(obj, indent_guides=True, max_length=10, max_string=60),
border_style="inspect.value.border",
)
yield ""
for key, (error, value) in items:
key_text = Text.assemble(
(
key,
"inspect.attr.dunder" if key.startswith("__") else "inspect.attr",
),
(" =", "inspect.equals"),
)
if error is not None:
warning = key_text.copy()
warning.stylize("inspect.error")
add_row(warning, highlighter(repr(error)))
continue
if callable(value):
if not self.methods:
continue
_signature_text = self._get_signature(key, value)
if _signature_text is None:
add_row(key_text, Pretty(value, highlighter=highlighter))
else:
if self.docs:
docs = self._get_formatted_doc(value)
if docs is not None:
_signature_text.append("\n" if "\n" in docs else " ")
doc = highlighter(docs)
doc.stylize("inspect.doc")
_signature_text.append(doc)
add_row(key_text, _signature_text)
else:
add_row(key_text, Pretty(value, highlighter=highlighter))
if items_table.row_count:
yield items_table
elif not_shown_count:
yield Text.from_markup(
f"[b cyan]{not_shown_count}[/][i] attribute(s) not shown.[/i] "
f"Run [b][magenta]inspect[/]([not b]inspect[/])[/b] for options."
)
def _get_formatted_doc(self, object_: Any) -> Optional[str]:
"""
Extract the docstring of an object, process it and returns it.
The processing consists in cleaning up the doctring's indentation,
taking only its 1st paragraph if `self.help` is not True,
and escape its control codes.
Args:
object_ (Any): the object to get the docstring from.
Returns:
Optional[str]: the processed docstring, or None if no docstring was found.
"""
docs = getdoc(object_)
if docs is None:
return None
docs = cleandoc(docs).strip()
if not self.help:
docs = _first_paragraph(docs)
return escape_control_codes(docs)
def get_object_types_mro(obj: Union[object, Type[Any]]) -> Tuple[type, ...]:
"""Returns the MRO of an object's class, or of the object itself if it's a class."""
if not hasattr(obj, "__mro__"):
# N.B. we cannot use `if type(obj) is type` here because it doesn't work with
# some types of classes, such as the ones that use abc.ABCMeta.
obj = type(obj)
return getattr(obj, "__mro__", ())
def get_object_types_mro_as_strings(obj: object) -> Collection[str]:
"""
Returns the MRO of an object's class as full qualified names, or of the object itself if it's a class.
Examples:
`object_types_mro_as_strings(JSONDecoder)` will return `['json.decoder.JSONDecoder', 'builtins.object']`
"""
return [
f'{getattr(type_, "__module__", "")}.{getattr(type_, "__qualname__", "")}'
for type_ in get_object_types_mro(obj)
]
def is_object_one_of_types(
obj: object, fully_qualified_types_names: Collection[str]
) -> bool:
"""
Returns `True` if the given object's class (or the object itself, if it's a class) has one of the
fully qualified names in its MRO.
"""
for type_name in get_object_types_mro_as_strings(obj):
if type_name in fully_qualified_types_names:
return True
return False
| Inspect |
python | bokeh__bokeh | src/bokeh/util/warnings.py | {
"start": 1433,
"end": 1630
} | class ____(DeprecationWarning):
''' A Bokeh-specific ``DeprecationWarning`` subclass.
Used to selectively filter Bokeh deprecations for unconditional display.
'''
| BokehDeprecationWarning |
python | sqlalchemy__sqlalchemy | examples/nested_sets/nested_sets.py | {
"start": 504,
"end": 3726
} | class ____(Base):
__tablename__ = "personnel"
__mapper_args__ = {
"batch": False # allows extension to fire for each
# instance before going to the next.
}
parent = None
emp = Column(String, primary_key=True)
left = Column("lft", Integer, nullable=False)
right = Column("rgt", Integer, nullable=False)
def __repr__(self):
return "Employee(%s, %d, %d)" % (self.emp, self.left, self.right)
@event.listens_for(Employee, "before_insert")
def before_insert(mapper, connection, instance):
if not instance.parent:
instance.left = 1
instance.right = 2
else:
personnel = mapper.persist_selectable
right_most_sibling = connection.scalar(
select(personnel.c.rgt).where(
personnel.c.emp == instance.parent.emp
)
)
connection.execute(
personnel.update()
.where(personnel.c.rgt >= right_most_sibling)
.values(
lft=case(
(
personnel.c.lft > right_most_sibling,
personnel.c.lft + 2,
),
else_=personnel.c.lft,
),
rgt=case(
(
personnel.c.rgt >= right_most_sibling,
personnel.c.rgt + 2,
),
else_=personnel.c.rgt,
),
)
)
instance.left = right_most_sibling
instance.right = right_most_sibling + 1
# before_update() would be needed to support moving of nodes
# after_delete() would be needed to support removal of nodes.
engine = create_engine("sqlite://", echo=True)
Base.metadata.create_all(engine)
session = Session(bind=engine)
albert = Employee(emp="Albert")
bert = Employee(emp="Bert")
chuck = Employee(emp="Chuck")
donna = Employee(emp="Donna")
eddie = Employee(emp="Eddie")
fred = Employee(emp="Fred")
bert.parent = albert
chuck.parent = albert
donna.parent = chuck
eddie.parent = chuck
fred.parent = chuck
# the order of "add" is important here. elements must be added in
# the order in which they should be INSERTed.
session.add_all([albert, bert, chuck, donna, eddie, fred])
session.commit()
print(session.query(Employee).all())
# 1. Find an employee and all their supervisors, no matter how deep the tree.
ealias = aliased(Employee)
print(
session.query(Employee)
.filter(ealias.left.between(Employee.left, Employee.right))
.filter(ealias.emp == "Eddie")
.all()
)
# 2. Find the employee and all their subordinates.
# (This query has a nice symmetry with the first query.)
print(
session.query(Employee)
.filter(Employee.left.between(ealias.left, ealias.right))
.filter(ealias.emp == "Chuck")
.all()
)
# 3. Find the level of each node, so you can print the tree
# as an indented listing.
for indentation, employee in (
session.query(func.count(Employee.emp).label("indentation") - 1, ealias)
.filter(ealias.left.between(Employee.left, Employee.right))
.group_by(ealias.emp)
.order_by(ealias.left)
):
print(" " * indentation + str(employee))
| Employee |
python | django-haystack__django-haystack | test_haystack/test_loading.py | {
"start": 6793,
"end": 6960
} | class ____(indexes.SearchIndex, indexes.Indexable):
document = indexes.CharField(document=True)
def get_model(self):
return MockModel
| InvalidSearchIndex |
python | scipy__scipy | benchmarks/benchmarks/stats.py | {
"start": 629,
"end": 1177
} | class ____(Benchmark):
param_names = ['alternative']
params = [
['two-sided', 'less', 'greater']
]
def setup(self, mode):
a = np.random.rand(2,2) * 10
self.a = a
def time_fisher_exact(self, alternative):
stats.fisher_exact(self.a, alternative=alternative)
def time_barnard_exact(self, alternative):
stats.barnard_exact(self.a, alternative=alternative)
def time_boschloo_exact(self, alternative):
stats.boschloo_exact(self.a, alternative=alternative)
| CorrelationFunctions |
python | wandb__wandb | wandb/vendor/pygments/formatters/bbcode.py | {
"start": 348,
"end": 3314
} | class ____(Formatter):
"""
Format tokens with BBcodes. These formatting codes are used by many
bulletin boards, so you can highlight your sourcecode with pygments before
posting it there.
This formatter has no support for background colors and borders, as there
are no common BBcode tags for that.
Some board systems (e.g. phpBB) don't support colors in their [code] tag,
so you can't use the highlighting together with that tag.
Text in a [code] tag usually is shown with a monospace font (which this
formatter can do with the ``monofont`` option) and no spaces (which you
need for indentation) are removed.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`codetag`
If set to true, put the output into ``[code]`` tags (default:
``false``)
`monofont`
If set to true, add a tag to show the code with a monospace font
(default: ``false``).
"""
name = 'BBCode'
aliases = ['bbcode', 'bb']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self._code = get_bool_opt(options, 'codetag', False)
self._mono = get_bool_opt(options, 'monofont', False)
self.styles = {}
self._make_styles()
def _make_styles(self):
for ttype, ndef in self.style:
start = end = ''
if ndef['color']:
start += '[color=#%s]' % ndef['color']
end = '[/color]' + end
if ndef['bold']:
start += '[b]'
end = '[/b]' + end
if ndef['italic']:
start += '[i]'
end = '[/i]' + end
if ndef['underline']:
start += '[u]'
end = '[/u]' + end
# there are no common BBcodes for background-color and border
self.styles[ttype] = start, end
def format_unencoded(self, tokensource, outfile):
if self._code:
outfile.write('[code]')
if self._mono:
outfile.write('[font=monospace]')
lastval = ''
lasttype = None
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
if ttype == lasttype:
lastval += value
else:
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
lastval = value
lasttype = ttype
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
if self._mono:
outfile.write('[/font]')
if self._code:
outfile.write('[/code]')
if self._code or self._mono:
outfile.write('\n')
| BBCodeFormatter |
python | networkx__networkx | networkx/algorithms/tests/test_planar_drawing.py | {
"start": 6368,
"end": 8765
} | class ____:
"""Compare vectors by their angle without loss of precision
All vectors in direction [0, 1] are the smallest.
The vectors grow in clockwise direction.
"""
__slots__ = ["x", "y", "node", "quadrant"]
def __init__(self, x, y, node):
self.x = x
self.y = y
self.node = node
if self.x >= 0 and self.y > 0:
self.quadrant = 1
elif self.x > 0 and self.y <= 0:
self.quadrant = 2
elif self.x <= 0 and self.y < 0:
self.quadrant = 3
else:
self.quadrant = 4
def __eq__(self, other):
return self.quadrant == other.quadrant and self.x * other.y == self.y * other.x
def __lt__(self, other):
if self.quadrant < other.quadrant:
return True
elif self.quadrant > other.quadrant:
return False
else:
return self.x * other.y < self.y * other.x
def __ne__(self, other):
return self != other
def __le__(self, other):
return not other < self
def __gt__(self, other):
return other < self
def __ge__(self, other):
return not self < other
def planar_drawing_conforms_to_embedding(embedding, pos):
"""Checks if pos conforms to the planar embedding
Returns true iff the neighbors are actually oriented in the orientation
specified of the embedding
"""
for v in embedding:
nbr_vectors = []
v_pos = pos[v]
for nbr in embedding[v]:
new_vector = Vector(pos[nbr][0] - v_pos[0], pos[nbr][1] - v_pos[1], nbr)
nbr_vectors.append(new_vector)
# Sort neighbors according to their phi angle
nbr_vectors.sort()
for idx, nbr_vector in enumerate(nbr_vectors):
cw_vector = nbr_vectors[(idx + 1) % len(nbr_vectors)]
ccw_vector = nbr_vectors[idx - 1]
if (
embedding[v][nbr_vector.node]["cw"] != cw_vector.node
or embedding[v][nbr_vector.node]["ccw"] != ccw_vector.node
):
return False
if cw_vector.node != nbr_vector.node and cw_vector == nbr_vector:
# Lines overlap
return False
if ccw_vector.node != nbr_vector.node and ccw_vector == nbr_vector:
# Lines overlap
return False
return True
| Vector |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/styles/style_transformation.py | {
"start": 8177,
"end": 9080
} | class ____(StyleTransformation):
"""
StyleTransformation class that can dynamically returns any
`StyleTransformation`.
:param get_style_transformation: Callable that returns a
:class:`.StyleTransformation` instance.
"""
def __init__(
self, get_style_transformation: Callable[[], StyleTransformation | None]
) -> None:
self.get_style_transformation = get_style_transformation
def transform_attrs(self, attrs: Attrs) -> Attrs:
style_transformation = (
self.get_style_transformation() or DummyStyleTransformation()
)
return style_transformation.transform_attrs(attrs)
def invalidation_hash(self) -> Hashable:
style_transformation = (
self.get_style_transformation() or DummyStyleTransformation()
)
return style_transformation.invalidation_hash()
| DynamicStyleTransformation |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.