| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
python | joke2k__faker | faker/providers/address/fr_CH/__init__.py | {
"start": 71,
"end": 8595
} | class ____(AddressProvider):
city_suffixes = (
"-des-Bois",
"-les-Bains",
"-la-Ville",
"-Dessus",
"-Dessous",
" am Rhein",
" am See",
" am Albis",
" an der Aare",
)
city_prefixes = ("Saint ", "Sainte ", "San ", "Ober", "Unter")
street_prefixes = ("rue", "rue", "chemin", "avenue", "boulevard")
address_formats = ("{{street_address}}\n{{postcode}} {{city}}",)
building_number_formats = ("%", "%#", "%#", "%#", "%##")
city_formats = (
"{{last_name}}",
"{{last_name}}",
"{{last_name}}",
"{{last_name}}",
"{{last_name}}{{city_suffix}}",
"{{last_name}}{{city_suffix}}",
"{{last_name}}{{city_suffix}}",
"{{last_name}}-près-{{last_name}}",
"{{last_name}}-sur-{{last_name}}",
"{{city_prefix}}{{last_name}}",
"{{last_name}} ({{canton_code}})",
)
street_address_formats = (
"{{street_name}}",
"{{street_name}} {{building_number}}",
"{{street_name}} {{building_number}}",
"{{street_name}} {{building_number}}",
"{{street_name}} {{building_number}}",
"{{street_name}} {{building_number}}",
)
street_name_formats = (
"{{street_prefix}} {{last_name}}",
"{{street_prefix}} {{first_name}} {{last_name}}",
"{{street_prefix}} de {{last_name}}",
)
postcode_formats = (
"1###",
"2###",
"3###",
"4###",
"5###",
"6###",
"7###",
"8###",
"9###",
)
cantons = (
("AG", "Argovie"),
("AI", "Appenzell Rhodes-Intérieures"),
("AR", "Appenzell Rhodes-Extérieures"),
("BE", "Berne"),
("BL", "Bâle-Campagne"),
("BS", "Bâle-Ville"),
("FR", "Fribourg"),
("GE", "Genève"),
("GL", "Glaris"),
("GR", "Grisons"),
("JU", "Jura"),
("LU", "Lucerne"),
("NE", "Neuchâtel"),
("NW", "Nidwald"),
("OW", "Obwald"),
("SG", "Saint-Gall"),
("SH", "Schaffhouse"),
("SO", "Soleure"),
("SZ", "Schwytz"),
("TG", "Thurgovie"),
("TI", "Tessin"),
("UR", "Uri"),
("VD", "Vaud"),
("VS", "Valais"),
("ZG", "Zoug"),
("ZH", "Zurich"),
)
countries = (
"Afghanistan",
"Afrique du sud",
"Albanie",
"Algérie",
"Allemagne",
"Andorre",
"Angola",
"Anguilla",
"Antarctique",
"Antigua et Barbuda",
"Antilles néerlandaises",
"Arabie saoudite",
"Argentine",
"Arménie",
"Aruba",
"Australie",
"Autriche",
"Azerbaïdjan",
"Bahamas",
"Bahrain",
"Bangladesh",
"Belgique",
"Belize",
"Benin",
"Bermudes (Les)",
"Bhoutan",
"Biélorussie",
"Bolivie",
"Bosnie-Herzégovine",
"Botswana",
"Bouvet (Îles)",
"Brunei",
"Brésil",
"Bulgarie",
"Burkina Faso",
"Burundi",
"Cambodge",
"Cameroun",
"Canada",
"Cap Vert",
"Cayman (Îles)",
"Chili",
"Chine (Rép. pop.)",
"Christmas (Île)",
"Chypre",
"Cocos (Îles)",
"Colombie",
"Comores",
"Cook (Îles)",
"Corée du Nord",
"Corée, Sud",
"Costa Rica",
"Croatie",
"Cuba",
"Côte d'Ivoire",
"Danemark",
"Djibouti",
"Dominique",
"Égypte",
"El Salvador",
"Émirats arabes unis",
"Équateur",
"Érythrée",
"Espagne",
"Estonie",
"États-Unis",
"Ethiopie",
"Falkland (Île)",
"Fidji (République des)",
"Finlande",
"France",
"Féroé (Îles)",
"Gabon",
"Gambie",
"Ghana",
"Gibraltar",
"Grenade",
"Groenland",
"Grèce",
"Guadeloupe",
"Guam",
"Guatemala",
"Guinée",
"Guinée Equatoriale",
"Guinée-Bissau",
"Guyane",
"Guyane française",
"Géorgie",
"Géorgie du Sud et Sandwich du Sud (Îles)",
"Haïti",
"Heard et McDonald (Îles)",
"Honduras",
"Hong Kong",
"Hongrie",
"Îles Mineures Éloignées des États-Unis",
"Inde",
"Indonésie",
"Irak",
"Iran",
"Irlande",
"Islande",
"Israël",
"Italie",
"Jamaïque",
"Japon",
"Jordanie",
"Kazakhstan",
"Kenya",
"Kirghizistan",
"Kiribati",
"Koweit",
"La Barbad",
"Laos",
"Lesotho",
"Lettonie",
"Liban",
"Libye",
"Libéria",
"Liechtenstein",
"Lithuanie",
"Luxembourg",
"Macau",
"Macédoine du Nord",
"Madagascar",
"Malaisie",
"Malawi",
"Maldives (Îles)",
"Mali",
"Malte",
"Mariannes du Nord (Îles)",
"Maroc",
"Marshall (Îles)",
"Martinique",
"Maurice",
"Mauritanie",
"Mayotte",
"Mexique",
"Micronésie (États fédérés de)",
"Moldavie",
"Monaco",
"Mongolie",
"Montserrat",
"Mozambique",
"Myanmar",
"Namibie",
"Nauru",
"Nepal",
"Nicaragua",
"Niger",
"Nigeria",
"Niue",
"Norfolk (Îles)",
"Norvège",
"Nouvelle Calédonie",
"Nouvelle-Zélande",
"Oman",
"Ouganda",
"Ouzbékistan",
"Pakistan",
"Palau",
"Panama",
"Papouasie-Nouvelle-Guinée",
"Paraguay",
"Pays-Bas",
"Philippines",
"Pitcairn (Îles)",
"Pologne",
"Polynésie française",
"Porto Rico",
"Portugal",
"Pérou",
"Qatar",
"Roumanie",
"Royaume-Uni",
"Russie",
"Rwanda",
"Rép. Dém. du Congo",
"République centrafricaine",
"République Dominicaine",
"République tchèque",
"Réunion (La)",
"Sahara Occidental",
"Saint Pierre et Miquelon",
"Saint Vincent et les Grenadines",
"Saint-Kitts et Nevis",
"Saint-Marin (Rép. de)",
"Sainte Hélène",
"Sainte Lucie",
"Samoa",
"Samoa",
"Seychelles",
"Sierra Leone",
"Singapour",
"Slovaquie",
"Slovénie",
"Somalie",
"Soudan",
"Sri Lanka",
"Suisse",
"Suriname",
"Suède",
"Svalbard et Jan Mayen (Îles)",
"Swaziland",
"Syrie",
"São Tomé et Príncipe (Rép.)",
"Sénégal",
"Tadjikistan",
"Taiwan",
"Tanzanie",
"Tchad",
"Territoire britannique de l'océan Indien",
"Territoires français du sud",
"Thailande",
"Timor",
"Togo",
"Tokelau",
"Tonga",
"Trinité et Tobago",
"Tunisie",
"Turkménistan",
"Turks et Caïques (Îles)",
"Turquie",
"Tuvalu",
"Ukraine",
"Uruguay",
"Vanuatu",
"Vatican (Etat du)",
"Venezuela",
"Vierges (Îles)",
"Vierges britanniques (Îles)",
"Vietnam",
"Wallis et Futuna (Îles)",
"Yemen",
"Yougoslavie",
"Zambie",
"Zaïre",
"Zimbabwe",
)
def street_prefix(self) -> str:
"""
:example: 'rue'
"""
return self.random_element(self.street_prefixes)
def city_prefix(self) -> str:
"""
        :example: 'Saint '
"""
return self.random_element(self.city_prefixes)
def canton(self) -> Tuple[str, str]:
"""
        Randomly returns a Swiss canton ('Abbreviated', 'Name').
        :example: ('VD', 'Vaud')
"""
return self.random_element(self.cantons)
def administrative_unit(self) -> str:
"""
Randomly returns a Swiss canton name.
:example: 'Vaud'
"""
return self.canton()[1]
canton_name = administrative_unit
def canton_code(self) -> str:
"""
Randomly returns a Swiss canton code.
:example: 'VD'
"""
return self.canton()[0]
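    # Usage sketch (illustrative; Faker wires the formats above into the standard provider API):
    #   from faker import Faker
    #   fake = Faker("fr_CH")
    #   fake.canton_code()   # e.g. 'VD'
    #   fake.address()       # combines street_address, postcode and city per address_formats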
| Provider |
python | kamyu104__LeetCode-Solutions | Python/disconnect-path-in-a-binary-matrix-by-at-most-one-flip.py | {
"start": 1116,
"end": 1833
} | class ____(object):
def isPossibleToCutPath(self, grid):
"""
:type grid: List[List[int]]
:rtype: bool
"""
def iter_dfs():
stk = [(0, 0)]
while stk:
i, j = stk.pop()
if not (i < len(grid) and j < len(grid[0]) and grid[i][j]):
continue
if (i, j) == (len(grid)-1, len(grid[0])-1):
return True
if (i, j) != (0, 0):
grid[i][j] = 0
stk.append((i, j+1))
stk.append((i+1, j))
return False
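        # First DFS: if the end is unreachable, the grid is already disconnected.
        # Otherwise it zeroes every cell it visits (consuming one monotone path);
        # if the second DFS then also fails, no two vertex-disjoint paths exist,
        # so flipping a single cell on the remaining path disconnects (0, 0)
        # from the bottom-right corner.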
return not iter_dfs() or not iter_dfs()
# Time: O(m * n)
# Space: O(m + n)
# dfs
| Solution2 |
python | pandas-dev__pandas | pandas/tests/scalar/timestamp/test_timestamp.py | {
"start": 17537,
"end": 31433
} | class ____:
@pytest.fixture(params=["s", "ms", "us"])
def reso(self, request):
return request.param
@pytest.fixture
def dt64(self, reso):
# cases that are in-bounds for nanosecond, so we can compare against
# the existing implementation.
return np.datetime64("2016-01-01", reso)
@pytest.fixture
def ts(self, dt64):
return Timestamp._from_dt64(dt64)
@pytest.fixture
def ts_tz(self, ts, tz_aware_fixture):
tz = maybe_get_tz(tz_aware_fixture)
return Timestamp._from_value_and_reso(ts._value, ts._creso, tz)
def test_non_nano_construction(self, dt64, ts, reso):
assert ts._value == dt64.view("i8")
if reso == "s":
assert ts._creso == NpyDatetimeUnit.NPY_FR_s.value
elif reso == "ms":
assert ts._creso == NpyDatetimeUnit.NPY_FR_ms.value
elif reso == "us":
assert ts._creso == NpyDatetimeUnit.NPY_FR_us.value
def test_non_nano_fields(self, dt64, ts):
alt = Timestamp(dt64)
assert ts.year == alt.year
assert ts.month == alt.month
assert ts.day == alt.day
assert ts.hour == ts.minute == ts.second == ts.microsecond == 0
assert ts.nanosecond == 0
assert ts.to_julian_date() == alt.to_julian_date()
assert ts.weekday() == alt.weekday()
assert ts.isoweekday() == alt.isoweekday()
def test_start_end_fields(self, ts):
assert ts.is_year_start
assert ts.is_quarter_start
assert ts.is_month_start
assert not ts.is_year_end
assert not ts.is_month_end
        assert not ts.is_quarter_end
# 2016-01-01 is a Friday, so is year/quarter/month start with this freq
assert ts.is_year_start
assert ts.is_quarter_start
assert ts.is_month_start
assert not ts.is_year_end
assert not ts.is_month_end
        assert not ts.is_quarter_end
def test_day_name(self, dt64, ts):
alt = Timestamp(dt64)
assert ts.day_name() == alt.day_name()
def test_month_name(self, dt64, ts):
alt = Timestamp(dt64)
assert ts.month_name() == alt.month_name()
def test_tz_convert(self, ts):
ts = Timestamp._from_value_and_reso(ts._value, ts._creso, timezone.utc)
tz = zoneinfo.ZoneInfo("US/Pacific")
result = ts.tz_convert(tz)
assert isinstance(result, Timestamp)
assert result._creso == ts._creso
assert tz_compare(result.tz, tz)
def test_repr(self, dt64, ts):
alt = Timestamp(dt64)
assert str(ts) == str(alt)
assert repr(ts) == repr(alt)
def test_comparison(self, dt64, ts):
alt = Timestamp(dt64)
assert ts == dt64
assert dt64 == ts
assert ts == alt
assert alt == ts
assert not ts != dt64
assert not dt64 != ts
assert not ts != alt
assert not alt != ts
assert not ts < dt64
assert not dt64 < ts
assert not ts < alt
assert not alt < ts
assert not ts > dt64
assert not dt64 > ts
assert not ts > alt
assert not alt > ts
assert ts >= dt64
assert dt64 >= ts
assert ts >= alt
assert alt >= ts
assert ts <= dt64
assert dt64 <= ts
assert ts <= alt
assert alt <= ts
def test_cmp_cross_reso(self):
# numpy gets this wrong because of silent overflow
dt64 = np.datetime64(9223372800, "s") # won't fit in M8[ns]
ts = Timestamp._from_dt64(dt64)
# subtracting 3600*24 gives a datetime64 that _can_ fit inside the
# nanosecond implementation bounds.
other = Timestamp(dt64 - 3600 * 24).as_unit("ns")
assert other < ts
assert other.asm8 > ts.asm8 # <- numpy gets this wrong
assert ts > other
assert ts.asm8 < other.asm8 # <- numpy gets this wrong
assert not other == ts
assert ts != other
@pytest.mark.xfail(reason="Dispatches to np.datetime64 which is wrong")
def test_cmp_cross_reso_reversed_dt64(self):
dt64 = np.datetime64(106752, "D") # won't fit in M8[ns]
ts = Timestamp._from_dt64(dt64)
other = Timestamp(dt64 - 1)
assert other.asm8 < ts
def test_pickle(self, ts, tz_aware_fixture):
tz = tz_aware_fixture
tz = maybe_get_tz(tz)
ts = Timestamp._from_value_and_reso(ts._value, ts._creso, tz)
rt = tm.round_trip_pickle(ts)
assert rt._creso == ts._creso
assert rt == ts
def test_normalize(self, dt64, ts):
alt = Timestamp(dt64)
result = ts.normalize()
assert result._creso == ts._creso
assert result == alt.normalize()
def test_asm8(self, dt64, ts):
rt = ts.asm8
assert rt == dt64
assert rt.dtype == dt64.dtype
def test_to_numpy(self, dt64, ts):
res = ts.to_numpy()
assert res == dt64
assert res.dtype == dt64.dtype
def test_to_datetime64(self, dt64, ts):
res = ts.to_datetime64()
assert res == dt64
assert res.dtype == dt64.dtype
def test_timestamp(self, dt64, ts):
alt = Timestamp(dt64)
assert ts.timestamp() == alt.timestamp()
def test_to_period(self, dt64, ts):
alt = Timestamp(dt64)
assert ts.to_period("D") == alt.to_period("D")
@pytest.mark.parametrize(
"td", [timedelta(days=4), Timedelta(days=4), np.timedelta64(4, "D")]
)
def test_addsub_timedeltalike_non_nano(self, dt64, ts, td):
exp_reso = max(ts._creso, Timedelta(td)._creso)
result = ts - td
expected = Timestamp(dt64) - td
assert isinstance(result, Timestamp)
assert result._creso == exp_reso
assert result == expected
result = ts + td
expected = Timestamp(dt64) + td
assert isinstance(result, Timestamp)
assert result._creso == exp_reso
assert result == expected
result = td + ts
expected = td + Timestamp(dt64)
assert isinstance(result, Timestamp)
assert result._creso == exp_reso
assert result == expected
def test_addsub_offset(self, ts_tz):
# specifically non-Tick offset
off = offsets.YearEnd(1)
result = ts_tz + off
assert isinstance(result, Timestamp)
assert result._creso == ts_tz._creso
if ts_tz.month == 12 and ts_tz.day == 31:
assert result.year == ts_tz.year + 1
else:
assert result.year == ts_tz.year
assert result.day == 31
assert result.month == 12
assert tz_compare(result.tz, ts_tz.tz)
result = ts_tz - off
assert isinstance(result, Timestamp)
assert result._creso == ts_tz._creso
assert result.year == ts_tz.year - 1
assert result.day == 31
assert result.month == 12
assert tz_compare(result.tz, ts_tz.tz)
def test_sub_datetimelike_mismatched_reso(self, ts_tz):
# case with non-lossy rounding
ts = ts_tz
# choose a unit for `other` that doesn't match ts_tz's;
# this construction ensures we get cases with other._creso < ts._creso
# and cases with other._creso > ts._creso
unit = {
NpyDatetimeUnit.NPY_FR_us.value: "ms",
NpyDatetimeUnit.NPY_FR_ms.value: "s",
NpyDatetimeUnit.NPY_FR_s.value: "us",
}[ts._creso]
other = ts.as_unit(unit)
assert other._creso != ts._creso
result = ts - other
assert isinstance(result, Timedelta)
assert result._value == 0
assert result._creso == max(ts._creso, other._creso)
result = other - ts
assert isinstance(result, Timedelta)
assert result._value == 0
assert result._creso == max(ts._creso, other._creso)
if ts._creso < other._creso:
# Case where rounding is lossy
other2 = other + Timedelta._from_value_and_reso(1, other._creso)
exp = ts.as_unit(other.unit) - other2
res = ts - other2
assert res == exp
assert res._creso == max(ts._creso, other._creso)
res = other2 - ts
assert res == -exp
assert res._creso == max(ts._creso, other._creso)
else:
ts2 = ts + Timedelta._from_value_and_reso(1, ts._creso)
exp = ts2 - other.as_unit(ts2.unit)
res = ts2 - other
assert res == exp
assert res._creso == max(ts._creso, other._creso)
res = other - ts2
assert res == -exp
assert res._creso == max(ts._creso, other._creso)
def test_sub_timedeltalike_mismatched_reso(self, ts_tz):
# case with non-lossy rounding
ts = ts_tz
# choose a unit for `other` that doesn't match ts_tz's;
# this construction ensures we get cases with other._creso < ts._creso
# and cases with other._creso > ts._creso
unit = {
NpyDatetimeUnit.NPY_FR_us.value: "ms",
NpyDatetimeUnit.NPY_FR_ms.value: "s",
NpyDatetimeUnit.NPY_FR_s.value: "us",
}[ts._creso]
other = Timedelta(0).as_unit(unit)
assert other._creso != ts._creso
result = ts + other
assert isinstance(result, Timestamp)
assert result == ts
assert result._creso == max(ts._creso, other._creso)
result = other + ts
assert isinstance(result, Timestamp)
assert result == ts
assert result._creso == max(ts._creso, other._creso)
if ts._creso < other._creso:
# Case where rounding is lossy
other2 = other + Timedelta._from_value_and_reso(1, other._creso)
exp = ts.as_unit(other.unit) + other2
res = ts + other2
assert res == exp
assert res._creso == max(ts._creso, other._creso)
res = other2 + ts
assert res == exp
assert res._creso == max(ts._creso, other._creso)
else:
ts2 = ts + Timedelta._from_value_and_reso(1, ts._creso)
exp = ts2 + other.as_unit(ts2.unit)
res = ts2 + other
assert res == exp
assert res._creso == max(ts._creso, other._creso)
res = other + ts2
assert res == exp
assert res._creso == max(ts._creso, other._creso)
def test_addition_doesnt_downcast_reso(self):
# https://github.com/pandas-dev/pandas/pull/48748#pullrequestreview-1122635413
ts = Timestamp(year=2022, month=1, day=1, microsecond=999999).as_unit("us")
td = Timedelta(microseconds=1).as_unit("us")
res = ts + td
assert res._creso == ts._creso
def test_sub_timedelta64_mismatched_reso(self, ts_tz):
ts = ts_tz
res = ts + np.timedelta64(1, "ns")
exp = ts.as_unit("ns") + np.timedelta64(1, "ns")
assert exp == res
assert exp._creso == NpyDatetimeUnit.NPY_FR_ns.value
def test_min(self, ts):
assert ts.min <= ts
assert ts.min._creso == ts._creso
assert ts.min._value == NaT._value + 1
def test_max(self, ts):
assert ts.max >= ts
assert ts.max._creso == ts._creso
assert ts.max._value == np.iinfo(np.int64).max
def test_resolution(self, ts):
expected = Timedelta._from_value_and_reso(1, ts._creso)
result = ts.resolution
assert result == expected
assert result._creso == expected._creso
def test_out_of_ns_bounds(self):
# https://github.com/pandas-dev/pandas/issues/51060
result = Timestamp(-52700112000, unit="s")
assert result == Timestamp("0300-01-01")
assert result.to_numpy() == np.datetime64("0300-01-01T00:00:00", "s")
def test_timestamp_class_min_max_resolution():
# when accessed on the class (as opposed to an instance), we default
# to nanoseconds
assert Timestamp.min == Timestamp(NaT._value + 1)
assert Timestamp.min._creso == NpyDatetimeUnit.NPY_FR_ns.value
assert Timestamp.max == Timestamp(np.iinfo(np.int64).max)
assert Timestamp.max._creso == NpyDatetimeUnit.NPY_FR_ns.value
assert Timestamp.resolution == Timedelta(1)
assert Timestamp.resolution._creso == NpyDatetimeUnit.NPY_FR_ns.value
def test_delimited_date():
# https://github.com/pandas-dev/pandas/issues/50231
with tm.assert_produces_warning(None):
result = Timestamp("13-01-2000")
expected = Timestamp(2000, 1, 13)
assert result == expected
def test_utctimetuple():
# GH 32174
ts = Timestamp("2000-01-01", tz="UTC")
result = ts.utctimetuple()
expected = time.struct_time((2000, 1, 1, 0, 0, 0, 5, 1, 0))
assert result == expected
def test_negative_dates():
# https://github.com/pandas-dev/pandas/issues/50787
ts = Timestamp("-2000-01-01")
msg = (
" not yet supported on Timestamps which are outside the range of "
"Python's standard library. For now, please call the components you need "
r"\(such as `.year` and `.month`\) and construct your string from there.$"
)
func = "^strftime"
with pytest.raises(NotImplementedError, match=func + msg):
ts.strftime("%Y")
msg = (
" not yet supported on Timestamps which "
"are outside the range of Python's standard library. "
)
func = "^date"
with pytest.raises(NotImplementedError, match=func + msg):
ts.date()
func = "^isocalendar"
with pytest.raises(NotImplementedError, match=func + msg):
ts.isocalendar()
func = "^timetuple"
with pytest.raises(NotImplementedError, match=func + msg):
ts.timetuple()
func = "^toordinal"
with pytest.raises(NotImplementedError, match=func + msg):
ts.toordinal()
| TestNonNano |
python | huggingface__transformers | tests/models/kosmos2/test_modeling_kosmos2.py | {
"start": 1809,
"end": 4026
} | class ____:
def __init__(
self,
parent,
batch_size=12,
image_size=32,
patch_size=4,
num_channels=3,
is_training=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
initializer_range=1e-10,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.scope = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return Kosmos2VisionConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
initializer_range=self.initializer_range,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
| Kosmos2VisionModelTester |
python | wireservice__csvkit | csvkit/utilities/csvclean.py | {
"start": 126,
"end": 4860
} | class ____(CSVKitUtility):
description = 'Report and fix common errors in a CSV file.'
override_flags = ['L', 'I']
def add_arguments(self):
self.argparser.add_argument(
'--length-mismatch', dest='length_mismatch', action='store_true',
help='Report data rows that are shorter or longer than the header row.')
self.argparser.add_argument(
'--empty-columns', dest='empty_columns', action='store_true',
help='Report empty columns as errors.')
self.argparser.add_argument(
'-a', '--enable-all-checks', dest='enable_all_checks', action='store_true',
help='Enable all error reporting.')
self.argparser.add_argument(
'--omit-error-rows', dest='omit_error_rows', action='store_true',
help='Omit data rows that contain errors, from standard output.')
self.argparser.add_argument(
'--label', dest='label',
help='Add a "label" column to standard error. Useful in automated workflows. '
'Use "-" to default to the input filename.')
self.argparser.add_argument(
'--header-normalize-space', dest='header_normalize_space', action='store_true',
help='Strip leading and trailing whitespace and replace sequences of whitespace characters by a single '
'space in the header.')
self.argparser.add_argument(
'--join-short-rows', dest='join_short_rows', action='store_true',
help='Merges short rows into a single row.')
self.argparser.add_argument(
'--separator', dest='separator', default='\n',
help='The string with which to join short rows. Defaults to a newline.')
self.argparser.add_argument(
'--fill-short-rows', dest='fill_short_rows', action='store_true',
help='Fill short rows with the missing cells.')
self.argparser.add_argument(
'--fillvalue', dest='fillvalue',
help='The value with which to fill short rows. Defaults to none.')
def main(self):
if self.additional_input_expected():
sys.stderr.write('No input file or piped data provided. Waiting for standard input:\n')
if (
# Checks
not self.args.length_mismatch
and not self.args.empty_columns
and not self.args.enable_all_checks
# Fixes
and not self.args.header_normalize_space
and not self.args.join_short_rows
and not self.args.fill_short_rows
):
self.argparser.error('No checks or fixes were enabled. See available options with: csvclean --help')
if self.args.join_short_rows and self.args.fill_short_rows:
self.argparser.error('The --join-short-rows and --fill-short-rows options are mutually exclusive.')
default = self.args.enable_all_checks
reader = agate.csv.reader(self.skip_lines(), **self.reader_kwargs)
checker = RowChecker(
reader,
# Checks
length_mismatch=default or self.args.length_mismatch,
empty_columns=default or self.args.empty_columns,
# Fixes
header_normalize_space=self.args.header_normalize_space,
join_short_rows=self.args.join_short_rows,
separator=self.args.separator,
fill_short_rows=self.args.fill_short_rows,
fillvalue=self.args.fillvalue,
# Other
zero_based=self.args.zero_based,
omit_error_rows=self.args.omit_error_rows,
)
label = self.args.label
if label == '-':
if self.input_file == sys.stdin:
label = 'stdin'
else:
label = self.input_file.name
output_writer = agate.csv.writer(self.output_file, **self.writer_kwargs)
output_writer.writerow(checker.column_names)
for row in checker.checked_rows():
output_writer.writerow(row)
if checker.errors:
error_writer = agate.csv.writer(self.error_file, **self.writer_kwargs)
fieldnames = ['line_number', 'msg'] + checker.column_names
if self.args.label:
fieldnames.insert(0, 'label')
error_writer.writerow(fieldnames)
for error in checker.errors:
row = [error.line_number, error.msg] + error.row
if self.args.label:
row.insert(0, label)
error_writer.writerow(row)
sys.exit(1)
def launch_new_instance():
utility = CSVClean()
utility.run()
if __name__ == '__main__':
launch_new_instance()
| CSVClean |
python | pytorch__pytorch | torch/_inductor/fx_passes/memory_estimator.py | {
"start": 768,
"end": 11516
} | class ____:
"""
Tracks storage allocation and usage relationships in an FX graph.
Differentiates between:
- Fresh allocations: nodes that allocate new storage (not views/aliases)
- Uses: nodes that use a storage as input
"""
def __init__(self, nodes: list[fx.Node]):
# Map from node to the fresh storages it allocates (not views/aliases)
self.node_to_fresh_allocations: dict[fx.Node, OrderedSet[StorageKey]] = {}
# Map from storage to the node that originally allocated it
self.storage_to_allocator: dict[StorageKey, fx.Node] = {}
# Map from node to all storages it uses as inputs
self.node_to_storage_uses: dict[fx.Node, OrderedSet[StorageKey]] = {}
# Map from storage to all nodes that use it
self.storage_to_uses: dict[StorageKey, OrderedSet[fx.Node]] = defaultdict(
OrderedSet
)
# Map from storage to the last node that uses it
self.storage_to_last_user: dict[StorageKey, fx.Node] = {}
# Map from node to storages that have their last use at that node
self.node_to_storages_last_used: dict[fx.Node, OrderedSet[StorageKey]] = (
defaultdict(OrderedSet)
)
# Track all output storages for each node (for building usage graph)
self.node_to_output_storages: dict[fx.Node, OrderedSet[StorageKey]] = {}
# First pass: build storage allocations and track uses
for node in nodes:
# Get output storages
output_storages = self._get_output_storages(node)
self.node_to_output_storages[node] = output_storages
# Track fresh allocations
fresh_allocations: OrderedSet[StorageKey] = OrderedSet()
for storage_key in output_storages:
if storage_key not in self.storage_to_allocator:
self.storage_to_allocator[storage_key] = node
fresh_allocations.add(storage_key)
self.node_to_fresh_allocations[node] = fresh_allocations
# Track input storage uses (safe because inputs were already processed)
input_storages = self._get_input_storages(node)
self.node_to_storage_uses[node] = input_storages
for storage_key in input_storages:
self.storage_to_uses[storage_key].add(node)
# Second pass: find last users (iterate in reverse)
for node in reversed(nodes):
input_storages = self.node_to_storage_uses[node]
for storage_key in input_storages:
if storage_key not in self.storage_to_last_user:
self.storage_to_last_user[storage_key] = node
self.node_to_storages_last_used[node].add(storage_key)
@staticmethod
def _get_output_storages(node: fx.Node) -> OrderedSet[StorageKey]:
"""
Get all storages from a node's outputs.
Uses pytree to handle arbitrary nested structures.
"""
val = node.meta.get("val")
if val is None:
return OrderedSet()
storages: OrderedSet[StorageKey] = OrderedSet()
def collect_storage(tensor: torch._subclasses.FakeTensor) -> None:
storages.add(StorageKey(tensor.untyped_storage(), tensor.device))
# Use tree_map_only to handle FakeTensors in nested structures
tree_map_only(torch._subclasses.FakeTensor, collect_storage, val)
return storages
def _get_input_storages(self, node: fx.Node) -> OrderedSet[StorageKey]:
"""
Get all storages from a node's inputs.
"""
input_storages: OrderedSet[StorageKey] = OrderedSet()
for input_node in node.all_input_nodes:
input_storages.update(self.node_to_output_storages[input_node])
return input_storages
def get_fresh_allocations(self, node: fx.Node) -> OrderedSet[StorageKey]:
"""Get all fresh storage allocations by this node (not views/aliases)."""
return self.node_to_fresh_allocations[node]
def get_storage_uses(self, node: fx.Node) -> OrderedSet[StorageKey]:
"""Get all storages that this node uses as inputs."""
return self.node_to_storage_uses[node]
def get_storages_last_used(
self,
node: fx.Node,
) -> OrderedSet[StorageKey]:
"""
Get storages whose last use is at this node.
"""
return self.node_to_storages_last_used[node]
def _size_of_default(num_bytes: int | torch.SymInt) -> int:
return hint_int(num_bytes, fallback=torch._inductor.config.unbacked_symint_fallback)
def device_filter(device: torch.device) -> bool:
return device.type != "cpu"
def build_memory_profile(
graph: fx.Graph,
is_releasable: Callable[[fx.Node], bool],
size_of: Callable[[int | torch.SymInt], int] | None = None,
) -> list[int]:
"""
Function to estimate the memory profile of an input FX graph.
Args:
- graph (fx.Graph): The input FX graph for which the memory profile
is to be estimated.
- is_releasable (Callable[[fx.Node], bool]): A function that
determines if a node's memory can be released (e.g. primal nodes
cannot be released).
- size_of (Callable[[int | torch.SymInt], int]): A function that converts
byte counts (possibly symbolic) to concrete integers.
Returns:
- List[int]: A list representing the memory profile over the execution
of the graph, where each entry corresponds to the memory usage at
a particular point in the execution.
"""
size_of = size_of or _size_of_default
nodes = list(graph.nodes)
alias_info = GraphAliasTracker(nodes)
# Build memory profile
current_memory = 0
for node in itertools.chain(
graph.find_nodes(op="placeholder"), graph.find_nodes(op="get_attr")
):
for storage_key in alias_info.get_fresh_allocations(node):
if device_filter(storage_key.device):
current_memory += size_of(storage_key.storage.nbytes())
memory_profile = [current_memory]
for node in nodes:
if node.op in ("placeholder", "get_attr", "output"):
continue
# Process allocations
for storage_key in alias_info.get_fresh_allocations(node):
if device_filter(storage_key.device):
current_memory += size_of(storage_key.storage.nbytes())
memory_profile.append(current_memory)
# Process deallocations
for storage_key in alias_info.get_storages_last_used(node):
allocator = alias_info.storage_to_allocator[storage_key]
if is_releasable(allocator):
if device_filter(storage_key.device):
current_memory -= size_of(storage_key.storage.nbytes())
memory_profile.append(current_memory)
return memory_profile
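# Usage sketch (illustrative; assumes `graph` is an FX graph whose nodes carry
# FakeTensor values in node.meta["val"]):
#   profile = build_memory_profile(graph, is_releasable=_is_releasable)
#   peak_bytes = max(profile)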
def get_fwd_bwd_interactions(
fwd_graph: fx.Graph,
bwd_graph: fx.Graph,
size_of: Callable[[int | torch.SymInt], int] | None = None,
) -> tuple[int, OrderedSet[str]]:
"""
Analyze the interactions between the forward (fwd) and backward (bwd) graphs
to determine memory usage characteristics.
Args:
- fwd_graph (fx.Graph): The forward graph representing the forward pass.
- bwd_graph (fx.Graph): The backward graph representing the backward pass.
- size_of (Callable[[int | torch.SymInt], int]): A function that converts
byte counts (possibly symbolic) to concrete integers.
Returns:
- tuple[int, OrderedSet[str]]: A tuple containing:
1. The baseline memory usage during the backward pass, accounting for
storages that persist from the forward pass (i.e., in fwd output but
not in bwd input).
2. A set of node names whose storage cannot be released during the bwd pass.
These include nodes that use storage from primals or are in bwd input
but not in fwd output.
"""
size_of = size_of or _size_of_default
# Build alias info for forward graph
fwd_nodes = list(fwd_graph.nodes)
fwd_alias_info = GraphAliasTracker(fwd_nodes)
# Identify storages allocated by primal placeholder nodes
primal_storages: OrderedSet[StorageKey] = OrderedSet()
for node in fwd_graph.find_nodes(op="placeholder"):
if node.name.startswith("primals"):
primal_storages.update(fwd_alias_info.get_fresh_allocations(node))
# Get storages in forward output
    fwd_output_node = next(iter(reversed(fwd_graph.nodes)))
assert fwd_output_node.op == "output"
fwd_output_storages = fwd_alias_info.get_storage_uses(fwd_output_node)
# Node names that should not be deleted during memory profile estimation of bwd_graph
do_not_delete: OrderedSet[str] = OrderedSet()
# Collect all storages in backward inputs and identify nodes to not delete
bwd_input_storages: OrderedSet[StorageKey] = OrderedSet()
for node in bwd_graph.find_nodes(op="placeholder"):
node_storages = GraphAliasTracker._get_output_storages(node)
bwd_input_storages.update(node_storages)
# Check if this node uses primal storage
if node_storages & primal_storages:
do_not_delete.add(node.name)
# Check if this node's storages are not in forward outputs
# (meaning it's an external input to backward pass)
if not (node_storages & fwd_output_storages):
do_not_delete.add(node.name)
# Calculate baseline memory: storages in fwd output but not in bwd input
# These storages persist throughout the backward pass
baseline_storages = fwd_output_storages - bwd_input_storages
bwd_baseline_memory = 0
for storage_key in baseline_storages:
if storage_key.device.type != "cpu":
bwd_baseline_memory += size_of(storage_key.storage.nbytes())
return bwd_baseline_memory, do_not_delete
def _is_releasable(n: fx.Node) -> bool:
# Storages of primals cannot be released during fwd or bwd pass.
return not n.name.startswith("primals")
def get_peak_memory(
fwd_graph: fx.Graph,
bwd_graph: fx.Graph,
) -> int:
fwd_peak_memory = max(build_memory_profile(fwd_graph, _is_releasable))
bwd_baseline_memory, bwd_do_not_delete = get_fwd_bwd_interactions(
fwd_graph,
bwd_graph,
)
def _is_bwd_releasable(n: fx.Node) -> bool:
# Storages of nodes in bwd_do_not_delete cannot be released
# during the bwd pass.
return _is_releasable(n) and n.name not in bwd_do_not_delete
bwd_peak_memory = bwd_baseline_memory + max(
build_memory_profile(bwd_graph, _is_bwd_releasable)
)
return max(
fwd_peak_memory,
bwd_peak_memory,
)
| GraphAliasTracker |
python | pytorch__pytorch | test/test_datapipe.py | {
"start": 22615,
"end": 22685
} | class ____:
def __call__(self, x):
return x + 1
| Add1Callable |
python | Lightning-AI__lightning | tests/tests_pytorch/test_cli.py | {
"start": 20076,
"end": 23841
} | class ____(BoringCkptPathModel):
def __init__(self, extra: bool = True, **kwargs) -> None:
super().__init__(**kwargs)
self.extra = extra
def test_lightning_cli_ckpt_path_argument_hparams_subclass_mode(cleandir):
class CkptPathCLI(LightningCLI):
def add_arguments_to_parser(self, parser):
parser.link_arguments("model.init_args.out_dim", "model.init_args.hidden_dim", compute_fn=lambda x: x * 2)
cli_args = ["fit", "--model=BoringCkptPathSubclass", "--model.out_dim=4", "--trainer.max_epochs=1"]
with mock.patch("sys.argv", ["any.py"] + cli_args):
cli = CkptPathCLI(BoringCkptPathModel, subclass_mode_model=True)
assert cli.config.fit.model.class_path.endswith(".BoringCkptPathSubclass")
assert cli.config.fit.model.init_args == Namespace(out_dim=4, hidden_dim=8, extra=True)
hparams_path = Path(cli.trainer.log_dir) / "hparams.yaml"
assert hparams_path.is_file()
hparams = yaml.safe_load(hparams_path.read_text())
assert hparams["out_dim"] == 4
assert hparams["hidden_dim"] == 8
assert hparams["extra"] is True
checkpoint_path = next(Path(cli.trainer.log_dir, "checkpoints").glob("*.ckpt"))
cli_args = ["predict", "--model=BoringCkptPathModel", f"--ckpt_path={checkpoint_path}"]
with mock.patch("sys.argv", ["any.py"] + cli_args):
cli = CkptPathCLI(BoringCkptPathModel, subclass_mode_model=True)
assert isinstance(cli.model, BoringCkptPathSubclass)
assert cli.model.hidden_dim == 8
assert cli.model.extra is True
assert cli.model.layer.out_features == 4
def test_lightning_cli_submodules(cleandir):
class MainModule(BoringModel):
def __init__(self, submodule1: LightningModule, submodule2: LightningModule, main_param: int = 1):
super().__init__()
self.submodule1 = submodule1
self.submodule2 = submodule2
config = """model:
main_param: 2
submodule1:
class_path: lightning.pytorch.demos.boring_classes.BoringModel
submodule2:
class_path: lightning.pytorch.demos.boring_classes.BoringModel
"""
config_path = Path("config.yaml")
config_path.write_text(config)
cli_args = [f"--config={config_path}"]
with mock.patch("sys.argv", ["any.py"] + cli_args):
cli = LightningCLI(MainModule, run=False)
assert cli.config["model"]["main_param"] == 2
assert isinstance(cli.model.submodule1, BoringModel)
assert isinstance(cli.model.submodule2, BoringModel)
@pytest.mark.skipif(not _TORCHVISION_AVAILABLE, reason=str(_TORCHVISION_AVAILABLE))
def test_lightning_cli_torch_modules(cleandir):
class TestModule(BoringModel):
def __init__(self, activation: torch.nn.Module = None, transform: Optional[list[torch.nn.Module]] = None):
super().__init__()
self.activation = activation
self.transform = transform
config = """model:
activation:
class_path: torch.nn.LeakyReLU
init_args:
negative_slope: 0.2
transform:
- class_path: torchvision.transforms.Resize
init_args:
size: 64
- class_path: torchvision.transforms.CenterCrop
init_args:
size: 64
"""
config_path = Path("config.yaml")
config_path.write_text(config)
cli_args = [f"--config={config_path}"]
with mock.patch("sys.argv", ["any.py"] + cli_args):
cli = LightningCLI(TestModule, run=False)
assert isinstance(cli.model.activation, torch.nn.LeakyReLU)
assert cli.model.activation.negative_slope == 0.2
assert len(cli.model.transform) == 2
assert all(isinstance(v, torch.nn.Module) for v in cli.model.transform)
| BoringCkptPathSubclass |
python | openai__openai-python | src/openai/resources/vector_stores/vector_stores.py | {
"start": 33317,
"end": 34352
} | class ____:
def __init__(self, vector_stores: VectorStores) -> None:
self._vector_stores = vector_stores
self.create = to_streamed_response_wrapper(
vector_stores.create,
)
self.retrieve = to_streamed_response_wrapper(
vector_stores.retrieve,
)
self.update = to_streamed_response_wrapper(
vector_stores.update,
)
self.list = to_streamed_response_wrapper(
vector_stores.list,
)
self.delete = to_streamed_response_wrapper(
vector_stores.delete,
)
self.search = to_streamed_response_wrapper(
vector_stores.search,
)
@cached_property
def files(self) -> FilesWithStreamingResponse:
return FilesWithStreamingResponse(self._vector_stores.files)
@cached_property
def file_batches(self) -> FileBatchesWithStreamingResponse:
return FileBatchesWithStreamingResponse(self._vector_stores.file_batches)
| VectorStoresWithStreamingResponse |
python | gevent__gevent | src/greentest/3.14/test_httplib.py | {
"start": 60133,
"end": 63908
} | class ____(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
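    # Expected body: everything from 'hello' onward (after the blank line that
    # terminates the headers), as bytes.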
lines_expected = lines[lines.find('hello'):].encode("ascii")
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch up the buffered peek so that it returns not too much stuff
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertStartsWith(p2, p)
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def test_readline_without_limit(self):
self._verify_readline(self.resp.readline, self.lines_expected, limit=-1)
def _verify_readline(self, readline, expected, limit=5):
all = []
while True:
# short readlines
line = readline(limit)
if line and line != b"foo":
if len(line) < 5:
self.assertEndsWith(line, b"\n")
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
self.assertTrue(self.resp.isclosed())
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
self.assertTrue(resp.isclosed())
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
self.assertTrue(resp.isclosed())
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
self.assertFalse(self.resp.isclosed())
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
| ExtendedReadTest |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py | {
"start": 34382,
"end": 37274
} | class ____(nn.Module):
"""Attention with rotary position encoding."""
def __init__(
self,
config: Sam3TrackerVideoConfig,
kv_in_dim: Optional[int] = None,
rope_k_repeat=False,
):
super().__init__()
self.config = config
self.hidden_size = config.memory_attention_hidden_size
self.internal_dim = self.hidden_size // config.memory_attention_downsample_rate
self.num_attention_heads = config.memory_attention_num_attention_heads
self.head_dim = self.internal_dim // config.memory_attention_num_attention_heads
self.scaling = self.head_dim**-0.5
self.is_causal = False
self.kv_in_dim = kv_in_dim if kv_in_dim is not None else self.hidden_size
self.q_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
self.o_proj = nn.Linear(self.internal_dim, self.hidden_size)
self.rope_k_repeat = rope_k_repeat
self.dropout_p = config.memory_attention_rope_dropout
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
num_k_exclude_rope: int = 0,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Tensor:
# Input projections
batch_size, point_batch_size = query.shape[:2]
new_shape = (batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim)
query = self.q_proj(query).view(*new_shape).transpose(1, 2)
key = self.k_proj(key).view(*new_shape).transpose(1, 2)
value = self.v_proj(value).view(*new_shape).transpose(1, 2)
cos, sin = position_embeddings
# Apply rotary position encoding, excluding some keys if specified
query, key = apply_rotary_pos_emb_2d(
query, key, cos, sin, repeat_freqs_k=self.rope_k_repeat, num_k_exclude_rope=num_k_exclude_rope
)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query,
key,
value,
attention_mask=None,
dropout=0.0 if not self.training else self.dropout_p,
scaling=self.scaling,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.reshape(
batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim
).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Sam3TrackerVideoRoPEAttention |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-jumps-to-reach-the-last-index.py | {
"start": 36,
"end": 480
} | class ____(object):
def maximumJumps(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
dp = [-1]*len(nums)
dp[0] = 0
for i in xrange(1, len(nums)):
for j in xrange(i):
if abs(nums[i]-nums[j]) <= target:
if dp[j] != -1:
dp[i] = max(dp[i], dp[j]+1)
return dp[-1]
| Solution |
python | pydata__xarray | xarray/computation/rolling.py | {
"start": 37480,
"end": 44143
} | class ____(CoarsenArithmetic, Generic[T_Xarray]):
"""A object that implements the coarsen.
See Also
--------
Dataset.coarsen
DataArray.coarsen
"""
__slots__ = (
"boundary",
"coord_func",
"obj",
"side",
"trim_excess",
"windows",
)
_attributes = ("windows", "side", "trim_excess")
obj: T_Xarray
windows: Mapping[Hashable, int]
side: SideOptions | Mapping[Hashable, SideOptions]
boundary: CoarsenBoundaryOptions
coord_func: Mapping[Hashable, str | Callable]
def __init__(
self,
obj: T_Xarray,
windows: Mapping[Any, int],
boundary: CoarsenBoundaryOptions,
side: SideOptions | Mapping[Any, SideOptions],
coord_func: str | Callable | Mapping[Any, str | Callable],
) -> None:
"""
Moving window object.
Parameters
----------
obj : Dataset or DataArray
Object to window.
windows : mapping of hashable to int
A mapping from the name of the dimension to create the rolling
exponential window along (e.g. `time`) to the size of the moving window.
boundary : {"exact", "trim", "pad"}
If 'exact', a ValueError will be raised if dimension size is not a
multiple of window size. If 'trim', the excess indexes are trimmed.
If 'pad', NA will be padded.
side : 'left' or 'right' or mapping from dimension to 'left' or 'right'
coord_func : function (name) or mapping from coordinate name to function (name).
Returns
-------
coarsen
"""
self.obj = obj
self.windows = windows
self.side = side
self.boundary = boundary
missing_dims = tuple(dim for dim in windows.keys() if dim not in self.obj.dims)
if missing_dims:
raise ValueError(
f"Window dimensions {missing_dims} not found in {self.obj.__class__.__name__} "
f"dimensions {tuple(self.obj.dims)}"
)
if utils.is_dict_like(coord_func):
coord_func_map = coord_func
else:
coord_func_map = dict.fromkeys(self.obj.dims, coord_func)
for c in self.obj.coords:
if c not in coord_func_map:
coord_func_map[c] = duck_array_ops.mean # type: ignore[index]
self.coord_func = coord_func_map
def _get_keep_attrs(self, keep_attrs):
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
return keep_attrs
def __repr__(self) -> str:
"""provide a nice str repr of our coarsen object"""
attrs = ",".join(
f"{k}->{getattr(self, k)}"
for k in self._attributes
if getattr(self, k, None) is not None
)
return f"{self.__class__.__name__} [{attrs}]"
def construct(
self,
window_dim=None,
keep_attrs=None,
**window_dim_kwargs,
) -> T_Xarray:
"""
Convert this Coarsen object to a DataArray or Dataset,
where the coarsening dimension is split or reshaped to two
new dimensions.
Parameters
----------
window_dim: mapping
A mapping from existing dimension name to new dimension names.
The size of the second dimension will be the length of the
coarsening window.
keep_attrs: bool, optional
Preserve attributes if True
**window_dim_kwargs : {dim: new_name, ...}
The keyword arguments form of ``window_dim``.
Returns
-------
Dataset or DataArray with reshaped dimensions
Examples
--------
>>> da = xr.DataArray(np.arange(24), dims="time")
>>> da.coarsen(time=12).construct(time=("year", "month"))
<xarray.DataArray (year: 2, month: 12)> Size: 192B
array([[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]])
Dimensions without coordinates: year, month
See Also
--------
DataArrayRolling.construct
DatasetRolling.construct
"""
from xarray.core.dataarray import DataArray
from xarray.core.dataset import Dataset
window_dim = either_dict_or_kwargs(
window_dim, window_dim_kwargs, "Coarsen.construct"
)
if not window_dim:
raise ValueError(
"Either window_dim or window_dim_kwargs need to be specified."
)
bad_new_dims = tuple(
win
for win, dims in window_dim.items()
if len(dims) != 2 or isinstance(dims, str)
)
if bad_new_dims:
raise ValueError(
f"Please provide exactly two dimension names for the following coarsening dimensions: {bad_new_dims}"
)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=True)
missing_dims = set(window_dim) - set(self.windows)
if missing_dims:
raise ValueError(
f"'window_dim' must contain entries for all dimensions to coarsen. Missing {missing_dims}"
)
extra_windows = set(self.windows) - set(window_dim)
if extra_windows:
raise ValueError(
f"'window_dim' includes dimensions that will not be coarsened: {extra_windows}"
)
reshaped = Dataset()
if isinstance(self.obj, DataArray):
obj = self.obj._to_temp_dataset()
else:
obj = self.obj
reshaped.attrs = obj.attrs if keep_attrs else {}
for key, var in obj.variables.items():
reshaped_dims = tuple(
itertools.chain(*[window_dim.get(dim, [dim]) for dim in list(var.dims)])
)
if reshaped_dims != var.dims:
windows = {w: self.windows[w] for w in window_dim if w in var.dims}
reshaped_var, _ = var.coarsen_reshape(windows, self.boundary, self.side)
attrs = var.attrs if keep_attrs else {}
reshaped[key] = (reshaped_dims, reshaped_var, attrs)
else:
reshaped[key] = var
# should handle window_dim being unindexed
should_be_coords = (set(window_dim) & set(self.obj.coords)) | set(
self.obj.coords
)
result = reshaped.set_coords(should_be_coords)
if isinstance(self.obj, DataArray):
return self.obj._from_temp_dataset(result)
else:
return result
| Coarsen |
python | sympy__sympy | sympy/geometry/entity.py | {
"start": 16828,
"end": 19890
} | class ____(GeometryEntity, Set):
"""Parent class of all GeometryEntity that are also Sets
(compatible with sympy.sets)
"""
__slots__ = ()
def _contains(self, other):
"""sympy.sets uses the _contains method, so include it for compatibility."""
if isinstance(other, Set) and other.is_FiniteSet:
return all(self.__contains__(i) for i in other)
return self.__contains__(other)
@dispatch(GeometrySet, Set) # type:ignore # noqa:F811
def union_sets(self, o): # noqa:F811
""" Returns the union of self and o
for use with sympy.sets.Set, if possible. """
# if its a FiniteSet, merge any points
# we contain and return a union with the rest
if o.is_FiniteSet:
other_points = [p for p in o if not self._contains(p)]
if len(other_points) == len(o):
return None
return Union(self, FiniteSet(*other_points))
if self._contains(o):
return self
return None
@dispatch(GeometrySet, Set) # type: ignore # noqa:F811
def intersection_sets(self, o): # noqa:F811
""" Returns a sympy.sets.Set of intersection objects,
if possible. """
from sympy.geometry.point import Point
try:
# if o is a FiniteSet, find the intersection directly
# to avoid infinite recursion
if o.is_FiniteSet:
inter = FiniteSet(*(p for p in o if self.contains(p)))
else:
inter = self.intersection(o)
except NotImplementedError:
# sympy.sets.Set.reduce expects None if an object
# doesn't know how to simplify
return None
# put the points in a FiniteSet
points = FiniteSet(*[p for p in inter if isinstance(p, Point)])
non_points = [p for p in inter if not isinstance(p, Point)]
return Union(*(non_points + [points]))
def translate(x, y):
"""Return the matrix to translate a 2-D point by x and y."""
rv = eye(3)
rv[2, 0] = x
rv[2, 1] = y
return rv
def scale(x, y, pt=None):
"""Return the matrix to multiply a 2-D point's coordinates by x and y.
If pt is given, the scaling is done relative to that point."""
rv = eye(3)
rv[0, 0] = x
rv[1, 1] = y
if pt:
from sympy.geometry.point import Point
pt = Point(pt, dim=2)
tr1 = translate(*(-pt).args)
tr2 = translate(*pt.args)
return tr1*rv*tr2
return rv
def rotate(th):
"""Return the matrix to rotate a 2-D point about the origin by ``angle``.
    The angle is measured in radians. To rotate a point about a point other
    than the origin, translate the point, do the rotation, and
translate it back:
>>> from sympy.geometry.entity import rotate, translate
>>> from sympy import Point, pi
>>> rot_about_11 = translate(-1, -1)*rotate(pi/2)*translate(1, 1)
>>> Point(1, 1).transform(rot_about_11)
Point2D(1, 1)
>>> Point(0, 0).transform(rot_about_11)
Point2D(2, 0)
"""
s = sin(th)
rv = eye(3)*cos(th)
rv[0, 1] = s
rv[1, 0] = -s
rv[2, 2] = 1
return rv
| GeometrySet |
python | django__django | tests/forms_tests/widget_tests/test_numberinput.py | {
"start": 128,
"end": 1049
} | class ____(WidgetTest):
widget = NumberInput(attrs={"max": 12345, "min": 1234, "step": 9999})
@override_settings(USE_THOUSAND_SEPARATOR=True)
def test_attrs_not_localized(self):
self.check_html(
self.widget,
"name",
"value",
'<input type="number" name="name" value="value" max="12345" min="1234" '
'step="9999">',
)
def test_fieldset(self):
class TestForm(Form):
template_name = "forms_tests/use_fieldset.html"
field = CharField(widget=self.widget)
form = TestForm()
self.assertIs(self.widget.use_fieldset, False)
self.assertHTMLEqual(
'<div><label for="id_field">Field:</label>'
'<input id="id_field" max="12345" min="1234" '
'name="field" required step="9999" type="number"></div>',
form.render(),
)
| NumberInputTests |
python | walkccc__LeetCode | solutions/810. Chalkboard XOR Game/810.py | {
"start": 0,
"end": 137
} | class ____:
def xorGame(self, nums: list[int]) -> bool:
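        # Alice wins if the XOR of all numbers is already 0, or if the count is
        # even: with an even count and nonzero XOR she can always erase some
        # number without zeroing the XOR (if every erasure zeroed it, every
        # number would equal the total XOR, and an even count of equal values
        # XORs to 0, a contradiction).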
return functools.reduce(operator.xor, nums) == 0 or len(nums) % 2 == 0
| Solution |
python | joke2k__faker | tests/providers/test_automotive.py | {
"start": 2651,
"end": 2826
} | class ____(_SimpleAutomotiveTestMixin):
"""Test de_CH automotive provider methods"""
license_plate_pattern: Pattern = re.compile(r"[A-Z]{2}-\d{1,3}\s?\d{0,3}")
| TestDeCh |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_types.py | {
"start": 41609,
"end": 61111
} | class ____(_LiteralRoundTripFixture, fixtures.TablesTest):
__requires__ = ("json_type",)
__backend__ = True
datatype = JSON
@classmethod
def define_tables(cls, metadata):
Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("name", String(30), nullable=False),
Column("data", cls.datatype, nullable=False),
Column("nulldata", cls.datatype(none_as_null=True)),
)
def test_round_trip_data1(self, connection):
self._test_round_trip({"key1": "value1", "key2": "value2"}, connection)
@testing.combinations(
("unicode", True), ("ascii", False), argnames="unicode_", id_="ia"
)
@testing.combinations(100, 1999, 3000, 4000, 5000, 9000, argnames="length")
def test_round_trip_pretty_large_data(self, connection, unicode_, length):
if unicode_:
data = "réve🐍illé" * ((length // 9) + 1)
data = data[0 : (length // 2)]
else:
data = "abcdefg" * ((length // 7) + 1)
data = data[0:length]
self._test_round_trip({"key1": data, "key2": data}, connection)
def _test_round_trip(self, data_element, connection):
data_table = self.tables.data_table
connection.execute(
data_table.insert(),
{"id": 1, "name": "row1", "data": data_element},
)
row = connection.execute(select(data_table.c.data)).first()
eq_(row, (data_element,))
def _index_fixtures(include_comparison):
if include_comparison:
# basically SQL Server and MariaDB can kind of do json
# comparison, MySQL, PG and SQLite can't. not worth it.
json_elements = []
else:
json_elements = [
("json", {"foo": "bar"}),
("json", ["one", "two", "three"]),
(None, {"foo": "bar"}),
(None, ["one", "two", "three"]),
]
elements = [
("boolean", True),
("boolean", False),
("boolean", None),
("string", "some string"),
("string", None),
("string", "réve illé"),
(
"string",
"réve🐍 illé",
testing.requires.json_index_supplementary_unicode_element,
),
("integer", 15),
("integer", 1),
("integer", 0),
("integer", None),
("float", 28.5),
("float", None),
("float", 1234567.89, testing.requires.literal_float_coercion),
("numeric", 1234567.89),
# this one "works" because the float value you see here is
# lost immediately to floating point stuff
(
"numeric",
99998969694839.983485848,
),
("numeric", 99939.983485848),
("_decimal", decimal.Decimal("1234567.89")),
(
"_decimal",
decimal.Decimal("99998969694839.983485848"),
# fails on SQLite and MySQL (non-mariadb)
requirements.cast_precision_numerics_many_significant_digits,
),
(
"_decimal",
decimal.Decimal("99939.983485848"),
),
] + json_elements
def decorate(fn):
fn = testing.combinations(id_="sa", *elements)(fn)
return fn
return decorate
def _json_value_insert(self, connection, datatype, value, data_element):
data_table = self.tables.data_table
if datatype == "_decimal":
# Python's builtin json serializer basically doesn't support
# Decimal objects without implicit float conversion period.
# users can otherwise use simplejson which supports
# precision decimals
# https://bugs.python.org/issue16535
# inserting as strings to avoid a new fixture around the
# dialect which would have idiosyncrasies for different
# backends.
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
return str(o)
return super().default(o)
json_data = json.dumps(data_element, cls=DecimalEncoder)
# take the quotes out. yup, there is *literally* no other
# way to get Python's json.dumps() to put all the digits in
# the string
json_data = re.sub(r'"(%s)"' % str(value), str(value), json_data)
datatype = "numeric"
connection.execute(
data_table.insert().values(
name="row1",
# to pass the string directly to every backend, including
# PostgreSQL which needs the value to be CAST as JSON
# both in the SQL as well as at the prepared statement
# level for asyncpg, while at the same time MySQL
# doesn't even support CAST for JSON, here we are
# sending the string embedded in the SQL without using
# a parameter.
data=bindparam(None, json_data, literal_execute=True),
nulldata=bindparam(None, json_data, literal_execute=True),
),
)
else:
connection.execute(
data_table.insert(),
{
"name": "row1",
"data": data_element,
"nulldata": data_element,
},
)
p_s = None
if datatype:
if datatype == "numeric":
a, b = str(value).split(".")
s = len(b)
p = len(a) + s
if isinstance(value, decimal.Decimal):
compare_value = value
else:
compare_value = decimal.Decimal(str(value))
p_s = (p, s)
else:
compare_value = value
else:
compare_value = value
return datatype, compare_value, p_s
@testing.requires.legacy_unconditional_json_extract
@_index_fixtures(False)
def test_index_typed_access(self, datatype, value):
data_table = self.tables.data_table
data_element = {"key1": value}
with config.db.begin() as conn:
datatype, compare_value, p_s = self._json_value_insert(
conn, datatype, value, data_element
)
expr = data_table.c.data["key1"]
if datatype:
if datatype == "numeric" and p_s:
expr = expr.as_numeric(*p_s)
else:
expr = getattr(expr, "as_%s" % datatype)()
roundtrip = conn.scalar(select(expr))
eq_(roundtrip, compare_value)
is_(type(roundtrip), type(compare_value))
@testing.requires.legacy_unconditional_json_extract
@_index_fixtures(True)
def test_index_typed_comparison(self, datatype, value):
data_table = self.tables.data_table
data_element = {"key1": value}
with config.db.begin() as conn:
datatype, compare_value, p_s = self._json_value_insert(
conn, datatype, value, data_element
)
expr = data_table.c.data["key1"]
if datatype:
if datatype == "numeric" and p_s:
expr = expr.as_numeric(*p_s)
else:
expr = getattr(expr, "as_%s" % datatype)()
row = conn.execute(
select(expr).where(expr == compare_value)
).first()
# make sure we get a row even if value is None
eq_(row, (compare_value,))
@testing.requires.legacy_unconditional_json_extract
@_index_fixtures(True)
def test_path_typed_comparison(self, datatype, value):
data_table = self.tables.data_table
data_element = {"key1": {"subkey1": value}}
with config.db.begin() as conn:
datatype, compare_value, p_s = self._json_value_insert(
conn, datatype, value, data_element
)
expr = data_table.c.data[("key1", "subkey1")]
if datatype:
if datatype == "numeric" and p_s:
expr = expr.as_numeric(*p_s)
else:
expr = getattr(expr, "as_%s" % datatype)()
row = conn.execute(
select(expr).where(expr == compare_value)
).first()
# make sure we get a row even if value is None
eq_(row, (compare_value,))
@testing.combinations(
(True,),
(False,),
(None,),
(15,),
(0,),
(-1,),
(-1.0,),
(15.052,),
("a string",),
("réve illé",),
("réve🐍 illé",),
)
def test_single_element_round_trip(self, element):
data_table = self.tables.data_table
data_element = element
with config.db.begin() as conn:
conn.execute(
data_table.insert(),
{
"name": "row1",
"data": data_element,
"nulldata": data_element,
},
)
row = conn.execute(
select(data_table.c.data, data_table.c.nulldata)
).first()
eq_(row, (data_element, data_element))
def test_round_trip_custom_json(self):
data_table = self.tables.data_table
data_element = {"key1": "data1"}
js = mock.Mock(side_effect=json.dumps)
jd = mock.Mock(side_effect=json.loads)
engine = engines.testing_engine(
options=dict(json_serializer=js, json_deserializer=jd)
)
# support sqlite :memory: database...
data_table.create(engine, checkfirst=True)
with engine.begin() as conn:
conn.execute(
data_table.insert(), {"name": "row1", "data": data_element}
)
row = conn.execute(select(data_table.c.data)).first()
eq_(row, (data_element,))
eq_(js.mock_calls, [mock.call(data_element)])
if testing.requires.json_deserializer_binary.enabled:
eq_(
jd.mock_calls,
[mock.call(json.dumps(data_element).encode())],
)
else:
eq_(jd.mock_calls, [mock.call(json.dumps(data_element))])
@testing.combinations(
("parameters",),
("multiparameters",),
("values",),
("omit",),
argnames="insert_type",
)
def test_round_trip_none_as_sql_null(self, connection, insert_type):
col = self.tables.data_table.c["nulldata"]
conn = connection
if insert_type == "parameters":
stmt, params = self.tables.data_table.insert(), {
"name": "r1",
"nulldata": None,
"data": None,
}
elif insert_type == "multiparameters":
stmt, params = self.tables.data_table.insert(), [
{"name": "r1", "nulldata": None, "data": None}
]
elif insert_type == "values":
stmt, params = (
self.tables.data_table.insert().values(
name="r1",
nulldata=None,
data=None,
),
{},
)
elif insert_type == "omit":
stmt, params = (
self.tables.data_table.insert(),
{"name": "r1", "data": None},
)
else:
assert False
conn.execute(stmt, params)
eq_(
conn.scalar(
select(self.tables.data_table.c.name).where(col.is_(null()))
),
"r1",
)
eq_(conn.scalar(select(col)), None)
def test_round_trip_json_null_as_json_null(self, connection):
col = self.tables.data_table.c["data"]
conn = connection
conn.execute(
self.tables.data_table.insert(),
{"name": "r1", "data": JSON.NULL},
)
eq_(
conn.scalar(
select(self.tables.data_table.c.name).where(
cast(col, String) == "null"
)
),
"r1",
)
eq_(conn.scalar(select(col)), None)
@testing.combinations(
("parameters",),
("multiparameters",),
("values",),
argnames="insert_type",
)
def test_round_trip_none_as_json_null(self, connection, insert_type):
col = self.tables.data_table.c["data"]
if insert_type == "parameters":
stmt, params = self.tables.data_table.insert(), {
"name": "r1",
"data": None,
}
elif insert_type == "multiparameters":
stmt, params = self.tables.data_table.insert(), [
{"name": "r1", "data": None}
]
elif insert_type == "values":
stmt, params = (
self.tables.data_table.insert().values(name="r1", data=None),
{},
)
else:
assert False
conn = connection
conn.execute(stmt, params)
eq_(
conn.scalar(
select(self.tables.data_table.c.name).where(
cast(col, String) == "null"
)
),
"r1",
)
eq_(conn.scalar(select(col)), None)
def test_unicode_round_trip(self):
# note we include Unicode supplementary characters as well
with config.db.begin() as conn:
conn.execute(
self.tables.data_table.insert(),
{
"name": "r1",
"data": {
"réve🐍 illé": "réve🐍 illé",
"data": {"k1": "drôl🐍e"},
},
},
)
eq_(
conn.scalar(select(self.tables.data_table.c.data)),
{
"réve🐍 illé": "réve🐍 illé",
"data": {"k1": "drôl🐍e"},
},
)
def test_eval_none_flag_orm(self, connection):
Base = declarative_base()
class Data(Base):
__table__ = self.tables.data_table
with Session(connection) as s:
d1 = Data(name="d1", data=None, nulldata=None)
s.add(d1)
s.commit()
s.bulk_insert_mappings(
Data, [{"name": "d2", "data": None, "nulldata": None}]
)
eq_(
s.query(
cast(self.tables.data_table.c.data, String()),
cast(self.tables.data_table.c.nulldata, String),
)
.filter(self.tables.data_table.c.name == "d1")
.first(),
("null", None),
)
eq_(
s.query(
cast(self.tables.data_table.c.data, String()),
cast(self.tables.data_table.c.nulldata, String),
)
.filter(self.tables.data_table.c.name == "d2")
.first(),
("null", None),
)
@testing.combinations(
("string",),
("integer",),
("float",),
("numeric",),
("boolean",),
argnames="cross_cast",
)
@testing.combinations(
("boolean", True, {"string"}),
("boolean", False, {"string"}),
("boolean", None, {"all"}),
("string", "45", {"integer", "float", "numeric"}),
("string", "45.684", {"float", "numeric"}),
("string", "some string", {"string"}),
("string", None, {"all"}),
("string", "réve illé", {"string"}),
("string", "true", {"boolean"}),
("string", "false", {"boolean"}),
("integer", 15, {"string", "numeric", "float"}),
("integer", 1, {"all"}),
("integer", 0, {"all"}),
("integer", None, {"all"}),
("float", None, {"all"}),
("float", 1234567.89, {"string", "numeric"}),
("numeric", 1234567.89, {"string", "float"}),
argnames="datatype, value, allowed_targets",
)
@testing.variation("json_access", ["getitem", "path"])
def test_index_cross_casts(
self,
datatype,
value,
allowed_targets,
cross_cast,
json_access: Variation,
connection,
):
"""cross cast tests set up for #11074"""
data_table = self.tables.data_table
if json_access.getitem:
data_element = {"key1": value}
elif json_access.path:
data_element = {"attr1": {"key1": value}}
else:
json_access.fail()
datatype, _, _ = self._json_value_insert(
connection, datatype, value, data_element
)
if json_access.getitem:
expr = data_table.c.data["key1"]
elif json_access.path:
expr = data_table.c.data[("attr1", "key1")]
else:
json_access.fail()
if cross_cast == "numeric":
expr = getattr(expr, "as_%s" % cross_cast)(10, 2)
else:
expr = getattr(expr, "as_%s" % cross_cast)()
if (
cross_cast != datatype
and "all" not in allowed_targets
and cross_cast not in allowed_targets
):
try:
roundtrip = connection.scalar(select(expr))
except Exception:
# We can't predict in a backend-agnostic way what CASTS
# will fail and which will proceed with a (possibly
# useless) value. PostgreSQL CASTS fail in 100% of cases
# that the types aren't compatible. SQL Server fails in
# most, except for booleans because it uses ints for
# booleans which are easier to cast. MySQL and SQLite do
# not raise for CAST under any circumstances for the four
# of string/int/float/boolean. one way to force a fail
# would be to have backends inject a special version of
# Float/Unicode/Integer/Boolean that enforces a python
# check of the expected data value. However for now we let
# the backends ensure the expected type is returned but we
# don't try to validate the value itself for non-sensical
# casts.
return
else:
roundtrip = connection.scalar(select(expr))
if value is None:
eq_(roundtrip, None)
elif cross_cast == "string":
assert isinstance(roundtrip, str)
elif cross_cast == "integer":
assert isinstance(roundtrip, int)
elif cross_cast == "float":
assert isinstance(roundtrip, float)
elif cross_cast == "numeric":
assert isinstance(roundtrip, decimal.Decimal)
elif cross_cast == "boolean":
assert isinstance(roundtrip, bool)
else:
assert False
| JSONTest |
python | getsentry__sentry | src/sentry/integrations/github_enterprise/webhook.py | {
"start": 2218,
"end": 3067
} | class ____(Exception):
"""Signature algorithm is unsupported"""
def get_host(request: HttpRequest) -> str | None:
    # XXX: There are lots of customers that give us an IP rather than a host name
# Use HTTP_X_REAL_IP in a follow up PR (#42405)
return request.headers.get("x-github-enterprise-host")
def get_installation_metadata(event, host):
if not host:
return
external_id = get_github_external_id(event=event, host=host)
integration = integration_service.get_integration(
external_id=external_id,
provider=IntegrationProviderSlug.GITHUB_ENTERPRISE.value,
status=ObjectStatus.ACTIVE,
)
if integration is None:
metrics.incr("integrations.github_enterprise.does_not_exist")
return
return integration.metadata["installation"]
| UnsupportedSignatureAlgorithmError |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dlp.py | {
"start": 10397,
"end": 14884
} | class ____(GoogleCloudBaseOperator):
"""
Creates a new job to inspect storage or calculate risk metrics.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPCreateDLPJobOperator`
:param project_id: (Optional) Google Cloud project ID where the
DLP Instance exists. If set to None or missing, the default
project_id from the Google Cloud connection is used.
:param inspect_job: (Optional) The configuration for the inspect job.
:param risk_job: (Optional) The configuration for the risk job.
:param job_id: (Optional) The job ID.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param wait_until_finished: (Optional) If true, it will keep polling the job state
until it is set to DONE.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"project_id",
"inspect_job",
"risk_job",
"job_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPJobDetailsLink(),)
def __init__(
self,
*,
project_id: str = PROVIDE_PROJECT_ID,
inspect_job: dict | InspectJobConfig | None = None,
risk_job: dict | RiskAnalysisJobConfig | None = None,
job_id: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
wait_until_finished: bool = True,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.inspect_job = inspect_job
self.risk_job = risk_job
self.job_id = job_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.wait_until_finished = wait_until_finished
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
try:
job = hook.create_dlp_job(
project_id=self.project_id,
inspect_job=self.inspect_job,
risk_job=self.risk_job,
job_id=self.job_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
wait_until_finished=self.wait_until_finished,
)
except AlreadyExists:
if self.job_id is None:
raise RuntimeError("The job_id must be set here!")
job = hook.get_dlp_job(
project_id=self.project_id,
dlp_job_id=self.job_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
result = DlpJob.to_dict(job)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPJobDetailsLink.persist(
context=context,
project_id=project_id,
job_name=result["name"].split("/")[-1] if result["name"] else None,
)
return result
| CloudDLPCreateDLPJobOperator |
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 38244,
"end": 38444
} | class ____(Pix2SkyProjection, QuadCube):
r"""
Tangential spherical cube projection - pixel to sky.
Corresponds to the ``TSC`` projection in FITS WCS.
"""
| Pix2Sky_TangentialSphericalCube |
python | getsentry__sentry | src/sentry/core/endpoints/team_stats.py | {
"start": 596,
"end": 2744
} | class ____(TeamEndpoint, StatsMixin):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ENTERPRISE
def get(self, request: Request, team) -> Response:
"""
Retrieve Event Counts for a Team
````````````````````````````````
.. caution::
This endpoint may change in the future without notice.
Return a set of points representing a normalized timestamp and the
number of events seen in the period.
Query ranges are limited to Sentry's configured time-series
resolutions.
:pparam string organization_id_or_slug: the id or slug of the organization.
:pparam string team_id_or_slug: the id or slug of the team.
:qparam string stat: the name of the stat to query (``"received"``,
``"rejected"``)
:qparam timestamp since: a timestamp to set the start of the query
in seconds since UNIX epoch.
:qparam timestamp until: a timestamp to set the end of the query
in seconds since UNIX epoch.
:qparam string resolution: an explicit resolution to search
for (one of ``10s``, ``1h``, and ``1d``)
:auth: required
"""
try:
environment_id = get_environment_id(request, team.organization_id)
except Environment.DoesNotExist:
raise ResourceDoesNotExist
projects = Project.objects.get_for_user(team=team, user=request.user)
if not projects:
return Response([])
data = list(
tsdb.backend.get_range(
model=TSDBModel.project,
keys=[p.id for p in projects],
**self._parse_args(request, environment_id),
tenant_ids={"organization_id": team.organization_id},
).values()
)
summarized = []
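        # each per-project series is a list of (timestamp, count) pairs;
        # sum the counts across projects for every timestamp bucket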
for n in range(len(data[0])):
total = sum(d[n][1] for d in data)
summarized.append((data[0][n][0], total))
return Response(summarized)
| TeamStatsEndpoint |
python | ethereum__web3.py | tests/integration/go_ethereum/test_goethereum_ipc.py | {
"start": 3819,
"end": 3900
} | class ____(GoEthereumAsyncEthModuleTest):
pass
| TestGoEthereumAsyncEthModuleTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 729838,
"end": 730260
} | class ____(sgqlc.types.Type, Contribution):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("commit_count", "repository")
commit_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="commitCount"
)
repository = sgqlc.types.Field(
sgqlc.types.non_null("Repository"), graphql_name="repository"
)
| CreatedCommitContribution |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 82356,
"end": 90451
} | class ____(Request):
"""
Creates a new dataset version
:param dataset: Dataset ID
:type dataset: str
:param task: ID of the task creating the version
:type task: str
:param name: Version name Unique
:type name: str
:param comment: Version comment
:type comment: str
:param parent: Version parent ID
:type parent: str
:param stats: Version statistics
:type stats: Statistics
:param metadata: User-specified metadata object. Keys must not include '$' and
'.'.
:type metadata: dict
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
"""
_service = "datasets"
_action = "create_version"
_version = "2.23"
_schema = {
"definitions": {
"stat_count": {
"properties": {
"count": {
"description": "Item name",
"type": ["integer", "null"],
},
"name": {
"description": "Number of appearances",
"type": ["string", "null"],
},
},
"type": "object",
},
"statistics": {
"properties": {
"content_types": {
"items": {
"$ref": "#/definitions/stat_count",
"description": (
"List of content type counts for the version (e.g.\n 'image/jpeg',"
" 'image/png', 'video/mp4')"
),
},
"type": ["array", "null"],
},
"frames": {
"items": {
"$ref": "#/definitions/stat_count",
"description": (
"List of frame counts, indicating the\n type of frames included in"
" the version (annotated/"
),
},
"type": ["array", "null"],
},
"labels": {
"items": {
"$ref": "#/definitions/stat_count",
"description": (
"List of labels' counts,\n indicating the categories included in the"
" version"
),
},
"type": ["array", "null"],
},
},
"type": "object",
},
},
"properties": {
"comment": {"description": "Version comment", "type": "string"},
"dataset": {"description": "Dataset ID", "type": "string"},
"metadata": {
"additionalProperties": True,
"description": "User-specified metadata object. Keys must not include '$' and '.'.",
"type": "object",
},
"name": {"description": "Version name Unique", "type": "string"},
"parent": {"description": "Version parent ID", "type": "string"},
"stats": {
"$ref": "#/definitions/statistics",
"description": "Version statistics",
},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"task": {
"description": "ID of the task creating the version",
"type": "string",
},
},
"required": ["dataset", "name"],
"type": "object",
}
def __init__(
self,
dataset,
name,
task=None,
comment=None,
parent=None,
stats=None,
metadata=None,
tags=None,
system_tags=None,
**kwargs
):
super(CreateVersionRequest, self).__init__(**kwargs)
self.dataset = dataset
self.task = task
self.name = name
self.comment = comment
self.parent = parent
self.stats = stats
self.metadata = metadata
self.tags = tags
self.system_tags = system_tags
@schema_property("dataset")
def dataset(self):
return self._property_dataset
@dataset.setter
def dataset(self, value):
if value is None:
self._property_dataset = None
return
self.assert_isinstance(value, "dataset", six.string_types)
self._property_dataset = value
@schema_property("task")
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("name")
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("comment")
def comment(self):
return self._property_comment
@comment.setter
def comment(self, value):
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("parent")
def parent(self):
return self._property_parent
@parent.setter
def parent(self, value):
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("stats")
def stats(self):
return self._property_stats
@stats.setter
def stats(self, value):
if value is None:
self._property_stats = None
return
if isinstance(value, dict):
value = Statistics.from_dict(value)
else:
self.assert_isinstance(value, "stats", Statistics)
self._property_stats = value
@schema_property("metadata")
def metadata(self):
return self._property_metadata
@metadata.setter
def metadata(self, value):
if value is None:
self._property_metadata = None
return
self.assert_isinstance(value, "metadata", (dict,))
self._property_metadata = value
@schema_property("tags")
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
| CreateVersionRequest |
python | hynek__structlog | src/structlog/processors.py | {
"start": 29093,
"end": 30352
} | class ____:
r"""
Rename the ``event`` key in event dicts.
This is useful if you want to use consistent log message keys across
platforms and/or use the ``event`` key for something custom.
.. warning::
It's recommended to put this processor right before the renderer, since
some processors may rely on the presence and meaning of the ``event``
key.
Args:
to: Rename ``event_dict["event"]`` to ``event_dict[to]``
replace_by:
Rename ``event_dict[replace_by]`` to ``event_dict["event"]``.
*replace_by* missing from ``event_dict`` is handled gracefully.
.. versionadded:: 22.1.0
See also the :ref:`rename-event` recipe.
"""
def __init__(self, to: str, replace_by: str | None = None):
self.to = to
self.replace_by = replace_by
def __call__(
self, logger: logging.Logger, name: str, event_dict: EventDict
) -> EventDict:
event = event_dict.pop("event")
event_dict[self.to] = event
if self.replace_by is not None:
replace_by = event_dict.pop(self.replace_by, None)
if replace_by is not None:
event_dict["event"] = replace_by
return event_dict
| EventRenamer |
python | kamyu104__LeetCode-Solutions | Python/longest-path-with-different-adjacent-characters.py | {
"start": 80,
"end": 1427
} | class ____(object):
def longestPath(self, parent, s):
"""
:type parent: List[int]
:type s: str
:rtype: int
"""
def topological_sort(s, adj, in_degree):
result = 1
top2 = collections.defaultdict(lambda:[0]*2)
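            # top2[v] keeps the two longest chain lengths arriving at v from children
            # whose character differs from s[v]; joining both through v forms a candidate path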
q = [(i, 1) for i, d in enumerate(in_degree) if not d]
while q:
new_q = []
for (u, l) in q:
for v in adj[u]:
if s[v] != s[u]:
if l > top2[v][0]:
top2[v][0], top2[v][1] = l, top2[v][0]
elif l > top2[v][1]:
top2[v][1] = l
in_degree[v] -= 1
if in_degree[v]:
continue
new_q.append((v, top2[v][0]+1))
result = max(result, top2[v][0]+top2[v][1]+1)
del top2[v]
q = new_q
return result
adj = [[] for _ in xrange(len(s))]
in_degree = [0]*len(s)
for i in xrange(1, len(parent)):
adj[i].append(parent[i])
in_degree[parent[i]] += 1
return topological_sort(s, adj, in_degree)
# Time: O(n)
# Space: O(h)
# tree, dfs
| Solution |
python | kamyu104__LeetCode-Solutions | Python/stable-subarrays-with-equal-boundary-and-interior-sum.py | {
"start": 715,
"end": 1213
} | class ____(object):
def countStableSubarrays(self, capacity):
"""
:type capacity: List[int]
:rtype: int
"""
cnt = collections.defaultdict(lambda: collections.defaultdict(int))
result = prefix = 0
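        # a stable subarray [l..r] needs capacity[l] == capacity[r] == x and an interior
        # summing to x, i.e. prefix_before_r - prefix_after_l == x; cnt[x][p] counts earlier
        # left boundaries with value x whose running prefix right after them equals p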
for i, x in enumerate(capacity):
result += cnt[x][prefix-x]
prefix += x
cnt[x][prefix] += 1
if x == 0 and i-1 >= 0 and capacity[i-1] == 0:
result -= 1
return result
| Solution2 |
python | getsentry__sentry | src/sentry/db/models/base.py | {
"start": 13224,
"end": 13394
} | class ____(BaseModel):
id: models.Field[int, int] = BoundedBigAutoField(primary_key=True)
class Meta:
abstract = True
__repr__ = sane_repr("id")
| Model |
python | marshmallow-code__apispec | tests/test_ext_marshmallow_openapi.py | {
"start": 8310,
"end": 14974
} | class ____:
def test_custom_properties_for_custom_fields(self, spec_fixture):
class DelimitedList(fields.List):
"""Delimited list field"""
def delimited_list2param(self, field: fields.Field, **kwargs) -> dict:
ret: dict = {}
if isinstance(field, DelimitedList):
if self.openapi_version.major < 3:
ret["collectionFormat"] = "csv"
else:
ret["explode"] = False
ret["style"] = "form"
return ret
spec_fixture.marshmallow_plugin.converter.add_parameter_attribute_function(
delimited_list2param
)
class MySchema(Schema):
delimited_list = DelimitedList(fields.Int)
param = spec_fixture.marshmallow_plugin.converter.schema2parameters(
MySchema(), location="query"
)[0]
if spec_fixture.openapi.openapi_version.major < 3:
assert param["collectionFormat"] == "csv"
else:
assert param["explode"] is False
assert param["style"] == "form"
def test_field_required(self, openapi):
field = fields.Str(required=True)
res = openapi._field2parameter(field, name="field", location="query")
assert res["required"] is True
def test_field_deprecated(self, openapi):
field = fields.Str(metadata={"deprecated": True})
res = openapi._field2parameter(field, name="field", location="query")
assert res["deprecated"] is True
def test_schema_partial(self, openapi):
class UserSchema(Schema):
field = fields.Str(required=True)
res_nodump = openapi.schema2parameters(
UserSchema(partial=True), location="query"
)
param = res_nodump[0]
assert param["required"] is False
def test_schema_partial_list(self, openapi):
class UserSchema(Schema):
field = fields.Str(required=True)
partial_field = fields.Str(required=True)
res_nodump = openapi.schema2parameters(
UserSchema(partial=("partial_field",)), location="query"
)
param = next(p for p in res_nodump if p["name"] == "field")
assert param["required"] is True
param = next(p for p in res_nodump if p["name"] == "partial_field")
assert param["required"] is False
@pytest.mark.parametrize("ListClass", [fields.List, CustomList])
def test_field_list(self, ListClass, openapi):
field = ListClass(fields.Str)
res = openapi._field2parameter(field, name="field", location="query")
assert res["in"] == "query"
if openapi.openapi_version.major < 3:
assert res["type"] == "array"
assert res["items"]["type"] == "string"
assert res["collectionFormat"] == "multi"
else:
assert res["schema"]["type"] == "array"
assert res["schema"]["items"]["type"] == "string"
assert res["style"] == "form"
assert res["explode"] is True
# json/body is invalid for OpenAPI 3
@pytest.mark.parametrize("openapi", ("2.0",), indirect=True)
def test_schema_body(self, openapi):
class UserSchema(Schema):
name = fields.Str()
email = fields.Email()
res = openapi.schema2parameters(UserSchema, location="body")
assert len(res) == 1
param = res[0]
assert param["in"] == "body"
assert param["schema"] == {"$ref": "#/definitions/User"}
# json/body is invalid for OpenAPI 3
@pytest.mark.parametrize("openapi", ("2.0",), indirect=True)
def test_schema_body_with_dump_only(self, openapi):
class UserSchema(Schema):
name = fields.Str()
email = fields.Email(dump_only=True)
res_nodump = openapi.schema2parameters(UserSchema, location="body")
assert len(res_nodump) == 1
param = res_nodump[0]
assert param["in"] == "body"
assert param["schema"] == build_ref(openapi.spec, "schema", "User")
# json/body is invalid for OpenAPI 3
@pytest.mark.parametrize("openapi", ("2.0",), indirect=True)
def test_schema_body_many(self, openapi):
class UserSchema(Schema):
name = fields.Str()
email = fields.Email()
res = openapi.schema2parameters(UserSchema(many=True), location="body")
assert len(res) == 1
param = res[0]
assert param["in"] == "body"
assert param["schema"]["type"] == "array"
assert param["schema"]["items"] == {"$ref": "#/definitions/User"}
def test_schema_query(self, openapi):
class UserSchema(Schema):
name = fields.Str()
email = fields.Email()
res = openapi.schema2parameters(UserSchema, location="query")
assert len(res) == 2
res.sort(key=lambda param: param["name"])
assert res[0]["name"] == "email"
assert res[0]["in"] == "query"
assert res[1]["name"] == "name"
assert res[1]["in"] == "query"
def test_schema_query_instance(self, openapi):
class UserSchema(Schema):
name = fields.Str()
email = fields.Email()
res = openapi.schema2parameters(UserSchema(), location="query")
assert len(res) == 2
res.sort(key=lambda param: param["name"])
assert res[0]["name"] == "email"
assert res[0]["in"] == "query"
assert res[1]["name"] == "name"
assert res[1]["in"] == "query"
def test_schema_query_instance_many_should_raise_exception(self, openapi):
class UserSchema(Schema):
name = fields.Str()
email = fields.Email()
with pytest.raises(AssertionError):
openapi.schema2parameters(UserSchema(many=True), location="query")
def test_fields_query(self, openapi):
class MySchema(Schema):
name = fields.Str()
email = fields.Email()
res = openapi.schema2parameters(MySchema, location="query")
assert len(res) == 2
res.sort(key=lambda param: param["name"])
assert res[0]["name"] == "email"
assert res[0]["in"] == "query"
assert res[1]["name"] == "name"
assert res[1]["in"] == "query"
def test_raises_error_if_not_a_schema(self, openapi):
class NotASchema:
pass
expected_error = (
f"{NotASchema!r} is neither a Schema class nor a Schema instance."
)
with pytest.raises(ValueError, match=expected_error):
openapi.schema2jsonschema(NotASchema)
| TestMarshmallowSchemaToParameters |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 188354,
"end": 189543
} | class ____(Response):
"""
Response of tasks.delete_models endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
"""
_service = "tasks"
_action = "delete_models"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(DeleteModelsResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| DeleteModelsResponse |
python | scrapy__scrapy | tests/test_utils_datatypes.py | {
"start": 9000,
"end": 10869
} | class ____:
def test_cache_with_limit(self):
cache = LocalWeakReferencedCache(limit=2)
r1 = Request("https://example.org")
r2 = Request("https://example.com")
r3 = Request("https://example.net")
cache[r1] = 1
cache[r2] = 2
cache[r3] = 3
assert len(cache) == 2
assert r1 not in cache
assert r2 in cache
assert r3 in cache
assert cache[r1] is None
assert cache[r2] == 2
assert cache[r3] == 3
del r2
# PyPy takes longer to collect dead references
garbage_collect()
assert len(cache) == 1
def test_cache_non_weak_referenceable_objects(self):
cache = LocalWeakReferencedCache()
k1 = None
k2 = 1
k3 = [1, 2, 3]
cache[k1] = 1
cache[k2] = 2
cache[k3] = 3
assert k1 not in cache
assert k2 not in cache
assert k3 not in cache
assert len(cache) == 0
def test_cache_without_limit(self):
maximum = 10**4
cache = LocalWeakReferencedCache()
refs = []
for x in range(maximum):
refs.append(Request(f"https://example.org/{x}"))
cache[refs[-1]] = x
assert len(cache) == maximum
for i, r in enumerate(refs):
assert r in cache
assert cache[r] == i
del r # delete reference to the last object in the list # pylint: disable=undefined-loop-variable
# delete half of the objects, make sure that is reflected in the cache
for _ in range(maximum // 2):
refs.pop()
# PyPy takes longer to collect dead references
garbage_collect()
assert len(cache) == maximum // 2
for i, r in enumerate(refs):
assert r in cache
assert cache[r] == i
| TestLocalWeakReferencedCache |
python | pytorch__pytorch | torch/fx/experimental/symbolic_shapes.py | {
"start": 3298,
"end": 3492
} | class ____(RuntimeError):
cond: sympy.Basic
def __init__(self, cond: sympy.Basic, *args: Any) -> None:
super().__init__(*args)
self.cond = cond
| GuardOnDataDependentSymNode |
python | huggingface__transformers | src/transformers/models/qwen2_vl/image_processing_qwen2_vl.py | {
"start": 3675,
"end": 24877
} | class ____(BaseImageProcessor):
r"""
Constructs a Qwen2-VL image processor that dynamically resizes images based on the original images.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions.
size (`dict[str, int]`, *optional*, defaults to `{"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 1280}`):
Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use when resizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
Mean to use if normalizing the image. This is a float or list of floats for each channel in the image.
image_std (`float` or `list[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
Standard deviation to use if normalizing the image. This is a float or list of floats for each channel in the image.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
min_pixels (`int`, *optional*, defaults to `56 * 56`):
The min pixels of the image to resize the image.
max_pixels (`int`, *optional*, defaults to `28 * 28 * 1280`):
The max pixels of the image to resize the image.
patch_size (`int`, *optional*, defaults to 14):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to 2):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to 2):
The merge size of the vision encoder to llm encoder.
"""
model_input_names = ["pixel_values", "image_grid_thw"]
valid_kwargs = Qwen2VLImageProcessorKwargs
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: bool = True,
min_pixels: Optional[int] = None,
max_pixels: Optional[int] = None,
patch_size: int = 14,
temporal_patch_size: int = 2,
merge_size: int = 2,
**kwargs,
) -> None:
super().__init__(**kwargs)
        if size is not None:
            if "shortest_edge" not in size or "longest_edge" not in size:
                raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
        else:
            size = {"shortest_edge": 56 * 56, "longest_edge": 28 * 28 * 1280}
# backward compatibility: override size with min_pixels and max_pixels if they are provided
if min_pixels is not None:
size["shortest_edge"] = min_pixels
if max_pixels is not None:
size["longest_edge"] = max_pixels
self.min_pixels = size["shortest_edge"]
self.max_pixels = size["longest_edge"]
self.size = size
self.do_resize = do_resize
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.patch_size = patch_size
self.temporal_patch_size = temporal_patch_size
self.merge_size = merge_size
self.do_convert_rgb = do_convert_rgb
def _preprocess(
self,
images: Union[ImageInput, VideoInput],
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
patch_size: Optional[int] = None,
temporal_patch_size: Optional[int] = None,
merge_size: Optional[int] = None,
do_convert_rgb: Optional[bool] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.
Args:
images (`ImageInput`):
Image or batch of images to preprocess. Expects pixel values ranging from 0 to 255. If pixel values range from 0 to 1, set `do_rescale=False`.
vision_info (`list[Dict]`, *optional*):
Optional list of dictionaries containing additional information about vision inputs.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. `shortest_edge` and `longest_edge` keys must be present.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the `PILImageResampling` enums.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Scale factor to use if rescaling the image.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Mean to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Standard deviation to use if normalizing the image. Can be a float or a list of floats corresponding to the number of channels in the image.
patch_size (`int`, *optional*, defaults to `self.patch_size`):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to `self.merge_size`):
The merge size of the vision encoder to llm encoder.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
data_format (`ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
images = make_flat_list_of_images(images)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
height, width = get_image_size(images[0], channel_dim=input_data_format)
resized_height, resized_width = height, width
processed_images = []
for image in images:
if do_resize:
resized_height, resized_width = smart_resize(
height,
width,
factor=patch_size * merge_size,
min_pixels=size["shortest_edge"],
max_pixels=size["longest_edge"],
)
image = resize(
image, size=(resized_height, resized_width), resample=resample, input_data_format=input_data_format
)
if do_rescale:
image = self.rescale(image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
processed_images.append(image)
patches = np.array(processed_images)
if data_format == ChannelDimension.LAST:
patches = patches.transpose(0, 3, 1, 2)
if patches.shape[0] % temporal_patch_size != 0:
repeats = np.repeat(
patches[-1][np.newaxis], temporal_patch_size - (patches.shape[0] % temporal_patch_size), axis=0
)
patches = np.concatenate([patches, repeats], axis=0)
channel = patches.shape[1]
grid_t = patches.shape[0] // temporal_patch_size
grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
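        # arrange patches so that each merge_size x merge_size spatial window (per temporal
        # block) is contiguous before flattening into (num_patches, patch_dim)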
patches = patches.reshape(
grid_t,
temporal_patch_size,
channel,
grid_h // merge_size,
merge_size,
patch_size,
grid_w // merge_size,
merge_size,
patch_size,
)
patches = patches.transpose(0, 3, 6, 4, 7, 2, 1, 5, 8)
flatten_patches = patches.reshape(
grid_t * grid_h * grid_w, channel * temporal_patch_size * patch_size * patch_size
)
return flatten_patches, (grid_t, grid_h, grid_w)
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
min_pixels: Optional[int] = None,
max_pixels: Optional[int] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
patch_size: Optional[int] = None,
temporal_patch_size: Optional[int] = None,
merge_size: Optional[int] = None,
do_convert_rgb: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
min_pixels (`int`, *optional*, defaults to `self.min_pixels`):
The min pixels of the image to resize the image.
max_pixels (`int`, *optional*, defaults to `self.max_pixels`):
The max pixels of the image to resize the image.
patch_size (`int`, *optional*, defaults to `self.patch_size`):
The spatial patch size of the vision encoder.
temporal_patch_size (`int`, *optional*, defaults to `self.temporal_patch_size`):
The temporal patch size of the vision encoder.
merge_size (`int`, *optional*, defaults to `self.merge_size`):
The merge size of the vision encoder to llm encoder.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
min_pixels = min_pixels if min_pixels is not None else self.min_pixels
max_pixels = max_pixels if max_pixels is not None else self.max_pixels
if size is not None:
if "shortest_edge" not in size or "longest_edge" not in size:
raise ValueError("size must contain 'shortest_edge' and 'longest_edge' keys.")
min_pixels = size["shortest_edge"]
elif min_pixels is not None and max_pixels is not None:
# backward compatibility: override size with min_pixels and max_pixels if they are provided
size = {"shortest_edge": min_pixels, "longest_edge": max_pixels}
else:
size = {**self.size}
do_resize = do_resize if do_resize is not None else self.do_resize
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
patch_size = patch_size if patch_size is not None else self.patch_size
temporal_patch_size = temporal_patch_size if temporal_patch_size is not None else self.temporal_patch_size
merge_size = merge_size if merge_size is not None else self.merge_size
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
if images is not None:
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if images is not None and not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
data = {}
pixel_values, vision_grid_thws = [], []
for image in images:
patches, image_grid_thw = self._preprocess(
image,
do_resize=do_resize,
size=size,
resample=resample,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
patch_size=patch_size,
temporal_patch_size=temporal_patch_size,
merge_size=merge_size,
data_format=data_format,
do_convert_rgb=do_convert_rgb,
input_data_format=input_data_format,
)
pixel_values.extend(patches)
vision_grid_thws.append(image_grid_thw)
pixel_values = np.array(pixel_values)
vision_grid_thws = np.array(vision_grid_thws)
data.update({"pixel_values": pixel_values, "image_grid_thw": vision_grid_thws})
return BatchFeature(data=data, tensor_type=return_tensors)
def get_number_of_image_patches(self, height: int, width: int, images_kwargs=None):
"""
A utility that returns number of image patches for a given image size.
Args:
height (`int`):
Height of the input image.
width (`int`):
Width of the input image.
images_kwargs (`dict`, *optional*)
Any kwargs to override defaults of the image processor.
Returns:
`int`: Number of image patches per image.
"""
min_pixels = images_kwargs["min_pixels"] if "min_pixels" in images_kwargs else self.size["shortest_edge"]
max_pixels = images_kwargs["max_pixels"] if "max_pixels" in images_kwargs else self.size["longest_edge"]
patch_size = images_kwargs.get("patch_size", self.patch_size)
merge_size = images_kwargs.get("merge_size", self.merge_size)
factor = patch_size * merge_size
resized_height, resized_width = smart_resize(
height, width, factor, min_pixels=min_pixels, max_pixels=max_pixels
)
grid_h, grid_w = resized_height // patch_size, resized_width // patch_size
return grid_h * grid_w
__all__ = ["Qwen2VLImageProcessor"]
| Qwen2VLImageProcessor |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-netsuite/source_netsuite/streams.py | {
"start": 8504,
"end": 12088
} | class ____(NetsuiteStream):
@property
def cursor_field(self) -> str:
return INCREMENTAL_CURSOR
def filter_records_newer_than_state(
self,
stream_state: Mapping[str, Any] = None,
records: Mapping[str, Any] = None,
) -> Iterable[Mapping[str, Any]]:
"""Parse the records with respect to `stream_state` for `incremental` sync."""
if stream_state:
for record in records:
if record.get(self.cursor_field, self.start_datetime) >= stream_state.get(self.cursor_field):
yield record
else:
yield from records
def get_state_value(self, stream_state: Mapping[str, Any] = None) -> str:
"""
        Sometimes the object has no `cursor_field` value assigned and an empty string is emitted
        as the state value; this breaks parsing of the time component with the datetime lib,
        so to avoid those errors we fall back to the default start_date from the input config.
"""
state = stream_state.get(self.cursor_field) if stream_state else self.start_datetime
if not state:
            self.logger.info(f"Stream state for `{self.name}` was not emitted, falling back to default value: {self.start_datetime}")
return self.start_datetime
return state
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
**kwargs,
) -> Iterable[Mapping]:
records = super().parse_response(response, stream_state, stream_slice, next_page_token)
yield from self.filter_records_newer_than_state(stream_state, records)
def get_updated_state(
self,
current_stream_state: MutableMapping[str, Any],
latest_record: Mapping[str, Any],
) -> Mapping[str, Any]:
latest_cursor = latest_record.get(self.cursor_field, self.start_datetime)
current_cursor = current_stream_state.get(self.cursor_field, self.start_datetime)
return {self.cursor_field: max(latest_cursor, current_cursor)}
def request_params(
self, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None, **kwargs
) -> MutableMapping[str, Any]:
params = {**(next_page_token or {})}
if stream_slice:
params.update(
**{"q": f'{self.cursor_field} AFTER "{stream_slice["start"]}" AND {self.cursor_field} BEFORE "{stream_slice["end"]}"'}
)
return params
def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
# Netsuite cannot order records returned by the API, so we need stream slices
# to maintain state properly https://docs.airbyte.com/connector-development/cdk-python/incremental-stream/#streamstream_slices
slices = []
state = self.get_state_value(stream_state)
start = datetime.strptime(state, NETSUITE_OUTPUT_DATETIME_FORMAT).date()
# handle abnormal state values
if start > date.today():
return slices
else:
while start <= date.today():
next_day = start + timedelta(days=self.window_in_days)
slice_start = start.strftime(self.default_datetime_format)
slice_end = next_day.strftime(self.default_datetime_format)
yield {"start": slice_start, "end": slice_end}
start = next_day
| IncrementalNetsuiteStream |
python | pandas-dev__pandas | pandas/io/excel/_pyxlsb.py | {
"start": 483,
"end": 4358
} | class ____(BaseExcelReader["Workbook"]):
@doc(storage_options=_shared_docs["storage_options"])
def __init__(
self,
filepath_or_buffer: FilePath | ReadBuffer[bytes],
storage_options: StorageOptions | None = None,
engine_kwargs: dict | None = None,
) -> None:
"""
Reader using pyxlsb engine.
Parameters
----------
filepath_or_buffer : str, path object, or Workbook
Object to be parsed.
{storage_options}
engine_kwargs : dict, optional
Arbitrary keyword arguments passed to excel engine.
"""
import_optional_dependency("pyxlsb")
# This will call load_workbook on the filepath or buffer
# And set the result to the book-attribute
super().__init__(
filepath_or_buffer,
storage_options=storage_options,
engine_kwargs=engine_kwargs,
)
@property
def _workbook_class(self) -> type[Workbook]:
from pyxlsb import Workbook
return Workbook
def load_workbook(
self, filepath_or_buffer: FilePath | ReadBuffer[bytes], engine_kwargs
) -> Workbook:
from pyxlsb import open_workbook
# TODO: hack in buffer capability
# This might need some modifications to the Pyxlsb library
# Actual work for opening it is in xlsbpackage.py, line 20-ish
return open_workbook(filepath_or_buffer, **engine_kwargs)
@property
def sheet_names(self) -> list[str]:
return self.book.sheets
def get_sheet_by_name(self, name: str):
self.raise_if_bad_sheet_by_name(name)
return self.book.get_sheet(name)
def get_sheet_by_index(self, index: int):
self.raise_if_bad_sheet_by_index(index)
# pyxlsb sheets are indexed from 1 onwards
# There's a fix for this in the source, but the pypi package doesn't have it
return self.book.get_sheet(index + 1)
def _convert_cell(self, cell) -> Scalar:
# TODO: there is no way to distinguish between floats and datetimes in pyxlsb
# This means that there is no way to read datetime types from an xlsb file yet
if cell.v is None:
            return ""  # returning "" (instead of None) keeps non-named columns showing up as Unnamed: i
if isinstance(cell.v, float):
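            # numeric cells come back as floats; collapse whole numbers to int so integer
            # columns keep an integer dtype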
val = int(cell.v)
if val == cell.v:
return val
else:
return float(cell.v)
return cell.v
def get_sheet_data(
self,
sheet,
file_rows_needed: int | None = None,
) -> list[list[Scalar]]:
data: list[list[Scalar]] = []
previous_row_number = -1
# When sparse=True the rows can have different lengths and empty rows are
# not returned. The cells are namedtuples of row, col, value (r, c, v).
for row in sheet.rows(sparse=True):
row_number = row[0].r
converted_row = [self._convert_cell(cell) for cell in row]
while converted_row and converted_row[-1] == "":
# trim trailing empty elements
converted_row.pop()
if converted_row:
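                # pad with empty lists for the rows that sparse iteration skipped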
data.extend([[]] * (row_number - previous_row_number - 1))
data.append(converted_row)
previous_row_number = row_number
if file_rows_needed is not None and len(data) >= file_rows_needed:
break
if data:
# extend rows to max_width
max_width = max(len(data_row) for data_row in data)
if min(len(data_row) for data_row in data) < max_width:
empty_cell: list[Scalar] = [""]
data = [
data_row + (max_width - len(data_row)) * empty_cell
for data_row in data
]
return data
| PyxlsbReader |
python | kamyu104__LeetCode-Solutions | Python/find-lucky-integer-in-an-array.py | {
"start": 50,
"end": 362
} | class ____(object):
def findLucky(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
count = collections.Counter(arr)
result = -1
        for k, v in count.items():  # items() runs on Python 3; the original iteritems() is Python 2 only
if k == v:
result = max(result, k)
return result
| Solution |
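A quick illustration of the lucky-integer idea in the record above (a value is "lucky" when its frequency in the array equals the value itself). This is a standalone Python 3 sketch with an arbitrary example input, not part of the original repository:

import collections

def find_lucky(arr):
    # largest value whose count equals the value itself, or -1 if none exists
    count = collections.Counter(arr)
    return max((k for k, v in count.items() if k == v), default=-1)

print(find_lucky([2, 2, 3, 4]))  # 2 occurs twice, so this prints 2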
python | pandas-dev__pandas | pandas/tests/series/indexing/test_getitem.py | {
"start": 17608,
"end": 22736
} | class ____:
def test_getitem_callable(self):
# GH#12533
ser = Series(4, index=list("ABCD"))
result = ser[lambda x: "A"]
assert result == ser.loc["A"]
result = ser[lambda x: ["A", "B"]]
expected = ser.loc[["A", "B"]]
tm.assert_series_equal(result, expected)
result = ser[lambda x: [True, False, True, True]]
expected = ser.iloc[[0, 2, 3]]
tm.assert_series_equal(result, expected)
def test_getitem_generator(string_series):
gen = (x > 0 for x in string_series)
result = string_series[gen]
result2 = string_series[iter(string_series > 0)]
expected = string_series[string_series > 0]
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
@pytest.mark.parametrize(
"data",
[
[0, 1],
date_range("2012-01-01", periods=2),
date_range("2012-01-01", periods=2, tz="CET"),
],
)
def test_getitem_ndim_deprecated(data):
series = Series(data)
with pytest.raises(ValueError, match="Multi-dimensional indexing"):
series[:, None]
def test_getitem_multilevel_scalar_slice_not_implemented(
multiindex_year_month_day_dataframe_random_data,
):
# not implementing this for now
df = multiindex_year_month_day_dataframe_random_data
ser = df["A"]
msg = r"\(2000, slice\(3, 4, None\)\)"
with pytest.raises(TypeError, match=msg):
ser[2000, 3:4]
def test_getitem_dataframe_raises():
rng = list(range(10))
ser = Series(10, index=rng)
df = DataFrame(rng, index=rng)
msg = (
"Indexing a Series with DataFrame is not supported, "
"use the appropriate DataFrame column"
)
with pytest.raises(TypeError, match=msg):
ser[df > 5]
def test_getitem_assignment_series_alignment():
# https://github.com/pandas-dev/pandas/issues/37427
# with getitem, when assigning with a Series, it is not first aligned
ser = Series(range(10))
idx = np.array([2, 4, 9])
ser[idx] = Series([10, 11, 12])
expected = Series([0, 1, 10, 3, 11, 5, 6, 7, 8, 12])
tm.assert_series_equal(ser, expected)
def test_getitem_duplicate_index_mistyped_key_raises_keyerror():
# GH#29189 float_index.get_loc(None) should raise KeyError, not TypeError
ser = Series([2, 5, 6, 8], index=[2.0, 4.0, 4.0, 5.0])
with pytest.raises(KeyError, match="None"):
ser[None]
with pytest.raises(KeyError, match="None"):
ser.index.get_loc(None)
with pytest.raises(KeyError, match="None"):
ser.index._engine.get_loc(None)
def test_getitem_1tuple_slice_without_multiindex():
ser = Series(range(5))
key = (slice(3),)
result = ser[key]
expected = ser[key[0]]
tm.assert_series_equal(result, expected)
def test_getitem_preserve_name(datetime_series):
result = datetime_series[datetime_series > 0]
assert result.name == datetime_series.name
result = datetime_series[5:10]
assert result.name == datetime_series.name
def test_getitem_with_integer_labels():
# integer indexes, be careful
ser = Series(
np.random.default_rng(2).standard_normal(10), index=list(range(0, 20, 2))
)
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
with pytest.raises(KeyError, match="not in index"):
ser[inds]
with pytest.raises(KeyError, match="not in index"):
ser[arr_inds]
def test_getitem_missing(datetime_series):
# missing
d = datetime_series.index[0] - BDay()
msg = r"Timestamp\('1999-12-31 00:00:00'\)"
with pytest.raises(KeyError, match=msg):
datetime_series[d]
def test_getitem_fancy(string_series, object_series):
msg = r"None of \[Index\(\[1, 2, 3\], dtype='int(32|64)'\)\] are in the \[index\]"
with pytest.raises(KeyError, match=msg):
string_series[[1, 2, 3]]
with pytest.raises(KeyError, match=msg):
object_series[[1, 2, 3]]
def test_getitem_box_float64(datetime_series):
with pytest.raises(KeyError, match="^5$"):
datetime_series[5]
def test_getitem_unordered_dup():
obj = Series(range(5), index=["c", "a", "a", "b", "b"])
assert is_scalar(obj["c"])
assert obj["c"] == 0
def test_getitem_dups():
ser = Series(range(5), index=["A", "A", "B", "C", "C"], dtype=np.int64)
expected = Series([3, 4], index=["C", "C"], dtype=np.int64)
result = ser["C"]
tm.assert_series_equal(result, expected)
def test_getitem_categorical_str():
# GH#31765
ser = Series(range(5), index=Categorical(["a", "b", "c", "a", "b"]))
result = ser["a"]
expected = ser.iloc[[0, 3]]
tm.assert_series_equal(result, expected)
def test_slice_can_reorder_not_uniquely_indexed():
ser = Series(1, index=["a", "a", "b", "b", "c"])
ser[::-1] # it works!
@pytest.mark.parametrize("index_vals", ["aabcd", "aadcb"])
def test_duplicated_index_getitem_positional_indexer(index_vals):
# GH 11747; changed in 3.0 integers are treated as always-labels
s = Series(range(5), index=list(index_vals))
with pytest.raises(KeyError, match="^3$"):
s[3]
| TestGetitemCallable |
python | django__django | django/db/models/fields/related.py | {
"start": 19725,
"end": 36360
} | class ____(RelatedField):
"""
Abstraction of the ForeignKey relation to support multi-column relations.
"""
# Field flags
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
requires_unique_target = True
related_accessor_class = ReverseManyToOneDescriptor
forward_related_accessor_class = ForwardManyToOneDescriptor
rel_class = ForeignObjectRel
def __init__(
self,
to,
on_delete,
from_fields,
to_fields,
rel=None,
related_name=None,
related_query_name=None,
limit_choices_to=None,
parent_link=False,
swappable=True,
**kwargs,
):
if rel is None:
rel = self.rel_class(
self,
to,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
parent_link=parent_link,
on_delete=on_delete,
)
super().__init__(
rel=rel,
related_name=related_name,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
**kwargs,
)
self.from_fields = from_fields
self.to_fields = to_fields
self.swappable = swappable
def __copy__(self):
obj = super().__copy__()
# Remove any cached PathInfo values.
obj.__dict__.pop("path_infos", None)
obj.__dict__.pop("reverse_path_infos", None)
return obj
def check(self, **kwargs):
return [
*super().check(**kwargs),
*self._check_to_fields_exist(),
*self._check_to_fields_composite_pk(),
*self._check_unique_target(),
*self._check_conflict_with_managers(),
]
def _check_to_fields_exist(self):
# Skip nonexistent models.
if isinstance(self.remote_field.model, str):
return []
errors = []
for to_field in self.to_fields:
if to_field:
try:
self.remote_field.model._meta.get_field(to_field)
except exceptions.FieldDoesNotExist:
errors.append(
checks.Error(
"The to_field '%s' doesn't exist on the related "
"model '%s'."
% (to_field, self.remote_field.model._meta.label),
obj=self,
id="fields.E312",
)
)
return errors
def _check_to_fields_composite_pk(self):
from django.db.models.fields.composite import CompositePrimaryKey
# Skip nonexistent models.
if isinstance(self.remote_field.model, str):
return []
errors = []
for to_field in self.to_fields:
try:
field = (
self.remote_field.model._meta.pk
if to_field is None
else self.remote_field.model._meta.get_field(to_field)
)
except exceptions.FieldDoesNotExist:
pass
else:
if isinstance(field, CompositePrimaryKey):
errors.append(
checks.Error(
"Field defines a relation involving model "
f"{self.remote_field.model._meta.object_name!r} which has "
"a CompositePrimaryKey and such relations are not "
"supported.",
obj=self,
id="fields.E347",
)
)
return errors
def _check_unique_target(self):
rel_is_string = isinstance(self.remote_field.model, str)
if rel_is_string or not self.requires_unique_target:
return []
try:
self.foreign_related_fields
except exceptions.FieldDoesNotExist:
return []
if not self.foreign_related_fields:
return []
has_unique_constraint = any(
rel_field.unique for rel_field in self.foreign_related_fields
)
if not has_unique_constraint:
foreign_fields = {f.name for f in self.foreign_related_fields}
remote_opts = self.remote_field.model._meta
has_unique_constraint = (
any(
frozenset(ut) <= foreign_fields
for ut in remote_opts.unique_together
)
or any(
frozenset(uc.fields) <= foreign_fields
for uc in remote_opts.total_unique_constraints
)
# If the model defines a composite primary key and the foreign
# key refers to it, the target is unique.
or (
frozenset(field.name for field in remote_opts.pk_fields)
== foreign_fields
)
)
if not has_unique_constraint:
if len(self.foreign_related_fields) > 1:
field_combination = ", ".join(
f"'{rel_field.name}'" for rel_field in self.foreign_related_fields
)
model_name = self.remote_field.model.__name__
return [
checks.Error(
f"No subset of the fields {field_combination} on model "
f"'{model_name}' is unique.",
hint=(
"Mark a single field as unique=True or add a set of "
"fields to a unique constraint (via unique_together "
"or a UniqueConstraint (without condition) in the "
"model Meta.constraints)."
),
obj=self,
id="fields.E310",
)
]
else:
field_name = self.foreign_related_fields[0].name
model_name = self.remote_field.model.__name__
return [
checks.Error(
f"'{model_name}.{field_name}' must be unique because it is "
"referenced by a foreign key.",
hint=(
"Add unique=True to this field or add a "
"UniqueConstraint (without condition) in the model "
"Meta.constraints."
),
obj=self,
id="fields.E311",
)
]
return []
def _check_conflict_with_managers(self):
errors = []
manager_names = {manager.name for manager in self.opts.managers}
for rel_objs in self.model._meta.related_objects:
related_object_name = rel_objs.name
if related_object_name in manager_names:
field_name = f"{self.model._meta.object_name}.{self.name}"
errors.append(
checks.Error(
f"Related name '{related_object_name}' for '{field_name}' "
"clashes with the name of a model manager.",
hint=(
"Rename the model manager or change the related_name "
f"argument in the definition for field '{field_name}'."
),
obj=self,
id="fields.E348",
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
kwargs["on_delete"] = self.remote_field.on_delete
kwargs["from_fields"] = self.from_fields
kwargs["to_fields"] = self.to_fields
if self.remote_field.parent_link:
kwargs["parent_link"] = self.remote_field.parent_link
if isinstance(self.remote_field.model, str):
if "." in self.remote_field.model:
app_label, model_name = self.remote_field.model.split(".")
kwargs["to"] = "%s.%s" % (app_label, model_name.lower())
else:
kwargs["to"] = self.remote_field.model.lower()
else:
kwargs["to"] = self.remote_field.model._meta.label_lower
# If swappable is True, then see if we're actually pointing to the
# target of a swap.
swappable_setting = self.swappable_setting
if swappable_setting is not None:
# If it's already a settings reference, error
if hasattr(kwargs["to"], "setting_name"):
if kwargs["to"].setting_name != swappable_setting:
raise ValueError(
"Cannot deconstruct a ForeignKey pointing to a model "
"that is swapped in place of more than one model (%s and %s)"
% (kwargs["to"].setting_name, swappable_setting)
)
# Set it
kwargs["to"] = SettingsReference(
kwargs["to"],
swappable_setting,
)
return name, path, args, kwargs
def resolve_related_fields(self):
if not self.from_fields or len(self.from_fields) != len(self.to_fields):
raise ValueError(
"Foreign Object from and to fields must be the same non-zero length"
)
if isinstance(self.remote_field.model, str):
raise ValueError(
"Related model %r cannot be resolved" % self.remote_field.model
)
related_fields = []
for from_field_name, to_field_name in zip(self.from_fields, self.to_fields):
from_field = (
self
if from_field_name == RECURSIVE_RELATIONSHIP_CONSTANT
else self.opts.get_field(from_field_name)
)
to_field = (
self.remote_field.model._meta.pk
if to_field_name is None
else self.remote_field.model._meta.get_field(to_field_name)
)
related_fields.append((from_field, to_field))
return related_fields
@cached_property
def related_fields(self):
return self.resolve_related_fields()
@cached_property
def reverse_related_fields(self):
return [(rhs_field, lhs_field) for lhs_field, rhs_field in self.related_fields]
@cached_property
def local_related_fields(self):
return tuple(lhs_field for lhs_field, rhs_field in self.related_fields)
@cached_property
def foreign_related_fields(self):
return tuple(
rhs_field for lhs_field, rhs_field in self.related_fields if rhs_field
)
def get_local_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.local_related_fields)
def get_foreign_related_value(self, instance):
return self.get_instance_value_for_fields(instance, self.foreign_related_fields)
@staticmethod
def get_instance_value_for_fields(instance, fields):
ret = []
opts = instance._meta
for field in fields:
# Gotcha: in some cases (like fixture loading) a model can have
# different values in parent_ptr_id and parent's id. So, use
# instance.pk (that is, parent_ptr_id) when asked for instance.id.
if field.primary_key:
possible_parent_link = opts.get_ancestor_link(field.model)
if (
not possible_parent_link
or possible_parent_link.primary_key
or possible_parent_link.model._meta.abstract
):
ret.append(instance.pk)
continue
ret.append(getattr(instance, field.attname))
return tuple(ret)
def get_attname_column(self):
attname, column = super().get_attname_column()
return attname, None
def get_joining_fields(self, reverse_join=False):
return tuple(
self.reverse_related_fields if reverse_join else self.related_fields
)
def get_reverse_joining_fields(self):
return self.get_joining_fields(reverse_join=True)
def get_extra_descriptor_filter(self, instance):
"""
Return an extra filter condition for related object fetching when
user does 'instance.fieldname', that is the extra filter is used in
the descriptor of the field.
The filter should be either a dict usable in .filter(**kwargs) call or
a Q-object. The condition will be ANDed together with the relation's
joining columns.
A parallel method is get_extra_restriction() which is used in
JOIN and subquery conditions.
"""
return {}
def get_extra_restriction(self, alias, related_alias):
"""
Return a pair condition used for joining and subquery pushdown. The
condition is something that responds to as_sql(compiler, connection)
method.
Note that currently referring both the 'alias' and 'related_alias'
will not work in some conditions, like subquery pushdown.
A parallel method is get_extra_descriptor_filter() which is used in
instance.fieldname related object fetching.
"""
return None
def get_path_info(self, filtered_relation=None):
"""Get path from this field to the related model."""
opts = self.remote_field.model._meta
from_opts = self.model._meta
return [
PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=self.foreign_related_fields,
join_field=self,
m2m=False,
direct=True,
filtered_relation=filtered_relation,
)
]
@cached_property
def path_infos(self):
return self.get_path_info()
def get_reverse_path_info(self, filtered_relation=None):
"""Get path from the related model to this field's model."""
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [
PathInfo(
from_opts=from_opts,
to_opts=opts,
target_fields=(opts.pk,),
join_field=self.remote_field,
m2m=not self.unique,
direct=False,
filtered_relation=filtered_relation,
)
]
@cached_property
def reverse_path_infos(self):
return self.get_reverse_path_info()
@classmethod
@functools.cache
def get_class_lookups(cls):
bases = inspect.getmro(cls)
bases = bases[: bases.index(ForeignObject) + 1]
class_lookups = [parent.__dict__.get("class_lookups", {}) for parent in bases]
return cls.merge_dicts(class_lookups)
def contribute_to_class(self, cls, name, private_only=False, **kwargs):
super().contribute_to_class(cls, name, private_only=private_only, **kwargs)
setattr(cls, self.name, self.forward_related_accessor_class(self))
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# and swapped models don't get a related descriptor.
if not self.remote_field.hidden and not related.related_model._meta.swapped:
setattr(
cls._meta.concrete_model,
related.accessor_name,
self.related_accessor_class(related),
)
# While 'limit_choices_to' might be a callable, simply pass
# it along for later - this is too early because it's still
# model load time.
if self.remote_field.limit_choices_to:
cls._meta.related_fkey_lookups.append(
self.remote_field.limit_choices_to
)
ForeignObject.register_lookup(RelatedIn)
ForeignObject.register_lookup(RelatedExact)
ForeignObject.register_lookup(RelatedLessThan)
ForeignObject.register_lookup(RelatedGreaterThan)
ForeignObject.register_lookup(RelatedGreaterThanOrEqual)
ForeignObject.register_lookup(RelatedLessThanOrEqual)
ForeignObject.register_lookup(RelatedIsNull)
| ForeignObject |
python | anthropics__anthropic-sdk-python | src/anthropic/types/raw_content_block_stop_event.py | {
"start": 200,
"end": 299
} | class ____(BaseModel):
index: int
type: Literal["content_block_stop"]
| RawContentBlockStopEvent |
python | pytorch__pytorch | torch/_inductor/codegen/common.py | {
"start": 48526,
"end": 49184
} | class ____(IndentedBuffer):
def indent(self, offset: int = 1) -> contextlib.AbstractContextManager[None]:
@contextlib.contextmanager
def ctx() -> Iterator[None]:
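            # on entry: for a positive offset emit "{" and increase the indent
            # (a negative offset closes braces instead); on exit, reverse it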
for _ in range(offset):
self.writeline("{")
self._indent += 1
for _ in range(-offset):
self._indent -= 1
self.writeline("}")
yield
for _ in range(-offset):
self.writeline("{")
self._indent += 1
for _ in range(offset):
self._indent -= 1
self.writeline("}")
return ctx()
| BracesBuffer |
python | viewflow__viewflow | tests/json/test_json__time.py | {
"start": 250,
"end": 952
} | class ____(TestCase):
def test_crud(self):
model = TimeFieldModel(time_field=time(12, 59))
self.assertIsInstance(
model._meta.get_field('time_field'),
models.TimeField
)
self.assertEqual(model.data, {
'time_field': '12:59:00+00:00'
})
model.save()
model = TimeFieldModel.objects.get()
self.assertEqual(model.data, {
'time_field': '12:59:00+00:00'
})
self.assertEqual(model.time_field, time(12, 59))
def test_null_value(self):
model = TimeFieldModel(time_field=None)
self.assertEqual(model.time_field, None)
self.assertEqual(model.data, {})
| Test |
python | walkccc__LeetCode | solutions/1681. Minimum Incompatibility/1681.py | {
"start": 0,
"end": 2299
} | class ____:
def __init__(self):
self.MAX_NUM = 16
def minimumIncompatibility(self, nums: list[int], k: int) -> int:
MAX_COMPATIBILITY = (16 - 1) * (16 // 2)
n = len(nums)
subsetSize = n // k
maxMask = 1 << n
incompatibilities = self._getIncompatibilities(nums, subsetSize)
# dp[i] := the minimum possible sum of incompatibilities of the subset
# of numbers represented by the bitmask i
dp = [MAX_COMPATIBILITY] * maxMask
dp[0] = 0
for mask in range(1, maxMask):
# The number of 1s in `mask` isn't a multiple of `subsetSize`.
if mask.bit_count() % subsetSize != 0:
continue
# https://cp-algorithms.com/algebra/all-submasks.html
submask = mask
while submask > 0:
if incompatibilities[submask] != -1: # valid submask
dp[mask] = min(dp[mask], dp[mask - submask] +
incompatibilities[submask])
submask = (submask - 1) & mask
return dp[-1] if dp[-1] != MAX_COMPATIBILITY else -1
def _getIncompatibilities(
self,
nums: list[int],
subsetSize: int,
) -> list[int]:
"""
Returns an incompatibilities array where
* incompatibilities[i] := the incompatibility of the subset of numbers
represented by the bitmask i
* incompatibilities[i] := -1 if the number of 1s in the bitmask i is not
`subsetSize`
"""
maxMask = 1 << len(nums)
incompatibilities = [-1] * maxMask
for mask in range(maxMask):
if mask.bit_count() == subsetSize and self._isUnique(nums, mask, subsetSize):
incompatibilities[mask] = self._getIncompatibility(nums, mask)
return incompatibilities
def _isUnique(self, nums: list[int], mask: int, subsetSize: int) -> bool:
"""Returns True if the numbers selected by `mask` are unique."""
used = 0
for i, num in enumerate(nums):
if mask >> i & 1:
used |= 1 << num
return used.bit_count() == subsetSize
def _getIncompatibility(self, nums: list[int], mask: int) -> int:
"""
Returns the incompatibility of the selected numbers represented by the
`mask`.
"""
mn = self.MAX_NUM
mx = 0
for i, num in enumerate(nums):
if mask >> i & 1:
mx = max(mx, num)
mn = min(mn, num)
return mx - mn
| Solution |
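To make the submask loop in minimumIncompatibility above easier to follow, here is a standalone sketch of the same enumeration trick (https://cp-algorithms.com/algebra/all-submasks.html); the mask value is just an arbitrary example, not taken from the original solution:

mask = 0b1011
submask = mask
while submask > 0:
    print(bin(submask))            # visits every non-empty submask of `mask`
    submask = (submask - 1) & mask
# prints 0b1011, 0b1010, 0b1001, 0b1000, 0b11, 0b10, 0b1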
python | pypa__hatch | tests/venv/test_utils.py | {
"start": 52,
"end": 251
} | class ____:
def test_length(self):
assert len(get_random_venv_name()) == 4
def test_different(self):
assert get_random_venv_name() != get_random_venv_name()
| TestGetRandomVenvName |
python | django__django | tests/user_commands/management/commands/dance.py | {
"start": 68,
"end": 971
} | class ____(BaseCommand):
help = "Dance around like a madman."
args = ""
requires_system_checks = "__all__"
def add_arguments(self, parser):
parser.add_argument("integer", nargs="?", type=int, default=0)
parser.add_argument("-s", "--style", default="Rock'n'Roll")
parser.add_argument("-x", "--example")
parser.add_argument("--opt-3", action="store_true", dest="option3")
def handle(self, *args, **options):
example = options["example"]
if example == "raise":
raise CommandError(returncode=3)
if options["verbosity"] > 0:
self.stdout.write("I don't feel like dancing %s." % options["style"])
self.stdout.write(",".join(options))
if options["integer"] > 0:
self.stdout.write(
"You passed %d as a positional argument." % options["integer"]
)
| Command |
python | ray-project__ray | rllib/algorithms/iql/torch/default_iql_torch_rl_module.py | {
"start": 683,
"end": 3050
} | class ____(DefaultSACTorchRLModule, DefaultIQLRLModule):
framework: str = "torch"
@override(DefaultSACTorchRLModule)
def _forward_train(self, batch: Dict, **kwargs) -> Dict[str, Any]:
# Right now, IQL runs only with continuous action spaces.
# TODO (simon): Implement it also for discrete action spaces.
if not isinstance(self.action_space, gym.spaces.Box):
raise ValueError(
f"Unsupported action space type: {type(self.action_space)}. "
"Only continuous action spaces are supported."
)
# Call the forward pass of the SAC module.
output = super()._forward_train(batch, **kwargs)
# Create batches for the forward passes of the target Q-networks and the
# value function.
batch_curr = {
Columns.OBS: batch[Columns.OBS],
Columns.ACTIONS: batch[Columns.ACTIONS],
}
batch_next = {Columns.OBS: batch[Columns.NEXT_OBS]}
# These target q-values are needed for the value loss and actor loss.
output[QF_TARGET_PREDS] = self._qf_forward_train_helper(
batch_curr, encoder=self.target_qf_encoder, head=self.target_qf
)
# If a twin-Q architecture is used run its target Q-network.
if self.twin_q:
output[QF_TARGET_PREDS] = torch.min(
output[QF_TARGET_PREDS],
self._qf_forward_train_helper(
batch_curr, encoder=self.target_qf_twin_encoder, head=self.qf_twin
),
)
# Compute values for the current observations.
output[Columns.VF_PREDS] = self.compute_values(batch_curr)
# The values of the next observations are needed for the critic loss.
output[VF_PREDS_NEXT] = self.compute_values(batch_next)
return output
@override(ValueFunctionAPI)
def compute_values(
self,
batch: Dict[str, Any],
embeddings: Optional[Any] = None,
) -> TensorType:
# If no embeddings are provided make a forward pass on the encoder.
if embeddings is None:
embeddings = self.vf_encoder(batch)[ENCODER_OUT]
# Value head.
vf_out = self.vf(embeddings)
# Squeeze out last dimension (single node value head).
return vf_out.squeeze(-1)
| DefaultIQLTorchRLModule |
python | huggingface__transformers | tests/models/blip_2/test_modeling_blip_2.py | {
"start": 7788,
"end": 10681
} | class ____:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
bos_token_id=0,
scope=None,
use_qformer_text_input=False,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
self.bos_token_id = bos_token_id
self.use_qformer_text_input = use_qformer_text_input
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return Blip2QFormerConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
bos_token_id=self.bos_token_id,
use_qformer_text_input=self.use_qformer_text_input,
)
# this class is based on `OPTModelTester` found in tests/models/opt/test_modeling_opt.py
| Blip2QFormerModelTester |
python | tensorflow__tensorflow | tensorflow/python/keras/losses.py | {
"start": 26361,
"end": 29339
} | class ____(LossFunctionWrapper):
"""Computes the crossentropy loss between the labels and predictions.
Use this crossentropy loss function when there are two or more label classes.
We expect labels to be provided as integers. If you want to provide labels
using `one-hot` representation, please use `CategoricalCrossentropy` loss.
There should be `# classes` floating point values per feature for `y_pred`
and a single floating point value per feature for `y_true`.
In the snippet below, there is a single floating point value per example for
  `y_true` and `# classes` floating point values per example for `y_pred`.
The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
`[batch_size, num_classes]`.
Standalone usage:
>>> y_true = [1, 2]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> # Using 'auto'/'sum_over_batch_size' reduction type.
>>> scce = tf.keras.losses.SparseCategoricalCrossentropy()
>>> scce(y_true, y_pred).numpy()
1.177
>>> # Calling with 'sample_weight'.
>>> scce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()
0.814
>>> # Using 'sum' reduction type.
>>> scce = tf.keras.losses.SparseCategoricalCrossentropy(
... reduction=tf.keras.losses.Reduction.SUM)
>>> scce(y_true, y_pred).numpy()
2.354
>>> # Using 'none' reduction type.
>>> scce = tf.keras.losses.SparseCategoricalCrossentropy(
... reduction=tf.keras.losses.Reduction.NONE)
>>> scce(y_true, y_pred).numpy()
array([0.0513, 2.303], dtype=float32)
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd',
loss=tf.keras.losses.SparseCategoricalCrossentropy())
```
"""
def __init__(self,
from_logits=False,
reduction=losses_utils.ReductionV2.AUTO,
name='sparse_categorical_crossentropy'):
"""Initializes `SparseCategoricalCrossentropy` instance.
Args:
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability distribution.
reduction: Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the instance. Defaults to
'sparse_categorical_crossentropy'.
"""
super().__init__(
sparse_categorical_crossentropy,
name=name,
reduction=reduction,
from_logits=from_logits)
| SparseCategoricalCrossentropy |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 22390,
"end": 22788
} | class ____(BaseModel):
"""
Task inlet reference serializer for assets.
"""
model_config = ConfigDict(
extra="forbid",
)
dag_id: Annotated[str, Field(title="Dag Id")]
task_id: Annotated[str, Field(title="Task Id")]
created_at: Annotated[datetime, Field(title="Created At")]
updated_at: Annotated[datetime, Field(title="Updated At")]
| TaskInletAssetReference |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/tests/test_subprocess_env_manager.py | {
"start": 1117,
"end": 1564
} | class ____:
def __init__(self, worker_id, resp=None):
self.worker_id = worker_id
self.process = None
self.conn = None
self.send = Mock()
self.recv = Mock(return_value=resp)
self.waiting = False
def create_worker_mock(worker_id, step_queue, env_factor, engine_c):
return MockEnvWorker(
worker_id, EnvironmentResponse(EnvironmentCommand.RESET, worker_id, worker_id)
)
| MockEnvWorker |
python | yaml__pyyaml | lib/yaml/tokens.py | {
"start": 1109,
"end": 1163
} | class ____(Token):
id = '<stream end>'
| StreamEndToken |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py | {
"start": 8543,
"end": 13184
} | class ____(CloudBase):
"""Base class for cloud provider plugins. Sets up cloud resources before delegation."""
def __init__(self, args: IntegrationConfig, config_extension: str = '.ini') -> None:
super().__init__(args)
self.ci_provider = get_ci_provider()
self.remove_config = False
self.config_static_name = 'cloud-config-%s%s' % (self.platform, config_extension)
self.config_static_path = os.path.join(data_context().content.integration_path, self.config_static_name)
self.config_template_path = os.path.join(ANSIBLE_TEST_CONFIG_ROOT, '%s.template' % self.config_static_name)
self.config_extension = config_extension
self.uses_config = False
self.uses_docker = False
def filter(self, targets: tuple[IntegrationTarget, ...], exclude: list[str]) -> None:
"""Filter out the cloud tests when the necessary config and resources are not available."""
if not self.uses_docker and not self.uses_config:
return
if self.uses_docker and docker_available():
return
if self.uses_config and os.path.exists(self.config_static_path):
return
skip = 'cloud/%s/' % self.platform
skipped = [target.name for target in targets if skip in target.aliases]
if skipped:
exclude.append(skip)
if not self.uses_docker and self.uses_config:
display.warning('Excluding tests marked "%s" which require a "%s" config file (see "%s"): %s'
% (skip.rstrip('/'), self.config_static_path, self.config_template_path, ', '.join(skipped)))
elif self.uses_docker and not self.uses_config:
display.warning('Excluding tests marked "%s" which requires container support: %s'
% (skip.rstrip('/'), ', '.join(skipped)))
elif self.uses_docker and self.uses_config:
display.warning('Excluding tests marked "%s" which requires container support or a "%s" config file (see "%s"): %s'
% (skip.rstrip('/'), self.config_static_path, self.config_template_path, ', '.join(skipped)))
def setup(self) -> None:
"""Setup the cloud resource before delegation and register a cleanup callback."""
self.resource_prefix = self.ci_provider.generate_resource_prefix()
self.resource_prefix = re.sub(r'[^a-zA-Z0-9]+', '-', self.resource_prefix)[:63].lower().rstrip('-')
ExitHandler.register(self.cleanup)
def cleanup(self) -> None:
"""Clean up the cloud resource and any temporary configuration files after tests complete."""
if self.remove_config:
os.remove(self.config_path)
def _use_static_config(self) -> bool:
"""Use a static config file if available. Returns True if static config is used, otherwise returns False."""
if os.path.isfile(self.config_static_path):
display.info('Using existing %s cloud config: %s' % (self.platform, self.config_static_path), verbosity=1)
self.config_path = self.config_static_path
static = True
else:
static = False
self.managed = not static
return static
def _write_config(self, content: str) -> None:
"""Write the given content to the config file."""
prefix = '%s-' % os.path.splitext(os.path.basename(self.config_static_path))[0]
with tempfile.NamedTemporaryFile(dir=data_context().content.integration_path, prefix=prefix, suffix=self.config_extension, delete=False) as config_fd:
filename = os.path.join(data_context().content.integration_path, os.path.basename(config_fd.name))
self.config_path = filename
self.remove_config = True
display.info('>>> Config: %s\n%s' % (filename, content.strip()), verbosity=3)
config_fd.write(to_bytes(content))
config_fd.flush()
def _read_config_template(self) -> str:
"""Read and return the configuration template."""
lines = read_text_file(self.config_template_path).splitlines()
lines = [line for line in lines if not line.startswith('#')]
config = '\n'.join(lines).strip() + '\n'
return config
@staticmethod
def _populate_config_template(template: str, values: dict[str, str]) -> str:
"""Populate and return the given template with the provided values."""
for key in sorted(values):
value = values[key]
template = template.replace('@%s' % key, value)
return template
| CloudProvider |
python | davidhalter__parso | parso/python/errors.py | {
"start": 19283,
"end": 19614
} | class ____(SyntaxRule):
message = "'await' outside async function"
def is_issue(self, leaf):
return not self._normalizer.context.is_async_funcdef()
def get_error_node(self, node):
# Return the whole await statement.
return node.parent
@ErrorFinder.register_rule(value='break')
| _AwaitOutsideAsync |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 832964,
"end": 833675
} | class ____(sgqlc.types.Type):
"""A level of permission and source for a user's access to a
repository.
"""
__schema__ = github_schema
__field_names__ = ("organization", "permission", "source")
organization = sgqlc.types.Field(sgqlc.types.non_null("Organization"), graphql_name="organization")
"""The organization the repository belongs to."""
permission = sgqlc.types.Field(sgqlc.types.non_null(DefaultRepositoryPermissionField), graphql_name="permission")
"""The level of access this source has granted to the user."""
source = sgqlc.types.Field(sgqlc.types.non_null("PermissionGranter"), graphql_name="source")
"""The source of this permission."""
| PermissionSource |
python | pytorch__pytorch | torch/_dynamo/variables/higher_order_ops.py | {
"start": 114241,
"end": 117240
} | class ____(TorchHigherOrderOperatorVariable):
"""
This hop is not exposed to users but is inserted into the graph
after export as a post-processing step.
"""
def call_function(
self,
tx: "InstructionTranslator",
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
args, kwargs = LazyVariableTracker.realize_all((args, kwargs))
if kwargs:
unimplemented(
gb_type="wrap_with_set_grad_enabled: unexpected kwargs",
context=f"args: {args}, kwargs: {kwargs}",
explanation=f"wrap_with_set_grad_enabled expects no keyword arguments (got {len(kwargs)}).",
hints=[
*graph_break_hints.DYNAMO_BUG,
],
)
grad_enabled, fn_var, *rest_args = args
if not isinstance(grad_enabled, ConstantVariable):
unimplemented(
gb_type="wrap_with_set_grad_enabled: non-constant grad_enabled",
context=str(grad_enabled),
explanation="wrap_with_set_grad_enabled expects grad_enabled argument to be a constant.",
hints=[
*graph_break_hints.DYNAMO_BUG,
],
)
_check_supported_callable_arg(tx, fn_var, "enable_grad_fn")
with torch.set_grad_enabled(grad_enabled.as_python_constant()):
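            # speculatively trace the wrapped callable while the requested grad
            # mode is active, so the captured subgraph reflects that setting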
(
(body_r, treespec),
body_graph,
body_lifted_freevars,
) = speculate_subgraph(
tx,
fn_var,
[*rest_args],
{},
"torch.ops.higher_order.wrap_with_set_grad_enabled",
source_target=self.value,
set_subgraph_inputs="manual",
should_flatten_outputs=True,
)
if len(body_lifted_freevars) > 0:
unimplemented(
gb_type="wrap_with_set_grad_enabled: unexpected freevars",
context=str(body_lifted_freevars),
explanation="wrap_with_set_grad_enabled expects no freevars.",
hints=[],
)
body_gmod = torch.fx.GraphModule(tx.output.nn_modules, body_graph)
body_name = tx.output.install_subgraph(
"wrap_body",
body_gmod,
)
body_node = make_attr(tx, body_name)
proxy_args = tuple(
[
grad_enabled.as_python_constant(),
body_node,
]
+ [operand.as_proxy() for operand in rest_args]
)
example_value = pytree.tree_map_only(
torch.fx.Proxy,
lambda a: a.node.meta["example_value"],
body_r.as_proxy(),
)
return _call_function_and_unflatten_output(
tx, self.value, proxy_args, {}, example_value, treespec, body_r
)
| WrapWithSetGradEnabledHigherOrderVariable |
python | django__django | django/contrib/postgres/aggregates/general.py | {
"start": 1209,
"end": 2028
} | class ____(_StringAgg):
def __init__(self, expression, delimiter, **extra):
if isinstance(delimiter, str):
warnings.warn(
"delimiter: str will be resolved as a field reference instead "
"of a string literal on Django 7.0. Pass "
f"`delimiter=Value({delimiter!r})` to preserve the previous behavior.",
category=RemovedInDjango70Warning,
stacklevel=2,
)
delimiter = Value(delimiter)
warnings.warn(
"The PostgreSQL specific StringAgg function is deprecated. Use "
"django.db.models.aggregate.StringAgg instead.",
category=RemovedInDjango70Warning,
stacklevel=2,
)
super().__init__(expression, delimiter, **extra)
| StringAgg |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/GradientEditorItem.py | {
"start": 32717,
"end": 35872
} | class ____(QtWidgets.QGraphicsWidget): ## NOTE: Making this a subclass of GraphicsObject instead results in
## activating this bug: https://bugreports.qt-project.org/browse/PYSIDE-86
## private class
    # When Tick was originally a subclass of QtWidgets.QGraphicsObject,
    # ..GraphicsScene.items(self, *args) would return the Tick object as a
    # class of QtGui.QMultimediaWidgets.QGraphicsVideoItem in python2.7-PyQt5(5.4.0)
sigMoving = QtCore.Signal(object, object)
sigMoved = QtCore.Signal(object)
sigClicked = QtCore.Signal(object, object)
def __init__(self, pos, color, movable=True, scale=10, pen='w', removeAllowed=True):
self.movable = movable
self.moving = False
self.scale = scale
self.color = color
self.pen = fn.mkPen(pen)
self.hoverPen = fn.mkPen(255,255,0)
self.currentPen = self.pen
self.removeAllowed = removeAllowed
self.pg = QtGui.QPainterPath(QtCore.QPointF(0,0))
self.pg.lineTo(QtCore.QPointF(-scale/3**0.5, scale))
self.pg.lineTo(QtCore.QPointF(scale/3**0.5, scale))
self.pg.closeSubpath()
QtWidgets.QGraphicsWidget.__init__(self)
self.setPos(pos[0], pos[1])
if self.movable:
self.setZValue(1)
else:
self.setZValue(0)
def boundingRect(self):
return self.pg.boundingRect()
def shape(self):
return self.pg
def paint(self, p, *args):
p.setRenderHints(QtGui.QPainter.RenderHint.Antialiasing)
p.fillPath(self.pg, fn.mkBrush(self.color))
p.setPen(self.currentPen)
p.drawPath(self.pg)
def mouseDragEvent(self, ev):
if self.movable and ev.button() == QtCore.Qt.MouseButton.LeftButton:
if ev.isStart():
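                # remember where the drag started so the tick can be moved
                # relative to the grab point and restored on a right-click cancel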
self.moving = True
self.cursorOffset = self.pos() - self.mapToParent(ev.buttonDownPos())
self.startPosition = self.pos()
ev.accept()
if not self.moving:
return
newPos = self.cursorOffset + self.mapToParent(ev.pos())
newPos.setY(self.pos().y())
self.setPos(newPos)
self.sigMoving.emit(self, newPos)
if ev.isFinish():
self.moving = False
self.sigMoved.emit(self)
def mouseClickEvent(self, ev):
ev.accept()
if ev.button() == QtCore.Qt.MouseButton.RightButton and self.moving:
self.setPos(self.startPosition)
self.moving = False
self.sigMoving.emit(self, self.startPosition)
self.sigMoved.emit(self)
else:
self.sigClicked.emit(self, ev)
def hoverEvent(self, ev):
if (not ev.isExit()) and ev.acceptDrags(QtCore.Qt.MouseButton.LeftButton):
ev.acceptClicks(QtCore.Qt.MouseButton.LeftButton)
ev.acceptClicks(QtCore.Qt.MouseButton.RightButton)
self.currentPen = self.hoverPen
else:
self.currentPen = self.pen
self.update()
| Tick |
python | PrefectHQ__prefect | scripts/pyright_diff.py | {
"start": 66,
"end": 2891
} | class ____(NamedTuple):
"""Structured representation of a diagnostic for easier table formatting."""
file: str
line: int
character: int
severity: str
message: str
def normalize_diagnostic(diagnostic: Dict[Any, Any]) -> Dict[Any, Any]:
"""Normalize a diagnostic by removing or standardizing volatile fields."""
normalized = diagnostic.copy()
normalized.pop("time", None)
normalized.pop("version", None)
return normalized
def load_and_normalize_file(file_path: str) -> Dict[Any, Any]:
"""Load a JSON file and normalize its contents."""
with open(file_path, "r") as f:
data = json.load(f)
return normalize_diagnostic(data)
def parse_diagnostic(diag: Dict[Any, Any]) -> Diagnostic:
"""Convert a diagnostic dict into a Diagnostic object."""
file = diag.get("file", "unknown_file")
message = diag.get("message", "no message")
range_info = diag.get("range", {})
start = range_info.get("start", {})
line = start.get("line", 0)
char = start.get("character", 0)
severity = diag.get("severity", "unknown")
return Diagnostic(file, line, char, severity, message)
def format_markdown_table(diagnostics: list[Diagnostic]) -> str:
"""Format list of diagnostics as a markdown table."""
if not diagnostics:
return "\nNo new errors found!"
table = ["| File | Location | Message |", "|------|----------|---------|"]
for diag in sorted(diagnostics, key=lambda x: (x.file, x.line, x.character)):
# Escape pipe characters and replace newlines with HTML breaks
message = diag.message.replace("|", "\\|").replace("\n", "<br>")
location = f"L{diag.line}:{diag.character}"
table.append(f"| {diag.file} | {location} | {message} |")
return "\n".join(table)
def compare_pyright_outputs(base_file: str, new_file: str) -> None:
"""Compare two pyright JSON output files and display only new errors."""
base_data = load_and_normalize_file(base_file)
new_data = load_and_normalize_file(new_file)
# Group diagnostics by file
base_diags = set()
new_diags = set()
# Process diagnostics from type completeness symbols
for data, diag_set in [(base_data, base_diags), (new_data, new_diags)]:
for symbol in data.get("typeCompleteness", {}).get("symbols", []):
for diag in symbol.get("diagnostics", []):
if diag.get("severity", "") == "error":
diag_set.add(parse_diagnostic(diag))
# Find new errors
new_errors = list(new_diags - base_diags)
print(format_markdown_table(new_errors))
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: python pyright_diff.py <base.json> <new.json>")
sys.exit(1)
compare_pyright_outputs(sys.argv[1], sys.argv[2])
| Diagnostic |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_sample.py | {
"start": 159,
"end": 8196
} | class ____:
@pytest.fixture
def obj(self, frame_or_series):
if frame_or_series is Series:
arr = np.random.default_rng(2).standard_normal(10)
else:
arr = np.random.default_rng(2).standard_normal((10, 10))
return frame_or_series(arr, dtype=None)
@pytest.mark.parametrize("test", list(range(10)))
def test_sample(self, test, obj):
# Fixes issue: 2419
# Check behavior of random_state argument
# Check for stability when receives seed or random state -- run 10
# times.
seed = np.random.default_rng(2).integers(0, 100)
tm.assert_equal(
obj.sample(n=4, random_state=seed), obj.sample(n=4, random_state=seed)
)
tm.assert_equal(
obj.sample(frac=0.7, random_state=seed),
obj.sample(frac=0.7, random_state=seed),
)
tm.assert_equal(
obj.sample(n=4, random_state=np.random.default_rng(test)),
obj.sample(n=4, random_state=np.random.default_rng(test)),
)
tm.assert_equal(
obj.sample(frac=0.7, random_state=np.random.default_rng(test)),
obj.sample(frac=0.7, random_state=np.random.default_rng(test)),
)
tm.assert_equal(
obj.sample(
frac=2,
replace=True,
random_state=np.random.default_rng(test),
),
obj.sample(
frac=2,
replace=True,
random_state=np.random.default_rng(test),
),
)
os1, os2 = [], []
for _ in range(2):
os1.append(obj.sample(n=4, random_state=test))
os2.append(obj.sample(frac=0.7, random_state=test))
tm.assert_equal(*os1)
tm.assert_equal(*os2)
def test_sample_lengths(self, obj):
# Check lengths are right
        # compare the lengths themselves, not the truthiness of an elementwise comparison
        assert len(obj.sample(n=4)) == 4
        assert len(obj.sample(frac=0.34)) == 3
        assert len(obj.sample(frac=0.36)) == 4
def test_sample_invalid_random_state(self, obj):
# Check for error when random_state argument invalid.
msg = (
"random_state must be an integer, array-like, a BitGenerator, Generator, "
"a numpy RandomState, or None"
)
with pytest.raises(ValueError, match=msg):
obj.sample(random_state="a_string")
def test_sample_wont_accept_n_and_frac(self, obj):
# Giving both frac and N throws error
msg = "Please enter a value for `frac` OR `n`, not both"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, frac=0.3)
def test_sample_requires_positive_n_frac(self, obj):
with pytest.raises(
ValueError,
match="A negative number of rows requested. Please provide `n` >= 0",
):
obj.sample(n=-3)
with pytest.raises(
ValueError,
match="A negative number of rows requested. Please provide `frac` >= 0",
):
obj.sample(frac=-0.3)
def test_sample_requires_integer_n(self, obj):
# Make sure float values of `n` give error
with pytest.raises(ValueError, match="Only integers accepted as `n` values"):
obj.sample(n=3.2)
def test_sample_invalid_weight_lengths(self, obj):
# Weight length must be right
msg = "Weights and axis to be sampled must be of same length"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=[0, 1])
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=[0.5] * 11)
def test_sample_negative_weights(self, obj):
# Check won't accept negative weights
bad_weights = [-0.1] * 10
msg = "weight vector many not include negative values"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=bad_weights)
def test_sample_inf_weights(self, obj):
# Check inf and -inf throw errors:
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
msg = "weight vector may not include `inf` values"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=weights_with_inf)
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=weights_with_ninf)
def test_sample_unit_probabilities_raises(self, obj):
# GH#61516
high_variance_weights = [1] * 10
high_variance_weights[0] = 100
msg = (
"Weighted sampling cannot be achieved with replace=False. Either "
"set replace=True or use smaller weights. See the docstring of "
"sample for details."
)
with pytest.raises(ValueError, match=msg):
obj.sample(n=2, weights=high_variance_weights, replace=False)
def test_sample_unit_probabilities_edge_case_do_not_raise(self, obj):
# GH#61516
# edge case, n*max(weights)/sum(weights) == 1
edge_variance_weights = [1] * 10
edge_variance_weights[0] = 9
# should not raise
obj.sample(n=2, weights=edge_variance_weights, replace=False)
def test_sample_unit_normal_probabilities_do_not_raise(self, obj):
# GH#61516
low_variance_weights = [1] * 10
low_variance_weights[0] = 8
# should not raise
obj.sample(n=2, weights=low_variance_weights, replace=False)
def test_sample_zero_weights(self, obj):
# All zeros raises errors
zero_weights = [0] * 10
with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"):
obj.sample(n=3, weights=zero_weights)
def test_sample_missing_weights(self, obj):
# All missing weights
nan_weights = [np.nan] * 10
with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"):
obj.sample(n=3, weights=nan_weights)
def test_sample_none_weights(self, obj):
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
tm.assert_equal(
obj.sample(n=1, axis=0, weights=weights_with_None), obj.iloc[5:6]
)
@pytest.mark.parametrize(
"func_str,arg",
[
("np.array", [2, 3, 1, 0]),
("np.random.MT19937", 3),
("np.random.PCG64", 11),
],
)
def test_sample_random_state(self, func_str, arg, frame_or_series):
# GH#32503
obj = DataFrame({"col1": range(10, 20), "col2": range(20, 30)})
obj = tm.get_obj(obj, frame_or_series)
result = obj.sample(n=3, random_state=eval(func_str)(arg))
expected = obj.sample(n=3, random_state=com.random_state(eval(func_str)(arg)))
tm.assert_equal(result, expected)
def test_sample_generator(self, frame_or_series):
# GH#38100
obj = frame_or_series(np.arange(100))
rng = np.random.default_rng(2)
# Consecutive calls should advance the seed
result1 = obj.sample(n=50, random_state=rng)
result2 = obj.sample(n=50, random_state=rng)
assert not (result1.index.values == result2.index.values).all()
# Matching generator initialization must give same result
# Consecutive calls should advance the seed
result1 = obj.sample(n=50, random_state=np.random.default_rng(11))
result2 = obj.sample(n=50, random_state=np.random.default_rng(11))
tm.assert_equal(result1, result2)
def test_sample_upsampling_without_replacement(self, frame_or_series):
# GH#27451
obj = DataFrame({"A": list("abc")})
obj = tm.get_obj(obj, frame_or_series)
msg = (
"Replace has to be set to `True` when upsampling the population `frac` > 1."
)
with pytest.raises(ValueError, match=msg):
obj.sample(frac=2, replace=False)
| TestSample |
python | pennersr__django-allauth | allauth/headless/mfa/response.py | {
"start": 2524,
"end": 2672
} | class ____(APIResponse):
def __init__(self, request):
super().__init__(request, status=HTTPStatus.NOT_FOUND)
| RecoveryCodesNotFoundResponse |
python | google__pytype | pytype/overlays/asyncio_types_overlay.py | {
"start": 786,
"end": 1955
} | class ____(abstract.PyTDFunction):
"""Implements the @types.coroutine and @asyncio.coroutine decorator."""
@classmethod
def make(cls, ctx, module):
return super().make("coroutine", ctx, module)
def call(self, node, func, args, alias_map=None):
"""Marks the function as a generator-based coroutine."""
del func, alias_map # unused
self.match_args(node, args)
func_var = args.posargs[0]
for funcv in func_var.data:
code = funcv.code
if not code.has_iterable_coroutine() and (
self.module == "asyncio"
or self.module == "types"
and code.has_generator()
):
code.set_iterable_coroutine()
if funcv.signature.has_return_annotation:
ret = funcv.signature.annotations["return"]
params = {
param: ret.get_formal_type_parameter(param)
for param in (abstract_utils.T, abstract_utils.T2, abstract_utils.V)
}
coroutine_type = abstract.ParameterizedClass(
self.ctx.convert.coroutine_type, params, self.ctx
)
funcv.signature.annotations["return"] = coroutine_type
return node, func_var
| CoroutineDecorator |
python | python-jsonschema__jsonschema | jsonschema/validators.py | {
"start": 31478,
"end": 47159
} | class ____:
"""
Resolve JSON References.
Arguments:
base_uri (str):
The URI of the referring document
referrer:
The actual referring document
store (dict):
A mapping from URIs to documents to cache
cache_remote (bool):
Whether remote refs should be cached after first resolution
handlers (dict):
A mapping from URI schemes to functions that should be used
to retrieve them
urljoin_cache (:func:`functools.lru_cache`):
A cache that will be used for caching the results of joining
the resolution scope to subscopes.
remote_cache (:func:`functools.lru_cache`):
A cache that will be used for caching the results of
resolved remote URLs.
Attributes:
cache_remote (bool):
Whether remote refs should be cached after first resolution
.. deprecated:: v4.18.0
``RefResolver`` has been deprecated in favor of `referencing`.
"""
_DEPRECATION_MESSAGE = (
"jsonschema.RefResolver is deprecated as of v4.18.0, in favor of the "
"https://github.com/python-jsonschema/referencing library, which "
"provides more compliant referencing behavior as well as more "
"flexible APIs for customization. A future release will remove "
"RefResolver. Please file a feature request (on referencing) if you "
"are missing an API for the kind of customization you need."
)
def __init__(
self,
base_uri,
referrer,
store=HashTrieMap(),
cache_remote=True,
handlers=(),
urljoin_cache=None,
remote_cache=None,
):
if urljoin_cache is None:
urljoin_cache = lru_cache(1024)(urljoin)
if remote_cache is None:
remote_cache = lru_cache(1024)(self.resolve_from_url)
self.referrer = referrer
self.cache_remote = cache_remote
self.handlers = dict(handlers)
self._scopes_stack = [base_uri]
self.store = _utils.URIDict(
(uri, each.contents) for uri, each in SPECIFICATIONS.items()
)
self.store.update(
(id, each.META_SCHEMA) for id, each in _META_SCHEMAS.items()
)
self.store.update(store)
self.store.update(
(schema["$id"], schema)
for schema in store.values()
if isinstance(schema, Mapping) and "$id" in schema
)
self.store[base_uri] = referrer
self._urljoin_cache = urljoin_cache
self._remote_cache = remote_cache
@classmethod
def from_schema( # noqa: D417
cls,
schema,
id_of=referencing.jsonschema.DRAFT202012.id_of,
*args,
**kwargs,
):
"""
Construct a resolver from a JSON schema object.
Arguments:
schema:
the referring schema
Returns:
`_RefResolver`
"""
return cls(base_uri=id_of(schema) or "", referrer=schema, *args, **kwargs) # noqa: B026, E501
def push_scope(self, scope):
"""
Enter a given sub-scope.
Treats further dereferences as being performed underneath the
given scope.
"""
self._scopes_stack.append(
self._urljoin_cache(self.resolution_scope, scope),
)
def pop_scope(self):
"""
Exit the most recent entered scope.
Treats further dereferences as being performed underneath the
original scope.
Don't call this method more times than `push_scope` has been
called.
"""
try:
self._scopes_stack.pop()
except IndexError:
raise exceptions._RefResolutionError(
"Failed to pop the scope from an empty stack. "
"`pop_scope()` should only be called once for every "
"`push_scope()`",
) from None
@property
def resolution_scope(self):
"""
Retrieve the current resolution scope.
"""
return self._scopes_stack[-1]
@property
def base_uri(self):
"""
Retrieve the current base URI, not including any fragment.
"""
uri, _ = urldefrag(self.resolution_scope)
return uri
@contextlib.contextmanager
def in_scope(self, scope):
"""
Temporarily enter the given scope for the duration of the context.
.. deprecated:: v4.0.0
"""
warnings.warn(
"jsonschema.RefResolver.in_scope is deprecated and will be "
"removed in a future release.",
DeprecationWarning,
stacklevel=3,
)
self.push_scope(scope)
try:
yield
finally:
self.pop_scope()
@contextlib.contextmanager
def resolving(self, ref):
"""
Resolve the given ``ref`` and enter its resolution scope.
Exits the scope on exit of this context manager.
Arguments:
ref (str):
The reference to resolve
"""
url, resolved = self.resolve(ref)
self.push_scope(url)
try:
yield resolved
finally:
self.pop_scope()
def _find_in_referrer(self, key):
return self._get_subschemas_cache()[key]
@lru_cache # noqa: B019
def _get_subschemas_cache(self):
cache = {key: [] for key in _SUBSCHEMAS_KEYWORDS}
for keyword, subschema in _search_schema(
self.referrer, _match_subschema_keywords,
):
cache[keyword].append(subschema)
return cache
@lru_cache # noqa: B019
def _find_in_subschemas(self, url):
subschemas = self._get_subschemas_cache()["$id"]
if not subschemas:
return None
uri, fragment = urldefrag(url)
for subschema in subschemas:
id = subschema["$id"]
if not isinstance(id, str):
continue
target_uri = self._urljoin_cache(self.resolution_scope, id)
if target_uri.rstrip("/") == uri.rstrip("/"):
if fragment:
subschema = self.resolve_fragment(subschema, fragment)
self.store[url] = subschema
return url, subschema
return None
def resolve(self, ref):
"""
Resolve the given reference.
"""
url = self._urljoin_cache(self.resolution_scope, ref).rstrip("/")
match = self._find_in_subschemas(url)
if match is not None:
return match
return url, self._remote_cache(url)
def resolve_from_url(self, url):
"""
Resolve the given URL.
"""
url, fragment = urldefrag(url)
if not url:
url = self.base_uri
try:
document = self.store[url]
except KeyError:
try:
document = self.resolve_remote(url)
except Exception as exc:
raise exceptions._RefResolutionError(exc) from exc
return self.resolve_fragment(document, fragment)
def resolve_fragment(self, document, fragment):
"""
Resolve a ``fragment`` within the referenced ``document``.
Arguments:
document:
The referent document
fragment (str):
a URI fragment to resolve within it
"""
fragment = fragment.lstrip("/")
if not fragment:
return document
if document is self.referrer:
find = self._find_in_referrer
else:
def find(key):
yield from _search_schema(document, _match_keyword(key))
for keyword in ["$anchor", "$dynamicAnchor"]:
for subschema in find(keyword):
if fragment == subschema[keyword]:
return subschema
for keyword in ["id", "$id"]:
for subschema in find(keyword):
if "#" + fragment == subschema[keyword]:
return subschema
# Resolve via path
parts = unquote(fragment).split("/") if fragment else []
for part in parts:
part = part.replace("~1", "/").replace("~0", "~")
if isinstance(document, Sequence):
try: # noqa: SIM105
part = int(part)
except ValueError:
pass
try:
document = document[part]
except (TypeError, LookupError) as err:
raise exceptions._RefResolutionError(
f"Unresolvable JSON pointer: {fragment!r}",
) from err
return document
def resolve_remote(self, uri):
"""
Resolve a remote ``uri``.
If called directly, does not check the store first, but after
retrieving the document at the specified URI it will be saved in
the store if :attr:`cache_remote` is True.
.. note::
If the requests_ library is present, ``jsonschema`` will use it to
request the remote ``uri``, so that the correct encoding is
detected and used.
If it isn't, or if the scheme of the ``uri`` is not ``http`` or
``https``, UTF-8 is assumed.
Arguments:
uri (str):
The URI to resolve
Returns:
The retrieved document
.. _requests: https://pypi.org/project/requests/
"""
try:
import requests
except ImportError:
requests = None
scheme = urlsplit(uri).scheme
if scheme in self.handlers:
result = self.handlers[scheme](uri)
elif scheme in ["http", "https"] and requests:
# Requests has support for detecting the correct encoding of
# json over http
result = requests.get(uri).json()
else:
# Otherwise, pass off to urllib and assume utf-8
from urllib.request import urlopen
with urlopen(uri) as url: # noqa: S310
result = json.loads(url.read().decode("utf-8"))
if self.cache_remote:
self.store[uri] = result
return result
_SUBSCHEMAS_KEYWORDS = ("$id", "id", "$anchor", "$dynamicAnchor")
def _match_keyword(keyword):
def matcher(value):
if keyword in value:
yield value
return matcher
def _match_subschema_keywords(value):
for keyword in _SUBSCHEMAS_KEYWORDS:
if keyword in value:
yield keyword, value
def _search_schema(schema, matcher):
"""Breadth-first search routine."""
values = deque([schema])
while values:
value = values.pop()
if not isinstance(value, dict):
continue
yield from matcher(value)
values.extendleft(value.values())
def validate(instance, schema, cls=None, *args, **kwargs): # noqa: D417
"""
Validate an instance under the given schema.
>>> validate([2, 3, 4], {"maxItems": 2})
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
:func:`~jsonschema.validators.validate` will first verify that the
provided schema is itself valid, since not doing so can lead to less
obvious error messages and fail in less obvious or consistent ways.
If you know you have a valid schema already, especially
if you intend to validate multiple instances with
the same schema, you likely would prefer using the
`jsonschema.protocols.Validator.validate` method directly on a
specific validator (e.g. ``Draft202012Validator.validate``).
Arguments:
instance:
The instance to validate
schema:
The schema to validate with
cls (jsonschema.protocols.Validator):
The class that will be used to validate the instance.
If the ``cls`` argument is not provided, two things will happen
in accordance with the specification. First, if the schema has a
:kw:`$schema` keyword containing a known meta-schema [#]_ then the
proper validator will be used. The specification recommends that
all schemas contain :kw:`$schema` properties for this reason. If no
:kw:`$schema` property is found, the default validator class is the
latest released draft.
Any other provided positional and keyword arguments will be passed
on when instantiating the ``cls``.
Raises:
`jsonschema.exceptions.ValidationError`:
if the instance is invalid
`jsonschema.exceptions.SchemaError`:
if the schema itself is invalid
.. rubric:: Footnotes
.. [#] known by a validator registered with
`jsonschema.validators.validates`
"""
if cls is None:
cls = validator_for(schema)
cls.check_schema(schema)
validator = cls(schema, *args, **kwargs)
error = exceptions.best_match(validator.iter_errors(instance))
if error is not None:
raise error
def validator_for(
schema,
default: type[Validator] | _utils.Unset = _UNSET,
) -> type[Validator]:
"""
Retrieve the validator class appropriate for validating the given schema.
Uses the :kw:`$schema` keyword that should be present in the given
schema to look up the appropriate validator class.
Arguments:
schema (collections.abc.Mapping or bool):
the schema to look at
default:
the default to return if the appropriate validator class
cannot be determined.
If unprovided, the default is to return the latest supported
draft.
Examples:
The :kw:`$schema` JSON Schema keyword will control which validator
class is returned:
>>> schema = {
... "$schema": "https://json-schema.org/draft/2020-12/schema",
... "type": "integer",
... }
>>> jsonschema.validators.validator_for(schema)
<class 'jsonschema.validators.Draft202012Validator'>
Here, a draft 7 schema instead will return the draft 7 validator:
>>> schema = {
... "$schema": "http://json-schema.org/draft-07/schema#",
... "type": "integer",
... }
>>> jsonschema.validators.validator_for(schema)
<class 'jsonschema.validators.Draft7Validator'>
Schemas with no ``$schema`` keyword will fall back to the default
argument:
>>> schema = {"type": "integer"}
>>> jsonschema.validators.validator_for(
... schema, default=Draft7Validator,
... )
<class 'jsonschema.validators.Draft7Validator'>
or if none is provided, to the latest version supported.
Always including the keyword when authoring schemas is highly
recommended.
"""
DefaultValidator = _LATEST_VERSION if default is _UNSET else default
if schema is True or schema is False or "$schema" not in schema:
return DefaultValidator # type: ignore[return-value]
if schema["$schema"] not in _META_SCHEMAS and default is _UNSET:
warn(
(
"The metaschema specified by $schema was not found. "
"Using the latest draft to validate, but this will raise "
"an error in the future."
),
DeprecationWarning,
stacklevel=2,
)
return _META_SCHEMAS.get(schema["$schema"], DefaultValidator)
| _RefResolver |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_number_of_decimal_places_to_equal.py | {
"start": 2298,
"end": 8686
} | class ____(ColumnMapExpectation):
"""Expect all values in a numeric column to have the same number of specified decimal places.
This expectation tests if all the values in a column has the same number of decimal places as the
inputted number of decimal places. In the case where the decimal places are all 0s (an integer),
the value automatically passes. Currently have not figured out how to preserve 0s in decimal to string conversion.
"""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"a": [2.15, 17.57, 34.21, 1.00],
"b": [1.17, 4.3, 6.433, 2.14],
"c": [1, 4.00, 6.43, 2.14],
},
"schemas": {},
"tests": [
{
"title": "positive_test",
"exact_match_out": False,
"in": {"column": "a", "decimal_places": 2},
"out": {"success": True},
},
{
"title": "negative_test",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "b", "decimal_places": 2},
"out": {"success": False},
},
{
"title": "positive_test_whole_numbers",
"include_in_gallery": True,
"exact_match_out": False,
"in": {"column": "c", "decimal_places": 2},
"out": {"success": True},
},
],
},
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"experimental",
"precision",
"formatting",
"floating_point",
"hackathon-20200123",
],
"contributors": [
"@samsonq",
"@spbail",
"@Lord-of-Bugs",
"@BladderBoy",
"@Rim921",
],
}
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.decimal_places_equal"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
# Please see {some doc} for more information about domain and success keys, and other arguments to Expectations
success_keys = ("decimal_places",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This method defines a question Renderer
# For more info on Renderers, see {some doc}
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.question")
# def _question_renderer(
# cls, configuration, result=None, runtime_configuration=None
# ):
# column = configuration.kwargs.get("column")
# mostly = configuration.kwargs.get("mostly")
# return f'Do at least {mostly * 100}% of values in column "{column}" equal 3?'
# This method defines an answer Renderer
#!!! This example renderer should render RenderedStringTemplateContent, not just a string
# @classmethod
# @renderer(renderer_type="renderer.answer")
# def _answer_renderer(
# cls, configuration=None, result=None, runtime_configuration=None
# ):
# column = result.expectation_config.kwargs.get("column")
# mostly = result.expectation_config.kwargs.get("mostly")
# regex = result.expectation_config.kwargs.get("regex")
# if result.success:
# return f'At least {mostly * 100}% of values in column "{column}" equal 3.'
# else:
# return f'Less than {mostly * 100}% of values in column "{column}" equal 3.'
# This method defines a prescriptive Renderer
# @classmethod
# @renderer(renderer_type="renderer.prescriptive")
# @render_suite_parameter_string
# def _prescriptive_renderer(
# cls,
# configuration=None,
# result=None,
# runtime_configuration=None,
# **kwargs,
# ):
#!!! This example renderer should be shorter
# runtime_configuration = runtime_configuration or {}
# include_column_name = False if runtime_configuration.get("include_column_name") is False else True
# styling = runtime_configuration.get("styling")
# params = substitute_none_for_missing(
# configuration.kwargs,
# ["column", "regex", "mostly", "row_condition", "condition_parser"],
# )
# template_str = "values must be equal to 3"
# if params["mostly"] is not None:
# params["mostly_pct"] = num_to_str(
# params["mostly"] * 100, precision=15, no_scientific=True
# )
# # params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
# template_str += ", at least $mostly_pct % of the time."
# else:
# template_str += "."
# if include_column_name:
# template_str = "$column " + template_str
# if params["row_condition"] is not None:
# (
# conditional_template_str,
# conditional_params,
# ) = parse_row_condition_string_pandas_engine(params["row_condition"])
# template_str = conditional_template_str + ", then " + template_str
# params.update(conditional_params)
# return [
# RenderedStringTemplateContent(
# **{
# "content_block_type": "string_template",
# "string_template": {
# "template": template_str,
# "params": params,
# "styling": styling,
# },
# }
# )
# ]
if __name__ == "__main__":
ExpectColumnValuesNumberOfDecimalPlacesToEqual().print_diagnostic_checklist()
| ExpectColumnValuesNumberOfDecimalPlacesToEqual |
python | getsentry__sentry | tests/sentry/net/test_socket.py | {
"start": 271,
"end": 1906
} | class ____(TestCase):
@override_blocklist("10.0.0.0/8", "127.0.0.1")
def test_is_ipaddress_allowed(self) -> None:
is_ipaddress_allowed.cache_clear()
assert is_ipaddress_allowed("127.0.0.1") is False
is_ipaddress_allowed.cache_clear()
assert is_ipaddress_allowed("10.0.1.1") is False
is_ipaddress_allowed.cache_clear()
assert is_ipaddress_allowed("1.1.1.1") is True
@override_blocklist("::ffff:10.0.0.0/104", "::1/128")
def test_is_ipaddress_allowed_ipv6(self) -> None:
is_ipaddress_allowed.cache_clear()
assert is_ipaddress_allowed("::1") is False
is_ipaddress_allowed.cache_clear()
assert is_ipaddress_allowed("::ffff:10.0.1.2") is False
is_ipaddress_allowed.cache_clear()
assert is_ipaddress_allowed("::ffff:1.1.1.1") is True
is_ipaddress_allowed.cache_clear()
assert is_ipaddress_allowed("2001:db8:a::123") is True
@override_blocklist("10.0.0.0/8", "127.0.0.1")
@patch("socket.getaddrinfo")
def test_is_safe_hostname(self, mock_getaddrinfo: MagicMock) -> None:
mock_getaddrinfo.return_value = [(2, 1, 6, "", ("81.0.0.1", 0))]
assert is_safe_hostname("example.com") is True
mock_getaddrinfo.return_value = [(2, 1, 6, "", ("127.0.0.1", 0))]
assert is_safe_hostname("example.com") is False
@override_settings(SENTRY_ENSURE_FQDN=True)
def test_ensure_fqdn(self) -> None:
assert ensure_fqdn("example.com") == "example.com."
assert ensure_fqdn("127.0.0.1") == "127.0.0.1"
assert ensure_fqdn("example.com.") == "example.com."
| SocketTest |
python | doocs__leetcode | solution/3500-3599/3582.Generate Tag for Video Caption/Solution.py | {
"start": 0,
"end": 221
} | class ____:
def generateTag(self, caption: str) -> str:
words = [s.capitalize() for s in caption.split()]
if words:
words[0] = words[0].lower()
return "#" + "".join(words)[:99]
| Solution |
python | numpy__numpy | benchmarks/benchmarks/bench_reduce.py | {
"start": 73,
"end": 340
} | class ____(Benchmark):
def setup(self):
self.squares = get_squares().values()
def time_axis_0(self):
[np.add.reduce(a, axis=0) for a in self.squares]
def time_axis_1(self):
[np.add.reduce(a, axis=1) for a in self.squares]
| AddReduce |
python | kamyu104__LeetCode-Solutions | Python/find-the-kth-largest-integer-in-the-array.py | {
"start": 71,
"end": 1463
} | class ____(object):
def kthLargestNumber(self, nums, k):
"""
:type nums: List[str]
:type k: int
:rtype: str
"""
def nth_element(nums, n, compare=lambda a, b: a < b):
def tri_partition(nums, left, right, target, compare):
mid = left
while mid <= right:
if nums[mid] == target:
mid += 1
elif compare(nums[mid], target):
nums[left], nums[mid] = nums[mid], nums[left]
left += 1
mid += 1
else:
nums[mid], nums[right] = nums[right], nums[mid]
right -= 1
return left, right
left, right = 0, len(nums)-1
while left <= right:
pivot_idx = random.randint(left, right)
pivot_left, pivot_right = tri_partition(nums, left, right, nums[pivot_idx], compare)
if pivot_left <= n <= pivot_right:
return
elif pivot_left > n:
right = pivot_left-1
else: # pivot_right < n.
left = pivot_right+1
nth_element(nums, k-1, compare=lambda a, b: a > b if len(a) == len(b) else len(a) > len(b))
return nums[k-1]
| Solution |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_column_values_to_match_like_pattern.py | {
"start": 2150,
"end": 13523
} | class ____(ColumnMapExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectColumnValuesToMatchLikePattern is a \
Column Map Expectation.
Column Map Expectations are one of the most common types of Expectation.
They are evaluated for a single column and ask a yes/no question for every row in that column.
Based on the result, they then calculate the percentage of rows that gave a positive answer. If the percentage is high enough, the Expectation considers that data valid.
Args:
column (str): \
{COLUMN_DESCRIPTION}
like_pattern (str): \
{LIKE_PATTERN_DESCRIPTION}
Other Parameters:
mostly (None or a float between 0 and 1): \
{MOSTLY_DESCRIPTION} \
For more detail, see [mostly](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#mostly). Default 1.
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
See Also:
[ExpectColumnValuesToMatchRegex](https://greatexpectations.io/expectations/expect_column_values_to_match_regex)
[ExpectColumnValuesToMatchRegexList](https://greatexpectations.io/expectations/expect_column_values_to_match_regex_list)
[ExpectColumnValuesToNotMatchRegex](https://greatexpectations.io/expectations/expect_column_values_to_not_match_regex)
[ExpectColumnValuesToNotMatchRegexList](https://greatexpectations.io/expectations/expect_column_values_to_not_match_regex_list)
[ExpectColumnValuesToMatchLikePatternList](https://greatexpectations.io/expectations/expect_column_values_to_match_like_pattern_list)
[ExpectColumnValuesToNotMatchLikePattern](https://greatexpectations.io/expectations/expect_column_values_to_not_match_like_pattern)
[ExpectColumnValuesToNotMatchLikePatternList](https://greatexpectations.io/expectations/expect_column_values_to_not_match_like_pattern_list)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[10]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test test2
0 "aaa" "ade"
1 "abb" "bee"
2 "acc" "24601"
Code Examples:
Passing Case:
Input:
ExpectColumnValuesToMatchLikePattern(
column="test",
like_pattern="[a]%"
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 0,
"unexpected_percent": 0.0,
"partial_unexpected_list": [],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 0.0,
"unexpected_percent_nonmissing": 0.0
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectColumnValuesToMatchLikePattern(
column="test2",
like_pattern="[a]%"
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"element_count": 3,
"unexpected_count": 2,
"unexpected_percent": 66.66666666666666,
"partial_unexpected_list": [
"bee",
"24601"
],
"missing_count": 0,
"missing_percent": 0.0,
"unexpected_percent_total": 66.66666666666666,
"unexpected_percent_nonmissing": 66.66666666666666
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
like_pattern: Union[str, SuiteParameterDict] = pydantic.Field(
description=LIKE_PATTERN_DESCRIPTION
)
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
map_metric = "column_values.match_like_pattern"
success_keys = (
"mostly",
"like_pattern",
)
args_keys = (
"column",
"like_pattern",
)
class Config:
title = "Expect column values to match like pattern"
@staticmethod
def schema_extra(
schema: Dict[str, Any], model: Type[ExpectColumnValuesToMatchLikePattern]
) -> None:
ColumnMapExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column", RendererValueType.STRING),
("like_pattern", RendererValueType.STRING),
("mostly", RendererValueType.NUMBER),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if renderer_configuration.include_column_name:
template_str = "$column values "
else:
template_str = "Values "
if params.mostly and params.mostly.value < 1.0:
renderer_configuration = cls._add_mostly_pct_param(
renderer_configuration=renderer_configuration
)
template_str += (
"must match like pattern $like_pattern, at least $mostly_pct % of the time."
)
else:
template_str += "must match like pattern $like_pattern."
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
) -> List[RenderedStringTemplateContent]:
runtime_configuration = runtime_configuration or {}
_ = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "like_pattern", "mostly"],
)
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True)
mostly_str = "" if params.get("mostly") is None else ", at least $mostly_pct % of the time"
like_pattern = params.get("like_pattern") # noqa: F841 # FIXME CoP
template_str = f"Values must match like pattern $like_pattern {mostly_str}: "
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
| ExpectColumnValuesToMatchLikePattern |
python | hynek__structlog | tests/test_threadlocal.py | {
"start": 3128,
"end": 4189
} | class ____:
def test_does_not_affect_global(self, log):
"""
A logger from as_immutable is independent of thread-local state.
"""
log = log.new(x=42)
with pytest.deprecated_call():
il = as_immutable(log)
assert isinstance(il._context, dict)
il = il.bind(y=23)
assert {"x": 42, "y": 23} == il._context
assert {"x": 42} == log._context._dict
def test_converts_proxy(self, log):
"""
as_immutable converts a BoundLoggerLazyProxy into a concrete bound
logger.
"""
with pytest.deprecated_call():
il = as_immutable(log)
assert isinstance(il._context, dict)
assert isinstance(il, BoundLoggerBase)
def test_idempotency(self, log):
"""
as_immutable on an as_immutable logger works.
"""
with pytest.deprecated_call():
il = as_immutable(log)
with pytest.deprecated_call():
assert isinstance(as_immutable(il), BoundLoggerBase)
| TestAsImmutable |
python | huggingface__transformers | src/transformers/models/maskformer/modeling_maskformer.py | {
"start": 70034,
"end": 74000
} | class ____(MaskFormerPreTrainedModel):
def __init__(self, config: MaskFormerConfig):
super().__init__(config)
self.pixel_level_module = MaskFormerPixelLevelModule(config)
self.transformer_module = MaskFormerTransformerModule(
in_features=self.pixel_level_module.encoder.channels[-1], config=config
)
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Tensor,
pixel_mask: Optional[Tensor] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> MaskFormerModelOutput:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, MaskFormerModel
>>> from PIL import Image
>>> import requests
>>> # load MaskFormer fine-tuned on ADE20k semantic segmentation
>>> image_processor = AutoImageProcessor.from_pretrained("facebook/maskformer-swin-base-ade")
>>> model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-base-ade")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(image, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**inputs)
>>> # the decoder of MaskFormer outputs hidden states of shape (batch_size, num_queries, hidden_size)
>>> transformer_decoder_last_hidden_state = outputs.transformer_decoder_last_hidden_state
>>> list(transformer_decoder_last_hidden_state.shape)
[1, 100, 256]
```"""
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
batch_size, _, height, width = pixel_values.shape
if pixel_mask is None:
pixel_mask = torch.ones((batch_size, height, width), device=pixel_values.device)
pixel_level_module_output = self.pixel_level_module(
pixel_values, output_hidden_states, return_dict=return_dict
)
image_features = pixel_level_module_output[0]
pixel_embeddings = pixel_level_module_output[1]
transformer_module_output = self.transformer_module(image_features, output_hidden_states, output_attentions)
queries = transformer_module_output.last_hidden_state
encoder_hidden_states = None
pixel_decoder_hidden_states = None
transformer_decoder_hidden_states = None
hidden_states = None
if output_hidden_states:
encoder_hidden_states = pixel_level_module_output[2]
pixel_decoder_hidden_states = pixel_level_module_output[3]
transformer_decoder_hidden_states = transformer_module_output[1]
hidden_states = encoder_hidden_states + pixel_decoder_hidden_states + transformer_decoder_hidden_states
output = MaskFormerModelOutput(
encoder_last_hidden_state=image_features,
pixel_decoder_last_hidden_state=pixel_embeddings,
transformer_decoder_last_hidden_state=queries,
encoder_hidden_states=encoder_hidden_states,
pixel_decoder_hidden_states=pixel_decoder_hidden_states,
transformer_decoder_hidden_states=transformer_decoder_hidden_states,
hidden_states=hidden_states,
attentions=transformer_module_output.attentions,
)
if not return_dict:
output = tuple(v for v in output.values())
return output
| MaskFormerModel |
python | django__django | django/template/engine.py | {
"start": 349,
"end": 8401
} | class ____:
default_builtins = [
"django.template.defaulttags",
"django.template.defaultfilters",
"django.template.loader_tags",
]
def __init__(
self,
dirs=None,
app_dirs=False,
context_processors=None,
debug=False,
loaders=None,
string_if_invalid="",
file_charset="utf-8",
libraries=None,
builtins=None,
autoescape=True,
):
if dirs is None:
dirs = []
if context_processors is None:
context_processors = []
if loaders is None:
loaders = ["django.template.loaders.filesystem.Loader"]
if app_dirs:
loaders += ["django.template.loaders.app_directories.Loader"]
loaders = [("django.template.loaders.cached.Loader", loaders)]
else:
if app_dirs:
raise ImproperlyConfigured(
"app_dirs must not be set when loaders is defined."
)
if libraries is None:
libraries = {}
if builtins is None:
builtins = []
self.dirs = dirs
self.app_dirs = app_dirs
self.autoescape = autoescape
self.context_processors = context_processors
self.debug = debug
self.loaders = loaders
self.string_if_invalid = string_if_invalid
self.file_charset = file_charset
self.libraries = libraries
self.template_libraries = self.get_template_libraries(libraries)
self.builtins = self.default_builtins + builtins
self.template_builtins = self.get_template_builtins(self.builtins)
def __repr__(self):
return (
"<%s:%s app_dirs=%s%s debug=%s loaders=%s string_if_invalid=%s "
"file_charset=%s%s%s autoescape=%s>"
) % (
self.__class__.__qualname__,
"" if not self.dirs else " dirs=%s" % repr(self.dirs),
self.app_dirs,
(
""
if not self.context_processors
else " context_processors=%s" % repr(self.context_processors)
),
self.debug,
repr(self.loaders),
repr(self.string_if_invalid),
repr(self.file_charset),
"" if not self.libraries else " libraries=%s" % repr(self.libraries),
"" if not self.builtins else " builtins=%s" % repr(self.builtins),
repr(self.autoescape),
)
@staticmethod
@functools.lru_cache
def get_default():
"""
Return the first DjangoTemplates backend that's configured, or raise
ImproperlyConfigured if none are configured.
This is required for preserving historical APIs that rely on a
globally available, implicitly configured engine such as:
>>> from django.template import Context, Template
>>> template = Template("Hello {{ name }}!")
>>> context = Context({'name': "world"})
>>> template.render(context)
'Hello world!'
"""
# Since Engine is imported in django.template and since
# DjangoTemplates is a wrapper around this Engine class,
# local imports are required to avoid import loops.
from django.template import engines
from django.template.backends.django import DjangoTemplates
for engine in engines.all():
if isinstance(engine, DjangoTemplates):
return engine.engine
raise ImproperlyConfigured("No DjangoTemplates backend is configured.")
@cached_property
def template_context_processors(self):
context_processors = _builtin_context_processors
context_processors += tuple(self.context_processors)
return tuple(import_string(path) for path in context_processors)
def get_template_builtins(self, builtins):
return [import_library(x) for x in builtins]
def get_template_libraries(self, libraries):
loaded = {}
for name, path in libraries.items():
loaded[name] = import_library(path)
return loaded
@cached_property
def template_loaders(self):
return self.get_template_loaders(self.loaders)
def get_template_loaders(self, template_loaders):
loaders = []
for template_loader in template_loaders:
loader = self.find_template_loader(template_loader)
if loader is not None:
loaders.append(loader)
return loaders
def find_template_loader(self, loader):
if isinstance(loader, (tuple, list)):
loader, *args = loader
else:
args = []
if isinstance(loader, str):
loader_class = import_string(loader)
return loader_class(self, *args)
else:
raise ImproperlyConfigured(
"Invalid value in template loaders configuration: %r" % loader
)
def find_template(self, name, dirs=None, skip=None):
tried = []
for loader in self.template_loaders:
try:
template = loader.get_template(name, skip=skip)
return template, template.origin
except TemplateDoesNotExist as e:
tried.extend(e.tried)
raise TemplateDoesNotExist(name, tried=tried)
def from_string(self, template_code):
"""
Return a compiled Template object for the given template code,
handling template inheritance recursively.
"""
return Template(template_code, engine=self)
def get_template(self, template_name):
"""
Return a compiled Template object for the given template name,
handling template inheritance recursively.
"""
original_name = template_name
try:
template_name, _, partial_name = template_name.partition("#")
except AttributeError:
raise TemplateDoesNotExist(original_name)
if not template_name:
raise TemplateDoesNotExist(original_name)
template, origin = self.find_template(template_name)
if not hasattr(template, "render"):
# template needs to be compiled
template = Template(template, origin, template_name, engine=self)
if not partial_name:
return template
extra_data = getattr(template, "extra_data", {})
try:
partial = extra_data["partials"][partial_name]
except (KeyError, TypeError):
raise TemplateDoesNotExist(partial_name, tried=[template_name])
partial.engine = self
return partial
def render_to_string(self, template_name, context=None):
"""
Render the template specified by template_name with the given context.
For use in Django's test suite.
"""
if isinstance(template_name, (list, tuple)):
t = self.select_template(template_name)
else:
t = self.get_template(template_name)
# Django < 1.8 accepted a Context in `context` even though that's
# unintended. Preserve this ability but don't rewrap `context`.
if isinstance(context, Context):
return t.render(context)
else:
return t.render(Context(context, autoescape=self.autoescape))
def select_template(self, template_name_list):
"""
Given a list of template names, return the first that can be loaded.
"""
if not template_name_list:
raise TemplateDoesNotExist("No template names provided")
not_found = []
for template_name in template_name_list:
try:
return self.get_template(template_name)
except TemplateDoesNotExist as exc:
if exc.args[0] not in not_found:
not_found.append(exc.args[0])
continue
# If we get here, none of the templates could be loaded
raise TemplateDoesNotExist(", ".join(not_found))
| Engine |
python | pytorch__pytorch | test/inductor/test_split_cat_fx_aten_passes.py | {
"start": 7031,
"end": 13160
} | class ____(TestCase):
def compare_dict_tensors(self, ref_dict, res_dict, rtol=1e-3, atol=1e-3):
if len(set(ref_dict.keys())) != len(set(res_dict.keys())):
return False
for key1 in ref_dict:
key2 = "_orig_mod." + key1
assert key2 in res_dict, f"{key1} does not exist in traced module"
if not torch.allclose(ref_dict[key1], res_dict[key2], rtol=rtol, atol=atol):
return False
return True
def compare_pred(self, module, traced, input, rtol=1e-3, atol=1e-3):
ref = module(*input)
res = traced(*input)
self.assertEqual(ref, res, rtol=rtol, atol=atol)
def compare_parameters(self, module, traced, rtol=1e-3, atol=1e-3):
ref_params = dict(module.named_parameters())
res_params = dict(traced.named_parameters())
self.assertTrue(self.compare_dict_tensors(ref_params, res_params, rtol, atol))
def compare_gradients(self, module, traced, rtol=1e-3, atol=1e-3):
ref_grad = {key: param.grad for key, param in module.named_parameters()}
res_grad = {key: param.grad for key, param in traced.named_parameters()}
self.assertTrue(
self.compare_dict_tensors(ref_grad, res_grad, rtol=rtol, atol=atol)
)
@requires_gpu_and_triton
@torch._inductor.config.patch(
pre_grad_fusion_options={},
post_grad_fusion_options={
"normalization_aten_pass": {},
"split_cat_aten_pass": {"threshold_to_cat": 5},
},
)
def test_split_cat_post_grad(self):
counters.clear()
inputs = [
torch.randn(1024, 128, device=torch.device(device=GPU_TYPE)),
torch.randn(1024, 128, device=torch.device(device=GPU_TYPE)),
torch.randn(1024, 32, device=torch.device(device=GPU_TYPE)),
]
module = TestSplitCat()
traced = torch.compile(module)
ref = module(*inputs)
res = traced(*inputs)
self.compare_pred(module, traced, inputs)
self.assertEqual(counters["inductor"]["normalization_aten_pass"], 5)
self.assertEqual(counters["inductor"]["split_cat_aten_pass"], 1)
self.assertEqual(ref, res, rtol=1e-8, atol=1e-8)
self.compare_parameters(module, traced, rtol=1e-8, atol=1e-8)
counters.clear()
inputs = [
torch.randn(1024, 96 * 21, device=torch.device(device=GPU_TYPE)),
torch.randn(1024, 96 * 4, device=torch.device(device=GPU_TYPE)),
torch.randn(1024, 96, device=torch.device(device=GPU_TYPE)),
torch.randn(1024, 96, device=torch.device(device=GPU_TYPE)),
]
module = TestSplitCatPartial()
traced = torch.compile(module)
ref = module(*inputs)
res = traced(*inputs)
self.compare_pred(module, traced, inputs)
self.assertEqual(counters["inductor"]["normalization_aten_pass"], 3)
self.assertEqual(counters["inductor"]["split_cat_aten_pass"], 1)
self.assertEqual(ref, res, rtol=1e-8, atol=1e-8)
self.compare_parameters(module, traced, rtol=1e-8, atol=1e-8)
counters.clear()
@requires_gpu_and_triton
@torch._inductor.config.patch(
pre_grad_fusion_options={},
post_grad_fusion_options={
"normalization_aten_pass": {},
"split_cat_aten_pass": {"threshold_to_cat": 5},
},
)
def test_split_cat_post_grad_singular(self):
counters.clear()
inputs = [
torch.randn(1024, 128, device=torch.device(device=GPU_TYPE)),
torch.randn(1024, 128, device=torch.device(device=GPU_TYPE)),
torch.randn(1024, 32, device=torch.device(device=GPU_TYPE)),
]
module = TestSplitCatSingular()
traced = torch.compile(module)
ref = module(*inputs)
res = traced(*inputs)
self.compare_pred(module, traced, inputs)
self.assertEqual(counters["inductor"]["normalization_aten_pass"], 4)
self.assertEqual(counters["inductor"]["split_cat_aten_pass"], 0)
self.assertEqual(ref, res, rtol=1e-8, atol=1e-8)
self.compare_parameters(module, traced, rtol=1e-8, atol=1e-8)
counters.clear()
@requires_gpu_and_triton
@torch._inductor.config.patch(
pre_grad_fusion_options={},
post_grad_fusion_options={
"normalization_aten_pass": {},
"select_cat_aten_pass": {},
},
)
def test_select_cat_post_grad(self):
counters.clear()
inputs = [
torch.randn(1024, 6, 128, device=torch.device(device=GPU_TYPE)),
torch.randn(1024, 6, 128, device=torch.device(device=GPU_TYPE)),
]
module = TestSelectCat()
traced = torch.compile(module)
ref = module(*inputs)
res = traced(*inputs)
self.compare_pred(module, traced, inputs)
self.assertEqual(counters["inductor"]["normalization_aten_pass"], 4)
self.assertEqual(counters["inductor"]["select_cat_aten_pass"], 1)
self.assertEqual(ref, res, rtol=1e-8, atol=1e-8)
self.compare_parameters(module, traced, rtol=1e-8, atol=1e-8)
counters.clear()
@requires_gpu_and_triton
@torch._inductor.config.patch(
pre_grad_fusion_options={},
post_grad_fusion_options={
"normalization_aten_pass": {},
"move_view_after_cat_aten_pass": {},
},
)
def test_move_view_after_cat_aten(self):
counters.clear()
inputs = [
torch.randn(7, 8, 96, device=torch.device(device=GPU_TYPE)),
]
module = TestMoveViewAferCat()
traced = torch.compile(module)
ref = module(*inputs)
res = traced(*inputs)
self.compare_pred(module, traced, inputs)
self.assertEqual(counters["inductor"]["normalization_aten_pass"], 4)
self.assertEqual(counters["inductor"]["move_view_after_cat_aten_pass"], 1)
self.assertEqual(ref, res, rtol=1e-8, atol=1e-8)
self.compare_parameters(module, traced, rtol=1e-8, atol=1e-8)
counters.clear()
| TestSplitCatAten |
python | huggingface__transformers | src/transformers/models/tvp/modeling_tvp.py | {
"start": 1264,
"end": 2184
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Temporal-Distance IoU loss for video grounding.
logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Contains start_time/duration and end_time/duration. It is the time slot of the videos corresponding to the
input texts.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
| TvpVideoGroundingOutput |
python | bokeh__bokeh | src/bokeh/models/axes.py | {
"start": 8379,
"end": 8636
} | class ____(Axis):
''' A base class for all numeric, non-categorical axis types.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| ContinuousAxis |
python | pypa__pipenv | pipenv/patched/pip/_internal/models/wheel.py | {
"start": 574,
"end": 5581
} | class ____:
"""A wheel file"""
legacy_wheel_file_re = re.compile(
r"""^(?P<namever>(?P<name>[^\s-]+?)-(?P<ver>[^\s-]*?))
((-(?P<build>\d[^-]*?))?-(?P<pyver>[^\s-]+?)-(?P<abi>[^\s-]+?)-(?P<plat>[^\s-]+?)
\.whl|\.dist-info)$""",
re.VERBOSE,
)
def __init__(self, filename: str) -> None:
self.filename = filename
# To make mypy happy specify type hints that can come from either
# parse_wheel_filename or the legacy_wheel_file_re match.
self.name: str
self._build_tag: Optional[BuildTag] = None
try:
wheel_info = parse_wheel_filename(filename)
self.name, _version, self._build_tag, self.file_tags = wheel_info
self.version = str(_version)
except _PackagingInvalidWheelFilename as e:
# Check if the wheel filename is in the legacy format
legacy_wheel_info = self.legacy_wheel_file_re.match(filename)
if not legacy_wheel_info:
raise InvalidWheelFilename(e.args[0]) from None
deprecated(
reason=(
f"Wheel filename {filename!r} is not correctly normalised. "
"Future versions of pip will raise the following error:\n"
f"{e.args[0]}\n\n"
),
replacement=(
"to rename the wheel to use a correctly normalised "
"name (this may require updating the version in "
"the project metadata)"
),
gone_in="25.3",
issue=12938,
)
self.name = legacy_wheel_info.group("name").replace("_", "-")
self.version = legacy_wheel_info.group("ver").replace("_", "-")
# Generate the file tags from the legacy wheel filename
pyversions = legacy_wheel_info.group("pyver").split(".")
abis = legacy_wheel_info.group("abi").split(".")
plats = legacy_wheel_info.group("plat").split(".")
self.file_tags = frozenset(
Tag(interpreter=py, abi=abi, platform=plat)
for py in pyversions
for abi in abis
for plat in plats
)
@property
def build_tag(self) -> BuildTag:
if self._build_tag is not None:
return self._build_tag
# Parse the build tag from the legacy wheel filename
legacy_wheel_info = self.legacy_wheel_file_re.match(self.filename)
assert legacy_wheel_info is not None, "guaranteed by filename validation"
build_tag = legacy_wheel_info.group("build")
match = re.match(r"^(\d+)(.*)$", build_tag)
assert match is not None, "guaranteed by filename validation"
build_tag_groups = match.groups()
self._build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
return self._build_tag
def get_formatted_file_tags(self) -> List[str]:
"""Return the wheel's tags as a sorted list of strings."""
return sorted(str(tag) for tag in self.file_tags)
def support_index_min(self, tags: List[Tag]) -> int:
"""Return the lowest index that one of the wheel's file_tag combinations
achieves in the given list of supported tags.
For example, if there are 8 supported tags and one of the file tags
is first in the list, then return 0.
:param tags: the PEP 425 tags to check the wheel against, in order
with most preferred first.
:raises ValueError: If none of the wheel's file tags match one of
the supported tags.
"""
try:
return next(i for i, t in enumerate(tags) if t in self.file_tags)
except StopIteration:
raise ValueError()
def find_most_preferred_tag(
self, tags: List[Tag], tag_to_priority: Dict[Tag, int]
) -> int:
"""Return the priority of the most preferred tag that one of the wheel's file
tag combinations achieves in the given list of supported tags using the given
tag_to_priority mapping, where lower priorities are more-preferred.
This is used in place of support_index_min in some cases in order to avoid
an expensive linear scan of a large list of tags.
:param tags: the PEP 425 tags to check the wheel against.
:param tag_to_priority: a mapping from tag to priority of that tag, where
lower is more preferred.
:raises ValueError: If none of the wheel's file tags match one of
the supported tags.
"""
return min(
tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority
)
def supported(self, tags: Iterable[Tag]) -> bool:
"""Return whether the wheel is compatible with one of the given tags.
:param tags: the PEP 425 tags to check the wheel against.
"""
return not self.file_tags.isdisjoint(tags)
| Wheel |
python | huggingface__transformers | benchmark_v2/framework/hardware_metrics.py | {
"start": 564,
"end": 2593
} | class ____:
"""A class to hold information about the hardware."""
def __init__(self) -> None:
# Retrieve GPU stats
try:
self.gpu_name, self.gpu_memory_total_gb = get_device_name_and_memory_total()
except Exception:
self.gpu_name, self.gpu_memory_total_gb = None, None
# Retrieve python, torch and CUDA version
self.python_version = f"{sys.version.split()[0]}"
self.torch_version = torch.__version__
if hasattr(torch, "cuda") and torch.cuda.is_available():
self.cuda_version = torch.version.cuda
else:
self.cuda_version = None
# Retrieve general hardware information
self.cpu_count = psutil.cpu_count()
self.memory_total_mb = int(psutil.virtual_memory().total / (1024 * 1024))
def to_dict(self) -> dict[str, None | int | float | str]:
return {
"gpu_name": self.gpu_name,
"gpu_memory_total_gb": self.gpu_memory_total_gb,
"python_version": self.python_version,
"torch_version": self.torch_version,
}
# Functions to get information about the GPU
def get_amd_gpu_stats() -> tuple[int, float]:
"""Returns the utilization and memory used of an AMD GPU, both in percent"""
rocm_smi_output = subprocess.check_output(["rocm-smi", "--json", "--showuse", "--showmeminfo", "VRAM"])
gpu_stats = json.loads(rocm_smi_output.decode("utf-8"))
gpu_stats = [
(card_id, stats["GPU use (%)"], stats["VRAM Total Used Memory (B)"]) for card_id, stats in gpu_stats.items()
]
gpu_stats.sort(key=lambda x: x[1], reverse=True)
return int(gpu_stats[0][1]), float(gpu_stats[0][2]) / 1024**3
def get_nvidia_gpu_stats() -> tuple[int, float]:
"""Returns the utilization and memory used of an NVIDIA GPU, both in percent"""
gpu_stats = gpustat.GPUStatCollection.new_query()
gpu_stats = gpu_stats[0]
return int(gpu_stats["utilization.gpu"]), float(gpu_stats["memory.used"]) / 1024**3
| HardwareInfo |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/_collections.py | {
"start": 15453,
"end": 15532
} | class ____(Protocol[_T_co]):
def __call__(self) -> _T_co: ...
| _CreateFuncType |
python | realpython__materials | django-vue-graphql/source_code_final/back_end/blog/migrations/0001_initial.py | {
"start": 158,
"end": 3015
} | class ____(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Tag",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=50, unique=True)),
],
),
migrations.CreateModel(
name="Profile",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("website", models.URLField(blank=True)),
("bio", models.CharField(blank=True, max_length=240)),
(
"user",
models.OneToOneField(
on_delete=django.db.models.deletion.PROTECT,
to=settings.AUTH_USER_MODEL,
),
),
],
),
migrations.CreateModel(
name="Post",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("title", models.CharField(max_length=255, unique=True)),
("subtitle", models.CharField(blank=True, max_length=255)),
("slug", models.SlugField(max_length=255, unique=True)),
("body", models.TextField()),
("meta_description", models.CharField(blank=True, max_length=150)),
("date_created", models.DateTimeField(auto_now_add=True)),
("date_modified", models.DateTimeField(auto_now=True)),
("publish_date", models.DateTimeField(blank=True, null=True)),
("published", models.BooleanField(default=False)),
(
"author",
models.ForeignKey(
on_delete=django.db.models.deletion.PROTECT, to="blog.profile"
),
),
("tags", models.ManyToManyField(blank=True, to="blog.tag")),
],
options={
"ordering": ["-publish_date"],
},
),
]
| Migration |
python | geekcomputers__Python | classicIndianCardMatch.py | {
"start": 740,
"end": 3515
} | class ____:
def __init__(self):
self.deck = [card(suit, rank) for suit in SUITS for rank in RANKS]
def shuffle(self):
random.shuffle(self.deck)
def dealCard(self):
return random.choice(self.deck)
def __str__(self):
print(self.deck)
# Begin play
# create two decks, one for each player.
print("Gathering brand new two decks of cards............\n")
deck1 = deck()
deck2 = deck()
time.sleep(5)
print("..........decks ready!!!\n")
print("Combining and shuffling both the decks..")
time.sleep(10)
# Shuffle the decks
deck1.shuffle()
deck2.shuffle()
# combine both the shuffled decks
combinedDeck = deck1.deck + deck2.deck
# ReShuffle the combined deck, cut it and distribute to two players.
random.shuffle(combinedDeck)
print("....decks have been combined and shuffled...\n")
print("------------------------------------------\n")
input("Enter a key to cut the deck..\n")
player1 = combinedDeck[0:52]
player2 = combinedDeck[52:]
print(
"Deck has been split into two and Human get a half and computer gets the other...\n"
)
# Begin play:
print("------------------------------------------\n")
print("player1 == Human\n")
print("player2 == Computer\n")
print("------------------------------------------\n")
print("player1 goes first...hit any key to place the card on the pile..\n")
centerPile = []
currentPlayer2Card = None
while (
len(player1) != 0 and len(player2) != 0
): # this needs a fix as it goes on an infinite loop on a success.
switchPlayer = True
while switchPlayer == True:
for card in range(len(player1)):
input("Enter any key to place a card!!!\n")
currentPlayer1Card = player1[card].rank
print("Your current card's rank: {}".format(currentPlayer1Card))
centerPile.append(player1[card])
player1.pop(card)
switchPlayer = False
if currentPlayer2Card == currentPlayer1Card:
player1 = player1 + centerPile
print(
"The human got a match and takes all the cards from center pile.."
)
break
while switchPlayer == False:
for card in range(len(player2)):
currentPlayer2Card = player2[card].rank
print("Computer's current card's rank: {}".format(currentPlayer2Card))
centerPile.append(player2[card])
player2.pop(card)
switchPlayer = True
if currentPlayer1Card == currentPlayer2Card:
player2 = player2 + centerPile
print("Computer got a match and takes all the cards from center pile..")
break
print("GAME OVER!!!\n")
print("Human has {} cards and computer has {}..".format(len(player1), len(player2)))
| deck |
python | numpy__numpy | numpy/f2py/tests/test_string.py | {
"start": 56,
"end": 518
} | class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "string", "char.f90")]
@pytest.mark.slow
def test_char(self):
strings = np.array(["ab", "cd", "ef"], dtype="c").T
inp, out = self.module.char_test.change_strings(
strings, strings.shape[1])
assert inp == pytest.approx(strings)
expected = strings.copy()
expected[1, :] = "AAA"
assert out == pytest.approx(expected)
| TestString |
python | kamyu104__LeetCode-Solutions | Python/minimum-number-of-coins-to-be-added.py | {
"start": 64,
"end": 689
} | class ____(object):
def minimumAddedCoins(self, coins, target):
"""
:type coins: List[int]
:type target: int
:rtype: int
"""
coins.sort()
result = reachable = 0
for x in coins:
# if x > target:
# break
while not reachable >= x-1:
result += 1
reachable += reachable+1
reachable += x
while not reachable >= target:
result += 1
reachable += reachable+1
return result
# Time: O(nlogn + logt)
# Space: O(1)
# lc0330
# sort, greedy
| Solution |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard_python3/django/polls/models.py | {
"start": 658,
"end": 970
} | class ____(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField("date published")
def __str__(self):
return self.question_text
def was_published_recently(self):
return self.pub_date >= timezone.now() - datetime.timedelta(days=1)
| Question |
python | huggingface__transformers | src/transformers/pipelines/document_question_answering.py | {
"start": 3500,
"end": 3743
} | class ____(ExplicitEnum):
LayoutLM = "layoutlm"
LayoutLMv2andv3 = "layoutlmv2andv3"
VisionEncoderDecoder = "vision_encoder_decoder"
@add_end_docstrings(build_pipeline_init_args(has_image_processor=True, has_tokenizer=True))
| ModelType |
python | huggingface__transformers | tests/models/flaubert/test_modeling_flaubert.py | {
"start": 12332,
"end": 17404
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
# Doesn't run generation tests. Outdated custom `prepare_inputs_for_generation` -- TODO @gante
all_generative_model_classes = ()
pipeline_model_mapping = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available() and is_sacremoses_available()
else {}
)
# TODO: Fix the failed tests
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if (
pipeline_test_case_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast")
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
# Flaubert has 2 QA models -> need to manually set the correct labels for one of them here
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = FlaubertModelTester(self)
self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_flaubert_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*config_and_inputs)
# Copied from tests/models/distilbert/test_modeling_distilbert.py with Distilbert->Flaubert
def test_flaubert_model_with_sinusoidal_encodings(self):
config = FlaubertConfig(sinusoidal_embeddings=True)
model = FlaubertModel(config=config)
sinusoidal_pos_embds = torch.empty((config.max_position_embeddings, config.emb_dim), dtype=torch.float32)
create_sinusoidal_embeddings(config.max_position_embeddings, config.emb_dim, sinusoidal_pos_embds)
self.model_tester.parent.assertTrue(torch.equal(model.position_embeddings.weight, sinusoidal_pos_embds))
def test_flaubert_lm_head(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)
def test_flaubert_simple_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)
def test_flaubert_qa(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)
def test_flaubert_sequence_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
def test_flaubert_token_classif(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)
def test_flaubert_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model_name = "flaubert/flaubert_small_cased"
model = FlaubertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
@require_torch
| FlaubertModelTest |
python | Netflix__metaflow | metaflow/tutorials/03-playlist-redux/playlist.py | {
"start": 49,
"end": 4117
} | class ____(FlowSpec):
"""
The next version of our playlist generator that uses the statistics
generated from 'Episode 02' to improve the title recommendations.
The flow performs the following steps:
1) Load the genre-specific statistics from the MovieStatsFlow.
2) In parallel branches:
- A) Build a playlist from the top grossing films in the requested genre.
- B) Choose a random movie.
3) Join the two to create a movie playlist and display it.
"""
genre = Parameter(
"genre", help="Filter movies for a particular genre.", default="Sci-Fi"
)
recommendations = Parameter(
"recommendations",
help="The number of movies recommended for " "the playlist.",
default=5,
)
@step
def start(self):
"""
Use the Metaflow client to retrieve the latest successful run from our
MovieStatsFlow and assign them as data artifacts in this flow.
"""
from metaflow import Flow, get_metadata
# Print metadata provider
print("Using metadata provider: %s" % get_metadata())
# Load the analysis from the MovieStatsFlow.
run = Flow("MovieStatsFlow").latest_successful_run
print("Using analysis from '%s'" % str(run))
self.genre_stats = run.data.genre_stats
# Compute our two recommendation types in parallel.
self.next(self.bonus_movie, self.genre_movies)
@step
def bonus_movie(self):
"""
This step chooses a random title for a different movie genre.
"""
import random
# Concatenate all the genre-specific data frames.
df = {"movie_title": [], "genres": []}
for genre, data in self.genre_stats.items():
if genre != self.genre.lower():
for row_idx in range(len(data["dataframe"]["movie_title"])):
if (
self.genre.lower()
not in data["dataframe"]["genres"][row_idx].lower()
):
df["movie_title"].append(
data["dataframe"]["movie_title"][row_idx]
)
df["genres"].append(data["dataframe"]["genres"][row_idx])
# Choose a random movie.
random_index = random.randint(0, len(df["genres"]) - 1)
self.bonus = (df["movie_title"][random_index], df["genres"][random_index])
self.next(self.join)
@step
def genre_movies(self):
"""
Select the top performing movies from the user specified genre.
"""
from random import shuffle
# For the genre of interest, generate a potential playlist using only
# highest gross box office titles (i.e. those in the last quartile).
genre = self.genre.lower()
if genre not in self.genre_stats:
self.movies = []
else:
df = self.genre_stats[genre]["dataframe"]
quartiles = self.genre_stats[genre]["quartiles"]
self.movies = [
df["movie_title"][i]
for i, g in enumerate(df["gross"])
if g >= quartiles[-1]
]
# Shuffle the playlist.
shuffle(self.movies)
self.next(self.join)
@step
def join(self, inputs):
"""
Join our parallel branches and merge results.
"""
self.playlist = inputs.genre_movies.movies
self.bonus = inputs.bonus_movie.bonus
self.next(self.end)
@step
def end(self):
"""
Print out the playlist and bonus movie.
"""
# Print the playlist.
print("Playlist for movies in genre '%s'" % self.genre)
for pick, movie in enumerate(self.playlist, start=1):
print("Pick %d: '%s'" % (pick, movie))
if pick >= self.recommendations:
break
print("Bonus Pick: '%s' from '%s'" % (self.bonus[0], self.bonus[1]))
if __name__ == "__main__":
PlayListFlow()
| PlayListFlow |
python | tensorflow__tensorflow | tensorflow/lite/python/lite_test.py | {
"start": 121251,
"end": 121959
} | class ____(LiteTest, parameterized.TestCase):
@parameterized.named_parameters(
('size', lite.Optimize.OPTIMIZE_FOR_SIZE),
('latency', lite.Optimize.OPTIMIZE_FOR_LATENCY))
def testDeprecatedOptionWarning(self, optimization):
"""Test if the warning message when using TOCO is logged."""
log = io.StringIO()
handler = logging.StreamHandler(log)
logging.root.addHandler(handler)
warning_message = 'please use optimizations=[Optimize.DEFAULT] instead.'
lite.QuantizationMode([optimization], lite.TargetSpec(), None, None)
self.assertIn(warning_message, log.getvalue())
logging.root.removeHandler(handler)
if __name__ == '__main__':
test.main()
| QuantizationModeTest |
python | kamyu104__LeetCode-Solutions | Python/minimum-one-bit-operations-to-make-integers-zero.py | {
"start": 760,
"end": 1783
} | class ____(object):
def minimumOneBitOperations(self, n):
"""
:type n: int
:rtype: int
"""
# [observation1]:
# f(1) = 1
# f(10) = 2 * f(1) + 1 = 3
# f(100) = 2 * f(10) + 1 = 7
# by mathematical induction
# => f(2^k) = 2^(k+1)-1
#
# [observation2]
# n f(n)
# 000 0
# 001 1
# 011 2
# 010 3
# 110 4
# 111 5
# 101 6
# 100 7
# let pos be an array of positions where the bit is 1 in ascending order:
# f(0XX...X) + f(1XX...X) = f(100...0)
# f(1XX...X) = f(100...0) - f(0XX...X)
# = (2^(pos[k-1]+1)-1) - f(0XX...X)
# by mathematical induction
# => f(n) = (2^(pos[k-1]+1)-1) - (2^(pos[k-2])+1) + ... + (-1)^(k-1) * (2^(pos[0]+1)-1)
result = 0
while n:
result = -result - (n^(n-1)) # 2^(pos[i]+1)-1
n &= n-1
return abs(result)
| Solution2 |
python | sphinx-doc__sphinx | sphinx/errors.py | {
"start": 977,
"end": 1096
} | class ____(SphinxError):
"""Application initialization error."""
category = 'Application error'
| ApplicationError |
python | Unity-Technologies__ml-agents | ml-agents-envs/mlagents_envs/registry/base_registry_entry.py | {
"start": 109,
"end": 1856
} | class ____:
def __init__(
self,
identifier: str,
expected_reward: Optional[float],
description: Optional[str],
):
"""
BaseRegistryEntry allows launching a Unity Environment with its make method.
:param identifier: The name of the Unity Environment.
:param expected_reward: The cumulative reward that an Agent must receive
for the task to be considered solved.
:param description: A description of the Unity Environment. Contains human
readable information about potential special arguments that the make method can
take as well as information regarding the observation, reward, actions,
behaviors and number of agents in the Environment.
"""
self._identifier = identifier
self._expected_reward = expected_reward
self._description = description
@property
def identifier(self) -> str:
"""
The unique identifier of the entry
"""
return self._identifier
@property
def expected_reward(self) -> Optional[float]:
"""
The cumulative reward that an Agent must receive for the task to be considered
solved.
"""
return self._expected_reward
@property
def description(self) -> Optional[str]:
"""
A description of the Unity Environment the entry can make.
"""
return self._description
@abstractmethod
def make(self, **kwargs: Any) -> BaseEnv:
"""
This method creates a Unity BaseEnv (usually a UnityEnvironment).
"""
raise NotImplementedError(
f"The make() method not implemented for entry {self.identifier}"
)
| BaseRegistryEntry |
python | keras-team__keras | keras/src/backend/common/variables.py | {
"start": 538,
"end": 23318
} | class ____:
"""Represents a backend-agnostic variable in Keras.
A `Variable` acts as a container for state. It holds a tensor value and can
be updated. With the JAX backend, variables are used to implement
"functionalization", the pattern of lifting stateful operations out of
a piece of computation to turn it into a stateless function.
Args:
initializer: Initial value or callable for initialization.
If a callable is used, it should take the arguments
`shape` and `dtype`.
shape: Optional. Tuple for the variable's shape.
Required if `initializer` is a callable.
dtype: Optional. Data type of the variable. Defaults to the global float
dtype type (`"float32"` if never configured).
trainable: Optional. Boolean indicating if variable is trainable.
Defaults to `True`.
autocast: Optional. Boolean indicating whether the variable supports
autocasting. If `True`, the layer may first convert the variable
to the compute data type when accessed. Defaults to `True`.
aggregation: Optional string, one of `None`, `"none"`, `"mean"`,
`"sum"` or `"only_first_replica"` specifying how a distributed
variable will be aggregated. This serves as a semantic annotation,
to be taken into account by downstream backends or users. Defaults
to `"none"`.
name: Optional. A unique name for the variable. Automatically generated
if not set.
Attributes:
shape: The shape of the variable (tuple of integers).
ndim: The number of dimensions of the variable (integer).
dtype: The data type of the variable (string).
trainable: Whether the variable is trainable (boolean).
autocast: Whether the variable supports autocasting (boolean).
aggregation: How a distributed variable will be aggregated (string).
value: The current value of the variable (NumPy array or tensor).
name: The name of the variable (string).
path: The path of the variable within the Keras model or layer (string).
kwargs: Additional backend-specific keyword arguments.
Examples:
**Initializing a `Variable` with a NumPy array:**
```python
import numpy as np
import keras
initial_array = np.ones((3, 3))
variable_from_array = keras.Variable(initializer=initial_array)
```
**Using a Keras initializer to create a `Variable`:**
```python
from keras.src.initializers import Ones
variable_from_initializer = keras.Variable(
initializer=Ones(), shape=(3, 3), dtype="float32"
)
```
**Updating the value of a `Variable`:**
```python
new_value = np.zeros((3, 3), dtype="float32")
variable_from_array.assign(new_value)
```
**Marking a `Variable` as non-trainable:**
```python
non_trainable_variable = keras.Variable(
initializer=np.ones((3, 3), dtype="float32"), trainable=False
)
```
"""
def __init__(
self,
initializer,
shape=None,
dtype=None,
trainable=True,
autocast=True,
aggregation="none",
synchronization="auto",
name=None,
**kwargs,
):
del kwargs
name = name or auto_name(self.__class__.__name__)
if not isinstance(name, str) or "/" in name:
raise ValueError(
"Argument `name` must be a string and "
"cannot contain character `/`. "
f"Received: name={name}"
)
if aggregation not in (
None,
"none",
"mean",
"sum",
"only_first_replica",
):
raise ValueError(
"Invalid value for argument `aggregation`. Expected "
"one of `None`, `'none'`, `'mean'`, `'sum'`, "
"`'only_first_replica'`. "
f"Received: aggregation={aggregation}"
)
if aggregation is None:
aggregation = "none"
if synchronization not in (
None,
"none",
"on_read",
"on_write",
"auto",
):
raise ValueError(
"Invalid value for argument `synchronization`. Expected "
"one of `None`, `'none'`, `'on_read'`, `'on_write'`, "
"`'auto'`. "
f"Received: synchronization={synchronization}"
)
if synchronization is None:
synchronization = "none"
self._name = name
parent_path = current_path()
if parent_path:
self._path = f"{parent_path}/{name}"
else:
self._path = name
self._shape = None
self._initializer = None
self._regularizer = None
self._constraint = None
self._trainable = bool(trainable)
self._autocast = bool(autocast)
self._aggregation = aggregation
self._synchronization = synchronization
# `self._overwrite_with_gradient` is an internal property to determine
# whether this variable should be overwritten by the computed gradient.
# Ref: https://github.com/google/flax/blob/main/flax/linen/fp8_ops.py
self._overwrite_with_gradient = False
if isinstance(initializer, str):
from keras.src import initializers
initializer = initializers.get(initializer)
if callable(initializer):
if shape is None:
raise ValueError(
"When creating a Variable from an initializer, "
"the `shape` argument should be specified. "
f"Received: initializer={initializer} "
f"and shape={shape}"
)
else:
initializer = self._convert_to_tensor(initializer, dtype=dtype)
# If dtype is None and `initializer` is an array, use its dtype.
if dtype is None:
dtype = initializer.dtype
self._dtype = standardize_dtype(dtype)
if in_stateless_scope():
if callable(initializer):
self._value = None
self._initializer = initializer
self._shape = self._validate_shape(shape)
register_uninitialized_variable(self)
else:
raise ValueError(
"You are attempting to create a variable "
"while in a stateless scope. This is disallowed. "
"Make sure that all variables are created "
"before you start using your layer/model objects.\n\n"
"In some cases, you might be seeing this error "
"because you need to "
"implement a `def build(self, input_shape)` method "
"on your layer/model, which will "
"create its variables.\n\n"
"In some other cases, you might be seeing this error "
"because you are instantiating a `Variable` and "
"assigning it to a layer without going through "
"self.add_variable()/self.add_weight(). Always prefer "
"using these methods "
"(with a `shape` and `initializer` argument)."
)
else:
if callable(initializer):
self._shape = self._validate_shape(shape)
self._initialize_with_initializer(initializer)
else:
self._initialize(initializer)
self._shape = self._validate_shape(self._value.shape)
self._ndim = len(self._shape)
def _deferred_initialize(self):
if self._value is not None:
# If NNX is enabled, it's possible the variable was already
# initialized by a concrete call. In this case, _deferred_initialize
# returns early and does not raise an error.
if config.is_nnx_enabled():
return
raise ValueError(f"Variable {self.path} is already initialized.")
if in_stateless_scope():
raise ValueError(
"You are attempting to initialize a variable "
"while in a stateless scope. This is disallowed. "
"Make sure that all variables are initialized "
"before you start using your layer/model objects."
)
self._initialize_with_initializer(self._initializer)
self._initializer = None
def _validate_shape(self, shape):
shape = standardize_shape(shape)
if None in shape:
raise ValueError(
"Shapes used to initialize variables must be "
"fully-defined (no `None` dimensions). Received: "
f"shape={shape} for variable path='{self.path}'"
)
return shape
def _maybe_autocast(self, value):
autocast_scope = get_autocast_scope()
if self._autocast and autocast_scope is not None:
return autocast_scope.maybe_cast(value)
return value
def numpy(self):
return np.array(self)
@property
def aggregation(self):
"""The strategy for aggregating this variable."""
return self._aggregation
@property
def synchronization(self):
"""The strategy for synchronizing this variable."""
return self._synchronization
@property
def value(self):
"""The current value of the variable (numpy array or backend tensor)."""
if in_stateless_scope():
scope = get_stateless_scope()
value = scope.get_current_value(self)
if value is not None:
return self._maybe_autocast(value)
if self._value is None:
# Uninitialized variable. Return a placeholder.
# This is fine because it's only ever used
# in during shape inference / graph tracing
# (anything else would be a bug, to be fixed.)
return self._maybe_autocast(
self._initializer(self._shape, dtype=self._dtype)
)
return self._maybe_autocast(self._value)
def assign(self, value):
value = self._convert_to_tensor(value, dtype=self._dtype)
if not shape_equal(value.shape, self.shape):
raise ValueError(
"The shape of the target variable and "
"the shape of the target value in "
"`variable.assign(value)` must match. "
f"variable.shape={self.shape}, "
f"Received: value.shape={value.shape}. "
f"Target variable: {self}"
)
if in_stateless_scope():
scope = get_stateless_scope()
scope.add_update((self, value))
else:
self._direct_assign(value)
return value
def assign_add(self, value):
return self.assign(self + value)
def assign_sub(self, value):
return self.assign(self - value)
@property
def dtype(self):
"""The data type of the variable."""
autocast_scope = get_autocast_scope()
if (
self._autocast
and autocast_scope is not None
and is_float_dtype(self._dtype)
):
dtype = autocast_scope.dtype
else:
dtype = self._dtype
return backend.standardize_dtype(dtype)
@property
def shape(self):
"""The shape of the variable."""
return self._shape
@property
def ndim(self):
"""The number of dimensions of the variable."""
return self._ndim
@property
def trainable(self):
"""Whether the variable is trainable."""
return self._trainable
@trainable.setter
def trainable(self, value):
self._trainable = bool(value)
@property
def name(self):
"""The name of the variable."""
return self._name
@property
def path(self):
"""The path of the variable within the Keras model or layer."""
return self._path
@property
def overwrite_with_gradient(self):
"""Whether this variable should be overwritten by the gradient.
This property is designed for a special case where we want to overwrite
the variable directly with its computed gradient. For example, in float8
training, new `scale` and `amax_history` are computed as gradients, and
we want to overwrite them directly instead of following the typical
procedure such as gradient descent with a learning rate, gradient
clipping and weight decaying.
"""
return self._overwrite_with_gradient
@overwrite_with_gradient.setter
def overwrite_with_gradient(self, value):
if not isinstance(value, bool):
raise TypeError(
"`overwrite_with_gradient` must be a boolean. "
f"Received: {value}"
)
self._overwrite_with_gradient = value
@property
def regularizer(self):
return self._regularizer
@regularizer.setter
def regularizer(self, value):
from keras.src.regularizers import Regularizer
if value is not None and not isinstance(value, Regularizer):
raise ValueError(
"Invalid value for attribute `regularizer`. Expected an "
"instance of `keras.regularizers.Regularizer`, or `None`. "
f"Received: regularizer={value}"
)
self._regularizer = value
@property
def constraint(self):
return self._constraint
@constraint.setter
def constraint(self, value):
from keras.src.constraints import Constraint
if value is not None and not isinstance(value, Constraint):
raise ValueError(
"Invalid value for attribute `constraint`. Expected an "
"instance of `keras.constraints.Constraint`, or `None`. "
f"Received: constraint={value}"
)
self._constraint = value
def __repr__(self):
value = None
if hasattr(self, "_value") and self._value is not None:
try:
value = backend.core.convert_to_numpy(self._value)
except:
# In some cases the conversion to numpy can fail.
pass
value_str = f", value={value}" if value is not None else ""
return (
f"<Variable path={self.path}, shape={self.shape}, "
f"dtype={self.dtype}{value_str}>"
)
def _initialize(self, value):
raise NotImplementedError
def _initialize_with_initializer(self, initializer):
value = self._convert_to_tensor(
initializer(self._shape, dtype=self._dtype)
)
self._initialize(value)
def _convert_to_tensor(self, value, dtype=None):
raise NotImplementedError
def __getitem__(self, idx):
return self.value.__getitem__(idx)
def __int__(self):
if self.ndim > 0:
raise TypeError(
"Only scalar arrays can be converted to Python scalars. "
f"Got: shape={self.shape}"
)
return int(self.value)
def __float__(self):
if self.ndim > 0:
raise TypeError(
"Only scalar arrays can be converted to Python scalars. "
f"Got: shape={self.shape}"
)
return float(self.value)
def __array__(self, dtype=None):
# We can't directly use self.value.__array__ here because of scalar.
# Numpy require this method to return as array like object. In the case
# of scalar, it will fail the type checking from numpy. We need to
# return a 0d array via numpy.
return np.asarray(self.value.__array__(dtype))
def __bool__(self):
raise TypeError("A Keras Variable cannot be used as a boolean.")
def __neg__(self):
return self.value.__neg__()
def __pos__(self):
return self.value
def __abs__(self):
return self.value.__abs__()
def __invert__(self):
return self.value.__invert__()
def __eq__(self, other):
return backend.numpy.equal(self.value, other)
def __ne__(self, other):
return backend.numpy.not_equal(self.value, other)
def __lt__(self, other):
return backend.numpy.less(self.value, other)
def __le__(self, other):
return backend.numpy.less_equal(self.value, other)
def __gt__(self, other):
return backend.numpy.greater(self.value, other)
def __ge__(self, other):
return backend.numpy.greater_equal(self.value, other)
def __add__(self, other):
return backend.numpy.add(self.value, other)
def __radd__(self, other):
return backend.numpy.add(other, self.value)
def __sub__(self, other):
return backend.numpy.subtract(self.value, other)
def __rsub__(self, other):
return backend.numpy.subtract(other, self.value)
def __mul__(self, other):
return backend.numpy.multiply(self.value, other)
def __rmul__(self, other):
return backend.numpy.multiply(other, self.value)
def __truediv__(self, other):
return backend.numpy.true_divide(self.value, other)
def __rtruediv__(self, other):
return backend.numpy.true_divide(other, self.value)
def __floordiv__(self, other):
return backend.numpy.floor_divide(self.value, other)
def __rfloordiv__(self, other):
return backend.numpy.floor_divide(other, self.value)
def __mod__(self, other):
return backend.numpy.mod(self.value, other)
def __rmod__(self, other):
return backend.numpy.mod(other, self.value)
def __pow__(self, other):
return backend.numpy.power(self.value, other)
def __rpow__(self, other):
return backend.numpy.power(other, self.value)
def __matmul__(self, other):
return backend.numpy.matmul(self.value, other)
def __rmatmul__(self, other):
return backend.numpy.matmul(other, self.value)
def __and__(self, other):
return backend.numpy.logical_and(self.value, other)
def __rand__(self, other):
return backend.numpy.logical_and(other, self.value)
def __or__(self, other):
return backend.numpy.logical_or(self.value, other)
def __ror__(self, other):
return backend.numpy.logical_or(other, self.value)
def __xor__(self, other):
return backend.numpy.logical_xor(self.value, other)
def __rxor__(self, other):
return backend.numpy.logical_xor(other, self.value)
def __round__(self, ndigits=None):
decimals = ndigits or 0
return backend.numpy.round(self.value, decimals=decimals)
def register_uninitialized_variable(variable):
uninitialized_variables = global_state.get_global_attribute(
"uninitialized_variables", [], set_to_default=True
)
uninitialized_variables.append(variable)
def initialize_all_variables():
collection = global_state.get_global_attribute("uninitialized_variables")
if collection:
for v in collection:
v._deferred_initialize()
global_state.set_global_attribute("uninitialized_variables", [])
@keras_export(
["keras.utils.standardize_dtype", "keras.backend.standardize_dtype"]
)
def standardize_dtype(dtype):
if dtype is None:
return config.floatx()
dtype = dtypes.PYTHON_DTYPES_MAP.get(dtype, dtype)
if hasattr(dtype, "name"):
dtype = dtype.name
elif hasattr(dtype, "__name__"):
dtype = dtype.__name__
elif hasattr(dtype, "__str__") and (
"torch" in str(dtype) or "jax.numpy" in str(dtype)
):
dtype = str(dtype).split(".")[-1]
if dtype not in dtypes.ALLOWED_DTYPES:
raise ValueError(f"Invalid dtype: {dtype}")
return dtype
def standardize_shape(shape):
if not isinstance(shape, tuple):
if shape is None:
raise ValueError("Undefined shapes are not supported.")
if not hasattr(shape, "__iter__"):
raise ValueError(f"Cannot convert '{shape}' to a shape.")
if config.backend() == "tensorflow":
if isinstance(shape, tf.TensorShape):
# `tf.TensorShape` may contain `Dimension` objects.
# We need to convert the items in it to either int or `None`
shape = shape.as_list()
if config.backend() == "jax":
# Replace `_DimExpr` (dimension expression) with None
from jax import export as jax_export
shape = tuple(
None if jax_export.is_symbolic_dim(d) else d for d in shape
)
# Handle dimensions that are not ints and not None, verify they're >= 0.
standardized_shape = []
for d in shape:
if d is None:
standardized_shape.append(d)
continue
# Reject these even if they can be cast to int successfully.
if isinstance(d, (str, float)):
raise ValueError(
f"Cannot convert '{shape}' to a shape. "
f"Found invalid dimension '{d}' of type '{type(d)}'. "
)
try:
# Cast numpy scalars, tf constant tensors, etc.
d = int(d)
except Exception as e:
raise ValueError(
f"Cannot convert '{shape}' to a shape. "
f"Found invalid dimension '{d}' of type '{type(d)}'. "
) from e
if d < 0:
raise ValueError(
f"Cannot convert '{shape}' to a shape. "
"Negative dimensions are not allowed."
)
standardized_shape.append(d)
# This also turns subclasses of `tuple` (e.g. `torch.Size`) to plain tuple.
return tuple(standardized_shape)
def shape_equal(a_shape, b_shape):
"""Return whether a_shape == b_shape (allows None entries)."""
if len(a_shape) != len(b_shape):
return False
for e1, e2 in zip(a_shape, b_shape):
if e1 is not None and e2 is not None and e1 != e2:
return False
return True
@keras_export("keras.backend.is_float_dtype")
def is_float_dtype(dtype):
dtype = standardize_dtype(dtype)
return dtype.startswith("float") or dtype.startswith("bfloat")
@keras_export("keras.backend.is_int_dtype")
def is_int_dtype(dtype):
dtype = standardize_dtype(dtype)
return dtype.startswith("int") or dtype.startswith("uint")
def get_autocast_scope():
return global_state.get_global_attribute("autocast_scope")
| Variable |
python | spack__spack | lib/spack/spack/vendor/attr/validators.py | {
"start": 10327,
"end": 11864
} | class ____:
member_validator = attrib(validator=is_callable())
iterable_validator = attrib(
default=None, validator=optional(is_callable())
)
def __call__(self, inst, attr, value):
"""
We use a callable class to be able to change the ``__repr__``.
"""
if self.iterable_validator is not None:
self.iterable_validator(inst, attr, value)
for member in value:
self.member_validator(inst, attr, member)
def __repr__(self):
iterable_identifier = (
""
if self.iterable_validator is None
else " {iterable!r}".format(iterable=self.iterable_validator)
)
return (
"<deep_iterable validator for{iterable_identifier}"
" iterables of {member!r}>"
).format(
iterable_identifier=iterable_identifier,
member=self.member_validator,
)
def deep_iterable(member_validator, iterable_validator=None):
"""
A validator that performs deep validation of an iterable.
:param member_validator: Validator(s) to apply to iterable members
:param iterable_validator: Validator to apply to iterable itself
(optional)
.. versionadded:: 19.1.0
:raises TypeError: if any sub-validators fail
"""
if isinstance(member_validator, (list, tuple)):
member_validator = and_(*member_validator)
return _DeepIterable(member_validator, iterable_validator)
@attrs(repr=False, slots=True, hash=True)
| _DeepIterable |
python | django__django | tests/fixtures_model_package/tests.py | {
"start": 147,
"end": 654
} | class ____(TestCase):
fixtures = ["model_package_fixture1.json", "model_package_fixture2.json"]
def test_class_fixtures(self):
"Test cases can load fixture objects into models defined in packages"
self.assertQuerySetEqual(
Article.objects.all(),
[
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
| SampleTestCase |
python | django__django | tests/constraints/models.py | {
"start": 2769,
"end": 2841
} | class ____(UniqueConstraintProduct):
pass
| ChildUniqueConstraintProduct |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP008.py | {
"start": 2842,
"end": 3342
} | class ____:
def method1(self):
super(ExampleWithKeywords, self, invalid=True).some_method() # Should emit diagnostic but NOT be fixed
def method2(self):
super(ExampleWithKeywords, self, **{"kwarg": "value"}).some_method() # Should emit diagnostic but NOT be fixed
def method3(self):
super(ExampleWithKeywords, self).some_method() # Should be fixed - no keywords
# See: https://github.com/astral-sh/ruff/issues/19357
# Must be detected
| ExampleWithKeywords |
python | networkx__networkx | networkx/algorithms/flow/tests/test_mincost.py | {
"start": 92,
"end": 17806
} | class ____:
def test_simple_digraph(self):
G = nx.DiGraph()
G.add_node("a", demand=-5)
G.add_node("d", demand=5)
G.add_edge("a", "b", weight=3, capacity=4)
G.add_edge("a", "c", weight=6, capacity=10)
G.add_edge("b", "d", weight=1, capacity=9)
G.add_edge("c", "d", weight=2, capacity=5)
flowCost, H = nx.network_simplex(G)
soln = {"a": {"b": 4, "c": 1}, "b": {"d": 4}, "c": {"d": 1}, "d": {}}
assert flowCost == 24
assert nx.min_cost_flow_cost(G) == 24
assert H == soln
assert nx.min_cost_flow(G) == soln
assert nx.cost_of_flow(G, H) == 24
flowCost, H = nx.capacity_scaling(G)
assert flowCost == 24
assert nx.cost_of_flow(G, H) == 24
assert H == soln
def test_negcycle_infcap(self):
G = nx.DiGraph()
G.add_node("s", demand=-5)
G.add_node("t", demand=5)
G.add_edge("s", "a", weight=1, capacity=3)
G.add_edge("a", "b", weight=3)
G.add_edge("c", "a", weight=-6)
G.add_edge("b", "d", weight=1)
G.add_edge("d", "c", weight=-2)
G.add_edge("d", "t", weight=1, capacity=3)
pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G)
def test_sum_demands_not_zero(self):
G = nx.DiGraph()
G.add_node("s", demand=-5)
G.add_node("t", demand=4)
G.add_edge("s", "a", weight=1, capacity=3)
G.add_edge("a", "b", weight=3)
G.add_edge("a", "c", weight=-6)
G.add_edge("b", "d", weight=1)
G.add_edge("c", "d", weight=-2)
G.add_edge("d", "t", weight=1, capacity=3)
pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
def test_no_flow_satisfying_demands(self):
G = nx.DiGraph()
G.add_node("s", demand=-5)
G.add_node("t", demand=5)
G.add_edge("s", "a", weight=1, capacity=3)
G.add_edge("a", "b", weight=3)
G.add_edge("a", "c", weight=-6)
G.add_edge("b", "d", weight=1)
G.add_edge("c", "d", weight=-2)
G.add_edge("d", "t", weight=1, capacity=3)
pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
def test_transshipment(self):
G = nx.DiGraph()
G.add_node("a", demand=1)
G.add_node("b", demand=-2)
G.add_node("c", demand=-2)
G.add_node("d", demand=3)
G.add_node("e", demand=-4)
G.add_node("f", demand=-4)
G.add_node("g", demand=3)
G.add_node("h", demand=2)
G.add_node("r", demand=3)
G.add_edge("a", "c", weight=3)
G.add_edge("r", "a", weight=2)
G.add_edge("b", "a", weight=9)
G.add_edge("r", "c", weight=0)
G.add_edge("b", "r", weight=-6)
G.add_edge("c", "d", weight=5)
G.add_edge("e", "r", weight=4)
G.add_edge("e", "f", weight=3)
G.add_edge("h", "b", weight=4)
G.add_edge("f", "d", weight=7)
G.add_edge("f", "h", weight=12)
G.add_edge("g", "d", weight=12)
G.add_edge("f", "g", weight=-1)
G.add_edge("h", "g", weight=-10)
flowCost, H = nx.network_simplex(G)
soln = {
"a": {"c": 0},
"b": {"a": 0, "r": 2},
"c": {"d": 3},
"d": {},
"e": {"r": 3, "f": 1},
"f": {"d": 0, "g": 3, "h": 2},
"g": {"d": 0},
"h": {"b": 0, "g": 0},
"r": {"a": 1, "c": 1},
}
assert flowCost == 41
assert nx.min_cost_flow_cost(G) == 41
assert H == soln
assert nx.min_cost_flow(G) == soln
assert nx.cost_of_flow(G, H) == 41
flowCost, H = nx.capacity_scaling(G)
assert flowCost == 41
assert nx.cost_of_flow(G, H) == 41
assert H == soln
def test_max_flow_min_cost(self):
G = nx.DiGraph()
G.add_edge("s", "a", bandwidth=6)
G.add_edge("s", "c", bandwidth=10, cost=10)
G.add_edge("a", "b", cost=6)
G.add_edge("b", "d", bandwidth=8, cost=7)
G.add_edge("c", "d", cost=10)
G.add_edge("d", "t", bandwidth=5, cost=5)
soln = {
"s": {"a": 5, "c": 0},
"a": {"b": 5},
"b": {"d": 5},
"c": {"d": 0},
"d": {"t": 5},
"t": {},
}
flow = nx.max_flow_min_cost(G, "s", "t", capacity="bandwidth", weight="cost")
assert flow == soln
assert nx.cost_of_flow(G, flow, weight="cost") == 90
G.add_edge("t", "s", cost=-100)
flowCost, flow = nx.capacity_scaling(G, capacity="bandwidth", weight="cost")
G.remove_edge("t", "s")
assert flowCost == -410
assert flow["t"]["s"] == 5
del flow["t"]["s"]
assert flow == soln
assert nx.cost_of_flow(G, flow, weight="cost") == 90
def test_digraph1(self):
# From Bradley, S. P., Hax, A. C. and Magnanti, T. L. Applied
# Mathematical Programming. Addison-Wesley, 1977.
G = nx.DiGraph()
G.add_node(1, demand=-20)
G.add_node(4, demand=5)
G.add_node(5, demand=15)
G.add_edges_from(
[
(1, 2, {"capacity": 15, "weight": 4}),
(1, 3, {"capacity": 8, "weight": 4}),
(2, 3, {"weight": 2}),
(2, 4, {"capacity": 4, "weight": 2}),
(2, 5, {"capacity": 10, "weight": 6}),
(3, 4, {"capacity": 15, "weight": 1}),
(3, 5, {"capacity": 5, "weight": 3}),
(4, 5, {"weight": 2}),
(5, 3, {"capacity": 4, "weight": 1}),
]
)
flowCost, H = nx.network_simplex(G)
soln = {
1: {2: 12, 3: 8},
2: {3: 8, 4: 4, 5: 0},
3: {4: 11, 5: 5},
4: {5: 10},
5: {3: 0},
}
assert flowCost == 150
assert nx.min_cost_flow_cost(G) == 150
assert H == soln
assert nx.min_cost_flow(G) == soln
assert nx.cost_of_flow(G, H) == 150
flowCost, H = nx.capacity_scaling(G)
assert flowCost == 150
assert H == soln
assert nx.cost_of_flow(G, H) == 150
def test_digraph2(self):
# Example from ticket #430 from mfrasca. Original source:
# http://www.cs.princeton.edu/courses/archive/spr03/cs226/lectures/mincost.4up.pdf, slide 11.
G = nx.DiGraph()
G.add_edge("s", 1, capacity=12)
G.add_edge("s", 2, capacity=6)
G.add_edge("s", 3, capacity=14)
G.add_edge(1, 2, capacity=11, weight=4)
G.add_edge(2, 3, capacity=9, weight=6)
G.add_edge(1, 4, capacity=5, weight=5)
G.add_edge(1, 5, capacity=2, weight=12)
G.add_edge(2, 5, capacity=4, weight=4)
G.add_edge(2, 6, capacity=2, weight=6)
G.add_edge(3, 6, capacity=31, weight=3)
G.add_edge(4, 5, capacity=18, weight=4)
G.add_edge(5, 6, capacity=9, weight=5)
G.add_edge(4, "t", capacity=3)
G.add_edge(5, "t", capacity=7)
G.add_edge(6, "t", capacity=22)
flow = nx.max_flow_min_cost(G, "s", "t")
soln = {
1: {2: 6, 4: 5, 5: 1},
2: {3: 6, 5: 4, 6: 2},
3: {6: 20},
4: {5: 2, "t": 3},
5: {6: 0, "t": 7},
6: {"t": 22},
"s": {1: 12, 2: 6, 3: 14},
"t": {},
}
assert flow == soln
G.add_edge("t", "s", weight=-100)
flowCost, flow = nx.capacity_scaling(G)
G.remove_edge("t", "s")
assert flow["t"]["s"] == 32
assert flowCost == -3007
del flow["t"]["s"]
assert flow == soln
assert nx.cost_of_flow(G, flow) == 193
def test_digraph3(self):
"""Combinatorial Optimization: Algorithms and Complexity,
Papadimitriou Steiglitz at page 140 has an example, 7.1, but that
admits multiple solutions, so I alter it a bit. From ticket #430
by mfrasca."""
G = nx.DiGraph()
G.add_edge("s", "a")
G["s"]["a"].update({0: 2, 1: 4})
G.add_edge("s", "b")
G["s"]["b"].update({0: 2, 1: 1})
G.add_edge("a", "b")
G["a"]["b"].update({0: 5, 1: 2})
G.add_edge("a", "t")
G["a"]["t"].update({0: 1, 1: 5})
G.add_edge("b", "a")
G["b"]["a"].update({0: 1, 1: 3})
G.add_edge("b", "t")
G["b"]["t"].update({0: 3, 1: 2})
"PS.ex.7.1: testing main function"
sol = nx.max_flow_min_cost(G, "s", "t", capacity=0, weight=1)
flow = sum(v for v in sol["s"].values())
assert 4 == flow
assert 23 == nx.cost_of_flow(G, sol, weight=1)
assert sol["s"] == {"a": 2, "b": 2}
assert sol["a"] == {"b": 1, "t": 1}
assert sol["b"] == {"a": 0, "t": 3}
assert sol["t"] == {}
G.add_edge("t", "s")
G["t"]["s"].update({1: -100})
flowCost, sol = nx.capacity_scaling(G, capacity=0, weight=1)
G.remove_edge("t", "s")
flow = sum(v for v in sol["s"].values())
assert 4 == flow
assert sol["t"]["s"] == 4
assert flowCost == -377
del sol["t"]["s"]
assert sol["s"] == {"a": 2, "b": 2}
assert sol["a"] == {"b": 1, "t": 1}
assert sol["b"] == {"a": 0, "t": 3}
assert sol["t"] == {}
assert nx.cost_of_flow(G, sol, weight=1) == 23
def test_zero_capacity_edges(self):
"""Address issue raised in ticket #617 by arv."""
G = nx.DiGraph()
G.add_edges_from(
[
(1, 2, {"capacity": 1, "weight": 1}),
(1, 5, {"capacity": 1, "weight": 1}),
(2, 3, {"capacity": 0, "weight": 1}),
(2, 5, {"capacity": 1, "weight": 1}),
(5, 3, {"capacity": 2, "weight": 1}),
(5, 4, {"capacity": 0, "weight": 1}),
(3, 4, {"capacity": 2, "weight": 1}),
]
)
G.nodes[1]["demand"] = -1
G.nodes[2]["demand"] = -1
G.nodes[4]["demand"] = 2
flowCost, H = nx.network_simplex(G)
soln = {1: {2: 0, 5: 1}, 2: {3: 0, 5: 1}, 3: {4: 2}, 4: {}, 5: {3: 2, 4: 0}}
assert flowCost == 6
assert nx.min_cost_flow_cost(G) == 6
assert H == soln
assert nx.min_cost_flow(G) == soln
assert nx.cost_of_flow(G, H) == 6
flowCost, H = nx.capacity_scaling(G)
assert flowCost == 6
assert H == soln
assert nx.cost_of_flow(G, H) == 6
def test_digon(self):
"""Check if digons are handled properly. Taken from ticket
#618 by arv."""
nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})]
edges = [
(1, 2, {"capacity": 3, "weight": 600000}),
(2, 1, {"capacity": 2, "weight": 0}),
(2, 3, {"capacity": 5, "weight": 714285}),
(3, 2, {"capacity": 2, "weight": 0}),
]
G = nx.DiGraph(edges)
G.add_nodes_from(nodes)
flowCost, H = nx.network_simplex(G)
soln = {1: {2: 0}, 2: {1: 0, 3: 4}, 3: {2: 0}}
assert flowCost == 2857140
assert nx.min_cost_flow_cost(G) == 2857140
assert H == soln
assert nx.min_cost_flow(G) == soln
assert nx.cost_of_flow(G, H) == 2857140
flowCost, H = nx.capacity_scaling(G)
assert flowCost == 2857140
assert H == soln
assert nx.cost_of_flow(G, H) == 2857140
def test_deadend(self):
"""Check if one-node cycles are handled properly. Taken from ticket
#2906 from @sshraven."""
G = nx.DiGraph()
G.add_nodes_from(range(5), demand=0)
G.nodes[4]["demand"] = -13
G.nodes[3]["demand"] = 13
G.add_edges_from([(0, 2), (0, 3), (2, 1)], capacity=20, weight=0.1)
pytest.raises(nx.NetworkXUnfeasible, nx.min_cost_flow, G)
def test_infinite_capacity_neg_digon(self):
"""An infinite capacity negative cost digon results in an unbounded
instance."""
nodes = [(1, {}), (2, {"demand": -4}), (3, {"demand": 4})]
edges = [
(1, 2, {"weight": -600}),
(2, 1, {"weight": 0}),
(2, 3, {"capacity": 5, "weight": 714285}),
(3, 2, {"capacity": 2, "weight": 0}),
]
G = nx.DiGraph(edges)
G.add_nodes_from(nodes)
pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G)
pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G)
def test_finite_capacity_neg_digon(self):
"""The digon should receive the maximum amount of flow it can handle.
Taken from ticket #749 by @chuongdo."""
G = nx.DiGraph()
G.add_edge("a", "b", capacity=1, weight=-1)
G.add_edge("b", "a", capacity=1, weight=-1)
min_cost = -2
assert nx.min_cost_flow_cost(G) == min_cost
flowCost, H = nx.capacity_scaling(G)
assert flowCost == -2
assert H == {"a": {"b": 1}, "b": {"a": 1}}
assert nx.cost_of_flow(G, H) == -2
def test_multidigraph(self):
"""Multidigraphs are acceptable."""
G = nx.MultiDiGraph()
G.add_weighted_edges_from([(1, 2, 1), (2, 3, 2)], weight="capacity")
flowCost, H = nx.network_simplex(G)
assert flowCost == 0
assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}}
flowCost, H = nx.capacity_scaling(G)
assert flowCost == 0
assert H == {1: {2: {0: 0}}, 2: {3: {0: 0}}, 3: {}}
def test_negative_selfloops(self):
"""Negative selfloops should cause an exception if uncapacitated and
always be saturated otherwise.
"""
G = nx.DiGraph()
G.add_edge(1, 1, weight=-1)
pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G)
pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G)
G[1][1]["capacity"] = 2
flowCost, H = nx.network_simplex(G)
assert flowCost == -2
assert H == {1: {1: 2}}
flowCost, H = nx.capacity_scaling(G)
assert flowCost == -2
assert H == {1: {1: 2}}
G = nx.MultiDiGraph()
G.add_edge(1, 1, "x", weight=-1)
G.add_edge(1, 1, "y", weight=1)
pytest.raises(nx.NetworkXUnbounded, nx.network_simplex, G)
pytest.raises(nx.NetworkXUnbounded, nx.capacity_scaling, G)
G[1][1]["x"]["capacity"] = 2
flowCost, H = nx.network_simplex(G)
assert flowCost == -2
assert H == {1: {1: {"x": 2, "y": 0}}}
flowCost, H = nx.capacity_scaling(G)
assert flowCost == -2
assert H == {1: {1: {"x": 2, "y": 0}}}
def test_bone_shaped(self):
# From #1283
G = nx.DiGraph()
G.add_node(0, demand=-4)
G.add_node(1, demand=2)
G.add_node(2, demand=2)
G.add_node(3, demand=4)
G.add_node(4, demand=-2)
G.add_node(5, demand=-2)
G.add_edge(0, 1, capacity=4)
G.add_edge(0, 2, capacity=4)
G.add_edge(4, 3, capacity=4)
G.add_edge(5, 3, capacity=4)
G.add_edge(0, 3, capacity=0)
flowCost, H = nx.network_simplex(G)
assert flowCost == 0
assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}}
flowCost, H = nx.capacity_scaling(G)
assert flowCost == 0
assert H == {0: {1: 2, 2: 2, 3: 0}, 1: {}, 2: {}, 3: {}, 4: {3: 2}, 5: {3: 2}}
def test_exceptions(self):
G = nx.Graph()
pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G)
pytest.raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G)
G = nx.MultiGraph()
pytest.raises(nx.NetworkXNotImplemented, nx.network_simplex, G)
pytest.raises(nx.NetworkXNotImplemented, nx.capacity_scaling, G)
G = nx.DiGraph()
pytest.raises(nx.NetworkXError, nx.network_simplex, G)
# pytest.raises(nx.NetworkXError, nx.capacity_scaling, G)
G.add_node(0, demand=float("inf"))
pytest.raises(nx.NetworkXError, nx.network_simplex, G)
pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
G.nodes[0]["demand"] = 0
G.add_node(1, demand=0)
G.add_edge(0, 1, weight=-float("inf"))
pytest.raises(nx.NetworkXError, nx.network_simplex, G)
pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
G[0][1]["weight"] = 0
G.add_edge(0, 0, weight=float("inf"))
pytest.raises(nx.NetworkXError, nx.network_simplex, G)
# pytest.raises(nx.NetworkXError, nx.capacity_scaling, G)
G[0][0]["weight"] = 0
G[0][1]["capacity"] = -1
pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
# pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
G[0][1]["capacity"] = 0
G[0][0]["capacity"] = -1
pytest.raises(nx.NetworkXUnfeasible, nx.network_simplex, G)
# pytest.raises(nx.NetworkXUnfeasible, nx.capacity_scaling, G)
def test_large(self):
fname = (
importlib.resources.files("networkx.algorithms.flow.tests")
/ "netgen-2.gpickle.bz2"
)
with bz2.BZ2File(fname, "rb") as f:
G = pickle.load(f)
flowCost, flowDict = nx.network_simplex(G)
assert 6749969302 == flowCost
assert 6749969302 == nx.cost_of_flow(G, flowDict)
flowCost, flowDict = nx.capacity_scaling(G)
assert 6749969302 == flowCost
assert 6749969302 == nx.cost_of_flow(G, flowDict)
| TestMinCostFlow |
python | mlflow__mlflow | mlflow/entities/webhook.py | {
"start": 1148,
"end": 1893
} | class ____(str, Enum):
REGISTERED_MODEL = "registered_model"
MODEL_VERSION = "model_version"
MODEL_VERSION_TAG = "model_version_tag"
MODEL_VERSION_ALIAS = "model_version_alias"
PROMPT = "prompt"
PROMPT_VERSION = "prompt_version"
PROMPT_TAG = "prompt_tag"
PROMPT_VERSION_TAG = "prompt_version_tag"
PROMPT_ALIAS = "prompt_alias"
def __str__(self) -> str:
return self.value
@classmethod
def from_proto(cls, proto: int) -> Self:
proto_name = ProtoWebhookEntity.Name(proto)
entity_value = proto_name.lower()
return cls(entity_value)
def to_proto(self) -> int:
proto_name = self.value.upper()
return ProtoWebhookEntity.Value(proto_name)
| WebhookEntity |
python | doocs__leetcode | solution/2100-2199/2198.Number of Single Divisor Triplets/Solution.py | {
"start": 0,
"end": 685
} | class ____:
def singleDivisorTriplet(self, nums: List[int]) -> int:
cnt = Counter(nums)
ans = 0
for a, x in cnt.items():
for b, y in cnt.items():
for c, z in cnt.items():
s = a + b + c
if sum(s % v == 0 for v in (a, b, c)) == 1:
if a == b:
ans += x * (x - 1) * z
elif a == c:
ans += x * (x - 1) * y
elif b == c:
ans += x * y * (y - 1)
else:
ans += x * y * z
return ans
| Solution |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/class_definition.py | {
"start": 2041,
"end": 2162
} | class ____(
# Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->AltCLIP
):
...
| AltCLIPOutput |
python | dask__distributed | distributed/utils.py | {
"start": 9187,
"end": 9476
} | class ____:
"""An awaitable object that always returns None.
Useful to return from a method that can be called in both asynchronous and
synchronous contexts"""
def __await__(self):
async def f():
return None
return f().__await__()
| NoOpAwaitable |
python | pyca__cryptography | src/cryptography/hazmat/decrepit/ciphers/algorithms.py | {
"start": 627,
"end": 1065
} | class ____(BlockCipherAlgorithm):
name = "3DES"
block_size = 64
key_sizes = frozenset([64, 128, 192])
def __init__(self, key: bytes):
if len(key) == 8:
key += key + key
elif len(key) == 16:
key += key[:8]
self.key = _verify_key_size(self, key)
@property
def key_size(self) -> int:
return len(self.key) * 8
# Not actually supported, marker for tests
| TripleDES |