language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | gevent__gevent | src/greentest/3.10/test_socket.py | {
"start": 90210,
"end": 91557
} | class ____(unittest.TestCase):
def testBluetoothConstants(self):
socket.BDADDR_ANY
socket.BDADDR_LOCAL
socket.AF_BLUETOOTH
socket.BTPROTO_RFCOMM
if sys.platform != "win32":
socket.BTPROTO_HCI
socket.SOL_HCI
socket.BTPROTO_L2CAP
if not sys.platform.startswith("freebsd"):
socket.BTPROTO_SCO
def testCreateRfcommSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support L2CAP sockets")
def testCreateL2capSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support HCI sockets")
def testCreateHciSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s:
pass
@unittest.skipIf(sys.platform == "win32" or sys.platform.startswith("freebsd"),
"windows and freebsd do not support SCO sockets")
def testCreateScoSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_SCO) as s:
pass
| BasicBluetoothTest |
python | django__django | tests/template_loader/tests.py | {
"start": 849,
"end": 7483
} | class ____(SimpleTestCase):
def test_get_template_first_engine(self):
template = get_template("template_loader/hello.html")
self.assertEqual(template.render(), "Hello! (template strings)\n")
def test_get_template_second_engine(self):
template = get_template("template_loader/goodbye.html")
self.assertEqual(template.render(), "Goodbye! (Django templates)\n")
def test_get_template_using_engine(self):
template = get_template("template_loader/hello.html", using="django")
self.assertEqual(template.render(), "Hello! (Django templates)\n")
def test_get_template_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
get_template("template_loader/unknown.html")
self.assertEqual(
e.exception.chain[-1].tried[0][0].template_name,
"template_loader/unknown.html",
)
self.assertEqual(e.exception.chain[-1].backend.name, "django")
def test_select_template_first_engine(self):
template = select_template(
["template_loader/unknown.html", "template_loader/hello.html"]
)
self.assertEqual(template.render(), "Hello! (template strings)\n")
def test_select_template_second_engine(self):
template = select_template(
["template_loader/unknown.html", "template_loader/goodbye.html"]
)
self.assertEqual(template.render(), "Goodbye! (Django templates)\n")
def test_select_template_using_engine(self):
template = select_template(
["template_loader/unknown.html", "template_loader/hello.html"],
using="django",
)
self.assertEqual(template.render(), "Hello! (Django templates)\n")
def test_select_template_empty(self):
with self.assertRaises(TemplateDoesNotExist):
select_template([])
def test_select_template_string(self):
with self.assertRaisesMessage(
TypeError,
"select_template() takes an iterable of template names but got a "
"string: 'template_loader/hello.html'. Use get_template() if you "
"want to load a single template by name.",
):
select_template("template_loader/hello.html")
def test_select_template_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
select_template(
["template_loader/unknown.html", "template_loader/missing.html"]
)
self.assertEqual(
e.exception.chain[0].tried[0][0].template_name,
"template_loader/unknown.html",
)
self.assertEqual(e.exception.chain[0].backend.name, "dummy")
self.assertEqual(
e.exception.chain[-1].tried[0][0].template_name,
"template_loader/missing.html",
)
self.assertEqual(e.exception.chain[-1].backend.name, "django")
def test_select_template_tries_all_engines_before_names(self):
template = select_template(
["template_loader/goodbye.html", "template_loader/hello.html"]
)
self.assertEqual(template.render(), "Goodbye! (Django templates)\n")
def test_render_to_string_first_engine(self):
content = render_to_string("template_loader/hello.html")
self.assertEqual(content, "Hello! (template strings)\n")
def test_render_to_string_second_engine(self):
content = render_to_string("template_loader/goodbye.html")
self.assertEqual(content, "Goodbye! (Django templates)\n")
def test_render_to_string_with_request(self):
request = RequestFactory().get("/foobar/")
content = render_to_string("template_loader/request.html", request=request)
self.assertEqual(content, "/foobar/\n")
def test_render_to_string_using_engine(self):
content = render_to_string("template_loader/hello.html", using="django")
self.assertEqual(content, "Hello! (Django templates)\n")
def test_render_to_string_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
render_to_string("template_loader/unknown.html")
self.assertEqual(
e.exception.chain[-1].tried[0][0].template_name,
"template_loader/unknown.html",
)
self.assertEqual(e.exception.chain[-1].backend.name, "django")
def test_render_to_string_with_list_first_engine(self):
content = render_to_string(
["template_loader/unknown.html", "template_loader/hello.html"]
)
self.assertEqual(content, "Hello! (template strings)\n")
def test_render_to_string_with_list_second_engine(self):
content = render_to_string(
["template_loader/unknown.html", "template_loader/goodbye.html"]
)
self.assertEqual(content, "Goodbye! (Django templates)\n")
def test_render_to_string_with_list_using_engine(self):
content = render_to_string(
["template_loader/unknown.html", "template_loader/hello.html"],
using="django",
)
self.assertEqual(content, "Hello! (Django templates)\n")
def test_render_to_string_with_list_empty(self):
with self.assertRaises(TemplateDoesNotExist):
render_to_string([])
def test_render_to_string_with_list_not_found(self):
with self.assertRaises(TemplateDoesNotExist) as e:
render_to_string(
["template_loader/unknown.html", "template_loader/missing.html"]
)
self.assertEqual(
e.exception.chain[0].tried[0][0].template_name,
"template_loader/unknown.html",
)
self.assertEqual(e.exception.chain[0].backend.name, "dummy")
self.assertEqual(
e.exception.chain[1].tried[0][0].template_name,
"template_loader/unknown.html",
)
self.assertEqual(e.exception.chain[1].backend.name, "django")
self.assertEqual(
e.exception.chain[2].tried[0][0].template_name,
"template_loader/missing.html",
)
self.assertEqual(e.exception.chain[2].backend.name, "dummy")
self.assertEqual(
e.exception.chain[3].tried[0][0].template_name,
"template_loader/missing.html",
)
self.assertEqual(e.exception.chain[3].backend.name, "django")
def test_render_to_string_with_list_tries_all_engines_before_names(self):
content = render_to_string(
["template_loader/goodbye.html", "template_loader/hello.html"]
)
self.assertEqual(content, "Goodbye! (Django templates)\n")
| TemplateLoaderTests |
python | joke2k__faker | faker/providers/job/fi_FI/__init__.py | {
"start": 42,
"end": 6012
} | class ____(BaseProvider):
# jobs parsed from a list provided by State Treasury:
# http://www.valtiokonttori.fi/download/noname/%7BF69EA5BD-C919-49FE-8D51-91434E4B030D%7D/82158
jobs = [
"Agrologi",
"Aikuiskoulutusjohtaja",
"Aineenopettaja",
"Ajojärjestelijä",
"Akatemian tutkijatohtori",
"Aktuaari",
"Alakoulujen apulaisrehtori",
"Alikersantti",
"Alkoholiasiain ylitarkastaja",
"Back office asiantuntija",
"Ballistikko",
"Bioanalyytikko",
"Brand manager",
"Budjettiassistentti",
"Business controller",
"Cc-yritysneuvoja",
"Cert-fi -yksikön päällikkö",
"Communication officer",
"Consul",
"Counsellor",
"Data-analyytikko",
"Dekaanin sihteeri",
"Dieettikeittäjä",
"Digitaalisen kokeen toteuttaja",
"Diplomi-insinööri",
"Dokumentoija",
"Dosentti",
"Eakr-koordinaattori",
"Editoija",
"Edunvalvontasihteeri",
"Egr-ohjaaja",
"Ekokampuskoordinaattori",
"Elektroniikka-asentaja",
"Elinkeinopäällikkö",
"Elokuvakonemestari",
"Elputeknikko",
"Eläinlääkintöneuvos",
"Faktori",
"Farmakologi",
"Fidipro-professori",
"Filmiteknikko",
"Financial controller",
"Floristi",
"Fysioterapeutti",
"Fyysikko",
"Gemmologi",
"Gentax-järjestelmäasiantuntija",
"Geofyysikko",
"Gis-asiantuntija",
"Gm huto pääkäyttäjä",
"Graafikko",
"Haastattelija",
"Hakukoordinaattori",
"Hallimestari",
"Hammashoitaja",
"Hankearkkitehti",
"Harjaantumisopetuksen erityisluokanopettaja",
"Havainnontarkastaja",
"Helikopterihuoltoaliupseeri",
"Henkikirjoittaja",
"Johtava kuluttajaoikeusneuvoja",
"Ict-arkkitehti",
"Ihmisoikeuskeskuksen johtaja",
"Iktyonomi",
"Ilma-aluksen päällikkö",
"Iltapäiväkerhon ohjaaja",
"Immunologi",
"Info-palvelupisteen hoitaja",
"Innoittaja",
"Jakeluvastaava",
"Jalkaväen tarkastaja",
"Jaoksen johtaja",
"Jatkokoulutettava eläinlääkäri",
"Jhs-projektipäällikkö",
"Johdon asiantuntija",
"Joukkoliikenneasiantuntija",
"Julkaisu- ja markkinointisuunnittelija",
"Junamies",
"Juontaja",
"Kaapeli-insinööri",
"Kabinettisihteeri",
"Kadettikoulun johtaja",
"Kahvila-apulainen",
"Kairaaja",
"Kalabiologi",
"Kampanjapäällikkö",
"Kanavanhoitaja",
"Kapellimestari",
"Karjamestari",
"Laadunvarmistuksen asiantuntija",
"Laboraattori",
"Laillisuusvalvontasihteeri",
"Laki- ja henkilöstöasiainjohtaja",
"Lapsiasiavaltuutettu",
"Laskennan kehittämispäällikkö",
"Lataamoinsinööri",
"Lautakuntasihteeri",
"Lavastaja",
"Maa- ja vesirakennustyöntekijä",
"Maisema-arkkitehti",
"Majakkateknikko",
"Maksatusasiantuntija",
"Malli",
"Mareografihoitaja",
"Mastoteknikko",
"Matemaatikko",
"Media- ja kulttuurikoordinaattori",
"Neuropsykologi",
"Nimikkeistöpäällikkö",
"Nosturinkuljettaja",
"Notaari",
"Nukutuslääkäri",
"Numerointisihteeri",
"Nuorempi konstaapeli",
"Näytearkistonhoitaja",
"Näönkäytön asiantuntija",
"Obduktiokoordinaattori",
"Observaattori",
"Offset-monistaja",
"Ohjaaja",
"Oikaisulautakunnan puheenjohtaja",
"Oleskelulupakäsittelijä",
"Omistajaohjausyksikön johtaja",
"Ompelija",
"Opas",
"Operaatiopäällikkö",
"Padonhoitaja",
"Paikallisjohtaja",
"Pakolaiskeskuksen johtaja",
"Palkanlaskentapäällikkö",
"Panostaja",
"Paperikonservaattori",
"Parturi-kampaaja",
"Passi- ja maahantulolupavirkailija/toimistovirkailija",
"Pataljoonan komentaja",
"Pedagogi",
"Radioasentaja",
"Rahakammion johtaja",
"Raideliikennejohtaja",
"Rajaeläinlääkäri",
"Rakennemuutosjohtaja",
"Raportoinnin asiantuntija",
"Ratainsinööri",
"Rauhanturvaaja",
"Ravintohaastattelija",
"Rehtori",
"Saamelaisarkistonhoitaja",
"Sadehavainnontekijä",
"Sairaala-apulainen",
"Saksan, englannin ja ruotsinkielen lehtori",
"Salkunhoitaja",
"Sanomakeskusaliupseeri",
"Satamapäällikkö",
"Seismologi",
"Sektorijohtaja",
"Selvittelijä",
"Taajuussuunnittelija",
"Taideamanuenssi",
"Tallentaja",
"Tanssija",
"Tapahtumakoordinaattori",
"Tarjoilija",
"Tasa-arvoneuvos",
"Tavaraliikennelupakäsittelijä",
"Team finland kasvu- ja kansainvälistymiskoordinaattori",
"Teemapäällikkö",
"Ulkoasiainneuvos",
"Ulosottojohtaja",
"Ultraäänihoitaja",
"Unix-asiantuntija",
"Upseeri",
"Urakonsultti",
"Urheiluohjaaja",
"Vaaitsija",
"Vac-yhdyshenkilö",
"Vahingonkorvausasiantuntija",
"Vaihteenhoitaja",
"Vakuustoimittaja",
"Valaistusmestari",
"Vammaisasiamies",
"Vanhempi tutkijainsinööri",
"Vapaa-ajan ohjaaja",
"Varadekaani",
"Www-asiantuntija",
"Yhdenvertaisuusvaltuutettu",
"Yhteinen tuntiopettaja",
"Yksikkösihteeri",
"Yleinen edunvalvoja",
"Yliaktuaari",
"Ylläpidon palvelupäällikkö",
"Yläasteen rehtori",
"Ympärintönsuojeluyksikön päällikkö",
"Yrittäjyysneuvoja",
"Yva-koordinaattori",
]
| Provider |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_descriptors.py | {
"start": 1934,
"end": 2016
} | class ____:
def prepare_value(self, owner, name, value):
return value
| Foo |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column.py | {
"start": 109929,
"end": 111898
} | class ____(_CategoricalColumn,
collections.namedtuple(
'_IdentityCategoricalColumn',
('key', 'num_buckets', 'default_value'))):
"""See `categorical_column_with_identity`."""
@property
def name(self):
return self.key
@property
def _parse_example_spec(self):
return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}
def _transform_feature(self, inputs):
input_tensor = _to_sparse_input_and_drop_ignore_values(inputs.get(self.key))
if not input_tensor.dtype.is_integer:
raise ValueError('Invalid input, not integer. key: {} dtype: {}'.format(
self.key, input_tensor.dtype))
values = input_tensor.values
if input_tensor.values.dtype != dtypes.int64:
values = math_ops.cast(values, dtypes.int64, name='values')
if self.default_value is not None:
num_buckets = math_ops.cast(
self.num_buckets, dtypes.int64, name='num_buckets')
zero = math_ops.cast(0, dtypes.int64, name='zero')
# Assign default for out-of-range values.
values = array_ops.where(
math_ops.logical_or(
values < zero, values >= num_buckets, name='out_of_range'),
array_ops.fill(
dims=array_ops.shape(values),
value=math_ops.cast(self.default_value, dtypes.int64),
name='default_values'), values)
return sparse_tensor_lib.SparseTensor(
indices=input_tensor.indices,
values=values,
dense_shape=input_tensor.dense_shape)
@property
def _num_buckets(self):
"""Returns number of buckets in this sparse feature."""
return self.num_buckets
def _get_sparse_tensors(self,
inputs,
weight_collections=None,
trainable=None):
return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
| _IdentityCategoricalColumn |
python | django-extensions__django-extensions | tests/test_module_in_project_dir.py | {
"start": 65,
"end": 119
} | class ____(BaseIncludedClass):
pass
| FourthDerivedClass |
python | jazzband__tablib | src/tablib/formats/_ods.py | {
"start": 1597,
"end": 8523
} | class ____:
title = 'ods'
extensions = ('ods',)
@classmethod
def export_set(cls, dataset):
"""Returns ODF representation of Dataset."""
wb = opendocument.OpenDocumentSpreadsheet()
wb.automaticstyles.addElement(bold)
wb.styles.addElement(date_style)
wb.automaticstyles.addElement(ds)
wb.styles.addElement(time_style)
wb.automaticstyles.addElement(ts)
wb.styles.addElement(datetime_style)
wb.automaticstyles.addElement(dts)
ws = table.Table(name=dataset.title if dataset.title else 'Tablib Dataset')
wb.spreadsheet.addElement(ws)
cls.dset_sheet(dataset, ws)
stream = BytesIO()
wb.save(stream)
return stream.getvalue()
@classmethod
def export_book(cls, databook):
"""Returns ODF representation of DataBook."""
wb = opendocument.OpenDocumentSpreadsheet()
wb.automaticstyles.addElement(bold)
for i, dset in enumerate(databook._datasets):
ws = table.Table(name=dset.title if dset.title else f"Sheet{i}")
wb.spreadsheet.addElement(ws)
cls.dset_sheet(dset, ws)
stream = BytesIO()
wb.save(stream)
return stream.getvalue()
@classmethod
def import_sheet(cls, dset, sheet, headers=True, skip_lines=0):
"""Populate dataset `dset` with sheet data."""
dset.title = sheet.getAttribute('name')
def is_real_cell(cell):
return cell.hasChildNodes() or not cell.getAttribute('numbercolumnsrepeated')
rows = (row for row in sheet.childNodes if row.tagName == "table:table-row")
for i, row in enumerate(rows):
if i < skip_lines:
continue
row_vals = [cls.read_cell(cell) for cell in row.childNodes if is_real_cell(cell)]
if not row_vals:
continue
if i == skip_lines and headers:
dset.headers = row_vals
else:
if i > skip_lines and len(row_vals) < dset.width:
row_vals += [''] * (dset.width - len(row_vals))
dset.append(row_vals)
@classmethod
def read_cell(cls, cell, value_type=None):
def convert_date(val):
if 'T' in val:
return dt.datetime.strptime(val, "%Y-%m-%dT%H:%M:%S")
else:
return dt.datetime.strptime(val, "%Y-%m-%d").date()
if value_type is None:
value_type = cell.getAttribute('valuetype')
if value_type == 'date':
date_value = cell.getAttribute('datevalue')
if date_value:
return convert_date(date_value)
if value_type == 'time':
time_value = cell.getAttribute('timevalue')
try:
return dt.datetime.strptime(time_value, "PT%HH%MM%SS").time()
except ValueError:
# backwards compatibility for times exported with older tablib versions
return dt.datetime.strptime(time_value, "%H:%M:%S").time()
if value_type == 'boolean':
bool_value = cell.getAttribute('booleanvalue')
return bool_value == 'true'
if not cell.childNodes:
value = getattr(cell, 'data', None)
if value is None:
try:
value = cell.getAttribute('value')
except ValueError:
pass
if value is None:
return ''
if value_type == 'float':
return float(value)
if value_type == 'date':
return convert_date(value)
return value # Any other type default to 'string'
for subnode in cell.childNodes:
return cls.read_cell(subnode, value_type)
@classmethod
def import_set(cls, dset, in_stream, headers=True, skip_lines=0):
"""Populate dataset `dset` from ODS stream."""
dset.wipe()
ods_book = opendocument.load(in_stream)
for sheet in ods_book.spreadsheet.childNodes:
if sheet.qname[1] == 'table':
cls.import_sheet(dset, sheet, headers, skip_lines)
@classmethod
def import_book(cls, dbook, in_stream, headers=True):
"""Populate databook `dbook` from ODS stream."""
dbook.wipe()
ods_book = opendocument.load(in_stream)
for sheet in ods_book.spreadsheet.childNodes:
if sheet.qname[1] != 'table':
continue
dset = tablib.Dataset()
cls.import_sheet(dset, sheet, headers)
dbook.add_sheet(dset)
@classmethod
def dset_sheet(cls, dataset, ws):
"""Completes given worksheet from given Dataset."""
_package = dataset._package(dicts=False)
for i, sep in enumerate(dataset._separators):
_offset = i
_package.insert((sep[0] + _offset), (sep[1],))
for row_number, row in enumerate(_package, start=1):
is_header = row_number == 1 and dataset.headers
style = bold if is_header else None
odf_row = table.TableRow(stylename=style)
ws.addElement(odf_row)
for j, col in enumerate(row):
if isinstance(col, numbers.Number):
cell = table.TableCell(valuetype="float", value=col)
elif isinstance(col, dt.datetime):
cell = table.TableCell(
valuetype="date",
datevalue=col.strftime('%Y-%m-%dT%H:%M:%S'),
stylename=dts,
)
cell.addElement(text.P(text=col.strftime('%Y-%m-%d %H:%M:%S')))
elif isinstance(col, dt.date):
date_value = col.strftime('%Y-%m-%d')
cell = table.TableCell(valuetype="date", datevalue=date_value, stylename=ds)
cell.addElement(text.P(text=date_value))
elif isinstance(col, dt.time):
cell = table.TableCell(
valuetype="time",
timevalue=col.strftime('PT%HH%MM%SS'),
stylename=ts,
)
cell.addElement(text.P(text=col.strftime('%H:%M:%S')))
elif col is None:
cell = table.TableCell(valuetype="void")
else:
cell = table.TableCell(valuetype="string")
cell.addElement(text.P(text=str(col), stylename=style))
odf_row.addElement(cell)
@classmethod
def detect(cls, stream):
if isinstance(stream, bytes):
# load expects a file-like object.
stream = BytesIO(stream)
try:
opendocument.load(stream)
return True
except Exception:
return False
| ODSFormat |
python | django__django | tests/file_storage/tests.py | {
"start": 24378,
"end": 28052
} | class ____(FileStorageTests):
storage_class = FileSystemStorage
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.temp_dir)
self.storage = self.storage_class(
location=self.temp_dir, base_url="/test_media_url/", allow_overwrite=True
)
def test_save_overwrite_behavior(self):
"""Saving to same file name twice overwrites the first file."""
name = "test.file"
self.assertFalse(self.storage.exists(name))
content_1 = b"content one"
content_2 = b"second content"
f_1 = ContentFile(content_1)
f_2 = ContentFile(content_2)
stored_name_1 = self.storage.save(name, f_1)
try:
self.assertEqual(stored_name_1, name)
self.assertTrue(self.storage.exists(name))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_1)
stored_name_2 = self.storage.save(name, f_2)
self.assertEqual(stored_name_2, name)
self.assertTrue(self.storage.exists(name))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_2)
finally:
self.storage.delete(name)
def test_save_overwrite_behavior_truncate(self):
name = "test.file"
original_content = b"content extra extra extra"
new_smaller_content = b"content"
self.storage.save(name, ContentFile(original_content))
try:
self.storage.save(name, ContentFile(new_smaller_content))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), new_smaller_content)
finally:
self.storage.delete(name)
def test_save_overwrite_behavior_temp_file(self):
"""Saving to same file name twice overwrites the first file."""
name = "test.file"
self.assertFalse(self.storage.exists(name))
content_1 = b"content one"
content_2 = b"second content"
f_1 = TemporaryUploadedFile("tmp1", "text/plain", 11, "utf8")
self.addCleanup(f_1.close)
f_1.write(content_1)
f_1.seek(0)
f_2 = TemporaryUploadedFile("tmp2", "text/plain", 14, "utf8")
self.addCleanup(f_2.close)
f_2.write(content_2)
f_2.seek(0)
stored_name_1 = self.storage.save(name, f_1)
try:
self.assertEqual(stored_name_1, name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_1)
stored_name_2 = self.storage.save(name, f_2)
self.assertEqual(stored_name_2, name)
self.assertTrue(os.path.exists(os.path.join(self.temp_dir, name)))
with self.storage.open(name) as fp:
self.assertEqual(fp.read(), content_2)
finally:
self.storage.delete(name)
def test_file_name_truncation(self):
name = "test_long_file_name.txt"
file = ContentFile(b"content")
stored_name = self.storage.save(name, file, max_length=10)
self.addCleanup(self.storage.delete, stored_name)
self.assertEqual(stored_name, "test_l.txt")
self.assertEqual(len(stored_name), 10)
def test_file_name_truncation_extension_too_long(self):
name = "file_name.longext"
file = ContentFile(b"content")
with self.assertRaisesMessage(
SuspiciousFileOperation, "Storage can not find an available filename"
):
self.storage.save(name, file, max_length=5)
| OverwritingStorageTests |
python | getsentry__sentry | src/sentry/explore/endpoints/explore_saved_query_detail.py | {
"start": 1930,
"end": 5786
} | class ____(ExploreSavedQueryBase):
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"GET": ApiPublishStatus.PRIVATE,
"PUT": ApiPublishStatus.PRIVATE,
}
def has_feature(self, organization, request):
return features.has(
"organizations:visibility-explore-view", organization, actor=request.user
)
@extend_schema(
operation_id="Retrieve an Organization's Explore Saved Query",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
ExploreSavedQueryParams.EXPLORE_SAVED_QUERY_ID,
],
request=None,
responses={
200: ExploreSavedQueryModelSerializer,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=ExploreExamples.EXPLORE_SAVED_QUERY_GET_RESPONSE,
)
def get(self, request: Request, organization, query) -> Response:
"""
Retrieve a saved query.
"""
if not self.has_feature(organization, request):
return self.respond(status=404)
self.check_object_permissions(request, query)
return Response(serialize(query, request.user), status=200)
@extend_schema(
operation_id="Edit an Organization's Explore Saved Query",
parameters=[GlobalParams.ORG_ID_OR_SLUG, ExploreSavedQueryParams.EXPLORE_SAVED_QUERY_ID],
request=ExploreSavedQuerySerializer,
responses={
200: ExploreSavedQueryModelSerializer,
400: RESPONSE_BAD_REQUEST,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=ExploreExamples.EXPLORE_SAVED_QUERY_GET_RESPONSE,
)
def put(self, request: Request, organization: Organization, query) -> Response:
"""
Modify a saved query.
"""
if not self.has_feature(organization, request):
return self.respond(status=404)
self.check_object_permissions(request, query)
if query.prebuilt_id is not None:
return self.respond(status=400, message="Cannot modify prebuilt queries")
try:
params = self.get_filter_params(
request, organization, project_ids=request.data.get("projects")
)
except NoProjects:
raise ParseError(detail="No Projects found, join a Team")
serializer = ExploreSavedQuerySerializer(
data=request.data,
context={"params": params, "organization": organization, "user": request.user},
)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
data = serializer.validated_data
query.update(
organization=organization,
name=data["name"],
query=data["query"],
dataset=data["dataset"],
changed_reason=None,
)
query.set_projects(data["project_ids"])
return Response(serialize(query), status=200)
@extend_schema(
operation_id="Delete an Organization's Explore Saved Query",
parameters=[GlobalParams.ORG_ID_OR_SLUG, ExploreSavedQueryParams.EXPLORE_SAVED_QUERY_ID],
responses={
204: RESPONSE_NO_CONTENT,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
)
def delete(self, request: Request, organization, query) -> Response:
"""
Delete a saved query.
"""
if not self.has_feature(organization, request):
return self.respond(status=404)
self.check_object_permissions(request, query)
if query.prebuilt_id is not None:
return self.respond(status=400, message="Cannot delete prebuilt queries")
query.delete()
return Response(status=204)
@region_silo_endpoint
| ExploreSavedQueryDetailEndpoint |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 1047995,
"end": 1079458
} | class ____(FieldChannelMixin, core.PositionFieldDef):
r"""
Y schema wrapper.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
axis : dict, :class:`Axis`, None
An object defining properties of axis's gridlines, ticks and labels. If ``null``,
the axis for the encoding channel will be removed.
**Default value:** If undefined, default `axis properties
<https://vega.github.io/vega-lite/docs/axis.html>`__ are applied.
**See also:** `axis <https://vega.github.io/vega-lite/docs/axis.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : bool, dict, Literal['binned'], :class:`BinParams`, None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
impute : dict, :class:`ImputeParams`, None
An object defining the properties of the Impute Operation to be applied. The field
value of the other positional channel is taken as ``key`` of the ``Impute``
Operation. The field of the ``color`` channel if specified is used as ``groupby`` of
the ``Impute`` Operation.
**See also:** `impute <https://vega.github.io/vega-lite/docs/impute.html>`__
documentation.
scale : dict, :class:`Scale`, None
An object defining properties of the channel's scale, which is the function that
transforms values in the data domain (numbers, dates, strings, etc) to visual values
(pixels, colors, sizes) of the encoding channels.
If ``null``, the scale will be `disabled and the data value will be directly encoded
<https://vega.github.io/vega-lite/docs/scale.html#disable>`__.
**Default value:** If undefined, default `scale properties
<https://vega.github.io/vega-lite/docs/scale.html>`__ are applied.
**See also:** `scale <https://vega.github.io/vega-lite/docs/scale.html>`__
documentation.
sort : dict, :class:`Sort`, Sequence[str], Sequence[bool], Sequence[float], :class:`SortArray`, :class:`SortOrder`, :class:`AllSortString`, :class:`SortByChannel`, :class:`SortByEncoding`, :class:`EncodingSortField`, :class:`SortByChannelDesc`, Sequence[dict, :class:`DateTime`], Literal['-x', '-y', '-color', '-fill', '-stroke', '-strokeWidth', '-size', '-shape', '-fillOpacity', '-strokeOpacity', '-opacity', '-text', 'ascending', 'descending', 'x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text'], None
Sort order for the encoded field.
For continuous fields (quantitative or temporal), ``sort`` can be either
``"ascending"`` or ``"descending"``.
For discrete fields, ``sort`` can be one of the following:
* ``"ascending"`` or ``"descending"`` -- for sorting by the values' natural order in
JavaScript.
* `A string indicating an encoding channel name to sort by
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__ (e.g.,
``"x"`` or ``"y"``) with an optional minus prefix for descending sort (e.g.,
``"-x"`` to sort by x-field, descending). This channel string is short-form of `a
sort-by-encoding definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-by-encoding>`__. For
example, ``"sort": "-x"`` is equivalent to ``"sort": {"encoding": "x", "order":
"descending"}``.
* `A sort field definition
<https://vega.github.io/vega-lite/docs/sort.html#sort-field>`__ for sorting by
another field.
* `An array specifying the field values in preferred order
<https://vega.github.io/vega-lite/docs/sort.html#sort-array>`__. In this case, the
sort order will obey the values in the array, followed by any unspecified values
in their original order. For discrete time field, values in the sort array can be
`date-time definition objects
<https://vega.github.io/vega-lite/docs/datetime.html>`__. In addition, for time
units ``"month"`` and ``"day"``, the values can be the month or day names (case
insensitive) or their 3-letter initials (e.g., ``"Mon"``, ``"Tue"``).
* ``null`` indicating no sort.
**Default value:** ``"ascending"``
**Note:** ``null`` and sorting by another channel is not supported for ``row`` and
``column``.
**See also:** `sort <https://vega.github.io/vega-lite/docs/sort.html>`__
documentation.
stack : bool, :class:`StackOffset`, Literal['zero', 'center', 'normalize'], None
Type of stacking offset if the field should be stacked. ``stack`` is only applicable
for ``x``, ``y``, ``theta``, and ``radius`` channels with continuous domains. For
example, ``stack`` of ``y`` can be used to customize stacking for a vertical bar
chart.
``stack`` can be one of the following values:
* ``"zero"`` or ``true``: stacking with baseline offset at zero value of the scale
(for creating typical stacked `bar
<https://vega.github.io/vega-lite/docs/stack.html#bar>`__ and `area
<https://vega.github.io/vega-lite/docs/stack.html#area>`__ chart).
* ``"normalize"`` - stacking with normalized domain (for creating `normalized
stacked bar and area charts
<https://vega.github.io/vega-lite/docs/stack.html#normalized>`__ and pie charts
`with percentage tooltip
<https://vega.github.io/vega-lite/docs/arc.html#tooltip>`__).
* ``"center"`` - stacking with center baseline (for `streamgraph
<https://vega.github.io/vega-lite/docs/stack.html#streamgraph>`__).
* ``null`` or ``false`` - No-stacking. This will produce layered `bar
<https://vega.github.io/vega-lite/docs/stack.html#layered-bar-chart>`__ and area
chart.
**Default value:** ``zero`` for plots with all of the following conditions are true:
(1) the mark is ``bar``, ``area``, or ``arc``; (2) the stacked measure channel (x or
y) has a linear scale; (3) At least one of non-position channels mapped to an
unaggregated field that is different from x and y. Otherwise, ``null`` by default.
**See also:** `stack <https://vega.github.io/vega-lite/docs/stack.html>`__
documentation.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`StandardType`, Literal['quantitative', 'ordinal', 'temporal', 'nominal']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
# Class-level flags consumed by Altair's channel machinery, not by end users.
# NOTE(review): False here presumably defers shorthand/schema validation until
# chart compilation instead of at construction time — confirm against the
# channel mixin this class inherits from.
_class_is_valid_at_instantiation = False
# The Vega-Lite encoding channel this wrapper populates.
_encoding_name = "y"
# Typing overloads for the ``aggregate`` setter: either a plain aggregate-op
# value, or a keyword-only argmax/argmin field reference. The stubs describe
# accepted call signatures for type checkers only; the runtime setter is
# presumably installed by the file's ``@with_property_setters`` decorator —
# confirm.
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> Y: ...
@overload
def aggregate(self, *, argmax: Optional[str | SchemaBase] = Undefined) -> Y: ...
@overload
def aggregate(self, *, argmin: Optional[str | SchemaBase] = Undefined) -> Y: ...
# Typing overloads for the ``axis`` setter: either a full ``Axis`` object
# (``None`` disables the axis) or individual axis properties as keyword
# arguments. Type-checker stubs only; runtime behavior is supplied elsewhere
# in the file.
@overload
def axis(self, _: Axis | None, /) -> Y: ...
@overload
def axis(
    self,
    *,
    aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
    bandPosition: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    description: Optional[str | Parameter | SchemaBase | Map] = Undefined,
    domain: Optional[bool] = Undefined,
    domainCap: Optional[Parameter | SchemaBase | Map | StrokeCap_T] = Undefined,
    domainColor: Optional[
        str | Parameter | SchemaBase | Map | ColorName_T | None
    ] = Undefined,
    domainDash: Optional[
        Parameter | SchemaBase | Sequence[float] | Map
    ] = Undefined,
    domainDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    domainOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    domainWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    format: Optional[str | SchemaBase | Map] = Undefined,
    formatType: Optional[str] = Undefined,
    grid: Optional[bool] = Undefined,
    gridCap: Optional[Parameter | SchemaBase | Map | StrokeCap_T] = Undefined,
    gridColor: Optional[
        str | Parameter | SchemaBase | Map | ColorName_T | None
    ] = Undefined,
    gridDash: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
    gridDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    gridOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    gridWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    labelAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
    labelAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    labelBaseline: Optional[
        Parameter | SchemaBase | Map | TextBaseline_T
    ] = Undefined,
    labelBound: Optional[bool | float | Parameter | SchemaBase | Map] = Undefined,
    labelColor: Optional[
        str | Parameter | SchemaBase | Map | ColorName_T | None
    ] = Undefined,
    labelExpr: Optional[str] = Undefined,
    labelFlush: Optional[bool | float] = Undefined,
    labelFlushOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    labelFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
    labelFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    labelFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
    labelFontWeight: Optional[
        Parameter | SchemaBase | Map | FontWeight_T
    ] = Undefined,
    labelLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    labelLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    labelOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    labelOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    labelOverlap: Optional[
        bool | Parameter | SchemaBase | Literal["greedy", "parity"] | Map
    ] = Undefined,
    labelPadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    labelSeparation: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    labels: Optional[bool] = Undefined,
    maxExtent: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    minExtent: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    orient: Optional[Parameter | SchemaBase | Map | AxisOrient_T] = Undefined,
    position: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    style: Optional[str | Sequence[str]] = Undefined,
    tickBand: Optional[
        Parameter | SchemaBase | Literal["center", "extent"] | Map
    ] = Undefined,
    tickCap: Optional[Parameter | SchemaBase | Map | StrokeCap_T] = Undefined,
    tickColor: Optional[
        str | Parameter | SchemaBase | Map | ColorName_T | None
    ] = Undefined,
    tickCount: Optional[
        float | Parameter | SchemaBase | Map | TimeInterval_T
    ] = Undefined,
    tickDash: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
    tickDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    tickExtra: Optional[bool] = Undefined,
    tickMinStep: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    tickOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    tickOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    tickRound: Optional[bool] = Undefined,
    tickSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    tickWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    ticks: Optional[bool] = Undefined,
    title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
    titleAlign: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
    titleAnchor: Optional[Parameter | SchemaBase | Map | TitleAnchor_T] = Undefined,
    titleAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    titleBaseline: Optional[
        Parameter | SchemaBase | Map | TextBaseline_T
    ] = Undefined,
    titleColor: Optional[
        str | Parameter | SchemaBase | Map | ColorName_T | None
    ] = Undefined,
    titleFont: Optional[str | Parameter | SchemaBase | Map] = Undefined,
    titleFontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    titleFontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
    titleFontWeight: Optional[
        Parameter | SchemaBase | Map | FontWeight_T
    ] = Undefined,
    titleLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    titleLineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    titleOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    titlePadding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    titleX: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    titleY: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    translate: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    values: Optional[
        Parameter
        | SchemaBase
        | Sequence[str]
        | Sequence[bool]
        | Sequence[float]
        | Sequence[Temporal | SchemaBase | Map]
        | Map
    ] = Undefined,
    zindex: Optional[float] = Undefined,
) -> Y: ...
# Typing overload for the ``bandPosition`` setter (relative position within a
# band, as a float). Type-checker stub only.
@overload
def bandPosition(self, _: float, /) -> Y: ...
# Typing overloads for the ``bin`` setter: a flag/``"binned"``/``Bin`` object
# (``None`` disables binning), or individual binning parameters as keyword
# arguments. Type-checker stubs only.
@overload
def bin(self, _: bool | Bin | Literal["binned"] | None, /) -> Y: ...
@overload
def bin(
    self,
    *,
    anchor: Optional[float] = Undefined,
    base: Optional[float] = Undefined,
    binned: Optional[bool] = Undefined,
    divide: Optional[Sequence[float]] = Undefined,
    extent: Optional[Parameter | SchemaBase | Sequence[float] | Map] = Undefined,
    maxbins: Optional[float] = Undefined,
    minstep: Optional[float] = Undefined,
    nice: Optional[bool] = Undefined,
    step: Optional[float] = Undefined,
    steps: Optional[Sequence[float]] = Undefined,
) -> Y: ...
# Typing overloads for the ``field`` setter: a field name / repeat reference,
# or a keyword-only ``repeat`` selector. Type-checker stubs only.
@overload
def field(self, _: str | RepeatRef, /) -> Y: ...
@overload
def field(
    self,
    *,
    repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> Y: ...
# Typing overloads for the ``impute`` setter: a full ``Impute`` object
# (``None`` disables imputation) or individual imputation parameters as
# keyword arguments. Type-checker stubs only.
@overload
def impute(self, _: Impute | None, /) -> Y: ...
@overload
def impute(
    self,
    *,
    frame: Optional[Sequence[float | None]] = Undefined,
    keyvals: Optional[SchemaBase | Sequence[Any] | Map] = Undefined,
    method: Optional[SchemaBase | ImputeMethod_T] = Undefined,
    value: Optional[Any] = Undefined,
) -> Y: ...
# Typing overloads for the ``scale`` setter: a full ``Scale`` object (``None``
# disables the scale) or individual scale properties as keyword arguments.
# Type-checker stubs only.
@overload
def scale(self, _: Scale | None, /) -> Y: ...
@overload
def scale(
    self,
    *,
    align: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    base: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    bins: Optional[SchemaBase | Sequence[float] | Map] = Undefined,
    clamp: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
    constant: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    domain: Optional[
        Parameter
        | SchemaBase
        | Literal["unaggregated"]
        | Sequence[
            str | bool | float | Temporal | Parameter | SchemaBase | Map | None
        ]
        | Map
    ] = Undefined,
    domainMax: Optional[
        float | Temporal | Parameter | SchemaBase | Map
    ] = Undefined,
    domainMid: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    domainMin: Optional[
        float | Temporal | Parameter | SchemaBase | Map
    ] = Undefined,
    domainRaw: Optional[Parameter | SchemaBase | Map] = Undefined,
    exponent: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    interpolate: Optional[
        Parameter | SchemaBase | Map | ScaleInterpolateEnum_T
    ] = Undefined,
    nice: Optional[
        bool | float | Parameter | SchemaBase | Map | TimeInterval_T
    ] = Undefined,
    padding: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    paddingInner: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    paddingOuter: Optional[float | Parameter | SchemaBase | Map] = Undefined,
    range: Optional[
        SchemaBase
        | Sequence[str | float | Parameter | SchemaBase | Sequence[float] | Map]
        | Map
        | RangeEnum_T
    ] = Undefined,
    rangeMax: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
    rangeMin: Optional[str | float | Parameter | SchemaBase | Map] = Undefined,
    reverse: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
    round: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
    scheme: Optional[Parameter | SchemaBase | Map | ColorScheme_T] = Undefined,
    type: Optional[SchemaBase | ScaleType_T] = Undefined,
    zero: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
) -> Y: ...
# Typing overloads for the ``sort`` setter: an explicit value array / sort
# string (``None`` disables sorting), a sort-by-field definition, or a
# sort-by-encoding definition. Type-checker stubs only.
@overload
def sort(
    self,
    _: Sequence[str]
    | Sequence[bool]
    | Sequence[float]
    | Sequence[DateTime | Temporal]
    | AllSortString_T
    | None,
    /,
) -> Y: ...
@overload
def sort(
    self,
    *,
    field: Optional[str | SchemaBase | Map] = Undefined,
    op: Optional[SchemaBase | NonArgAggregateOp_T] = Undefined,
    order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> Y: ...
@overload
def sort(
    self,
    *,
    encoding: Optional[SchemaBase | SortByChannel_T] = Undefined,
    order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
) -> Y: ...
# Typing overload for the ``stack`` setter (stack-offset mode, boolean flag,
# or ``None`` for no stacking). Type-checker stub only.
@overload
def stack(self, _: bool | StackOffset_T | None, /) -> Y: ...
# Typing overloads for the ``timeUnit`` setter: a time-unit name / params
# object, or individual time-unit parameters as keyword arguments.
# Type-checker stubs only.
@overload
def timeUnit(
    self,
    _: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
    /,
) -> Y: ...
@overload
def timeUnit(
    self,
    *,
    binned: Optional[bool] = Undefined,
    maxbins: Optional[float] = Undefined,
    step: Optional[float] = Undefined,
    unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
    utc: Optional[bool] = Undefined,
) -> Y: ...
# Typing overload for the ``title`` setter (string, list of strings, or
# ``None`` to remove the title). Type-checker stub only.
@overload
def title(self, _: str | Sequence[str] | None, /) -> Y: ...
# Typing overload for the ``type`` setter (one of the standard measurement
# types). Type-checker stub only.
@overload
def type(self, _: StandardType_T, /) -> Y: ...
def __init__(
    self,
    shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
    aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
    axis: Optional[SchemaBase | Map | None] = Undefined,
    bandPosition: Optional[float] = Undefined,
    bin: Optional[bool | SchemaBase | Literal["binned"] | Map | None] = Undefined,
    field: Optional[str | SchemaBase | Map] = Undefined,
    impute: Optional[SchemaBase | Map | None] = Undefined,
    scale: Optional[SchemaBase | Map | None] = Undefined,
    sort: Optional[
        SchemaBase
        | Sequence[str]
        | Sequence[bool]
        | Sequence[float]
        | Sequence[Temporal | SchemaBase | Map]
        | Map
        | AllSortString_T
        | None
    ] = Undefined,
    stack: Optional[bool | SchemaBase | StackOffset_T | None] = Undefined,
    timeUnit: Optional[
        SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
    ] = Undefined,
    title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
    type: Optional[SchemaBase | StandardType_T] = Undefined,
    **kwds,
):
    """Build the channel definition by forwarding every option to the
    generated superclass constructor.

    All parameters default to ``Undefined`` so that unspecified options are
    simply omitted from the emitted Vega-Lite spec; any extra keyword
    arguments in ``kwds`` are passed through unchanged.
    """
    # Gather the explicitly declared channel options into one mapping and
    # splat it together with the pass-through kwargs. Using two separate
    # unpackings keeps the original behavior of raising TypeError if ``kwds``
    # repeats one of the declared parameter names.
    channel_options = {
        "shorthand": shorthand,
        "aggregate": aggregate,
        "axis": axis,
        "bandPosition": bandPosition,
        "bin": bin,
        "field": field,
        "impute": impute,
        "scale": scale,
        "sort": sort,
        "stack": stack,
        "timeUnit": timeUnit,
        "title": title,
        "type": type,
    }
    super().__init__(**channel_options, **kwds)
@with_property_setters
| Y |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 60963,
"end": 61361
} | class ____(BiffRecord):
"""
This record represents a cell that contains an RK value (encoded integer or
floating-point value). If a floating-point value cannot be encoded to an RK value,
a NUMBER record will be written.
"""
_REC_ID = 0x027E
def __init__(self, row, col, xf_index, rk_encoded):
self._rec_data = pack('<3Hi', row, col, xf_index, rk_encoded)
| RKRecord |
python | falconry__falcon | falcon/bench/nuts/nuts/controllers/root.py | {
"start": 134,
"end": 472
} | class ____:
def __init__(self, account_id):
self.account_id = account_id
@expose(content_type='text/plain')
def test(self):
user_agent = request.headers['User-Agent'] # NOQA
limit = request.params.get('limit', '10') # NOQA
response.headers.update(_headers)
return _body
| TestController |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/config_migrations.py | {
"start": 4775,
"end": 7521
} | class ____:
"""
This class stands for migrating the config at runtime.
This migration is intended for backwards compatibility with the previous version, so existing secrets configurations gets migrated to new path.
Starting from `2.2.0`, the `client_id`, `client_secret` and `access_token` will be placed at `credentials` path.
"""
@classmethod
def _should_migrate(cls, config: Mapping[str, Any]) -> bool:
"""
This method determines whether the config should be migrated to nest existing fields at credentials.
It is assumed if credentials does not exist on configuration, `client_id`, `client_secret` and `access_token` exists on root path.
Returns:
> True, if the migration is necessary
> False, otherwise.
"""
return "access_token" in config or "client_id" in config or "client_secret" in config
@classmethod
def migrate(cls, args: List[str], source: Source) -> None:
"""
This method checks the input args, should the config be migrated,
transform if neccessary and emit the CONTROL message.
"""
# get config path
config_path = AirbyteEntrypoint(source).extract_config(args)
# proceed only if `--config` arg is provided
if config_path:
# read the existing config
config = source.read_config(config_path)
# migration check
if cls._should_migrate(config):
emit_configuration_as_airbyte_control_message(cls._modify_and_save(config_path, source, config))
@classmethod
def _transform(cls, config: Mapping[str, Any]) -> Mapping[str, Any]:
# transform the config
if "credentials" not in config:
config["credentials"] = {
"auth_type": "Service",
}
if "access_token" in config:
config["credentials"]["access_token"] = config.pop("access_token")
if "client_id" in config:
config["credentials"]["auth_type"] = "Client"
config["credentials"]["client_id"] = config.pop("client_id")
if "client_secret" in config:
config["credentials"]["auth_type"] = "Client"
config["credentials"]["client_secret"] = config.pop("client_secret")
# return transformed config
return config
@classmethod
def _modify_and_save(cls, config_path: str, source: Source, config: Mapping[str, Any]) -> Mapping[str, Any]:
# modify the config
migrated_config = cls._transform(config)
# save the config
source.write_config(migrated_config, config_path)
# return modified config
return migrated_config
| MigrateSecretsPathInConnector |
python | allegroai__clearml | clearml/automation/scheduler.py | {
"start": 536,
"end": 3309
} | class ____(object):
name = attrib(type=str, default=None)
base_task_id = attrib(type=str, default=None)
base_function = attrib(type=Callable, default=None)
queue = attrib(type=str, default=None)
target_project = attrib(type=str, default=None)
single_instance = attrib(type=bool, default=False)
task_parameters = attrib(type=dict, default={})
task_overrides = attrib(type=dict, default={})
clone_task = attrib(type=bool, default=True)
_executed_instances = attrib(type=list, default=None)
def to_dict(self, full: bool = False) -> Dict[str, Any]:
return {k: v for k, v in self.__dict__.items() if not callable(v) and (full or not str(k).startswith("_"))}
def update(self, a_job: Union[Dict, "BaseScheduleJob"]) -> "BaseScheduleJob":
converters = {a.name: a.converter for a in getattr(self, "__attrs_attrs__", [])}
for k, v in (a_job.to_dict(full=True) if not isinstance(a_job, dict) else a_job).items():
if v is not None and not callable(getattr(self, k, v)):
setattr(self, k, converters[k](v) if converters.get(k) else v)
return self
def verify(self) -> None:
if self.base_function and not self.name:
raise ValueError("Entry 'name' must be supplied for function scheduling")
if self.base_task_id and not self.queue:
raise ValueError("Target 'queue' must be provided for function scheduling")
if not self.base_function and not self.base_task_id:
raise ValueError("Either schedule function or task-id must be provided")
def get_last_executed_task_id(self) -> Optional[str]:
return self._executed_instances[-1] if self._executed_instances else None
def run(self, task_id: Optional[str]) -> None:
if task_id:
# make sure we have a new instance
if not self._executed_instances:
self._executed_instances = []
self._executed_instances.append(str(task_id))
def get_resolved_target_project(self) -> Optional[str]:
if not self.base_task_id or not self.target_project:
return self.target_project
# noinspection PyBroadException
try:
task = Task.get_task(task_id=self.base_task_id)
# noinspection PyProtectedMember
if (
PipelineController._tag in task.get_system_tags()
and "/{}/".format(PipelineController._project_section) not in self.target_project
):
# noinspection PyProtectedMember
return "{}/{}/{}".format(self.target_project, PipelineController._project_section, task.name)
except Exception:
pass
return self.target_project
@attrs
| BaseScheduleJob |
python | plotly__plotly.py | plotly/graph_objs/isosurface/_caps.py | {
"start": 233,
"end": 3851
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "isosurface"
_path_str = "isosurface.caps"
_valid_props = {"x", "y", "z"}
@property
def x(self):
"""
The 'x' property is an instance of X
that may be specified as:
- An instance of :class:`plotly.graph_objs.isosurface.caps.X`
- A dict of string/value properties that will be passed
to the X constructor
Returns
-------
plotly.graph_objs.isosurface.caps.X
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
@property
def y(self):
"""
The 'y' property is an instance of Y
that may be specified as:
- An instance of :class:`plotly.graph_objs.isosurface.caps.Y`
- A dict of string/value properties that will be passed
to the Y constructor
Returns
-------
plotly.graph_objs.isosurface.caps.Y
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
@property
def z(self):
"""
The 'z' property is an instance of Z
that may be specified as:
- An instance of :class:`plotly.graph_objs.isosurface.caps.Z`
- A dict of string/value properties that will be passed
to the Z constructor
Returns
-------
plotly.graph_objs.isosurface.caps.Z
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
@property
def _prop_descriptions(self):
return """\
x
:class:`plotly.graph_objects.isosurface.caps.X`
instance or dict with compatible properties
y
:class:`plotly.graph_objects.isosurface.caps.Y`
instance or dict with compatible properties
z
:class:`plotly.graph_objects.isosurface.caps.Z`
instance or dict with compatible properties
"""
def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
"""
Construct a new Caps object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.Caps`
x
:class:`plotly.graph_objects.isosurface.caps.X`
instance or dict with compatible properties
y
:class:`plotly.graph_objects.isosurface.caps.Y`
instance or dict with compatible properties
z
:class:`plotly.graph_objects.isosurface.caps.Z`
instance or dict with compatible properties
Returns
-------
Caps
"""
super().__init__("caps")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.isosurface.Caps
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.Caps`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("x", arg, x)
self._set_property("y", arg, y)
self._set_property("z", arg, z)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Caps |
python | getsentry__sentry | src/sentry/api/endpoints/debug_files.py | {
"start": 15293,
"end": 15776
} | class ____(ProjectEndpoint):
owner = ApiOwner.OWNERS_INGEST
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
permission_classes = (ProjectReleasePermission,)
def get(self, request: Request, project: Project) -> Response:
checksums = request.GET.getlist("checksums")
missing = ProjectDebugFile.objects.find_missing(checksums, project=project)
return Response({"missing": missing})
@region_silo_endpoint
| UnknownDebugFilesEndpoint |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/numerics_test.py | {
"start": 1448,
"end": 4593
} | class ____(test_util.DTensorBaseTest):
def setUp(self):
super(NumericTest, self).setUp()
self.skipForDeviceType(['TPU'],
'all tests require 8 TPU cores.',
unless_device_count_equals_to=8)
test_util.reset_logical_devices('CPU', 8)
accelerator_util.initialize_accelerator_system()
self.stateless_random_seed = [0, 1]
def _create_mesh(self, topology, device):
device_ids = test_util.create_device_ids_array(topology)
return Mesh(
_MESH_DIMS,
device_ids,
np.ravel(device_ids).tolist(),
test_util.create_device_list(topology, device),
)
# Tests AllReduce numerics with and without mixed precision reduce enabled,
# based on go/dtensor-numerics.
@parameterized.named_parameters(('_without_mixed_precision_reduce', False),
('_with_mixed_precision_reduce', True))
def test_all_reduce(self, enable_mixed_precision_reduce):
if enable_mixed_precision_reduce:
os.environ['DTENSOR_ENABLE_MIXED_PRECISION_REDUCE'] = ''
# Override group size since we are testing on smaller mesh.
os.environ['DTENSOR_REDUCE_IN_BFLOAT16_MAX_GROUP_SIZE'] = '4'
else:
if 'DTENSOR_ENABLE_MIXED_PRECISION_REDUCE' in os.environ:
del os.environ['DTENSOR_ENABLE_MIXED_PRECISION_REDUCE']
@polymorphic_function.function
def _compute_reduction(inp):
return math_ops.reduce_sum(inp, axis=[2])
input_tensor = stateless_random_ops.stateless_random_uniform(
shape=(8, 8, 8, 64),
seed=self.stateless_random_seed,
minval=-5.0,
maxval=5.0,
dtype=dtypes.bfloat16,
)
expected = _compute_reduction(input_tensor)
# Compute reduction on 8x1, since dim 2 is unsharded AllReduce will not be
# needed.
mesh_8x1 = self._create_mesh((8, 1), 'TPU')
input_8x1 = numpy_util.pack_numpy(
input_tensor,
Layout([_MESH_DIM_X, UNSHARDED, UNSHARDED, UNSHARDED], mesh_8x1),
)
result_8x1 = _compute_reduction(input_8x1)
result_8x1_np = numpy_util.to_numpy(result_8x1)
# Compute reduction on 1x8, AllReduce will be needed since dim 2 is sharded.
mesh_1x8 = self._create_mesh((1, 8), 'TPU')
input_1x8 = numpy_util.pack_numpy(
input_tensor,
Layout([_MESH_DIM_X, UNSHARDED, _MESH_DIM_Y, UNSHARDED], mesh_1x8),
)
result_1x8 = _compute_reduction(input_1x8)
result_1x8_np = numpy_util.to_numpy(result_1x8)
self.assertEqual(result_8x1.dtype, dtypes.bfloat16)
self.assertEqual(result_1x8.dtype, dtypes.bfloat16)
# Mixed precision does not apply since AllReduce was not used, result will
# always be close to the expected value.
self.assertAllClose(result_8x1_np, expected, atol=1e-5, rtol=1e-5)
# AllReduce was needed, so result will be more accurate if mixed precision
# is enabled.
if enable_mixed_precision_reduce:
self.assertAllClose(result_1x8_np, expected, atol=1e-5, rtol=1e-5)
else:
self.assertNotAllClose(result_1x8_np, expected, atol=1e-5, rtol=1e-5)
if __name__ == '__main__':
test.main()
| NumericTest |
python | walkccc__LeetCode | solutions/120. Triangle/120.py | {
"start": 0,
"end": 283
} | class ____:
def minimumTotal(self, triangle: list[list[int]]) -> int:
for i in range(len(triangle) - 2, -1, -1):
for j in range(i + 1):
triangle[i][j] += min(triangle[i + 1][j],
triangle[i + 1][j + 1])
return triangle[0][0]
| Solution |
python | pytest-dev__pytest-django | tests/test_database.py | {
"start": 8837,
"end": 16136
} | class ____:
"Tests for the django_db marker."
@pytest.mark.django_db
def test_access(self) -> None:
Item.objects.create(name="spam")
@pytest.mark.django_db
def test_clean_db(self) -> None:
# Relies on the order: test_access created an object.
assert Item.objects.count() == 0
@pytest.mark.django_db
def test_transactions_disabled(self) -> None:
if not connection.features.supports_transactions:
pytest.skip("transactions required for this test")
assert connection.in_atomic_block
@pytest.mark.django_db(transaction=False)
def test_transactions_disabled_explicit(self) -> None:
if not connection.features.supports_transactions:
pytest.skip("transactions required for this test")
assert connection.in_atomic_block
@pytest.mark.django_db(transaction=True)
def test_transactions_enabled(self) -> None:
if not connection.features.supports_transactions:
pytest.skip("transactions required for this test")
assert not connection.in_atomic_block
@pytest.mark.django_db
def test_reset_sequences_disabled(self, request: pytest.FixtureRequest) -> None:
marker = request.node.get_closest_marker("django_db")
assert not marker.kwargs
@pytest.mark.django_db(reset_sequences=True)
def test_reset_sequences_enabled(self, request: pytest.FixtureRequest) -> None:
marker = request.node.get_closest_marker("django_db")
assert marker.kwargs["reset_sequences"]
@pytest.mark.django_db(transaction=True, reset_sequences=True)
def test_transaction_reset_sequences_enabled(self, request: pytest.FixtureRequest) -> None:
marker = request.node.get_closest_marker("django_db")
assert marker.kwargs["reset_sequences"]
@pytest.mark.django_db(databases=["default", "replica", "second"])
def test_databases(self, request: pytest.FixtureRequest) -> None:
marker = request.node.get_closest_marker("django_db")
assert marker.kwargs["databases"] == ["default", "replica", "second"]
@pytest.mark.django_db(databases=["second"])
def test_second_database(self) -> None:
SecondItem.objects.create(name="spam")
    @pytest.mark.django_db(databases=["default"])
    def test_not_allowed_database(self) -> None:
        # Only "default" is opted in via the marker, so ORM access that routes
        # to the "second" database (SecondItem — see test_second_database)
        # must be blocked by pytest-django, for reads and writes alike.
        with pytest.raises(AssertionError, match="not allowed"):
            SecondItem.objects.count()
        with pytest.raises(AssertionError, match="not allowed"):
            SecondItem.objects.create(name="spam")
@pytest.mark.django_db(databases=["replica"])
def test_replica_database(self) -> None:
Item.objects.using("replica").count()
@pytest.mark.django_db(databases=["replica"])
def test_replica_database_not_allowed(self) -> None:
with pytest.raises(AssertionError, match="not allowed"):
Item.objects.count()
    @pytest.mark.django_db(transaction=True, databases=["default", "replica"])
    def test_replica_mirrors_default_database(self) -> None:
        # Presumably "replica" is configured as a MIRROR of "default" in the
        # test settings, so writes through either alias land in the same
        # underlying database — TODO confirm against the project's DATABASES.
        # transaction=True is required so both aliases observe committed rows.
        Item.objects.create(name="spam")
        Item.objects.using("replica").create(name="spam")
        assert Item.objects.count() == 2
        assert Item.objects.using("replica").count() == 2
@pytest.mark.django_db(databases="__all__")
def test_all_databases(self) -> None:
Item.objects.count()
Item.objects.create(name="spam")
SecondItem.objects.count()
SecondItem.objects.create(name="spam")
@pytest.mark.django_db
def test_serialized_rollback_disabled(self, request: pytest.FixtureRequest):
marker = request.node.get_closest_marker("django_db")
assert not marker.kwargs
# The test works when transactions are not supported, but it interacts
# badly with other tests.
@pytest.mark.skipif("not connection.features.supports_transactions")
@pytest.mark.django_db(serialized_rollback=True)
def test_serialized_rollback_enabled(self, request: pytest.FixtureRequest):
marker = request.node.get_closest_marker("django_db")
assert marker.kwargs["serialized_rollback"]
@pytest.mark.django_db
def test_available_apps_disabled(self, request: pytest.FixtureRequest) -> None:
marker = request.node.get_closest_marker("django_db")
assert not marker.kwargs
@pytest.mark.django_db(available_apps=["pytest_django_test.app"])
def test_available_apps_enabled(self, request: pytest.FixtureRequest) -> None:
marker = request.node.get_closest_marker("django_db")
assert marker.kwargs["available_apps"] == ["pytest_django_test.app"]
@pytest.mark.django_db
def test_available_apps_default(self) -> None:
from django.apps import apps
from django.conf import settings
for app in settings.INSTALLED_APPS:
assert apps.is_installed(app)
@pytest.mark.django_db(available_apps=["pytest_django_test.app"])
def test_available_apps_limited(self) -> None:
from django.apps import apps
from django.conf import settings
assert apps.is_installed("pytest_django_test.app")
for app in settings.INSTALLED_APPS:
if app != "pytest_django_test.app":
assert not apps.is_installed(app)
def test_unittest_interaction(django_pytester: DjangoPytester) -> None:
"Test that (non-Django) unittests cannot access the DB."
django_pytester.create_test_module(
"""
import pytest
import unittest
from .app.models import Item
class TestCase_setupClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
Item.objects.create(name='foo')
def test_db_access_1(self):
Item.objects.count() == 1
class TestCase_setUp(unittest.TestCase):
@classmethod
def setUp(cls):
Item.objects.create(name='foo')
def test_db_access_2(self):
Item.objects.count() == 1
class TestCase(unittest.TestCase):
def test_db_access_3(self):
Item.objects.count() == 1
"""
)
result = django_pytester.runpytest_subprocess("-v", "--reuse-db")
result.stdout.fnmatch_lines(
[
"*test_db_access_1 ERROR*",
"*test_db_access_2 FAILED*",
"*test_db_access_3 FAILED*",
"*ERROR at setup of TestCase_setupClass.test_db_access_1*",
'*RuntimeError: Database access not allowed, use the "django_db" mark, '
'or the "db" or "transactional_db" fixtures to enable it.',
]
)
def test_django_testcase_multi_db(django_pytester: DjangoPytester) -> None:
    """Test that Django TestCase multi-db support works."""
    # Run a generated test module in a pytest subprocess: a TestCase that opts
    # into both databases via `databases = [...]` must be able to query each.
    # NOTE(review): the inner `... == 0` lines are bare comparisons, not
    # asserts — they only exercise DB access; the values are never checked.
    django_pytester.create_test_module(
        """
        import pytest
        from django.test import TestCase
        from .app.models import Item, SecondItem

        class TestCase(TestCase):
            databases = ["default", "second"]

            def test_db_access(self):
                Item.objects.count() == 0
                SecondItem.objects.count() == 0
        """
    )
    result = django_pytester.runpytest_subprocess("-v", "--reuse-db")
    # One inner test, and it must pass (i.e. both DB accesses were allowed).
    result.assert_outcomes(passed=1)
| TestDatabaseMarker |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 82480,
"end": 82702
} | class ____(_PrintableStructure):
_fields_ = [
('eventType', c_ulonglong),
('gpuId', c_uint)
]
_fmt_ = {'eventType': "0x%08X"}
nvmlSystemEventSetWaitRequest_v1 = 0x1000020
| c_nvmlSystemEventData_v1_t |
python | milvus-io__pymilvus | pymilvus/milvus_client/milvus_client.py | {
"start": 1285,
"end": 69423
} | class ____(BaseMilvusClient):
"""The Milvus Client"""
# pylint: disable=logging-too-many-args, too-many-instance-attributes
    def __init__(
        self,
        uri: str = "http://localhost:19530",
        user: str = "",
        password: str = "",
        db_name: str = "",
        token: str = "",
        timeout: Optional[float] = None,
        **kwargs,
    ) -> None:
        """A client for the common Milvus use case.

        This client attempts to hide away the complexity of using Pymilvus. In a lot of cases
        what the user wants is a simple wrapper that supports adding data, deleting data, and
        searching.

        Args:
            uri (str, optional): The connection address to use to connect to the
                instance. Defaults to "http://localhost:19530". Another example:
                "https://username:password@in01-12a.aws-us-west-2.vectordb.zillizcloud.com:19538
            user (str, optional): User name for authentication. Defaults to "".
            password (str, optional): Password for authentication. Defaults to "".
            db_name (str, optional): Database to connect to. Defaults to "".
            token (str, optional): Credential token, an alternative to user/password.
            timeout (float, optional): What timeout to use for function calls. Defaults
                to None.
                Unit: second
        """
        # Connection alias; every subsequent RPC goes through this registered connection.
        self._using = create_connection(
            uri, token, db_name, user=user, password=password, timeout=timeout, **kwargs
        )
        # True when the server identifies itself as "milvus" (vs. a managed variant).
        self.is_self_hosted = bool(self.get_server_type() == "milvus")
def create_collection(
self,
collection_name: str,
dimension: Optional[int] = None,
primary_field_name: str = "id", # default is "id"
id_type: str = "int", # or "string",
vector_field_name: str = "vector", # default is "vector"
metric_type: str = "COSINE",
auto_id: bool = False,
timeout: Optional[float] = None,
schema: Optional[CollectionSchema] = None,
index_params: Optional[IndexParams] = None,
**kwargs,
):
if schema is None:
return self._fast_create_collection(
collection_name,
dimension,
primary_field_name=primary_field_name,
id_type=id_type,
vector_field_name=vector_field_name,
metric_type=metric_type,
auto_id=auto_id,
timeout=timeout,
**kwargs,
)
return self._create_collection_with_schema(
collection_name, schema, index_params, timeout=timeout, **kwargs
)
def _fast_create_collection(
self,
collection_name: str,
dimension: int,
primary_field_name: str = "id", # default is "id"
id_type: Union[DataType, str] = DataType.INT64, # or "string",
vector_field_name: str = "vector", # default is "vector"
metric_type: str = "COSINE",
auto_id: bool = False,
timeout: Optional[float] = None,
**kwargs,
):
validate_param("dimension", dimension, int)
if "enable_dynamic_field" not in kwargs:
kwargs["enable_dynamic_field"] = True
schema = self.create_schema(auto_id=auto_id, **kwargs)
if id_type in ("int", DataType.INT64):
pk_data_type = DataType.INT64
elif id_type in ("string", "str", DataType.VARCHAR):
pk_data_type = DataType.VARCHAR
else:
raise PrimaryKeyException(message=ExceptionsMessage.PrimaryFieldType)
pk_args = {}
if "max_length" in kwargs and pk_data_type == DataType.VARCHAR:
pk_args["max_length"] = kwargs["max_length"]
schema.add_field(primary_field_name, pk_data_type, is_primary=True, **pk_args)
schema.add_field(vector_field_name, DataType.FLOAT_VECTOR, dim=dimension)
schema.verify()
conn = self._get_connection()
if "consistency_level" not in kwargs:
kwargs["consistency_level"] = DEFAULT_CONSISTENCY_LEVEL
conn.create_collection(collection_name, schema, timeout=timeout, **kwargs)
index_params = IndexParams()
index_params.add_index(vector_field_name, index_type="AUTOINDEX", metric_type=metric_type)
self.create_index(collection_name, index_params, timeout=timeout)
self.load_collection(collection_name, timeout=timeout)
def create_index(
self,
collection_name: str,
index_params: IndexParams,
timeout: Optional[float] = None,
**kwargs,
):
validate_param("collection_name", collection_name, str)
validate_param("index_params", index_params, IndexParams)
if len(index_params) == 0:
raise ParamError(message="IndexParams is empty, no index can be created")
for index_param in index_params:
self._create_index(collection_name, index_param, timeout=timeout, **kwargs)
def _create_index(
self,
collection_name: str,
index_param: IndexParam,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
conn.create_index(
collection_name,
index_param.field_name,
index_param.get_index_configs(),
timeout=timeout,
index_name=index_param.index_name,
**kwargs,
)
def insert(
self,
collection_name: str,
data: Union[Dict, List[Dict]],
timeout: Optional[float] = None,
partition_name: Optional[str] = "",
**kwargs,
) -> Dict:
"""Insert data into the collection.
If the Milvus Client was initiated without an existing Collection, the first dict passed
in will be used to initiate the collection.
Args:
data (List[Dict[str, any]]): A list of dicts to pass in. If list not provided, will
cast to list.
timeout (float, optional): The timeout to use, will override init timeout. Defaults
to None.
Raises:
DataNotMatchException: If the data has missing fields an exception will be thrown.
MilvusException: General Milvus error on insert.
Returns:
Dict: Number of rows that were inserted and the inserted primary key list.
"""
# If no data provided, we cannot input anything
if isinstance(data, Dict):
data = [data]
msg = "wrong type of argument 'data',"
msg += f"expected 'Dict' or list of 'Dict', got '{type(data).__name__}'"
if not isinstance(data, List):
raise TypeError(msg)
if len(data) == 0:
return {"insert_count": 0, "ids": []}
conn = self._get_connection()
# Insert into the collection.
try:
res = conn.insert_rows(
collection_name, data, partition_name=partition_name, timeout=timeout, **kwargs
)
except Exception as ex:
raise ex from ex
return OmitZeroDict(
{
"insert_count": res.insert_count,
"ids": res.primary_keys,
"cost": res.cost,
}
)
def upsert(
self,
collection_name: str,
data: Union[Dict, List[Dict]],
timeout: Optional[float] = None,
partition_name: Optional[str] = "",
**kwargs,
) -> Dict:
"""Upsert data into the collection.
Args:
collection_name (str): Name of the collection to upsert into.
data (List[Dict[str, any]]): A list of dicts to pass in. If list not provided, will
cast to list.
timeout (float, optional): The timeout to use, will override init timeout. Defaults
to None.
partition_name (str, optional): Name of the partition to upsert into.
**kwargs (dict): Extra keyword arguments.
* *partial_update* (bool, optional): Whether this is a partial update operation.
If True, only the specified fields will be updated while others remain unchanged
Default is False.
Raises:
DataNotMatchException: If the data has missing fields an exception will be thrown.
MilvusException: General Milvus error on upsert.
Returns:
Dict: Number of rows that were upserted.
"""
# If no data provided, we cannot input anything
if isinstance(data, Dict):
data = [data]
msg = "wrong type of argument 'data',"
msg += f"expected 'Dict' or list of 'Dict', got '{type(data).__name__}'"
if not isinstance(data, List):
raise TypeError(msg)
if len(data) == 0:
return {"upsert_count": 0, "ids": []}
conn = self._get_connection()
# Upsert into the collection.
try:
res = conn.upsert_rows(
collection_name, data, partition_name=partition_name, timeout=timeout, **kwargs
)
except Exception as ex:
raise ex from ex
return OmitZeroDict(
{
"upsert_count": res.upsert_count,
"cost": res.cost,
# milvus server supports upsert on autoid=ture from v2.4.15
# upsert on autoid=ture will return new ids for user
"ids": res.primary_keys,
}
)
def hybrid_search(
self,
collection_name: str,
reqs: List[AnnSearchRequest],
ranker: Union[BaseRanker, Function],
limit: int = 10,
output_fields: Optional[List[str]] = None,
timeout: Optional[float] = None,
partition_names: Optional[List[str]] = None,
**kwargs,
) -> List[List[dict]]:
"""Conducts multi vector similarity search with a rerank for rearrangement.
Args:
collection_name(``string``): The name of collection.
reqs (``List[AnnSearchRequest]``): The vector search requests.
ranker (``Union[BaseRanker, Function]``): The ranker.
limit (``int``): The max number of returned record, also known as `topk`.
partition_names (``List[str]``, optional): The names of partitions to search on.
output_fields (``List[str]``, optional):
The name of fields to return in the search result. Can only get scalar fields.
round_decimal (``int``, optional):
The specified number of decimal places of returned distance.
Defaults to -1 means no round to returned distance.
timeout (``float``, optional): A duration of time in seconds to allow for the RPC.
If timeout is set to None, the client keeps waiting until the server
responds or an error occurs.
**kwargs (``dict``): Optional search params
* *offset* (``int``, optinal)
offset for pagination.
* *consistency_level* (``str/int``, optional)
Which consistency level to use when searching in the collection.
Options of consistency level: Strong, Bounded, Eventually, Session, Customized.
Note: this parameter overwrites the same one specified when creating collection,
if no consistency level was specified, search will use the
consistency level when you create the collection.
Returns:
List[List[dict]]: A nested list of dicts containing the result data.
Raises:
MilvusException: If anything goes wrong
"""
conn = self._get_connection()
return conn.hybrid_search(
collection_name,
reqs,
ranker,
limit=limit,
partition_names=partition_names,
output_fields=output_fields,
timeout=timeout,
**kwargs,
)
def search(
self,
collection_name: str,
data: Union[List[list], list],
filter: str = "",
limit: int = 10,
output_fields: Optional[List[str]] = None,
search_params: Optional[dict] = None,
timeout: Optional[float] = None,
partition_names: Optional[List[str]] = None,
anns_field: Optional[str] = None,
ranker: Optional[Union[Function, FunctionScore]] = None,
**kwargs,
) -> List[List[dict]]:
"""Search for a query vector/vectors.
In order for the search to process, a collection needs to have been either provided
at init or data needs to have been inserted.
Args:
data (Union[List[list], list, List[EmbeddingList]]): The vector/vectors/embedding
list to search.
limit (int, optional): How many results to return per search. Defaults to 10.
filter(str, optional): A filter to use for the search. Defaults to None.
output_fields (List[str], optional): List of which field values to return. If None
specified, only primary fields including distances will be returned.
search_params (dict, optional): The search params to use for the search.
ranker (Function, optional): The ranker to use for the search.
timeout (float, optional): Timeout to use, overides the client level assigned at init.
Defaults to None.
Raises:
ValueError: The collection being searched doesnt exist. Need to insert data first.
Returns:
List[List[dict]]: A nested list of dicts containing the result data. Embeddings are
not included in the result data.
"""
# Convert EmbeddingList objects to flat arrays if present
if isinstance(data, list) and data and isinstance(data[0], EmbeddingList):
data = [emb_list.to_flat_array() for emb_list in data]
kwargs["is_embedding_list"] = True
conn = self._get_connection()
return conn.search(
collection_name,
data,
anns_field or "",
search_params or {},
expression=filter,
limit=limit,
output_fields=output_fields,
partition_names=partition_names,
expr_params=kwargs.pop("filter_params", {}),
timeout=timeout,
ranker=ranker,
**kwargs,
)
def query(
self,
collection_name: str,
filter: str = "",
output_fields: Optional[List[str]] = None,
timeout: Optional[float] = None,
ids: Optional[Union[List, str, int]] = None,
partition_names: Optional[List[str]] = None,
**kwargs,
) -> List[dict]:
"""Query for entries in the Collection.
Args:
filter (str): The filter to use for the query.
output_fields (List[str], optional): List of which field values to return. If None
specified, all fields excluding vector field will be returned.
partitions (List[str], optional): Which partitions to perform query. Defaults to None.
timeout (float, optional): Timeout to use, overides the client level assigned at init.
Defaults to None.
Raises:
ValueError: Missing collection.
Returns:
List[dict]: A list of result dicts, vectors are not included.
"""
if filter and not isinstance(filter, str):
raise DataTypeNotMatchException(message=ExceptionsMessage.ExprType % type(filter))
if filter and ids is not None:
raise ParamError(message=ExceptionsMessage.AmbiguousQueryFilterParam)
if isinstance(ids, (int, str)):
ids = [ids]
conn = self._get_connection()
if ids:
schema_dict, _ = conn._get_schema_from_cache_or_remote(collection_name, timeout=timeout)
filter = self._pack_pks_expr(schema_dict, ids)
if not output_fields:
output_fields = ["*"]
return conn.query(
collection_name,
expr=filter,
output_fields=output_fields,
partition_names=partition_names,
timeout=timeout,
expr_params=kwargs.pop("filter_params", {}),
**kwargs,
)
def query_iterator(
self,
collection_name: str,
batch_size: Optional[int] = 1000,
limit: Optional[int] = UNLIMITED,
filter: Optional[str] = "",
output_fields: Optional[List[str]] = None,
partition_names: Optional[List[str]] = None,
timeout: Optional[float] = None,
**kwargs,
):
if filter is not None and not isinstance(filter, str):
raise DataTypeNotMatchException(message=ExceptionsMessage.ExprType % type(filter))
conn = self._get_connection()
# set up schema for iterator
schema_dict = conn.describe_collection(collection_name, timeout=timeout, **kwargs)
return QueryIterator(
connection=conn,
collection_name=collection_name,
batch_size=batch_size,
limit=limit,
expr=filter,
output_fields=output_fields,
partition_names=partition_names,
schema=schema_dict,
timeout=timeout,
**kwargs,
)
def search_iterator(
self,
collection_name: str,
data: Union[List[list], list],
batch_size: Optional[int] = 1000,
filter: Optional[str] = None,
limit: Optional[int] = UNLIMITED,
output_fields: Optional[List[str]] = None,
search_params: Optional[dict] = None,
timeout: Optional[float] = None,
partition_names: Optional[List[str]] = None,
anns_field: Optional[str] = None,
round_decimal: int = -1,
**kwargs,
) -> Union[SearchIteratorV2, SearchIterator]:
"""Creates an iterator for searching vectors in batches.
This method returns an iterator that performs vector similarity search in batches,
which is useful when dealing with large result sets. It automatically attempts to use
Search Iterator V2 if supported by the server, otherwise falls back to V1.
Args:
collection_name (str): Name of the collection to search in.
data (Union[List[list], list]): Vector data to search with. For V2, only single vector
search is supported.
batch_size (int, optional): Number of results to fetch per batch. Defaults to 1000.
Must be between 1 and MAX_BATCH_SIZE.
filter (str, optional): Filtering expression to filter the results. Defaults to None.
limit (int, optional): Total number of results to return. Defaults to UNLIMITED.
(Deprecated) This parameter is deprecated and will be removed in a future release.
output_fields (List[str], optional): Fields to return in the results.
search_params (dict, optional): Parameters for the search operation.
timeout (float, optional): Timeout in seconds for each RPC call.
partition_names (List[str], optional): Names of partitions to search in.
anns_field (str, optional): Name of the vector field to search. Can be empty when
there is only one vector field in the collection.
round_decimal (int, optional): Number of decimal places for distance values.
Defaults to -1 (no rounding).
**kwargs: Additional arguments to pass to the search operation.
Returns:
SearchIterator: An iterator object that yields search results in batches.
Raises:
MilvusException: If the search operation fails.
ParamError: If the input parameters are invalid (e.g., invalid batch_size or multiple
vectors in data when using V2).
Examples:
>>> # Search with iterator
>>> iterator = client.search_iterator(
... collection_name="my_collection",
... data=[[0.1, 0.2]],
... batch_size=100
... )
"""
conn = self._get_connection()
# compatibility logic, change this when support get version from server
try:
return SearchIteratorV2(
connection=conn,
collection_name=collection_name,
data=data,
batch_size=batch_size,
limit=limit,
filter=filter,
output_fields=output_fields,
search_params=search_params or {},
timeout=timeout,
partition_names=partition_names,
anns_field=anns_field or "",
round_decimal=round_decimal,
**kwargs,
)
except ServerVersionIncompatibleException:
# for compatibility, return search_iterator V1
logger.warning(ExceptionsMessage.SearchIteratorV2FallbackWarning)
# following is the old code for search_iterator V1
if filter is not None and not isinstance(filter, str):
raise DataTypeNotMatchException(message=ExceptionsMessage.ExprType % type(filter))
# set up schema for iterator
schema_dict = conn.describe_collection(collection_name, timeout=timeout, **kwargs)
# if anns_field is not provided
# if only one vector field, use to search
# if multiple vector fields, raise exception and abort
if anns_field is None or anns_field == "":
vec_field = None
fields = schema_dict[FIELDS]
vec_field_count = 0
for field in fields:
if is_vector_type(field[TYPE]):
vec_field_count += 1
vec_field = field
if vec_field is None:
raise MilvusException(
code=ErrorCode.UNEXPECTED_ERROR,
message="there should be at least one vector field in milvus collection",
)
if vec_field_count > 1:
raise MilvusException(
code=ErrorCode.UNEXPECTED_ERROR,
message="must specify anns_field when there are more than one vector field",
)
anns_field = vec_field["name"]
if anns_field is None or anns_field == "":
raise MilvusException(
code=ErrorCode.UNEXPECTED_ERROR,
message=f"cannot get anns_field name for search iterator, got:{anns_field}",
)
# set up metrics type for search_iterator which is mandatory
if search_params is None:
search_params = {}
if METRIC_TYPE not in search_params:
indexes = conn.list_indexes(collection_name)
for index in indexes:
if anns_field == index.index_name:
params = index.params
for param in params:
if param.key == METRIC_TYPE:
search_params[METRIC_TYPE] = param.value
if METRIC_TYPE not in search_params:
raise MilvusException(
ParamError, f"Cannot set up metrics type for anns_field:{anns_field}"
)
search_params["params"] = get_params(search_params)
return SearchIterator(
connection=self._get_connection(),
collection_name=collection_name,
data=data,
ann_field=anns_field,
param=search_params,
batch_size=batch_size,
limit=limit,
expr=filter,
partition_names=partition_names,
output_fields=output_fields,
timeout=timeout,
round_decimal=round_decimal,
schema=schema_dict,
**kwargs,
)
def get(
self,
collection_name: str,
ids: Union[list, str, int],
output_fields: Optional[List[str]] = None,
timeout: Optional[float] = None,
partition_names: Optional[List[str]] = None,
**kwargs,
) -> List[dict]:
"""Grab the inserted vectors using the primary key from the Collection.
Due to current implementations, grabbing a large amount of vectors is slow.
Args:
ids (str): The pk's to get vectors for. Depending on pk_field type it can be int or str
or a list of either.
timeout (float, optional): Timeout to use, overides the client level assigned at
init. Defaults to None.
Raises:
ValueError: Missing collection.
Returns:
List[dict]: A list of result dicts with keys {pk_field, vector_field}
"""
if not isinstance(ids, list):
ids = [ids]
if len(ids) == 0:
return []
conn = self._get_connection()
schema_dict, _ = conn._get_schema_from_cache_or_remote(collection_name, timeout=timeout)
if not output_fields:
output_fields = ["*"]
expr = self._pack_pks_expr(schema_dict, ids)
return conn.query(
collection_name,
expr=expr,
output_fields=output_fields,
partition_names=partition_names,
timeout=timeout,
**kwargs,
)
def delete(
self,
collection_name: str,
ids: Optional[Union[list, str, int]] = None,
timeout: Optional[float] = None,
filter: Optional[str] = None,
partition_name: Optional[str] = None,
**kwargs,
) -> Dict[str, int]:
"""Delete entries in the collection by their pk or by filter.
Starting from version 2.3.2, Milvus no longer includes the primary keys in the result
when processing the delete operation on expressions.
This change is due to the large amount of data involved.
The delete interface no longer returns any results.
If no exceptions are thrown, it indicates a successful deletion.
However, for backward compatibility, If the primary_keys returned from old
Milvus(previous 2.3.2) is not empty, the list of primary keys is still returned.
Args:
ids (list, str, int, optional): The pk's to delete.
Depending on pk_field type it can be int or str or a list of either.
Default to None.
filter(str, optional): A filter to use for the deletion. Defaults to none.
timeout (int, optional): Timeout to use, overides the client level assigned at init.
Defaults to None.
Note: You need to passin either ids or filter, and they cannot be used at the same time.
Returns:
Dict: with key 'deleted_count' and value number of rows that were deleted.
"""
pks = kwargs.get("pks", [])
if isinstance(pks, (int, str)):
pks = [pks]
for pk in pks:
if not isinstance(pk, (int, str)):
msg = f"wrong type of argument pks, expect list, int or str, got '{type(pk).__name__}'"
raise TypeError(msg)
if ids is not None:
if isinstance(ids, (int, str)):
pks.append(ids)
elif isinstance(ids, list):
for id in ids:
if not isinstance(id, (int, str)):
msg = f"wrong type of argument ids, expect list, int or str, got '{type(id).__name__}'"
raise TypeError(msg)
pks.extend(ids)
else:
msg = f"wrong type of argument ids, expect list, int or str, got '{type(ids).__name__}'"
raise TypeError(msg)
# validate ambiguous delete filter param before describe collection rpc
if filter and len(pks) > 0:
raise ParamError(message=ExceptionsMessage.AmbiguousDeleteFilterParam)
expr = ""
conn = self._get_connection()
if len(pks) > 0:
schema_dict, _ = conn._get_schema_from_cache_or_remote(collection_name, timeout=timeout)
expr = self._pack_pks_expr(schema_dict, pks)
else:
if not isinstance(filter, str):
raise DataTypeNotMatchException(message=ExceptionsMessage.ExprType % type(filter))
expr = filter
ret_pks = []
res = conn.delete(
collection_name=collection_name,
expression=expr,
partition_name=partition_name,
expr_params=kwargs.pop("filter_params", {}),
timeout=timeout,
**kwargs,
)
if res.primary_keys:
ret_pks.extend(res.primary_keys)
# compatible with deletions that returns primary keys
if ret_pks:
return ret_pks
return OmitZeroDict({"delete_count": res.delete_count, "cost": res.cost})
def get_collection_stats(self, collection_name: str, timeout: Optional[float] = None) -> Dict:
conn = self._get_connection()
stats = conn.get_collection_stats(collection_name, timeout=timeout)
result = {stat.key: stat.value for stat in stats}
if "row_count" in result:
result["row_count"] = int(result["row_count"])
return result
def describe_collection(self, collection_name: str, timeout: Optional[float] = None, **kwargs):
conn = self._get_connection()
result = conn.describe_collection(collection_name, timeout=timeout, **kwargs)
# Convert internal struct_array_fields to user-friendly format
if isinstance(result, dict) and "struct_array_fields" in result:
converted_fields = convert_struct_fields_to_user_format(result["struct_array_fields"])
result["fields"].extend(converted_fields)
# Remove internal struct_array_fields from user-facing response
result.pop("struct_array_fields")
return result
def has_collection(self, collection_name: str, timeout: Optional[float] = None, **kwargs):
conn = self._get_connection()
return conn.has_collection(collection_name, timeout=timeout, **kwargs)
def list_collections(self, **kwargs):
conn = self._get_connection()
return conn.list_collections(**kwargs)
def drop_collection(self, collection_name: str, timeout: Optional[float] = None, **kwargs):
"""Delete the collection stored in this object"""
conn = self._get_connection()
conn.drop_collection(collection_name, timeout=timeout, **kwargs)
def rename_collection(
self,
old_name: str,
new_name: str,
target_db: Optional[str] = "",
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
conn.rename_collections(old_name, new_name, target_db, timeout=timeout, **kwargs)
def _create_collection_with_schema(
self,
collection_name: str,
schema: CollectionSchema,
index_params: IndexParams,
timeout: Optional[float] = None,
**kwargs,
):
schema.verify()
conn = self._get_connection()
if "consistency_level" not in kwargs:
kwargs["consistency_level"] = DEFAULT_CONSISTENCY_LEVEL
conn.create_collection(collection_name, schema, timeout=timeout, **kwargs)
if index_params:
self.create_index(collection_name, index_params, timeout=timeout)
self.load_collection(collection_name, timeout=timeout)
def close(self):
connections.remove_connection(self._using)
def load_collection(self, collection_name: str, timeout: Optional[float] = None, **kwargs):
"""Loads the collection."""
conn = self._get_connection()
conn.load_collection(collection_name, timeout=timeout, **kwargs)
def release_collection(self, collection_name: str, timeout: Optional[float] = None, **kwargs):
conn = self._get_connection()
conn.release_collection(collection_name, timeout=timeout, **kwargs)
def get_load_state(
self,
collection_name: str,
partition_name: Optional[str] = "",
timeout: Optional[float] = None,
**kwargs,
) -> Dict:
conn = self._get_connection()
partition_names = None
if partition_name:
partition_names = [partition_name]
try:
state = conn.get_load_state(collection_name, partition_names, timeout=timeout, **kwargs)
except Exception as ex:
raise ex from ex
ret = {"state": state}
if state == LoadState.Loading:
progress = conn.get_loading_progress(collection_name, partition_names, timeout=timeout)
ret["progress"] = progress
return ret
def refresh_load(self, collection_name: str, timeout: Optional[float] = None, **kwargs):
    """Re-trigger loading so newly flushed data becomes queryable."""
    # Any caller-supplied `_refresh` flag is discarded; we always force it on.
    kwargs.pop("_refresh", None)
    self._get_connection().load_collection(
        collection_name, timeout=timeout, _refresh=True, **kwargs
    )
def list_indexes(self, collection_name: str, field_name: Optional[str] = "", **kwargs):
    """Return the index names of a collection.

    Args:
        collection_name: The name of the collection.
        field_name: When given, only indexes built on this field are returned;
            otherwise every index of the collection is returned.

    Returns:
        list of index-name strings.
    """
    indexes = self._get_connection().list_indexes(collection_name, **kwargs)
    return [
        index.index_name
        for index in indexes
        if index and (not field_name or index.field_name == field_name)
    ]
def drop_index(
    self, collection_name: str, index_name: str, timeout: Optional[float] = None, **kwargs
):
    """Drop the named index from the collection."""
    # The empty string is the (unused) field-name positional of the connection API.
    self._get_connection().drop_index(collection_name, "", index_name, timeout=timeout, **kwargs)
def describe_index(
    self, collection_name: str, index_name: str, timeout: Optional[float] = None, **kwargs
) -> Dict:
    """Return the description of the named index."""
    return self._get_connection().describe_index(
        collection_name, index_name, timeout=timeout, **kwargs
    )
def alter_index_properties(
    self,
    collection_name: str,
    index_name: str,
    properties: dict,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Alter the properties of an existing index."""
    self._get_connection().alter_index_properties(
        collection_name, index_name, properties=properties, timeout=timeout, **kwargs
    )
def drop_index_properties(
    self,
    collection_name: str,
    index_name: str,
    property_keys: List[str],
    timeout: Optional[float] = None,
    **kwargs,
):
    """Remove the given property keys from an existing index."""
    self._get_connection().drop_index_properties(
        collection_name, index_name, property_keys=property_keys, timeout=timeout, **kwargs
    )
def alter_collection_properties(
    self, collection_name: str, properties: dict, timeout: Optional[float] = None, **kwargs
):
    """Alter the properties of an existing collection."""
    self._get_connection().alter_collection_properties(
        collection_name,
        properties=properties,
        timeout=timeout,
        **kwargs,
    )
def drop_collection_properties(
    self,
    collection_name: str,
    property_keys: List[str],
    timeout: Optional[float] = None,
    **kwargs,
):
    """Remove the given property keys from an existing collection."""
    self._get_connection().drop_collection_properties(
        collection_name, property_keys=property_keys, timeout=timeout, **kwargs
    )
def alter_collection_field(
    self,
    collection_name: str,
    field_name: str,
    field_params: dict,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Alter the properties of one field of an existing collection."""
    self._get_connection().alter_collection_field_properties(
        collection_name,
        field_name=field_name,
        field_params=field_params,
        timeout=timeout,
        **kwargs,
    )
def add_collection_field(
    self,
    collection_name: str,
    field_name: str,
    data_type: DataType,
    desc: str = "",
    timeout: Optional[float] = None,
    **kwargs,
):
    """Add a new field to an existing collection.

    Args:
        collection_name(``string``): The name of collection.
        field_name (str): The name of the new field.
        data_type (DataType): The data type of the new field.
        desc (str): The description of the field.
        timeout (``float``, optional): A duration of time in seconds to allow for the RPC.
            If timeout is set to None, the client keeps waiting until the server
            responds or an error occurs.
        **kwargs (``dict``): Optional field params; note they are forwarded both to
            the schema builder and to the RPC call below.
            nullable: bool, indicates field is nullable or not, shall be ``True`` for now
            default_value: default value for the added field
    Raises:
        MilvusException: If anything goes wrong
    """
    field_schema = self.create_field_schema(field_name, data_type, desc, **kwargs)
    conn = self._get_connection()
    conn.add_collection_field(
        collection_name,
        field_schema,
        timeout=timeout,
        **kwargs,
    )
def create_partition(
    self, collection_name: str, partition_name: str, timeout: Optional[float] = None, **kwargs
):
    """Create a partition inside the collection."""
    self._get_connection().create_partition(
        collection_name, partition_name, timeout=timeout, **kwargs
    )
def drop_partition(
    self, collection_name: str, partition_name: str, timeout: Optional[float] = None, **kwargs
):
    """Drop a partition (and its data) from the collection."""
    self._get_connection().drop_partition(
        collection_name, partition_name, timeout=timeout, **kwargs
    )
def has_partition(
    self, collection_name: str, partition_name: str, timeout: Optional[float] = None, **kwargs
) -> bool:
    """Return True if the collection contains the named partition."""
    return self._get_connection().has_partition(
        collection_name, partition_name, timeout=timeout, **kwargs
    )
def list_partitions(
    self, collection_name: str, timeout: Optional[float] = None, **kwargs
) -> List[str]:
    """Return the names of all partitions in the collection."""
    return self._get_connection().list_partitions(collection_name, timeout=timeout, **kwargs)
def load_partitions(
    self,
    collection_name: str,
    partition_names: Union[str, List[str]],
    timeout: Optional[float] = None,
    **kwargs,
):
    """Load one or more partitions into memory.

    A single partition name is accepted and wrapped into a list.
    """
    names = [partition_names] if isinstance(partition_names, str) else partition_names
    self._get_connection().load_partitions(collection_name, names, timeout=timeout, **kwargs)
def release_partitions(
    self,
    collection_name: str,
    partition_names: Union[str, List[str]],
    timeout: Optional[float] = None,
    **kwargs,
):
    """Release one or more partitions from memory.

    A single partition name is accepted and wrapped into a list.
    """
    names = [partition_names] if isinstance(partition_names, str) else partition_names
    self._get_connection().release_partitions(collection_name, names, timeout=timeout, **kwargs)
def get_partition_stats(
    self, collection_name: str, partition_name: str, timeout: Optional[float] = None, **kwargs
) -> Dict:
    """Return statistics of a partition as a dict; "row_count" is coerced to int."""
    conn = self._get_connection()
    if not isinstance(partition_name, str):
        msg = f"wrong type of argument 'partition_name', str expected, got '{type(partition_name).__name__}'"
        raise TypeError(msg)
    stats = conn.get_partition_stats(collection_name, partition_name, timeout=timeout, **kwargs)
    result = {}
    for stat in stats:
        result[stat.key] = stat.value
    if "row_count" in result:
        result["row_count"] = int(result["row_count"])
    return result
def create_user(self, user_name: str, password: str, timeout: Optional[float] = None, **kwargs):
    """Create a new user with the given credentials."""
    return self._get_connection().create_user(user_name, password, timeout=timeout, **kwargs)
def drop_user(self, user_name: str, timeout: Optional[float] = None, **kwargs):
    """Delete the named user from the server."""
    # The connection-level API is named `delete_user`.
    return self._get_connection().delete_user(user_name, timeout=timeout, **kwargs)
def update_password(
    self,
    user_name: str,
    old_password: str,
    new_password: str,
    reset_connection: Optional[bool] = False,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Change *user_name*'s password on the server.

    When *reset_connection* is True, the current gRPC channel is rebuilt with
    the new credentials so subsequent calls on this client authenticate.
    """
    conn = self._get_connection()
    conn.update_password(user_name, old_password, new_password, timeout=timeout, **kwargs)
    if reset_connection:
        # NOTE(review): relies on private connection internals; keep in sync
        # with the connection implementation.
        conn._setup_authorization_interceptor(user_name, new_password, None)
        conn._setup_grpc_channel()
def list_users(self, timeout: Optional[float] = None, **kwargs):
    """Return the names of all users known to the server."""
    return self._get_connection().list_usernames(timeout=timeout, **kwargs)
def describe_user(self, user_name: str, timeout: Optional[float] = None, **kwargs):
    """Return ``{"user_name": ..., "roles": ...}`` for *user_name*, or ``{}``
    when the server reports no matching user group.

    Args:
        user_name: The user to look up.
        timeout: Seconds to wait for the RPC; None waits indefinitely.
    """
    conn = self._get_connection()
    # The former `except Exception as ex: raise ex from ex` wrapper was a no-op
    # (it set the exception as its own cause); errors now propagate unchanged.
    res = conn.select_one_user(user_name, True, timeout=timeout, **kwargs)
    if res.groups:
        return {"user_name": user_name, "roles": res.groups[0].roles}
    return {}
def grant_role(self, user_name: str, role_name: str, timeout: Optional[float] = None, **kwargs):
    """Add the user to the given role."""
    self._get_connection().add_user_to_role(user_name, role_name, timeout=timeout, **kwargs)
def revoke_role(
    self, user_name: str, role_name: str, timeout: Optional[float] = None, **kwargs
):
    """Remove the user from the given role."""
    self._get_connection().remove_user_from_role(user_name, role_name, timeout=timeout, **kwargs)
def create_role(self, role_name: str, timeout: Optional[float] = None, **kwargs):
    """Create a new role."""
    self._get_connection().create_role(role_name, timeout=timeout, **kwargs)
def drop_role(
    self, role_name: str, force_drop: bool = False, timeout: Optional[float] = None, **kwargs
):
    """Drop a role; *force_drop* drops it even when grants still reference it."""
    self._get_connection().drop_role(role_name, force_drop=force_drop, timeout=timeout, **kwargs)
def describe_role(self, role_name: str, timeout: Optional[float] = None, **kwargs) -> Dict:
    """Return the grants of *role_name*.

    Args:
        role_name: The role to describe.
        timeout: Seconds to wait for the RPC; None waits indefinitely.
        **kwargs: May contain ``db_name`` to scope the lookup to one database.

    Returns:
        Dict with keys ``"role"`` (the role name) and ``"privileges"``
        (list of grant dicts).
    """
    conn = self._get_connection()
    db_name = kwargs.pop("db_name", "")
    # The former `except Exception as ex: raise ex from ex` wrapper was a no-op;
    # errors now propagate unchanged.
    res = conn.select_grant_for_one_role(role_name, db_name, timeout=timeout, **kwargs)
    return {"role": role_name, "privileges": [dict(i) for i in res.groups]}
def list_roles(self, timeout: Optional[float] = None, **kwargs):
    """Return the names of all roles known to the server.

    Args:
        timeout: Seconds to wait for the RPC; None waits indefinitely.
    """
    conn = self._get_connection()
    # The former `except Exception as ex: raise ex from ex` wrapper was a no-op;
    # errors now propagate unchanged.
    res = conn.select_all_role(False, timeout=timeout, **kwargs)
    return [g.role_name for g in res.groups]
def grant_privilege(
    self,
    role_name: str,
    object_type: str,
    privilege: str,
    object_name: str,
    db_name: Optional[str] = "",
    timeout: Optional[float] = None,
    **kwargs,
):
    """Grant *privilege* on *object_name* (of *object_type*) to *role_name*."""
    # Note: the connection API takes object_name before privilege.
    self._get_connection().grant_privilege(
        role_name, object_type, object_name, privilege, db_name, timeout=timeout, **kwargs
    )
def revoke_privilege(
    self,
    role_name: str,
    object_type: str,
    privilege: str,
    object_name: str,
    db_name: Optional[str] = "",
    timeout: Optional[float] = None,
    **kwargs,
):
    """Revoke *privilege* on *object_name* (of *object_type*) from *role_name*."""
    # Note: the connection API takes object_name before privilege.
    self._get_connection().revoke_privilege(
        role_name, object_type, object_name, privilege, db_name, timeout=timeout, **kwargs
    )
def grant_privilege_v2(
    self,
    role_name: str,
    privilege: str,
    collection_name: str,
    db_name: Optional[str] = None,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Grant a privilege or a privilege group to a role (v2 RBAC API).

    Args:
        role_name (``str``): The name of the role.
        privilege (``str``): The privilege or privilege group to grant.
        collection_name (``str``): The name of the collection.
        db_name (``str``, optional): The database to scope the grant to; the
            default database is used when omitted.
        timeout (``float``, optional): Seconds to wait for the RPC; None waits
            until the server responds or an error occurs.

    Raises:
        MilvusException: If anything goes wrong.
    """
    self._get_connection().grant_privilege_v2(
        role_name,
        privilege,
        collection_name,
        db_name=db_name,
        timeout=timeout,
        **kwargs,
    )
def revoke_privilege_v2(
    self,
    role_name: str,
    privilege: str,
    collection_name: str,
    db_name: Optional[str] = None,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Revoke a privilege or a privilege group from a role (v2 RBAC API).

    Args:
        role_name (``str``): The name of the role.
        privilege (``str``): The privilege or privilege group to revoke.
        collection_name (``str``): The name of the collection.
        db_name (``str``, optional): The database to scope the revoke to; the
            default database is used when omitted.
        timeout (``float``, optional): Seconds to wait for the RPC; None waits
            until the server responds or an error occurs.

    Raises:
        MilvusException: If anything goes wrong.
    """
    self._get_connection().revoke_privilege_v2(
        role_name,
        privilege,
        collection_name,
        db_name=db_name,
        timeout=timeout,
        **kwargs,
    )
def create_alias(
    self, collection_name: str, alias: str, timeout: Optional[float] = None, **kwargs
):
    """Create *alias* pointing at *collection_name*."""
    self._get_connection().create_alias(collection_name, alias, timeout=timeout, **kwargs)
def drop_alias(self, alias: str, timeout: Optional[float] = None, **kwargs):
    """Remove an existing collection alias."""
    self._get_connection().drop_alias(alias, timeout=timeout, **kwargs)
def alter_alias(
    self, collection_name: str, alias: str, timeout: Optional[float] = None, **kwargs
):
    """Re-point an existing *alias* at *collection_name*."""
    self._get_connection().alter_alias(collection_name, alias, timeout=timeout, **kwargs)
def describe_alias(self, alias: str, timeout: Optional[float] = None, **kwargs) -> Dict:
    """Return the description of an alias."""
    return self._get_connection().describe_alias(alias, timeout=timeout, **kwargs)
def list_aliases(
    self, collection_name: str = "", timeout: Optional[float] = None, **kwargs
) -> List[str]:
    """Return aliases; when *collection_name* is empty, all aliases are listed."""
    return self._get_connection().list_aliases(collection_name, timeout=timeout, **kwargs)
# Deprecated: kept for backward compatibility; use `use_database` instead.
def using_database(self, db_name: str, **kwargs):
    """Deprecated alias of :meth:`use_database`."""
    self.use_database(db_name, **kwargs)
def use_database(self, db_name: str, **kwargs):
    """Switch this client's connection to operate on *db_name*."""
    self._get_connection().reset_db_name(db_name)
def create_database(
    self,
    db_name: str,
    properties: Optional[dict] = None,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Create a database named *db_name* with optional *properties*."""
    self._get_connection().create_database(
        db_name=db_name, properties=properties, timeout=timeout, **kwargs
    )
def drop_database(self, db_name: str, **kwargs):
    """Drop the named database."""
    self._get_connection().drop_database(db_name, **kwargs)
def list_databases(self, timeout: Optional[float] = None, **kwargs) -> List[str]:
    """Return the names of all databases on the server."""
    # The connection-level API is named `list_database` (singular).
    return self._get_connection().list_database(timeout=timeout, **kwargs)
def describe_database(self, db_name: str, **kwargs) -> dict:
    """Return the description of the named database."""
    return self._get_connection().describe_database(db_name, **kwargs)
def alter_database_properties(self, db_name: str, properties: dict, **kwargs):
    """Alter the properties of the named database."""
    self._get_connection().alter_database(db_name, properties, **kwargs)
def drop_database_properties(self, db_name: str, property_keys: List[str], **kwargs):
    """Remove the given property keys from the named database."""
    self._get_connection().drop_database_properties(db_name, property_keys, **kwargs)
def flush(
    self,
    collection_name: str,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Seal all growing segments of *collection_name*.

    Data inserted after the flush is written into new segments.

    Args:
        collection_name: The collection to flush.
        timeout: Seconds to wait for the RPCs; None waits until the server
            responds or an error occurs.

    Raises:
        MilvusException: If anything goes wrong.
    """
    # The connection-level API flushes a list of collections; wrap the single name.
    self._get_connection().flush([collection_name], timeout=timeout, **kwargs)
def compact(
    self,
    collection_name: str,
    is_clustering: Optional[bool] = False,
    is_l0: Optional[bool] = False,
    timeout: Optional[float] = None,
    **kwargs,
) -> int:
    """Trigger compaction of small segments in *collection_name*.

    Args:
        collection_name: The collection to compact.
        is_clustering: Trigger a clustering compaction instead of a regular one.
        is_l0: Trigger an L0 compaction.
        timeout: Seconds to wait for the RPC; None waits indefinitely.

    Returns:
        int: The server-side compaction job id, usable for state inquiries.

    Raises:
        MilvusException: If anything goes wrong.
    """
    return self._get_connection().compact(
        collection_name, is_clustering=is_clustering, is_l0=is_l0, timeout=timeout, **kwargs
    )
def get_compaction_state(
    self,
    job_id: int,
    timeout: Optional[float] = None,
    **kwargs,
) -> str:
    """Return the state name of compaction job *job_id*.

    Possible values are "UndefiedState" (sic — server-side spelling),
    "Executing" and "Completed".

    Raises:
        MilvusException: If anything goes wrong.
    """
    state = self._get_connection().get_compaction_state(job_id, timeout=timeout, **kwargs)
    return state.state_name
def get_server_version(
    self,
    timeout: Optional[float] = None,
    **kwargs,
) -> str:
    """Return the running server's version string.

    Args:
        timeout: Seconds to wait for the RPC; None waits until the server
            responds or an error occurs.

    Raises:
        MilvusException: If anything goes wrong.
    """
    return self._get_connection().get_server_version(timeout=timeout, **kwargs)
def create_privilege_group(
    self,
    group_name: str,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Create a new privilege group named *group_name*.

    Raises:
        MilvusException: If anything goes wrong.
    """
    self._get_connection().create_privilege_group(group_name, timeout=timeout, **kwargs)
def drop_privilege_group(
    self,
    group_name: str,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Drop the privilege group named *group_name*.

    Raises:
        MilvusException: If anything goes wrong.
    """
    self._get_connection().drop_privilege_group(group_name, timeout=timeout, **kwargs)
def list_privilege_groups(
    self,
    timeout: Optional[float] = None,
    **kwargs,
) -> List[Dict[str, str]]:
    """Return all privilege groups as dicts with keys "privilege_group" and "privileges".

    Raises:
        MilvusException: If anything goes wrong.
    """
    res = self._get_connection().list_privilege_groups(timeout=timeout, **kwargs)
    return [
        {"privilege_group": g.privilege_group, "privileges": g.privileges}
        for g in res.groups
    ]
def add_privileges_to_group(
    self,
    group_name: str,
    privileges: List[str],
    timeout: Optional[float] = None,
    **kwargs,
):
    """Add *privileges* to the privilege group *group_name*.

    All privileges in one group must be of the same type, otherwise the
    server raises an exception.

    Raises:
        MilvusException: If anything goes wrong.
    """
    self._get_connection().add_privileges_to_group(
        group_name, privileges, timeout=timeout, **kwargs
    )
def remove_privileges_from_group(
    self,
    group_name: str,
    privileges: List[str],
    timeout: Optional[float] = None,
    **kwargs,
):
    """Remove *privileges* from the privilege group *group_name*.

    Raises:
        MilvusException: If anything goes wrong.
    """
    self._get_connection().remove_privileges_from_group(
        group_name, privileges, timeout=timeout, **kwargs
    )
def create_resource_group(self, name: str, timeout: Optional[float] = None, **kwargs):
    """Create a resource group; succeeds whether or not it already exists.

    Raises:
        MilvusException: If anything goes wrong.
    """
    return self._get_connection().create_resource_group(name, timeout, **kwargs)
def update_resource_groups(
    self,
    configs: Dict[str, ResourceGroupConfig],
    timeout: Optional[float] = None,
):
    """Apply new configurations to the named resource groups.

    Args:
        configs: Mapping of resource-group name to its desired configuration.
        timeout: Seconds to wait; None waits indefinitely.

    Raises:
        MilvusException: If anything goes wrong.
    """
    return self._get_connection().update_resource_groups(configs, timeout)
def drop_resource_group(
    self,
    name: str,
    timeout: Optional[float] = None,
):
    """Drop a resource group; succeeds only when the group exists and is empty.

    Args:
        name: The name of the resource group.
        timeout: Seconds to wait; None waits indefinitely.

    Raises:
        MilvusException: If anything goes wrong.
    """
    return self._get_connection().drop_resource_group(name, timeout)
def describe_resource_group(self, name: str, timeout: Optional[float] = None):
    """Describe a resource group.

    Args:
        name: The name of the resource group.
        timeout: The timeout value in seconds. Defaults to None.
    Returns:
        ResourceGroupInfo: The detail info of the resource group.
    Raises:
        MilvusException: If anything goes wrong.
    """
    conn = self._get_connection()
    return conn.describe_resource_group(name, timeout)
def list_resource_groups(self, timeout: Optional[float] = None):
    """Return the names of all resource groups.

    Args:
        timeout: Seconds to wait; None waits indefinitely.

    Raises:
        MilvusException: If anything goes wrong.
    """
    return self._get_connection().list_resource_groups(timeout)
def transfer_replica(
    self,
    source_group: str,
    target_group: str,
    collection_name: str,
    num_replicas: int,
    timeout: Optional[float] = None,
):
    """Move *num_replicas* replicas of a collection between resource groups.

    Args:
        source_group: Resource group to take replicas from.
        target_group: Resource group to move replicas into.
        collection_name: Collection the replicas belong to.
        num_replicas: How many replicas to transfer.
        timeout: Seconds to wait; None waits indefinitely.

    Raises:
        MilvusException: If anything goes wrong.
    """
    return self._get_connection().transfer_replica(
        source_group, target_group, collection_name, num_replicas, timeout
    )
def describe_replica(
    self, collection_name: str, timeout: Optional[float] = None, **kwargs
) -> List[ReplicaInfo]:
    """Return information about the currently loaded replicas of a collection.

    Args:
        collection_name (``str``): The collection to inspect.
        timeout (``float``, optional): Seconds to wait for the RPC; None waits
            until the server responds or an error occurs.

    Returns:
        List[ReplicaInfo]: All the replica information.
    """
    return self._get_connection().describe_replica(collection_name, timeout=timeout, **kwargs)
def run_analyzer(
    self,
    texts: Union[str, List[str]],
    analyzer_params: Optional[Union[str, Dict]] = None,
    with_hash: bool = False,
    with_detail: bool = False,
    collection_name: Optional[str] = None,
    field_name: Optional[str] = None,
    analyzer_names: Optional[Union[str, List[str]]] = None,
    timeout: Optional[float] = None,
):
    """Run an analyzer over text and return the resulting tokens.

    Args:
        texts (``str``, ``List[str]``): The input text (string or string list).
        analyzer_params (``str``, ``Dict``, ``None``): The parameters of analyzer.
        with_hash (``bool``): Include token hashes in the result — TODO confirm.
        with_detail (``bool``): Include per-token detail in the result — TODO confirm.
        collection_name (``str``, optional): Collection whose analyzer to use when
            no explicit params are given — TODO confirm against connection impl.
        field_name (``str``, optional): Field whose analyzer to use.
        analyzer_names (``str``, ``List[str]``, optional): Named analyzer(s) to run.
        timeout (``float``, optional): The timeout value in seconds. Defaults to None.

    Returns:
        (``List[str]``, ``List[List[str]]``): The result tokens of analysis.
    """
    return self._get_connection().run_analyzer(
        texts,
        analyzer_params=analyzer_params,
        with_hash=with_hash,
        with_detail=with_detail,
        collection_name=collection_name,
        field_name=field_name,
        analyzer_names=analyzer_names,
        timeout=timeout,
    )
def update_replicate_configuration(
    self,
    clusters: Optional[List[Dict]] = None,
    cross_cluster_topology: Optional[List[Dict]] = None,
    timeout: Optional[float] = None,
    **kwargs,
):
    """
    Update replication configuration across Milvus clusters.

    Args:
        clusters (List[Dict], optional): List of cluster configurations.
            Each dict should contain:
            - cluster_id (str): Unique identifier for the cluster
            - connection_param (Dict): Connection parameters with 'uri' and 'token'
            - pchannels (List[str], optional): Physical channels for the cluster
        cross_cluster_topology (List[Dict], optional): List of replication relationships.
            Each dict should contain:
            - source_cluster_id (str): ID of the source cluster
            - target_cluster_id (str): ID of the target cluster
        timeout (float, optional): An optional duration of time in seconds to allow for the RPC
        **kwargs: Additional arguments

    Returns:
        Status: The status of the operation

    Raises:
        ParamError: If neither clusters nor cross_cluster_topology is provided
        MilvusException: If the operation fails

    Examples:
        client.update_replicate_configuration(
            clusters=[
                {
                    "cluster_id": "source_cluster",
                    "connection_param": {
                        "uri": "http://source:19530",
                        "token": "source_token"
                    },
                    "pchannels": ["source_pchannel1", "source_pchannel2"]
                },
                {
                    "cluster_id": "target_cluster",
                    "connection_param": {
                        "uri": "http://target:19530",
                        "token": "target_token"
                    },
                    "pchannels": ["target_pchannel1", "target_pchannel2"]
                }
            ],
            cross_cluster_topology=[
                {
                    "source_cluster_id": "source_cluster",
                    "target_cluster_id": "target_cluster"
                }
            ]
        )
    """
    return self._get_connection().update_replicate_configuration(
        clusters=clusters,
        cross_cluster_topology=cross_cluster_topology,
        timeout=timeout,
        **kwargs,
    )
def flush_all(self, timeout: Optional[float] = None, **kwargs) -> None:
    """Flush every collection on the server.

    Args:
        timeout: Seconds to wait for the RPC; None waits indefinitely.
        **kwargs: Additional arguments forwarded to the connection.
    """
    self._get_connection().flush_all(timeout=timeout, **kwargs)
def get_flush_all_state(self, timeout: Optional[float] = None, **kwargs) -> bool:
    """Return True when a previously issued flush-all has completed.

    Args:
        timeout: Seconds to wait for the RPC; None waits indefinitely.
        **kwargs: Additional arguments forwarded to the connection.
    """
    return self._get_connection().get_flush_all_state(timeout=timeout, **kwargs)
def list_loaded_segments(
    self,
    collection_name: str,
    timeout: Optional[float] = None,
    **kwargs,
) -> List[LoadedSegmentInfo]:
    """Return info for every segment of *collection_name* currently loaded.

    Args:
        collection_name: The name of the collection.
        timeout: Seconds to wait for the RPC; None waits indefinitely.
        **kwargs: Additional arguments forwarded to the connection.

    Returns:
        List[LoadedSegmentInfo]: One entry per loaded segment.
    """
    raw_infos = self._get_connection().get_query_segment_info(
        collection_name, timeout=timeout, **kwargs
    )
    segments = []
    for info in raw_infos:
        segments.append(
            LoadedSegmentInfo(
                info.segmentID,
                info.collectionID,
                collection_name,
                info.num_rows,
                info.is_sorted,
                info.state,
                info.level,
                info.storage_version,
                info.mem_size,
            )
        )
    return segments
def list_persistent_segments(
    self,
    collection_name: str,
    timeout: Optional[float] = None,
    **kwargs,
) -> List[SegmentInfo]:
    """Return info for every persisted segment of *collection_name*.

    Args:
        collection_name: The name of the collection.
        timeout: Seconds to wait for the RPC; None waits indefinitely.
        **kwargs: Additional arguments forwarded to the connection.

    Returns:
        List[SegmentInfo]: One entry per persistent segment.
    """
    raw_infos = self._get_connection().get_persistent_segment_infos(
        collection_name, timeout=timeout, **kwargs
    )
    segments = []
    for info in raw_infos:
        segments.append(
            SegmentInfo(
                info.segmentID,
                info.collectionID,
                collection_name,
                info.num_rows,
                info.is_sorted,
                info.state,
                info.level,
                info.storage_version,
            )
        )
    return segments
def get_compaction_plans(
    self,
    job_id: int,
    timeout: Optional[float] = None,
    **kwargs,
) -> CompactionPlans:
    """Return the compaction plans of job *job_id*.

    Args:
        job_id: The ID of the compaction job.
        timeout: Seconds to wait for the RPC; None waits indefinitely.
        **kwargs: Additional arguments forwarded to the connection.

    Returns:
        CompactionPlans: The compaction plans for the specified job.
    """
    return self._get_connection().get_compaction_plans(job_id, timeout=timeout, **kwargs)
| MilvusClient |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramType1.py | {
"start": 264,
"end": 1227
} | class ____:
def m1(self: "Child1"): ...
# This should generate an error.
def m2(self: Parent): ...
# This should generate an error.
def m3(self: type["Child1"]): ...
def m4(self: _TChild1) -> _TChild1: ...
# This should generate an error.
def m5(self: type[_TChild1]) -> _TChild1: ...
def m6(self: _T) -> _T: ...
# This should generate an error.
def __new__(cls: "Child1"): ...
@classmethod
def cm1(cls: type["Child1"]): ...
# This should generate an error.
@classmethod
# This should generate an error.
def cm2(cls: "Child1"): ...
@classmethod
# This should generate an error.
def cm3(cls: type[Parent]): ...
@classmethod
def cm4(cls: type[_TChild1]) -> _TChild1: ...
# This should generate an error.
@classmethod
# This should generate an error.
def cm5(cls: _TChild1) -> _TChild1: ...
@classmethod
def cm6(cls: type[_T]) -> _T: ...
| Child1 |
python | PyCQA__pylint | tests/functional/r/renamed_import_logging_not_lazy.py | {
"start": 219,
"end": 543
} | class ____:
"""Fake logger"""
logger = renamed_logging.getLogger(__name__)
fake_logger = Logger()
# Statements that should be flagged
renamed_logging.warning('%s, %s' % (4, 5)) # [logging-not-lazy]
logger.warning('%s' % 5) # [logging-not-lazy]
# Statements that should not be flagged:
fake_logger.warn('%s' % 5)
| Logger |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-stripe/unit_tests/integration/test_application_fees.py | {
"start": 3509,
"end": 9805
} | class ____(TestCase):
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_application_fees_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_application_fees_response().with_record(_an_application_fee()).with_record(_an_application_fee()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 2
@HttpMocker()
def test_given_many_pages_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_application_fees_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_application_fees_response()
.with_pagination()
.with_record(_an_application_fee().with_id("last_record_id_from_first_page"))
.build(),
)
http_mocker.get(
_application_fees_request()
.with_starting_after("last_record_id_from_first_page")
.with_created_gte(_A_START_DATE)
.with_created_lte(_NOW)
.with_limit(100)
.build(),
_application_fees_response().with_record(_an_application_fee()).with_record(_an_application_fee()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 3
@HttpMocker()
def test_given_no_state_when_read_then_return_ignore_lookback(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_application_fees_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_application_fees_response().with_record(_an_application_fee()).build(),
)
self._read(_config().with_start_date(_A_START_DATE).with_lookback_window_in_days(10))
# request matched http_mocker
@HttpMocker()
def test_when_read_then_add_cursor_field(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_application_fees_request().with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_application_fees_response().with_record(_an_application_fee()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE).with_lookback_window_in_days(10))
assert output.records[0].record.data["updated"] == output.records[0].record.data["created"]
@HttpMocker()
def test_given_slice_range_when_read_then_perform_multiple_requests(self, http_mocker: HttpMocker) -> None:
start_date = _NOW - timedelta(days=30)
slice_range = timedelta(days=20)
slice_datetime = start_date + slice_range
http_mocker.get(
_application_fees_request().with_created_gte(slice_datetime).with_created_lte(_NOW).with_limit(100).build(),
_application_fees_response().build(),
)
http_mocker.get(
_application_fees_request()
.with_created_gte(start_date)
.with_created_lte(slice_datetime - _AVOIDING_INCLUSIVE_BOUNDARIES)
.with_limit(100)
.build(),
_application_fees_response().build(),
)
self._read(_config().with_start_date(start_date).with_slice_range_in_days(slice_range.days))
@HttpMocker()
def test_given_http_status_400_when_read_then_stream_incomplete(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_application_fees_request().with_any_query_params().build(),
a_response_with_status(400),
)
output = self._read(_config())
assert_stream_did_not_run(output, _STREAM_NAME, "Your account is not set up to use Issuing")
@HttpMocker()
def test_given_http_status_401_when_read_then_config_error(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_application_fees_request().with_any_query_params().build(),
a_response_with_status(401),
)
output = self._read(_config(), expecting_exception=True)
# entrypoint caught unexpected exception as availability strategy was just raising http exceptions for not 400 and 403.
# now default_error_mapping will classify 401 as config error which seems correct
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
@HttpMocker()
def test_given_rate_limited_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_application_fees_request().with_any_query_params().build(),
[
a_response_with_status(429),
_application_fees_response().with_record(_an_application_fee()).build(),
],
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_once_before_200_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_application_fees_request().with_any_query_params().build(),
[a_response_with_status(500), _application_fees_response().with_record(_an_application_fee()).build()],
)
output = self._read(_config())
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_when_read_then_raise_config_error(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_application_fees_request().with_any_query_params().build(),
a_response_with_status(500),
)
with patch.object(HttpStatusErrorHandler, "max_retries", new=0):
output = self._read(_config(), expecting_exception=True)
# concurrent read processor handles exceptions as config errors after complete the max_retries
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
def _read(self, config: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return _read(config, SyncMode.full_refresh, expecting_exception=expecting_exception)
@freezegun.freeze_time(_NOW.isoformat())
| FullRefreshTest |
python | dask__dask | dask/dataframe/tseries/resample.py | {
"start": 5770,
"end": 5849
} | class ____(ResampleReduction):
how = "count"
fill_value = 0
| ResampleCount |
python | astropy__astropy | astropy/io/ascii/html.py | {
"start": 4199,
"end": 5345
} | class ____(core.TableOutputter):
"""
Output the HTML data as an ``astropy.table.Table`` object.
This subclass allows for the final table to contain
multidimensional columns (defined using the colspan attribute
of <th>).
"""
default_converters = [
core.convert_numpy(int),
core.convert_numpy(float),
core.convert_numpy(str),
]
def __call__(self, cols, meta):
"""
Process the data in multidimensional columns.
"""
new_cols = []
col_num = 0
while col_num < len(cols):
col = cols[col_num]
if hasattr(col, "colspan"):
# Join elements of spanned columns together into list of tuples
span_cols = cols[col_num : col_num + col.colspan]
new_col = core.Column(col.name)
new_col.str_vals = list(zip(*[x.str_vals for x in span_cols]))
new_cols.append(new_col)
col_num += col.colspan
else:
new_cols.append(col)
col_num += 1
return super().__call__(new_cols, meta)
| HTMLOutputter |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_S.py | {
"start": 3912,
"end": 5103
} | class ____(Benchmark):
r"""
Schaffer 2 objective function.
This class defines the Schaffer 2 [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Schaffer02}}(x) = 0.5 + \frac{\sin^2 (x_1^2 - x_2^2)^2 - 0.5}
{1 + 0.001(x_1^2 + x_2^2)^2}
with :math:`x_i \in [-100, 100]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x) = 0` for :math:`x = [0, 0]` for
:math:`i = 1, 2`
.. [1] Mishra, S. Some new test functions for global optimization and
performance of repulsive particle swarm method.
Munich Personal RePEc Archive, 2006, 2718
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-100.0] * self.N,
[100.0] * self.N))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
num = sin(x[0] ** 2 - x[1] ** 2) ** 2 - 0.5
den = (1 + 0.001 * (x[0] ** 2 + x[1] ** 2)) ** 2
return 0.5 + num / den
| Schaffer02 |
python | kamyu104__LeetCode-Solutions | Python/letter-tile-possibilities.py | {
"start": 1537,
"end": 2026
} | class ____(object):
def numTilePossibilities(self, tiles):
"""
:type tiles: str
:rtype: int
"""
def backtracking(counter):
total = 0
for k, v in counter.iteritems():
if not v:
continue
counter[k] -= 1
total += 1+backtracking(counter)
counter[k] += 1
return total
return backtracking(collections.Counter(tiles))
| Solution2 |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_massachusetts_zip.py | {
"start": 772,
"end": 1791
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_massachusetts_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_massachusetts_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidMassachusettsZip |
python | tensorflow__tensorflow | tensorflow/python/framework/extension_type_field.py | {
"start": 2846,
"end": 7118
} | class ____(
collections.namedtuple('ExtensionTypeField',
['name', 'value_type', 'default'])):
"""Metadata about a single field in a `tf.ExtensionType` object."""
NO_DEFAULT = Sentinel('ExtensionTypeField.NO_DEFAULT')
def __new__(cls, name, value_type, default=NO_DEFAULT):
"""Constructs a new ExtensionTypeField containing metadata for a single field.
Args:
name: The name of the new field (`str`). May not be a reserved name.
value_type: A python type expression constraining what values this field
can take.
default: The default value for the new field, or `NO_DEFAULT` if this
field has no default value.
Returns:
A new `ExtensionTypeField`.
Raises:
TypeError: If the type described by `value_type` is not currently
supported by `tf.ExtensionType`.
TypeError: If `default` is specified and its type does not match
`value_type`.
"""
try:
validate_field_value_type(value_type, allow_forward_references=True)
except TypeError as e:
raise TypeError(f'In field {name!r}: {e}') from e
if default is not cls.NO_DEFAULT:
default = _convert_value(default, value_type,
(f'default value for {name}',),
_ConversionContext.DEFAULT)
return super(ExtensionTypeField, cls).__new__(cls, name, value_type,
default)
@staticmethod
def is_reserved_name(name):
"""Returns true if `name` is a reserved name."""
return name in RESERVED_FIELD_NAMES or name.lower().startswith(
'_tf_extension_type')
def validate_field_value_type(value_type,
in_mapping_key=False,
allow_forward_references=False):
"""Checks that `value_type` contains only supported type annotations.
Args:
value_type: The type annotation to check.
in_mapping_key: True if `value_type` is nested in the key of a mapping.
allow_forward_references: If false, then raise an exception if a
`value_type` contains a forward reference (i.e., a string literal).
Raises:
TypeError: If `value_type` contains an unsupported type annotation.
"""
if isinstance(value_type, str) or type_annotations.is_forward_ref(value_type):
if allow_forward_references:
return
else:
raise TypeError(f'Unresolved forward reference {value_type!r}')
if value_type in (int, float, str, bytes, bool, None, _NoneType,
dtypes.DType):
return
elif (value_type in (tensor.Tensor, tensor_shape.TensorShape) or
(isinstance(value_type, type) and
_issubclass(value_type, composite_tensor.CompositeTensor))):
if in_mapping_key:
raise TypeError(f'Mapping had a key {value_type.__name__!r} with type '
f'{type(value_type).__name__!r}')
elif (type_annotations.is_generic_tuple(value_type) or
type_annotations.is_generic_union(value_type)):
type_args = type_annotations.get_generic_type_args(value_type)
if (len(type_args) == 2 and type_args[1] is Ellipsis and
type_annotations.is_generic_tuple(value_type)): # `Tuple[X, ...]`
validate_field_value_type(type_args[0], in_mapping_key,
allow_forward_references)
else:
for arg in type_annotations.get_generic_type_args(value_type):
validate_field_value_type(arg, in_mapping_key, allow_forward_references)
elif type_annotations.is_generic_mapping(value_type):
key_type, value_type = type_annotations.get_generic_type_args(value_type)
validate_field_value_type(key_type, True, allow_forward_references)
validate_field_value_type(value_type, in_mapping_key,
allow_forward_references)
elif isinstance(value_type, type):
raise TypeError(f'Unsupported type annotation {value_type.__name__!r}')
else:
raise TypeError(f'Unsupported type annotation {value_type!r}')
# ==============================================================================
# Type-checking & conversion for ExtensionTypeField values
# ==============================================================================
| ExtensionTypeField |
python | crytic__slither | slither/detectors/erc/erc20/arbitrary_send_erc20_no_permit.py | {
"start": 240,
"end": 1713
} | class ____(AbstractDetector):
"""
Detect when `msg.sender` is not used as `from` in transferFrom
"""
ARGUMENT = "arbitrary-send-erc20"
HELP = "transferFrom uses arbitrary `from`"
IMPACT = DetectorClassification.HIGH
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#arbitrary-from-in-transferfrom"
WIKI_TITLE = "Arbitrary `from` in transferFrom"
WIKI_DESCRIPTION = "Detect when `msg.sender` is not used as `from` in transferFrom."
WIKI_EXPLOIT_SCENARIO = """
```solidity
function a(address from, address to, uint256 amount) public {
erc20.transferFrom(from, to, am);
}
```
Alice approves this contract to spend her ERC20 tokens. Bob can call `a` and specify Alice's address as the `from` parameter in `transferFrom`, allowing him to transfer Alice's tokens to himself."""
WIKI_RECOMMENDATION = """
Use `msg.sender` as `from` in transferFrom.
"""
def _detect(self) -> List[Output]:
""""""
results: List[Output] = []
arbitrary_sends = ArbitrarySendErc20(self.compilation_unit)
arbitrary_sends.detect()
for node in arbitrary_sends.no_permit_results:
func = node.function
info: DETECTOR_INFO = [func, " uses arbitrary from in transferFrom: ", node, "\n"]
res = self.generate_result(info)
results.append(res)
return results
| ArbitrarySendErc20NoPermit |
python | tiangolo__fastapi | docs_src/cookie_param_models/tutorial002_pv1.py | {
"start": 112,
"end": 385
} | class ____(BaseModel):
class Config:
extra = "forbid"
session_id: str
fatebook_tracker: Union[str, None] = None
googall_tracker: Union[str, None] = None
@app.get("/items/")
async def read_items(cookies: Cookies = Cookie()):
return cookies
| Cookies |
python | altair-viz__altair | altair/vegalite/v6/schema/mixins.py | {
"start": 54970,
"end": 61707
} | class ____(SchemaBase):
"""
ErrorBandDef schema wrapper.
Parameters
----------
band : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig`
borders : bool, dict, :class:`BarConfig`, :class:`AreaConfig`, :class:`LineConfig`, :class:`MarkConfig`, :class:`RectConfig`, :class:`TickConfig`, :class:`AnyMarkConfig`
clip : bool
Whether a composite mark be clipped to the enclosing group's width and height.
color : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple']
Default color.
**Default value:** ``"#4682b4"``
**Note:**
* This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
* The ``fill`` and ``stroke`` properties have higher precedence than ``color`` and
will override ``color``.
extent : :class:`ErrorBarExtent`, Literal['ci', 'iqr', 'stderr', 'stdev']
The extent of the band. Available options include:
* ``"ci"``: Extend the band to the 95% bootstrapped confidence interval of the mean.
* ``"stderr"``: The size of band are set to the value of standard error, extending
from the mean.
* ``"stdev"``: The size of band are set to the value of standard deviation,
extending from the mean.
* ``"iqr"``: Extend the band to the q1 and q3.
**Default value:** ``"stderr"``.
interpolate : :class:`Interpolate`, Literal['basis', 'basis-open', 'basis-closed', 'bundle', 'cardinal', 'cardinal-open', 'cardinal-closed', 'catmull-rom', 'linear', 'linear-closed', 'monotone', 'natural', 'step', 'step-before', 'step-after']
The line interpolation method for the error band. One of the following:
* ``"linear"``: piecewise linear segments, as in a polyline.
* ``"linear-closed"``: close the linear segments to form a polygon.
* ``"step"``: a piecewise constant function (a step function) consisting of
alternating horizontal and vertical lines. The y-value changes at the midpoint of
each pair of adjacent x-values.
* ``"step-before"``: a piecewise constant function (a step function) consisting of
alternating horizontal and vertical lines. The y-value changes before the x-value.
* ``"step-after"``: a piecewise constant function (a step function) consisting of
alternating horizontal and vertical lines. The y-value changes after the x-value.
* ``"basis"``: a B-spline, with control point duplication on the ends.
* ``"basis-open"``: an open B-spline; may not intersect the start or end.
* ``"basis-closed"``: a closed B-spline, as in a loop.
* ``"cardinal"``: a Cardinal spline, with control point duplication on the ends.
* ``"cardinal-open"``: an open Cardinal spline; may not intersect the start or end,
but will intersect other control points.
* ``"cardinal-closed"``: a closed Cardinal spline, as in a loop.
* ``"bundle"``: equivalent to basis, except the tension parameter is used to
straighten the spline.
* ``"monotone"``: cubic interpolation that preserves monotonicity in y.
opacity : float
The opacity (value between [0,1]) of the mark.
orient : :class:`Orientation`, Literal['horizontal', 'vertical']
Orientation of the error band. This is normally automatically determined, but can be
specified when the orientation is ambiguous and cannot be automatically determined.
tension : float
The tension parameter for the interpolation type of the error band.
"""
_schema = {"$ref": "#/definitions/ErrorBandDef"}
def __init__(
self,
band: Optional[bool | SchemaBase | Map] = Undefined,
borders: Optional[bool | SchemaBase | Map] = Undefined,
clip: Optional[bool] = Undefined,
color: Optional[str | Parameter | SchemaBase | Map | ColorName_T] = Undefined,
extent: Optional[SchemaBase | ErrorBarExtent_T] = Undefined,
interpolate: Optional[SchemaBase | Interpolate_T] = Undefined,
opacity: Optional[float] = Undefined,
orient: Optional[SchemaBase | Orientation_T] = Undefined,
tension: Optional[float] = Undefined,
**kwds,
):
super().__init__(
band=band,
borders=borders,
clip=clip,
color=color,
extent=extent,
interpolate=interpolate,
opacity=opacity,
orient=orient,
tension=tension,
**kwds,
)
| _ErrorBandDef |
python | python-attrs__attrs | tests/test_make.py | {
"start": 48877,
"end": 52392
} | class ____:
"""
Tests for metadata handling.
"""
@given(sorted_lists_of_attrs)
def test_metadata_present(self, list_of_attrs):
"""
Assert dictionaries are copied and present.
"""
C = make_class("C", dict(zip(gen_attr_names(), list_of_attrs)))
for hyp_attr, class_attr in zip(list_of_attrs, fields(C)):
if hyp_attr.metadata is None:
# The default is a singleton empty dict.
assert class_attr.metadata is not None
assert len(class_attr.metadata) == 0
else:
assert hyp_attr.metadata == class_attr.metadata
# Once more, just to assert getting items and iteration.
for k in class_attr.metadata:
assert hyp_attr.metadata[k] == class_attr.metadata[k]
assert hyp_attr.metadata.get(k) == class_attr.metadata.get(
k
)
@given(simple_classes(), text())
def test_metadata_immutability(self, C, string):
"""
The metadata dict should be best-effort immutable.
"""
for a in fields(C):
with pytest.raises(TypeError):
a.metadata[string] = string
with pytest.raises(AttributeError):
a.metadata.update({string: string})
with pytest.raises(AttributeError):
a.metadata.clear()
with pytest.raises(AttributeError):
a.metadata.setdefault(string, string)
for k in a.metadata:
# For some reason, MappingProxyType throws an IndexError for
# deletes on a large integer key.
with pytest.raises((TypeError, IndexError)):
del a.metadata[k]
with pytest.raises(AttributeError):
a.metadata.pop(k)
with pytest.raises(AttributeError):
a.metadata.popitem()
@given(lists(simple_attrs_without_metadata, min_size=2, max_size=5))
def test_empty_metadata_singleton(self, list_of_attrs):
"""
All empty metadata attributes share the same empty metadata dict.
"""
C = make_class("C", dict(zip(gen_attr_names(), list_of_attrs)))
for a in fields(C)[1:]:
assert a.metadata is fields(C)[0].metadata
@given(lists(simple_attrs_without_metadata, min_size=2, max_size=5))
def test_empty_countingattr_metadata_independent(self, list_of_attrs):
"""
All empty metadata attributes are independent before ``@attr.s``.
"""
for x, y in itertools.combinations(list_of_attrs, 2):
assert x.metadata is not y.metadata
@given(lists(simple_attrs_with_metadata(), min_size=2, max_size=5))
def test_not_none_metadata(self, list_of_attrs):
"""
Non-empty metadata attributes exist as fields after ``@attr.s``.
"""
C = make_class("C", dict(zip(gen_attr_names(), list_of_attrs)))
assert len(fields(C)) > 0
for cls_a, raw_a in zip(fields(C), list_of_attrs):
assert cls_a.metadata != {}
assert cls_a.metadata == raw_a.metadata
def test_metadata(self):
"""
If metadata that is not None is passed, it is used.
This is necessary for coverage because the previous test is
hypothesis-based.
"""
md = {}
a = attr.ib(metadata=md)
assert md is a.metadata
| TestMetadata |
python | pennersr__django-allauth | allauth/socialaccount/providers/stripe/views.py | {
"start": 181,
"end": 1015
} | class ____(OAuth2Adapter):
provider_id = "stripe"
access_token_url = "https://connect.stripe.com/oauth/token" # nosec
authorize_url = "https://connect.stripe.com/oauth/authorize"
profile_url = "https://api.stripe.com/v1/accounts/%s"
def complete_login(self, request, app, token, response, **kwargs):
headers = {"Authorization": "Bearer {0}".format(token.token)}
resp = (
get_adapter()
.get_requests_session()
.get(self.profile_url % response.get("stripe_user_id"), headers=headers)
)
extra_data = resp.json()
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(StripeOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(StripeOAuth2Adapter)
| StripeOAuth2Adapter |
python | patrys__httmock | httmock.py | {
"start": 471,
"end": 3991
} | class ____(object):
def __init__(self, res):
self.headers = res.headers
def get_all(self, name, failobj=None):
return self.getheaders(name)
def getheaders(self, name):
return [self.headers.get(name)]
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None, stream=False, http_vsn=11):
res = requests.Response()
res.status_code = status_code
if isinstance(content, (dict, list)):
content = json.dumps(content).encode('utf-8')
if isinstance(content, text_type):
content = content.encode('utf-8')
res._content = content
res._content_consumed = content
res.headers = structures.CaseInsensitiveDict(headers or {})
res.encoding = utils.get_encoding_from_headers(res.headers)
res.reason = reason
res.elapsed = datetime.timedelta(elapsed)
res.request = request
if hasattr(request, 'url'):
res.url = request.url
if isinstance(request.url, bytes):
res.url = request.url.decode('utf-8')
if 'set-cookie' in res.headers:
res.cookies.extract_cookies(cookies.MockResponse(Headers(res)),
cookies.MockRequest(request))
if stream:
res.raw = BytesIO(content)
else:
res.raw = BytesIO(b'')
res.raw.version = http_vsn
# normally this closes the underlying connection,
# but we have nothing to free.
res.close = lambda *args, **kwargs: None
return res
def all_requests(func):
@wraps(func)
def inner(*args, **kwargs):
return func(*args, **kwargs)
return inner
def urlmatch(scheme=None, netloc=None, path=None, method=None, query=None):
def decorator(func):
@wraps(func)
def inner(self_or_url, url_or_request, *args, **kwargs):
if isinstance(self_or_url, urlparse.SplitResult):
url = self_or_url
request = url_or_request
else:
url = url_or_request
request = args[0]
if scheme is not None and scheme != url.scheme:
return
if netloc is not None and not re.match(netloc, url.netloc):
return
if path is not None and not re.match(path, url.path):
return
if query is not None and not re.match(query, url.query):
return
if method is not None and method.upper() != request.method:
return
return func(self_or_url, url_or_request, *args, **kwargs)
return inner
return decorator
def handler_init_call(handler):
setattr(handler, 'call', {
'count': 0,
'called': False,
'requests': []
})
def handler_clean_call(handler):
if hasattr(handler, 'call'):
handler.call.update({
'count': 0,
'called': False,
'requests': []
})
def handler_called(handler, *args, **kwargs):
try:
return handler(*args, **kwargs)
finally:
handler.call['count'] += 1
handler.call['called'] = True
handler.call['requests'].append(args[1])
def remember_called(func):
handler_init_call(func)
@wraps(func)
def inner(*args, **kwargs):
return handler_called(func, *args, **kwargs)
return inner
def first_of(handlers, *args, **kwargs):
for handler in handlers:
res = handler(*args, **kwargs)
if res is not None:
return res
| Headers |
python | huggingface__transformers | src/transformers/models/deformable_detr/modular_deformable_detr.py | {
"start": 281,
"end": 3739
} | class ____(DetrImageProcessorFast):
def post_process_object_detection(
self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, list[tuple]] = None, top_k: int = 100
):
"""
Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x,
top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch.
Args:
outputs ([`DetrObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
(height, width) of each image in the batch. If left to None, predictions will not be resized.
top_k (`int`, *optional*, defaults to 100):
Keep only top k bounding boxes before filtering by thresholding.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
out_logits, out_bbox = outputs.logits, outputs.pred_boxes
if target_sizes is not None:
if len(out_logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
prob = out_logits.sigmoid()
prob = prob.view(out_logits.shape[0], -1)
k_value = min(top_k, prob.size(1))
topk_values, topk_indexes = torch.topk(prob, k_value, dim=1)
scores = topk_values
topk_boxes = torch.div(topk_indexes, out_logits.shape[2], rounding_mode="floor")
labels = topk_indexes % out_logits.shape[2]
boxes = center_to_corners_format(out_bbox)
boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
if isinstance(target_sizes, list):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = []
for s, l, b in zip(scores, labels, boxes):
score = s[s > threshold]
label = l[s > threshold]
box = b[s > threshold]
results.append({"scores": score, "labels": label, "boxes": box})
return results
def post_process_instance_segmentation(self):
raise NotImplementedError("Segmentation post-processing is not implemented for Deformable DETR yet.")
def post_process_semantic_segmentation(self):
raise NotImplementedError("Semantic segmentation post-processing is not implemented for Deformable DETR yet.")
def post_process_panoptic_segmentation(self):
raise NotImplementedError("Panoptic segmentation post-processing is not implemented for Deformable DETR yet.")
__all__ = ["DeformableDetrImageProcessorFast"]
| DeformableDetrImageProcessorFast |
python | walkccc__LeetCode | solutions/763. Partition Labels/763.py | {
"start": 0,
"end": 422
} | class ____:
def partitionLabels(self, s: str) -> list[int]:
ans = []
letterToRightmostIndex = {c: i for i, c in enumerate(s)}
l = 0 # the leftmost index of the current running string
r = 0 # the rightmost index of the current running string
for i, c in enumerate(s):
r = max(r, letterToRightmostIndex[c])
if i == r:
ans.append(r - l + 1)
l = r + 1
return ans
| Solution |
python | viewflow__viewflow | tests/workflow/test_fields__flow.py | {
"start": 642,
"end": 679
} | class ____(Flow):
pass
| TestFieldsFlow |
python | ray-project__ray | python/ray/util/state/common.py | {
"start": 33049,
"end": 36004
} | class ____(StateSchema):
"""Object State"""
#: The id of the object.
object_id: str = state_column(filterable=True)
#: The size of the object in mb.
object_size: int = state_column(filterable=True, format_fn=Humanify.memory)
#: The status of the task that creates the object.
#:
#: - NIL: We don't have a status for this task because we are not the owner or the
#: task metadata has already been deleted.
#: - WAITING_FOR_DEPENDENCIES: The task is waiting for its dependencies
#: to be created.
#: - SCHEDULED: All dependencies have been created and the task is
#: scheduled to execute.
#: It could be because the task is waiting for resources,
#: runtime environmenet creation, fetching dependencies to the
#: local node, and etc..
#: - FINISHED: The task finished successfully.
#: - WAITING_FOR_EXECUTION: The task is scheduled properly and
#: waiting for execution. It includes time to deliver the task
#: to the remote worker + queueing time from the execution side.
#: - RUNNING: The task that is running.
task_status: TypeTaskStatus = state_column(filterable=True)
#: The number of times the task has been executed (including the current execution)
attempt_number: int = state_column(filterable=True)
#: The reference type of the object.
#: See :ref:`Debugging with Ray Memory <debug-with-ray-memory>` for more details.
#:
#: - ACTOR_HANDLE: The reference is an actor handle.
#: - PINNED_IN_MEMORY: The object is pinned in memory, meaning there's
#: in-flight `ray.get` on this reference.
#: - LOCAL_REFERENCE: There's a local reference (e.g., Python reference)
#: to this object reference. The object won't be GC'ed until all of them is gone.
#: - USED_BY_PENDING_TASK: The object reference is passed to other tasks. E.g.,
#: `a = ray.put()` -> `task.remote(a)`. In this case, a is used by a
#: pending task `task`.
#: - CAPTURED_IN_OBJECT: The object is serialized by other objects. E.g.,
#: `a = ray.put(1)` -> `b = ray.put([a])`. a is serialized within a list.
#: - UNKNOWN_STATUS: The object ref status is unkonwn.
reference_type: TypeReferenceType = state_column(filterable=True)
#: The callsite of the object.
call_site: str = state_column(filterable=True)
#: The worker type that creates the object.
#:
#: - WORKER: The regular Ray worker process that executes tasks or
#: instantiates an actor.
#: - DRIVER: The driver (Python script that calls `ray.init`).
#: - SPILL_WORKER: The worker that spills objects.
#: - RESTORE_WORKER: The worker that restores objects.
type: TypeWorkerType = state_column(filterable=True)
#: The pid of the owner.
pid: int = state_column(filterable=True)
#: The ip address of the owner.
ip: str = state_column(filterable=True)
@dataclass(init=not IS_PYDANTIC_2)
| ObjectState |
python | ray-project__ray | python/ray/serve/_private/test_utils.py | {
"start": 1284,
"end": 1864
} | class ____(TimerBase):
def __init__(self, start_time: Optional[float] = None):
self._lock = threading.Lock()
self.reset(start_time=start_time)
def reset(self, start_time: Optional[float] = None):
if start_time is None:
start_time = time.time()
self._curr = start_time
def time(self) -> float:
return self._curr
def advance(self, by: float):
with self._lock:
self._curr += by
def realistic_sleep(self, amt: float):
with self._lock:
self._curr += amt + 0.001
| MockTimer |
python | urllib3__urllib3 | test/contrib/emscripten/conftest.py | {
"start": 1494,
"end": 5203
} | class ____:
http_port: int
https_port: int
http_host: str
pyodide_dist_dir: Path
def _get_coverage_code() -> tuple[str, str]:
begin = textwrap.dedent(
"""
import coverage
_coverage = coverage.Coverage(source_pkgs=["urllib3"])
_coverage.start()
"""
)
end = textwrap.dedent(
"""
_coverage.stop()
_coverage.save()
_coverage_datafile = open(".coverage", "rb")
_coverage_outdata = _coverage_datafile.read()
_coverage_datafile.close()
# avoid polluting main namespace too much
import js as _coverage_js
# convert to js Array (as default conversion is TypedArray which does
# bad things in firefox)
_coverage_js.Array.from_(_coverage_outdata)
"""
)
return begin, end
def _get_jspi_monkeypatch_code(runtime: str, prefer_jspi: bool) -> tuple[str, str]:
"""
Return code to make Pyodide think JSPI is disabled in Chrome when a
test needs this to check some code paths.
"""
if runtime != "chrome" or prefer_jspi:
return "", ""
monkeypatch_code = textwrap.dedent(
"""
import pyodide.ffi
original_can_run_sync = pyodide.ffi.can_run_sync
if pyodide.ffi.can_run_sync():
pyodide.ffi.can_run_sync = lambda: False
"""
)
unmonkeypatch_code = "pyodide.ffi.can_run_sync = original_can_run_sync"
return monkeypatch_code, unmonkeypatch_code
@pytest.fixture()
def selenium_with_jspi_if_possible(
    request: pytest.FixtureRequest, runtime: str, prefer_jspi: bool
) -> Generator[Any]:
    """Yield a selenium wrapper, preferring the JSPI-enabled variant.

    On Node.js with JSPI requested, resolve the dedicated ``selenium_jspi``
    fixture; otherwise use plain ``selenium``. When Chrome must appear to
    have JSPI disabled, apply the Pyodide monkeypatch before the test and
    always undo it on teardown.
    """
    if runtime == "node" and prefer_jspi:
        fixture_name = "selenium_jspi"
    else:
        fixture_name = "selenium"
    # Resolve the chosen fixture dynamically by name.
    selenium_obj = request.getfixturevalue(fixture_name)
    jspi_monkeypatch_code, jspi_unmonkeypatch_code = _get_jspi_monkeypatch_code(
        runtime, prefer_jspi
    )
    if jspi_monkeypatch_code:
        selenium_obj.run_async(jspi_monkeypatch_code)
    try:
        yield selenium_obj
    finally:
        # Restore the original can_run_sync even if the test failed.
        if jspi_unmonkeypatch_code:
            selenium_obj.run_async(jspi_unmonkeypatch_code)
@pytest.fixture()
def selenium_coverage(
    selenium_with_jspi_if_possible: Any, testserver_http: PyodideServerInfo
) -> Generator[Any]:
    """Selenium fixture that installs urllib3 in Pyodide and collects coverage.

    Installs the locally-built urllib3 wheel plus ``coverage`` into the
    browser, starts a coverage session before the test, and on teardown
    pulls the raw ``.coverage`` bytes back out of the browser and writes
    them to a host-side file.
    """

    def _install_packages(self: Any) -> None:
        if self.browser == "node":
            # stop Node.js checking our https certificates
            self.run_js('process.env["NODE_TLS_REJECT_UNAUTHORIZED"] = 0;')
        # install urllib3 from our test server, rather than from existing package
        result = self.run_js(
            f'await pyodide.loadPackage("http://{testserver_http.http_host}:{testserver_http.http_port}/dist/urllib3.whl")'
        )
        print("Installed package:", result)
        self.run_js("await pyodide.loadPackage('coverage')")

    # Bind _install_packages onto the selenium wrapper as a method so it can
    # be re-invoked by tests that reload the Pyodide environment.
    setattr(
        selenium_with_jspi_if_possible,
        "_install_packages",
        _install_packages.__get__(
            selenium_with_jspi_if_possible, selenium_with_jspi_if_possible.__class__
        ),
    )
    selenium_with_jspi_if_possible._install_packages()
    coverage_begin, coverage_end = _get_coverage_code()
    selenium_with_jspi_if_possible.run_js(
        f"await pyodide.runPythonAsync(`{coverage_begin}`)"
    )
    yield selenium_with_jspi_if_possible
    # on teardown, save _coverage output
    coverage_out_binary = bytes(
        selenium_with_jspi_if_possible.run_js(
            f"return await pyodide.runPythonAsync(`{coverage_end}`)"
        )
    )
    with open(f"{_get_coverage_filename('.coverage.emscripten.')}", "wb") as outfile:
        outfile.write(coverage_out_binary)
| PyodideServerInfo |
python | redis__redis-py | tests/test_scripting.py | {
"start": 549,
"end": 1644
class ____:
    """Unit tests targeting the ``Script`` wrapper directly.

    Most behavioral coverage lives in ``TestScripting``; here we only pin
    construction details: script text handling, SHA computation, and
    encoder lookup.
    """

    @pytest.fixture()
    def script_str(self):
        return "fake-script"

    @pytest.fixture()
    def script_bytes(self):
        return b"\xcf\x84o\xcf\x81\xce\xbdo\xcf\x82"

    def test_script_text(self, r, script_str, script_bytes):
        # The wrapper must keep str scripts as str and bytes scripts as bytes.
        assert Script(r, script_str).script == "fake-script"
        assert Script(r, script_bytes).script == b"\xcf\x84o\xcf\x81\xce\xbdo\xcf\x82"

    def test_string_script_sha(self, r, script_str):
        sha = Script(r, script_str).sha
        assert sha == "505e4245f0866b60552741b3cce9a0c3d3b66a87"

    def test_bytes_script_sha(self, r, script_bytes):
        sha = Script(r, script_bytes).sha
        assert sha == "1329344e6bf995a35a8dc57ab1a6af8b2d54a763"

    def test_encoder(self, r, script_bytes):
        encoder = Script(r, script_bytes).get_encoder()
        assert encoder is not None
        assert encoder.encode("fake-script") == b"fake-script"
| TestScript |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_requests/tickets_request_builder.py | {
"start": 208,
"end": 1201
class ____(ZendeskSupportBaseRequestBuilder):
    """Request builder for Zendesk's incremental ticket cursor export.

    Targets ``incremental/tickets/cursor.json`` on the ``d3v-airbyte``
    subdomain. Pagination state is either a ``start_time`` (first request)
    or a ``cursor`` (subsequent pages); when both are set, the cursor takes
    precedence.
    """

    @classmethod
    def tickets_endpoint(cls, authenticator: Authenticator) -> "TicketsRequestBuilder":
        """Create a builder for the tickets endpoint with *authenticator* attached."""
        return cls("d3v-airbyte", "incremental/tickets/cursor.json").with_authenticator(authenticator)

    def __init__(self, subdomain: str, resource: str) -> None:
        super().__init__(subdomain, resource)
        # Pagination state; None until set via the with_* builders.
        # (Fixed: previously annotated `: int` / `: str` while holding None.)
        self._start_time = None  # int | None
        self._cursor = None  # str | None

    @property
    def query_params(self):
        """Return the query parameters for the request.

        Always returns a dict (fixed: previously the property could fall
        through without returning when neither pagination field was set).
        ``cursor`` supersedes ``start_time`` when both are present.
        """
        params = super().query_params or {}
        if self._cursor:
            params["cursor"] = self._cursor
            return params
        if self._start_time:
            params["start_time"] = self._start_time
        return params

    def with_start_time(self, start_time: int) -> "TicketsRequestBuilder":
        """Set the ``start_time`` used for the first incremental request."""
        self._start_time = start_time
        return self

    def with_cursor(self, cursor: str) -> "TicketsRequestBuilder":
        """Set the pagination ``cursor`` returned by a previous response."""
        self._cursor = cursor
        return self
| TicketsRequestBuilder |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/datatree.py | {
"start": 39834,
"end": 48420
class ____(DataObserver):
    """Observer that records the choices of a test case into a ``DataTree``.

    Walks the tree in lockstep with the draws being made: extends nodes with
    new values, splits nodes where an observed value diverges from history,
    and raises ``FlakyStrategyDefinition`` / ``FlakyReplay`` whenever the
    replayed structure disagrees with what the tree recorded previously.
    """

    def __init__(self, tree: DataTree):
        # this attr isn't read, but is very useful for local debugging flaky
        # errors, with
        # `from hypothesis.vendor import pretty; print(pretty.pretty(self._root))`
        self._root = tree.root
        self._current_node: TreeNode = tree.root
        self._index_in_current_node: int = 0
        # Every node visited so far, in order; used to propagate exhaustion
        # back up the path after each draw.
        self._trail: list[TreeNode] = [self._current_node]
        self.killed: bool = False

    # The per-type draw hooks all funnel into draw_value with the type tag.
    def draw_integer(
        self, value: int, *, was_forced: bool, constraints: IntegerConstraints
    ) -> None:
        self.draw_value(
            "integer", value, was_forced=was_forced, constraints=constraints
        )

    def draw_float(
        self, value: float, *, was_forced: bool, constraints: FloatConstraints
    ) -> None:
        self.draw_value("float", value, was_forced=was_forced, constraints=constraints)

    def draw_string(
        self, value: str, *, was_forced: bool, constraints: StringConstraints
    ) -> None:
        self.draw_value("string", value, was_forced=was_forced, constraints=constraints)

    def draw_bytes(
        self, value: bytes, *, was_forced: bool, constraints: BytesConstraints
    ) -> None:
        self.draw_value("bytes", value, was_forced=was_forced, constraints=constraints)

    def draw_boolean(
        self, value: bool, *, was_forced: bool, constraints: BooleanConstraints
    ) -> None:
        self.draw_value(
            "boolean", value, was_forced=was_forced, constraints=constraints
        )

    def draw_value(
        self,
        choice_type: ChoiceTypeT,
        value: ChoiceT,
        *,
        was_forced: bool,
        constraints: ChoiceConstraintsT,
    ) -> None:
        """Record one draw, advancing/splitting the tree as needed."""
        i = self._index_in_current_node
        self._index_in_current_node += 1

        node = self._current_node
        # Floats are stored by bit pattern so NaNs and -0.0 compare sanely.
        if isinstance(value, float):
            value = float_to_int(value)

        assert len(node.constraints) == len(node.values) == len(node.choice_types)
        if i < len(node.values):
            # Replaying within an existing node: type/constraints must match.
            if (
                choice_type != node.choice_types[i]
                or constraints != node.constraints[i]
            ):
                raise FlakyStrategyDefinition(_FLAKY_STRAT_MSG)
            # Note that we don't check whether a previously
            # forced value is now free. That will be caught
            # if we ever split the node there, but otherwise
            # may pass silently. This is acceptable because it
            # means we skip a hash set lookup on every
            # draw and that's a pretty niche failure mode.
            if was_forced and i not in node.forced:
                raise FlakyStrategyDefinition(_FLAKY_STRAT_MSG)
            if value != node.values[i]:
                # Divergence from history: split this node into a Branch.
                node.split_at(i)
                assert i == len(node.values)
                new_node = TreeNode()
                assert isinstance(node.transition, Branch)
                node.transition.children[value] = new_node
                self._current_node = new_node
                self._index_in_current_node = 0
        else:
            trans = node.transition
            if trans is None:
                # First time past the end of this node: append the new draw.
                node.choice_types.append(choice_type)
                node.constraints.append(constraints)
                node.values.append(value)
                if was_forced:
                    node.mark_forced(i)
                # generate_novel_prefix assumes the following invariant: any one
                # of the series of draws in a particular node can vary, i.e. the
                # max number of children is at least 2. However, some draws are
                # pseudo-choices and only have a single value, such as
                # integers(0, 0).
                #
                # Currently, we address this by forcefully splitting such
                # single-valued nodes into a transition when we see them. An
                # exception to this is if it was forced: forced pseudo-choices
                # do not cause the above issue because they inherently cannot
                # vary, and moreover they trip other invariants about never
                # splitting forced nodes.
                #
                # An alternative is not writing such choices to the tree at
                # all, and thus guaranteeing that each node has at least 2 max
                # children.
                if (
                    compute_max_children(choice_type, constraints) == 1
                    and not was_forced
                ):
                    node.split_at(i)
                    assert isinstance(node.transition, Branch)
                    self._current_node = node.transition.children[value]
                    self._index_in_current_node = 0
            elif isinstance(trans, Conclusion):
                assert trans.status != Status.OVERRUN
                # We tried to draw where history says we should have
                # stopped
                raise FlakyStrategyDefinition(_FLAKY_STRAT_MSG)
            else:
                assert isinstance(trans, Branch), trans
                if choice_type != trans.choice_type or constraints != trans.constraints:
                    raise FlakyStrategyDefinition(_FLAKY_STRAT_MSG)
                try:
                    self._current_node = trans.children[value]
                except KeyError:
                    # New branch value: lazily create its child node.
                    self._current_node = trans.children.setdefault(value, TreeNode())
                self._index_in_current_node = 0
        if self._trail[-1] is not self._current_node:
            self._trail.append(self._current_node)

    def kill_branch(self) -> None:
        """Mark this part of the tree as not worth re-exploring."""
        if self.killed:
            return

        self.killed = True

        if self._index_in_current_node < len(self._current_node.values) or (
            self._current_node.transition is not None
            and not isinstance(self._current_node.transition, Killed)
        ):
            raise FlakyStrategyDefinition(_FLAKY_STRAT_MSG)

        if self._current_node.transition is None:
            self._current_node.transition = Killed(TreeNode())
            self.__update_exhausted()

        self._current_node = self._current_node.transition.next_node
        self._index_in_current_node = 0
        self._trail.append(self._current_node)

    def conclude_test(
        self, status: Status, interesting_origin: InterestingOrigin | None
    ) -> None:
        """Says that ``status`` occurred at node ``node``. This updates the
        node if necessary and checks for consistency."""
        if status == Status.OVERRUN:
            return
        i = self._index_in_current_node
        node = self._current_node

        if i < len(node.values) or isinstance(node.transition, Branch):
            # The test concluded in the middle of recorded history.
            raise FlakyStrategyDefinition(_FLAKY_STRAT_MSG)

        new_transition = Conclusion(status, interesting_origin)

        if node.transition is not None and node.transition != new_transition:
            # As an, I'm afraid, horrible bodge, we deliberately ignore flakiness
            # where tests go from interesting to valid, because it's much easier
            # to produce good error messages for these further up the stack.
            if isinstance(node.transition, Conclusion) and (
                node.transition.status != Status.INTERESTING
                or new_transition.status != Status.VALID
            ):
                old_origin = node.transition.interesting_origin
                new_origin = new_transition.interesting_origin
                raise FlakyReplay(
                    f"Inconsistent results from replaying a test case!\n"
                    f" last: {node.transition.status.name} from {old_origin}\n"
                    f" this: {new_transition.status.name} from {new_origin}",
                    (old_origin, new_origin),
                )
        else:
            node.transition = new_transition

        assert node is self._trail[-1]
        node.check_exhausted()
        assert len(node.values) > 0 or node.check_exhausted()

        if not self.killed:
            self.__update_exhausted()

    def __update_exhausted(self) -> None:
        for t in reversed(self._trail):
            # Any node we've traversed might have now become exhausted.
            # We check from the right. As soon as we hit a node that
            # isn't exhausted, this automatically implies that all of
            # its parents are not exhausted, so we stop.
            if not t.check_exhausted():
                break
python | MorvanZhou__Reinforcement-learning-with-tensorflow | contents/11_Dyna_Q/maze_env.py | {
"start": 532,
"end": 3898
class ____(tk.Tk, object):
    """Tkinter-rendered grid-world maze environment with a gym-like API.

    A red square (the agent) moves on a grid of UNIT-pixel cells; two black
    cells are terminal "hells" (reward -1) and a yellow oval is the goal
    (reward +1). ``reset``/``step``/``render`` mimic the classic RL
    environment interface; states are canvas coordinates of the agent rect.
    """

    def __init__(self):
        super(Maze, self).__init__()
        # NOTE(review): action_space lists ['u', 'd', 'l', 'r'] but step()
        # maps index 2 -> right and 3 -> left; the labels and the movement
        # code disagree in order — confirm which is intended.
        self.action_space = ['u', 'd', 'l', 'r']
        self.n_actions = len(self.action_space)
        self.title('maze')
        # NOTE(review): both dimensions use MAZE_H; presumably the width
        # should use MAZE_W (harmless while MAZE_W == MAZE_H) — confirm.
        self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT))
        self._build_maze()

    def _build_maze(self):
        """Create the canvas, grid lines, hells, goal oval, and agent rect."""
        self.canvas = tk.Canvas(self, bg='white',
                                height=MAZE_H * UNIT,
                                width=MAZE_W * UNIT)

        # create grids
        for c in range(0, MAZE_W * UNIT, UNIT):
            x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT
            self.canvas.create_line(x0, y0, x1, y1)
        for r in range(0, MAZE_H * UNIT, UNIT):
            x0, y0, x1, y1 = 0, r, MAZE_H * UNIT, r
            self.canvas.create_line(x0, y0, x1, y1)

        # create origin (center of the top-left cell)
        origin = np.array([20, 20])

        # hell
        hell1_center = origin + np.array([UNIT * 2, UNIT])
        self.hell1 = self.canvas.create_rectangle(
            hell1_center[0] - 15, hell1_center[1] - 15,
            hell1_center[0] + 15, hell1_center[1] + 15,
            fill='black')
        # hell
        hell2_center = origin + np.array([UNIT, UNIT * 2])
        self.hell2 = self.canvas.create_rectangle(
            hell2_center[0] - 15, hell2_center[1] - 15,
            hell2_center[0] + 15, hell2_center[1] + 15,
            fill='black')

        # create oval (the goal)
        oval_center = origin + UNIT * 2
        self.oval = self.canvas.create_oval(
            oval_center[0] - 15, oval_center[1] - 15,
            oval_center[0] + 15, oval_center[1] + 15,
            fill='yellow')

        # create red rect (the agent, starting at the origin cell)
        self.rect = self.canvas.create_rectangle(
            origin[0] - 15, origin[1] - 15,
            origin[0] + 15, origin[1] + 15,
            fill='red')

        # pack all
        self.canvas.pack()

    def reset(self):
        """Move the agent back to the start cell and return its coordinates."""
        self.update()
        time.sleep(0.5)
        self.canvas.delete(self.rect)
        origin = np.array([20, 20])
        self.rect = self.canvas.create_rectangle(
            origin[0] - 15, origin[1] - 15,
            origin[0] + 15, origin[1] + 15,
            fill='red')
        # return observation
        return self.canvas.coords(self.rect)

    def step(self, action):
        """Apply *action*, returning (next_state, reward, done).

        Movement is clipped at the grid borders; stepping onto a hell or
        the goal terminates the episode.
        """
        s = self.canvas.coords(self.rect)
        base_action = np.array([0, 0])
        if action == 0:   # up
            if s[1] > UNIT:
                base_action[1] -= UNIT
        elif action == 1:   # down
            if s[1] < (MAZE_H - 1) * UNIT:
                base_action[1] += UNIT
        elif action == 2:   # right
            if s[0] < (MAZE_W - 1) * UNIT:
                base_action[0] += UNIT
        elif action == 3:   # left
            if s[0] > UNIT:
                base_action[0] -= UNIT

        self.canvas.move(self.rect, base_action[0], base_action[1])  # move agent

        s_ = self.canvas.coords(self.rect)  # next state

        # reward function
        if s_ == self.canvas.coords(self.oval):
            reward = 1
            done = True
        elif s_ in [self.canvas.coords(self.hell1), self.canvas.coords(self.hell2)]:
            reward = -1
            done = True
        else:
            reward = 0
            done = False

        return s_, reward, done

    def render(self):
        """Refresh the Tk window (sleep removed for speed)."""
        # time.sleep(0.1)
        self.update()
| Maze |
python | getsentry__sentry | src/sentry/issues/endpoints/organization_group_index_stats.py | {
"start": 1076,
"end": 5962
class ____(OrganizationEndpoint):
    """Private endpoint returning stats for an explicit list of issue groups."""

    publish_status = {
        "GET": ApiPublishStatus.PRIVATE,
    }
    permission_classes = (OrganizationEventPermission,)
    enforce_rate_limit = True
    owner = ApiOwner.ISSUES
    # Tight per-second limits: stats queries fan out to Snuba and are costly.
    rate_limits = RateLimitConfig(
        limit_overrides={
            "GET": {
                RateLimitCategory.IP: RateLimit(limit=10, window=1),
                RateLimitCategory.USER: RateLimit(limit=10, window=1),
                RateLimitCategory.ORGANIZATION: RateLimit(limit=10, window=1),
            }
        }
    )

    def get(self, request: Request, organization: Organization) -> Response:
        """
        Get the stats on an Organization's Issues
        `````````````````````````````
        Return a list of issues (groups) with the requested stats.  All parameters are
        supplied as query string parameters.

        :qparam list groups: A list of group ids
        :qparam list expand: an optional list of strings to opt in to additional data. Supports `inbox`
        :qparam list collapse: an optional list of strings to opt out of certain pieces of data. Supports `stats`, `lifetime`, `filtered`, and `base`

        The ``groupStatsPeriod`` parameter can be used to select the timeline
        stats which should be present. Possible values are: '' (disable),
        '24h', '14d'

        The ``statsPeriod`` parameter can be used to select a date window starting
        from now. Ex. ``14d``.

        The ``start`` and ``end`` parameters can be used to select an absolute
        date period to fetch issues from.

        :qparam string statsPeriod: an optional stat period (can be one of
                                    ``"24h"``, ``"14d"``, and ``""``).
        :qparam string groupStatsPeriod: an optional stat period (can be one of
                                    ``"24h"``, ``"14d"``, and ``""``).
        :qparam string start: Beginning date. You must also provide ``end``.
        :qparam string end: End date. You must also provide ``start``.
        """
        stats_period = request.GET.get("groupStatsPeriod")
        try:
            start, end = get_date_range_from_stats_period(request.GET)
        except InvalidParams as e:
            raise ParseError(detail=str(e))

        expand = request.GET.getlist("expand", [])
        collapse = request.GET.getlist("collapse", ["base"])
        projects = self.get_projects(request, organization)
        if not projects:
            raise ParseError(
                detail="Either the user has not access to any projects or you need to "
                + "include `projects` with your request. (i.e. projects=1,2,3)"
            )
        project_ids = [p.id for p in projects]

        try:
            group_ids = set(map(int, request.GET.getlist("groups")))
        except ValueError:
            raise ParseError(detail="Group ids must be integers")

        if not group_ids:
            raise ParseError(
                detail="You should include `groups` with your request. (i.e. groups=1,2,3)"
            )
        else:
            groups = list(
                Group.objects.filter(id__in=group_ids, project_id__in=project_ids).select_related(
                    "project"
                )
            )
            if not groups:
                raise ParseError(detail="No matching groups found")
            # Hard cap to keep the Snuba fan-out bounded.
            elif len(groups) > 100:
                raise ParseError(detail="Too many groups requested.")
            # Every requested group's project must be accessible to the caller.
            elif not all(request.access.has_project_access(g.project) for g in groups):
                raise PermissionDenied

        if stats_period not in (None, "", "24h", "14d", "auto"):
            raise ParseError(detail=ERR_INVALID_STATS_PERIOD)
        stats_period, stats_period_start, stats_period_end = calculate_stats_period(
            stats_period, start, end
        )

        environments = self.get_environments(request, organization)
        query_kwargs = build_query_params_from_request(
            request, organization, projects, environments
        )

        context = serialize(
            groups,
            request.user,
            StreamGroupSerializerSnuba(
                environment_ids=[env.id for env in environments],
                stats_period=stats_period,
                stats_period_start=stats_period_start,
                stats_period_end=stats_period_end,
                collapse=collapse,
                expand=expand,
                start=start,
                end=end,
                search_filters=(
                    query_kwargs["search_filters"] if "search_filters" in query_kwargs else None
                ),
                organization_id=organization.id,
                project_ids=project_ids,
            ),
            request=request,
        )

        response = Response(context)
        return response
| OrganizationGroupIndexStatsEndpoint |
python | ray-project__ray | python/ray/serve/tests/unit/test_proxy_router.py | {
"start": 5352,
"end": 7764
class ____:
    """Unit tests for the proxy router's ``ready_for_traffic`` check."""

    @pytest.mark.parametrize("is_head", [False, True])
    def test_route_table_not_populated(self, mock_router, is_head: bool):
        """Without a route table from the controller the proxy is never ready."""
        ready, reason = mock_router.ready_for_traffic(is_head=is_head)
        assert not ready
        assert reason == NO_ROUTES_MESSAGE

    def test_head_route_table_populated_no_replicas(self, mock_router):
        """On the head node, routes without replicas are enough to be ready."""
        deployment_id = DeploymentID(name="A", app_name="B")
        mock_router.update_routes({deployment_id: EndpointInfo(route="/")})
        mock_router.handles[deployment_id].set_running_replicas_populated(False)

        ready, reason = mock_router.ready_for_traffic(is_head=True)
        assert ready
        assert not reason

    def test_worker_route_table_populated_no_replicas(self, mock_router):
        """On a worker node, routes alone (no replicas yet) are not enough."""
        deployment_id = DeploymentID(name="A", app_name="B")
        mock_router.update_routes({deployment_id: EndpointInfo(route="/")})
        mock_router.handles[deployment_id].set_running_replicas_populated(False)

        ready, reason = mock_router.ready_for_traffic(is_head=False)
        assert not ready
        assert reason == NO_REPLICAS_MESSAGE

    @pytest.mark.parametrize("is_head", [False, True])
    def test_route_table_populated_with_replicas(self, mock_router, is_head: bool):
        """Routes plus running replicas make the proxy ready on any node."""
        deployment_id = DeploymentID(name="A", app_name="B")
        mock_router.update_routes({deployment_id: EndpointInfo(route="/")})
        mock_router.handles[deployment_id].set_running_replicas_populated(True)

        ready, reason = mock_router.ready_for_traffic(is_head=is_head)
        assert ready
        assert not reason
if __name__ == "__main__":
    import sys

    # Allow invoking this test module directly; exit with pytest's status code.
    sys.exit(pytest.main(["-v", "-s", __file__]))
| TestReadyForTraffic |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Data.py | {
"start": 8595,
"end": 12075
class ____(Node):
    """Concatenates record arrays and/or adds new columns"""
    nodeName = 'ColumnJoin'

    def __init__(self, name):
        Node.__init__(self, name, terminals = {
            'output': {'io': 'out'},
        })

        #self.items = []

        # Control widget: a reorderable tree of input terminals plus
        # add/remove buttons. Tree order defines column order in the output.
        self.ui = QtWidgets.QWidget()
        self.layout = QtWidgets.QGridLayout()
        self.ui.setLayout(self.layout)

        self.tree = TreeWidget()
        self.addInBtn = QtWidgets.QPushButton('+ Input')
        self.remInBtn = QtWidgets.QPushButton('- Input')

        self.layout.addWidget(self.tree, 0, 0, 1, 2)
        self.layout.addWidget(self.addInBtn, 1, 0)
        self.layout.addWidget(self.remInBtn, 1, 1)

        self.addInBtn.clicked.connect(self.addInput)
        self.remInBtn.clicked.connect(self.remInput)
        # Re-run the node whenever the user reorders inputs in the tree.
        self.tree.sigItemMoved.connect(self.update)

    def ctrlWidget(self):
        """Return the control widget shown in the flowchart UI."""
        return self.ui

    def addInput(self):
        """Create a new input terminal and its matching tree item."""
        #print "ColumnJoinNode.addInput called."
        term = Node.addInput(self, 'input', renamable=True, removable=True, multiable=True)
        #print "Node.addInput returned. term:", term
        item = QtWidgets.QTreeWidgetItem([term.name()])
        # Cross-link terminal and tree item so each can find the other.
        item.term = term
        term.joinItem = item
        #self.items.append((term, item))
        self.tree.addTopLevelItem(item)

    def remInput(self):
        """Remove the currently-selected input terminal and tree item."""
        sel = self.tree.currentItem()
        term = sel.term
        # Break the cross-links before removing either side.
        term.joinItem = None
        sel.term = None
        self.tree.removeTopLevelItem(sel)
        self.removeTerminal(term)
        self.update()

    def process(self, display=True, **args):
        """Concatenate connected inputs, in tree order, into one record array.

        Record-array inputs contribute their columns directly; any other
        value becomes a single column named after its terminal.
        """
        order = self.order()
        vals = []
        for name in order:
            if name not in args:
                continue
            val = args[name]
            if isinstance(val, np.ndarray) and len(val.dtype) > 0:
                vals.append(val)
            else:
                vals.append((name, None, val))
        return {'output': functions.concatenateColumns(vals)}

    def order(self):
        """Return input-terminal names in the user-defined tree order."""
        return [str(self.tree.topLevelItem(i).text(0)) for i in range(self.tree.topLevelItemCount())]

    def saveState(self):
        state = Node.saveState(self)
        state['order'] = self.order()
        return state

    def restoreState(self, state):
        Node.restoreState(self, state)
        inputs = self.inputs()

        ## Node.restoreState should have created all of the terminals we need
        ## However: to maintain support for some older flowchart files, we need
        ## to manually add any terminals that were not taken care of.
        for name in [n for n in state['order'] if n not in inputs]:
            Node.addInput(self, name, renamable=True, removable=True, multiable=True)
        inputs = self.inputs()

        # Rebuild the tree in the saved order, appending any extra inputs.
        order = [name for name in state['order'] if name in inputs]
        for name in inputs:
            if name not in order:
                order.append(name)

        self.tree.clear()
        for name in order:
            term = self[name]
            item = QtWidgets.QTreeWidgetItem([name])
            item.term = term
            term.joinItem = item
            #self.items.append((term, item))
            self.tree.addTopLevelItem(item)

    def terminalRenamed(self, term, oldName):
        """Keep the tree label in sync when a terminal is renamed."""
        Node.terminalRenamed(self, term, oldName)
        item = term.joinItem
        item.setText(0, term.name())
        self.update()
| ColumnJoinNode |
python | plotly__plotly.py | plotly/matplotlylib/mplexporter/renderers/base.py | {
"start": 212,
"end": 14698
class ____(object):
    """Abstract base class for mplexporter renderers.

    The exporter walks a matplotlib figure and calls the ``draw_*`` /
    ``open_*`` / ``close_*`` hooks here. Subclasses override the hooks to
    emit their own output format; the base class provides context-manager
    bookkeeping for the current figure/axes/legend and default
    implementations that reduce lines and markers to ``draw_path`` calls.
    """

    @staticmethod
    def ax_zoomable(ax):
        # An axes is zoomable when interactive navigation is enabled.
        return bool(ax and ax.get_navigate())

    @staticmethod
    def ax_has_xgrid(ax):
        # NOTE(review): checks xaxis._gridOnMajor but yaxis.get_gridlines()
        # — looks asymmetric; confirm against upstream before changing.
        return bool(ax and ax.xaxis._gridOnMajor and ax.yaxis.get_gridlines())

    @staticmethod
    def ax_has_ygrid(ax):
        return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())

    # Convenience properties for the axes currently being drawn.
    @property
    def current_ax_zoomable(self):
        return self.ax_zoomable(self._current_ax)

    @property
    def current_ax_has_xgrid(self):
        return self.ax_has_xgrid(self._current_ax)

    @property
    def current_ax_has_ygrid(self):
        return self.ax_has_ygrid(self._current_ax)

    @contextmanager
    def draw_figure(self, fig, props):
        """Context manager bracketing one figure with open/close calls."""
        if hasattr(self, "_current_fig") and self._current_fig is not None:
            warnings.warn("figure embedded in figure: something is wrong")
        self._current_fig = fig
        self._fig_props = props
        self.open_figure(fig=fig, props=props)
        yield
        self.close_figure(fig=fig)
        self._current_fig = None
        self._fig_props = {}

    @contextmanager
    def draw_axes(self, ax, props):
        """Context manager bracketing one axes with open/close calls."""
        if hasattr(self, "_current_ax") and self._current_ax is not None:
            warnings.warn("axes embedded in axes: something is wrong")
        self._current_ax = ax
        self._ax_props = props
        self.open_axes(ax=ax, props=props)
        yield
        self.close_axes(ax=ax)
        self._current_ax = None
        self._ax_props = {}

    @contextmanager
    def draw_legend(self, legend, props):
        """Context manager bracketing one legend with open/close calls."""
        self._current_legend = legend
        self._legend_props = props
        self.open_legend(legend=legend, props=props)
        yield
        self.close_legend(legend=legend)
        self._current_legend = None
        self._legend_props = {}

    # Following are the functions which should be overloaded in subclasses

    def open_figure(self, fig, props):
        """
        Begin commands for a particular figure.

        Parameters
        ----------
        fig : matplotlib.Figure
            The Figure which will contain the ensuing axes and elements
        props : dictionary
            The dictionary of figure properties
        """
        pass

    def close_figure(self, fig):
        """
        Finish commands for a particular figure.

        Parameters
        ----------
        fig : matplotlib.Figure
            The figure which is finished being drawn.
        """
        pass

    def open_axes(self, ax, props):
        """
        Begin commands for a particular axes.

        Parameters
        ----------
        ax : matplotlib.Axes
            The Axes which will contain the ensuing axes and elements
        props : dictionary
            The dictionary of axes properties
        """
        pass

    def close_axes(self, ax):
        """
        Finish commands for a particular axes.

        Parameters
        ----------
        ax : matplotlib.Axes
            The Axes which is finished being drawn.
        """
        pass

    def open_legend(self, legend, props):
        """
        Beging commands for a particular legend.

        Parameters
        ----------
        legend : matplotlib.legend.Legend
                The Legend that will contain the ensuing elements
        props : dictionary
                The dictionary of legend properties
        """
        pass

    def close_legend(self, legend):
        """
        Finish commands for a particular legend.

        Parameters
        ----------
        legend : matplotlib.legend.Legend
                The Legend which is finished being drawn
        """
        pass

    def draw_marked_line(
        self, data, coordinates, linestyle, markerstyle, label, mplobj=None
    ):
        """Draw a line that also has markers.

        If this isn't reimplemented by a renderer object, by default, it will
        make a call to BOTH draw_line and draw_markers when both markerstyle
        and linestyle are not None in the same Line2D object.
        """
        if linestyle is not None:
            self.draw_line(data, coordinates, linestyle, label, mplobj)
        if markerstyle is not None:
            self.draw_markers(data, coordinates, markerstyle, label, mplobj)

    def draw_line(self, data, coordinates, style, label, mplobj=None):
        """
        Draw a line. By default, draw the line via the draw_path() command.
        Some renderers might wish to override this and provide more
        fine-grained behavior.

        In matplotlib, lines are generally created via the plt.plot() command,
        though this command also can create marker collections.

        Parameters
        ----------
        data : array_like
            A shape (N, 2) array of datapoints.
        coordinates : string
            A string code, which should be either 'data' for data coordinates,
            or 'figure' for figure (pixel) coordinates.
        style : dictionary
            a dictionary specifying the appearance of the line.
        mplobj : matplotlib object
            the matplotlib plot element which generated this line
        """
        # A polyline: move to the first point, then line-to each subsequent one.
        pathcodes = ["M"] + (data.shape[0] - 1) * ["L"]
        pathstyle = dict(facecolor="none", **style)
        pathstyle["edgecolor"] = pathstyle.pop("color")
        pathstyle["edgewidth"] = pathstyle.pop("linewidth")
        self.draw_path(
            data=data,
            coordinates=coordinates,
            pathcodes=pathcodes,
            style=pathstyle,
            mplobj=mplobj,
        )

    @staticmethod
    def _iter_path_collection(paths, path_transforms, offsets, styles):
        """Build an iterator over the elements of the path collection"""
        N = max(len(paths), len(offsets))

        # Before mpl 1.4.0, path_transform can be a false-y value, not a valid
        # transformation matrix.
        if Version(mpl.__version__) < Version("1.4.0"):
            if path_transforms is None:
                path_transforms = [np.eye(3)]

        edgecolor = styles["edgecolor"]
        if np.size(edgecolor) == 0:
            edgecolor = ["none"]
        facecolor = styles["facecolor"]
        if np.size(facecolor) == 0:
            facecolor = ["none"]

        elements = [
            paths,
            path_transforms,
            offsets,
            edgecolor,
            styles["linewidth"],
            facecolor,
        ]

        # Cycle every input so shorter lists repeat, capped at N elements.
        it = itertools
        return it.islice(zip(*map(it.cycle, elements)), N)

    def draw_path_collection(
        self,
        paths,
        path_coordinates,
        path_transforms,
        offsets,
        offset_coordinates,
        offset_order,
        styles,
        mplobj=None,
    ):
        """
        Draw a collection of paths. The paths, offsets, and styles are all
        iterables, and the number of paths is max(len(paths), len(offsets)).

        By default, this is implemented via multiple calls to the draw_path()
        function. For efficiency, Renderers may choose to customize this
        implementation.

        Examples of path collections created by matplotlib are scatter plots,
        histograms, contour plots, and many others.

        Parameters
        ----------
        paths : list
            list of tuples, where each tuple has two elements:
            (data, pathcodes). See draw_path() for a description of these.
        path_coordinates: string
            the coordinates code for the paths, which should be either
            'data' for data coordinates, or 'figure' for figure (pixel)
            coordinates.
        path_transforms: array_like
            an array of shape (*, 3, 3), giving a series of 2D Affine
            transforms for the paths. These encode translations, rotations,
            and scalings in the standard way.
        offsets: array_like
            An array of offsets of shape (N, 2)
        offset_coordinates : string
            the coordinates code for the offsets, which should be either
            'data' for data coordinates, or 'figure' for figure (pixel)
            coordinates.
        offset_order : string
            either "before" or "after". This specifies whether the offset
            is applied before the path transform, or after. The matplotlib
            backend equivalent is "before"->"data", "after"->"screen".
        styles: dictionary
            A dictionary in which each value is a list of length N, containing
            the style(s) for the paths.
        mplobj : matplotlib object
            the matplotlib plot element which generated this collection
        """
        if offset_order == "before":
            raise NotImplementedError("offset before transform")

        for tup in self._iter_path_collection(paths, path_transforms, offsets, styles):
            (path, path_transform, offset, ec, lw, fc) = tup
            vertices, pathcodes = path
            path_transform = transforms.Affine2D(path_transform)
            vertices = path_transform.transform(vertices)
            # This is a hack:
            if path_coordinates == "figure":
                path_coordinates = "points"
            style = {
                "edgecolor": utils.export_color(ec),
                "facecolor": utils.export_color(fc),
                "edgewidth": lw,
                "dasharray": "10,0",
                "alpha": styles["alpha"],
                "zorder": styles["zorder"],
            }
            self.draw_path(
                data=vertices,
                coordinates=path_coordinates,
                pathcodes=pathcodes,
                style=style,
                offset=offset,
                offset_coordinates=offset_coordinates,
                mplobj=mplobj,
            )

    def draw_markers(self, data, coordinates, style, label, mplobj=None):
        """
        Draw a set of markers. By default, this is done by repeatedly
        calling draw_path(), but renderers should generally overload
        this method to provide a more efficient implementation.

        In matplotlib, markers are created using the plt.plot() command.

        Parameters
        ----------
        data : array_like
            A shape (N, 2) array of datapoints.
        coordinates : string
            A string code, which should be either 'data' for data coordinates,
            or 'figure' for figure (pixel) coordinates.
        style : dictionary
            a dictionary specifying the appearance of the markers.
        mplobj : matplotlib object
            the matplotlib plot element which generated this marker collection
        """
        vertices, pathcodes = style["markerpath"]
        pathstyle = dict(
            (key, style[key])
            for key in ["alpha", "edgecolor", "facecolor", "zorder", "edgewidth"]
        )
        pathstyle["dasharray"] = "10,0"
        # One draw_path call per marker, offset to each data point.
        for vertex in data:
            self.draw_path(
                data=vertices,
                coordinates="points",
                pathcodes=pathcodes,
                style=pathstyle,
                offset=vertex,
                offset_coordinates=coordinates,
                mplobj=mplobj,
            )

    def draw_text(
        self, text, position, coordinates, style, text_type=None, mplobj=None
    ):
        """
        Draw text on the image.

        Parameters
        ----------
        text : string
            The text to draw
        position : tuple
            The (x, y) position of the text
        coordinates : string
            A string code, which should be either 'data' for data coordinates,
            or 'figure' for figure (pixel) coordinates.
        style : dictionary
            a dictionary specifying the appearance of the text.
        text_type : string or None
            if specified, a type of text such as "xlabel", "ylabel", "title"
        mplobj : matplotlib object
            the matplotlib plot element which generated this text
        """
        raise NotImplementedError()

    def draw_path(
        self,
        data,
        coordinates,
        pathcodes,
        style,
        offset=None,
        offset_coordinates="data",
        mplobj=None,
    ):
        """
        Draw a path.

        In matplotlib, paths are created by filled regions, histograms,
        contour plots, patches, etc.

        Parameters
        ----------
        data : array_like
            A shape (N, 2) array of datapoints.
        coordinates : string
            A string code, which should be either 'data' for data coordinates,
            'figure' for figure (pixel) coordinates, or "points" for raw
            point coordinates (useful in conjunction with offsets, below).
        pathcodes : list
            A list of single-character SVG pathcodes associated with the data.
            Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
            'S', 's', 'C', 'c', 'Z', 'z']
            See the SVG specification for details. Note that some path codes
            consume more than one datapoint (while 'Z' consumes none), so
            in general, the length of the pathcodes list will not be the same
            as that of the data array.
        style : dictionary
            a dictionary specifying the appearance of the line.
        offset : list (optional)
            the (x, y) offset of the path. If not given, no offset will
            be used.
        offset_coordinates : string (optional)
            A string code, which should be either 'data' for data coordinates,
            or 'figure' for figure (pixel) coordinates.
        mplobj : matplotlib object
            the matplotlib plot element which generated this path
        """
        raise NotImplementedError()

    def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
        """
        Draw an image.

        Parameters
        ----------
        imdata : string
            base64 encoded png representation of the image
        extent : list
            the axes extent of the image: [xmin, xmax, ymin, ymax]
        coordinates: string
            A string code, which should be either 'data' for data coordinates,
            or 'figure' for figure (pixel) coordinates.
        style : dictionary
            a dictionary specifying the appearance of the image
        mplobj : matplotlib object
            the matplotlib plot object which generated this image
        """
        raise NotImplementedError()
python | Pylons__pyramid | tests/test_view.py | {
"start": 14507,
"end": 16218
} | class ____(BaseTest, unittest.TestCase):
def _callFUT(self, *arg, **kw):
from pyramid.view import render_view
return render_view(*arg, **kw)
def test_call_no_view_registered(self):
request = self._makeRequest()
context = self._makeContext()
result = self._callFUT(context, request, name='notregistered')
self.assertEqual(result, None)
def test_call_view_registered_secure(self):
request = self._makeRequest()
context = self._makeContext()
response = DummyResponse()
view = make_view(response)
self._registerView(request.registry, view, 'registered')
s = self._callFUT(context, request, name='registered', secure=True)
self.assertEqual(s, b'')
def test_call_view_registered_insecure_no_call_permissive(self):
context = self._makeContext()
request = self._makeRequest()
response = DummyResponse()
view = make_view(response)
self._registerView(request.registry, view, 'registered')
s = self._callFUT(context, request, name='registered', secure=False)
self.assertEqual(s, b'')
def test_call_view_registered_insecure_with_call_permissive(self):
context = self._makeContext()
request = self._makeRequest()
response = DummyResponse()
view = make_view(response)
def anotherview(context, request):
return DummyResponse(b'anotherview')
view.__call_permissive__ = anotherview
self._registerView(request.registry, view, 'registered')
s = self._callFUT(context, request, name='registered', secure=False)
self.assertEqual(s, b'anotherview')
| RenderViewTests |
python | keon__algorithms | tests/test_strings.py | {
"start": 13217,
"end": 13658
} | class ____(unittest.TestCase):
"""[summary]
Test for the file word_squares.py
Arguments:
unittest {[type]} -- [description]
"""
def test_word_squares(self):
self.assertEqual([['wall', 'area', 'lead', 'lady'], ['ball', 'area',
'lead', 'lady']], \
word_squares(["area", "lead", "wall",
"lady", "ball"]))
| TestWordSquares |
python | getsentry__sentry | src/sentry/issues/endpoints/project_stacktrace_link.py | {
"start": 1079,
"end": 3983
} | class ____(TypedDict):
file: str
filename: str
platform: str | None
abs_path: str | None
commit_id: str | None
group_id: str | None
line_no: str | None
module: str | None
package: str | None
sdk_name: str | None
def generate_context(parameters: QueryDict) -> StacktraceLinkContext:
return {
"file": parameters.get("file", ""),
# XXX: Temp change to support try_path_munging until refactored
"filename": parameters.get("file", ""),
"commit_id": parameters.get("commitId"),
"platform": parameters.get("platform"),
"sdk_name": parameters.get("sdkName"),
"abs_path": parameters.get("absPath"),
"module": parameters.get("module"),
"package": parameters.get("package"),
"line_no": parameters.get("lineNo"),
"group_id": parameters.get("groupId"),
}
def set_top_tags(
scope: Scope,
project: Project,
ctx: StacktraceLinkContext,
has_code_mappings: bool,
) -> None:
try:
scope.set_tag("project.slug", project.slug)
scope.set_tag("organization.slug", project.organization.slug)
scope.set_tag("organization.early_adopter", bool(project.organization.flags.early_adopter))
scope.set_tag("stacktrace_link.platform", ctx["platform"])
scope.set_tag("stacktrace_link.code_mappings", has_code_mappings)
scope.set_tag("stacktrace_link.file", ctx["file"])
# Add tag if filepath is Windows
if ctx["file"] and ctx["file"].find(":\\") > -1:
scope.set_tag("stacktrace_link.windows", True)
scope.set_tag("stacktrace_link.abs_path", ctx["abs_path"])
if ctx["platform"] == "python":
# This allows detecting a file that belongs to Python's 3rd party modules
scope.set_tag("stacktrace_link.in_app", "site-packages" not in str(ctx["abs_path"]))
except Exception:
# If errors arises we can still proceed
logger.exception("We failed to set a tag.")
def set_tags(scope: Scope, result: StacktraceLinkOutcome, integrations: list[None]) -> None:
scope.set_tag("stacktrace_link.found", result["source_url"] is not None)
scope.set_tag("stacktrace_link.source_url", result["source_url"])
scope.set_tag("stacktrace_link.error", result["error"])
if result["current_config"]:
scope.set_tag(
"stacktrace_link.tried_url", result["current_config"]["outcome"].get("attemptedUrl")
)
scope.set_tag(
"stacktrace_link.empty_root",
result["current_config"]["config"].automatically_generated == "",
)
scope.set_tag(
"stacktrace_link.auto_derived",
result["current_config"]["config"].automatically_generated is True,
)
scope.set_tag("stacktrace_link.has_integration", len(integrations) > 0)
@region_silo_endpoint
| StacktraceLinkContext |
python | walkccc__LeetCode | solutions/824. Goat Latin/824.py | {
"start": 0,
"end": 362
} | class ____:
def toGoatLatin(self, sentence: str) -> str:
ans = []
VOWELS = 'aeiouAEIOU'
i = 1
for word in sentence.split():
if i > 1:
ans.append(' ')
if word[0] in VOWELS:
ans.append(word)
else:
ans.append(word[1:] + word[0])
ans.append('ma' + 'a' * i)
i += 1
return ''.join(ans)
| Solution |
python | wandb__wandb | wandb/sdk/lib/retry.py | {
"start": 663,
"end": 1057
} | class ____(Exception):
"""Exception type designated for errors that may only be temporary.
Can have its own message and/or wrap another exception.
"""
def __init__(
self, msg: Optional[str] = None, exc: Optional[BaseException] = None
) -> None:
super().__init__(msg)
self.message = msg
self.exception = exc
_R = TypeVar("_R")
| TransientError |
python | apache__airflow | airflow-core/src/airflow/triggers/base.py | {
"start": 1279,
"end": 1550
} | class ____:
"""Arguments required for start task execution from triggerer."""
trigger_cls: str
next_method: str
trigger_kwargs: dict[str, Any] | None = None
next_kwargs: dict[str, Any] | None = None
timeout: timedelta | None = None
| StartTriggerArgs |
python | kamyu104__LeetCode-Solutions | Python/find-largest-value-in-each-tree-row.py | {
"start": 29,
"end": 664
} | class ____(object):
def largestValues(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
def largestValuesHelper(root, depth, result):
if not root:
return
if depth == len(result):
result.append(root.val)
else:
result[depth] = max(result[depth], root.val)
largestValuesHelper(root.left, depth+1, result)
largestValuesHelper(root.right, depth+1, result)
result = []
largestValuesHelper(root, 0, result)
return result
# Time: O(n)
# Space: O(n)
| Solution |
python | run-llama__llama_index | llama-index-integrations/program/llama-index-program-evaporate/llama_index/program/evaporate/base.py | {
"start": 6210,
"end": 9790
} | class ____(BaseEvaporateProgram[DataFrameValuesPerColumn]):
"""
Multi-Value Evaporate program.
Given a set of fields, and texts extracts a list of `DataFrameRow` objects across
that texts.
Each DataFrameRow corresponds to a field, and each value in the row corresponds to
a value for the field.
Difference with DFEvaporateProgram is that 1) each DataFrameRow
is column-oriented (instead of row-oriented), and 2)
each DataFrameRow can be variable length (not guaranteed to have 1 value per
node).
"""
@classmethod
def from_defaults(
cls,
fields_to_extract: Optional[List[str]] = None,
fields_context: Optional[Dict[str, Any]] = None,
llm: Optional[LLM] = None,
schema_id_prompt: Optional[SchemaIDPrompt] = None,
fn_generate_prompt: Optional[FnGeneratePrompt] = None,
field_extract_query_tmpl: str = DEFAULT_FIELD_EXTRACT_QUERY_TMPL,
nodes_to_fit: Optional[List[BaseNode]] = None,
verbose: bool = False,
) -> "BaseEvaporateProgram":
# modify the default function generate prompt to return a list
fn_generate_prompt = fn_generate_prompt or FN_GENERATION_LIST_PROMPT
return super().from_defaults(
fields_to_extract=fields_to_extract,
fields_context=fields_context,
llm=llm,
schema_id_prompt=schema_id_prompt,
fn_generate_prompt=fn_generate_prompt,
field_extract_query_tmpl=field_extract_query_tmpl,
nodes_to_fit=nodes_to_fit,
verbose=verbose,
)
def fit(
self,
nodes: List[BaseNode],
field: str,
field_context: Optional[Any] = None,
expected_output: Optional[Any] = None,
inplace: bool = True,
) -> str:
"""Given the input Nodes and fields, synthesize the python code."""
fn = self._extractor.extract_fn_from_nodes(
nodes, field, expected_output=expected_output
)
logger.debug(f"Extracted function: {fn}")
if self._verbose:
print_text(f"Extracted function: {fn}\n", color="blue")
if inplace:
self._field_fns[field] = fn
return fn
@property
def output_cls(self) -> Type[DataFrameValuesPerColumn]:
"""Output class."""
return DataFrameValuesPerColumn
def _inference(
self, nodes: List[BaseNode], fn_str: str, field_name: str
) -> List[Any]:
"""Given the input, call the python code and return the result."""
results_by_node = self._extractor.run_fn_on_nodes(nodes, fn_str, field_name)
# flatten results
return [r for results in results_by_node for r in results]
def __call__(self, *args: Any, **kwds: Any) -> DataFrameValuesPerColumn:
"""Call evaporate on inference data."""
# TODO: either specify `nodes` or `texts` in kwds
if "nodes" in kwds:
nodes = kwds["nodes"]
elif "texts" in kwds:
nodes = [TextNode(text=t) for t in kwds["texts"]]
else:
raise ValueError("Must provide either `nodes` or `texts`.")
col_dict = {}
for field in self._fields:
col_dict[field] = self._inference(nodes, self._field_fns[field], field)
# convert col_dict to list of DataFrameRow objects
df_row_objs = []
for field in self._fields:
df_row_objs.append(DataFrameRow(row_values=col_dict[field]))
return DataFrameValuesPerColumn(columns=df_row_objs)
| MultiValueEvaporateProgram |
python | numba__numba | numba/cuda/cudadrv/error.py | {
"start": 425,
"end": 477
} | class ____(NvrtcError):
pass
| NvrtcCompilationError |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeVarTuple7.py | {
"start": 805,
"end": 1718
} | class ____(dict[tuple[Unpack[_Ys]], _T1], Generic[_T1, Unpack[_Ys]]):
def func1(self, a: tuple[Unpack[_Ys], int]):
pass
# This should generate an error because tuple cannot contain multiple
# TypeVarTuples.
def func2(self, *args: Unpack[_Xs]) -> tuple[Unpack[_Ys], Unpack[_Xs]]: ...
def func3(self) -> Union[Unpack[_Ys], int]:
return 3
def func4(self, *args: Unpack[_Xs]) -> Union[int, Unpack[_Ys], Unpack[_Xs]]:
return 3
def func5(self, a: Callable[[Unpack[_Ys], int], Any]):
pass
# This should generate an error because *_Ys cannot appear
# by itself in a return type for a Callable.
def func6(self, a: Callable[[int], Unpack[_Ys]]):
pass
Alias1 = Union[tuple[int, Unpack[_Xs]], _T1]
# This should generate an error because at most one TypeVarTuple is allowed.
Alias2 = Union[tuple[int, Unpack[_Xs]], tuple[Unpack[_Ys]]]
| Class5 |
python | PrefectHQ__prefect | src/prefect/concurrency/v1/_asyncio.py | {
"start": 456,
"end": 2152
} | class ____(TimeoutError):
"""Raised when acquiring a concurrency slot times out."""
@sync_compatible
async def acquire_concurrency_slots(
names: list[str],
task_run_id: UUID,
timeout_seconds: Optional[float] = None,
) -> list[MinimalConcurrencyLimitResponse]:
service = ConcurrencySlotAcquisitionService.instance(frozenset(names))
future = service.send((task_run_id, timeout_seconds))
try:
response = await asyncio.wrap_future(future)
except TimeoutError as timeout:
raise AcquireConcurrencySlotTimeoutError(
f"Attempt to acquire concurrency limits timed out after {timeout_seconds} second(s)"
) from timeout
except Exception as exc:
raise ConcurrencySlotAcquisitionError(
f"Unable to acquire concurrency limits {names!r}"
) from exc
else:
return _response_to_concurrency_limit_response(response)
@sync_compatible
async def release_concurrency_slots(
names: list[str], task_run_id: UUID, occupancy_seconds: float
) -> list[MinimalConcurrencyLimitResponse]:
async with get_client() as client:
response = await client.decrement_v1_concurrency_slots(
names=names,
task_run_id=task_run_id,
occupancy_seconds=occupancy_seconds,
)
return _response_to_concurrency_limit_response(response)
def _response_to_concurrency_limit_response(
response: httpx.Response,
) -> list[MinimalConcurrencyLimitResponse]:
data: list[MinimalConcurrencyLimitResponse] = response.json() or []
return [
MinimalConcurrencyLimitResponse.model_validate(limit) for limit in data if data
]
| AcquireConcurrencySlotTimeoutError |
python | huggingface__transformers | src/transformers/models/ministral/modular_ministral.py | {
"start": 8899,
"end": 11902
} | class ____(Qwen2Model):
def __init__(self, config: MinistralConfig):
super().__init__(config)
del self.has_sliding_layers
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
# It may already have been prepared by e.g. `generate`
if not isinstance(causal_mask_mapping := attention_mask, dict):
# Prepare mask arguments
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values,
"position_ids": position_ids,
}
# Create the masks
causal_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
"sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
}
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask_mapping[decoder_layer.attention_type],
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
| MinistralModel |
python | huggingface__transformers | tests/models/mgp_str/test_tokenization_mgp_str.py | {
"start": 914,
"end": 3946
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "alibaba-damo/mgp-str-base"
tokenizer_class = MgpstrTokenizer
test_rust_tokenizer = False
from_pretrained_kwargs = {}
test_seq2seq = False
@classmethod
def setUpClass(cls):
super().setUpClass()
vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: skip
vocab_tokens = dict(zip(vocab, range(len(vocab))))
cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(cls.vocab_file, "w", encoding="utf-8") as fp:
fp.write(json.dumps(vocab_tokens) + "\n")
@classmethod
def get_tokenizer(cls, pretrained_name=None, **kwargs):
pretrained_name = pretrained_name or cls.tmpdirname
return MgpstrTokenizer.from_pretrained(pretrained_name, **kwargs)
def get_input_output_texts(self, tokenizer):
input_text = "tester"
output_text = "tester"
return input_text, output_text
@unittest.skip(reason="MGP-STR always lower cases letters.")
def test_added_tokens_do_lower_case(self):
pass
def test_add_special_tokens(self):
tokenizers = self.get_tokenizers(do_lower_case=False)
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
special_token = "[SPECIAL_TOKEN]"
tokenizer.add_special_tokens({"cls_token": special_token})
encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
self.assertEqual(len(encoded_special_token), 1)
decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
self.assertTrue(special_token not in decoded)
def test_internal_consistency(self):
tokenizers = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}"):
input_text, output_text = self.get_input_output_texts(tokenizer)
tokens = tokenizer.tokenize(input_text)
ids = tokenizer.convert_tokens_to_ids(tokens)
ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
self.assertListEqual(ids, ids_2)
tokens_2 = tokenizer.convert_ids_to_tokens(ids)
self.assertNotEqual(len(tokens_2), 0)
text_2 = tokenizer.decode(ids)
self.assertIsInstance(text_2, str)
self.assertEqual(text_2.replace(" ", ""), output_text)
@unittest.skip(reason="MGP-STR tokenizer only handles one sequence.")
def test_maximum_encoding_length_pair_input(self):
pass
@unittest.skip(reason="inputs cannot be pretokenized in MgpstrTokenizer")
def test_pretokenized_inputs(self):
pass
| MgpstrTokenizationTest |
python | getsentry__sentry | src/sentry/db/models/paranoia.py | {
"start": 872,
"end": 1133
} | class ____(BaseManager[M]):
"""
Only exposes objects that have NOT been soft-deleted.
"""
def get_queryset(self) -> ParanoidQuerySet[M]:
return ParanoidQuerySet(self.model, using=self._db).filter(date_deleted__isnull=True)
| ParanoidManager |
python | pytorch__pytorch | torch/_export/db/examples/dynamic_shape_map.py | {
"start": 94,
"end": 454
} | class ____(torch.nn.Module):
"""
functorch map() maps a function over the first tensor dimension.
"""
def forward(self, xs, y):
def body(x, y):
return x + y
return map(body, xs, y)
example_args = (torch.randn(3, 2), torch.randn(2))
tags = {"torch.dynamic-shape", "torch.map"}
model = DynamicShapeMap()
| DynamicShapeMap |
python | doocs__leetcode | solution/0600-0699/0624.Maximum Distance in Arrays/Solution.py | {
"start": 0,
"end": 341
} | class ____:
def maxDistance(self, arrays: List[List[int]]) -> int:
ans = 0
mi, mx = arrays[0][0], arrays[0][-1]
for arr in arrays[1:]:
a, b = abs(arr[0] - mx), abs(arr[-1] - mi)
ans = max(ans, a, b)
mi = min(mi, arr[0])
mx = max(mx, arr[-1])
return ans
| Solution |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/sensors/test_emr_containers.py | {
"start": 1143,
"end": 4541
} | class ____:
def setup_method(self):
self.sensor = EmrContainerSensor(
task_id="test_emrcontainer_sensor",
virtual_cluster_id="vzwemreks",
job_id="job1234",
poll_interval=5,
max_retries=1,
aws_conn_id="aws_default",
)
# We're mocking all actual AWS calls and don't need a connection. This
# avoids an Airflow warning about connection cannot be found.
self.sensor.hook.get_connection = lambda _: None
@mock.patch.object(EmrContainerHook, "check_query_status", side_effect=("PENDING",))
def test_poke_pending(self, mock_check_query_status):
assert not self.sensor.poke(None)
@mock.patch.object(EmrContainerHook, "check_query_status", side_effect=("SUBMITTED",))
def test_poke_submitted(self, mock_check_query_status):
assert not self.sensor.poke(None)
@mock.patch.object(EmrContainerHook, "check_query_status", side_effect=("RUNNING",))
def test_poke_running(self, mock_check_query_status):
assert not self.sensor.poke(None)
@mock.patch.object(EmrContainerHook, "check_query_status", side_effect=("COMPLETED",))
def test_poke_completed(self, mock_check_query_status):
assert self.sensor.poke(None)
@mock.patch.object(EmrContainerHook, "check_query_status", side_effect=("FAILED",))
def test_poke_failed(self, mock_check_query_status):
with pytest.raises(AirflowException) as ctx:
self.sensor.poke(None)
assert "EMR Containers sensor failed" in str(ctx.value)
assert "FAILED" in str(ctx.value)
@mock.patch.object(EmrContainerHook, "check_query_status", side_effect=("CANCELLED",))
def test_poke_cancelled(self, mock_check_query_status):
with pytest.raises(AirflowException) as ctx:
self.sensor.poke(None)
assert "EMR Containers sensor failed" in str(ctx.value)
assert "CANCELLED" in str(ctx.value)
@mock.patch.object(EmrContainerHook, "check_query_status", side_effect=("CANCEL_PENDING",))
def test_poke_cancel_pending(self, mock_check_query_status):
with pytest.raises(AirflowException) as ctx:
self.sensor.poke(None)
assert "EMR Containers sensor failed" in str(ctx.value)
assert "CANCEL_PENDING" in str(ctx.value)
@mock.patch("airflow.providers.amazon.aws.sensors.emr.EmrContainerSensor.poke")
def test_sensor_defer(self, mock_poke):
self.sensor.deferrable = True
mock_poke.return_value = False
with pytest.raises(TaskDeferred) as e:
self.sensor.execute(context=None)
assert isinstance(e.value.trigger, EmrContainerTrigger), (
f"{e.value.trigger} is not a EmrContainerTrigger"
)
@mock.patch("airflow.providers.amazon.aws.sensors.emr.EmrContainerSensor.poke")
def test_sensor_defer_with_timeout(self, mock_poke):
self.sensor.deferrable = True
mock_poke.return_value = False
self.sensor.max_retries = 1000
with pytest.raises(TaskDeferred) as e:
self.sensor.execute(context=None)
trigger = e.value.trigger
assert isinstance(trigger, EmrContainerTrigger), f"{trigger} is not a EmrContainerTrigger"
assert trigger.waiter_delay == self.sensor.poll_interval
assert trigger.attempts == self.sensor.max_retries
| TestEmrContainerSensor |
python | vyperlang__vyper | vyper/venom/parser.py | {
"start": 3060,
"end": 10267
} | class ____(Transformer):
def start(self, children) -> IRContext:
ctx = IRContext()
if len(children) > 0 and isinstance(children[-1], _DataSegment):
ctx.data_segment = children.pop().children
funcs = children
for fn_name, items in funcs:
fn = ctx.create_function(fn_name)
if ctx.entry_function is None:
ctx.entry_function = fn
fn.clear_basic_blocks()
# reconstruct blocks from flat list of labels and instructions.
# the grammar parses labels and statements as a flat sequence,
# so we need to group instructions by their preceding label.
# this makes the grammar compatible with LALR(1).
# blocks are implicitly defined by label declarations - each
# label starts a new block that contains all instructions until
# the next label or end of function.
current_block_label: Optional[str] = None
current_block_instructions: list[IRInstruction] = []
blocks: list[tuple[str, list[IRInstruction]]] = []
for item in items:
if isinstance(item, _LabelDecl):
if current_block_label is not None:
blocks.append((current_block_label, current_block_instructions))
current_block_label = item.label
current_block_instructions = []
elif isinstance(item, IRInstruction):
if current_block_label is None:
raise ValueError("Instruction found before any label declaration")
current_block_instructions.append(item)
if current_block_label is not None:
blocks.append((current_block_label, current_block_instructions))
for block_name, instructions in blocks:
bb = IRBasicBlock(IRLabel(block_name, True), fn)
fn.append_basic_block(bb)
for instruction in instructions:
assert isinstance(instruction, IRInstruction) # help mypy
bb.insert_instruction(instruction)
_set_last_var(fn)
_set_last_label(ctx)
return ctx
def function(self, children) -> tuple[str, list]:
name = children[0]
block_content = children[1] # this is the block_content node
return name, block_content
def block_content(self, children) -> list:
# children contains label_decls and statements
return children
def label_decl(self, children) -> _LabelDecl:
# children[0] is the label, rest are NEWLINE tokens
label = _unescape(str(children[0]))
return _LabelDecl(label)
def statement(self, children) -> IRInstruction:
# children[0] is the instruction/assignment, rest are NEWLINE tokens
return children[0]
def data_segment(self, children) -> _DataSegment:
return _DataSegment(children)
def data_section(self, children) -> DataSection:
label = IRLabel(children[0], True)
# skip NEWLINE tokens and collect DataItems
data_items = [child for child in children[1:] if isinstance(child, DataItem)]
return DataSection(label, data_items)
def data_item(self, children) -> DataItem:
# children[0] is the data content, rest are NEWLINE tokens
item = children[0]
if isinstance(item, IRLabel):
return DataItem(item)
# handle hex strings
assert isinstance(item, str)
assert item.startswith('x"')
assert item.endswith('"')
item = item.removeprefix('x"').removesuffix('"')
item = item.replace("_", "")
return DataItem(bytes.fromhex(item))
def lhs(self, children):
# unwrap VAR_IDENT or lhs_list
assert len(children) == 1
return children[0]
def lhs_list(self, children):
# list of VAR_IDENTs
return children
def assignment(self, children) -> IRInstruction:
left, value = children
# Multi-output assignment (e.g., %a, %b = invoke @f)
if isinstance(left, list):
if not isinstance(value, IRInstruction):
raise TypeError("Multi-target assignment requires an instruction on RHS")
outs = left
value.set_outputs(outs)
return value
# Single-target assignment
to = left
if isinstance(value, IRInstruction):
value.set_outputs([to])
return value
if isinstance(value, (IRLiteral, IRVariable, IRLabel, IRAbstractMemLoc)):
return IRInstruction("assign", [value], outputs=[to])
raise TypeError(f"Unexpected value {value} of type {type(value)}")
def expr(self, children) -> IRInstruction | IROperand:
return children[0]
def instruction(self, children) -> IRInstruction:
if len(children) == 1:
# just the opcode (IDENT)
opcode = str(children[0])
operands = []
else:
assert len(children) == 2
# IDENT and operands_list
opcode = str(children[0])
operands = children[1]
# reverse operands, venom internally represents top of stack
# as rightmost operand
if opcode == "invoke":
# reverse stack arguments but not label arg
# invoke <target> <stack arguments>
operands = [operands[0]] + list(reversed(operands[1:]))
# special cases: operands with labels look better un-reversed
elif opcode not in ("jmp", "jnz", "djmp", "phi"):
operands.reverse()
return IRInstruction(opcode, operands)
def operands_list(self, children) -> list[IROperand]:
return children
def operand(self, children) -> IROperand:
return children[0]
def func_name(self, children) -> str:
# func_name can be IDENT or ESCAPED_STRING
return _unescape(str(children[0]))
def label_name(self, children) -> str:
# label_name can be IDENT or ESCAPED_STRING
return _unescape(str(children[0]))
def label_ref(self, children) -> IRLabel:
# label_ref is "@" followed by IDENT or ESCAPED_STRING
label = _unescape(str(children[0]))
if label.startswith("@"):
label = label[1:]
return IRLabel(label, True)
def VAR_IDENT(self, var_ident) -> IRVariable:
return IRVariable(var_ident[1:])
def CONST(self, val) -> IRLiteral:
if str(val).startswith("0x"):
return IRLiteral(int(val, 16))
return IRLiteral(int(val))
def memloc(self, children) -> IRAbstractMemLoc:
id_str, size_str = children
mem_id = int(id_str)
size = int(size_str)
return IRAbstractMemLoc(size, force_id=mem_id)
def IDENT(self, val) -> str:
return val.value
def HEXSTR(self, val) -> str:
return val.value
def parse_venom(source: str) -> IRContext:
tree = VENOM_PARSER.parse(source)
ctx = VenomTransformer().transform(tree)
assert isinstance(ctx, IRContext) # help mypy
return ctx
| VenomTransformer |
python | sympy__sympy | sympy/codegen/fnodes.py | {
"start": 4763,
"end": 5210
} | class ____(Token):
""" Represents a call to a subroutine in Fortran.
Examples
========
>>> from sympy.codegen.fnodes import SubroutineCall
>>> from sympy import fcode
>>> fcode(SubroutineCall('mysub', 'x y'.split()))
' call mysub(x, y)'
"""
__slots__ = _fields = ('name', 'subroutine_args')
_construct_name = staticmethod(_name)
_construct_subroutine_args = staticmethod(_mk_Tuple)
| SubroutineCall |
python | numba__numba | numba/cuda/dispatcher.py | {
"start": 20356,
"end": 20933
} | class ____(Cache):
"""
Implements a cache that saves and loads CUDA kernels and compile results.
"""
_impl_class = CUDACacheImpl
def load_overload(self, sig, target_context):
# Loading an overload refreshes the context to ensure it is
# initialized. To initialize the correct (i.e. CUDA) target, we need to
# enforce that the current target is the CUDA target.
from numba.core.target_extension import target_override
with target_override('cuda'):
return super().load_overload(sig, target_context)
| CUDACache |
python | dask__dask | dask/dataframe/dask_expr/_cumulative.py | {
"start": 3788,
"end": 3951
} | class ____(CumulativeAggregations):
chunk_operation = M.cumprod
aggregate_operation = staticmethod(methods.cumprod_aggregate)
neutral_element = 1
| CumProd |
python | kubernetes-client__python | kubernetes/client/models/v1beta2_cel_device_selector.py | {
"start": 383,
"end": 8630
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'expression': 'str'
}
attribute_map = {
'expression': 'expression'
}
def __init__(self, expression=None, local_vars_configuration=None): # noqa: E501
"""V1beta2CELDeviceSelector - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._expression = None
self.discriminator = None
self.expression = expression
@property
def expression(self):
"""Gets the expression of this V1beta2CELDeviceSelector. # noqa: E501
Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort. The expression's input is an object named \"device\", which carries the following properties: - driver (string): the name of the driver which defines this device. - attributes (map[string]object): the device's attributes, grouped by prefix (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all of the attributes which were prefixed by \"dra.example.com\". - capacity (map[string]object): the device's capacities, grouped by prefix. - allowMultipleAllocations (bool): the allowMultipleAllocations property of the device (v1.34+ with the DRAConsumableCapacity feature enabled). Example: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields: device.driver device.attributes[\"dra.example.com\"].model device.attributes[\"ext.example.com\"].family device.capacity[\"dra.example.com\"].modules The device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers. The value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity. If an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort. 
A robust expression should check for the existence of attributes before referencing them. For ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example: cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool) The length of the expression must be smaller or equal to 10 Ki. The cost of evaluating it is also limited based on the estimated number of logical steps. # noqa: E501
:return: The expression of this V1beta2CELDeviceSelector. # noqa: E501
:rtype: str
"""
return self._expression
@expression.setter
def expression(self, expression):
"""Sets the expression of this V1beta2CELDeviceSelector.
Expression is a CEL expression which evaluates a single device. It must evaluate to true when the device under consideration satisfies the desired criteria, and false when it does not. Any other result is an error and causes allocation of devices to abort. The expression's input is an object named \"device\", which carries the following properties: - driver (string): the name of the driver which defines this device. - attributes (map[string]object): the device's attributes, grouped by prefix (e.g. device.attributes[\"dra.example.com\"] evaluates to an object with all of the attributes which were prefixed by \"dra.example.com\". - capacity (map[string]object): the device's capacities, grouped by prefix. - allowMultipleAllocations (bool): the allowMultipleAllocations property of the device (v1.34+ with the DRAConsumableCapacity feature enabled). Example: Consider a device with driver=\"dra.example.com\", which exposes two attributes named \"model\" and \"ext.example.com/family\" and which exposes one capacity named \"modules\". This input to this expression would have the following fields: device.driver device.attributes[\"dra.example.com\"].model device.attributes[\"ext.example.com\"].family device.capacity[\"dra.example.com\"].modules The device.driver field can be used to check for a specific driver, either as a high-level precondition (i.e. you only want to consider devices from this driver) or as part of a multi-clause expression that is meant to consider devices from different drivers. The value type of each attribute is defined by the device definition, and users who write these expressions must consult the documentation for their specific drivers. The value type of each capacity is Quantity. If an unknown prefix is used as a lookup in either device.attributes or device.capacity, an empty map will be returned. Any reference to an unknown field will cause an evaluation error and allocation to abort. 
A robust expression should check for the existence of attributes before referencing them. For ease of use, the cel.bind() function is enabled, and can be used to simplify expressions that access multiple attributes with the same domain. For example: cel.bind(dra, device.attributes[\"dra.example.com\"], dra.someBool && dra.anotherBool) The length of the expression must be smaller or equal to 10 Ki. The cost of evaluating it is also limited based on the estimated number of logical steps. # noqa: E501
:param expression: The expression of this V1beta2CELDeviceSelector. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501
raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501
self._expression = expression
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2CELDeviceSelector):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta2CELDeviceSelector):
return True
return self.to_dict() != other.to_dict()
| V1beta2CELDeviceSelector |
python | viewflow__viewflow | viewflow/views/filters.py | {
"start": 57,
"end": 2395
} | class ____(object):
"""
A mixin that provides a way to show and handle a FilterSet in a request.
"""
filterset_class = None
filterset_initial = None
filter_fields = None
strict_filter = False
def get_filterset_class(self):
"""
Returns the filterset class to use in this view
"""
if self.filterset_class:
return self.filterset_class
elif self.filter_fields:
return filterset_factory(model=self.model, fields=self.filter_fields)
def get_filterset_kwargs(self, filterset_class):
"""
Returns the keyword arguments for instantiating the filterset.
"""
kwargs = {}
if self.viewset is not None and hasattr(self.viewset, "get_filterset_kwargs"):
kwargs = self.viewset.get_filterset_kwargs(self.request)
# filterset initial
data = self.request.GET or None
if self.filterset_initial:
if data is None:
data = self.filterset_initial
else:
data = data.copy()
for key, value in self.filterset_initial.items():
if key not in data:
if isinstance(value, (list, tuple, set)):
data.setlist(key, value)
else:
data[key] = value
return {
**kwargs,
"data": data,
"request": self.request,
}
def get_filterset(self, filterset_class, queryset):
"""
Returns an instance of the filterset to be used in this view.
"""
kwargs = self.get_filterset_kwargs(filterset_class)
return filterset_class(queryset=queryset, **kwargs)
def is_strict_filter(self):
return self.strict_filter
def get_queryset(self):
queryset = super().get_queryset()
self.filterset, filterset_class = None, self.get_filterset_class()
if filterset_class is not None:
self.filterset = self.get_filterset(filterset_class, queryset=queryset)
if self.filterset.is_valid() or not self.is_strict_filter():
queryset = self.filterset.qs
else:
queryset = self.filterset.queryset.none()
return queryset
| FilterableViewMixin |
python | django__django | django/db/models/lookups.py | {
"start": 7309,
"end": 7959
} | class ____(RegisterLookupMixin, Func):
"""
RegisterLookupMixin() is first so that get_lookup() and get_transform()
first examine self and then check output_field.
"""
bilateral = False
arity = 1
@property
def lhs(self):
return self.get_source_expressions()[0]
def get_bilateral_transforms(self):
if hasattr(self.lhs, "get_bilateral_transforms"):
bilateral_transforms = self.lhs.get_bilateral_transforms()
else:
bilateral_transforms = []
if self.bilateral:
bilateral_transforms.append(self.__class__)
return bilateral_transforms
| Transform |
python | vyperlang__vyper | vyper/venom/passes/revert_to_assert.py | {
"start": 301,
"end": 1925
} | class ____(IRPass):
cfg: CFGAnalysis
def run_pass(self):
self.cfg = self.analyses_cache.request_analysis(CFGAnalysis)
fn = self.function
for bb in fn.get_basic_blocks():
if len(bb.instructions) != 1:
continue
term = bb.instructions[0]
if term.opcode != "revert" or any(op != IRLiteral(0) for op in term.operands):
continue
for pred in self.cfg.cfg_in(bb):
if pred.instructions[-1].opcode != "jnz":
continue
self._rewrite_jnz(pred, bb)
self.analyses_cache.invalidate_analysis(CFGAnalysis)
self.analyses_cache.invalidate_analysis(DFGAnalysis)
def _rewrite_jnz(self, pred, revert_bb):
term = pred.instructions[-1]
cond, then_label, else_label = term.operands
if then_label == revert_bb.label:
new_cond = self.function.get_next_variable()
iszero_inst = IRInstruction("iszero", [cond], outputs=[new_cond])
assert_inst = IRInstruction("assert", [iszero_inst.output])
pred.insert_instruction(iszero_inst, index=-1)
pred.insert_instruction(assert_inst, index=-1)
# rewrite the jnz into a jmp
term.opcode = "jmp"
term.operands = [else_label]
return
if else_label == revert_bb.label:
assert_inst = IRInstruction("assert", [cond])
pred.insert_instruction(assert_inst, index=-1)
term.opcode = "jmp"
term.operands = [then_label]
return
| RevertToAssert |
python | pytorch__pytorch | torch/_dynamo/variables/iter.py | {
"start": 11976,
"end": 12705
} | class ____(IteratorVariable):
def __init__(self, item: VariableTracker, **kwargs: Any) -> None:
super().__init__(**kwargs)
self.item = item
# Repeat needs no mutation, clone self
def next_variable(self, tx: "InstructionTranslator") -> VariableTracker:
return self.item
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(
lambda: codegen.extend_output(
[
codegen.create_load_python_module(itertools),
codegen.create_load_attr("repeat"),
]
)
)
codegen(self.item)
codegen.extend_output(create_call_function(1, False))
| RepeatIteratorVariable |
python | tiangolo__fastapi | tests/test_jsonable_encoder.py | {
"start": 543,
"end": 669
} | class ____:
def __init__(self, owner: Person, name: str):
self.owner = owner
self.name = name
@dataclass
| Pet |
python | sympy__sympy | sympy/matrices/matrices.py | {
"start": 12278,
"end": 13363
} | class ____(MatrixReductions):
"""Provides methods relating to the fundamental subspaces of a matrix.
Should not be instantiated directly. See ``subspaces.py`` for their
implementations."""
def columnspace(self, simplify=False):
return _columnspace(self, simplify=simplify)
def nullspace(self, simplify=False, iszerofunc=_iszero):
return _nullspace(self, simplify=simplify, iszerofunc=iszerofunc)
def rowspace(self, simplify=False):
return _rowspace(self, simplify=simplify)
# This is a classmethod but is converted to such later in order to allow
# assignment of __doc__ since that does not work for already wrapped
# classmethods in Python 3.6.
def orthogonalize(cls, *vecs, **kwargs):
return _orthogonalize(cls, *vecs, **kwargs)
columnspace.__doc__ = _columnspace.__doc__
nullspace.__doc__ = _nullspace.__doc__
rowspace.__doc__ = _rowspace.__doc__
orthogonalize.__doc__ = _orthogonalize.__doc__
orthogonalize = classmethod(orthogonalize) # type:ignore
| MatrixSubspaces |
python | django__django | django/contrib/admin/helpers.py | {
"start": 822,
"end": 1074
} | class ____(forms.Form):
action = forms.ChoiceField(label=_("Action:"))
select_across = forms.BooleanField(
label="",
required=False,
initial=0,
widget=forms.HiddenInput({"class": "select-across"}),
)
| ActionForm |
python | joke2k__faker | faker/providers/ssn/bg_BG/__init__.py | {
"start": 42,
"end": 447
} | class ____(BaseProvider):
"""
A Faker provider for the Bulgarian VAT IDs
"""
vat_id_formats = (
"BG#########",
"BG##########",
)
def vat_id(self) -> str:
"""
http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
:return: A random Bulgarian VAT ID
"""
return self.bothify(self.random_element(self.vat_id_formats))
| Provider |
python | gevent__gevent | src/greentest/3.14/test_threading_local.py | {
"start": 6606,
"end": 7179
} | class ____(unittest.TestCase, BaseLocalTest):
_local = _threading_local.local
def load_tests(loader, tests, pattern):
tests.addTest(DocTestSuite('_threading_local'))
local_orig = _threading_local.local
def setUp(test):
_threading_local.local = _thread._local
def tearDown(test):
_threading_local.local = local_orig
tests.addTests(DocTestSuite('_threading_local',
setUp=setUp, tearDown=tearDown)
)
return tests
if __name__ == '__main__':
unittest.main()
| PyThreadingLocalTest |
python | walkccc__LeetCode | solutions/755. Pour Water/755.py | {
"start": 0,
"end": 402
} | class ____:
def pourWater(self, heights: list[int], volume: int, k: int) -> list[int]:
i = k
while volume > 0:
volume -= 1
while i > 0 and heights[i] >= heights[i - 1]:
i -= 1
while i + 1 < len(heights) and heights[i] >= heights[i + 1]:
i += 1
while i > k and heights[i] == heights[i - 1]:
i -= 1
heights[i] += 1
return heights
| Solution |
python | ansible__ansible | lib/ansible/plugins/action/assert.py | {
"start": 1046,
"end": 4015
} | class ____(ActionBase):
"""Assert that one or more conditional expressions evaluate to true."""
_requires_connection = False
@classmethod
def finalize_task_arg(cls, name: str, value: t.Any, templar: TemplateEngine, context: t.Any) -> t.Any:
if name != 'that':
# `that` is the only key requiring special handling; delegate to base handling otherwise
return super().finalize_task_arg(name, value, templar, context)
if not isinstance(value, str):
# if `that` is not a string, we don't need to attempt to resolve it as a template before validation (which will also listify it)
return value
# if `that` is entirely a string template, we only want to resolve to the container and avoid templating the container contents
if _jinja_bits.is_possibly_all_template(value):
try:
templated_that = templar.resolve_to_container(value)
except AnsibleTemplateError:
pass
else:
if isinstance(templated_that, list): # only use `templated_that` if it is a list
return templated_that
return value
def run(self, tmp=None, task_vars=None):
if task_vars is None:
task_vars = dict()
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
validation_result, new_module_args = self.validate_argument_spec(
argument_spec=dict(
fail_msg=dict(type=str_or_list_of_str, aliases=['msg'], default='Assertion failed'),
success_msg=dict(type=str_or_list_of_str, default='All assertions passed'),
quiet=dict(type='bool', default=False),
# explicitly not validating types `elements` here to let type rules for conditionals apply
that=dict(type=_check_type_list_strict, required=True),
),
)
fail_msg = new_module_args['fail_msg']
success_msg = new_module_args['success_msg']
quiet = new_module_args['quiet']
that_list = new_module_args['that']
if not quiet:
result['_ansible_verbose_always'] = True
for that in that_list:
test_result = self._templar.evaluate_conditional(conditional=that)
if not test_result:
result['failed'] = True
result['evaluated_to'] = test_result
result['assertion'] = that
result['msg'] = fail_msg
return result
result['changed'] = False
result['msg'] = success_msg
return result
def str_or_list_of_str(value: t.Any) -> str | list[str]:
if isinstance(value, str):
return value
if not isinstance(value, list) or any(not isinstance(item, str) for item in value):
raise TypeError("a string or list of strings is required")
return value
| ActionModule |
python | streamlit__streamlit | lib/streamlit/watcher/local_sources_watcher.py | {
"start": 1470,
"end": 11500
} | class ____:
"""Watch local Python sources and pages to trigger app reruns.
Purpose
-------
This watcher powers Streamlit's core developer workflow: save a Python file
and the app reruns. It tracks Python modules, the main script directory, and
configured watch folders to notify the runtime when a relevant file changes.
"""
def __init__(self, pages_manager: PagesManager) -> None:
self._pages_manager = pages_manager
self._main_script_path = os.path.realpath(self._pages_manager.main_script_path)
self._watch_folders = config.get_option("server.folderWatchList")
self._script_folder = os.path.dirname(self._main_script_path)
self._on_path_changed: list[Callable[[str], None]] = []
self._is_closed = False
self._cached_sys_modules: set[str] = set()
# Blacklist for folders that should not be watched
self._folder_black_list = FolderBlackList(
config.get_option("server.folderWatchBlacklist")
)
self._watched_modules: dict[str, WatchedModule] = {}
self._watched_pages: set[str] = set()
self.update_watched_pages()
def update_watched_pages(self) -> None:
old_page_paths = self._watched_pages.copy()
new_pages_paths: set[str] = set()
for page_info in self._pages_manager.get_pages().values():
if not page_info["script_path"]:
continue
page_path = os.path.realpath(page_info["script_path"])
new_pages_paths.add(page_path)
if page_path not in self._watched_pages:
self._register_watcher(
page_path,
module_name=None,
)
# Add custom watch path if it exists
for watch_folder in self._watch_folders:
# check if it is folder
if not os.path.isdir(watch_folder):
_LOGGER.warning("Watch folder is not a directory: %s", watch_folder)
continue
_LOGGER.debug("Registering watch folder: %s", watch_folder)
watch_folder_path = os.path.realpath(watch_folder)
if watch_folder_path not in self._watched_pages:
self._register_watcher(
watch_folder_path,
module_name=None,
is_directory=True,
)
for old_page_path in old_page_paths:
# Only remove pages that are no longer valid files
if old_page_path not in new_pages_paths and not os.path.isfile(
old_page_path
):
self._deregister_watcher(old_page_path)
self._watched_pages.remove(old_page_path)
self._watched_pages = self._watched_pages.union(new_pages_paths)
def register_file_change_callback(self, cb: Callable[[str], None]) -> None:
self._on_path_changed.append(cb)
def on_path_changed(self, filepath: str) -> None:
_LOGGER.debug("Path changed: %s", filepath)
norm_filepath = os.path.realpath(filepath)
if norm_filepath not in self._watched_modules:
# Check if this is a file in a watched directory
for watched_path in self._watched_modules:
if (
os.path.isdir(watched_path)
and os.path.commonpath([watched_path, norm_filepath])
== watched_path
):
_LOGGER.debug("File changed in watched directory: %s", filepath)
for cb in self._on_path_changed:
cb(filepath)
return
_LOGGER.error("Received event for non-watched path: %s", filepath)
return
# Workaround:
# Delete all watched modules so we can guarantee changes to the
# updated module are reflected on reload.
#
# In principle, for reloading a given module, we only need to unload
# the module itself and all of the modules which import it (directly
# or indirectly) such that when we exec the application code, the
# changes are reloaded and reflected in the running application.
#
# However, determining all import paths for a given loaded module is
# non-trivial, and so as a workaround we simply unload all watched
# modules.
for wm in self._watched_modules.values():
if wm.module_name is not None and wm.module_name in sys.modules:
del sys.modules[wm.module_name]
for cb in self._on_path_changed:
cb(filepath)
def close(self) -> None:
for wm in self._watched_modules.values():
wm.watcher.close()
self._watched_modules = {}
self._watched_pages = set()
self._is_closed = True
def _register_watcher(
self, filepath: str, module_name: str | None, is_directory: bool = False
) -> None:
global PathWatcher # noqa: PLW0603
if PathWatcher is None:
PathWatcher = get_default_path_watcher_class()
if PathWatcher is NoOpPathWatcher:
return
try:
# Instead of using **kwargs, explicitly pass the named parameters
glob_pattern = "**/*" if is_directory else None
wm = WatchedModule(
watcher=PathWatcher( # ty: ignore
filepath,
self.on_path_changed,
glob_pattern=glob_pattern, # Pass as named parameter
allow_nonexistent=False,
),
module_name=module_name,
)
self._watched_modules[filepath] = wm
except Exception as ex:
# If we don't have permission to read this file, or if the file
# doesn't exist, don't even add it to watchers.
_LOGGER.warning("Failed to watch file %s: %s", filepath, exc_info=ex)
return
def _deregister_watcher(self, filepath: str) -> None:
if filepath not in self._watched_modules:
return
if filepath == self._main_script_path:
return
wm = self._watched_modules[filepath]
wm.watcher.close()
del self._watched_modules[filepath]
def _file_is_new(self, filepath: str) -> bool:
return filepath not in self._watched_modules
def _file_should_be_watched(self, filepath: str) -> bool:
# Using short circuiting for performance.
return self._file_is_new(filepath) and (
file_util.file_is_in_folder_glob(filepath, self._script_folder)
or file_util.file_in_pythonpath(filepath)
)
def update_watched_modules(self) -> None:
if self._is_closed:
return
if set(sys.modules) != self._cached_sys_modules:
modules_paths = {
name: self._exclude_blacklisted_paths(get_module_paths(module))
for name, module in dict(sys.modules).items()
}
self._cached_sys_modules = set(sys.modules)
self._register_necessary_watchers(modules_paths)
def _register_necessary_watchers(self, module_paths: dict[str, set[str]]) -> None:
for name, paths in module_paths.items():
for path in paths:
if self._file_should_be_watched(path):
self._register_watcher(os.path.realpath(path), name)
def _exclude_blacklisted_paths(self, paths: set[str]) -> set[str]:
return {p for p in paths if not self._folder_black_list.is_blacklisted(p)}
def get_module_paths(module: ModuleType) -> set[str]:
paths_extractors: list[Callable[[ModuleType], list[str | None]]] = [
# https://docs.python.org/3/reference/datamodel.html
# __file__ is the pathname of the file from which the module was loaded
# if it was loaded from a file.
# The __file__ attribute may be missing for certain types of modules
lambda m: [m.__file__] if hasattr(m, "__file__") else [],
# https://docs.python.org/3/reference/import.html#__spec__
# The __spec__ attribute is set to the module spec that was used
# when importing the module. one exception is __main__,
# where __spec__ is set to None in some cases.
# https://www.python.org/dev/peps/pep-0451/#id16
# "origin" in an import context means the system
# (or resource within a system) from which a module originates
# ... It is up to the loader to decide on how to interpret
# and use a module's origin, if at all.
lambda m: [m.__spec__.origin]
if hasattr(m, "__spec__") and m.__spec__ is not None
else [],
# https://www.python.org/dev/peps/pep-0420/
# Handling of "namespace packages" in which the __path__ attribute
# is a _NamespacePath object with a _path attribute containing
# the various paths of the package.
lambda m: list(m.__path__._path)
if hasattr(m, "__path__")
# This check prevents issues with torch classes:
# https://github.com/streamlit/streamlit/issues/10992
and type(m.__path__).__name__ == "_NamespacePath"
and hasattr(m.__path__, "_path")
else [],
]
all_paths = set()
for extract_paths in paths_extractors:
potential_paths = []
try:
potential_paths = extract_paths(module)
except AttributeError:
# Some modules might not have __file__ or __spec__ attributes.
pass
except Exception:
_LOGGER.warning(
"Examining the path of %s raised:", module.__name__, exc_info=True
)
all_paths.update(
[os.path.realpath(str(p)) for p in potential_paths if _is_valid_path(p)]
)
return all_paths
def _is_valid_path(path: str | None) -> bool:
return isinstance(path, str) and (os.path.isfile(path) or os.path.isdir(path))
| LocalSourcesWatcher |
python | cython__cython | Cython/Compiler/TreeFragment.py | {
"start": 7548,
"end": 9280
} | class ____:
def __init__(self, code, name=None, pxds=None, temps=None, pipeline=None, level=None, initial_pos=None):
if pxds is None:
pxds = {}
if temps is None:
temps = []
if pipeline is None:
pipeline = []
if not name:
name = "(tree fragment)"
if isinstance(code, str):
def fmt(x): return u"\n".join(strip_common_indent(x.split(u"\n")))
fmt_code = fmt(code)
fmt_pxds = {}
for key, value in pxds.items():
fmt_pxds[key] = fmt(value)
mod = t = parse_from_strings(name, fmt_code, fmt_pxds, level=level, initial_pos=initial_pos)
if level is None:
t = t.body # Make sure a StatListNode is at the top
if not isinstance(t, StatListNode):
t = StatListNode(pos=mod.pos, stats=[t])
for transform in pipeline:
if transform is None:
continue
t = transform(t)
self.root = t
elif isinstance(code, Node):
if pxds:
raise NotImplementedError()
self.root = code
else:
raise ValueError("Unrecognized code format (accepts unicode and Node)")
self.temps = temps
def copy(self):
return copy_code_tree(self.root)
def substitute(self, nodes=None, temps=None, pos = None):
if nodes is None:
nodes = {}
if temps is None:
temps = []
return TemplateTransform()(self.root,
substitutions = nodes,
temps = self.temps + temps, pos = pos)
| TreeFragment |
python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {
"start": 37676,
"end": 38126
} | class ____(Reduction):
_parameters = ["frame", "numeric_only", "split_every"]
_defaults = {"split_every": False, "numeric_only": False}
reduction_chunk = M.count
@classmethod
def reduction_aggregate(cls, df):
return df.sum().astype("int64")
@property
def chunk_kwargs(self):
if self.frame._meta.ndim < 2:
return dict()
else:
return dict(numeric_only=self.numeric_only)
| Count |
python | PyCQA__pylint | pylint/utils/ast_walker.py | {
"start": 883,
"end": 3959
} | class ____:
def __init__(self, linter: PyLinter) -> None:
# callbacks per node types
self.nbstatements = 0
self.visit_events: defaultdict[str, list[AstCallback]] = defaultdict(list)
self.leave_events: defaultdict[str, list[AstCallback]] = defaultdict(list)
self.linter = linter
self.exception_msg = False
def _is_method_enabled(self, method: AstCallback) -> bool:
if not hasattr(method, "checks_msgs"):
return True
return any(self.linter.is_message_enabled(m) for m in method.checks_msgs)
def add_checker(self, checker: BaseChecker) -> None:
"""Walk to the checker's dir and collect visit and leave methods."""
vcids: set[str] = set()
lcids: set[str] = set()
visits = self.visit_events
leaves = self.leave_events
for member in dir(checker):
cid = member[6:]
if cid == "default":
continue
if member.startswith("visit_"):
v_meth = getattr(checker, member)
# don't use visit_methods with no activated message:
if self._is_method_enabled(v_meth):
visits[cid].append(v_meth)
vcids.add(cid)
elif member.startswith("leave_"):
l_meth = getattr(checker, member)
# don't use leave_methods with no activated message:
if self._is_method_enabled(l_meth):
leaves[cid].append(l_meth)
lcids.add(cid)
visit_default = getattr(checker, "visit_default", None)
if visit_default:
for cls in nodes.ALL_NODE_CLASSES:
cid = cls.__name__.lower()
if cid not in vcids:
visits[cid].append(visit_default)
# For now, we have no "leave_default" method in Pylint
def walk(self, astroid: nodes.NodeNG) -> None:
"""Call visit events of astroid checkers for the given node, recurse on
its children, then leave events.
"""
cid = astroid.__class__.__name__.lower()
visit_events = self.visit_events[cid]
leave_events = self.leave_events[cid]
# pylint: disable = too-many-try-statements
try:
if astroid.is_statement:
self.nbstatements += 1
# generate events for this node on each checker
for callback in visit_events:
callback(astroid)
# recurse on children
for child in astroid.get_children():
self.walk(child)
for callback in leave_events:
callback(astroid)
except Exception:
if self.exception_msg is False:
file = getattr(astroid.root(), "file", None)
print(
f"Exception on node {astroid!r} in file '{file}'",
file=sys.stderr,
)
traceback.print_exc()
self.exception_msg = True
raise
| ASTWalker |
python | walkccc__LeetCode | solutions/1630. Arithmetic Subarrays/1630.py | {
"start": 0,
"end": 662
} | class ____:
def checkArithmeticSubarrays(
self,
nums: list[int],
l: list[int],
r: list[int],
) -> list[bool]:
return [self._isArithmetic(nums, a, b) for a, b in zip(l, r)]
def _isArithmetic(self, nums: list[int], l: int, r: int) -> bool:
if r - l < 2:
return True
numsSet = set()
mn = math.inf
mx = -math.inf
for i in range(l, r+1):
mn = min(mn, nums[i])
mx = max(mx, nums[i])
numsSet.add(nums[i])
if (mx - mn) % (r - l) != 0:
return False
interval = (mx - mn) // (r - l)
return all(mn + k * interval in numsSet
for k in range(1, r - l + 1))
| Solution |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/vertex_ai/hyperparameter_tuning_job.py | {
"start": 1872,
"end": 20751
} | class ____(GoogleBaseHook, OperationHelper):
"""Hook for Google Cloud Vertex AI Hyperparameter Tuning Job APIs."""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
**kwargs,
)
self._hyperparameter_tuning_job: HyperparameterTuningJob | None = None
def get_job_service_client(self, region: str | None = None) -> JobServiceClient:
"""Return JobServiceClient."""
if region and region != "global":
client_options = ClientOptions(api_endpoint=f"{region}-aiplatform.googleapis.com:443")
else:
client_options = ClientOptions()
return JobServiceClient(
credentials=self.get_credentials(), client_info=CLIENT_INFO, client_options=client_options
)
def get_hyperparameter_tuning_job_object(
self,
display_name: str,
custom_job: CustomJob,
metric_spec: dict[str, str],
parameter_spec: dict[str, hyperparameter_tuning._ParameterSpec],
max_trial_count: int,
parallel_trial_count: int,
max_failed_trial_count: int = 0,
search_algorithm: str | None = None,
measurement_selection: str | None = "best",
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
encryption_spec_key_name: str | None = None,
) -> HyperparameterTuningJob:
"""Return HyperparameterTuningJob object."""
return HyperparameterTuningJob(
display_name=display_name,
custom_job=custom_job,
metric_spec=metric_spec,
parameter_spec=parameter_spec,
max_trial_count=max_trial_count,
parallel_trial_count=parallel_trial_count,
max_failed_trial_count=max_failed_trial_count,
search_algorithm=search_algorithm,
measurement_selection=measurement_selection,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
encryption_spec_key_name=encryption_spec_key_name,
)
def get_custom_job_object(
self,
display_name: str,
worker_pool_specs: list[dict] | list[gapic.WorkerPoolSpec],
base_output_dir: str | None = None,
project: str | None = None,
location: str | None = None,
labels: dict[str, str] | None = None,
encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
) -> CustomJob:
"""Return CustomJob object."""
return CustomJob(
display_name=display_name,
worker_pool_specs=worker_pool_specs,
base_output_dir=base_output_dir,
project=project,
location=location,
credentials=self.get_credentials(),
labels=labels,
encryption_spec_key_name=encryption_spec_key_name,
staging_bucket=staging_bucket,
)
@staticmethod
def extract_hyperparameter_tuning_job_id(obj: dict) -> str:
"""Return unique id of the hyperparameter_tuning_job."""
return obj["name"].rpartition("/")[-1]
def cancel_hyperparameter_tuning_job(self) -> None:
"""Cancel HyperparameterTuningJob."""
if self._hyperparameter_tuning_job:
self._hyperparameter_tuning_job.cancel()
@GoogleBaseHook.fallback_to_default_project_id
def create_hyperparameter_tuning_job(
self,
project_id: str,
region: str,
display_name: str,
metric_spec: dict[str, str],
parameter_spec: dict[str, hyperparameter_tuning._ParameterSpec],
max_trial_count: int,
parallel_trial_count: int,
# START: CustomJob param
worker_pool_specs: list[dict] | list[gapic.WorkerPoolSpec],
base_output_dir: str | None = None,
custom_job_labels: dict[str, str] | None = None,
custom_job_encryption_spec_key_name: str | None = None,
staging_bucket: str | None = None,
# END: CustomJob param
max_failed_trial_count: int = 0,
search_algorithm: str | None = None,
measurement_selection: str | None = "best",
hyperparameter_tuning_job_labels: dict[str, str] | None = None,
hyperparameter_tuning_job_encryption_spec_key_name: str | None = None,
# START: run param
service_account: str | None = None,
network: str | None = None,
timeout: int | None = None, # seconds
restart_job_on_worker_restart: bool = False,
enable_web_access: bool = False,
tensorboard: str | None = None,
sync: bool = True,
# END: run param
wait_job_completed: bool = True,
) -> HyperparameterTuningJob:
"""
Create a HyperparameterTuningJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param display_name: Required. The user-defined name of the HyperparameterTuningJob. The name can be
up to 128 characters long and can be consist of any UTF-8 characters.
:param metric_spec: Required. Dictionary representing metrics to optimize. The dictionary key is the
metric_id, which is reported by your training job, and the dictionary value is the optimization
goal of the metric('minimize' or 'maximize').
example: metric_spec = {'loss': 'minimize', 'accuracy': 'maximize'}
:param parameter_spec: Required. Dictionary representing parameters to optimize. The dictionary key
is the metric_id, which is passed into your training job as a command line key word argument, and
the dictionary value is the parameter specification of the metric.
:param max_trial_count: Required. The desired total number of Trials.
:param parallel_trial_count: Required. The desired number of Trials to run in parallel.
:param worker_pool_specs: Required. The spec of the worker pools including machine type and Docker
image. Can provided as a list of dictionaries or list of WorkerPoolSpec proto messages.
:param base_output_dir: Optional. GCS output directory of job. If not provided a timestamped
directory in the staging directory will be used.
:param custom_job_labels: Optional. The labels with user-defined metadata to organize CustomJobs.
Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain
lowercase letters, numeric characters, underscores and dashes. International characters are
allowed. See https://goo.gl/xmQnxf for more information and examples of labels.
:param custom_job_encryption_spec_key_name: Optional.Customer-managed encryption key name for a
CustomJob. If this is set, then all resources created by the CustomJob will be encrypted with the
provided encryption key.
:param staging_bucket: Optional. Bucket for produced custom job artifacts. Overrides staging_bucket
set in aiplatform.init.
:param max_failed_trial_count: Optional. The number of failed Trials that need to be seen before
failing the HyperparameterTuningJob. If set to 0, Vertex AI decides how many Trials must fail
before the whole job fails.
:param search_algorithm: The search algorithm specified for the Study. Accepts one of the following:
`None` - If you do not specify an algorithm, your job uses the default Vertex AI algorithm. The
default algorithm applies Bayesian optimization to arrive at the optimal solution with a more
effective search over the parameter space.
'grid' - A simple grid search within the feasible space. This option is particularly useful if
you want to specify a quantity of trials that is greater than the number of points in the
feasible space. In such cases, if you do not specify a grid search, the Vertex AI default
algorithm may generate duplicate suggestions. To use grid search, all parameter specs must be of
type `IntegerParameterSpec`, `CategoricalParameterSpace`, or `DiscreteParameterSpec`.
'random' - A simple random search within the feasible space.
:param measurement_selection: This indicates which measurement to use if/when the service
automatically selects the final measurement from previously reported intermediate measurements.
Accepts: 'best', 'last'
Choose this based on two considerations:
A) Do you expect your measurements to monotonically improve? If so, choose 'last'. On the other
hand, if you're in a situation where your system can "over-train" and you expect the performance
to get better for a while but then start declining, choose 'best'.
B) Are your measurements significantly noisy and/or irreproducible? If so, 'best' will tend to be
over-optimistic, and it may be better to choose 'last'.
If both or neither of (A) and (B) apply, it doesn't matter which selection type is chosen.
:param hyperparameter_tuning_job_labels: Optional. The labels with user-defined metadata to organize
HyperparameterTuningJobs. Label keys and values can be no longer than 64 characters (Unicode
codepoints), can only contain lowercase letters, numeric characters, underscores and dashes.
International characters are allowed. See https://goo.gl/xmQnxf for more information and examples
of labels.
:param hyperparameter_tuning_job_encryption_spec_key_name: Optional. Customer-managed encryption key
options for a HyperparameterTuningJob. If this is set, then all resources created by the
HyperparameterTuningJob will be encrypted with the provided encryption key.
:param service_account: Optional. Specifies the service account for workload run-as account. Users
submitting jobs must have act-as permission on this run-as account.
:param network: Optional. The full name of the Compute Engine network to which the job should be
peered. For example, projects/12345/global/networks/myVPC. Private services access must already
be configured for the network. If left unspecified, the job is not peered with any network.
:param timeout: The maximum job running time in seconds. The default is 7 days.
:param restart_job_on_worker_restart: Restarts the entire CustomJob if a worker gets restarted. This
feature can be used by distributed training jobs that are not resilient to workers leaving and
joining a job.
:param enable_web_access: Whether you want Vertex AI to enable interactive shell access to training
containers. https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell
:param tensorboard: Optional. The name of a Vertex AI
[Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] resource to which this CustomJob will
upload Tensorboard logs. Format:
``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` The training script should
write Tensorboard to following Vertex AI environment variable: AIP_TENSORBOARD_LOG_DIR
`service_account` is required with provided `tensorboard`. For more information on configuring
your service account please visit:
https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training
:param sync: Whether to execute this method synchronously. If False, this method will unblock and it
will be executed in a concurrent Future.
:param wait_job_completed: Whether to wait for the job completed.
"""
custom_job = self.get_custom_job_object(
project=project_id,
location=region,
display_name=display_name,
worker_pool_specs=worker_pool_specs,
base_output_dir=base_output_dir,
labels=custom_job_labels,
encryption_spec_key_name=custom_job_encryption_spec_key_name,
staging_bucket=staging_bucket,
)
self._hyperparameter_tuning_job = self.get_hyperparameter_tuning_job_object(
project=project_id,
location=region,
display_name=display_name,
custom_job=custom_job,
metric_spec=metric_spec,
parameter_spec=parameter_spec,
max_trial_count=max_trial_count,
parallel_trial_count=parallel_trial_count,
max_failed_trial_count=max_failed_trial_count,
search_algorithm=search_algorithm,
measurement_selection=measurement_selection,
labels=hyperparameter_tuning_job_labels,
encryption_spec_key_name=hyperparameter_tuning_job_encryption_spec_key_name,
)
self._hyperparameter_tuning_job.run(
service_account=service_account,
network=network,
timeout=timeout, # seconds
restart_job_on_worker_restart=restart_job_on_worker_restart,
enable_web_access=enable_web_access,
tensorboard=tensorboard,
sync=sync,
)
if wait_job_completed:
self._hyperparameter_tuning_job.wait()
else:
self._hyperparameter_tuning_job._wait_for_resource_creation()
return self._hyperparameter_tuning_job
@GoogleBaseHook.fallback_to_default_project_id
def get_hyperparameter_tuning_job(
self,
project_id: str,
region: str,
hyperparameter_tuning_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> types.HyperparameterTuningJob:
"""
Get a HyperparameterTuningJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param hyperparameter_tuning_job: Required. The name of the HyperparameterTuningJob resource.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = client.hyperparameter_tuning_job_path(project_id, region, hyperparameter_tuning_job)
result = client.get_hyperparameter_tuning_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_hyperparameter_tuning_jobs(
self,
project_id: str,
region: str,
filter: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
read_mask: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListHyperparameterTuningJobsPager:
"""
List HyperparameterTuningJobs in a Location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param filter: The standard list filter.
Supported fields:
- ``display_name`` supports = and !=.
- ``state`` supports = and !=.
- ``model_display_name`` supports = and !=
Some examples of using the filter are:
- ``state="JOB_STATE_SUCCEEDED" AND display_name="my_job"``
- ``state="JOB_STATE_RUNNING" OR display_name="my_job"``
- ``NOT display_name="my_job"``
- ``state="JOB_STATE_FAILED"``
:param page_size: The standard list page size.
:param page_token: The standard list page token.
:param read_mask: Mask specifying which fields to read.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
parent = client.common_location_path(project_id, region)
result = client.list_hyperparameter_tuning_jobs(
request={
"parent": parent,
"filter": filter,
"page_size": page_size,
"page_token": page_token,
"read_mask": read_mask,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_hyperparameter_tuning_job(
self,
project_id: str,
region: str,
hyperparameter_tuning_job: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete a HyperparameterTuningJob.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param hyperparameter_tuning_job: Required. The name of the HyperparameterTuningJob resource to be
deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_job_service_client(region)
name = client.hyperparameter_tuning_job_path(project_id, region, hyperparameter_tuning_job)
result = client.delete_hyperparameter_tuning_job(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
| HyperparameterTuningJobHook |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 3444,
"end": 4214
} | class ____:
def test_task_key_typical_case(self):
@task
def my_task():
pass
assert my_task.task_key.startswith("my_task-")
def test_task_key_after_import(self):
from tests.generic_tasks import noop
assert noop.task_key.startswith("noop-")
def test_task_key_with_funky_class(self):
class Funky:
def __call__(self, x):
return x
# set up class to trigger certain code path
# see https://github.com/PrefectHQ/prefect/issues/15058
funky = Funky()
funky.__qualname__ = "__main__.Funky"
if hasattr(funky, "__code__"):
del funky.__code__
tt = task(funky)
assert tt.task_key.startswith("Funky-")
| TestTaskKey |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/dagster/development_to_production/assets.py | {
"start": 249,
"end": 1543
} | class ____(dg.Config):
base_item_id: int
@dg.asset(
io_manager_key="snowflake_io_manager",
)
def items(config: ItemsConfig) -> pd.DataFrame:
"""Items from the Hacker News API: each is a story or a comment on a story."""
rows = []
max_id = requests.get(
"https://hacker-news.firebaseio.com/v0/maxitem.json", timeout=5
).json()
# Hacker News API is 1-indexed, so adjust range by 1
for item_id in range(max_id - config.base_item_id + 1, max_id + 1):
item_url = f"https://hacker-news.firebaseio.com/v0/item/{item_id}.json"
rows.append(requests.get(item_url, timeout=5).json())
# ITEM_FIELD_NAMES is a list of the column names in the Hacker News dataset
result = pd.DataFrame(rows, columns=ITEM_FIELD_NAMES).drop_duplicates(subset=["id"])
result.rename(columns={"by": "user_id"}, inplace=True)
return result
@dg.asset(
io_manager_key="snowflake_io_manager",
)
def comments(items: pd.DataFrame) -> pd.DataFrame:
"""Comments from the Hacker News API."""
return items[items["type"] == "comment"]
@dg.asset(
io_manager_key="snowflake_io_manager",
)
def stories(items: pd.DataFrame) -> pd.DataFrame:
"""Stories from the Hacker News API."""
return items[items["type"] == "story"]
# end_assets
| ItemsConfig |
python | pyca__cryptography | tests/x509/test_x509_ext.py | {
"start": 88740,
"end": 91324
} | class ____:
def test_get_values_for_type(self):
san = x509.SubjectAlternativeName([x509.DNSName("cryptography.io")])
names = san.get_values_for_type(x509.DNSName)
assert names == ["cryptography.io"]
def test_iter_names(self):
san = x509.SubjectAlternativeName(
[x509.DNSName("cryptography.io"), x509.DNSName("crypto.local")]
)
assert len(san) == 2
assert list(san) == [
x509.DNSName("cryptography.io"),
x509.DNSName("crypto.local"),
]
def test_indexing(self):
san = x509.SubjectAlternativeName(
[
x509.DNSName("cryptography.io"),
x509.DNSName("crypto.local"),
x509.DNSName("another.local"),
x509.RFC822Name("email@another.local"),
x509.UniformResourceIdentifier("http://another.local"),
]
)
assert san[-1] == san[4]
assert san[2:6:2] == [san[2], san[4]]
def test_invalid_general_names(self):
with pytest.raises(TypeError):
x509.SubjectAlternativeName(
[
x509.DNSName("cryptography.io"),
"invalid", # type:ignore[list-item]
]
)
def test_repr(self):
san = x509.SubjectAlternativeName([x509.DNSName("cryptography.io")])
assert repr(san) == (
"<SubjectAlternativeName("
"<GeneralNames([<DNSName(value='cryptography.io')>])>)>"
)
def test_eq(self):
san = x509.SubjectAlternativeName([x509.DNSName("cryptography.io")])
san2 = x509.SubjectAlternativeName([x509.DNSName("cryptography.io")])
assert san == san2
def test_ne(self):
san = x509.SubjectAlternativeName([x509.DNSName("cryptography.io")])
san2 = x509.SubjectAlternativeName(
[x509.RFC822Name("admin@cryptography.io")]
)
assert san != san2
assert san != object()
def test_hash(self):
san = x509.SubjectAlternativeName([x509.DNSName("cryptography.io")])
san2 = x509.SubjectAlternativeName([x509.DNSName("cryptography.io")])
san3 = x509.SubjectAlternativeName(
[x509.RFC822Name("admin@cryptography.io")]
)
assert hash(san) == hash(san2)
assert hash(san) != hash(san3)
def test_public_bytes(self):
ext = x509.SubjectAlternativeName([x509.DNSName("cryptography.io")])
assert ext.public_bytes() == b"0\x11\x82\x0fcryptography.io"
| TestSubjectAlternativeName |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 143988,
"end": 157111
} | class ____:
def test_minmax(self):
for dtype in [np.float32, np.float64, np.int32, np.int64, np.complex128]:
D = np.arange(20, dtype=dtype).reshape(5,4)
X = self.spcreator(D)
assert_equal(X.min(), 0)
assert_equal(X.max(), 19)
assert_equal(X.min().dtype, dtype)
assert_equal(X.max().dtype, dtype)
D *= -1
X = self.spcreator(D)
assert_equal(X.min(), -19)
assert_equal(X.max(), 0)
D += 5
X = self.spcreator(D)
assert_equal(X.min(), -14)
assert_equal(X.max(), 5)
# try a fully dense matrix
X = self.spcreator(np.arange(1, 10).reshape(3, 3))
assert_equal(X.min(), 1)
assert_equal(X.min().dtype, X.dtype)
assert_equal(X.min(explicit=True), 1)
X = -X
assert_equal(X.max(), -1)
assert_equal(X.max(explicit=True), -1)
# and a fully sparse matrix
Z = self.spcreator(np.zeros((1, 1)))
assert_equal(Z.min(), 0)
assert_equal(Z.max(), 0)
assert_equal(Z.max().dtype, Z.dtype)
# another test
D = np.arange(20, dtype=float).reshape(5,4)
D[0:2, :] = 0
X = self.spcreator(D)
assert_equal(X.min(), 0)
assert_equal(X.max(), 19)
# zero-size matrices
for D in [np.zeros((0, 0)), np.zeros((0, 10)), np.zeros((10, 0))]:
X = self.spcreator(D)
assert_raises(ValueError, X.min)
assert_raises(ValueError, X.max)
def test_minmax_axis(self):
keep = not self.is_array_test
D = np.arange(50).reshape(5, 10)
# completely empty rows, leaving some completely full:
D[1, :] = 0
# empty at end for reduceat:
D[:, 9] = 0
# partial rows/cols:
D[3, 3] = 0
# entries on either side of 0:
D[2, 2] = -1
X = self.spcreator(D)
axes_even = [0, -2]
axes_odd = [1, -1]
for axis in axes_odd + axes_even:
assert_array_equal(
X.max(axis=axis).toarray(), D.max(axis=axis, keepdims=keep)
)
assert_array_equal(
X.min(axis=axis).toarray(), D.min(axis=axis, keepdims=keep)
)
for axis in axes_even:
assert_equal(
X.max(axis=axis, explicit=True).toarray(),
self.asdense([40, 41, 42, 43, 44, 45, 46, 47, 48, 0])
)
if np.any(X.data == 0):
# Noncanonical case
expected = self.asdense([20, 1, -1, 3, 4, 5, 0, 7, 8, 0])
else:
expected = self.asdense([20, 1, -1, 3, 4, 5, 6, 7, 8, 0])
assert_equal(X.min(axis=axis, explicit=True).toarray(), expected)
for axis in axes_odd:
expected_max = np.array([8, 0, 28, 38, 48])
expected_min = np.array([1, 0, -1, 30, 40])
if not self.is_array_test:
expected_max = expected_max.reshape((5, 1))
expected_min = expected_min.reshape((5, 1))
assert_equal(X.max(axis=axis, explicit=True).toarray(), expected_max)
assert_equal(X.min(axis=axis, explicit=True).toarray(), expected_min)
# full matrix
D = np.arange(1, 51).reshape(10, 5)
X = self.spcreator(D)
for axis in axes_odd + axes_even:
assert_array_equal(
X.max(axis=axis).toarray(), D.max(axis=axis, keepdims=keep)
)
assert_array_equal(
X.min(axis=axis).toarray(), D.min(axis=axis, keepdims=keep)
)
assert_equal(X.max(axis=(0, 1)), D.max(axis=(0, 1), keepdims=keep))
for axis in axes_even:
expected_max = D[-1, :]
expected_min = D[0, :]
if not self.is_array_test:
expected_max = D[None, -1, :]
expected_min = D[None, 0, :]
assert_equal(X.max(axis=axis, explicit=True).toarray(), expected_max)
assert_equal(X.min(axis=axis, explicit=True).toarray(), expected_min)
for axis in axes_odd:
expected_max = D[:, -1]
expected_min = D[:, 0]
if not self.is_array_test:
expected_max = D[:, -1, None]
expected_min = D[:, 0, None]
assert_equal(X.max(axis=axis, explicit=True).toarray(), expected_max)
assert_equal(X.min(axis=axis, explicit=True).toarray(), expected_min)
# empty matrix
D = self.asdense(np.zeros((10, 5)))
X = self.spcreator(D)
for axis in axes_even + axes_odd:
assert_equal(X.max(axis=axis, explicit=True).toarray(), D.max(axis=axis))
assert_equal(X.min(axis=axis, explicit=True).toarray(), D.min(axis=axis))
# zero-size matrices
D = self.asdense(np.zeros((0, 10)))
X = self.spcreator(D)
explicit_values = [True, False]
even_explicit_pairs = list(itertools.product(axes_even, explicit_values))
odd_explicit_pairs = list(itertools.product(axes_odd, explicit_values))
for axis, ex in even_explicit_pairs:
assert_raises(ValueError, X.min, axis=axis, explicit=ex)
assert_raises(ValueError, X.max, axis=axis, explicit=ex)
for axis, ex in odd_explicit_pairs:
assert_equal(X.max(axis=axis, explicit=ex).toarray(), D.max(axis=axis))
assert_equal(X.min(axis=axis, explicit=ex).toarray(), D.min(axis=axis))
D = self.asdense(np.zeros((10, 0)))
X = self.spcreator(D)
for axis, ex in odd_explicit_pairs:
assert_raises(ValueError, X.min, axis=axis, explicit=ex)
assert_raises(ValueError, X.max, axis=axis, explicit=ex)
for axis, ex in even_explicit_pairs:
assert_equal(X.max(axis=axis, explicit=ex).toarray(), D.max(axis=axis))
assert_equal(X.min(axis=axis, explicit=ex).toarray(), D.min(axis=axis))
def test_minmax_container_type(self):
dat = array([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spcreator(dat)
matrix_or_array = ndarray if self.is_array_test else np.matrix
spmatrix_or_sparray = sparray if self.is_array_test else spmatrix
assert isscalarlike(datsp.min())
assert isinstance(datsp.min(axis=0), spmatrix_or_sparray)
assert isinstance(datsp.min(axis=1), spmatrix_or_sparray)
assert isscalarlike(datsp.max())
assert isinstance(datsp.max(axis=0), spmatrix_or_sparray)
assert isinstance(datsp.max(axis=1), spmatrix_or_sparray)
assert isscalarlike(datsp.nanmin())
assert isinstance(datsp.nanmin(axis=0), spmatrix_or_sparray)
assert isinstance(datsp.nanmin(axis=1), spmatrix_or_sparray)
assert isscalarlike(datsp.nanmax())
assert isinstance(datsp.nanmax(axis=0), spmatrix_or_sparray)
assert isinstance(datsp.nanmax(axis=1), spmatrix_or_sparray)
assert isscalarlike(datsp.argmin())
assert isinstance(datsp.argmin(axis=0), matrix_or_array)
assert isinstance(datsp.argmin(axis=1), matrix_or_array)
assert isscalarlike(datsp.argmax())
assert isinstance(datsp.argmax(axis=0), matrix_or_array)
assert isinstance(datsp.argmax(axis=1), matrix_or_array)
def test_nanminmax(self):
D = self.asdense(np.arange(50).reshape(5,10), dtype=float)
D[1, :] = 0
D[:, 9] = 0
D[3, 3] = 0
D[2, 2] = -1
D[4, 2] = np.nan
D[1, 4] = np.nan
X = self.spcreator(D)
X_nan_maximum = X.nanmax()
assert np.isscalar(X_nan_maximum)
assert X_nan_maximum == np.nanmax(D)
X_nan_minimum = X.nanmin()
assert np.isscalar(X_nan_minimum)
assert X_nan_minimum == np.nanmin(D)
X_nan_minimum = X.nanmin(axis=(0, 1))
assert np.isscalar(X_nan_minimum)
assert X_nan_minimum == np.nanmin(D, axis=(0, 1))
axes = [-2, -1, 0, 1]
for axis in axes:
X_nan_maxima = X.nanmax(axis=axis)
assert_allclose(X_nan_maxima.toarray(), np.nanmax(D, axis=axis))
assert isinstance(X_nan_maxima, self.coo_container)
X_nan_minima = X.nanmin(axis=axis)
assert_allclose(X_nan_minima.toarray(), np.nanmin(D, axis=axis))
assert isinstance(X_nan_minima, self.coo_container)
def test_minmax_invalid_params(self):
dat = array([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spcreator(dat)
for fname in ('min', 'max'):
datfunc = getattr(dat, fname)
func = getattr(datsp, fname)
assert_raises(ValueError, func, axis=3)
assert_raises(TypeError, func, axis=1.5)
assert_raises(ValueError, func, axis=1, out=1)
assert_equal(func(axis=(0, 1)), datfunc(axis=(0, 1)))
def test_numpy_minmax(self):
# See gh-5987
# xref gh-7460 in 'numpy'
from scipy.sparse import _data
dat = array([[0, 1, 2],
[3, -4, 5],
[-6, 7, 9]])
datsp = self.spcreator(dat)
# We are only testing sparse matrices who have
# implemented 'min' and 'max' because they are
# the ones with the compatibility issues with
# the 'numpy' implementation.
if isinstance(datsp, _data._minmax_mixin):
assert_array_equal(np.min(datsp), np.min(dat))
assert_array_equal(np.max(datsp), np.max(dat))
def test_argmax(self):
from scipy.sparse import _data
D1 = np.array([
[-1, 5, 2, 3],
[0, 0, -1, -2],
[-1, -2, -3, -4],
[1, 2, 3, 4],
[1, 2, 0, 0],
])
D2 = D1.transpose()
# Non-regression test cases for gh-16929.
D3 = np.array([[4, 3], [7, 5]])
D4 = np.array([[4, 3], [7, 0]])
D5 = np.array([[5, 5, 3], [4, 9, 10], [3, 4, 9]])
for D in [D1, D2, D3, D4, D5]:
D = self.asdense(D)
mat = self.spcreator(D)
if not isinstance(mat, _data._minmax_mixin):
continue
assert_equal(mat.argmax(), np.argmax(D))
assert_equal(mat.argmin(), np.argmin(D))
assert_equal(mat.argmax(axis=0), np.argmax(D, axis=0))
assert_equal(mat.argmin(axis=0), np.argmin(D, axis=0))
assert_equal(mat.argmax(axis=1), np.argmax(D, axis=1))
assert_equal(mat.argmin(axis=1), np.argmin(D, axis=1))
# full matrix with explicit=True
mat = self.spcreator(self.asdense(D5))
assert_equal(mat.argmax(explicit=True), 5)
assert_equal((-mat).argmax(explicit=True), 2)
assert_equal(mat.argmin(explicit=True), 2)
assert_equal((-mat).argmin(explicit=True), 5)
# zero-size matrices
D6 = self.spcreator(np.empty((0, 5)))
D7 = self.spcreator(np.empty((5, 0)))
explicits = [True, False]
for mat, axis, ex in itertools.product([D6, D7], [None, 0, 1], explicits):
if axis is None or mat.shape[axis] == 0:
with pytest.raises(ValueError, match="Cannot apply"):
mat.argmax(axis=axis, explicit=ex)
with pytest.raises(ValueError, match="Cannot apply"):
mat.argmin(axis=axis, explicit=ex)
else:
if self.is_array_test:
expected = np.zeros(0)
else:
expected = np.zeros((0, 1) if axis == 1 else (1, 0))
assert_equal(mat.argmin(axis=axis, explicit=ex), expected)
assert_equal(mat.argmax(axis=axis, explicit=ex), expected)
mat = self.spcreator(D1)
assert_equal(mat.argmax(axis=0, explicit=True), self.asdense([3, 0, 3, 3]))
assert_equal(mat.argmin(axis=0, explicit=True), self.asdense([0, 2, 2, 2]))
expected_max = np.array([1, 2, 0, 3, 1])
expected_min = np.array([0, 3, 3, 0, 0])
if mat.nnz != 16:
# Noncanonical case
expected_min[-1] = 2
if not self.is_array_test:
expected_max = expected_max.reshape((5, 1))
expected_min = expected_min.reshape((5, 1))
assert_equal(mat.argmax(axis=1, explicit=True), expected_max)
assert_equal(asarray(mat.argmin(axis=1, explicit=True)), expected_min)
# all zeros
D = np.zeros((2, 2))
mat = self.spcreator(D)
if mat.nnz != 0:
# Noncanonical case
assert_equal(mat.argmin(axis=None, explicit=True), 0)
assert_equal(mat.argmax(axis=None, explicit=True), 0)
else:
# Canonical case
with pytest.raises(ValueError, match="Cannot apply"):
mat.argmin(axis=None, explicit=True)
with pytest.raises(ValueError, match="Cannot apply"):
mat.argmax(axis=None, explicit=True)
| _TestMinMax |
python | pallets__markupsafe | setup.py | {
"start": 406,
"end": 2238
} | class ____(build_ext):
"""This class allows C extension building to fail."""
def run(self):
try:
super().run()
except PlatformError as e:
raise BuildFailed() from e
def build_extension(self, ext):
try:
super().build_extension(ext)
except (CCompilerError, ExecError, PlatformError) as e:
raise BuildFailed() from e
except ValueError as e:
# this can happen on Windows 64 bit, see Python issue 7511
if "'path'" in str(sys.exc_info()[1]): # works with Python 2 and 3
raise BuildFailed() from e
raise
def run_setup(with_binary):
setup(
cmdclass={"build_ext": ve_build_ext},
ext_modules=ext_modules if with_binary else [],
)
def show_message(*lines):
print("=" * 74)
for line in lines:
print(line)
print("=" * 74)
supports_speedups = platform.python_implementation() not in {
"PyPy",
"Jython",
"GraalVM",
}
if os.environ.get("CIBUILDWHEEL", "0") == "1" and supports_speedups:
run_setup(True)
elif supports_speedups:
try:
run_setup(True)
except BuildFailed:
show_message(
"WARNING: The C extension could not be compiled, speedups are not enabled.",
"Failure information, if any, is above.",
"Retrying the build without the C extension now.",
)
run_setup(False)
show_message(
"WARNING: The C extension could not be compiled, speedups are not enabled.",
"Plain-Python build succeeded.",
)
else:
run_setup(False)
show_message(
"WARNING: C extensions are not supported on this Python"
" platform, speedups are not enabled.",
"Plain-Python build succeeded.",
)
| ve_build_ext |
python | PyCQA__isort | tests/unit/test_regressions.py | {
"start": 4130,
"end": 40866
} | class ____(object):
def on_email_deleted(self, email):
from hyperkitty.tasks import rebuild_thread_cache_new_email
# update or cleanup thread # noqa: E303 (isort issue)
if self.emails.count() == 0:
...
"""
assert isort.code(test_input) == test_input
assert isort.code(test_input, lines_after_imports=2) == test_input
def test_force_single_line_shouldnt_remove_preceding_comment_lines_issue_1296():
"""Tests to ensure force_single_line setting doesn't result in lost comments.
See: https://github.com/pycqa/isort/issues/1296
"""
test_input = """
# A comment
# A comment
# Oh no, I'm gone
from moo import foo
"""
# assert isort.code(test_input) == test_input
assert isort.code(test_input, force_single_line=True) == test_input
def test_ensure_new_line_before_comments_mixed_with_ensure_newline_before_comments_1295():
"""Tests to ensure that the black profile can be used in conjunction with
force_sort_within_sections.
See: https://github.com/pycqa/isort/issues/1295
"""
test_input = """
from openzwave.group import ZWaveGroup
from openzwave.network import ZWaveNetwork
# pylint: disable=import-error
from openzwave.option import ZWaveOption
"""
assert isort.code(test_input, profile="black") == test_input
assert isort.code(test_input, profile="black", force_sort_within_sections=True) == test_input
def test_trailing_comma_doesnt_introduce_broken_code_with_comment_and_wrap_issue_1302():
"""Tests to assert the combination of include_trailing_comma and a wrapped line doesn't break.
See: https://github.com/pycqa/isort/issues/1302.
"""
assert (
isort.code(
"""
from somewhere import very_very_very_very_very_very_long_symbol # some comment
""",
line_length=50,
include_trailing_comma=True,
)
== """
from somewhere import \\
very_very_very_very_very_very_long_symbol # some comment
"""
)
def test_ensure_sre_parse_is_identified_as_stdlib_issue_1304():
"""Ensure sre_parse is identified as STDLIB.
See: https://github.com/pycqa/isort/issues/1304.
"""
assert (
isort.place_module("sre_parse") == isort.place_module("sre") == isort.settings.STDLIB # type: ignore # noqa
)
def test_add_imports_shouldnt_move_lower_comments_issue_1300():
"""Ensure add_imports doesn't move comments immediately below imports.
See:: https://github.com/pycqa/isort/issues/1300.
"""
test_input = """from __future__ import unicode_literals
from os import path
# A comment for a constant
ANSWER = 42
"""
assert isort.code(test_input, add_imports=["from os import path"]) == test_input
def test_windows_newline_issue_1277():
"""Test to ensure windows new lines are correctly handled within indented scopes.
See: https://github.com/pycqa/isort/issues/1277
"""
assert (
isort.code("\ndef main():\r\n import time\r\n\n import sys\r\n")
== "\ndef main():\r\n import sys\r\n import time\r\n"
)
def test_windows_newline_issue_1278():
"""Test to ensure windows new lines are correctly handled within indented scopes.
See: https://github.com/pycqa/isort/issues/1278
"""
assert isort.check_code(
"\ntry:\r\n import datadog_agent\r\n\r\n "
"from ..log import CheckLoggingAdapter, init_logging\r\n\r\n init_logging()\r\n"
"except ImportError:\r\n pass\r\n"
)
def test_check_never_passes_with_indented_headings_issue_1301():
"""Test to ensure that test can pass even when there are indented headings.
See: https://github.com/pycqa/isort/issues/1301
"""
assert isort.check_code(
"""
try:
# stdlib
import logging
from os import abc, path
except ImportError:
pass
""",
import_heading_stdlib="stdlib",
)
def test_isort_shouldnt_fail_on_long_from_with_dot_issue_1190():
"""Test to ensure that isort will correctly handle formatting a long from import that contains
a dot.
See: https://github.com/pycqa/isort/issues/1190
"""
assert (
isort.code(
"""
from this_is_a_very_long_import_statement.that_will_occur_across_two_lines\\
.when_the_line_length.is_only_seventynine_chars import (
function1,
function2,
)
""",
line_length=79,
multi_line_output=3,
)
== """
from this_is_a_very_long_import_statement.that_will_occur_across_two_lines"""
""".when_the_line_length.is_only_seventynine_chars import (
function1,
function2
)
"""
)
def test_isort_shouldnt_add_extra_new_line_when_fass_and_n_issue_1315():
"""Test to ensure isort doesn't add a second extra new line when combining --fss and -n options.
See: https://github.com/pycqa/isort/issues/1315
"""
assert isort.check_code(
"""import sys
# Comment canary
from . import foo
""",
ensure_newline_before_comments=True, # -n
force_sort_within_sections=True, # -fss
show_diff=True, # for better debugging in the case the test case fails.
)
assert (
isort.code(
"""
from . import foo
# Comment canary
from .. import foo
""",
ensure_newline_before_comments=True,
force_sort_within_sections=True,
)
== """
from . import foo
# Comment canary
from .. import foo
"""
)
def test_isort_doesnt_rewrite_import_with_dot_to_from_import_issue_1280():
"""Test to ensure isort doesn't rewrite imports in the from of import y.x into from y import x.
This is because they are not technically fully equivalent to each other and can introduce broken
behaviour.
See: https://github.com/pycqa/isort/issues/1280
"""
assert isort.check_code(
"""
import test.module
import test.module as m
from test import module
from test import module as m
""",
show_diff=True,
)
def test_isort_shouldnt_introduce_extra_lines_with_fass_issue_1322():
"""Tests to ensure isort doesn't introduce extra lines when used with fass option.
See: https://github.com/pycqa/isort/issues/1322
"""
assert (
isort.code(
"""
import logging
# Comment canary
from foo import bar
import quux
""",
force_sort_within_sections=True,
ensure_newline_before_comments=True,
)
== """
import logging
# Comment canary
from foo import bar
import quux
"""
)
def test_comments_should_cause_wrapping_on_long_lines_black_mode_issue_1219():
"""Tests to ensure if isort encounters a single import line which is made too long with a comment
it is wrapped when using black profile.
See: https://github.com/pycqa/isort/issues/1219
"""
assert isort.check_code(
"""
from many_stop_words import (
get_stop_words as get_base_stopwords, # extended list of stop words, also for en
)
""",
show_diff=True,
profile="black",
)
def test_comment_blocks_should_stay_associated_without_extra_lines_issue_1156():
"""Tests to ensure isort doesn't add an extra line when there are large import blocks
or otherwise warp the intent.
See: https://github.com/pycqa/isort/issues/1156
"""
assert (
isort.code(
"""from top_level_ignored import config # isort:skip
####################################
# COMMENT BLOCK SEPARATING THESE #
####################################
from ast import excepthandler
import logging
"""
)
== """from top_level_ignored import config # isort:skip
import logging
####################################
# COMMENT BLOCK SEPARATING THESE #
####################################
from ast import excepthandler
"""
)
def test_comment_shouldnt_be_duplicated_with_fass_enabled_issue_1329():
"""Tests to ensure isort doesn't duplicate comments when imports occur with comment on top,
immediately after large comment blocks.
See: https://github.com/pycqa/isort/pull/1329/files.
"""
assert isort.check_code(
"""'''
Multi-line docstring
'''
# Comment for A.
import a
# Comment for B - not A!
import b
""",
force_sort_within_sections=True,
show_diff=True,
)
def test_wrap_mode_equal_to_line_length_with_indendet_imports_issue_1333():
assert isort.check_code(
"""
import a
import b
def function():
import a as b
import c as d
""",
line_length=17,
wrap_length=17,
show_diff=True,
)
def test_isort_skipped_nested_imports_issue_1339():
"""Ensure `isort:skip are honored in nested imports.
See: https://github.com/pycqa/isort/issues/1339.
"""
assert isort.check_code(
"""
def import_test():
from os ( # isort:skip
import path
)
""",
show_diff=True,
)
def test_windows_diff_too_large_misrepresentative_issue_1348(test_path):
"""Ensure isort handles windows files correctly when it come to producing a diff with --diff.
See: https://github.com/pycqa/isort/issues/1348
"""
diff_output = StringIO()
isort.file(test_path / "example_crlf_file.py", show_diff=diff_output)
diff_output.seek(0)
assert diff_output.read().endswith(
"-1,5 +1,5 @@\n+import a\r\n import b\r\n-import a\r\n \r\n \r\n def func():\r\n"
)
def test_combine_as_does_not_lose_comments_issue_1321():
"""Test to ensure isort doesn't lose comments when --combine-as is used.
See: https://github.com/pycqa/isort/issues/1321
"""
test_input = """
from foo import * # noqa
from foo import bar as quux # other
from foo import x as a # noqa
import operator as op # op comment
import datetime as dtime # dtime comment
from datetime import date as d # dcomm
from datetime import datetime as dt # dtcomm
"""
expected_output = """
import datetime as dtime # dtime comment
import operator as op # op comment
from datetime import date as d, datetime as dt # dcomm; dtcomm
from foo import * # noqa
from foo import bar as quux, x as a # other; noqa
"""
assert isort.code(test_input, combine_as_imports=True) == expected_output
def test_combine_as_does_not_lose_comments_issue_1381():
"""Test to ensure isort doesn't lose comments when --combine-as is used.
See: https://github.com/pycqa/isort/issues/1381
"""
test_input = """
from smtplib import SMTPConnectError, SMTPNotSupportedError # important comment
"""
assert "# important comment" in isort.code(test_input, combine_as_imports=True)
test_input = """
from appsettings import AppSettings, ObjectSetting, StringSetting # type: ignore
"""
assert "# type: ignore" in isort.code(test_input, combine_as_imports=True)
def test_incorrect_grouping_when_comments_issue_1396():
"""Test to ensure isort groups import correct independent of the comments present.
See: https://github.com/pycqa/isort/issues/1396
"""
assert (
isort.code(
"""from django.shortcuts import render
from apps.profiler.models import Project
from django.contrib.auth.decorators import login_required
from django.views.generic import (
# ListView,
# DetailView,
TemplateView,
# CreateView,
# View
)
""",
line_length=88,
known_first_party=["apps"],
known_django=["django"],
sections=["FUTURE", "STDLIB", "DJANGO", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"],
)
== """from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.views.generic import \\
TemplateView # ListView,; DetailView,; CreateView,; View
from apps.profiler.models import Project
"""
)
assert (
isort.code(
"""from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from apps.profiler.models import Project
from django.views.generic import ( # ListView,; DetailView,; CreateView,; View
TemplateView,
)
""",
line_length=88,
known_first_party=["apps"],
known_django=["django"],
sections=["FUTURE", "STDLIB", "DJANGO", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"],
include_trailing_comma=True,
multi_line_output=3,
force_grid_wrap=0,
use_parentheses=True,
ensure_newline_before_comments=True,
)
== """from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.views.generic import ( # ListView,; DetailView,; CreateView,; View
TemplateView,
)
from apps.profiler.models import Project
"""
)
def test_reverse_relative_combined_with_force_sort_within_sections_issue_1395():
"""Test to ensure reverse relative combines well with other common isort settings.
See: https://github.com/pycqa/isort/issues/1395.
"""
assert isort.check_code(
"""from .fileA import a_var
from ..fileB import b_var
""",
show_diff=True,
reverse_relative=True,
force_sort_within_sections=True,
order_by_type=False,
case_sensitive=False,
multi_line_output=5,
sections=["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "APPLICATION", "LOCALFOLDER"],
lines_after_imports=2,
no_lines_before="LOCALFOLDER",
)
def test_isort_should_be_able_to_add_independent_of_doc_string_placement_issue_1420():
"""isort should be able to know when an import requested to be added is successfully added,
independent of where the top doc string is located.
See: https://github.com/PyCQA/isort/issues/1420
"""
assert isort.check_code(
'''"""module docstring"""
import os
''',
show_diff=True,
add_imports=["os"],
)
def test_comments_should_never_be_moved_between_imports_issue_1427():
"""isort should never move comments to different import statement.
See: https://github.com/PyCQA/isort/issues/1427
"""
assert isort.check_code(
"""from package import CONSTANT
from package import * # noqa
""",
force_single_line=True,
show_diff=True,
)
def test_isort_doesnt_misplace_comments_issue_1431():
"""Test to ensure isort won't misplace comments.
See: https://github.com/PyCQA/isort/issues/1431
"""
input_text = """from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent, # NOT DRY
)
from com.my_lovely_company.my_lovely_team.my_lovely_project.my_lovely_component import (
MyLovelyCompanyTeamProjectComponent as component, # DRY
)
"""
assert isort.code(input_text, profile="black") == input_text
def test_isort_doesnt_misplace_add_import_issue_1445():
"""Test to ensure isort won't misplace an added import depending on docstring position
See: https://github.com/PyCQA/isort/issues/1445
"""
assert (
isort.code(
'''#!/usr/bin/env python
"""module docstring"""
''',
add_imports=["import os"],
)
== '''#!/usr/bin/env python
"""module docstring"""
import os
'''
)
assert isort.check_code(
'''#!/usr/bin/env python
"""module docstring"""
import os
''',
add_imports=["import os"],
show_diff=True,
)
def test_isort_doesnt_mangle_code_when_adding_imports_issue_1444():
"""isort should NEVER mangle code. This particularly nasty and easy to reproduce bug,
caused isort to produce invalid code just by adding a single import statement depending
on comment placement.
See: https://github.com/PyCQA/isort/issues/1444
"""
assert (
isort.code(
'''
"""module docstring"""
''',
add_imports=["import os"],
)
== '''
"""module docstring"""
import os
'''
)
def test_isort_float_to_top_with_sort_on_off_tests():
"""Characterization test for current behaviour of float-to-top on isort: on/off sections.
- imports in isort:off sections stay where they are
- imports in isort:on sections float up, but to the top of the isort:on section (not the
top of the file)"""
assert (
isort.code(
"""
def foo():
pass
import a
# isort: off
import stays_in_section
x = 1
import stays_in_place
# isort: on
def bar():
pass
import floats_to_top_of_section
def baz():
pass
""",
float_to_top=True,
)
== """import a
def foo():
pass
# isort: off
import stays_in_section
x = 1
import stays_in_place
# isort: on
import floats_to_top_of_section
def bar():
pass
def baz():
pass
"""
)
to_sort = """# isort: off
def foo():
pass
import stays_in_place
import no_float_to_to_top
import no_ordering
def bar():
pass
"""
# No changes if isort is off
assert isort.code(to_sort, float_to_top=True) == to_sort
def test_isort_doesnt_float_to_top_correctly_when_imports_not_at_top_issue_1382():
"""isort should float existing imports to the top, if they are currently below the top.
See: https://github.com/PyCQA/isort/issues/1382
"""
assert (
isort.code(
"""
def foo():
pass
import a
def bar():
pass
""",
float_to_top=True,
)
== """import a
def foo():
pass
def bar():
pass
"""
)
assert (
isort.code(
"""
def foo():
pass
import a
def bar():
pass
""",
float_to_top=True,
)
== """import a
def foo():
pass
def bar():
pass
"""
)
assert (
isort.code(
'''"""My comment
"""
def foo():
pass
import a
def bar():
pass
''',
float_to_top=True,
)
== '''"""My comment
"""
import a
def foo():
pass
def bar():
pass
'''
)
assert (
isort.code(
'''
"""My comment
"""
def foo():
pass
import a
def bar():
pass
''',
float_to_top=True,
)
== '''
"""My comment
"""
import a
def foo():
pass
def bar():
pass
'''
)
assert (
isort.code(
'''#!/usr/bin/env bash
"""My comment
"""
def foo():
pass
import a
def bar():
pass
''',
float_to_top=True,
)
== '''#!/usr/bin/env bash
"""My comment
"""
import a
def foo():
pass
def bar():
pass
'''
)
assert (
isort.code(
'''#!/usr/bin/env bash
"""My comment
"""
def foo():
pass
import a
def bar():
pass
''',
float_to_top=True,
)
== '''#!/usr/bin/env bash
"""My comment
"""
import a
def foo():
pass
def bar():
pass
'''
)
def test_empty_float_to_top_shouldnt_error_issue_1453():
    """isort shouldn't error when float to top is set with a mostly empty file.

    See: https://github.com/PyCQA/isort/issues/1453.
    """
    # A file containing only a newline must pass both with and without
    # float_to_top enabled.
    assert isort.check_code(
        """
""",
        show_diff=True,
        float_to_top=True,
    )
    assert isort.check_code(
        """
""",
        show_diff=True,
    )
def test_import_sorting_shouldnt_be_endless_with_headers_issue_1454():
"""isort should never enter an endless sorting loop.
See: https://github.com/PyCQA/isort/issues/1454
"""
assert isort.check_code(
"""
# standard library imports
import sys
try:
# Comment about local lib
# related third party imports
from local_lib import stuff
except ImportError as e:
pass
""",
known_third_party=["local_lib"],
import_heading_thirdparty="related third party imports",
show_diff=True,
)
def test_isort_should_leave_non_import_from_lines_alone_issue_1488():
"""isort should never mangle non-import from statements.
See: https://github.com/PyCQA/isort/issues/1488
"""
raise_from_should_be_ignored = """
raise SomeException("Blah") \\
from exceptionsInfo.popitem()[1]
"""
assert isort.check_code(raise_from_should_be_ignored, show_diff=True)
yield_from_should_be_ignored = """
def generator_function():
yield \\
from other_function()[1]
"""
assert isort.check_code(yield_from_should_be_ignored, show_diff=True)
wont_ignore_comment_contiuation = """
# one
# two
def function():
# three \\
import b
import a
"""
assert (
isort.code(wont_ignore_comment_contiuation)
== """
# one
# two
def function():
# three \\
import a
import b
"""
)
will_ignore_if_non_comment_continuation = """
# one
# two
def function():
raise \\
import b
import a
"""
assert isort.check_code(will_ignore_if_non_comment_continuation, show_diff=True)
yield_from_parens_should_be_ignored = """
def generator_function():
(
yield
from other_function()[1]
)
"""
assert isort.check_code(yield_from_parens_should_be_ignored, show_diff=True)
yield_from_lots_of_parens_and_space_should_be_ignored = """
def generator_function():
(
(
((((
(((((
((
(((
yield
from other_function()[1]
)))))))))))))
)))
"""
assert isort.check_code(yield_from_lots_of_parens_and_space_should_be_ignored, show_diff=True)
yield_from_should_be_ignored_when_following_import_statement = """
def generator_function():
import os
yield \\
from other_function()[1]
"""
assert isort.check_code(
yield_from_should_be_ignored_when_following_import_statement, show_diff=True
)
yield_at_file_end_ignored = """
def generator_function():
(
(
((((
(((((
((
(((
yield
"""
assert isort.check_code(yield_at_file_end_ignored, show_diff=True)
raise_at_file_end_ignored = """
def generator_function():
(
(
((((
(((((
((
(((
raise (
"""
assert isort.check_code(raise_at_file_end_ignored, show_diff=True)
raise_from_at_file_end_ignored = """
def generator_function():
(
(
((((
(((((
((
(((
raise \\
from \\
"""
assert isort.check_code(raise_from_at_file_end_ignored, show_diff=True)
def test_isort_float_to_top_correctly_identifies_single_line_comments_1499():
"""Test to ensure isort correctly handles the case where float to top is used
to push imports to the top and the top comment is a multiline type but only
one line.
See: https://github.com/PyCQA/isort/issues/1499
"""
assert isort.code(
'''#!/usr/bin/env bash
"""My comment"""
def foo():
pass
import a
def bar():
pass
''',
float_to_top=True,
) == (
'''#!/usr/bin/env bash
"""My comment"""
import a
def foo():
pass
def bar():
pass
'''
)
assert isort.code(
"""#!/usr/bin/env bash
'''My comment'''
def foo():
pass
import a
def bar():
pass
""",
float_to_top=True,
) == (
"""#!/usr/bin/env bash
'''My comment'''
import a
def foo():
pass
def bar():
pass
"""
)
assert isort.check_code(
"""#!/usr/bin/env bash
'''My comment'''
import a
x = 1
""",
float_to_top=True,
show_diff=True,
)
def test_isort_shouldnt_mangle_from_multi_line_string_issue_1507():
"""isort was seen mangling lines that happened to contain the word from after
a yield happened to be in a file. Clearly this shouldn't happen.
See: https://github.com/PyCQA/isort/issues/1507.
"""
assert isort.check_code(
'''
def a():
yield f(
"""
select %s from (values %%s) as t(%s)
"""
)
def b():
return (
"""
select name
from foo
"""
% main_table
)
def c():
query = (
"""
select {keys}
from (values %s) as t(id)
"""
)
def d():
query = f"""select t.id
from {table} t
{extra}"""
''',
show_diff=True,
)
def test_isort_should_keep_all_as_and_non_as_imports_issue_1523():
"""isort should keep as and non-as imports of the same path that happen to exist within the
same statement.
See: https://github.com/PyCQA/isort/issues/1523.
"""
assert isort.check_code(
"""
from selenium.webdriver import Remote, Remote as Driver
""",
show_diff=True,
combine_as_imports=True,
)
def test_isort_shouldnt_introduce_syntax_error_issue_1539():
"""isort should NEVER introduce syntax errors.
In 5.5.4 some strings that contained a line starting with from could lead to no empty paren.
See: https://github.com/PyCQA/isort/issues/1539.
"""
assert isort.check_code(
'''"""Foobar
from {}""".format(
"bar",
)
''',
show_diff=True,
)
assert isort.check_code(
'''"""Foobar
import {}""".format(
"bar",
)
''',
show_diff=True,
)
assert (
isort.code(
'''"""Foobar
from {}"""
from a import b, a
''',
)
== '''"""Foobar
from {}"""
from a import a, b
'''
)
assert (
isort.code(
'''"""Foobar
from {}"""
import b
import a
''',
)
== '''"""Foobar
from {}"""
import a
import b
'''
)
def test_isort_shouldnt_split_skip_issue_1548():
"""Ensure isort doesn't add a spurious new line if isort: skip is combined with float to top.
See: https://github.com/PyCQA/isort/issues/1548.
"""
assert isort.check_code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
prune_dependencies,
)
""",
show_diff=True,
profile="black",
float_to_top=True,
)
assert isort.check_code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
prune_dependencies,
)
import a
import b
""",
show_diff=True,
profile="black",
float_to_top=True,
)
assert isort.check_code(
"""from tools.dependency_pruning.prune_dependencies import # isort:skip
import a
import b
""",
show_diff=True,
float_to_top=True,
)
assert isort.check_code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
a
)
import b
""",
show_diff=True,
profile="black",
float_to_top=True,
)
assert isort.check_code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
)
""",
show_diff=True,
profile="black",
float_to_top=True,
)
assert isort.check_code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
)""",
show_diff=True,
profile="black",
float_to_top=True,
)
assert (
isort.code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
)
""",
profile="black",
float_to_top=True,
add_imports=["import os"],
)
== """from tools.dependency_pruning.prune_dependencies import ( # isort:skip
)
import os
"""
)
assert (
isort.code(
"""from tools.dependency_pruning.prune_dependencies import ( # isort:skip
)""",
profile="black",
float_to_top=True,
add_imports=["import os"],
)
== """from tools.dependency_pruning.prune_dependencies import ( # isort:skip
)
import os
"""
)
def test_isort_shouldnt_split_skip_issue_1556():
assert isort.check_code(
"""
from tools.dependency_pruning.prune_dependencies import ( # isort:skip
prune_dependencies,
)
from tools.developer_pruning.prune_developers import ( # isort:skip
prune_developers,
)
""",
show_diff=True,
profile="black",
float_to_top=True,
)
assert isort.check_code(
"""
from tools.dependency_pruning.prune_dependencies import ( # isort:skip
prune_dependencies,
)
from tools.developer_pruning.prune_developers import x # isort:skip
""",
show_diff=True,
profile="black",
float_to_top=True,
)
def test_isort_losing_imports_vertical_prefix_from_module_import_wrap_mode_issue_1542():
"""Ensure isort doesn't lose imports when a comment is combined with an import and
wrap mode VERTICAL_PREFIX_FROM_MODULE_IMPORT is used.
See: https://github.com/PyCQA/isort/issues/1542.
"""
assert (
isort.code(
"""
from xxxxxxxxxxxxxxxx import AAAAAAAAAA, BBBBBBBBBB
from xxxxxxxxxxxxxxxx import CCCCCCCCC, DDDDDDDDD # xxxxxxxxxxxxxxxxxx
print(CCCCCCCCC)
""",
multi_line_output=9,
)
== """
from xxxxxxxxxxxxxxxx import AAAAAAAAAA, BBBBBBBBBB # xxxxxxxxxxxxxxxxxx
from xxxxxxxxxxxxxxxx import CCCCCCCCC, DDDDDDDDD
print(CCCCCCCCC)
"""
)
assert isort.check_code(
"""
from xxxxxxxxxxxxxxxx import AAAAAAAAAA, BBBBBBBBBB
from xxxxxxxxxxxxxxxx import CCCCCCCCC, DDDDDDDDD # xxxxxxxxxxxxxxxxxx isort: skip
print(CCCCCCCCC)
""",
show_diff=True,
multi_line_output=9,
)
def test_isort_adding_second_comma_issue_1621():
"""Ensure isort doesn't add a second comma when very long comment is present
See: https://github.com/PyCQA/isort/issues/1621.
"""
assert isort.check_code(
"""from .test import (
TestTestTestTestTestTest2 as TestTestTestTestTestTest1, """
"""# Some really long comment bla bla bla bla bla
)
""",
profile="black",
show_diff=True,
)
assert (
isort.code(
"""from .test import (
TestTestTestTestTestTest2 as TestTestTestTestTestTest1 """
"""# Some really long comment bla bla bla bla bla
)
""",
profile="black",
)
== """from .test import (
TestTestTestTestTestTest2 as TestTestTestTestTestTest1, """
"""# Some really long comment bla bla bla bla bla
)
"""
)
def test_isort_shouldnt_duplicate_comments_issue_1631():
assert isort.check_code(
"""
import a # a comment
import a as b # b comment
""",
show_diff=True,
)
assert (
isort.code(
"""
import a # a comment
import a as a # b comment
""",
remove_redundant_aliases=True,
)
== """
import a # a comment; b comment
"""
)
def test_isort_shouldnt_add_extra_new_lines_with_import_heading_issue_1670():
snippet = """#!/usr/bin/python3 -ttu
# Standard Library
import argparse
import datetime
import attr
import requests
def foo() -> int:
print("Hello world")
return 0
def spam():
# Standard Library
import collections
import logging
"""
assert (
isort.code(
snippet,
import_heading_stdlib="Standard Library",
)
== snippet
)
def test_isort_shouldnt_add_extra_line_float_to_top_issue_1667():
assert isort.check_code(
"""
import sys
sys.path.insert(1, 'path/containing/something_else/..')
import something_else # isort:skip
# Some constant
SOME_CONSTANT = 4
""",
show_diff=True,
float_to_top=True,
)
def test_isort_shouldnt_move_noqa_comment_issue_1594():
assert (
isort.code(
"""
from .test import TestTestTestTestTestTest1 # noqa: F401
from .test import TestTestTestTestTestTest2, TestTestTestTestTestTest3, """
"""TestTestTestTestTestTest4, TestTestTestTestTestTest5 # noqa: F401
""",
profile="black",
)
== """
from .test import TestTestTestTestTestTest1 # noqa: F401
from .test import ( # noqa: F401
TestTestTestTestTestTest2,
TestTestTestTestTestTest3,
TestTestTestTestTestTest4,
TestTestTestTestTestTest5,
)
"""
)
def test_isort_correctly_handles_unix_vs_linux_newlines_issue_1566():
    """Sorting the same statement with LF and with CRLF line endings must give
    equivalent output (modulo the line-ending style itself).

    See: https://github.com/PyCQA/isort/issues/1566.
    """
    import_statement = (
        "from impacket.smb3structs import (\n"
        "SMB2_CREATE, SMB2_FLAGS_DFS_OPERATIONS, SMB2_IL_IMPERSONATION, "
        "SMB2_OPLOCK_LEVEL_NONE, SMB2Create,"
        "\nSMB2Create_Response, SMB2Packet)\n"
    )
    # Normalize the CRLF result back to LF before comparing.
    assert isort.code(import_statement, line_length=120) == isort.code(
        import_statement.replace("\n", "\r\n"), line_length=120
    ).replace("\r\n", "\n")
def test_isort_treats_src_paths_same_as_from_config_as_cli_issue_1711(tmpdir):
assert isort.check_code(
"""
import mymodule
import sqlalchemy
""",
show_diff=True,
)
config_file = tmpdir.join(".isort.cfg")
config_file.write(
"""
[settings]
src_paths=
api
"""
)
api_dir = tmpdir.mkdir("api")
api_dir.join("mymodule.py").write("# comment")
config = isort.settings.Config(str(config_file))
assert isort.check_code(
"""
import sqlalchemy
import mymodule
""",
show_diff=True,
config=config,
)
def test_isort_should_never_quietly_remove_imports_in_hanging_line_mode_issue_1741():
assert (
isort.code(
"""
from src import abcd, qwerty, efg, xyz # some comment
""",
line_length=50,
multi_line_output=2,
)
== """
from src import abcd, efg, qwerty, xyz \\
# some comment
"""
)
assert (
isort.code(
"""
from src import abcd, qwerty, efg, xyz # some comment
""",
line_length=54,
multi_line_output=2,
)
== """
from src import abcd, efg, qwerty, xyz # some comment
"""
)
assert (
isort.code(
"""
from src import abcd, qwerty, efg, xyz # some comment
""",
line_length=53,
multi_line_output=2,
)
== """
from src import abcd, efg, qwerty, xyz \\
# some comment
"""
)
assert (
isort.code(
"""
from src import abcd, qwerty, efg, xyz # some comment
""",
line_length=30,
multi_line_output=2,
)
== """
from src import abcd, efg, \\
qwerty, xyz \\
# some comment
"""
)
@pytest.mark.parametrize("multi_line_output", range(12))
def test_isort_should_never_quietly_remove_imports_in_any_hangin_mode_issue_1741(
    multi_line_output: int,
):
    """No multi-line-output mode may silently drop an imported name when a
    comment forces wrapping at a short line length.

    See: https://github.com/PyCQA/isort/issues/1741.
    """
    sorted_code = isort.code(
        """
from src import abcd, qwerty, efg, xyz  # some comment
""",
        line_length=30,
        multi_line_output=multi_line_output,
    )
    # Every originally imported symbol must survive, whatever the wrap mode.
    assert "abcd" in sorted_code
    assert "qwerty" in sorted_code
    assert "efg" in sorted_code
    assert "xyz" in sorted_code
def test_isort_should_keep_multi_noqa_with_star_issue_1744():
assert isort.check_code(
"""
from typing import * # noqa
from typing import IO, BinaryIO, Union # noqa
""",
show_diff=True,
)
assert isort.check_code(
"""
from typing import * # noqa 1
from typing import IO, BinaryIO, Union # noqa 2
""",
show_diff=True,
)
assert isort.check_code(
"""
from typing import * # noqa
from typing import IO, BinaryIO, Union
""",
show_diff=True,
)
assert isort.check_code(
"""
from typing import *
from typing import IO, BinaryIO, Union # noqa
""",
show_diff=True,
)
assert (
isort.code(
"""
from typing import * # hi
from typing import IO, BinaryIO, Union # noqa
""",
combine_star=True,
)
== """
from typing import * # noqa; hi
"""
)
assert (
isort.code(
"""
from typing import * # noqa
from typing import IO, BinaryIO, Union # noqa
""",
combine_star=True,
)
== """
from typing import * # noqa
"""
)
def test_isort_should_keep_multiple_noqa_comments_force_single_line_mode_issue_1721():
assert isort.check_code(
"""
from some_very_long_filename_to_import_from_that_causes_a_too_long_import_row import ( # noqa: E501
CONSTANT_1,
)
from some_very_long_filename_to_import_from_that_causes_a_too_long_import_row import ( # noqa: E501
CONSTANT_2,
)
""",
show_diff=True,
profile="black",
force_single_line=True,
)
def test_isort_should_only_add_imports_to_valid_location_issue_1769():
assert (
isort.code(
'''v = """
""".split(
"\n"
)
''',
add_imports=["from __future__ import annotations"],
)
== '''from __future__ import annotations
v = """
""".split(
"\n"
)
'''
)
assert (
isort.code(
'''v=""""""''',
add_imports=["from __future__ import annotations"],
)
== '''from __future__ import annotations
v=""""""
'''
)
def test_literal_sort_at_top_of_file_issue_1792():
assert (
isort.code(
'''"""I'm a docstring! Look at me!"""
# isort: unique-list
__all__ = ["Foo", "Foo", "Bar"]
from typing import final # arbitrary
@final
| Something |
python | ray-project__ray | rllib/policy/tests/test_lstm.py | {
"start": 148,
"end": 5341
class TestLSTMUtils(unittest.TestCase):
    """Unit tests for ``chop_into_sequences``: padding and splitting rollout
    columns into fixed-length sequences for RNN training.

    Named ``Test*`` so pytest collection (see the ``__main__`` guard) picks
    the class up; the original placeholder name would never be collected.
    """

    def test_basic(self):
        # Two episodes (1 and 5) for one agent; episode 5 exceeds
        # max_seq_len=4 and must be chopped into chunks of length 4 and 1.
        eps_ids = [1, 1, 1, 5, 5, 5, 5, 5]
        agent_ids = [1, 1, 1, 1, 1, 1, 1, 1]
        f = [
            [101, 102, 103, 201, 202, 203, 204, 205],
            [[101], [102], [103], [201], [202], [203], [204], [205]],
        ]
        s = [[209, 208, 207, 109, 108, 107, 106, 105]]
        f_pad, s_init, seq_lens = chop_into_sequences(
            episode_ids=eps_ids,
            unroll_ids=np.ones_like(eps_ids),
            agent_indices=agent_ids,
            feature_columns=f,
            state_columns=s,
            max_seq_len=4,
        )
        # Features are zero-padded to a multiple of the (dynamic) max length.
        self.assertEqual(
            [f.tolist() for f in f_pad],
            [
                [101, 102, 103, 0, 201, 202, 203, 204, 205, 0, 0, 0],
                [
                    [101],
                    [102],
                    [103],
                    [0],
                    [201],
                    [202],
                    [203],
                    [204],
                    [205],
                    [0],
                    [0],
                    [0],
                ],
            ],
        )
        # One initial state per produced sequence.
        self.assertEqual([s.tolist() for s in s_init], [[209, 109, 105]])
        self.assertEqual(seq_lens.tolist(), [3, 4, 1])

    def test_nested(self):
        # Nested (dict) feature columns are handled when
        # handle_nested_data=True; leaves are chopped like flat columns.
        eps_ids = [1, 1, 1, 5, 5, 5, 5, 5]
        agent_ids = [1, 1, 1, 1, 1, 1, 1, 1]
        f = [
            {
                "a": np.array([1, 2, 3, 4, 13, 14, 15, 16]),
                "b": {"ba": np.array([5, 6, 7, 8, 9, 10, 11, 12])},
            }
        ]
        s = [[209, 208, 207, 109, 108, 107, 106, 105]]
        f_pad, s_init, seq_lens = chop_into_sequences(
            episode_ids=eps_ids,
            unroll_ids=np.ones_like(eps_ids),
            agent_indices=agent_ids,
            feature_columns=f,
            state_columns=s,
            max_seq_len=4,
            handle_nested_data=True,
        )
        check(
            f_pad,
            [
                [
                    [1, 2, 3, 0, 4, 13, 14, 15, 16, 0, 0, 0],
                    [5, 6, 7, 0, 8, 9, 10, 11, 12, 0, 0, 0],
                ]
            ],
        )
        self.assertEqual([s.tolist() for s in s_init], [[209, 109, 105]])
        self.assertEqual(seq_lens.tolist(), [3, 4, 1])

    def test_multi_dim(self):
        # Multi-dimensional (image-like) observations keep their shape.
        eps_ids = [1, 1, 1]
        agent_ids = [1, 1, 1]
        obs = np.ones((84, 84, 4))
        f = [[obs, obs * 2, obs * 3]]
        s = [[209, 208, 207]]
        f_pad, s_init, seq_lens = chop_into_sequences(
            episode_ids=eps_ids,
            unroll_ids=np.ones_like(eps_ids),
            agent_indices=agent_ids,
            feature_columns=f,
            state_columns=s,
            max_seq_len=4,
        )
        self.assertEqual(
            [f.tolist() for f in f_pad],
            [
                np.array([obs, obs * 2, obs * 3]).tolist(),
            ],
        )
        self.assertEqual([s.tolist() for s in s_init], [[209]])
        self.assertEqual(seq_lens.tolist(), [3])

    def test_batch_id(self):
        # A change in unroll (batch) id forces a sequence break even within
        # the same episode.
        eps_ids = [1, 1, 1, 5, 5, 5, 5, 5]
        batch_ids = [1, 1, 2, 2, 3, 3, 4, 4]
        agent_ids = [1, 1, 1, 1, 1, 1, 1, 1]
        f = [
            [101, 102, 103, 201, 202, 203, 204, 205],
            [[101], [102], [103], [201], [202], [203], [204], [205]],
        ]
        s = [[209, 208, 207, 109, 108, 107, 106, 105]]
        _, _, seq_lens = chop_into_sequences(
            episode_ids=eps_ids,
            unroll_ids=batch_ids,
            agent_indices=agent_ids,
            feature_columns=f,
            state_columns=s,
            max_seq_len=4,
        )
        self.assertEqual(seq_lens.tolist(), [2, 1, 1, 2, 2])

    def test_multi_agent(self):
        # A change of agent id also breaks sequences; with dynamic_max=False
        # each sequence is padded to the full max_seq_len (5 seqs * 4 = 20).
        eps_ids = [1, 1, 1, 5, 5, 5, 5, 5]
        agent_ids = [1, 1, 2, 1, 1, 2, 2, 3]
        f = [
            [101, 102, 103, 201, 202, 203, 204, 205],
            [[101], [102], [103], [201], [202], [203], [204], [205]],
        ]
        s = [[209, 208, 207, 109, 108, 107, 106, 105]]
        f_pad, s_init, seq_lens = chop_into_sequences(
            episode_ids=eps_ids,
            unroll_ids=np.ones_like(eps_ids),
            agent_indices=agent_ids,
            feature_columns=f,
            state_columns=s,
            max_seq_len=4,
            dynamic_max=False,
        )
        self.assertEqual(seq_lens.tolist(), [2, 1, 2, 2, 1])
        self.assertEqual(len(f_pad[0]), 20)
        self.assertEqual(len(s_init[0]), 5)

    def test_dynamic_max_len(self):
        # With dynamic max, padding shrinks to the longest actual sequence
        # (here 2), not the configured max_seq_len of 4.
        eps_ids = [5, 2, 2]
        agent_ids = [2, 2, 2]
        f = [[1, 1, 1]]
        s = [[1, 1, 1]]
        f_pad, s_init, seq_lens = chop_into_sequences(
            episode_ids=eps_ids,
            unroll_ids=np.ones_like(eps_ids),
            agent_indices=agent_ids,
            feature_columns=f,
            state_columns=s,
            max_seq_len=4,
        )
        self.assertEqual([f.tolist() for f in f_pad], [[1, 0, 1, 1]])
        self.assertEqual([s.tolist() for s in s_init], [[1, 1]])
        self.assertEqual(seq_lens.tolist(), [1, 2])
# Allow running this module directly; delegates to pytest so the TestCase
# classes above are discovered and executed with verbose output.
if __name__ == "__main__":
    import sys

    import pytest

    sys.exit(pytest.main(["-v", __file__]))
| TestLSTMUtils |
python | ray-project__ray | python/ray/experimental/channel/shared_memory_channel.py | {
"start": 25385,
"end": 31961
class ____(ChannelInterface):
    """
    Can be used to send data to different readers via different channels.

    For example, if the reader is in the same worker process as the writer,
    the data can be sent via IntraProcessChannel. If the reader is in a different
    worker process, the data can be sent via shared memory channel.

    Args:
        writer: The actor that may write to the channel. None signifies the driver.
        reader_and_node_list: A list of tuples, where each tuple contains a reader
            actor handle and the node ID where the actor is located.
        num_shm_buffers: The number of shared memory buffers per channel.
            Note: In the case of multiple nodes, we only support 1 shared
            memory buffer.
        driver_actor_id: If this channel is read by a driver and that driver is an
            actual actor, this will be the actor ID of that driver actor.
    """

    def __init__(
        self,
        writer: Optional[ray.actor.ActorHandle],
        reader_and_node_list: List[Tuple["ray.actor.ActorHandle", str]],
        num_shm_buffers: int,
        driver_actor_id: Optional[str] = None,
        # The underscore-prefixed parameters below exist solely so that
        # __reduce__ can rebuild an equivalent instance on deserialization
        # without re-creating the underlying channels.
        _channel_dict: Optional[Dict[ray.ActorID, ChannelInterface]] = None,
        _channels: Optional[Set[ChannelInterface]] = None,
        _writer_registered: bool = False,
        _reader_registered: bool = False,
    ):
        self._writer = writer
        self._reader_and_node_list = reader_and_node_list
        self._num_shm_buffers = num_shm_buffers
        self._driver_actor_id = driver_actor_id
        self._writer_registered = _writer_registered
        self._reader_registered = _reader_registered
        # A dictionary that maps the actor ID to the channel object.
        self._channel_dict = _channel_dict or {}
        # The set of channels is a deduplicated version of the _channel_dict values.
        self._channels = _channels or set()
        if self._channels:
            # This CompositeChannel object is created by deserialization.
            # We don't need to create channels again.
            return

        # Partition readers by whether they share the writer's worker process.
        (
            remote_reader_and_node_list,
            local_reader_and_node_list,
        ) = utils.split_readers_by_locality(self._writer, self._reader_and_node_list)
        # There are some local readers which are the same worker process as the writer.
        # Create a local channel for the writer and the local readers.
        num_local_readers = len(local_reader_and_node_list)
        if num_local_readers > 0:
            # Use num_readers = 1 when creating the local channel,
            # because we have channel cache to support reading
            # from the same channel multiple times.
            local_channel = IntraProcessChannel(num_readers=1)
            self._channels.add(local_channel)
            # All local readers share the writer's actor ID, so a single
            # dict entry keyed on the writer covers them.
            actor_id = self._get_actor_id(self._writer)
            self._channel_dict[actor_id] = local_channel
        # There are some remote readers which are not the same Ray actor as the writer.
        # We create a BufferedSharedMemoryChannel for readers on the same node, and
        # a single Channel for readers on different nodes due to
        # https://github.com/ray-project/ray/issues/49044
        (
            readers_same_node,
            readers_different_node,
        ) = utils.split_actors_by_node_locality(
            utils.get_actor_node(self._writer), remote_reader_and_node_list
        )
        if len(readers_same_node) != 0:
            # One buffered channel fans out to every same-node reader.
            remote_channel = BufferedSharedMemoryChannel(
                self._writer, readers_same_node, num_shm_buffers
            )
            self._channels.add(remote_channel)
            for reader, _ in readers_same_node:
                actor_id = self._get_actor_id(reader)
                self._channel_dict[actor_id] = remote_channel
        if len(readers_different_node) != 0:
            # Cross-node readers share a single unbuffered Channel (see the
            # issue linked above for why buffering is not used here).
            remote_channel = Channel(self._writer, readers_different_node)
            self._channels.add(remote_channel)
            for reader, _ in readers_different_node:
                actor_id = self._get_actor_id(reader)
                self._channel_dict[actor_id] = remote_channel

    def _get_actor_id(self, reader: ray.actor.ActorHandle) -> str:
        """Return the hex string form of an actor handle's ID (dict key)."""
        return reader._actor_id.hex()

    def ensure_registered_as_writer(self) -> None:
        """Register every underlying channel for writing (idempotent)."""
        if self._writer_registered:
            return
        for channel in self._channels:
            channel.ensure_registered_as_writer()
        self._writer_registered = True

    def ensure_registered_as_reader(self) -> None:
        """Register every underlying channel for reading (idempotent)."""
        if self._reader_registered:
            return
        for channel in self._channels:
            channel.ensure_registered_as_reader()
        self._reader_registered = True

    def __reduce__(self):
        # Serialize by reconstructing with the already-built channel dict/set
        # so deserialization skips channel creation (see the early return in
        # __init__). Argument order must match the __init__ signature.
        return CompositeChannel, (
            self._writer,
            self._reader_and_node_list,
            self._num_shm_buffers,
            self._driver_actor_id,
            self._channel_dict,
            self._channels,
            self._writer_registered,
            self._reader_registered,
        )

    def __str__(self) -> str:
        return (
            "CompositeChannel(_channels="
            f"{[str(channel) for channel in self._channels]})"
        )

    def write(self, value: Any, timeout: Optional[float] = None) -> None:
        """Broadcast ``value`` to every underlying channel."""
        self.ensure_registered_as_writer()
        for channel in self._channels:
            channel.write(value, timeout)

    def read(self, timeout: Optional[float] = None) -> Any:
        """Read from the channel that serves the calling actor (or driver)."""
        self.ensure_registered_as_reader()
        return self._channel_dict[self._resolve_actor_id()].read(timeout)

    def release_buffer(self, timeout: Optional[float] = None):
        """Release the read buffer on the caller's underlying channel."""
        self.ensure_registered_as_reader()
        self._channel_dict[self._resolve_actor_id()].release_buffer(timeout)

    def _resolve_actor_id(self) -> str:
        """Map the current caller to the actor ID used as a channel-dict key."""
        actor_id = ray.get_runtime_context().get_actor_id()
        # If actor_id is None, read was called by the driver
        # If the driver is an actor, driver_actor_id will be set to that actor id
        if actor_id is None or actor_id == self._driver_actor_id:
            # Use the actor ID of the DAGDriverProxyActor.
            # The proxy actor is always the first actor in the reader_and_node_list.
            assert len(self._reader_and_node_list) >= 1
            driver_proxy_actor = self._reader_and_node_list[0][0]
            actor_id = self._get_actor_id(driver_proxy_actor)
        return actor_id

    def close(self) -> None:
        """Close every underlying channel."""
        for channel in self._channels:
            channel.close()
| CompositeChannel |
python | PrefectHQ__prefect | src/prefect/client/schemas/responses.py | {
"start": 1216,
"end": 1698
class ____(PrefectBaseModel):
    """Details associated with a REJECT state transition."""

    # Literal discriminator field; pins the model's identity so pydantic
    # does not coerce payloads into a sibling detail type during union
    # validation.
    type: Literal["reject_details"] = Field(
        default="reject_details",
        description=(
            "The type of state transition detail. Used to ensure pydantic does not"
            " coerce into a different type."
        ),
    )
    # Optional human-readable explanation for the rejection; None by default.
    reason: Optional[str] = Field(
        default=None, description="The reason why the state transition was rejected."
    )
| StateRejectDetails |
python | PyCQA__pylint | tests/functional/t/test_compile.py | {
"start": 99,
"end": 206
class ____:
    """NOTE(review): appears to be a static-analysis/compile test fixture —
    the method body has no observable runtime effect. Confirm before
    "cleaning up" these statements."""

    def method(self):
        # Unary plus applied to a literal just above 2**32; the binding is
        # otherwise unused.
        var = +4294967296
        # Expression statement: accesses the bound method's code-object
        # constants tuple and discards the result.
        self.method.__code__.co_consts
| WrapperClass |
python | pytorch__pytorch | tools/stats/utilization_stats_lib.py | {
"start": 1171,
"end": 1390
class ____(DataClassJsonMixin):  # type: ignore[misc, no-any-unimported]
    """Utilization snapshot payload; JSON (de)serialization comes from
    DataClassJsonMixin."""

    # CPU utilization stats; None by default.
    cpu: UtilizationStats | None = None
    # Memory utilization stats; None by default.
    memory: UtilizationStats | None = None
    # Per-GPU usage entries; None by default.
    gpu_usage: list[GpuUsage] | None = None
@dataclass
| RecordData |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.