language
stringclasses
1 value
repo
stringclasses
346 values
path
stringlengths
6
201
class_span
dict
source
stringlengths
21
2.38M
target
stringlengths
1
96
python
jmcnamara__XlsxWriter
xlsxwriter/test/worksheet/test_cond_format10.py
{ "start": 375, "end": 2960 }
class ____(unittest.TestCase): """ Test assembling a complete Worksheet file. """ def test_assemble_xml_file(self): """Test writing a worksheet with conditional formatting.""" self.maxDiff = None fh = StringIO() worksheet = Worksheet() worksheet._set_filehandle(fh) worksheet.select() worksheet.write("A1", 10) worksheet.write("A2", 20) worksheet.write("A3", 30) worksheet.write("A4", 40) date = datetime.strptime("2011-01-01", "%Y-%m-%d") worksheet.conditional_format( "A1:A4", { "type": "date", "criteria": "greater than", "value": date, "format": None, }, ) worksheet._assemble_xml_file() exp = _xml_to_list( """ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships"> <dimension ref="A1:A4"/> <sheetViews> <sheetView tabSelected="1" workbookViewId="0"/> </sheetViews> <sheetFormatPr defaultRowHeight="15"/> <sheetData> <row r="1" spans="1:1"> <c r="A1"> <v>10</v> </c> </row> <row r="2" spans="1:1"> <c r="A2"> <v>20</v> </c> </row> <row r="3" spans="1:1"> <c r="A3"> <v>30</v> </c> </row> <row r="4" spans="1:1"> <c r="A4"> <v>40</v> </c> </row> </sheetData> <conditionalFormatting sqref="A1:A4"> <cfRule type="cellIs" priority="1" operator="greaterThan"> <formula>40544</formula> </cfRule> </conditionalFormatting> <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/> </worksheet> """ ) got = _xml_to_list(fh.getvalue()) self.assertEqual(exp, got)
TestAssembleWorksheet
python
getsentry__sentry
src/sentry/auth/providers/saml2/forms.py
{ "start": 1946, "end": 3774 }
class ____(forms.Form): entity_id = forms.CharField(label="Entity ID") sso_url = forms.URLField(label="Single Sign On URL", assume_scheme="https") slo_url = forms.URLField(label="Single Log Out URL", required=False, assume_scheme="https") x509cert = forms.CharField(label="x509 public certificate", widget=forms.Textarea) processor = lambda d: d.cleaned_data def process_metadata( form_cls: type[forms.Form], request: HttpRequest, helper: AuthHelper ) -> forms.Form | None: form = form_cls() if "action_save" not in request.POST: return form form = form_cls(request.POST) if not form.is_valid(): return form try: assert hasattr(form_cls, "processor") data = form_cls.processor(form) except SSLError: errors = form.errors.setdefault("__all__", ErrorList()) errors.append( "Could not verify SSL certificate. Ensure that your IdP instance has a valid SSL certificate that is linked to a trusted root certificate." ) return form except Exception: errors = form.errors.setdefault("__all__", ErrorList()) errors.append("Failed to parse provided SAML2 metadata") return form saml_form = SAMLForm(data) if not saml_form.is_valid(): field_errors = [ "{}: {}".format(k, ", ".join(force_str(i) for i in v)) for k, v in saml_form.errors.items() ] error_list = ", ".join(field_errors) errors = form._errors.setdefault("__all__", ErrorList()) # type: ignore[attr-defined] # XXX: ._errors is an internal attr errors.append(f"Invalid metadata: {error_list}") return form helper.bind_state("idp", data) # Data is bound, do not respond with a form to signal the next steps return None
SAMLForm
python
joke2k__faker
faker/providers/date_time/ar_AA/__init__.py
{ "start": 80, "end": 50372 }
class ____(DateTimeProvider): DAY_NAMES = { "0": "السبت", "1": "الأحد", "2": "الإثنين", "3": "الثلاثاء", "4": "الأربعاء", "5": "الخميس", "6": "الجمعة", } MONTH_NAMES = { "01": "كانون الثّاني", "02": "شباط", "03": "آذار", "04": "نيسان", "05": "أيّار", "06": "حزيران", "07": "تمّوز", "08": "آب", "09": "أيلول", "10": "تشرين الأول", "11": "تشرين الثاني", "12": "كانون الأول", } centuries = [ "الأول", "الثاني", "الثالث", "الرابع", "الخامس", "السادس", "السابع", "الثامن", "التاسع", "العاشر", "الحادي عشر", "الثاني عشر", "الثالث عشر", "الرابع عشر", "الخامس عشر", "السادس عشر", "الثامن عشر", "التاسع عشر", "العشرين", "الحادي والعشرين", "الثاني والعشرين", ] countries = [ Country( timezones=["أوروب/أندورا"], alpha_2_code="AD", alpha_3_code="AND", continent="أوروبا", name="أندورا", capital="أندورا لا فيلا", ), Country( timezones=["آسيا/كابل"], alpha_2_code="AF", alpha_3_code="AFG", continent="آسيا", name="أفغانستان", capital="كابل", ), Country( timezones=["أمريكا/أنتيغوا"], alpha_2_code="AG", alpha_3_code="ATG", continent="أمريكا الشمالية", name="أنتيغوا وباربودا", capital="سانت جونز", ), Country( timezones=["أوروبا/تيرانا"], alpha_2_code="AL", alpha_3_code="ALB", continent="أوروبا", name="ألبانيا", capital="تيرانا", ), Country( timezones=["آسيا/يريفان"], alpha_2_code="AM", alpha_3_code="ARM", continent="آسيا", name="أرمينيا", capital="يريفان", ), Country( timezones=["إفريقيا/لواندا"], alpha_2_code="AO", alpha_3_code="AGO", continent="إفريقيا", name="أنغولا", capital="لواندا", ), Country( timezones=[ "أمريكا/الأرجنتين/بوينس_آيرس", "أمريكا/الأرجنتين/Cordoba", "أمريكا/الأرجنتين/خوخوي", "أمريكا/الأرجنتين/توكومان", "أمريكا/الأرجنتين/كاتاماركا", "أمريكا/الأرجنتين/لا_ريوخا", "أمريكا/الأرجنتين/سان_خوان", "أمريكا/الأرجنتين/مندوزا", "أمريكا/الأرجنتين/ريو_غاليغوس", "أمريكا/الأرجنتين/أوشوايا", ], alpha_2_code="AR", alpha_3_code="ARG", continent="أمريكا الجنوبية", name="الأرجنتين", capital="بوينس آيرس", ), Country( timezones=["أوروبا/النمسا"], alpha_2_code="AT", alpha_3_code="AUT", 
continent="أوروبا", name="النمسا", capital="فيينا", ), Country( timezones=[ "أستراليا/لورد_هاو", "أستراليا/هوبارت", "أستراليا/كري", "أستراليا/ملبورن", "أستراليا/سدني", "أستراليا/بروكن_هل", "أستراليا/بريزبن", "أستراليا/ليندمان", "أستراليا/أديلايد", "أستراليا/داروين", "أستراليا/برث", ], alpha_2_code="AU", alpha_3_code="AUS", continent="أوقيانوسيا", name="أستراليا", capital="كانبرا", ), Country( timezones=["آسيا/باكو"], alpha_2_code="AZ", alpha_3_code="AZE", continent="آسيا", name="أذربيجان", capital="باكو", ), Country( timezones=["أمريكا/باربادوس"], alpha_2_code="BB", alpha_3_code="BRB", continent="أمريكا الشمالية", name="باربادوس", capital="بريدج تاون", ), Country( timezones=["آسيا/دكا"], alpha_2_code="BD", alpha_3_code="BGD", continent="آسيا", name="بنغلادش", capital="دكا", ), Country( timezones=["أوروبا/بروكسل"], alpha_2_code="BE", alpha_3_code="BEL", continent="أوروبا", name="بلجيكا", capital="بروكسل", ), Country( timezones=["إفريقيا/واغادوغو"], alpha_2_code="BF", alpha_3_code="BFA", continent="إفريقيا", name="بوركينا فاسو", capital="واغادوغو", ), Country( timezones=["أوروبا/صوفيا"], alpha_2_code="BG", alpha_3_code="BGR", continent="أوروبا", name="بلغاريا", capital="صوفيا", ), Country( timezones=["آسيا/البحرين"], alpha_2_code="BH", alpha_3_code="BHR", continent="آسيا", name="البحرين", capital="المنامة", ), Country( timezones=["إفريقيا/بوجمبورا"], alpha_2_code="BI", alpha_3_code="BDI", continent="إفريقيا", name="بوروندي", capital="بوجمبورا", ), Country( timezones=["إفريقيا/بورتو نوفو"], alpha_2_code="BJ", alpha_3_code="BEN", continent="إفريقيا", name="بنين", capital="بورتو نوفو", ), Country( timezones=["آسيا/بروناي"], alpha_2_code="BN", alpha_3_code="BRN", continent="آسيا", name="اتحاد بروناي (دار السلام)", capital="بندر سري بكاوان", ), Country( timezones=["أمريكا/لاباز"], alpha_2_code="BO", alpha_3_code="BOL", continent="أمريكا الجنوبية", name="بوليفيا", capital="سوكري", ), Country( timezones=[ "أمريكا/نورونها", "أمريكا/بليم", "أمريكا/فورتاليزا", "أمريكا/ريسيفي", 
"أمريكا/أراغوينا", "أمريكا/ماسايو", "أمريكا/باهيا", "أمريكا/ساو_باولو", "أمريكا/كامبو_غراندي", "أمريكا/كويابا", "أمريكا/بورتو_فاليو", "أمريكا/بوا_فيستا", "أمريكا/ماناوس", "أمريكا/إيرونيبي", "أمريكا/ريو_برانكو", ], alpha_2_code="BR", alpha_3_code="BRA", continent="أمريكا الجنوبية", name="البرازيل", capital="برازيليا", ), Country( timezones=["أمريكا/ناساو"], alpha_2_code="BS", alpha_3_code="BHS", continent="أمريكا الشمالية", name="باهاماس", capital="ناساو", ), Country( timezones=["آسيا/تيمفو"], alpha_2_code="BT", alpha_3_code="BTN", continent="آسيا", name="بوتان", capital="تيمفو", ), Country( timezones=["إفريقيا/غابورون"], alpha_2_code="BW", alpha_3_code="BWA", continent="إفريقيا", name="بوتسوانا", capital="غابورون", ), Country( timezones=["أوروبا/مينسك"], alpha_2_code="BY", alpha_3_code="BLR", continent="أوروبا", name="روسيا البيضاء", capital="مينسك", ), Country( timezones=["أمريكا/بليز"], alpha_2_code="BZ", alpha_3_code="BLZ", continent="أمريكا الشمالية", name="بليز", capital="بلموبان", ), Country( timezones=[ "أمريكا/سينت_جونز", "أمريكا/هاليفاكس", "أمريكا/جليس_باي", "أمريكا/مونكتون", "أمريكا/جووس_باي", "أمريكا/بلانك_سابلون", "أمريكا/مونتريال", "أمريكا/تورونتو", "أمريكا/نيبيغون", "أمريكا/ثاندر_باي", "أمريكا/بانغيرتانغ", "أمريكا/إيكواليوت", "أمريكا/أتيكوكان", "أمريكا/رانكن_إنلت", "أمريكا/وينيبيغ", "أمريكا/رايني_ريفر", "أمريكا/كامبريدج_باي", "أمريكا/ريجينا", "أمريكا/سويفت_كارنت", "أمريكا/إدمونتون", "أمريكا/يلو_نايف", "أمريكا/إنوفك", "أمريكا/دوسن_كريك", "أمريكا/فانكوفر", "أمريكا/وايت_هورس", "أمريكا/داوسون", ], alpha_2_code="CA", alpha_3_code="CAN", continent="أمريكا الشمالية", name="كندا", capital="أوتاوا", ), Country( timezones=["إفريقيا/كينشاسا", "إفريقيا/لوبومباشي"], alpha_2_code="CD", alpha_3_code="COD", continent="إفريقيا", name="جمهورية الكونغو الديمقراطية", capital="كينشاسا", ), Country( timezones=["إفريقيا/برازافيل"], alpha_2_code="CG", alpha_3_code="COG", continent="إفريقيا", name="جمهورية الكونغو", capital="برازافيل", ), Country( 
timezones=["إفريقيا/أبيدجان"], alpha_2_code="CI", alpha_3_code="CIV", continent="إفريقيا", name="ساحل العاج", capital="ياموسوكرو", ), Country( timezones=["أمريكا/سانتياغو", "المحيط_الهاديء/جزيرة_القيامة"], alpha_2_code="CL", alpha_3_code="CHL", continent="أمريكا الجنوبية", name="تشيلي", capital="سانتياغو", ), Country( timezones=["إفريقيا/دوالا"], alpha_2_code="CM", alpha_3_code="CMR", continent="إفريقيا", name="الكاميرون", capital="ياوندي", ), Country( timezones=[ "آسيا/شانغهاي", "آسيا/هاربن", "آسيا/تشونغتشينغ", "آسيا/أورومتشي", "آسيا/كاشغر", ], alpha_2_code="CN", alpha_3_code="CHN", continent="آسيا", name="جمهورية الصين الشعبية", capital="بكين", ), Country( timezones=["أمريكا/بوغوتا"], alpha_2_code="CO", alpha_3_code="COL", continent="أمريكا الجنوبية", name="كولومبيا", capital="بوغوتا", ), Country( timezones=["أمريكا/كوستاريكا"], alpha_2_code="CR", alpha_3_code="CRI", continent="أمريكا الشمالية", name="كوستاريكا", capital="سان خوسيه", ), Country( timezones=["أمريكا/هافانا"], alpha_2_code="CU", alpha_3_code="CUB", continent="أمريكا الشمالية", name="كوبا", capital="هافانا", ), Country( timezones=["الأطلنطي/الرأس_الأخضر"], alpha_2_code="CV", alpha_3_code="CPV", continent="إفريقيا", name="جمهورية الرأس الأخضر", capital="برايا", ), Country( timezones=["آسيا/نيقوسيا"], alpha_2_code="CY", alpha_3_code="CYP", continent="آسيا", name="قبرص", capital="نيقوسيا", ), Country( timezones=["أوروبا/براغ"], alpha_2_code="CZ", alpha_3_code="CZE", continent="أوروبا", name="جمهورية التشيك", capital="براغ", ), Country( timezones=["أوروبا/برلين"], alpha_2_code="DE", alpha_3_code="DEU", continent="أوروبا", name="ألمانيا", capital="برلين", ), Country( timezones=["إفريقيا/جيبوتي"], alpha_2_code="DJ", alpha_3_code="DJI", continent="إفريقيا", name="جيبوتي", capital="جيبوتي", ), Country( timezones=["أوروبا/كوبنهاغن"], alpha_2_code="DK", alpha_3_code="DNK", continent="أوروبا", name="الدنمارك", capital="كوبنهاغن", ), Country( timezones=["أمريكا/دومينيكا"], alpha_2_code="DM", alpha_3_code="DMA", 
continent="أمريكا الشمالية", name="دومينيكا", capital="روسياو", ), Country( timezones=["أمريكا/سانتو_دومينغو"], alpha_2_code="DO", alpha_3_code="DOM", continent="أمريكا الشمالية", name="جمهورية الدومينيكان", capital="سانتو دومينغو", ), Country( timezones=["أمريكا/غواياكيل", "المحيط_الهاديء/أرخبيل_غالاباغوس"], alpha_2_code="EC", alpha_3_code="ECU", continent="أمريكا الجنوبية", name="الإكوادور", capital="كيتو", ), Country( timezones=["أوروبا/تالين"], alpha_2_code="EE", alpha_3_code="EST", continent="أوروبا", name="إستونيا", capital="تالين", ), Country( timezones=["إفريقيا/القاهرة"], alpha_2_code="EG", alpha_3_code="EGY", continent="إفريقيا", name="مصر", capital="القاهرة", ), Country( timezones=["إفريقيا/أسمرة"], alpha_2_code="ER", alpha_3_code="ERI", continent="إفريقيا", name="إرتيريا", capital="أسمرة", ), Country( timezones=["إفريقيا/أديس أبابا"], alpha_2_code="ET", alpha_3_code="ETH", continent="إفريقيا", name="إثيوبيا", capital="أديس أبابا", ), Country( timezones=["أوروبا/هلسنكي"], alpha_2_code="FI", alpha_3_code="FIN", continent="أوروبا", name="فنلندا", capital="هلسنكي", ), Country( timezones=["المحيط_الهاديء/فيجي"], alpha_2_code="FJ", alpha_3_code="FJI", continent="أوقيانوسيا", name="فيجي", capital="سوفا", ), Country( timezones=["أوروبا/باريس"], alpha_2_code="FR", alpha_3_code="FRA", continent="أوروبا", name="فرنسا", capital="باريس", ), Country( timezones=["إفريقيا/ليبرفيل"], alpha_2_code="GA", alpha_3_code="GAB", continent="إفريقيا", name="الغابون", capital="ليبرفيل", ), Country( timezones=["آسيا/تبليسي"], alpha_2_code="GE", alpha_3_code="GEO", continent="آسيا", name="جورجيا", capital="تبليسي", ), Country( timezones=["إفريقيا/أكرا"], alpha_2_code="GH", alpha_3_code="GHA", continent="إفريقيا", name="غانا", capital="أكرا", ), Country( timezones=["إفريقيا/بانجول"], alpha_2_code="GM", alpha_3_code="GMB", continent="إفريقيا", name="غامبيا", capital="بانجول", ), Country( timezones=["إفريقيا/كوناكري"], alpha_2_code="GN", alpha_3_code="GIN", continent="إفريقيا", 
name="غينيا", capital="كوناكري", ), Country( timezones=["أوروبا/أثينا"], alpha_2_code="GR", alpha_3_code="GRC", continent="أوروبا", name="اليونان", capital="أثينا", ), Country( timezones=["أمريكا/غواتيمالا"], alpha_2_code="GT", alpha_3_code="GTM", continent="أمريكا الشمالية", name="غواتيمالا", capital="غواتيمالا سيتي", ), Country( timezones=["أمريكا/غواتيمالا"], alpha_2_code="HT", alpha_3_code="HTI", continent="أمريكا الشمالية", name="هايتي", capital="بورت أو برانس", ), Country( timezones=["إفريقيا/بيساو"], alpha_2_code="GW", alpha_3_code="GNB", continent="إفريقيا", name="غينيا بيساو", capital="بيساو", ), Country( timezones=["أمريكا/غيانا"], alpha_2_code="GY", alpha_3_code="GUY", continent="أمريكا الجنوبية", name="غيانا", capital="جورج تاون", ), Country( timezones=["أمريكا/تيجوسيجالبا"], alpha_2_code="HN", alpha_3_code="HND", continent="أمريكا الشمالية", name="هندوراس", capital="تيجوسيجالبا", ), Country( timezones=["أوروبا/بودابست"], alpha_2_code="HU", alpha_3_code="HUN", continent="أوروبا", name="هنغاريا", capital="بودابست", ), Country( timezones=[ "آسيا/جاكرتا", "آسيا/بونتياناك", "آسيا/ماكاسار", "آسيا/جايابورا", ], alpha_2_code="ID", alpha_3_code="IDN", continent="آسيا", name="إندونسيا", capital="جاكرتا", ), Country( timezones=["أوروبا/دبلن"], alpha_2_code="IE", alpha_3_code="IRL", continent="أوروبا", name="إيرلندا", capital="دبلن", ), Country( timezones=["آسيا/القدس"], alpha_2_code="IL", alpha_3_code="ISR", continent="آسيا", name="فلسطين", capital="القدس", ), Country( timezones=["آسيا/كالكتا"], alpha_2_code="IN", alpha_3_code="IND", continent="آسيا", name="الهند", capital="نيو دلهي", ), Country( timezones=["آسيا/بغداد"], alpha_2_code="IQ", alpha_3_code="IRQ", continent="آسيا", name="العراق", capital="بغداد", ), Country( timezones=["آسيا/طهران"], alpha_2_code="IR", alpha_3_code="IRN", continent="آسيا", name="إيران", capital="طهران", ), Country( timezones=["الأطلنطي/ريكيافيك"], alpha_2_code="IS", alpha_3_code="ISL", continent="أوروبا", name="آيسلندا", 
capital="ريكيافيك", ), Country( timezones=["أوروبا/روما"], alpha_2_code="IT", alpha_3_code="ITA", continent="أوروبا", name="إيطاليا", capital="روما", ), Country( timezones=["أمريكا/جامايكا"], alpha_2_code="JM", alpha_3_code="JAM", continent="أمريكا الشمالية", name="جامايكا", capital="كينغستون", ), Country( timezones=["آسيا/عمّان"], alpha_2_code="JO", alpha_3_code="JOR", continent="آسيا", name="الأردن", capital="عمّان", ), Country( timezones=["آسيا/طوكيو"], alpha_2_code="JP", alpha_3_code="JPN", continent="آسيا", name="اليابان", capital="طوكيو", ), Country( timezones=["إفريقيا/نيروبي"], alpha_2_code="KE", alpha_3_code="KEN", continent="إفريقيا", name="كينيا", capital="نيروبي", ), Country( timezones=["آسيا/بشكيك"], alpha_2_code="KG", alpha_3_code="KGZ", continent="آسيا", name="قيرغيزستان", capital="بشكيك", ), Country( timezones=[ "المحيط_الهاديء/تاراوا", "المحيط_الهاديء/إيديربيري", "المحيط_الهاديء/كريتيماتي", ], alpha_2_code="KI", alpha_3_code="KIR", continent="أوقيانوسيا", name="كيريباتي", capital="جنوب تاراوا", ), Country( timezones=["آسيا/بيونغ_يانغ"], alpha_2_code="KP", alpha_3_code="PRK", continent="آسيا", name="كوريا الشمالية", capital="بيونغ يانغ", ), Country( timezones=["آسيا/سيؤول"], alpha_2_code="KR", alpha_3_code="KOR", continent="آسيا", name="؛كوريا الجنوبية", capital="سيؤول", ), Country( timezones=["آسيا/الكويت"], alpha_2_code="KW", alpha_3_code="KWT", continent="آسيا", name="الكويت", capital="الكويت", ), Country( timezones=["آسيا/بيروت"], alpha_2_code="LB", alpha_3_code="LBN", continent="آسيا", name="لبنان", capital="بيروت", ), Country( timezones=["أوروبا/فادوز"], alpha_2_code="LI", alpha_3_code="LIE", continent="أوروبا", name="ليختنشتاين", capital="فادوز", ), Country( timezones=["إفريقيا/مونروفيا"], alpha_2_code="LR", alpha_3_code="LBR", continent="إفريقيا", name="ليبيريا", capital="مونروفيا", ), Country( timezones=["إفريقيا/ماسيرو"], alpha_2_code="LS", alpha_3_code="LSO", continent="إفريقيا", name="ليسوتو", capital="ماسيرو", ), Country( 
timezones=["أوروبا/فيلنيوس"], alpha_2_code="LT", alpha_3_code="LTU", continent="أوروبا", name="ليتوانيا", capital="فيلنيوس", ), Country( timezones=["أوروبا/لوكسمبرغ"], alpha_2_code="LU", alpha_3_code="LUX", continent="أوروبا", name="لوكسمبرغ", capital="لوكسمبرغ سيتي", ), Country( timezones=["أوروبا/ربيغ"], alpha_2_code="LV", alpha_3_code="LVA", continent="أوروبا", name="لاتفيا", capital="ربيغ", ), Country( timezones=["إفريقيا/طرابلس"], alpha_2_code="LY", alpha_3_code="LBY", continent="إفريقيا", name="ليبيا", capital="طرابلس", ), Country( timezones=["الهندي/أنتاناناريفو"], alpha_2_code="MG", alpha_3_code="MDG", continent="إفريقيا", name="مدغشقر", capital="أنتاناناريفو", ), Country( timezones=["المحيط_الهاديء/ماجورو", "المحيط_الهاديء/كواجلين_أتول"], alpha_2_code="MH", alpha_3_code="MHL", continent="أوقيانوسيا", name="جزر مارشال", capital="ماجورو", ), Country( timezones=["أوروبا/سكوبيه"], alpha_2_code="MK", alpha_3_code="MKD", continent="أوروبا", name="جمهورية مقدونيا", capital="سكوبيه", ), Country( timezones=["إفريقيا/باماكو"], alpha_2_code="ML", alpha_3_code="MLI", continent="إفريقيا", name="مالي", capital="باماكو", ), Country( timezones=["آسيا/رانغون"], alpha_2_code="MM", alpha_3_code="MMR", continent="آسيا", name="ميانمار", capital="نايبيداو", ), Country( timezones=["آسيا/أولان_باتور", "آسيا/Hovd", "آسيا/تشويبالسان"], alpha_2_code="MN", alpha_3_code="MNG", continent="آسيا", name="مانغوليا", capital="أولان باتور", ), Country( timezones=["إفريقيا/نواكشط"], alpha_2_code="MR", alpha_3_code="MRT", continent="إفريقيا", name="موريتانيا", capital="نواكشط", ), Country( timezones=["أوروبا/مالطا"], alpha_2_code="MT", alpha_3_code="MLT", continent="أوروبا", name="مالطا", capital="فاليتا", ), Country( timezones=["الهندي/موريشيوس"], alpha_2_code="MU", alpha_3_code="MUS", continent="إفريقيا", name="موريشيوس", capital="بور لويس", ), Country( timezones=["الهندي/جزر_المالديف"], alpha_2_code="MV", alpha_3_code="MDV", continent="آسيا", name="جمهورية المالديف", capital="ماليه", ), 
Country( timezones=["إفريقيا/بلانتاير"], alpha_2_code="MW", alpha_3_code="MWI", continent="إفريقيا", name="ملاوي", capital="ليلونغوي", ), Country( timezones=[ "أمريكا/ميكسيكو_سيتي", "أمريكا/كانكون", "أمريكا/ميرديا", "أمريكا/مونتيري", "أمريكا/مازاتلان", "أمريكا/شيواوا", "أمريكا/ارموسييو_سونورا", "أمريكا/تيخوانا", ], alpha_2_code="MX", alpha_3_code="MEX", continent="أمريكا الشمالية", name="المكسيك", capital="ميكسيكو سيتي§", ), Country( timezones=["آسيا/كوالا_لامبور", "آسيا/Kuching"], alpha_2_code="MY", alpha_3_code="MYS", continent="آسيا", name="ماليزيا", capital="كوالا لامبور", ), Country( timezones=["إفريقيا/مابوتو"], alpha_2_code="MZ", alpha_3_code="MOZ", continent="إفريقيا", name="موزمبيق", capital="مابوتو", ), Country( timezones=["إفريقيا/ويندهوك"], alpha_2_code="NA", alpha_3_code="NAM", continent="إفريقيا", name="ناميبيا", capital="ويندهوك", ), Country( timezones=["إفريقيا/نيامي"], alpha_2_code="NE", alpha_3_code="NER", continent="إفريقيا", name="النيجر", capital="نيامي", ), Country( timezones=["إفريقيا/لاغوس"], alpha_2_code="NG", alpha_3_code="NGA", continent="إفريقيا", name="نيجيريا", capital="أبوجا", ), Country( timezones=["أمريكا/ماناغوا"], alpha_2_code="NI", alpha_3_code="NIC", continent="أمريكا الشمالية", name="نيكاراغوا", capital="ماناغوا", ), Country( timezones=["أوروبا/أمستردام"], alpha_2_code="NL", alpha_3_code="NLD", continent="أوروبا", name="هولندا", capital="أمستردام", ), Country( timezones=["أوروبا/أوسلو"], alpha_2_code="NO", alpha_3_code="NOR", continent="أوروبا", name="النرويج", capital="أوسلو", ), Country( timezones=["آسيا/كاتماندو"], alpha_2_code="NP", alpha_3_code="NPL", continent="آسيا", name="النيبال", capital="كاتماندو", ), Country( timezones=["المحيط_الهاديء/ناورو"], alpha_2_code="NR", alpha_3_code="NRU", continent="أوقيانوسيا", name="ناورو", capital="يارين", ), Country( timezones=["المحيط_الهاديء/أوكلاند", "المحيط_الهاديء/تشاتهام"], alpha_2_code="NZ", alpha_3_code="NZL", continent="أوقيانوسيا", name="نيوزيلاندا", capital="ويلينغتون", ), 
Country( timezones=["آسيا/مسقط"], alpha_2_code="OM", alpha_3_code="OMN", continent="آسيا", name="عمان", capital="مسقط", ), Country( timezones=["أمريكا/بنما"], alpha_2_code="PA", alpha_3_code="PAN", continent="أمريكا الشمالية", name="بنما", capital="بنما", ), Country( timezones=["أمريكا/ليما"], alpha_2_code="PE", alpha_3_code="PER", continent="أمريكا الجنوبية", name="البيرو", capital="ليما", ), Country( timezones=["المحيط_الهاديء/بورت_مورسبي"], alpha_2_code="PG", alpha_3_code="PNG", continent="أوقيانوسيا", name="بابوا غينيا الجديدة", capital="بورت مورسبي", ), Country( timezones=["آسيا/مانيلا"], alpha_2_code="PH", alpha_3_code="PHL", continent="آسيا", name="الفيليبين", capital="مانيلا", ), Country( timezones=["آسيا/كاراتشي"], alpha_2_code="PK", alpha_3_code="PAK", continent="آسيا", name="باكستان", capital="إسلام أباد", ), Country( timezones=["أوروبا/وارسو"], alpha_2_code="PL", alpha_3_code="POL", continent="أوروبا", name="بولندا", capital="وارسو", ), Country( timezones=["أوروبا/لشبونة", "الأطلنطي/ماديرا", "الأطلنطي/الأزور"], alpha_2_code="PT", alpha_3_code="PRT", continent="أوروبا", name="البرتغال", capital="لشبونة", ), Country( timezones=["المحيط_الهاديء/بالاو"], alpha_2_code="PW", alpha_3_code="PLW", continent="أوقيانوسيا", name="بالاو", capital="نجيرولمد", ), Country( timezones=["أمريكا/أسونسيون"], alpha_2_code="PY", alpha_3_code="PRY", continent="أمريكا الجنوبية", name="بابرغوي", capital="أسونسيون", ), Country( timezones=["آسيا/قطر"], alpha_2_code="QA", alpha_3_code="QAT", continent="آسيا", name="قطر", capital="الدوحة", ), Country( timezones=["أوروبا/بوخارست"], alpha_2_code="RO", alpha_3_code="ROU", continent="أوروبا", name="رومانيا", capital="بوخارست", ), Country( timezones=[ "أوروبا/كالينينغراد", "أوروبا/موسكو", "أوروبا/Volgograd", "أوروبا/سمارة", "آسيا/يكاترينبورغ", "آسيا/أومسك", "آسيا/نوفوسيبيرسك", "آسيا/كراسنوياسك", "آسيا/إروتسك", "آسيا/ياكوتسك", "آسيا/فالديفوستوك", "آسيا/ساخالن", "آسيا/ماغادان", "آسيا/كامشتكا", "آسيا/أنادير", ], alpha_2_code="RU", 
alpha_3_code="RUS", continent="أوروبا", name="روسيا", capital="موسكو", ), Country( timezones=["إفريقيا/كيغالي"], alpha_2_code="RW", alpha_3_code="RWA", continent="إفريقيا", name="رواندا", capital="كيغالي", ), Country( timezones=["آسيا/الرياض"], alpha_2_code="SA", alpha_3_code="SAU", continent="آسيا", name="المملكة العربية السعودية", capital="الرياض", ), Country( timezones=["المحيط_الهاديء/غوادالكانال"], alpha_2_code="SB", alpha_3_code="SLB", continent="أوقيانوسيا", name="جزر سولمون", capital="هونيارا", ), Country( timezones=["الهندي/ماهي"], alpha_2_code="SC", alpha_3_code="SYC", continent="إفريقيا", name="سيشل", capital="فيكتوريا", ), Country( timezones=["إفريقيا/الخرطوم"], alpha_2_code="SD", alpha_3_code="SDN", continent="إفريقيا", name="السودان", capital="الخرطوم", ), Country( timezones=["أوروبا/ستوكهولم"], alpha_2_code="SE", alpha_3_code="SWE", continent="أوروبا", name="السويد", capital="ستوكهولم", ), Country( timezones=["آسيا/سنغافورة"], alpha_2_code="SG", alpha_3_code="SGP", continent="آسيا", name="سنغافورة", capital="سنغافورة", ), Country( timezones=["أوروبا/ليوبليانا"], alpha_2_code="SI", alpha_3_code="SVN", continent="أوروبا", name="سلوفانيا", capital="ليوبليانا", ), Country( timezones=["أوروبا/براتيسلافا"], alpha_2_code="SK", alpha_3_code="SVK", continent="أوروبا", name="سلوفاكيا", capital="براتيسلافا", ), Country( timezones=["إفريقيا/فريتاون"], alpha_2_code="SL", alpha_3_code="SLE", continent="إفريقيا", name="سيراليون", capital="فريتاون", ), Country( timezones=["أوروبا/سان_مارينو"], alpha_2_code="SM", alpha_3_code="SMR", continent="أوروبا", name="جمهورية سان مارينو", capital="سان مارينو", ), Country( timezones=["إفريقيا/داكار"], alpha_2_code="SN", alpha_3_code="SEN", continent="إفريقيا", name="السنغال", capital="داكار", ), Country( timezones=["إفريقيا/مقديشو"], alpha_2_code="SO", alpha_3_code="SOM", continent="إفريقيا", name="الصومال", capital="مقديشو", ), Country( timezones=["أمريكا/باراماريبو"], alpha_2_code="SR", alpha_3_code="SUR", continent="أمريكا 
الجنوبية", name="Suriname", capital="باراماريبو", ), Country( timezones=["إفريقيا/ساو_تومي"], alpha_2_code="ST", alpha_3_code="STP", continent="إفريقيا", name=" ساو تومي وبرينسيب", capital="ساو تومي", ), Country( timezones=["آسيا/دممشق"], alpha_2_code="SY", alpha_3_code="SYR", continent="آسيا", name="سوريا", capital="دمشق", ), Country( timezones=["إفريقيا/لومي"], alpha_2_code="TG", alpha_3_code="TGO", continent="إفريقيا", name="توغو", capital="لومي", ), Country( timezones=["آسيا/بانغوك"], alpha_2_code="TH", alpha_3_code="THA", continent="آسيا", name="تايلند", capital="بناغوك", ), Country( timezones=["آسيا/دوشنبه"], alpha_2_code="TJ", alpha_3_code="TJK", continent="آسيا", name="طاجكيستان", capital="دوشنبه", ), Country( timezones=["آسيا/عشق_آباد"], alpha_2_code="TM", alpha_3_code="TKM", continent="آسيا", name="تركمانستان", capital="عشق آباد", ), Country( timezones=["إفريقيا/تونس"], alpha_2_code="TN", alpha_3_code="TUN", continent="إفريقيا", name="تونس", capital="تونس", ), Country( timezones=["المحيط_الهاديء/تونغاتابو"], alpha_2_code="TO", alpha_3_code="TON", continent="أوقيانوسيا", name="تونغا", capital="نوكو ألوفا", ), Country( timezones=["أوروبا/إسطنبول"], alpha_2_code="TR", alpha_3_code="TUR", continent="آسيا", name="تركيا", capital="أنقرة", ), Country( timezones=["أمريكا/بورت_أوف_سبين"], alpha_2_code="TT", alpha_3_code="TTO", continent="أمريكا الشمالية", name="ترينيداد وتوباغو", capital="بورت أوف سبين", ), Country( timezones=["المحيط_الهاديء/فونافوتي"], alpha_2_code="TV", alpha_3_code="TUV", continent="أوقيانوسيا", name="توفالو", capital="فونافوتي", ), Country( timezones=["إفريقيا/دار_السلام"], alpha_2_code="TZ", alpha_3_code="TZA", continent="إفريقيا", name="تانزانيا", capital="دودوما", ), Country( timezones=[ "أوروبا/كييف", "أوروبا/أوجهورود", "أوروبا/زاباروجيا", "أوروبا/سيمفروبول", ], alpha_2_code="UA", alpha_3_code="UKR", continent="أوروبا", name="أوكرانيا", capital="كييف", ), Country( timezones=["إفريقيا/كامبالا"], alpha_2_code="UG", alpha_3_code="UGA", 
continent="إفريقيا", name="أوغندا", capital="كامبالا", ), Country( timezones=[ "أمريكا/نيويورك", "أمريكا/ديترويت", "أمريكا/كنتاكي/لويسفيل", "أمريكا/كنتاكي/مونتيسللو", "أمريكا/إنديانا/إنديانابولس", "أمريكا/إنديانا/مارنغو", "أمريكا/إنديانا/نوكس", "أمريكا/إنديانا/فيفاي", "أمريكا/شيكاغو", "أمريكا/إنديانا/فانسان", "أمريكا/إنديانا/بيترزبيرغ", "أمريكا/مينومني", "أمريكا/نورث_داكوتا/سينتر", "أمريكا/نورث_داكوتا/نيو_سالم", "أمريكا/دنفر", "أمريكا/بويسي", "أمريكا/شيبروك", "أمريكا/فينيكس", "أمريكا/لوس_أنجيلوس", "أمريكا/أنكوريج", "أمريكا/جونو", "أمريكا/ياكوتات", "أمريكا/نوم", "أمريكا/أداك", "المحيط_الهاديء/هونولولو", ], alpha_2_code="US", alpha_3_code="USA", continent="أمريكا الشمالية", name="الولايات المتحدة الأمريكية", capital="واشنطن", ), Country( timezones=["أمريكا/مونتفيدو"], alpha_2_code="UY", alpha_3_code="URY", continent="أمريكا الجنوبية", name="أوروغواي", capital="مونتفيدو", ), Country( timezones=["آسيا/سمرقند", "آسيا/طشقند"], alpha_2_code="UZ", alpha_3_code="UZB", continent="آسيا", name="أوزبكستان", capital="طشقند", ), Country( timezones=["أوروبا/الفاتيكان"], alpha_2_code="VA", alpha_3_code="VAT", continent="أوروبا", name="الفاتيكان", capital="الفاتيكان", ), Country( timezones=["أمريكا/كاركاس"], alpha_2_code="VE", alpha_3_code="VEN", continent="أمريكا الجنوبية", name="فنزويلا", capital="كاركاس", ), Country( timezones=["آسيا/سايغون"], alpha_2_code="VN", alpha_3_code="VNM", continent="آسيا", name="فيتنام", capital="هانوي", ), Country( timezones=["المحيط_الهاديء/أيفاتي"], alpha_2_code="VU", alpha_3_code="VUT", continent="أوقيانوسيا", name="فانواتو", capital="بورت فيلا", ), Country( timezones=["آسيا/عدن"], alpha_2_code="YE", alpha_3_code="YEM", continent="آسيا", name="اليمن", capital="صنعاء", ), Country( timezones=["إفريقيا/لوساكا"], alpha_2_code="ZM", alpha_3_code="ZMB", continent="إفريقيا", name="زامبيا", capital="لوساكا", ), Country( timezones=["إفريقيا/هراري"], alpha_2_code="ZW", alpha_3_code="ZWE", continent="إفريقيا", name="زيمبابوي", capital="هراري", ), Country( 
timezones=["إفريقيا/الجزائر"], alpha_2_code="DZ", alpha_3_code="DZA", continent="إفريقيا", name="الجزائر", capital="الجزائر", ), Country( timezones=["أوروبا/سراييفو"], alpha_2_code="BA", alpha_3_code="BIH", continent="أوروبا", name="البوسنة والهرسك", capital="سراييفو", ), Country( timezones=["آسيا/بنوم_بنه"], alpha_2_code="KH", alpha_3_code="KHM", continent="آسيا", name="كمبوديا", capital="بنوم بنه", ), Country( timezones=["إفريقيا/بانغي"], alpha_2_code="CF", alpha_3_code="CAF", continent="إفريقيا", name="جمهورية أفريقيا الوسطى", capital="بانغي", ), Country( timezones=["إفريقيا/نجامينا"], alpha_2_code="TD", alpha_3_code="TCD", continent="إفريقيا", name="تشاد", capital="نجامينا", ), Country( timezones=["الهندي/كومورو"], alpha_2_code="KM", alpha_3_code="COM", continent="إفريقيا", name="جزر القمر", capital="موروني", ), Country( timezones=["أوروبا/زغرب"], alpha_2_code="HR", alpha_3_code="HRV", continent="أوروبا", name="كرواتيا", capital="زغرب", ), Country( timezones=["آسيا/ديلي"], alpha_2_code="TL", alpha_3_code="TLS", continent="آسيا", name="تيمور الشرقية", capital="ديلي", ), Country( timezones=["أمريكا/السلفادور"], alpha_2_code="SV", alpha_3_code="SLV", continent="أمريكا الشمالية", name="السلفادور", capital="سان سلفادور", ), Country( timezones=["إفريقيا/مالابو"], alpha_2_code="GQ", alpha_3_code="GNQ", continent="إفريقيا", name="غينيا الاستوائية", capital="مالابو", ), Country( timezones=["أمريكا/غرينادا"], alpha_2_code="GD", alpha_3_code="GRD", continent="أمريكا الشمالية", name="غرينادا", capital="سانت جورجز", ), Country( timezones=[ "آسيا/ألماتي", "آسيا/كيزيلوردا", "آسيا/أقتوبي", "آسيا/أقتاو", "آسيا/أورال", ], alpha_2_code="KZ", alpha_3_code="KAZ", continent="آسيا", name="كازاخستان", capital="أستانة", ), Country( timezones=["آسيا/فيينتيان"], alpha_2_code="LA", alpha_3_code="LAO", continent="آسيا", name="لاوس", capital="فيينتيان", ), Country( timezones=[ "المحيط_الهاديء/تشوك", "المحيط_الهاديء/بونابي", "المحيط_الهاديء/كورساي", ], alpha_2_code="FM", alpha_3_code="FSM", 
continent="أوقيانوسيا", name="ولايات ميكرونيسيا المتحدة", capital="باليكير", ), Country( timezones=["أوروبا/كيشيناو"], alpha_2_code="MD", alpha_3_code="MDA", continent="أوروبا", name="مولدافيا", capital="كيشيناو", ), Country( timezones=["أوروبا/موناكو"], alpha_2_code="MC", alpha_3_code="MCO", continent="أوروبا", name="موناكو", capital="موناكو", ), Country( timezones=["أوروبا/بودغوريتسا"], alpha_2_code="ME", alpha_3_code="MNE", continent="أوروبا", name="الجبل الأسود", capital="بودغوريتسا", ), Country( timezones=["إفريقيا/الدار_البيضاء"], alpha_2_code="MA", alpha_3_code="MAR", continent="إفريقيا", name="المغرب", capital="الرباط", ), Country( timezones=["أمريكا/سانت_كيتس"], alpha_2_code="KN", alpha_3_code="KNA", continent="أمريكا الشمالية", name="سانت كيتس ونيفيس", capital="باستير", ), Country( timezones=["أمريكا/سانت_لوسيا"], alpha_2_code="LC", alpha_3_code="LCA", continent="أمريكا الشمالية", name="سانت لوسيا", capital="كاستريس", ), Country( timezones=["أمريكا/سينت_فينسينت"], alpha_2_code="VC", alpha_3_code="VCT", continent="أمريكا الشمالية", name="سانت فينسنت والغرينادين", capital="كينغستاون", ), Country( timezones=["المحيط_الهاديء/أبيا"], alpha_2_code="WS", alpha_3_code="WSM", continent="أوقيانوسيا", name="ساموا", capital="أبيا", ), Country( timezones=["أوروبا/بلغراد"], alpha_2_code="RS", alpha_3_code="SRB", continent="أوروبا", name="صربيا", capital="بلغراد", ), Country( timezones=["إفريقيا/جوهانسبرغ"], alpha_2_code="ZA", alpha_3_code="ZAF", continent="إفريقيا", name="جنوب إفريقيا", capital="بريتوريا", ), Country( timezones=["أوروبا/مدريد", "إفريقيا/سبتة", "الأطلنطي/الكناري"], alpha_2_code="ES", alpha_3_code="ESP", continent="أوروبا", name="إسبانيا", capital="مدريد", ), Country( timezones=["آسيا/كولمبو"], alpha_2_code="LK", alpha_3_code="LKA", continent="آسيا", name="سريلانكا", capital="سري جاياواردنابورا كوتي", ), Country( timezones=["إفريقيا/مبابان"], alpha_2_code="SZ", alpha_3_code="SWZ", continent="إفريقيا", name="سوازيلاند", capital="مبابان", ), Country( 
timezones=["أوروبا/زيورخ"], alpha_2_code="CH", alpha_3_code="CHE", continent="أوروبا", name="سويسرا", capital="برن", ), Country( timezones=["آسيا/دبي"], alpha_2_code="AE", alpha_3_code="ARE", continent="آسيا", name="الإمارات العربية المتحدة", capital="أبو ظبي", ), Country( timezones=["أوروبا/لندن"], alpha_2_code="GB", alpha_3_code="GBR", continent="أوروبا", name="المملكة المتحدة", capital="لندن", ), ] AM_PM = { "AM": "ص", "PM": "م", } def month_name(self) -> str: month = self.date("%m") return self.MONTH_NAMES[month] def am_pm(self) -> str: date = self.date("%p") return self.AM_PM[date] def day_of_week(self) -> str: day = self.date("%w") return self.DAY_NAMES[day]
Provider
python
h5py__h5py
h5py/tests/test_dataset_swmr.py
{ "start": 78, "end": 1614 }
class ____(TestCase): """ Testing SWMR functions when reading a dataset. Skip this test if the HDF5 library does not have the SWMR features. """ def setUp(self): TestCase.setUp(self) self.data = np.arange(13).astype('f') self.dset = self.f.create_dataset('data', chunks=(13,), maxshape=(None,), data=self.data) fname = self.f.filename self.f.close() self.f = h5py.File(fname, 'r', swmr=True) self.dset = self.f['data'] def test_initial_swmr_mode_on(self): """ Verify that the file is initially in SWMR mode""" self.assertTrue(self.f.swmr_mode) def test_read_data(self): self.assertArrayEqual(self.dset, self.data) def test_refresh(self): self.dset.refresh() def test_force_swmr_mode_on_raises(self): """ Verify when reading a file cannot be forcibly switched to swmr mode. When reading with SWMR the file must be opened with swmr=True.""" with self.assertRaises(Exception): self.f.swmr_mode = True self.assertTrue(self.f.swmr_mode) def test_force_swmr_mode_off_raises(self): """ Switching SWMR write mode off is only possible by closing the file. Attempts to forcibly switch off the SWMR mode should raise a ValueError. """ with self.assertRaises(ValueError): self.f.swmr_mode = False self.assertTrue(self.f.swmr_mode) @pytest.mark.thread_unsafe(reason="Can't enable global SWMR flag twice")
TestDatasetSwmrRead
python
pyqtgraph__pyqtgraph
pyqtgraph/opengl/items/GLLinePlotItem.py
{ "start": 372, "end": 453 }
class ____(enum.Flag): POSITION = enum.auto() COLOR = enum.auto()
DirtyFlag
python
google__flatbuffers
tests/py_test.py
{ "start": 81900, "end": 85001 }
class ____(unittest.TestCase): """TestVtableDeduplication verifies that vtables are deduplicated.""" def test_vtable_deduplication(self): b = flatbuffers.Builder(0) b.StartObject(4) b.PrependByteSlot(0, 0, 0) b.PrependByteSlot(1, 11, 0) b.PrependByteSlot(2, 22, 0) b.PrependInt16Slot(3, 33, 0) obj0 = b.EndObject() b.StartObject(4) b.PrependByteSlot(0, 0, 0) b.PrependByteSlot(1, 44, 0) b.PrependByteSlot(2, 55, 0) b.PrependInt16Slot(3, 66, 0) obj1 = b.EndObject() b.StartObject(4) b.PrependByteSlot(0, 0, 0) b.PrependByteSlot(1, 77, 0) b.PrependByteSlot(2, 88, 0) b.PrependInt16Slot(3, 99, 0) obj2 = b.EndObject() got = b.Bytes[b.Head() :] want = bytearray([ 240, 255, 255, 255, # == -12. offset to dedupped vtable. 99, 0, 88, 77, 248, 255, 255, 255, # == -8. offset to dedupped vtable. 66, 0, 55, 44, 12, 0, 8, 0, 0, 0, 7, 0, 6, 0, 4, 0, 12, 0, 0, 0, 33, 0, 22, 11, ]) self.assertEqual((len(want), want), (len(got), got)) table0 = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - obj0) table1 = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - obj1) table2 = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - obj2) def _checkTable(tab, voffsett_value, b, c, d): # vtable size got = tab.GetVOffsetTSlot(0, 0) self.assertEqual(12, got, 'case 0, 0') # object size got = tab.GetVOffsetTSlot(2, 0) self.assertEqual(8, got, 'case 2, 0') # default value got = tab.GetVOffsetTSlot(4, 0) self.assertEqual(voffsett_value, got, 'case 4, 0') got = tab.GetSlot(6, 0, N.Uint8Flags) self.assertEqual(b, got, 'case 6, 0') val = tab.GetSlot(8, 0, N.Uint8Flags) self.assertEqual(c, val, 'failed 8, 0') got = tab.GetSlot(10, 0, N.Uint8Flags) self.assertEqual(d, got, 'failed 10, 0') _checkTable(table0, 0, 11, 22, 33) _checkTable(table1, 0, 44, 55, 66) _checkTable(table2, 0, 77, 88, 99) def test_vtable_deduplication_respects_object_size(self): """Vtables can't be shared if object sizes differ.""" b = flatbuffers.Builder(0) b.StartObject(1) b.PrependInt32Slot(0, 1, 0) first = b.EndObject() b.StartObject(1) 
b.PrependInt64Slot(0, 2, 0) second = b.EndObject() b.Finish(second) # The second object has to point to a different vtable than the first one. table_first = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - first) table_second = flatbuffers.table.Table(b.Bytes, len(b.Bytes) - second) self.assertEqual(8, table_first.GetVOffsetTSlot(2, 0)) self.assertEqual(14, table_second.GetVOffsetTSlot(2, 0)) # Ensure two distinct vtables exist so dedup considers object size. self.assertEqual(2, len(b.vtables))
TestVtableDeduplication
python
dagster-io__dagster
python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/commands/ci/state.py
{ "start": 719, "end": 886 }
class ____(BaseModel): git_url: Optional[str] commit_hash: Optional[str] build_config: Optional[models.Build] # copied from dagster_cloud.yaml
BuildMetadata
python
huggingface__transformers
src/transformers/pipelines/keypoint_matching.py
{ "start": 1981, "end": 7056 }
class ____(Pipeline): """ Keypoint matching pipeline using any `AutoModelForKeypointMatching`. This pipeline matches keypoints between two images. """ _load_processor = False _load_image_processor = True _load_feature_extractor = False _load_tokenizer = False def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) requires_backends(self, "vision") def _sanitize_parameters(self, threshold=None, timeout=None): preprocess_params = {} if timeout is not None: preprocess_params["timeout"] = timeout postprocess_params = {} if threshold is not None: postprocess_params["threshold"] = threshold return preprocess_params, {}, postprocess_params @overload def __call__(self, inputs: ImagePair, threshold: float = 0.0, **kwargs: Any) -> list[Match]: ... @overload def __call__(self, inputs: list[ImagePair], threshold: float = 0.0, **kwargs: Any) -> list[list[Match]]: ... def __call__( self, inputs: list[ImagePair] | ImagePair, threshold: float = 0.0, **kwargs: Any, ) -> list[Match] | list[list[Match]]: """ Find matches between keypoints in two images. Args: inputs (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`): The pipeline handles three types of images: - A string containing a http link pointing to an image - A string containing a local path to an image - An image loaded in PIL directly The pipeline accepts either a single pair of images or a batch of image pairs, which must then be passed as a string. Images in a batch must all be in the same format: all as http links, all as local paths, or all as PIL images. threshold (`float`, *optional*, defaults to 0.0): The threshold to use for keypoint matching. Keypoints matched with a lower matching score will be filtered out. A value of 0 means that all matched keypoints will be returned. kwargs: `timeout (`float`, *optional*, defaults to None)` The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and the call may block forever. 
Return: Union[list[Match], list[list[Match]]]: A list of matches or a list if a single image pair is provided, or of lists of matches if a batch of image pairs is provided. Each match is a dictionary containing the following keys: - **keypoint_image_0** (`Keypoint`): The keypoint in the first image (x, y coordinates). - **keypoint_image_1** (`Keypoint`): The keypoint in the second image (x, y coordinates). - **score** (`float`): The matching score between the two keypoints. """ if inputs is None: raise ValueError("Cannot call the keypoint-matching pipeline without an inputs argument!") formatted_inputs = validate_image_pairs(inputs) outputs = super().__call__(formatted_inputs, threshold=threshold, **kwargs) if len(formatted_inputs) == 1: return outputs[0] return outputs def preprocess(self, images, timeout=None): images = [load_image(image, timeout=timeout) for image in images] model_inputs = self.image_processor(images=images, return_tensors="pt") model_inputs = model_inputs.to(self.dtype) target_sizes = [image.size for image in images] preprocess_outputs = {"model_inputs": model_inputs, "target_sizes": target_sizes} return preprocess_outputs def _forward(self, preprocess_outputs): model_inputs = preprocess_outputs["model_inputs"] model_outputs = self.model(**model_inputs) forward_outputs = {"model_outputs": model_outputs, "target_sizes": [preprocess_outputs["target_sizes"]]} return forward_outputs def postprocess(self, forward_outputs, threshold=0.0) -> list[Match]: model_outputs = forward_outputs["model_outputs"] target_sizes = forward_outputs["target_sizes"] postprocess_outputs = self.image_processor.post_process_keypoint_matching( model_outputs, target_sizes=target_sizes, threshold=threshold ) postprocess_outputs = postprocess_outputs[0] pair_result = [] for kp_0, kp_1, score in zip( postprocess_outputs["keypoints0"], postprocess_outputs["keypoints1"], postprocess_outputs["matching_scores"], ): kp_0 = Keypoint(x=kp_0[0].item(), y=kp_0[1].item()) kp_1 = 
Keypoint(x=kp_1[0].item(), y=kp_1[1].item()) pair_result.append(Match(keypoint_image_0=kp_0, keypoint_image_1=kp_1, score=score.item())) pair_result = sorted(pair_result, key=lambda x: x["score"], reverse=True) return pair_result
KeypointMatchingPipeline
python
sqlalchemy__sqlalchemy
test/orm/test_deprecations.py
{ "start": 18738, "end": 19826 }
class ____(fixtures.DeclarativeMappedTest): """test for [ticket:3963]""" @classmethod def setup_classes(cls): Base = cls.DeclarativeBasic class A(Base): __tablename__ = "a" id = Column(Integer, primary_key=True) bs = relationship("B") class B(Base): __tablename__ = "b" id = Column(Integer, primary_key=True) a_id = Column(ForeignKey("a.id")) cs = relationship("C") class C(Base): __tablename__ = "c" id = Column(Integer, primary_key=True) b_id = Column(ForeignKey("b.id")) @classmethod def insert_data(cls, connection): A, B, C = cls.classes("A", "B", "C") s = Session(connection) s.add(A(id=1, bs=[B(cs=[C()])])) s.add(A(id=2)) s.commit() def _run_tests(self, query, expected): def go(): for a, _ in query: for b in a.bs: b.cs self.assert_sql_count(testing.db, go, expected)
LazyLoadOptSpecificityTest
python
tensorflow__tensorflow
tensorflow/python/training/monitored_session.py
{ "start": 45746, "end": 47937 }
class ____: """Wrapper around a `tf.compat.v1.Session`. This wrapper is used as a base class for various session wrappers that provide additional functionality such as monitoring, coordination, and recovery. In addition to the methods exported by `SessionInterface` the wrapper provides a method to check for stop and never raises exceptions from calls to `close()`. """ def __init__(self, sess): """Creates a `_WrappedSession`. Args: sess: A `tf.compat.v1.Session` or `_WrappedSession` object. The wrapped session. """ self._sess = sess self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession) @property def graph(self): return self._sess.graph @property def sess_str(self): return self._sess.sess_str def should_stop(self): """Return true if this session should not be used anymore. Always return True if the session was closed. Returns: True if the session should stop, False otherwise. """ if self._check_stop(): return True if self._sess: return self._wrapped_is_stoppable and self._sess.should_stop() return True def _check_stop(self): """Hook for subclasses to provide their own stop condition. Returns: True if the session should stop, False otherwise. """ return False def close(self): if self._sess: try: self._sess.close() except _PREEMPTION_ERRORS as e: logging.error( 'An error occurred when attempting to close the ' 'session. This may be due to a preemption in a ' 'connected worker or parameter server. Error: %s', e) finally: self._sess = None def run(self, *args, **kwargs): return self._sess.run(*args, **kwargs) def run_step_fn(self, step_fn, raw_session, run_with_hooks): # `_RecoverableSession` sets `run_with_hooks` to `_CoordinatedSession.run`. # It is `None` when called from `_CoordinatedSession`. In that case # `self.run` is `_CoordinatedSession.run`. run_with_hooks = run_with_hooks or self.run return step_fn(_MonitoredSession.StepContext(raw_session, run_with_hooks))
_WrappedSession
python
openai__openai-python
src/openai/resources/batches.py
{ "start": 20489, "end": 21002 }
class ____: def __init__(self, batches: AsyncBatches) -> None: self._batches = batches self.create = async_to_streamed_response_wrapper( batches.create, ) self.retrieve = async_to_streamed_response_wrapper( batches.retrieve, ) self.list = async_to_streamed_response_wrapper( batches.list, ) self.cancel = async_to_streamed_response_wrapper( batches.cancel, )
AsyncBatchesWithStreamingResponse
python
huggingface__transformers
src/transformers/models/gemma3n/modular_gemma3n.py
{ "start": 26516, "end": 31789 }
class ____(PreTrainedConfig): r""" This is the configuration class to store the configuration of a [`Gemma3nForConditionalGeneration`]. It is used to instantiate a Gemma3nForConditionalGeneration according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of Gemma3n-E4B. e.g. [google/gemma-3n-E4B](https://huggingface.co/google/gemma-3n-E4B) Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PreTrainedConfig`] for more information. Args: text_config (`Union[Gemma3nTextConfig, dict]`, *optional*): The config object of the text backbone. vision_config (`Union[AutoConfig, dict]`, *optional*): Custom vision config or dict. audio_config (`Union[AutoConfig, dict]`, *optional*): Custom audio config or dict. audio_soft_tokens_per_image (`int`, *optional*, defaults to 188): The number of soft tokens per audio clip. vision_soft_tokens_per_image (`int`, *optional*, defaults to 256): The number of soft tokens per image. boi_token_id (`int`, *optional*, defaults to 255999): The begin-of-image token index to wrap the image prompt. eoi_token_id (`int`, *optional*, defaults to 262144): The end-of-image token index to wrap the image prompt. image_token_id (`int`, *optional*, defaults to 262145): The image token index to encode the image prompt. boa_token_id (`int`, *optional*, defaults to 256000): The begin-of-audio token index to wrap the audio prompt. eoa_token_id (`int`, *optional*, defaults to 262272): The end-of-audio token index to wrap the audio prompt. audio_token_id (`int`, *optional*, defaults to 262273): The audio token index to encode the audio prompt. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
Example: ```python >>> from transformers import Gemma3nForConditionalGeneration, Gemma3nConfig, Gemma3nTextConfig >>> # Initializing a MobileNet vision config, which is loaded from TIMM >>> vision_config = Gemma3nVisionConfig() >>> # Initializing a Gemma3n Audio config >>> audio_config = Gemma3nAudioConfig() >>> # Initializing a Gemma3n Text config >>> text_config = Gemma3nTextConfig() >>> # Initializing a Gemma3n gemma-3-4b style configuration >>> configuration = Gemma3nConfig(text_config, vision_config, audio_config) >>> # Initializing a model from the gemma-3-4b style configuration >>> model = Gemma3nTextConfig(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "gemma3n" sub_configs = { "text_config": Gemma3nTextConfig, "vision_config": Gemma3nVisionConfig, "audio_config": Gemma3nAudioConfig, } def __init__( self, text_config: Optional[Union[Gemma3nTextConfig, dict[str, Any]]] = None, vision_config: Optional[Union[Gemma3nVisionConfig, dict[str, Any]]] = None, audio_config: Optional[Union[Gemma3nAudioConfig, dict[str, Any]]] = None, audio_soft_tokens_per_image: int = 188, vision_soft_tokens_per_image: int = 256, boi_token_id: int = 255_999, eoi_token_id: int = 262_144, image_token_id: int = 262_145, boa_token_id: int = 256_000, eoa_token_id: int = 262_272, audio_token_id: int = 262_273, initializer_range: float = 0.02, **kwargs, ): super().__init__(**kwargs) if isinstance(text_config, dict): text_config = Gemma3nTextConfig(**text_config) elif text_config is None: text_config = Gemma3nTextConfig() logger.info("text_config is None. Using default Gemma3nTextConfig.") if isinstance(vision_config, dict): vision_config = Gemma3nVisionConfig(**vision_config) elif vision_config is None: vision_config = Gemma3nVisionConfig() logger.info("vision_config is None. 
Using default Gemma3nVisionConfig.") if isinstance(audio_config, dict): audio_config = Gemma3nAudioConfig(**audio_config) elif audio_config is None: audio_config = Gemma3nAudioConfig() logger.info("audio_config is None. Using default Gemma3nAudioConfig.") self.text_config = text_config self.vision_config = vision_config self.audio_config = audio_config self.audio_soft_tokens_per_image = audio_soft_tokens_per_image self.vision_soft_tokens_per_image = vision_soft_tokens_per_image self.boi_token_id = boi_token_id self.eoi_token_id = eoi_token_id self.image_token_id = image_token_id self.boa_token_id = boa_token_id self.eoa_token_id = eoa_token_id self.audio_token_id = audio_token_id self.initializer_range = initializer_range
Gemma3nConfig
python
altair-viz__altair
altair/vegalite/v6/schema/core.py
{ "start": 261225, "end": 261495 }
class ____(VegaLiteSchema): """ConditionalValueDefTextExprRef schema wrapper.""" _schema = {"$ref": "#/definitions/ConditionalValueDef<(Text|ExprRef)>"} def __init__(self, *args, **kwds): super().__init__(*args, **kwds)
ConditionalValueDefTextExprRef
python
apache__airflow
airflow-core/tests/unit/ti_deps/deps/fake_models.py
{ "start": 1098, "end": 1183 }
class ____: def __init__(self, **kwds): self.__dict__.update(kwds)
FakeTask
python
scipy__scipy
scipy/fft/_pocketfft/tests/test_basic.py
{ "start": 8367, "end": 8546 }
class ____(_TestIFFTBase): def setup_method(self): self.cdt = np.complex64 self.rdt = np.float32 self.rtol = 1e-5 self.atol = 1e-4
TestSingleIFFT
python
dagster-io__dagster
python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py
{ "start": 20086, "end": 20344 }
class ____(graphene.ObjectType): class Meta: interfaces = (GrapheneMessageEvent, GrapheneStepEvent) name = "StepExpectationResultEvent" expectation_result = graphene.NonNull(GrapheneExpectationResult)
GrapheneStepExpectationResultEvent
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 15082, "end": 15258 }
class ____(sgqlc.types.Enum): """ See source code for more info. """ __schema__ = graphql_schema __choices__ = ("MERGE_MESSAGE", "PR_TITLE")
MergeCommitTitle
python
pytorch__pytorch
torch/distributed/checkpoint/storage.py
{ "start": 419, "end": 511 }
class ____: index: MetadataIndex size_in_bytes: int storage_data: Any
WriteResult
python
keras-team__keras
keras/src/backend/jax/distribution_lib_test.py
{ "start": 1077, "end": 17923 }
class ____(testing.TestCase): def _create_jax_layout(self, sharding): # Use jax_layout.Format or jax_layout.Layout if available. if hasattr(jax_layout, "Format"): return jax_layout.Format(sharding=sharding) elif hasattr(jax_layout, "Layout"): return jax_layout.Layout(sharding=sharding) return sharding def test_get_device_count(self): self.assertEqual(backend_dlib.get_device_count(), 8) self.assertEqual(backend_dlib.get_device_count("cpu"), 8) def test_list_devices(self): self.assertEqual(len(distribution_lib.list_devices()), 8) self.assertEqual(len(distribution_lib.list_devices("cpu")), 8) self.assertEqual(len(distribution_lib.list_devices("cpu")), 8) def test_device_conversion(self): devices = distribution_lib.list_devices("cpu") jax_devices = jax.devices("cpu") for d, jax_d in zip(devices, jax_devices): converted_jax_device = backend_dlib._to_backend_device(d) self.assertIsInstance(converted_jax_device, jax.Device) self.assertEqual(jax_d, converted_jax_device) @mock.patch.object(jax.distributed, "initialize", return_value=None) def test_initialize_with_all_job_addresses(self, mock_jax_initialize): backend_dlib.initialize("10.0.0.1:1234,10.0.0.2:2345", 2, 0) mock_jax_initialize.assert_called_once_with( coordinator_address="10.0.0.1:1234", num_processes=2, process_id=0 ) def test_initialize_validate_job_and_process(self): with self.assertRaisesRegex( ValueError, "has 2 jobs, but num_processes is 3" ): backend_dlib.initialize("10.0.0.1:1234,10.0.0.2:2345", 3, 0) @mock.patch.object(jax.distributed, "initialize", return_value=None) def test_initialize_with_coordinator_address(self, mock_jax_initialize): backend_dlib.initialize("10.0.0.1:1234", 2, 0) mock_jax_initialize.assert_called_once_with( coordinator_address="10.0.0.1:1234", num_processes=2, process_id=0 ) def test_distribute_tensor(self): jax_mesh = jax.sharding.Mesh( np.array(jax.devices()).reshape(2, 4), ("batch", "model") ) inputs = jax.numpy.array(np.random.normal(size=(16, 8))) target_layout = 
jax.sharding.NamedSharding( jax_mesh, jax.sharding.PartitionSpec("batch", None) ) @functools.partial(jax.jit, static_argnames="target_layout") def test_function(inputs, target_layout): return distribution_lib.distribute_tensor(inputs, target_layout) result = test_function(inputs, target_layout) # Note that the returned tensor has a different sharding implementation # which is GSPMDSharding, but it should be equivalent as the target # layout specified. self.assertTrue(result.sharding.is_equivalent_to(target_layout, ndim=2)) # Test without jit result = distribution_lib.distribute_tensor(inputs, target_layout) self.assertTrue(result.sharding.is_equivalent_to(target_layout, ndim=2)) def test_distribute_variable(self): # This test only verify the single worker/process behavior. jax_mesh = jax.sharding.Mesh( np.array(jax.devices()).reshape(2, 4), ("batch", "model") ) variable = jax.numpy.array(np.random.normal(size=(16, 8))) target_layout = jax.sharding.NamedSharding( jax_mesh, jax.sharding.PartitionSpec("model", None) ) result = backend_dlib.distribute_variable(variable, target_layout) # Note that the returned tensor has a different sharding implementation # which is GSPMDSharding, but it should be equivalent as the target # layout specified. self.assertTrue(result.sharding.is_equivalent_to(target_layout, ndim=2)) def test_distribute_input_data(self): # This test only verify the single worker/process behavior. # The multi-process test lives in g3. jax_mesh = jax.sharding.Mesh( np.array(jax.devices()).reshape(2, 4), ("batch", "model") ) input_data = jax.numpy.array(np.random.normal(size=(16, 8))) target_layout = jax.sharding.NamedSharding( jax_mesh, jax.sharding.PartitionSpec("batch", None) ) result = backend_dlib.distribute_variable(input_data, target_layout) # Note that the returned tensor has a different sharding implementation # which is GSPMDSharding, but it should be equivalent as the target # layout specified. 
self.assertTrue(result.sharding.is_equivalent_to(target_layout, ndim=2)) def test_distribute_tensor_with_jax_layout(self): jax_mesh = jax.sharding.Mesh( np.array(jax.devices()).reshape(2, 4), ("batch", "model") ) inputs = jax.numpy.array(np.random.normal(size=(16, 8))) target_layout = self._create_jax_layout( sharding=jax.sharding.NamedSharding( jax_mesh, jax.sharding.PartitionSpec("batch", None) ) ) @functools.partial(jax.jit, static_argnames="target_layout") def test_function(inputs, target_layout): return distribution_lib.distribute_tensor(inputs, target_layout) result = test_function(inputs, target_layout) # Note that the returned tensor has a different sharding implementation # which is GSPMDSharding, but it should be equivalent as the target # layout specified. self.assertTrue( result.sharding.is_equivalent_to(target_layout.sharding, ndim=2) ) # Test without jit. result = distribution_lib.distribute_tensor(inputs, target_layout) self.assertTrue( result.sharding.is_equivalent_to(target_layout.sharding, ndim=2) ) def test_distribute_variable_with_jax_layout(self): # This test only verify the single worker/process behavior. jax_mesh = jax.sharding.Mesh( np.array(jax.devices()).reshape(2, 4), ("batch", "model") ) variable = jax.numpy.array(np.random.normal(size=(16, 8))) target_layout = self._create_jax_layout( sharding=jax.sharding.NamedSharding( jax_mesh, jax.sharding.PartitionSpec("model", None) ) ) result = backend_dlib.distribute_variable(variable, target_layout) # Note that the returned tensor has a different sharding implementation # which is GSPMDSharding, but it should be equivalent as the target # layout specified. self.assertTrue( result.sharding.is_equivalent_to(target_layout.sharding, ndim=2) ) def test_distribute_input_data_with_jax_layout(self): # This test only verify the single worker/process behavior. 
jax_mesh = jax.sharding.Mesh( np.array(jax.devices()).reshape(2, 4), ("batch", "model") ) input_data = jax.numpy.array(np.random.normal(size=(16, 8))) target_layout = self._create_jax_layout( sharding=jax.sharding.NamedSharding( jax_mesh, jax.sharding.PartitionSpec("batch", None) ) ) result = backend_dlib.distribute_variable(input_data, target_layout) # Note that the returned tensor has a different sharding implementation # which is GSPMDSharding, but it should be equivalent as the target # layout specified. self.assertTrue( result.sharding.is_equivalent_to(target_layout.sharding, ndim=2) ) def test_processes(self): self.assertEqual(backend_dlib.process_id(), 0) self.assertEqual(backend_dlib.num_processes(), 1) def test_to_backend_mesh(self): devices = [f"cpu:{i}" for i in range(8)] shape = (4, 2) axis_names = ["batch", "model"] mesh = distribution_lib.DeviceMesh(shape, axis_names, devices) jax_mesh = backend_dlib._to_backend_mesh(mesh) self.assertIsInstance(jax_mesh, jax.sharding.Mesh) self.assertEqual(jax_mesh.devices.shape, shape) self.assertEqual(jax_mesh.axis_names, ("batch", "model")) def test_to_backend_layout(self): axes = ["data", None] mesh = distribution_lib.DeviceMesh( (4, 2), ["data", "model"], [f"cpu:{i}" for i in range(8)] ) layout = distribution_lib.TensorLayout(axes, mesh) jax_sharding = backend_dlib._to_backend_layout(layout) jax_mesh = backend_dlib._to_backend_mesh(mesh) self.assertEqual( jax_sharding, jax.sharding.NamedSharding( jax_mesh, jax.sharding.PartitionSpec("data", None) ), ) def test_validation_for_device_mesh(self): axes = ["data", None] layout = distribution_lib.TensorLayout(axes, device_mesh=None) with self.assertRaisesRegex( ValueError, "Cannot create sharding when device mesh is not set" ): backend_dlib._to_backend_layout(layout) def test_variable_assignment_reuse_layout(self): shape = (4, 2) axis_names = ["batch", "model"] device_mesh = distribution_lib.DeviceMesh( shape, axis_names, backend_dlib.list_devices() ) layout_map = 
distribution_lib.LayoutMap(device_mesh) layout_map[".*dense.*kernel"] = distribution_lib.TensorLayout( [None, "model"] ) layout_map[".*dense.*bias"] = distribution_lib.TensorLayout(["model"]) distribution = distribution_lib.ModelParallel( layout_map=layout_map, batch_dim_name="batch" ) with distribution.scope(): dense_layer = layers.Dense(8) dense_layer.build((16, 16)) self.assertEqual( dense_layer.kernel._value.sharding.spec, (None, "model") ) self.assertEqual(dense_layer.bias._value.sharding.spec, ("model",)) # Assign a numpy value to dense layer to mimic the model weight loading new_kernel = np.random.normal(size=(16, 8)) new_bias = np.random.normal(size=(8)) dense_layer.kernel.assign(new_kernel) dense_layer.bias.assign(new_bias) # Make sure the loaded value still use the layout when it is # initialized, even outside of the distribution scope. self.assertEqual( dense_layer.kernel._value.sharding.spec, (None, "model") ) self.assertEqual(dense_layer.bias._value.sharding.spec, ("model",)) def test_e2e_data_parallel_model(self): distribution = distribution_lib.DataParallel( devices=backend_dlib.list_devices() ) with distribution.scope(): inputs = layers.Input(shape=[28, 28, 1]) y = layers.Flatten()(inputs) y = layers.Dense(units=200, use_bias=False, activation="relu")(y) y = layers.Dropout(0.4)(y) y = layers.Dense(units=10, activation="softmax")(y) model = models.Model(inputs=inputs, outputs=y) # Make sure all the weights are properly sharded. 
for weight in model.weights: self.assertTrue(weight._value.sharding.is_fully_replicated) inputs = np.random.normal(size=(32, 28, 28, 1)) labels = np.random.normal(size=(32, 10)) with distribution.scope(): model.compile(loss="mse") model.fit(inputs, labels) def test_e2e_model_parallel_model(self): shape = (4, 2) axis_names = ["batch", "model"] device_mesh = distribution_lib.DeviceMesh( shape, axis_names, backend_dlib.list_devices() ) layout_map = distribution_lib.LayoutMap(device_mesh) layout_map[".*dense.*kernel"] = distribution_lib.TensorLayout( [None, "model"] ) layout_map[".*dense.*bias"] = distribution_lib.TensorLayout(["model"]) distribution = distribution_lib.ModelParallel( layout_map=layout_map, batch_dim_name="batch" ) with distribution.scope(): inputs = layers.Input(shape=[28, 28, 1]) y = layers.Flatten()(inputs) y = layers.Dense(units=200, use_bias=False, activation="relu")(y) y = layers.Dropout(0.4)(y) y = layers.Dense(units=10, activation="softmax")(y) model = models.Model(inputs=inputs, outputs=y) for weight in model.weights: if "kernel" in weight.name: self.assertEqual(weight._value.sharding.spec, (None, "model")) elif "bias" in weight.name: self.assertEqual(weight._value.sharding.spec, ("model",)) else: self.assertTrue(weight._value.sharding.is_fully_replicated) inputs = np.random.normal(size=(32, 28, 28, 1)) labels = np.random.normal(size=(32, 10)) with distribution.scope(): model.compile(loss="mse") model.fit(inputs, labels) def test_e2e_model_parallel_with_output_sharding(self): shape = (4, 2) axis_names = ["batch", "model"] device_mesh = distribution_lib.DeviceMesh( shape, axis_names, backend_dlib.list_devices() ) layout_map = distribution_lib.LayoutMap(device_mesh) layout_map[".*dense.*kernel"] = distribution_lib.TensorLayout( [None, "model"] ) layout_map[".*dense.*bias"] = distribution_lib.TensorLayout(["model"]) # Force the dense layer output to be batch parallel only, and not # sharded on model dimension. 
layout_map[".*dense.*output"] = ("batch", None) distribution = distribution_lib.ModelParallel( layout_map=layout_map, batch_dim_name="batch" ) sharding_capture = ShardingCaptureLayer() with distribution.scope(): inputs = layers.Input(shape=[28, 28, 1]) y = layers.Flatten()(inputs) y = layers.Dense(units=200, use_bias=False, activation="relu")(y) y = sharding_capture(y) y = layers.Dropout(0.4)(y) y = layers.Dense(units=10, activation="softmax")(y) model = models.Model(inputs=inputs, outputs=y) for weight in model.weights: if "kernel" in weight.name: self.assertEqual(weight._value.sharding.spec, (None, "model")) elif "bias" in weight.name: self.assertEqual(weight._value.sharding.spec, ("model",)) else: self.assertTrue(weight._value.sharding.is_fully_replicated) inputs = np.random.normal(size=(32, 28, 28, 1)) labels = np.random.normal(size=(32, 10)) with distribution.scope(): model.compile(loss="mse") model.fit(inputs, labels) # Note that the intermediate_tensor_layout is only captured during the # actual training, and not at the model building time. 
intermediate_tensor_layout = jax.sharding.NamedSharding( backend_dlib._to_backend_mesh(distribution.device_mesh), jax.sharding.PartitionSpec("batch", None), ) self.assertTrue( sharding_capture.captured_input_sharding.is_equivalent_to( intermediate_tensor_layout, ndim=2 ) ) def test_distribute_data_input(self): per_process_batch = jax.numpy.arange(24).reshape( 6, 4 ) # Example input array devices = jax.devices()[:4] # Simulate 4 devices batch_dim_size, model_dim_size = 2, 2 mesh = jax.sharding.Mesh( np.array(devices).reshape(batch_dim_size, model_dim_size), axis_names=["batch", "model"], ) layout = jax.sharding.NamedSharding( mesh, jax.sharding.PartitionSpec("batch", None) ) result = backend_dlib.distribute_data_input( per_process_batch, layout, "batch" ) # Check the shape of the global batch array self.assertEqual( result.shape, (6, 4) ) # (per_replica_batch_size * num_model_replicas_total, 4) # Check the sharding of the global batch array self.assertEqual(len(result.addressable_shards), len(devices)) # Since batch_dim_size=2, there are 2 model replicas so there is one # replication of data for model replica #1 and another replication of # data for model replica #2. Within each model replica, the data is # sharded to two shards. Therefore, each shard has 1/2 of # per_process_batch. for shard in result.addressable_shards: self.assertEqual(shard.data.shape, (3, 4))
JaxDistributionLibTest
python
django__django
tests/gis_tests/geoadmin/models.py
{ "start": 270, "end": 774 }
class ____(admin.GISModelAdmin): gis_widget_kwargs = { "attrs": { "default_lat": 55, "default_lon": 37, }, } site = admin.AdminSite(name="gis_admin_modeladmin") site.register(City, admin.ModelAdmin) site_gis = admin.AdminSite(name="gis_admin_gismodeladmin") site_gis.register(City, admin.GISModelAdmin) site_gis_custom = admin.AdminSite(name="gis_admin_gismodeladmin") site_gis_custom.register(City, CityAdminCustomWidgetKwargs)
CityAdminCustomWidgetKwargs
python
getsentry__sentry
src/sentry/rules/processing/delayed_processing.py
{ "start": 2123, "end": 2733 }
class ____(NamedTuple): """ Represents all the data that uniquely identifies a condition class and its single respective Snuba query that must be made. Multiple instances of the same condition class can share the single query. """ cls_id: str interval: str environment_id: int comparison_interval: str | None = None def __repr__(self) -> str: return ( f"<UniqueConditionQuery:\nid: {self.cls_id},\ninterval: {self.interval},\nenv id: {self.environment_id},\n" f"comp interval: {self.comparison_interval}\n>" )
UniqueConditionQuery
python
python-markdown__markdown
markdown/extensions/abbr.py
{ "start": 6600, "end": 7141 }
class ____(InlineProcessor): """ Abbreviation inline pattern. """ def __init__(self, pattern: str, title: str): super().__init__(pattern) self.title = title def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]: abbr = etree.Element('abbr') abbr.text = AtomicString(m.group('abbr')) abbr.set('title', self.title) return abbr, m.start(0), m.end(0) def makeExtension(**kwargs): # pragma: no cover return AbbrExtension(**kwargs)
AbbrInlineProcessor
python
pytorch__pytorch
torch/_guards.py
{ "start": 4228, "end": 4603 }
class ____(NamedTuple): compile_id: CompileId # This starts off as 0, and every time we restart analysis it goes # up by one attempt: int def __str__(self) -> str: # Keep this in sync with tlparse repo if self.attempt == 0: return str(self.compile_id) else: return f"{self.compile_id}_{self.attempt}"
TraceId
python
spack__spack
lib/spack/spack/vendor/attr/validators.py
{ "start": 1685, "end": 3025 }
class ____: type = attrib() def __call__(self, inst, attr, value): """ We use a callable class to be able to change the ``__repr__``. """ if not isinstance(value, self.type): raise TypeError( "'{name}' must be {type!r} (got {value!r} that is a " "{actual!r}).".format( name=attr.name, type=self.type, actual=value.__class__, value=value, ), attr, self.type, value, ) def __repr__(self): return "<instance_of validator for type {type!r}>".format( type=self.type ) def instance_of(type): """ A validator that raises a `TypeError` if the initializer is called with a wrong type for this particular attribute (checks are performed using `isinstance` therefore it's also valid to pass a tuple of types). :param type: The type to check for. :type type: type or tuple of types :raises TypeError: With a human readable error message, the attribute (of type `attrs.Attribute`), the expected type, and the value it got. """ return _InstanceOfValidator(type) @attrs(repr=False, frozen=True, slots=True)
_InstanceOfValidator
python
huggingface__transformers
tests/models/dac/test_feature_extraction_dac.py
{ "start": 1564, "end": 3340 }
class ____: # Ignore copy def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, hop_length=512, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.hop_length = hop_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.padding_value = padding_value self.sampling_rate = sampling_rate # Ignore copy def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "hop_length": self.hop_length, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: audio_inputs = floats_list((self.batch_size, self.max_seq_length)) else: # make sure that inputs increase in size audio_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: audio_inputs = [np.asarray(x) for x in audio_inputs] return audio_inputs @require_torch # Copied from transformers.tests.encodec.test_feature_extraction_dac.EnCodecFeatureExtractionTest with Encodec->Dac
DacFeatureExtractionTester
python
django__django
tests/model_regress/models.py
{ "start": 1413, "end": 1502 }
class ____(models.Model): pkey = models.IntegerField(unique=True, db_index=True)
Model1
python
PyCQA__pylint
tests/functional/n/not_callable.py
{ "start": 295, "end": 836 }
class ____: """callable object""" def __call__(self): return self INSTANCE = Correct() CALLABLE_INSTANCE = MetaCorrect() CORRECT = CALLABLE_INSTANCE() incorrect = INSTANCE() # [not-callable] LIST = [] incorrect = LIST() # [not-callable] DICT = {} incorrect = DICT() # [not-callable] TUPLE = () incorrect = TUPLE() # [not-callable] INT = 1 incorrect = INT() # [not-callable] # Test calling properties. Pylint can detect when using only the # getter, but it doesn't infer properly when having a getter # and a setter.
MetaCorrect
python
tensorflow__tensorflow
tensorflow/python/eager/polymorphic_function/tracing_compilation_test.py
{ "start": 64392, "end": 72410 }
class ____(test.TestCase, parameterized.TestCase): """Tests for recognizable export signatures from concrete functions.""" @test_util.run_v2_only def testBasic(self): @compiled_fn def fn(a, b): return a + b, a * b # Call the function to make def_function happy fn(array_ops.ones([]), array_ops.ones([])) fn_op = fn.get_concrete_function( tensor_lib.TensorSpec(shape=(None,), dtype=dtypes.float32), tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32), ) self.assertEqual(['a', 'b'], [inp.op.name for inp in fn_op.inputs]) self.assertEqual( [b'a', b'b'], [inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs], ) self.assertLen(fn_op.graph.structured_outputs, 2) self.assertAllClose( [3.0, 2.0], fn_op(constant_op.constant(1.0), constant_op.constant(2.0)) ) self.assertAllClose( [3.0, 2.0], fn_op(a=constant_op.constant(1.0), b=constant_op.constant(2.0)), ) def testVariable(self): @compiled_fn def fn(a, b): return a + b, a * b # Call the function to make def_function happy fn(array_ops.ones([]), array_ops.ones([])) fn_op = fn.get_concrete_function( tensor_lib.TensorSpec(shape=(None,), dtype=dtypes.float32), variables.Variable(1.0), ) self.assertEqual(['a', 'b'], [inp.op.name for inp in fn_op.inputs]) self.assertEqual( [b'a', b'b'], [inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs], ) self.assertLen(fn_op.graph.structured_outputs, 2) def testDictReturned(self): @compiled_fn def fn(x, z=(1.0, 2.0), y=3.0): z1, z2 = z return {'alpha': x + y + z1, 'beta': x * y + z2} # Call the function to make def_function happy fn(array_ops.ones([])) fn_op = fn.get_concrete_function( x=tensor_lib.TensorSpec(shape=(None,), dtype=dtypes.float32), y=tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32), ) self.assertEqual(['x', 'y'], [inp.op.name for inp in fn_op.inputs]) self.assertEqual( [b'x', b'y'], [inp.op.get_attr('_user_specified_name') for inp in fn_op.inputs], ) self.assertEqual( {'alpha', 'beta'}, set(fn_op.graph.structured_outputs.keys()) ) fn_op2 = 
fn.get_concrete_function( z=( tensor_lib.TensorSpec( shape=(None,), dtype=dtypes.float32, name='z_first' ), tensor_lib.TensorSpec( shape=(), dtype=dtypes.float32, name='z_second' ), ), y=tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32, name='custom'), x=4.0, ) self.assertEqual( ['z_first', 'z_second', 'custom'], [inp.op.name for inp in fn_op2.inputs], ) self.assertEqual( [b'z_first', b'z_second', b'custom'], [inp.op.get_attr('_user_specified_name') for inp in fn_op2.inputs], ) fn_op3 = fn.get_concrete_function( tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32, name='custom'), z=( tensor_lib.TensorSpec( shape=(None,), dtype=dtypes.float32, name='z1' ), tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32, name='z2'), ), y=tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32), ) self.assertEqual( ['custom', 'z1', 'z2', 'y'], [inp.op.name for inp in fn_op3.inputs] ) self.assertEqual( [b'custom', b'z1', b'z2', b'y'], [inp.op.get_attr('_user_specified_name') for inp in fn_op3.inputs], ) def testMethod(self): class HasMethod(object): def method(self, x): return x has_method = HasMethod() compiled_method = compiled_fn(has_method.method) class_op = compiled_method.get_concrete_function( tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32) ) self.assertEqual(['x'], [inp.op.name for inp in class_op.inputs]) self.assertEqual( [b'x'], [inp.op.get_attr('_user_specified_name') for inp in class_op.inputs], ) method_op = compiled_method.get_concrete_function( tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32) ) self.assertEqual(['x'], [inp.op.name for inp in method_op.inputs]) self.assertEqual( [b'x'], [inp.op.get_attr('_user_specified_name') for inp in method_op.inputs], ) # TODO(allenl): It should be possible to override names when exporting. Do # TensorSpec names need to go in cache keys? Or maybe get_concrete_function # should always retrace? 
self.skipTest('Not working') method_op = has_method.method.get_concrete_function( tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32, name='y') ) self.assertEqual(['y'], [inp.op.name for inp in method_op.inputs]) self.assertEqual( [b'y'], [inp.op.get_attr('_user_specified_name') for inp in method_op.inputs], ) def testMethodSignature(self): class HasMethod(object): def method(self, x): hash(self) # No weak proxies passed as `self` return x has_method = HasMethod() compiled_method = compiled_fn( has_method.method, input_signature=( tensor_lib.TensorSpec(shape=None, dtype=dtypes.float64, name='y'), ), ) method_op = compiled_method.get_concrete_function() self.assertEqual(['y'], [inp.op.name for inp in method_op.inputs]) self.assertEqual( [b'y'], [inp.op.get_attr('_user_specified_name') for inp in method_op.inputs], ) method_op2 = compiled_method.get_concrete_function() self.assertEqual(['y'], [inp.op.name for inp in method_op2.inputs]) self.assertEqual( [b'y'], [inp.op.get_attr('_user_specified_name') for inp in method_op2.inputs], ) def testVariadic(self): @compiled_fn def variadic_fn(x, *args, **kwargs): return x + math_ops.add_n(list(args) + list(kwargs.values())) # Call the function to make def_function happy variadic_fn(array_ops.ones([]), array_ops.ones([])) variadic_op = variadic_fn.get_concrete_function( tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32), tensor_lib.TensorSpec(shape=None, dtype=dtypes.float32, name='y'), tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32), tensor_lib.TensorSpec( shape=(), dtype=dtypes.float32, name='second_variadic' ), z=tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32), zz=tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32, name='cust'), ) self.assertEqual( ['x', 'y', 'args_1', 'second_variadic', 'z', 'cust'], [inp.op.name for inp in variadic_op.inputs], ) self.assertEqual( [b'x', b'y', b'args_1', b'second_variadic', b'z', b'cust'], [inp.op.get_attr('_user_specified_name') for inp in variadic_op.inputs], ) def 
testVariadicInputSignature(self): @compiled_fn( input_signature=( tensor_lib.TensorSpec(shape=None, dtype=dtypes.float32), tensor_lib.TensorSpec(shape=None, dtype=dtypes.float32, name='y'), tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32), tensor_lib.TensorSpec(shape=(), dtype=dtypes.float32, name='z'), ), name='variadic_fn', ) def variadic_fn(x, *args): return x + math_ops.add_n(list(args)) # Call the function to make def_function happy variadic_fn( array_ops.ones([]), array_ops.ones([]), array_ops.ones([]), array_ops.ones([]), ) variadic_op = variadic_fn.get_concrete_function() self.assertIn(b'variadic_fn', variadic_op.name) self.assertEqual( ['x', 'y', 'args_1', 'z'], [inp.op.name for inp in variadic_op.inputs] ) self.assertEqual( [b'x', b'y', b'args_1', b'z'], [inp.op.get_attr('_user_specified_name') for inp in variadic_op.inputs], )
CompilationArgumentNamingTest
python
huggingface__transformers
tests/utils/test_video_utils.py
{ "start": 1756, "end": 10821 }
class ____(unittest.TestCase): """ Tests that the `transforms` can be applied to a 4-dim array directly, i.e. to a whole video. """ def test_make_batched_videos_pil(self): # Test a single image is converted to a list of 1 video with 1 frame video = get_random_video(16, 32) pil_image = PIL.Image.fromarray(video[0]) videos_list = make_batched_videos(pil_image) self.assertIsInstance(videos_list, list) self.assertIsInstance(videos_list[0], np.ndarray) self.assertEqual(videos_list[0].shape, (1, 16, 32, 3)) self.assertTrue(np.array_equal(videos_list[0][0], np.array(pil_image))) # Test a list of videos is converted to a list of 1 video video = get_random_video(16, 32) pil_video = [PIL.Image.fromarray(frame) for frame in video] videos_list = make_batched_videos(pil_video) self.assertIsInstance(videos_list, list) self.assertIsInstance(videos_list[0], np.ndarray) self.assertEqual(videos_list[0].shape, (8, 16, 32, 3)) self.assertTrue(np.array_equal(videos_list[0], video)) # Test a nested list of videos is not modified video = get_random_video(16, 32) pil_video = [PIL.Image.fromarray(frame) for frame in video] videos = [pil_video, pil_video] videos_list = make_batched_videos(videos) self.assertIsInstance(videos_list, list) self.assertIsInstance(videos_list[0], np.ndarray) self.assertEqual(videos_list[0].shape, (8, 16, 32, 3)) self.assertTrue(np.array_equal(videos_list[0], video)) def test_make_batched_videos_numpy(self): # Test a single image is converted to a list of 1 video with 1 frame video = get_random_video(16, 32)[0] videos_list = make_batched_videos(video) self.assertIsInstance(videos_list, list) self.assertIsInstance(videos_list[0], np.ndarray) self.assertEqual(videos_list[0].shape, (1, 16, 32, 3)) self.assertTrue(np.array_equal(videos_list[0][0], video)) # Test a 4d array of videos is converted to a a list of 1 video video = get_random_video(16, 32) videos_list = make_batched_videos(video) self.assertIsInstance(videos_list, list) self.assertIsInstance(videos_list[0], 
np.ndarray) self.assertEqual(videos_list[0].shape, (8, 16, 32, 3)) self.assertTrue(np.array_equal(videos_list[0], video)) # Test a list of videos is converted to a list of videos video = get_random_video(16, 32) videos = [video, video] videos_list = make_batched_videos(videos) self.assertIsInstance(videos_list, list) self.assertIsInstance(videos_list[0], np.ndarray) self.assertEqual(videos_list[0].shape, (8, 16, 32, 3)) self.assertTrue(np.array_equal(videos_list[0], video)) @require_torch def test_make_batched_videos_torch(self): # Test a single image is converted to a list of 1 video with 1 frame video = get_random_video(16, 32)[0] torch_video = torch.from_numpy(video) videos_list = make_batched_videos(torch_video) self.assertIsInstance(videos_list, list) self.assertIsInstance(videos_list[0], torch.Tensor) self.assertEqual(videos_list[0].shape, (1, 16, 32, 3)) self.assertTrue(np.array_equal(videos_list[0][0], video)) # Test a 4d array of videos is converted to a a list of 1 video video = get_random_video(16, 32) torch_video = torch.from_numpy(video) videos_list = make_batched_videos(torch_video) self.assertIsInstance(videos_list, list) self.assertIsInstance(videos_list[0], torch.Tensor) self.assertEqual(videos_list[0].shape, (8, 16, 32, 3)) self.assertTrue(np.array_equal(videos_list[0], video)) # Test a list of videos is converted to a list of videos video = get_random_video(16, 32) torch_video = torch.from_numpy(video) videos = [torch_video, torch_video] videos_list = make_batched_videos(videos) self.assertIsInstance(videos_list, list) self.assertIsInstance(videos_list[0], torch.Tensor) self.assertEqual(videos_list[0].shape, (8, 16, 32, 3)) self.assertTrue(np.array_equal(videos_list[0], video)) def test_resize(self): video_processor = BaseVideoProcessor(model_init_kwargs=VideosKwargs) video = get_random_video(16, 32, return_torch=True) # Size can be an int or a tuple of ints. 
size_dict = SizeDict(**get_size_dict((8, 8), param_name="size")) resized_video = video_processor.resize(video, size=size_dict) self.assertIsInstance(resized_video, torch.Tensor) self.assertEqual(resized_video.shape, (8, 3, 8, 8)) def test_normalize(self): video_processor = BaseVideoProcessor(model_init_kwargs=VideosKwargs) array = torch.randn(4, 3, 16, 32) mean = [0.1, 0.5, 0.9] std = [0.2, 0.4, 0.6] # mean and std can be passed as lists or NumPy arrays. expected = (array - torch.tensor(mean)[:, None, None]) / torch.tensor(std)[:, None, None] normalized_array = video_processor.normalize(array, mean, std) torch.testing.assert_close(normalized_array, expected) def test_center_crop(self): video_processor = BaseVideoProcessor(model_init_kwargs=VideosKwargs) video = get_random_video(16, 32, return_torch=True) # Test various crop sizes: bigger on all dimensions, on one of the dimensions only and on both dimensions. crop_sizes = [8, (8, 64), 20, (32, 64)] for size in crop_sizes: size_dict = SizeDict(**get_size_dict(size, default_to_square=True, param_name="crop_size")) cropped_video = video_processor.center_crop(video, size_dict) self.assertIsInstance(cropped_video, torch.Tensor) expected_size = (size, size) if isinstance(size, int) else size self.assertEqual(cropped_video.shape, (8, 3, *expected_size)) def test_convert_to_rgb(self): video_processor = BaseVideoProcessor(model_init_kwargs=VideosKwargs) video = get_random_video(20, 20, return_torch=True) rgb_video = video_processor.convert_to_rgb(video[:, :1]) self.assertEqual(rgb_video.shape, (8, 3, 20, 20)) rgb_video = video_processor.convert_to_rgb(torch.cat([video, video[:, :1]], dim=1)) self.assertEqual(rgb_video.shape, (8, 3, 20, 20)) def test_group_and_reorder_videos(self): """Tests that videos can be grouped by frame size and number of frames""" video_1 = get_random_video(20, 20, num_frames=3, return_torch=True) video_2 = get_random_video(20, 20, num_frames=5, return_torch=True) # Group two videos of same size but 
different number of frames grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_2]) self.assertEqual(len(grouped_videos), 2) regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index) self.assertTrue(len(regrouped_videos), 2) self.assertEqual(video_1.shape, regrouped_videos[0].shape) # Group two videos of different size but same number of frames video_3 = get_random_video(15, 20, num_frames=3, return_torch=True) grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_3]) self.assertEqual(len(grouped_videos), 2) regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index) self.assertTrue(len(regrouped_videos), 2) self.assertEqual(video_1.shape, regrouped_videos[0].shape) # Group all three videos where some have same size or same frame count # But since none have frames and sizes identical, we'll have 3 groups grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_2, video_3]) self.assertEqual(len(grouped_videos), 3) regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index) self.assertTrue(len(regrouped_videos), 3) self.assertEqual(video_1.shape, regrouped_videos[0].shape) # Group if we had some videos with identical shapes grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_1, video_3]) self.assertEqual(len(grouped_videos), 2) regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index) self.assertTrue(len(regrouped_videos), 2) self.assertEqual(video_1.shape, regrouped_videos[0].shape) # Group if we had all videos with identical shapes grouped_videos, grouped_videos_index = group_videos_by_shape([video_1, video_1, video_1]) self.assertEqual(len(grouped_videos), 1) regrouped_videos = reorder_videos(grouped_videos, grouped_videos_index) self.assertTrue(len(regrouped_videos), 1) self.assertEqual(video_1.shape, regrouped_videos[0].shape) @require_vision @require_av
BaseVideoProcessorTester
python
dagster-io__dagster
python_modules/dagster/dagster/_core/definitions/selector.py
{ "start": 10511, "end": 10968 }
class ____: """The information needed to resolve a partition range.""" start: str end: str def to_graphql_input(self): return { "start": self.start, "end": self.end, } @staticmethod def from_graphql_input(graphql_data): return PartitionRangeSelector( start=graphql_data["start"], end=graphql_data["end"], ) @record(kw_only=False)
PartitionRangeSelector
python
joblib__joblib
joblib/test/test_memory.py
{ "start": 37754, "end": 46352 }
class ____(StoreBackendBase): """A dummy store backend that does nothing.""" def _open_item(self, *args, **kwargs): """Open an item on store.""" "Does nothing" def _item_exists(self, location): """Check if an item location exists.""" "Does nothing" def _move_item(self, src, dst): """Move an item from src to dst in store.""" "Does nothing" def create_location(self, location): """Create location on store.""" "Does nothing" def exists(self, obj): """Check if an object exists in the store""" return False def clear_location(self, obj): """Clear object on store""" "Does nothing" def get_items(self): """Returns the whole list of items available in cache.""" return [] def configure(self, location, *args, **kwargs): """Configure the store""" "Does nothing" @parametrize("invalid_prefix", [None, dict(), list()]) def test_register_invalid_store_backends_key(invalid_prefix): # verify the right exceptions are raised when passing a wrong backend key. with raises(ValueError) as excinfo: register_store_backend(invalid_prefix, None) excinfo.match(r"Store backend name should be a string*") def test_register_invalid_store_backends_object(): # verify the right exceptions are raised when passing a wrong backend # object. 
with raises(ValueError) as excinfo: register_store_backend("fs", None) excinfo.match(r"Store backend should inherit StoreBackendBase*") def test_memory_default_store_backend(): # test an unknown backend falls back into a FileSystemStoreBackend with raises(TypeError) as excinfo: Memory(location="/tmp/joblib", backend="unknown") excinfo.match(r"Unknown location*") def test_warning_on_unknown_location_type(): class NonSupportedLocationClass: pass unsupported_location = NonSupportedLocationClass() with warns(UserWarning) as warninfo: _store_backend_factory("local", location=unsupported_location) expected_mesage = ( "Instantiating a backend using a " "NonSupportedLocationClass as a location is not " "supported by joblib" ) assert expected_mesage in str(warninfo[0].message) def test_instanciate_incomplete_store_backend(): # Verify that registering an external incomplete store backend raises an # exception when one tries to instantiate it. backend_name = "isb" register_store_backend(backend_name, IncompleteStoreBackend) assert (backend_name, IncompleteStoreBackend) in _STORE_BACKENDS.items() with raises(TypeError) as excinfo: _store_backend_factory(backend_name, "fake_location") excinfo.match( r"Can't instantiate abstract class IncompleteStoreBackend " "(without an implementation for|with) abstract methods*" ) def test_dummy_store_backend(): # Verify that registering an external store backend works. 
backend_name = "dsb" register_store_backend(backend_name, DummyStoreBackend) assert (backend_name, DummyStoreBackend) in _STORE_BACKENDS.items() backend_obj = _store_backend_factory(backend_name, "dummy_location") assert isinstance(backend_obj, DummyStoreBackend) def test_instanciate_store_backend_with_pathlib_path(): # Instantiate a FileSystemStoreBackend using a pathlib.Path object path = pathlib.Path("some_folder") backend_obj = _store_backend_factory("local", path) try: assert backend_obj.location == "some_folder" finally: # remove cache folder after test shutil.rmtree("some_folder", ignore_errors=True) def test_filesystem_store_backend_repr(tmpdir): # Verify string representation of a filesystem store backend. repr_pattern = 'FileSystemStoreBackend(location="{location}")' backend = FileSystemStoreBackend() assert backend.location is None repr(backend) # Should not raise an exception assert str(backend) == repr_pattern.format(location=None) # backend location is passed explicitly via the configure method (called # by the internal _store_backend_factory function) backend.configure(tmpdir.strpath) assert str(backend) == repr_pattern.format(location=tmpdir.strpath) repr(backend) # Should not raise an exception def test_memory_objects_repr(tmpdir): # Verify printable reprs of MemorizedResult, MemorizedFunc and Memory. 
def my_func(a, b): return a + b memory = Memory(location=tmpdir.strpath, verbose=0) memorized_func = memory.cache(my_func) memorized_func_repr = "MemorizedFunc(func={func}, location={location})" assert str(memorized_func) == memorized_func_repr.format( func=my_func, location=memory.store_backend.location ) memorized_result = memorized_func.call_and_shelve(42, 42) memorized_result_repr = ( 'MemorizedResult(location="{location}", func="{func}", args_id="{args_id}")' ) assert str(memorized_result) == memorized_result_repr.format( location=memory.store_backend.location, func=memorized_result.func_id, args_id=memorized_result.args_id, ) assert str(memory) == "Memory(location={location})".format( location=memory.store_backend.location ) def test_memorized_result_pickle(tmpdir): # Verify a MemoryResult object can be pickled/depickled. Non regression # test introduced following issue # https://github.com/joblib/joblib/issues/747 memory = Memory(location=tmpdir.strpath) @memory.cache def g(x): return x**2 memorized_result = g.call_and_shelve(4) memorized_result_pickle = pickle.dumps(memorized_result) memorized_result_loads = pickle.loads(memorized_result_pickle) assert ( memorized_result.store_backend.location == memorized_result_loads.store_backend.location ) assert memorized_result.func == memorized_result_loads.func assert memorized_result.args_id == memorized_result_loads.args_id assert str(memorized_result) == str(memorized_result_loads) def compare(left, right, ignored_attrs=None): if ignored_attrs is None: ignored_attrs = [] left_vars = vars(left) right_vars = vars(right) assert set(left_vars.keys()) == set(right_vars.keys()) for attr in left_vars.keys(): if attr in ignored_attrs: continue assert left_vars[attr] == right_vars[attr] @pytest.mark.parametrize( "memory_kwargs", [ {"compress": 3, "verbose": 2}, {"mmap_mode": "r", "verbose": 5, "backend_options": {"parameter": "unused"}}, ], ) def test_memory_pickle_dump_load(tmpdir, memory_kwargs): memory = 
Memory(location=tmpdir.strpath, **memory_kwargs) memory_reloaded = pickle.loads(pickle.dumps(memory)) # Compare Memory instance before and after pickle roundtrip compare(memory.store_backend, memory_reloaded.store_backend) compare( memory, memory_reloaded, ignored_attrs=set(["store_backend", "timestamp", "_func_code_id"]), ) assert hash(memory) == hash(memory_reloaded) func_cached = memory.cache(f) func_cached_reloaded = pickle.loads(pickle.dumps(func_cached)) # Compare MemorizedFunc instance before/after pickle roundtrip compare(func_cached.store_backend, func_cached_reloaded.store_backend) compare( func_cached, func_cached_reloaded, ignored_attrs=set(["store_backend", "timestamp", "_func_code_id"]), ) assert hash(func_cached) == hash(func_cached_reloaded) # Compare MemorizedResult instance before/after pickle roundtrip memorized_result = func_cached.call_and_shelve(1) memorized_result_reloaded = pickle.loads(pickle.dumps(memorized_result)) compare(memorized_result.store_backend, memorized_result_reloaded.store_backend) compare( memorized_result, memorized_result_reloaded, ignored_attrs=set(["store_backend", "timestamp", "_func_code_id"]), ) assert hash(memorized_result) == hash(memorized_result_reloaded) def test_info_log(tmpdir, caplog): caplog.set_level(logging.INFO) x = 3 memory = Memory(location=tmpdir.strpath, verbose=20) @memory.cache def f(x): return x**2 _ = f(x) assert "Querying" in caplog.text caplog.clear() memory = Memory(location=tmpdir.strpath, verbose=0) @memory.cache def f(x): return x**2 _ = f(x) assert "Querying" not in caplog.text caplog.clear()
DummyStoreBackend
python
django__django
tests/fixtures/tests.py
{ "start": 1405, "end": 1731 }
class ____(TestCaseFixtureLoadingTests): """ Make sure that subclasses can remove fixtures from parent class (#21089). """ fixtures = [] def test_class_fixtures(self): "There were no fixture objects installed" self.assertEqual(Article.objects.count(), 0)
SubclassTestCaseFixtureLoadingTests
python
ray-project__ray
rllib/core/rl_module/apis/self_supervised_loss_api.py
{ "start": 331, "end": 2321 }
class ____(abc.ABC): """An API to be implemented by RLModules that bring their own self-supervised loss. Learners will call these model's `compute_self_supervised_loss()` method instead of the Learner's own `compute_loss_for_module()` method. The call signature is identical to the Learner's `compute_loss_for_module()` method except of an additional mandatory `learner` kwarg. """ @abc.abstractmethod def compute_self_supervised_loss( self, *, learner: "Learner", module_id: ModuleID, config: "AlgorithmConfig", batch: Dict[str, Any], fwd_out: Dict[str, TensorType], **kwargs, ) -> TensorType: """Computes the loss for a single module. Think of this as computing loss for a single agent. For multi-agent use-cases that require more complicated computation for loss, consider overriding the `compute_losses` method instead. Args: learner: The Learner calling this loss method on the RLModule. module_id: The ID of the RLModule (within a MultiRLModule). config: The AlgorithmConfig specific to the given `module_id`. batch: The sample batch for this particular RLModule. fwd_out: The output of the forward pass for this particular RLModule. Returns: A single total loss tensor. If you have more than one optimizer on the provided `module_id` and would like to compute gradients separately using these different optimizers, simply add up the individual loss terms for each optimizer and return the sum. Also, for recording/logging any individual loss terms, you can use the `Learner.metrics.log_value( key=..., value=...)` or `Learner.metrics.log_dict()` APIs. See: :py:class:`~ray.rllib.utils.metrics.metrics_logger.MetricsLogger` for more information. """
SelfSupervisedLossAPI
python
langchain-ai__langchain
libs/langchain/langchain_classic/agents/conversational_chat/base.py
{ "start": 1303, "end": 6325 }
class ____(Agent): """An agent designed to hold a conversation in addition to using tools.""" output_parser: AgentOutputParser = Field(default_factory=ConvoOutputParser) """Output parser for the agent.""" template_tool_response: str = TEMPLATE_TOOL_RESPONSE """Template for the tool response.""" @classmethod @override def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser: return ConvoOutputParser() @property def _agent_type(self) -> str: raise NotImplementedError @property def observation_prefix(self) -> str: """Prefix to append the observation with. Returns: "Observation: " """ return "Observation: " @property def llm_prefix(self) -> str: """Prefix to append the llm call with. Returns: "Thought: " """ return "Thought:" @classmethod def _validate_tools(cls, tools: Sequence[BaseTool]) -> None: super()._validate_tools(tools) validate_tools_single_input(cls.__name__, tools) @classmethod def create_prompt( cls, tools: Sequence[BaseTool], system_message: str = PREFIX, human_message: str = SUFFIX, input_variables: list[str] | None = None, output_parser: BaseOutputParser | None = None, ) -> BasePromptTemplate: """Create a prompt for the agent. Args: tools: The tools to use. system_message: The `SystemMessage` to use. human_message: The `HumanMessage` to use. input_variables: The input variables to use. output_parser: The output parser to use. Returns: A `PromptTemplate`. 
""" tool_strings = "\n".join( [f"> {tool.name}: {tool.description}" for tool in tools], ) tool_names = ", ".join([tool.name for tool in tools]) _output_parser = output_parser or cls._get_default_output_parser() format_instructions = human_message.format( format_instructions=_output_parser.get_format_instructions(), ) final_prompt = format_instructions.format( tool_names=tool_names, tools=tool_strings, ) if input_variables is None: input_variables = ["input", "chat_history", "agent_scratchpad"] messages = [ SystemMessagePromptTemplate.from_template(system_message), MessagesPlaceholder(variable_name="chat_history"), HumanMessagePromptTemplate.from_template(final_prompt), MessagesPlaceholder(variable_name="agent_scratchpad"), ] return ChatPromptTemplate(input_variables=input_variables, messages=messages) def _construct_scratchpad( self, intermediate_steps: list[tuple[AgentAction, str]], ) -> list[BaseMessage]: """Construct the scratchpad that lets the agent continue its thought process.""" thoughts: list[BaseMessage] = [] for action, observation in intermediate_steps: thoughts.append(AIMessage(content=action.log)) human_message = HumanMessage( content=self.template_tool_response.format(observation=observation), ) thoughts.append(human_message) return thoughts @classmethod def from_llm_and_tools( cls, llm: BaseLanguageModel, tools: Sequence[BaseTool], callback_manager: BaseCallbackManager | None = None, output_parser: AgentOutputParser | None = None, system_message: str = PREFIX, human_message: str = SUFFIX, input_variables: list[str] | None = None, **kwargs: Any, ) -> Agent: """Construct an agent from an LLM and tools. Args: llm: The language model to use. tools: A list of tools to use. callback_manager: The callback manager to use. output_parser: The output parser to use. system_message: The `SystemMessage` to use. human_message: The `HumanMessage` to use. input_variables: The input variables to use. **kwargs: Any additional arguments. Returns: An agent. 
""" cls._validate_tools(tools) _output_parser = output_parser or cls._get_default_output_parser() prompt = cls.create_prompt( tools, system_message=system_message, human_message=human_message, input_variables=input_variables, output_parser=_output_parser, ) llm_chain = LLMChain( llm=llm, prompt=prompt, callback_manager=callback_manager, ) tool_names = [tool.name for tool in tools] return cls( llm_chain=llm_chain, allowed_tools=tool_names, output_parser=_output_parser, **kwargs, )
ConversationalChatAgent
python
kamyu104__LeetCode-Solutions
Python/count-number-of-ways-to-place-houses.py
{ "start": 889, "end": 1176 }
class ____(object): def countHousePlacements(self, n): """ :type n: int :rtype: int """ MOD = 10**9+7 prev, curr = 1, 2 for _ in xrange(n-1): prev, curr = curr, (prev+curr)%MOD return pow(curr, 2, MOD)
Solution2
python
scipy__scipy
scipy/signal/tests/test_filter_design.py
{ "start": 80920, "end": 85699 }
class ____: @xfail_xp_backends("torch", reason="accuracy is bad") def test_lowpass(self, xp): wp = 0.2 ws = 0.3 rp = 3 rs = 60 N, Wn = cheb1ord(xp.asarray(wp), ws, rp, rs, False) b, a = cheby1(N, rp, _xp_copy_to_numpy(Wn), 'low', False) w, h = freqz(b, a) w /= np.pi assert np.all(-rp - 0.1 < dB(h[w <= wp])) assert np.all(dB(h[ws <= w]) < -rs + 0.1) assert N == 8 xp_assert_close(Wn, xp.asarray(0.2), rtol=1e-15, check_0d=False) @xfail_xp_backends("torch", reason="accuracy is bad") def test_highpass(self, xp): wp = 0.3 ws = 0.2 rp = 3 rs = 70 N, Wn = cheb1ord(xp.asarray(wp), ws, rp, rs, False) b, a = cheby1(N, rp, _xp_copy_to_numpy(Wn), 'high', False) w, h = freqz(b, a) w /= np.pi assert np.all(-rp - 0.1 < dB(h[wp <= w])) assert np.all(dB(h[w <= ws]) < -rs + 0.1) assert N == 9 xp_assert_close(Wn, xp.asarray(0.3), rtol=1e-15, check_0d=False) def test_bandpass(self, xp): wp = [0.2, 0.5] ws = [0.1, 0.6] rp = 3 rs = 80 N, Wn = cheb1ord(xp.asarray(wp), xp.asarray(ws), rp, rs, False) b, a = cheby1(N, rp, _xp_copy_to_numpy(Wn), 'band', False) w, h = freqz(b, a) w /= np.pi assert np.all(-rp - 0.1 < dB(h[np.logical_and(wp[0] <= w, w <= wp[1])])) assert np.all(dB(h[np.logical_or(w <= ws[0], ws[1] <= w)]) < -rs + 0.1) assert N == 9 xp_assert_close(Wn, xp.asarray([0.2, 0.5]), rtol=1e-15) @skip_xp_backends( cpu_only=True, exceptions=["cupy"], reason="optimize.fminbound" ) def test_bandstop(self, xp): wp = [0.1, 0.6] ws = [0.2, 0.5] rp = 3 rs = 90 N, Wn = cheb1ord(xp.asarray(wp), xp.asarray(ws), rp, rs, False) b, a = cheby1(N, rp, _xp_copy_to_numpy(Wn), 'stop', False) w, h = freqz(b, a) w /= np.pi assert np.all(-rp - 0.1 < dB(h[np.logical_or(w <= wp[0], wp[1] <= w)])) assert np.all(dB(h[np.logical_and(ws[0] <= w, w <= ws[1])]) < -rs + 0.1) assert N == 10 xp_assert_close(Wn, xp.asarray([0.14758232569947785, 0.6]), rtol=1e-5) def test_analog(self, xp): wp = 700 ws = 100. 
rp = 3 rs = 70 N, Wn = cheb1ord(wp, xp.asarray(ws), rp, rs, True) b, a = cheby1(N, rp, _xp_copy_to_numpy(Wn), 'high', True) w, h = freqs(b, a) assert np.all(-rp - 0.1 < dB(h[wp <= w])) assert np.all(dB(h[w <= ws]) < -rs + 0.1) assert N == 4 assert math.isclose(Wn, 700.0, rel_tol=1e-15) assert array_namespace(Wn) == xp assert cheb1ord(xp.asarray(1), 1.2, 1, 80, analog=True)[0] == 17 @xfail_xp_backends("torch", reason="accuracy issues") def test_fs_param(self, xp): wp = 4800 ws = 7200. rp = 3 rs = 60 fs = 48000 N, Wn = cheb1ord(wp, xp.asarray(ws), rp, rs, False, fs=fs) b, a = cheby1(N, rp, _xp_copy_to_numpy(Wn), 'low', False, fs=fs) w, h = freqz(b, a, fs=fs) assert np.all(-rp - 0.1 < dB(h[w <= wp])) assert np.all(dB(h[ws <= w]) < -rs + 0.1) assert N == 8 assert math.isclose(Wn, 4800.0, rel_tol=1e-15) assert array_namespace(Wn) == xp def test_invalid_input(self): with pytest.raises(ValueError) as exc_info: cheb1ord(0.2, 0.3, 3, 2) assert "gpass should be smaller than gstop" in str(exc_info.value) with pytest.raises(ValueError) as exc_info: cheb1ord(0.2, 0.3, -1, 2) assert "gpass should be larger than 0.0" in str(exc_info.value) with pytest.raises(ValueError) as exc_info: cheb1ord(0.2, 0.3, 1, -2) assert "gstop should be larger than 0.0" in str(exc_info.value) def test_ellip_cheb1(self): # The purpose of the test is to compare to some known output from past # scipy versions. 
The values to compare to are generated with scipy # 1.9.1 (there is nothing special about this particular version though) n, wn = cheb1ord([0.1, 0.6], [0.2, 0.5], 3, 60) assert n == 7 n2, w2 = cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) assert not (wn == w2).all() def test_fs_validation(self): wp = 0.2 ws = 0.3 rp = 3 rs = 60 with pytest.raises(ValueError, match="Sampling.*single scalar"): cheb1ord(wp, ws, rp, rs, False, fs=np.array([10, 20])) @make_xp_test_case(cheb2ord) @pytest.mark.skipif(DEFAULT_F32, reason="XXX needs figuring out") @skip_xp_backends("dask.array", reason="https://github.com/dask/dask/issues/11883")
TestCheb1ord
python
django-import-export__django-import-export
tests/core/tests/test_widgets.py
{ "start": 10202, "end": 11830 }
class ____(TestCase, RowDeprecationTestMixin): def setUp(self): self.time = time(20, 15, 0) self.widget = widgets.TimeWidget("%H:%M:%S") def test_render(self): self.assertEqual(self.widget.render(self.time), "20:15:00") def test_render_derived_time(self): derived_time = CustomTime(20, 15, 0) self.assertEqual(self.widget.render(derived_time), "20:15:00") def test_render_none(self): self.assertEqual(self.widget.render(None), "") def test_render_invalid_type(self): self.assertEqual(self.widget.render(int(1)), "") def test_render_coerce_to_string_is_False(self): self.widget = widgets.TimeWidget(coerce_to_string=False) self.assertEqual(self.time, self.widget.render(self.time)) def test_clean(self): self.assertEqual(self.widget.clean("20:15:00"), self.time) @override_settings(TIME_INPUT_FORMATS=None) def test_default_format(self): self.widget = widgets.TimeWidget() self.assertEqual(("%H:%M:%S",), self.widget.formats) @patch("import_export.widgets.logger") def test_clean_raises_ValueError(self, mock_logger): self.widget = widgets.TimeWidget("x") with self.assertRaisesRegex( ValueError, "Value could not be parsed using defined formats." ): self.widget.clean("20:15:00") mock_logger.debug.assert_called_with( "time data '20:15:00' does not match format 'x'" ) def test_clean_returns_time_when_time_passed(self): self.assertEqual(self.time, self.widget.clean(self.time))
TimeWidgetTest
python
run-llama__llama_index
llama-index-integrations/readers/llama-index-readers-dashscope/llama_index/readers/dashscope/utils.py
{ "start": 1739, "end": 4978 }
class ____(Exception): """ Custom exception class to indicate a situation where an operation needs to be retried. This exception should be raised when an operation fails due to anticipated recoverable reasons, suggesting to the caller that a retry logic might be appropriate. """ def __init__( self, message="Operation failed, requiring a retry", cause=None ) -> None: """ Initialize a RetryException instance. :param message: Detailed information about the exception, a string by default set as "Operation failed, requiring a retry" :param cause: The original exception object that caused this exception, optional """ super().__init__(message) self.cause = cause def __str__(self) -> str: """ Return a string representation of the exception, including the original exception information if present. :return: String representation of the exception details """ if self.cause: return f"{super().__str__()} caused by: {self.cause}" else: return super().__str__() def __raise_exception_for_retry(response: Response, process: str) -> None: """Log the error and raise a specific exception based on the response.""" error_message = f"Failed to {process}: {response.text}" raise RetryException(cause=error_message) logger = get_stream_logger(name="DashScopeResponseHandler") def dashscope_response_handler( response: Response, process: str, result_class: Type[T], url: str = "" ) -> T: """Handle the response from the DashScope API.""" if response is None: raise ValueError( f"DashScopeParse {process} [URL:{url}] http response object is none." ) if not isinstance(process, str) or not process: raise ValueError( "DashScopeParse func [dashscope_response_handler] process parameter is empty." 
) if response.status_code != 200: logger.error( f"DashScopeParse {process} [URL:{url}] response http status code is not 200: [{response.status_code}:{response.text}]" ) if response.status_code == 429: __raise_exception_for_retry(response, process) __raise_exception(response, process) try: response_data = response.json() except Exception as e: logger.error( f"DashScopeParse {process} [URL:{url}] response data is not json: {response.text}." ) __raise_exception(response, process) if not __is_response_successful(response_data): logger.error( f"DashScopeParse {process} [URL:{url}] response fail: {response.text}." ) __raise_exception(response, process) if "data" not in response_data: logger.error( f"DashScopeParse {process} [URL:{url}] response data does not contain 'data' key: {response_data}." ) __raise_exception(response, process) if "request_id" in response_data and process != "query": logger.info( f"DashScopeParse {process} [URL:{url}] request_id: {response_data['request_id']}." ) return result_class.from_dict(response_data["data"])
RetryException
python
gevent__gevent
src/gevent/tests/test__semaphore.py
{ "start": 1917, "end": 10973 }
class ____(greentest.TestCase): # Tests that the object can be acquired correctly across # multiple threads. # Used as a base class. # See https://github.com/gevent/gevent/issues/1437 def _getTargetClass(self): return Semaphore def _makeOne(self): # Create an object that is associated with the current hub. If # we don't do this now, it gets initialized lazily the first # time it would have to block, which, in the event of threads, # would be from an arbitrary thread. return self._getTargetClass()(1) def _makeThreadMain(self, thread_running, thread_acquired, sem, acquired, exc_info, **thread_acquire_kwargs): from gevent._hub_local import get_hub_if_exists import sys def thread_main(): thread_running.set() try: acquired.append( sem.acquire(**thread_acquire_kwargs) ) except: exc_info[:] = sys.exc_info() raise # Print finally: hub = get_hub_if_exists() if hub is not None: hub.join() hub.destroy(destroy_loop=True) thread_acquired.set() return thread_main IDLE_ITERATIONS = 5 def _do_test_acquire_in_one_then_another(self, release=True, require_thread_acquired_to_finish=False, **thread_acquire_kwargs): from gevent import monkey self.assertFalse(monkey.is_module_patched('threading')) import threading thread_running = threading.Event() thread_acquired = threading.Event() sem = self._makeOne() # Make future acquires block sem.acquire() exc_info = [] acquired = [] t = threading.Thread(target=self._makeThreadMain( thread_running, thread_acquired, sem, acquired, exc_info, **thread_acquire_kwargs )) t.daemon = True t.start() thread_running.wait(10) # implausibly large time if release: sem.release() # Spin the loop to be sure the release gets through. # (Release schedules the notifier to run, and when the # notifier run it sends the async notification to the # other thread. 
Depending on exactly where we are in the # event loop, and the limit to the number of callbacks # that get run (including time-based) the notifier may or # may not be immediately ready to run, so this can take up # to two iterations.) for _ in range(self.IDLE_ITERATIONS): gevent.idle() if thread_acquired.wait(timing.LARGE_TICK): break self.assertEqual(acquired, [True]) if not release and thread_acquire_kwargs.get("timeout"): # Spin the loop to be sure that the timeout has a chance to # process. Interleave this with something that drops the GIL # so the background thread has a chance to notice that. for _ in range(self.IDLE_ITERATIONS): gevent.idle() if thread_acquired.wait(timing.LARGE_TICK): break thread_acquired.wait(timing.LARGE_TICK * 5) if require_thread_acquired_to_finish: self.assertTrue(thread_acquired.is_set()) try: self.assertEqual(exc_info, []) finally: exc_info = None return sem, acquired def test_acquire_in_one_then_another(self): self._do_test_acquire_in_one_then_another(release=True) def test_acquire_in_one_then_another_timed(self): sem, acquired_in_thread = self._do_test_acquire_in_one_then_another( release=False, require_thread_acquired_to_finish=True, timeout=timing.SMALLEST_RELIABLE_DELAY) self.assertEqual([False], acquired_in_thread) # This doesn't, of course, notify anything, because # the waiter has given up. sem.release() notifier = getattr(sem, '_notifier', None) self.assertIsNone(notifier) def test_acquire_in_one_wait_greenlet_wait_thread_gives_up(self): # The waiter in the thread both arrives and gives up while # the notifier is already running...or at least, that's what # we'd like to arrange, but the _notify_links function doesn't # drop the GIL/object lock, so the other thread is stuck and doesn't # actually get to call into the acquire method. 
from gevent import monkey self.assertFalse(monkey.is_module_patched('threading')) import threading sem = self._makeOne() # Make future acquires block sem.acquire() def greenlet_one(): ack = sem.acquire() # We're running in the notifier function right now. It switched to # us. thread.start() gevent.sleep(timing.LARGE_TICK) return ack exc_info = [] acquired = [] glet = gevent.spawn(greenlet_one) thread = threading.Thread(target=self._makeThreadMain( threading.Event(), threading.Event(), sem, acquired, exc_info, timeout=timing.LARGE_TICK )) thread.daemon = True gevent.idle() sem.release() glet.join() for _ in range(3): gevent.idle() thread.join(timing.LARGE_TICK) self.assertEqual(glet.value, True) self.assertEqual([], exc_info) self.assertEqual([False], acquired) self.assertTrue(glet.dead, glet) glet = None def assertOneHasNoHub(self, sem): self.assertIsNone(sem.hub, sem) @greentest.skipOnPyPyOnWindows("Flaky there; can't reproduce elsewhere") def test_dueling_threads(self, acquire_args=(), create_hub=None): # pylint:disable=too-many-locals,too-many-statements # Threads doing nothing but acquiring and releasing locks, without # having any other greenlets to switch to. # https://github.com/gevent/gevent/issues/1698 from gevent import monkey from gevent._hub_local import get_hub_if_exists self.assertFalse(monkey.is_module_patched('threading')) import threading from time import sleep as native_sleep sem = self._makeOne() self.assertOneHasNoHub(sem) count = 10000 results = [-1, -1] run = True def do_it(ix): if create_hub: gevent.get_hub() try: for i in range(count): if not run: break acquired = sem.acquire(*acquire_args) assert acquire_args or acquired if acquired: sem.release() results[ix] = i if not create_hub: # We don't artificially create the hub. 
self.assertIsNone( get_hub_if_exists(), (get_hub_if_exists(), ix, i) ) if create_hub and i % 10 == 0: gevent.sleep(timing.SMALLEST_RELIABLE_DELAY) elif i % 100 == 0: native_sleep(timing.SMALLEST_RELIABLE_DELAY) except Exception as ex: # pylint:disable=broad-except import traceback; traceback.print_exc() results[ix] = str(ex) ex = None finally: hub = get_hub_if_exists() if hub is not None: hub.join() hub.destroy(destroy_loop=True) t1 = threading.Thread(target=do_it, args=(0,)) t1.daemon = True t2 = threading.Thread(target=do_it, args=(1,)) t2.daemon = True t1.start() t2.start() t1.join(1) t2.join(1) while t1.is_alive() or t2.is_alive(): cur = list(results) t1.join(7) t2.join(7) if cur == results: # Hmm, after two seconds, no progress run = False break self.assertEqual(results, [count - 1, count - 1]) def test_dueling_threads_timeout(self): self.test_dueling_threads((True, 4)) def test_dueling_threads_with_hub(self): self.test_dueling_threads(create_hub=True) # XXX: Need a test with multiple greenlets in a non-primary # thread. Things should work, just very slowly; instead of moving through # greenlet.switch(), they'll be moving with async watchers.
TestSemaphoreMultiThread
python
kamyu104__LeetCode-Solutions
Python/finding-3-digit-even-numbers.py
{ "start": 3135, "end": 4363 }
class ____(object): def findEvenNumbers(self, digits): """ :type digits: List[int] :rtype: List[int] """ k = 3 def backtracking(curr, digit_cnt, result): if len(curr) == k: result.append(reduce(lambda x, y: x*10+y, curr)) return for i, (digit, cnt) in enumerate(digit_cnt): if (not curr and digit == 0) or (len(curr) == k-1 and digit%2 != 0): continue digit_cnt[i][1] -= 1 digit_cnt[i], digit_cnt[-1] = digit_cnt[-1], digit_cnt[i] removed = [] if digit_cnt[-1][1] == 0: removed = digit_cnt.pop() curr.append(digit) backtracking(curr, digit_cnt, result) curr.pop() if removed: digit_cnt.append(removed) digit_cnt[i], digit_cnt[-1] = digit_cnt[-1], digit_cnt[i] digit_cnt[i][1] += 1 cnt = collections.Counter(digits) digit_cnt = map(list, cnt.iteritems()) result = [] backtracking([], digit_cnt, result) result.sort() return result
Solution4
python
airbytehq__airbyte
airbyte-ci/connectors/live-tests/src/live_tests/commons/models.py
{ "start": 4759, "end": 4838 }
class ____(Enum): SOURCE = "source" DESTINATION = "destination"
ActorType
python
pandas-dev__pandas
pandas/tests/arrays/interval/test_interval.py
{ "start": 6822, "end": 8790 }
class ____: def test_min_max_invalid_axis(self, left_right_dtypes): left, right = left_right_dtypes arr = IntervalArray.from_arrays(left, right) msg = "`axis` must be fewer than the number of dimensions" for axis in [-2, 1]: with pytest.raises(ValueError, match=msg): arr.min(axis=axis) with pytest.raises(ValueError, match=msg): arr.max(axis=axis) msg = "'>=' not supported between" with pytest.raises(TypeError, match=msg): arr.min(axis="foo") with pytest.raises(TypeError, match=msg): arr.max(axis="foo") def test_min_max(self, left_right_dtypes, index_or_series_or_array): # GH#44746 left, right = left_right_dtypes arr = IntervalArray.from_arrays(left, right) # The expected results below are only valid if monotonic assert left.is_monotonic_increasing assert Index(arr).is_monotonic_increasing MIN = arr[0] MAX = arr[-1] indexer = np.arange(len(arr)) np.random.default_rng(2).shuffle(indexer) arr = arr.take(indexer) arr_na = arr.insert(2, np.nan) arr = index_or_series_or_array(arr) arr_na = index_or_series_or_array(arr_na) for skipna in [True, False]: res = arr.min(skipna=skipna) assert res == MIN assert type(res) == type(MIN) res = arr.max(skipna=skipna) assert res == MAX assert type(res) == type(MAX) res = arr_na.min(skipna=False) assert np.isnan(res) res = arr_na.max(skipna=False) assert np.isnan(res) for kws in [{"skipna": True}, {}]: res = arr_na.min(**kws) assert res == MIN assert type(res) == type(MIN) res = arr_na.max(**kws) assert res == MAX assert type(res) == type(MAX)
TestReductions
python
dask__distributed
distributed/shuffle/_exceptions.py
{ "start": 294, "end": 637 }
class ____(OSError): def __str__(self) -> str: return ( "P2P ran out of available disk space while temporarily storing transferred data. " "Please make sure that P2P has enough disk space available by increasing the number of " "workers or the size of the attached disk." )
P2POutOfDiskError
python
ray-project__ray
python/ray/data/_internal/datasource/iceberg_datasink.py
{ "start": 3876, "end": 19467 }
class ____( Datasink[Union[List["DataFile"], tuple[List["DataFile"], Dict[str, List[Any]]]]] ): """ Iceberg datasink to write a Ray Dataset into an existing Iceberg table. This datasink handles concurrent writes by: - Each worker writes Parquet files to storage and returns DataFile metadata - The driver collects all DataFile objects and performs a single commit """ def __init__( self, table_identifier: str, catalog_kwargs: Optional[Dict[str, Any]] = None, snapshot_properties: Optional[Dict[str, str]] = None, mode: SaveMode = SaveMode.APPEND, overwrite_filter: Optional["Expr"] = None, upsert_kwargs: Optional[Dict[str, Any]] = None, overwrite_kwargs: Optional[Dict[str, Any]] = None, upsert_commit_memory: Optional[int] = None, ): """ Initialize the IcebergDatasink Args: table_identifier: The identifier of the table such as `default.taxi_dataset` catalog_kwargs: Optional arguments to use when setting up the Iceberg catalog snapshot_properties: Custom properties to write to snapshot summary mode: Write mode - APPEND, UPSERT, or OVERWRITE. Defaults to APPEND. - APPEND: Add new data without checking for duplicates - UPSERT: Update existing rows or insert new ones based on a join condition - OVERWRITE: Replace table data (all data or filtered subset) overwrite_filter: Optional filter for OVERWRITE mode to perform partial overwrites. Must be a Ray Data expression from `ray.data.expressions`. Only rows matching this filter are replaced. If None with OVERWRITE mode, replaces all table data. upsert_kwargs: Optional arguments for upsert operations. Supported parameters: join_cols (List[str]), case_sensitive (bool), branch (str). Note: This implementation uses a copy-on-write strategy that always updates all columns for matched keys and inserts all new keys. overwrite_kwargs: Optional arguments to pass through to PyIceberg's table.overwrite() method. Supported parameters include case_sensitive (bool) and branch (str). See PyIceberg documentation for details. 
upsert_commit_memory: [For UPSERT mode only] The heap memory in bytes to reserve for the upsert commit operation. If None, uses Ray's default memory allocation. Note: Schema evolution is automatically enabled. New columns in the incoming data are automatically added to the table schema. """ self.table_identifier = table_identifier self._catalog_kwargs = (catalog_kwargs or {}).copy() self._snapshot_properties = snapshot_properties or {} self._mode = mode self._overwrite_filter = overwrite_filter self._upsert_kwargs = (upsert_kwargs or {}).copy() self._overwrite_kwargs = (overwrite_kwargs or {}).copy() self._upsert_commit_memory = upsert_commit_memory # Validate kwargs are only set for relevant modes if self._upsert_kwargs and self._mode != SaveMode.UPSERT: raise ValueError( f"upsert_kwargs can only be specified when mode is SaveMode.UPSERT, but mode is {self._mode}" ) if self._overwrite_kwargs and self._mode != SaveMode.OVERWRITE: raise ValueError( f"overwrite_kwargs can only be specified when mode is SaveMode.OVERWRITE, but mode is {self._mode}" ) # Remove invalid parameters from overwrite_kwargs if present for invalid_param, reason in [ ( "overwrite_filter", "should be passed as a separate parameter to write_iceberg()", ), ( "delete_filter", "is an internal PyIceberg parameter; use 'overwrite_filter' instead", ), ]: if self._overwrite_kwargs.pop(invalid_param, None) is not None: logger.warning( f"Removed '{invalid_param}' from overwrite_kwargs: {reason}" ) if "name" in self._catalog_kwargs: self._catalog_name = self._catalog_kwargs.pop("name") else: self._catalog_name = "default" self._table: "Table" = None self._write_uuid: uuid.UUID = None def __getstate__(self) -> dict: """Exclude `_table` during pickling.""" state = self.__dict__.copy() state.pop("_table", None) return state def __setstate__(self, state: dict) -> None: self.__dict__.update(state) self._table = None def _get_catalog(self) -> "Catalog": from pyiceberg import catalog return 
catalog.load_catalog(self._catalog_name, **self._catalog_kwargs) def _reload_table(self) -> None: """Reload the Iceberg table from the catalog.""" catalog = self._get_catalog() self._table = catalog.load_table(self.table_identifier) def _get_join_cols(self) -> List[str]: """Get join columns for upsert, using table identifier fields as fallback.""" join_cols = self._upsert_kwargs.get("join_cols", []) if not join_cols: # Use table's identifier fields as fallback for field_id in self._table.metadata.schema().identifier_field_ids: col_name = self._table.metadata.schema().find_column_name(field_id) if col_name: join_cols.append(col_name) return join_cols def _update_schema(self, incoming_schema: "pa.Schema") -> None: """ Update the table schema to accommodate incoming data using union-by-name semantics. This automatically handles: - Adding new columns from the incoming schema - Type promotion (e.g., int32 -> int64) where compatible - Preserving existing columns not in the incoming schema - Concurrent schema updates from multiple workers (with retry logic) Note: Each worker calls this once with the first block's schema. All blocks within a worker are validated to have the same schema before calling this. Args: incoming_schema: The PyArrow schema from the incoming data """ from pyiceberg.exceptions import CommitFailedException max_retries = 3 for attempt in range(max_retries): try: with self._table.update_schema() as update: update.union_by_name(incoming_schema) # Succeeded, reload to get latest table version and exit. 
self._reload_table() return except CommitFailedException: if attempt < max_retries - 1: logger.debug( f"Schema update conflict - another worker modified schema, " f"reloading and retrying (attempt {attempt + 1}/{max_retries})" ) self._reload_table() else: logger.error( "Failed to update schema after %d retries due to conflicts.", max_retries, ) raise def on_write_start(self) -> None: """Initialize table for writing and create a shared write UUID.""" self._table = self._get_catalog().load_table(self.table_identifier) self._write_uuid = uuid.uuid4() # Validate join_cols for UPSERT mode before writing any files if self._mode == SaveMode.UPSERT: join_cols = self._upsert_kwargs.get("join_cols", []) if not join_cols: # Check if table has identifier fields as fallback identifier_field_ids = ( self._table.metadata.schema().identifier_field_ids ) if not identifier_field_ids: raise ValueError( "join_cols must be specified in upsert_kwargs for UPSERT mode " "when table has no identifier fields" ) def write( self, blocks: Iterable[Block], ctx: TaskContext ) -> Union[List["DataFile"], tuple[List["DataFile"], Dict[str, List[Any]]]]: """ Write blocks to Parquet files in storage and return DataFile metadata. This runs on each worker in parallel. Files are written directly to storage (S3, HDFS, etc.) and only metadata is returned to the driver. Args: blocks: Iterable of Ray Data blocks to write ctx: TaskContext object containing task-specific information Returns: For APPEND/OVERWRITE: List of DataFile objects. For UPSERT: Tuple of (List of DataFile objects, Dict mapping col names to key values). 
""" from pyiceberg.io.pyarrow import _dataframe_to_data_files if self._table is None: self._reload_table() all_data_files = [] join_keys_dict = defaultdict(list) first_schema = None # Extract join keys for copy-on-write upsert extract_join_keys = self._mode == SaveMode.UPSERT for block in blocks: pa_table = BlockAccessor.for_block(block).to_arrow() if pa_table.num_rows > 0: # Schema validation and update strategy: # 1. Each worker validates all its blocks have the same schema # 2. Schema is updated once per worker (not per block) to minimize conflicts # 3. Concurrent updates from different workers are handled by retry logic if first_schema is None: first_schema = pa_table.schema # Update schema once per worker (handles concurrent updates from other workers) self._update_schema(first_schema) elif not pa_table.schema.equals(first_schema): raise ValueError( f"Schema mismatch within worker: expected {first_schema}, " f"got {pa_table.schema}. All blocks must have the same schema." ) # Extract join key values for copy-on-write upsert # These will be used to build a delete filter for matching rows # Example: Upserting [{col_a: 2}, {col_a: 3}] extracts {col_a: [2, 3]} # which becomes filter: col_a IN (2, 3) if extract_join_keys: join_cols = self._get_join_cols() for col in join_cols: join_keys_dict[col].extend(pa_table[col].to_pylist()) # Write data files to storage (distributed!) # _dataframe_to_data_files writes Parquet files and returns DataFile metadata data_files = list( _dataframe_to_data_files( table_metadata=self._table.metadata, write_uuid=self._write_uuid, df=pa_table, io=self._table.io, ) ) all_data_files.extend(data_files) # Return appropriate type based on whether we extracted join keys if extract_join_keys: return (all_data_files, join_keys_dict) return all_data_files def on_write_complete(self, write_result: WriteResult) -> None: """ Complete the write by committing all data files in a single transaction. 
This runs on the driver after all workers finish writing files. Collects all DataFile objects from all workers and performs a single atomic commit based on the configured mode. """ # Reload table to get latest metadata self._reload_table() # Collect all data files and join keys (if applicable) from all workers all_data_files = [] join_keys_dicts = [] # Check if we're in upsert mode (returns tuple with join keys) use_copy_on_write_upsert = self._mode == SaveMode.UPSERT for write_return in write_result.write_returns: if not write_return: continue if use_copy_on_write_upsert: # For copy-on-write upsert, write() returns (data_files, join_keys_dict) data_files, join_keys_dict = write_return all_data_files.extend(data_files) # Collect join keys dicts (will be merged in Ray task) join_keys_dicts.append(join_keys_dict) else: # For other modes, write() returns just data_files all_data_files.extend(write_return) if not all_data_files: return # Create transaction and commit based on mode if self._mode == SaveMode.APPEND: self._commit_append(all_data_files) elif self._mode == SaveMode.OVERWRITE: self._commit_overwrite(all_data_files) elif self._mode == SaveMode.UPSERT: # Execute entire commit in Ray task to avoid OOM on driver join_cols = self._get_join_cols() # Configure task options with memory if specified task_options = {} if self._upsert_commit_memory is not None: task_options["memory"] = self._upsert_commit_memory ray.get( _commit_upsert_task.options(**task_options).remote( table_identifier=self.table_identifier, catalog_name=self._catalog_name, catalog_kwargs=self._catalog_kwargs, data_files=all_data_files, join_keys_dicts=join_keys_dicts, join_cols=join_cols, snapshot_properties=self._snapshot_properties, ) ) else: raise ValueError(f"Unsupported mode: {self._mode}") def _commit_append(self, data_files: List["DataFile"]) -> None: """Commit data files using APPEND mode.""" txn = self._table.transaction() _append_and_commit(txn, data_files, self._snapshot_properties) def 
_commit_overwrite(self, data_files: List["DataFile"]) -> None: """Commit data files using OVERWRITE mode.""" txn = self._table.transaction() # Delete matching data if filter provided if self._overwrite_filter is not None: from ray.data._internal.datasource.iceberg_datasource import ( _IcebergExpressionVisitor, ) visitor = _IcebergExpressionVisitor() pyi_filter = visitor.visit(self._overwrite_filter) txn.delete( delete_filter=pyi_filter, snapshot_properties=self._snapshot_properties, **self._overwrite_kwargs, ) else: # Full overwrite - delete all from pyiceberg.expressions import AlwaysTrue txn.delete( delete_filter=AlwaysTrue(), snapshot_properties=self._snapshot_properties, **self._overwrite_kwargs, ) # Append new data files and commit _append_and_commit(txn, data_files, self._snapshot_properties)
IcebergDatasink
python
sqlalchemy__sqlalchemy
test/orm/inheritance/test_basic.py
{ "start": 36450, "end": 40330 }
class ____(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): global foo, bar, blub foo = Table( "foo", metadata, Column( "id", Integer, primary_key=True, test_needs_autoincrement=True ), Column("type", String(30)), Column("data", String(20)), ) bar = Table( "bar", metadata, Column("id", Integer, ForeignKey("foo.id"), primary_key=True), Column("bar_data", String(20)), ) blub = Table( "blub", metadata, Column( "blub_id", Integer, primary_key=True, test_needs_autoincrement=True, ), Column("foo_id", Integer, ForeignKey("foo.id")), Column("bar_id", Integer, ForeignKey("bar.id")), Column("blub_data", String(20)), ) @classmethod def setup_classes(cls): class Foo(cls.Basic): pass class Bar(Foo): pass class Blub(Bar): pass @testing.combinations( ("polymorphic", True), ("test_get_nonpolymorphic", False), id_="ia" ) def test_get(self, polymorphic): foo, Bar, Blub, blub, bar, Foo = ( self.tables.foo, self.classes.Bar, self.classes.Blub, self.tables.blub, self.tables.bar, self.classes.Foo, ) if polymorphic: self.mapper_registry.map_imperatively( Foo, foo, polymorphic_on=foo.c.type, polymorphic_identity="foo" ) self.mapper_registry.map_imperatively( Bar, bar, inherits=Foo, polymorphic_identity="bar" ) self.mapper_registry.map_imperatively( Blub, blub, inherits=Bar, polymorphic_identity="blub" ) else: self.mapper_registry.map_imperatively(Foo, foo) self.mapper_registry.map_imperatively(Bar, bar, inherits=Foo) self.mapper_registry.map_imperatively(Blub, blub, inherits=Bar) sess = fixture_session() f = Foo() b = Bar() bl = Blub() sess.add(f) sess.add(b) sess.add(bl) sess.flush() if polymorphic: def go(): assert sess.get(Foo, f.id) is f assert sess.get(Foo, b.id) is b assert sess.get(Foo, bl.id) is bl assert sess.get(Bar, b.id) is b assert sess.get(Bar, bl.id) is bl assert sess.get(Blub, bl.id) is bl # test class mismatches - item is present # in the identity map but we requested a subclass assert sess.get(Blub, f.id) is None assert sess.get(Blub, b.id) is None 
assert sess.get(Bar, f.id) is None self.assert_sql_count(testing.db, go, 0) else: # this is testing the 'wrong' behavior of using get() # polymorphically with mappers that are not configured to be # polymorphic. the important part being that get() always # returns an instance of the query's type. def go(): assert sess.get(Foo, f.id) is f bb = sess.get(Foo, b.id) assert isinstance(b, Foo) and bb.id == b.id bll = sess.get(Foo, bl.id) assert isinstance(bll, Foo) and bll.id == bl.id assert sess.get(Bar, b.id) is b bll = sess.get(Bar, bl.id) assert isinstance(bll, Bar) and bll.id == bl.id assert sess.get(Blub, bl.id) is bl self.assert_sql_count(testing.db, go, 3)
GetTest
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/dataclass5.py
{ "start": 658, "end": 839 }
class ____: x: int x_squared: int # This should generate an error because there is no # override __init__ method and no synthesized __init__. d = D(3) @dataclass(eq=False)
D
python
yandexdataschool__Practical_RL
week02_value_based/mdp.py
{ "start": 6695, "end": 15141 }
class ____(MDP): """ Winter is here. You and your friends were tossing around a frisbee at the park when you made a wild throw that left the frisbee out in the middle of the lake. The water is mostly frozen, but there are a few holes where the ice has melted. If you step into one of those holes, you'll fall into the freezing water. At this time, there's an international frisbee shortage, so it's absolutely imperative that you navigate across the lake and retrieve the disc. However, the ice is slippery, so you won't always move in the direction you intend. The surface is described using a grid like the following SFFF FHFH FFFH HFFG S : starting point, safe F : frozen surface, safe H : hole, fall to your doom G : goal, where the frisbee is located The episode ends when you reach the goal or fall in a hole. You receive a reward of 1 if you reach the goal, and zero otherwise. """ MAPS = { "4x4": [ "SFFF", "FHFH", "FFFH", "HFFG" ], "8x8": [ "SFFFFFFF", "FFFFFFFF", "FFFHFFFF", "FFFFFHFF", "FFFHFFFF", "FHHFFFHF", "FHFFHFHF", "FFFHFFFG" ], } def __init__(self, desc=None, map_name="4x4", slip_chance=0.2, seed=None): if desc is None and map_name is None: raise ValueError('Must provide either desc or map_name') elif desc is None: desc = self.MAPS[map_name] assert ''.join(desc).count( 'S') == 1, "this implementation supports having exactly one initial state" assert all(c in "SFHG" for c in ''.join(desc)), "all cells must be either of S, F, H or G" self.desc = desc = np.asarray(list(map(list, desc)), dtype='str') self.lastaction = None nrow, ncol = desc.shape states = [(i, j) for i in range(nrow) for j in range(ncol)] actions = ["left", "down", "right", "up"] initial_state = states[np.array(desc == b'S').ravel().argmax()] def move(row, col, movement): if movement == 'left': col = max(col - 1, 0) elif movement == 'down': row = min(row + 1, nrow - 1) elif movement == 'right': col = min(col + 1, ncol - 1) elif movement == 'up': row = max(row - 1, 0) else: raise ("invalid action") 
return (row, col) transition_probs = {s: {} for s in states} rewards = {s: {} for s in states} for (row, col) in states: if desc[row, col] in "GH": continue for action_i in range(len(actions)): action = actions[action_i] transition_probs[(row, col)][action] = {} rewards[(row, col)][action] = {} for movement_i in [(action_i - 1) % len(actions), action_i, (action_i + 1) % len(actions)]: movement = actions[movement_i] newrow, newcol = move(row, col, movement) prob = (1. - slip_chance) if movement == action else ( slip_chance / 2.) if prob == 0: continue if (newrow, newcol) not in transition_probs[row, col][ action]: transition_probs[row, col][action][ newrow, newcol] = prob else: transition_probs[row, col][action][ newrow, newcol] += prob if desc[newrow, newcol] == 'G': rewards[row, col][action][newrow, newcol] = 1.0 MDP.__init__(self, transition_probs, rewards, initial_state, seed) def render(self): desc_copy = np.copy(self.desc) desc_copy[self._current_state] = '*' print('\n'.join(map(''.join, desc_copy)), end='\n\n') def plot_graph(mdp, s_node_size='1,5', a_node_size='0,5', rankdir='LR', ): """ Function for pretty drawing MDP graph with graphviz library. 
Requirements: graphviz : https://www.graphviz.org/ for ubuntu users: sudo apt-get install graphviz python library for graphviz for pip users: pip install graphviz :param mdp: :param s_node_size: size of state nodes :param a_node_size: size of action nodes :param rankdir: order for drawing :return: dot object """ s_node_attrs = {'shape': 'doublecircle', 'color': '#85ff75', 'style': 'filled', 'width': str(s_node_size), 'height': str(s_node_size), 'fontname': 'Arial', 'fontsize': '24'} a_node_attrs = {'shape': 'circle', 'color': 'lightpink', 'style': 'filled', 'width': str(a_node_size), 'height': str(a_node_size), 'fontname': 'Arial', 'fontsize': '20'} s_a_edge_attrs = {'style': 'bold', 'color': 'red', 'ratio': 'auto'} a_s_edge_attrs = {'style': 'dashed', 'color': 'blue', 'ratio': 'auto', 'fontname': 'Arial', 'fontsize': '16'} graph = Digraph(name='MDP') graph.attr(rankdir=rankdir) for state_node in mdp._transition_probs: graph.node(state_node, **s_node_attrs) for posible_action in mdp.get_possible_actions(state_node): action_node = state_node + "-" + posible_action graph.node(action_node, label=str(posible_action), **a_node_attrs) graph.edge(state_node, state_node + "-" + posible_action, **s_a_edge_attrs) for posible_next_state in mdp.get_next_states(state_node, posible_action): probability = mdp.get_transition_prob( state_node, posible_action, posible_next_state) reward = mdp.get_reward( state_node, posible_action, posible_next_state) if reward != 0: label_a_s_edge = 'p = ' + str(probability) + \ ' ' + 'reward =' + str(reward) else: label_a_s_edge = 'p = ' + str(probability) graph.edge(action_node, posible_next_state, label=label_a_s_edge, **a_s_edge_attrs) return graph def plot_graph_with_state_values(mdp, state_values): """ Plot graph with state values""" graph = plot_graph(mdp) for state_node in mdp._transition_probs: value = state_values[state_node] graph.node(state_node, label=str(state_node) + '\n' + 'V =' + str(value)[:4]) return graph def 
get_optimal_action_for_plot(mdp, state_values, state, get_action_value, gamma=0.9): """ Finds optimal action using formula above. """ if mdp.is_terminal(state): return None next_actions = mdp.get_possible_actions(state) q_values = [get_action_value(mdp, state_values, state, action, gamma) for action in next_actions] optimal_action = next_actions[np.argmax(q_values)] return optimal_action def plot_graph_optimal_strategy_and_state_values(mdp, state_values, get_action_value, gamma=0.9): """ Plot graph with state values and """ graph = plot_graph(mdp) opt_s_a_edge_attrs = {'style': 'bold', 'color': 'green', 'ratio': 'auto', 'penwidth': '6'} for state_node in mdp._transition_probs: value = state_values[state_node] graph.node(state_node, label=str(state_node) + '\n' + 'V =' + str(value)[:4]) for action in mdp.get_possible_actions(state_node): if action == get_optimal_action_for_plot(mdp, state_values, state_node, get_action_value, gamma): graph.edge(state_node, state_node + "-" + action, **opt_s_a_edge_attrs) return graph
FrozenLakeEnv
python
getsentry__sentry
src/sentry/uptime/apps.py
{ "start": 36, "end": 182 }
class ____(AppConfig): name = "sentry.uptime" def ready(self) -> None: from sentry.uptime.endpoints import serializers # NOQA
Config
python
astropy__astropy
astropy/coordinates/tests/test_representation.py
{ "start": 54694, "end": 66366 }
class ____: @pytest.mark.parametrize("matrix", list(matrices.values())) def test_transform(self, matrix): """Test ``.transform()`` on rotation and general matrices.""" # set up representation ds1 = UnitSphericalCosLatDifferential( d_lon_coslat=[1, 2] * u.mas / u.yr, d_lat=[3, 4] * u.mas / u.yr, ) s1 = UnitSphericalRepresentation(lon=[1, 2] * u.deg, lat=[3, 4] * u.deg) # transform representation & get comparison (thru CartesianRep) s2 = s1.transform(matrix) ds2 = ds1.transform(matrix, s1, s2) dexpected = UnitSphericalCosLatDifferential.from_cartesian( ds1.to_cartesian(base=s1).transform(matrix), base=s2 ) assert_allclose_quantity(ds2.d_lon_coslat, dexpected.d_lon_coslat) assert_allclose_quantity(ds2.d_lat, dexpected.d_lat) def test_cartesian_spherical_roundtrip(): s1 = CartesianRepresentation( x=[1, 2000.0] * u.kpc, y=[3000.0, 4.0] * u.pc, z=[5.0, 6000.0] * u.pc ) s2 = SphericalRepresentation.from_representation(s1) s3 = CartesianRepresentation.from_representation(s2) s4 = SphericalRepresentation.from_representation(s3) assert_allclose_quantity(s1.x, s3.x) assert_allclose_quantity(s1.y, s3.y) assert_allclose_quantity(s1.z, s3.z) assert_allclose_quantity(s2.lon, s4.lon) assert_allclose_quantity(s2.lat, s4.lat) assert_allclose_quantity(s2.distance, s4.distance) def test_cartesian_setting_with_other(): s1 = CartesianRepresentation( x=[1, 2000.0] * u.kpc, y=[3000.0, 4.0] * u.pc, z=[5.0, 6000.0] * u.pc ) s1[0] = SphericalRepresentation(0.0 * u.deg, 0.0 * u.deg, 1 * u.kpc) assert_allclose_quantity(s1.x, [1.0, 2000.0] * u.kpc) assert_allclose_quantity(s1.y, [0.0, 4.0] * u.pc) assert_allclose_quantity(s1.z, [0.0, 6000.0] * u.pc) with pytest.raises(ValueError, match="loss of information"): s1[1] = UnitSphericalRepresentation(0.0 * u.deg, 10.0 * u.deg) def test_cartesian_physics_spherical_roundtrip(): s1 = CartesianRepresentation( x=[1, 2000.0] * u.kpc, y=[3000.0, 4.0] * u.pc, z=[5.0, 6000.0] * u.pc ) s2 = PhysicsSphericalRepresentation.from_representation(s1) s3 = 
CartesianRepresentation.from_representation(s2) s4 = PhysicsSphericalRepresentation.from_representation(s3) assert_allclose_quantity(s1.x, s3.x) assert_allclose_quantity(s1.y, s3.y) assert_allclose_quantity(s1.z, s3.z) assert_allclose_quantity(s2.phi, s4.phi) assert_allclose_quantity(s2.theta, s4.theta) assert_allclose_quantity(s2.r, s4.r) def test_spherical_physics_spherical_roundtrip(): s1 = SphericalRepresentation(lon=3 * u.deg, lat=4 * u.deg, distance=3 * u.kpc) s2 = PhysicsSphericalRepresentation.from_representation(s1) s3 = SphericalRepresentation.from_representation(s2) s4 = PhysicsSphericalRepresentation.from_representation(s3) assert_allclose_quantity(s1.lon, s3.lon) assert_allclose_quantity(s1.lat, s3.lat) assert_allclose_quantity(s1.distance, s3.distance) assert_allclose_quantity(s2.phi, s4.phi) assert_allclose_quantity(s2.theta, s4.theta) assert_allclose_quantity(s2.r, s4.r) assert_allclose_quantity(s1.lon, s4.phi) assert_allclose_quantity(s1.lat, 90.0 * u.deg - s4.theta) assert_allclose_quantity(s1.distance, s4.r) def test_cartesian_cylindrical_roundtrip(): s1 = CartesianRepresentation( x=np.array([1.0, 2000.0]) * u.kpc, y=np.array([3000.0, 4.0]) * u.pc, z=np.array([5.0, 600.0]) * u.cm, ) s2 = CylindricalRepresentation.from_representation(s1) s3 = CartesianRepresentation.from_representation(s2) s4 = CylindricalRepresentation.from_representation(s3) assert_allclose_quantity(s1.x, s3.x) assert_allclose_quantity(s1.y, s3.y) assert_allclose_quantity(s1.z, s3.z) assert_allclose_quantity(s2.rho, s4.rho) assert_allclose_quantity(s2.phi, s4.phi) assert_allclose_quantity(s2.z, s4.z) def test_unit_spherical_roundtrip(): s1 = UnitSphericalRepresentation( lon=[10.0, 30.0] * u.deg, lat=[5.0, 6.0] * u.arcmin ) s2 = CartesianRepresentation.from_representation(s1) s3 = SphericalRepresentation.from_representation(s2) s4 = UnitSphericalRepresentation.from_representation(s3) assert_allclose_quantity(s1.lon, s4.lon) assert_allclose_quantity(s1.lat, s4.lat) def 
test_no_unnecessary_copies(): s1 = UnitSphericalRepresentation( lon=[10.0, 30.0] * u.deg, lat=[5.0, 6.0] * u.arcmin ) s2 = s1.represent_as(UnitSphericalRepresentation) assert s2 is s1 assert np.may_share_memory(s1.lon, s2.lon) assert np.may_share_memory(s1.lat, s2.lat) s3 = s1.represent_as(SphericalRepresentation) assert np.may_share_memory(s1.lon, s3.lon) assert np.may_share_memory(s1.lat, s3.lat) s4 = s1.represent_as(CartesianRepresentation) s5 = s4.represent_as(CylindricalRepresentation) assert np.may_share_memory(s5.z, s4.z) def test_representation_repr(): r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc) assert ( repr(r1) == "<SphericalRepresentation (lon, lat, distance) in (deg, deg, kpc)\n" " (1., 2.5, 1.)>" ) r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) assert repr(r2) == "<CartesianRepresentation (x, y, z) in kpc\n (1., 2., 3.)>" r3 = CartesianRepresentation( x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc ) assert ( repr(r3) == "<CartesianRepresentation (x, y, z) in kpc\n" " [(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)]>" ) def test_representation_repr_multi_d(): """Regression test for #5889.""" cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit="m") assert ( repr(cr) == "<CartesianRepresentation (x, y, z) in m\n" " [[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n" " [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n" " [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]]>" ) # This was broken before. assert ( repr(cr.T) == "<CartesianRepresentation (x, y, z) in m\n" " [[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n" " [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n" " [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]]>" ) def test_representation_str(): r1 = SphericalRepresentation(lon=1 * u.deg, lat=2.5 * u.deg, distance=1 * u.kpc) assert str(r1) == "(1., 2.5, 1.) 
(deg, deg, kpc)" r2 = CartesianRepresentation(x=1 * u.kpc, y=2 * u.kpc, z=3 * u.kpc) assert str(r2) == "(1., 2., 3.) kpc" r3 = CartesianRepresentation( x=[1, 2, 3] * u.kpc, y=4 * u.kpc, z=[9, 10, 11] * u.kpc ) assert str(r3) == "[(1., 4., 9.), (2., 4., 10.), (3., 4., 11.)] kpc" def test_representation_str_multi_d(): """Regression test for #5889.""" cr = CartesianRepresentation(np.arange(27).reshape(3, 3, 3), unit="m") assert ( str(cr) == "[[(0., 9., 18.), (1., 10., 19.), (2., 11., 20.)],\n" " [(3., 12., 21.), (4., 13., 22.), (5., 14., 23.)],\n" " [(6., 15., 24.), (7., 16., 25.), (8., 17., 26.)]] m" ) # This was broken before. assert ( str(cr.T) == "[[(0., 9., 18.), (3., 12., 21.), (6., 15., 24.)],\n" " [(1., 10., 19.), (4., 13., 22.), (7., 16., 25.)],\n" " [(2., 11., 20.), (5., 14., 23.), (8., 17., 26.)]] m" ) def test_subclass_representation(): from astropy.coordinates.builtin_frames import ICRS class Longitude180(Longitude): def __new__(cls, angle, unit=None, wrap_angle=180 * u.deg, **kwargs): return super().__new__( cls, angle, unit=unit, wrap_angle=wrap_angle, **kwargs ) class SphericalWrap180Representation(SphericalRepresentation): attr_classes = {"lon": Longitude180, "lat": Latitude, "distance": u.Quantity} class ICRSWrap180(ICRS): frame_specific_representation_info = ( ICRS._frame_specific_representation_info.copy() ) frame_specific_representation_info[SphericalWrap180Representation] = ( frame_specific_representation_info[SphericalRepresentation] ) default_representation = SphericalWrap180Representation c = ICRSWrap180(ra=-1 * u.deg, dec=-2 * u.deg, distance=1 * u.m) assert c.ra.value == -1 assert c.ra.unit is u.deg assert c.dec.value == -2 assert c.dec.unit is u.deg def test_minimal_subclass(): # Basically to check what we document works; # see doc/coordinates/representations.rst class LogDRepresentation(BaseRepresentation): attr_classes = {"lon": Longitude, "lat": Latitude, "logd": u.Dex} def to_cartesian(self): d = self.logd.physical x = d * 
np.cos(self.lat) * np.cos(self.lon) y = d * np.cos(self.lat) * np.sin(self.lon) z = d * np.sin(self.lat) return CartesianRepresentation(x=x, y=y, z=z, copy=False) @classmethod def from_cartesian(cls, cart): s = np.hypot(cart.x, cart.y) r = np.hypot(s, cart.z) lon = np.arctan2(cart.y, cart.x) lat = np.arctan2(cart.z, s) return cls(lon=lon, lat=lat, logd=u.Dex(r), copy=False) ld1 = LogDRepresentation(90.0 * u.deg, 0.0 * u.deg, 1.0 * u.dex(u.kpc)) ld2 = LogDRepresentation(lon=90.0 * u.deg, lat=0.0 * u.deg, logd=1.0 * u.dex(u.kpc)) assert np.all(ld1.lon == ld2.lon) assert np.all(ld1.lat == ld2.lat) assert np.all(ld1.logd == ld2.logd) c = ld1.to_cartesian() assert_allclose_quantity(c.xyz, [0.0, 10.0, 0.0] * u.kpc, atol=1.0 * u.npc) ld3 = LogDRepresentation.from_cartesian(c) assert np.all(ld3.lon == ld2.lon) assert np.all(ld3.lat == ld2.lat) assert np.all(ld3.logd == ld2.logd) s = ld1.represent_as(SphericalRepresentation) assert_allclose_quantity(s.lon, ld1.lon) assert_allclose_quantity(s.distance, 10.0 * u.kpc) assert_allclose_quantity(s.lat, ld1.lat) with pytest.raises(TypeError): LogDRepresentation(0.0 * u.deg, 1.0 * u.deg) with pytest.raises(TypeError): LogDRepresentation( 0.0 * u.deg, 1.0 * u.deg, 1.0 * u.dex(u.kpc), lon=1.0 * u.deg ) with pytest.raises(TypeError): LogDRepresentation(0.0 * u.deg, 1.0 * u.deg, 1.0 * u.dex(u.kpc), True, False) with pytest.raises(TypeError): LogDRepresentation(0.0 * u.deg, 1.0 * u.deg, 1.0 * u.dex(u.kpc), foo="bar") # if we define it a second time, even the qualnames are the same, # so we raise with pytest.raises(ValueError): class LogDRepresentation(BaseRepresentation): attr_classes = {"lon": Longitude, "lat": Latitude, "logr": u.Dex} def test_duplicate_warning(): from astropy.coordinates.representation import ( DUPLICATE_REPRESENTATIONS, REPRESENTATION_CLASSES, ) with pytest.warns(DuplicateRepresentationWarning): class UnitSphericalRepresentation(BaseRepresentation): attr_classes = {"lon": Longitude, "lat": Latitude} assert 
"unitspherical" in DUPLICATE_REPRESENTATIONS assert "unitspherical" not in REPRESENTATION_CLASSES assert ( "astropy.coordinates.representation.spherical.UnitSphericalRepresentation" in REPRESENTATION_CLASSES ) assert ( __name__ + ".test_duplicate_warning.<locals>.UnitSphericalRepresentation" in REPRESENTATION_CLASSES )
TestUnitSphericalCosLatDifferential
python
kamyu104__LeetCode-Solutions
Python/find-the-number-of-good-pairs-i.py
{ "start": 564, "end": 825 }
class ____(object):
    def numberOfPairs(self, nums1, nums2, k):
        """
        :type nums1: List[int]
        :type nums2: List[int]
        :type k: int
        :rtype: int
        """
        # A pair (i, j) is good when nums1[i] is divisible by nums2[j] * k.
        # Brute-force count over the cross product: O(m * n) time, O(1) space.
        good = 0
        for dividend in nums1:
            for base in nums2:
                if dividend % (base * k) == 0:
                    good += 1
        return good
Solution2
python
prakhar1989__Algorithms
tests/unionfind_test.py
{ "start": 134, "end": 1599 }
class ____(unittest.TestCase):
    """Unit tests for the UnionFind disjoint-set structure.

    setUp builds two disjoint groups: {a, b, c} and {i, j}.  The tests
    assert that "a" and "i" act as the groups' leaders -- the leader
    appears to be the first-inserted element of a group; confirm against
    the UnionFind implementation.
    """

    def setUp(self):
        # Fresh structure for every test; insert(x, y) unions x and y.
        self.uf = UnionFind()
        self.uf.insert("a", "b")
        self.uf.insert("b", "c")
        self.uf.insert("i", "j")

    def test_get_parent_method(self):
        # Every member of a group reports the same leader; distinct
        # groups report different leaders.
        self.assertEqual("a", self.uf.get_leader("a"))
        self.assertEqual("a", self.uf.get_leader("b"))
        self.assertEqual("a", self.uf.get_leader("c"))
        self.assertEqual("i", self.uf.get_leader("j"))
        self.assertEqual("i", self.uf.get_leader("i"))
        self.assertNotEqual(self.uf.get_leader("a"), self.uf.get_leader("i"))

    def test_insert_method(self):
        # Inserting an edge touching an existing group extends that group.
        self.uf.insert("c", "d")
        self.assertEqual(self.uf.get_leader("c"), self.uf.get_leader("d"))
        self.assertEqual(self.uf.get_leader("a"), self.uf.get_leader("d"))

    def test_insert_one_node(self):
        # A single-argument insert creates a singleton group led by itself.
        self.uf.insert('z')
        self.assertEqual(self.uf.get_leader('z'), 'z')
        self.assertEqual(self.uf.count_groups(), 3)

    def test_make_union_method(self):
        # Unioning the two leaders merges the groups into one.
        self.uf.make_union(self.uf.get_leader("a"), self.uf.get_leader("i"))
        self.assertEqual(self.uf.get_leader("a"), self.uf.get_leader("i"))

    def test_make_union_with_invalid_leader_raises_exception(self):
        # make_union only accepts known leaders; "z" was never inserted.
        self.assertRaises(Exception, self.uf.make_union, "a", "z")

    def test_get_count(self):
        # Adding an unrelated pair {z, y} yields a third group.
        self.uf.insert("z", "y")
        self.assertEqual(self.uf.count_groups(), 3)


if __name__ == "__main__":
    unittest.main()
test_unionfind
python
astropy__astropy
astropy/utils/masked/tests/test_function_helpers.py
{ "start": 25408, "end": 30032 }
class ____(InvariantMaskTestSetup):
    # Tests for ufunc-like numpy functions applied to Masked arrays.
    # NOTE(review): the mixin supplies self.a / self.mask_a / self.ma (and
    # the b variants) plus self.check, which appears to assert that the mask
    # passes through unchanged -- confirm in InvariantMaskTestSetup.

    def test_fix(self):
        self.check(np.fix)
        # Check np.fix with out argument for completeness
        # (Note: could be done in self.check, but np.fix is the only
        # invariant mask function that has `out`, so no point.)
        out = np.zeros_like(self.ma)
        result = np.fix(self.ma, out=out)
        assert result is out
        expected = np.fix(self.a)
        assert_array_equal(out.unmasked, expected)
        assert_array_equal(out.mask, self.mask_a)

    def test_angle(self):
        # Complex input: np.angle operates on the unmasked data; the mask
        # is carried over verbatim.
        a = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j])
        mask_a = np.array([True, False, True, False])
        ma = Masked(a, mask=mask_a)
        out = np.angle(ma)
        expected = np.angle(ma.unmasked)
        assert_array_equal(out.unmasked, expected)
        assert_array_equal(out.mask, mask_a)

    def test_i0(self):
        self.check(np.i0)

    def test_sinc(self):
        self.check(np.sinc)

    def test_where(self):
        mask = [True, False, True]
        out = np.where(mask, self.ma, 1000.0)
        expected = np.where(mask, self.a, 1000.0)
        expected_mask = np.where(mask, self.mask_a, False)
        assert_array_equal(out.unmasked, expected)
        assert_array_equal(out.mask, expected_mask)

        # A Masked condition contributes its own mask to the result.
        mask2 = Masked(mask, [True, False, False])
        out2 = np.where(mask2, self.ma, 1000.0)
        expected2 = np.where(mask, self.a, 1000.0)
        expected_mask2 = np.where(mask, self.mask_a, False) | mask2.mask
        assert_array_equal(out2.unmasked, expected2)
        assert_array_equal(out2.mask, expected_mask2)

    def test_where_single_arg(self):
        # Single-argument np.where is equivalent to nonzero().
        m = Masked(np.arange(3), mask=[True, False, False])
        out = np.where(m)
        expected = m.nonzero()
        assert isinstance(out, tuple) and len(out) == 1
        assert_array_equal(out[0], expected[0])

    def test_where_wrong_number_of_arg(self):
        with pytest.raises(ValueError, match="either both or neither"):
            np.where([True, False, False], self.a)

    def test_choose(self):
        a = np.array([0, 1]).reshape((2, 1))
        result = np.choose(a, (self.ma, self.mb))
        expected = np.choose(a, (self.a, self.b))
        expected_mask = np.choose(a, (self.mask_a, self.mask_b))
        assert_array_equal(result.unmasked, expected)
        assert_array_equal(result.mask, expected_mask)

        out = np.zeros_like(result)
        result2 = np.choose(a, (self.ma, self.mb), out=out)
        assert result2 is out
        assert_array_equal(result2, result)

        # A plain (non-Masked) out array cannot hold a masked result.
        with pytest.raises(TypeError):
            np.choose(a, (self.ma, self.mb), out=np.zeros_like(expected))

    def test_choose_masked(self):
        # Masked selector entries: select using filled(0) and propagate
        # the selector's mask into the output.
        ma = Masked(np.array([-1, 1]), mask=[True, False]).reshape((2, 1))
        out = ma.choose((self.ma, self.mb))
        expected = np.choose(ma.filled(0), (self.a, self.b))
        expected_mask = np.choose(ma.filled(0), (self.mask_a, self.mask_b)) | ma.mask
        assert_array_equal(out.unmasked, expected)
        assert_array_equal(out.mask, expected_mask)

        # The raw -1 selector is out of range without the mask's cover.
        with pytest.raises(ValueError):
            ma.unmasked.choose((self.ma, self.mb))

    @pytest.mark.parametrize("default", [-1.0, np.ma.masked, Masked(-1, mask=True)])
    def test_select(self, default):
        a, mask_a, ma = self.a, self.mask_a, self.ma
        out = np.select([a < 1.5, a > 3.5], [ma, ma + 1], default=default)
        expected = np.select(
            [a < 1.5, a > 3.5],
            [a, a + 1],
            default=-1 if default is not np.ma.masked else 0,
        )
        expected_mask = np.select(
            [a < 1.5, a > 3.5],
            [mask_a, mask_a],
            default=getattr(default, "mask", False),
        )
        assert_array_equal(out.unmasked, expected)
        assert_array_equal(out.mask, expected_mask)

    def test_real_if_close(self):
        a = np.array([1 + 0j, 0 + 1j, 1 + 1j, 0 + 0j])
        mask_a = np.array([True, False, True, False])
        ma = Masked(a, mask=mask_a)
        out = np.real_if_close(ma)
        expected = np.real_if_close(a)
        assert_array_equal(out.unmasked, expected)
        assert_array_equal(out.mask, mask_a)

    def test_tril(self):
        self.check(np.tril)

    def test_triu(self):
        self.check(np.triu)

    def test_unwrap(self):
        self.check(np.unwrap)

    def test_nan_to_num(self):
        self.check(np.nan_to_num)
        ma = Masked([np.nan, 1.0], mask=[True, False])
        o = np.nan_to_num(ma, copy=False)
        assert_masked_equal(o, Masked([0.0, 1.0], mask=[True, False]))
        # copy=False works in place, so the result is the input object.
        assert ma is o
TestUfuncLike
python
PrefectHQ__prefect
src/prefect/server/events/filters.py
{ "start": 18378, "end": 22681 }
class ____(EventDataFilter):
    # Matches an event when ANY of its resources (the primary resource or a
    # related one) satisfies ALL of the configured criteria.  (Comment rather
    # than a class docstring: this is a pydantic model, and a docstring would
    # surface as the generated schema description.)

    id: Optional[list[str]] = Field(
        default=None, description="Only include events for resources with these IDs"
    )
    id_prefix: Optional[list[str]] = Field(
        default=None,
        description=(
            "Only include events for resources with IDs starting with these prefixes"
        ),
    )
    labels: Optional[ResourceSpecification] = Field(
        default=None,
        description="Only include events for related resources with these labels",
    )

    def includes(self, event: Event) -> bool:
        # The primary resource is checked alongside all related resources;
        # one match suffices.
        resources = [event.resource] + event.related
        if not any(self._includes(resource) for resource in resources):
            return False
        return True

    def _includes(self, resource: Resource) -> bool:
        # Each configured criterion must hold for this single resource.
        if self.id:
            if not any(resource.id == resource_id for resource_id in self.id):
                return False
        if self.id_prefix:
            if not any(resource.id.startswith(prefix) for prefix in self.id_prefix):
                return False
        if self.labels:
            if not self.labels.matches(resource):
                return False
        return True

    @db_injector
    def build_where_clauses(
        self, db: PrefectDBInterface
    ) -> Sequence["ColumnExpressionArgument[bool]"]:
        """Translate this filter into SQLAlchemy WHERE clauses over events."""
        filters: list["ColumnExpressionArgument[bool]"] = []

        if self.id:
            filters.append(db.EventResource.resource_id.in_(self.id))
        if self.id_prefix:
            filters.append(
                sa.or_(
                    *[
                        db.EventResource.resource_id.startswith(prefix)
                        for prefix in self.id_prefix
                    ]
                )
            )

        if self.labels:
            label_filters: list[ColumnElement[bool]] = []
            labels = self.labels.deepcopy()

            # On the event_resources table, resource_id and resource_role are unpacked
            # into columns, so we should search there for them
            if resource_ids := labels.pop("prefect.resource.id", None):
                label_ops = LabelOperations(resource_ids)

                resource_id_column = db.EventResource.resource_id

                if values := label_ops.positive.simple:
                    label_filters.append(resource_id_column.in_(values))
                if values := label_ops.negative.simple:
                    label_filters.append(resource_id_column.notin_(values))
                for prefix in label_ops.positive.prefixes:
                    label_filters.append(resource_id_column.startswith(prefix))
                for prefix in label_ops.negative.prefixes:
                    label_filters.append(sa.not_(resource_id_column.startswith(prefix)))

            if roles := labels.pop("prefect.resource.role", None):
                label_filters.append(db.EventResource.resource_role.in_(roles))

            if labels:
                # Remaining labels live in the JSON `resource` column.
                for _, (label, values) in enumerate(labels.items()):
                    # Empty label value arrays should match nothing
                    if not values:
                        label_filters.append(sa.false())
                        continue

                    label_ops = LabelOperations(values)

                    label_column = db.EventResource.resource[label].astext

                    # Negative matches require the label to be present at all.
                    if label_ops.negative.simple or label_ops.negative.prefixes:
                        label_filters.append(label_column.is_not(None))

                    if values := label_ops.positive.simple:
                        label_filters.append(label_column.in_(values))
                    if values := label_ops.negative.simple:
                        label_filters.append(label_column.notin_(values))
                    for prefix in label_ops.positive.prefixes:
                        label_filters.append(label_column.startswith(prefix))
                    for prefix in label_ops.negative.prefixes:
                        label_filters.append(sa.not_(label_column.startswith(prefix)))

            filters.append(sa.and_(*label_filters))

        if filters:
            # Scope the resource criteria to the events the top-level filter
            # selects, via an IN-subquery on event id.
            assert self._top_level_filter is not None
            filters = [db.Event.id.in_(self._top_level_filter.where(*filters))]

        return filters
EventAnyResourceFilter
python
psf__requests
src/requests/exceptions.py
{ "start": 3785, "end": 3917 }
class ____(RequestException):
    """Requests encountered an error when trying to rewind a body.

    Typically raised when a streamed request body (e.g. a non-seekable
    file-like object) would need to be re-sent but cannot be seeked back
    to its starting position.
    """


# Warnings
UnrewindableBodyError
python
pennersr__django-allauth
tests/apps/socialaccount/providers/pinterest/tests.py
{ "start": 294, "end": 1647 }
class ____(OAuth2TestsMixin, TestCase):
    """OAuth2 login tests for the Pinterest provider.

    The shared OAuth2TestsMixin drives the default login flow using
    get_mocked_response(); test_login_v5 exercises the v5 API payload
    shape via the API_VERSION provider setting.
    """

    provider_id = PinterestProvider.id

    def get_mocked_response(self):
        # Pre-v5 profile payload: user fields nested under "data".
        return MockedResponse(
            HTTPStatus.OK,
            """
        {
            "data": {
                "url": "https://www.pinterest.com/muravskiyyarosl/",
                "first_name": "Jane",
                "last_name": "Doe",
                "id": "351247977031674143"
            }
        }
        """,
        )

    def get_expected_to_str(self):
        return "Jane Doe"

    @override_settings(
        SOCIALACCOUNT_AUTO_SIGNUP=False,
        SOCIALACCOUNT_PROVIDERS={
            "pinterest": {
                "API_VERSION": "v5",
            }
        },
    )
    def test_login_v5(self):
        # v5 payload is flat (no "data" wrapper) and uses "username".
        self.provider_id = PinterestProvider.id
        resp = self.login(
            MockedResponse(
                HTTPStatus.OK,
                """
            {
                "account_type": "BUSINESS",
                "profile_image": "https://i.pinimg.com/280x280_RS/5c/88/2f/5c882f4b02468fcd6cda2ce569c2c166.jpg",
                "website_url": "https://sns-sdks.github.io/",
                "username": "enjoylifebot"
            }
            """,
            ),
        )
        assert resp.status_code == HTTPStatus.FOUND
PinterestTests
python
pytorch__pytorch
torch/_inductor/cudagraph_utils.py
{ "start": 833, "end": 966 }
class ____:
    """Unique counter of a function wrapped in cudagraphify_impl."""

    # Monotonically assigned identifier for the wrapped function.
    id: int


@dataclasses.dataclass(frozen=True)
FunctionID
python
huggingface__transformers
src/transformers/models/owlv2/modeling_owlv2.py
{ "start": 40594, "end": 42670 }
class ____(Owlv2PreTrainedModel):
    # Standalone OWLv2 vision tower: wraps Owlv2VisionTransformer and
    # forwards pixel values straight through to it.  (Comment rather than a
    # docstring so the transformers docstring machinery is left untouched.)
    config: Owlv2VisionConfig
    main_input_name = "pixel_values"
    input_modalities = ("image",)

    def __init__(self, config: Owlv2VisionConfig):
        super().__init__(config)
        self.vision_model = Owlv2VisionTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        # Pixel values enter the model through the patch embedding layer.
        return self.vision_model.embeddings.patch_embedding

    @auto_docstring
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, BaseModelOutputWithPooling]:
        r"""
        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, Owlv2VisionModel

        >>> model = Owlv2VisionModel.from_pretrained("google/owlv2-base-patch16")
        >>> processor = AutoProcessor.from_pretrained("google/owlv2-base-patch16")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled CLS states
        ```"""
        # Thin delegation: all computation happens in the vision transformer.
        return self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )


@auto_docstring
# Copied from transformers.models.owlvit.modeling_owlvit.OwlViTModel with google/owlvit-base-patch32->google/owlv2-base-patch16-ensemble, OWLVIT->OWLV2,OwlViT->Owlv2,owlvit->owlv2,OWL-ViT->OWLv2
Owlv2VisionModel
python
getsentry__sentry
src/sentry/api/serializers/models/organization.py
{ "start": 10327, "end": 18478 }
class ____(Serializer):
    """Serializes Organization models for the API: avatar, auth-provider
    status, and (optionally) the organization's resolved feature set."""

    def get_attrs(
        self, item_list: Sequence[Organization], user: User | RpcUser | AnonymousUser, **kwargs: Any
    ) -> MutableMapping[Organization, MutableMapping[str, Any]]:
        # Batch-fetch avatars and auth configs up front to avoid per-item
        # queries/RPC calls.
        avatars = {
            a.organization_id: a
            for a in OrganizationAvatar.objects.filter(organization__in=item_list)
        }

        configs_by_org_id: Mapping[int, RpcOrganizationAuthConfig] = {
            config.organization_id: config
            for config in auth_service.get_org_auth_config(
                organization_ids=[o.id for o in item_list]
            )
        }
        auth_providers = self._serialize_auth_providers(configs_by_org_id, item_list, user)

        data: MutableMapping[Organization, MutableMapping[str, Any]] = {}
        for item in item_list:
            data[item] = {
                "avatar": avatars.get(item.id),
                "auth_provider": auth_providers.get(item.id, None),
                "has_api_key": configs_by_org_id[item.id].has_api_key,
            }

        return data

    def _serialize_auth_providers(
        self,
        configs_by_org_id: Mapping[int, RpcOrganizationAuthConfig],
        item_list: Sequence[Organization],
        user: User | RpcUser | AnonymousUser,
    ) -> Mapping[int, Any]:
        # Serialize each organization's auth provider with the org passed in
        # so provider-specific fields can be resolved.
        from .auth_provider import AuthProviderSerializer

        auth_provider_serializer = AuthProviderSerializer()
        return {
            o.id: serialize(
                configs_by_org_id[o.id].auth_provider,
                user=user,
                serializer=auth_provider_serializer,
                organization=o,
            )
            for o in item_list
        }

    def get_feature_set(
        self, obj: Organization, attrs: Mapping[str, Any], user: User | RpcUser | AnonymousUser
    ) -> list[str]:
        """Resolve the org-scoped feature flags for ``obj`` into a sorted
        list of names with the organization prefix stripped."""
        from sentry import features

        # Retrieve all registered organization features
        org_features = [
            feature
            for feature in features.all(
                feature_type=features.OrganizationFeature, api_expose_only=True
            ).keys()
            if feature.startswith(_ORGANIZATION_SCOPE_PREFIX)
        ]
        feature_set = set()

        with sentry_sdk.start_span(op="features.check", name="check batch features"):
            # Check features in batch using the entity handler
            batch_features = features.batch_has(org_features, actor=user, organization=obj)

            # batch_has has found some features
            if batch_features:
                for feature_name, active in batch_features.get(
                    f"organization:{obj.id}", {}
                ).items():
                    if active:
                        # Remove organization prefix
                        feature_set.add(feature_name[len(_ORGANIZATION_SCOPE_PREFIX) :])

                    # This feature_name was found via `batch_has`, don't check again using `has`
                    org_features.remove(feature_name)

        with sentry_sdk.start_span(op="features.check", name="check individual features"):
            # Remaining features should not be checked via the entity handler
            for feature_name in org_features:
                if features.has(feature_name, obj, actor=user, skip_entity=True):
                    # Remove the organization scope prefix
                    feature_set.add(feature_name[len(_ORGANIZATION_SCOPE_PREFIX) :])

        if "onboarding" in feature_set:
            if obj.date_added > START_DATE_FOR_CHECKING_ONBOARDING_COMPLETION:
                all_required_onboarding_tasks_complete = OrganizationOption.objects.filter(
                    organization_id=obj.id, key="onboarding:complete"
                ).exists()
                # Do not include the onboarding feature if all required onboarding tasks are completed
                # The required tasks are defined in https://github.com/getsentry/sentry/blob/797e317dadcec25b0426851c6b29c0e1d2d0c3c2/src/sentry/models/organizationonboardingtask.py#L147
                if all_required_onboarding_tasks_complete:
                    feature_set.remove("onboarding")
            else:
                # Retaining the old logic to prevent older organizations from seeing the quick start sidebar again
                if OrganizationOption.objects.filter(organization=obj).exists():
                    feature_set.remove("onboarding")

        # Include api-keys feature if they previously had any api-keys
        if "api-keys" not in feature_set and attrs["has_api_key"]:
            feature_set.add("api-keys")

        # Organization flag features (not provided through the features module)
        options_as_features = OrganizationOption.objects.filter(
            organization=obj, key__in=ORGANIZATION_OPTIONS_AS_FEATURES.keys()
        )
        for option in options_as_features:
            for option_feature, option_function in ORGANIZATION_OPTIONS_AS_FEATURES[option.key]:
                if option_function(option):
                    feature_set.add(option_feature)

        if getattr(obj.flags, "allow_joinleave"):
            feature_set.add("open-membership")

        if not getattr(obj.flags, "disable_shared_issues"):
            feature_set.add("shared-issues")

        # mep-rollout-flag only makes sense alongside dynamic sampling.
        if "dynamic-sampling" not in feature_set and "mep-rollout-flag" in feature_set:
            feature_set.remove("mep-rollout-flag")

        if options.get("performance.hide-metrics-ui") and "mep-rollout-flag" in feature_set:
            feature_set.remove("mep-rollout-flag")

        return sorted(feature_set)

    def serialize(
        self,
        obj: Organization,
        attrs: Mapping[str, Any],
        user: User | RpcUser | AnonymousUser,
        **kwargs: Any,
    ) -> OrganizationSerializerResponse:
        """Build the API response payload for a single organization."""
        if attrs.get("avatar"):
            avatar: SerializedAvatarFields = {
                "avatarType": attrs["avatar"].get_avatar_type_display(),
                "avatarUuid": attrs["avatar"].ident if attrs["avatar"].file_id else None,
                "avatarUrl": attrs["avatar"].absolute_url(),
            }
        else:
            # Fall back to an initials-based avatar when none is uploaded.
            avatar = {"avatarType": "letter_avatar", "avatarUuid": None, "avatarUrl": None}

        status = OrganizationStatus(obj.status)

        include_feature_flags = kwargs.get("include_feature_flags", True)

        has_auth_provider = attrs.get("auth_provider", None) is not None

        context: OrganizationSerializerResponse = {
            "id": str(obj.id),
            "slug": obj.slug,
            "status": {"id": status.name.lower(), "name": status.label},
            "name": obj.name or obj.slug,
            "dateCreated": obj.date_added,
            "isEarlyAdopter": bool(obj.flags.early_adopter),
            "require2FA": bool(obj.flags.require_2fa),
            # requireEmailVerification has been deprecated
            "requireEmailVerification": False,
            "avatar": avatar,
            "allowMemberInvite": not obj.flags.disable_member_invite,
            "allowMemberProjectCreation": not obj.flags.disable_member_project_creation,
            "allowSuperuserAccess": not obj.flags.prevent_superuser_access,
            "links": {
                "organizationUrl": generate_organization_url(obj.slug),
                "regionUrl": generate_region_url(),
            },
            "hasAuthProvider": has_auth_provider,
        }

        if include_feature_flags:
            context["features"] = self.get_feature_set(obj, attrs, user)
            context["extraOptions"] = {
                "traces": {
                    "spansExtractionDate": options.get("performance.traces.spans_extraction_date"),
                    "checkSpanExtractionDate": options.get(
                        "performance.traces.check_span_extraction_date"
                    ),
                }
            }

        if "access" in kwargs:
            context["access"] = kwargs["access"].scopes

        tasks_to_serialize = list(onboarding_tasks.fetch_onboarding_tasks(obj, user))
        context["onboardingTasks"] = serialize(tasks_to_serialize, user)

        return context
OrganizationSerializer
python
facebook__pyre-check
client/commands/server_state.py
{ "start": 812, "end": 1109 }
class ____(enum.Enum):
    """States of the client's connection to the server."""

    READY = "READY"
    DISCONNECTED = "DISCONNECTED"
    NOT_CONNECTED = "NOT_CONNECTED"
    SUSPENDED = "SUSPENDED"
    BUCK_BUILDING = "BUCK_BUILDING"
    INCREMENTAL_CHECK = "INCREMENTAL_CHECK"
    STARTING = "STARTING"


@dataclasses.dataclass(frozen=True)
ConnectionStatus
python
kennethreitz__tablib
src/tablib/packages/dbfpy/fields.py
{ "start": 6732, "end": 7225 }
class ____(DbfFieldDef):
    """Definition of the character field."""

    typeCode = "C"
    # Raw DBF character data is handled as bytes on read, hence b''.
    defaultValue = b''

    def decodeValue(self, value):
        """Return string object.

        Return value is a ``value`` argument with stripped right spaces.

        """
        return value.rstrip(b' ').decode('utf-8')

    def encodeValue(self, value):
        """Return raw data string encoded from a ``value``."""
        # NOTE(review): this returns a str padded/truncated to the field
        # width, while decodeValue consumes bytes -- confirm the writer
        # encodes the result to bytes before storage.
        return str(value)[:self.length].ljust(self.length)
DbfCharacterFieldDef
python
optuna__optuna
optuna/distributions.py
{ "start": 14357, "end": 15657 }
class ____(IntDistribution):
    """A uniform distribution on integers.

    This object is instantiated by :func:`~optuna.trial.Trial.suggest_int`, and passed to
    :mod:`~optuna.samplers` in general.

    .. note::
        If the range :math:`[\\mathsf{low}, \\mathsf{high}]` is not divisible by
        :math:`\\mathsf{step}`, :math:`\\mathsf{high}` will be replaced with the maximum of
        :math:`k \\times \\mathsf{step} + \\mathsf{low} < \\mathsf{high}`, where :math:`k` is
        an integer.

    Attributes:
        low:
            Lower endpoint of the range of the distribution. ``low`` is included in the range.
            ``low`` must be less than or equal to ``high``.
        high:
            Upper endpoint of the range of the distribution. ``high`` is included in the range.
            ``high`` must be greater than or equal to ``low``.
        step:
            A discretization step. ``step`` must be a positive integer.
    """

    def __init__(self, low: int, high: int, step: int = 1) -> None:
        # Linear-scale variant: delegate to IntDistribution with log=False.
        super().__init__(low=low, high=high, log=False, step=step)

    def _asdict(self) -> dict:
        # Drop the always-False "log" key so the serialized form matches
        # this class's constructor signature.
        d = copy.deepcopy(self.__dict__)
        d.pop("log")
        return d


@deprecated_class("3.0.0", "6.0.0", text=_int_distribution_deprecated_msg)
IntUniformDistribution
python
run-llama__llama_index
llama-index-core/llama_index/core/indices/multi_modal/base.py
{ "start": 1539, "end": 17686 }
class ____(VectorStoreIndex):
    """
    Multi-Modal Vector Store Index.

    Maintains two vector stores: the default (text) store inherited from
    VectorStoreIndex and an additional image store under `image_namespace`.

    Args:
        use_async (bool): Whether to use asynchronous calls. Defaults to False.
        show_progress (bool): Whether to show tqdm progress bars. Defaults to False.
        store_nodes_override (bool): set to True to always store Node objects in index
            store and document store even if vector store keeps text. Defaults to False

    """

    image_namespace = "image"
    index_struct_cls = MultiModelIndexDict

    def __init__(
        self,
        nodes: Optional[Sequence[BaseNode]] = None,
        index_struct: Optional[MultiModelIndexDict] = None,
        embed_model: Optional[BaseEmbedding] = None,
        storage_context: Optional[StorageContext] = None,
        use_async: bool = False,
        store_nodes_override: bool = False,
        show_progress: bool = False,
        # Image-related kwargs
        # image_vector_store going to be deprecated. image_store can be passed from storage_context
        # keep image_vector_store here for backward compatibility
        image_vector_store: Optional[BasePydanticVectorStore] = None,
        image_embed_model: EmbedType = "clip:ViT-B/32",
        is_image_to_text: bool = False,
        # is_image_vector_store_empty is used to indicate whether image_vector_store is empty
        # those flags are used for cases when only one vector store is used
        is_image_vector_store_empty: bool = False,
        is_text_vector_store_empty: bool = False,
        **kwargs: Any,
    ) -> None:
        """Initialize params."""
        image_embed_model = resolve_embed_model(
            image_embed_model, callback_manager=kwargs.get("callback_manager")
        )
        assert isinstance(image_embed_model, MultiModalEmbedding)
        self._image_embed_model = image_embed_model
        self._is_image_to_text = is_image_to_text
        self._is_image_vector_store_empty = is_image_vector_store_empty
        self._is_text_vector_store_empty = is_text_vector_store_empty
        storage_context = storage_context or StorageContext.from_defaults()

        # An explicitly passed image store takes precedence over whatever the
        # storage context already holds under the image namespace.
        if image_vector_store is not None:
            if self.image_namespace not in storage_context.vector_stores:
                storage_context.add_vector_store(
                    image_vector_store, self.image_namespace
                )
            else:
                # overwrite image_store from storage_context
                storage_context.vector_stores[self.image_namespace] = image_vector_store

        if self.image_namespace not in storage_context.vector_stores:
            storage_context.add_vector_store(SimpleVectorStore(), self.image_namespace)

        self._image_vector_store = storage_context.vector_stores[self.image_namespace]

        super().__init__(
            nodes=nodes,
            index_struct=index_struct,
            embed_model=embed_model,
            storage_context=storage_context,
            show_progress=show_progress,
            use_async=use_async,
            store_nodes_override=store_nodes_override,
            **kwargs,
        )

    @property
    def image_vector_store(self) -> BasePydanticVectorStore:
        return self._image_vector_store

    @property
    def image_embed_model(self) -> MultiModalEmbedding:
        return self._image_embed_model

    @property
    def is_image_vector_store_empty(self) -> bool:
        return self._is_image_vector_store_empty

    @property
    def is_text_vector_store_empty(self) -> bool:
        return self._is_text_vector_store_empty

    def as_retriever(self, **kwargs: Any) -> MultiModalVectorIndexRetriever:
        return MultiModalVectorIndexRetriever(
            self,
            node_ids=list(self.index_struct.nodes_dict.values()),
            **kwargs,
        )

    def as_query_engine(
        self,
        llm: Optional[LLMType] = None,
        **kwargs: Any,
    ) -> SimpleMultiModalQueryEngine:
        retriever = cast(MultiModalVectorIndexRetriever, self.as_retriever(**kwargs))

        llm = llm or Settings.llm
        assert isinstance(llm, (BaseLLM, MultiModalLLM))
        # Heuristic multi-modal check on the class name only; warn, don't fail.
        class_name = llm.class_name()
        if "multi" not in class_name:
            logger.warning(
                f"Warning: {class_name} does not appear to be a multi-modal LLM. This may not work as expected."
            )

        return SimpleMultiModalQueryEngine(
            retriever,
            multi_modal_llm=llm,  # type: ignore
            **kwargs,
        )

    def as_chat_engine(
        self,
        chat_mode: ChatMode = ChatMode.BEST,
        llm: Optional[LLMType] = None,
        **kwargs: Any,
    ) -> BaseChatEngine:
        llm = llm or Settings.llm
        assert isinstance(llm, (BaseLLM, MultiModalLLM))

        class_name = llm.class_name()
        if "multi" not in class_name:
            logger.warning(
                f"Warning: {class_name} does not appear to be a multi-modal LLM. This may not work as expected."
            )

        if chat_mode == ChatMode.CONTEXT:
            from llama_index.core.chat_engine.multi_modal_context import (
                MultiModalContextChatEngine,
            )

            return MultiModalContextChatEngine.from_defaults(
                retriever=self.as_retriever(**kwargs),
                multi_modal_llm=llm,
                **kwargs,
            )

        # TODO: handle CONDENSE_PLUS_CONTEXT
        return super().as_chat_engine(chat_mode, llm, **kwargs)

    @classmethod
    def from_vector_store(
        cls,
        vector_store: BasePydanticVectorStore,
        embed_model: Optional[EmbedType] = None,
        # Image-related kwargs
        image_vector_store: Optional[BasePydanticVectorStore] = None,
        image_embed_model: EmbedType = "clip",
        **kwargs: Any,
    ) -> "MultiModalVectorStoreIndex":
        # Alternate constructor: wrap an existing, populated text vector store.
        if not vector_store.stores_text:
            raise ValueError(
                "Cannot initialize from a vector store that does not store text."
            )

        storage_context = StorageContext.from_defaults(vector_store=vector_store)
        return cls(
            nodes=[],
            storage_context=storage_context,
            image_vector_store=image_vector_store,
            image_embed_model=image_embed_model,
            embed_model=(
                resolve_embed_model(
                    embed_model, callback_manager=kwargs.get("callback_manager")
                )
                if embed_model
                else Settings.embed_model
            ),
            **kwargs,
        )

    def _get_node_with_embedding(
        self,
        nodes: Sequence[BaseNode],
        show_progress: bool = False,
        is_image: bool = False,
    ) -> List[BaseNode]:
        """
        Get tuples of id, node, and embedding.

        Allows us to store these nodes in a vector store.
        Embeddings are called in batches.

        """
        id_to_text_embed_map = None

        if is_image:
            assert all(isinstance(node, ImageNode) for node in nodes)
            id_to_embed_map = embed_image_nodes(
                nodes,  # type: ignore
                embed_model=self._image_embed_model,
                show_progress=show_progress,
            )

            # text field is populate, so embed them
            if self._is_image_to_text:
                id_to_text_embed_map = embed_nodes(
                    nodes,
                    embed_model=self._embed_model,
                    show_progress=show_progress,
                )
                # TODO: refactor this change of image embed model to same as text
                self._image_embed_model = self._embed_model  # type: ignore
        else:
            id_to_embed_map = embed_nodes(
                nodes,
                embed_model=self._embed_model,
                show_progress=show_progress,
            )

        results = []
        for node in nodes:
            embedding = id_to_embed_map[node.node_id]
            result = node.model_copy()
            result.embedding = embedding
            if is_image and id_to_text_embed_map:
                assert isinstance(result, ImageNode)
                text_embedding = id_to_text_embed_map[node.node_id]
                result.text_embedding = text_embedding
                result.embedding = (
                    text_embedding  # TODO: re-factor to make use of both embeddings
                )
            results.append(result)
        return results

    async def _aget_node_with_embedding(
        self,
        nodes: Sequence[BaseNode],
        show_progress: bool = False,
        is_image: bool = False,
    ) -> List[BaseNode]:
        """
        Asynchronously get tuples of id, node, and embedding.

        Allows us to store these nodes in a vector store.
        Embeddings are called in batches.

        """
        id_to_text_embed_map = None

        if is_image:
            assert all(isinstance(node, ImageNode) for node in nodes)
            id_to_embed_map = await async_embed_image_nodes(
                nodes,  # type: ignore
                embed_model=self._image_embed_model,
                show_progress=show_progress,
            )

            if self._is_image_to_text:
                id_to_text_embed_map = await async_embed_nodes(
                    nodes,
                    embed_model=self._embed_model,
                    show_progress=show_progress,
                )
                # TODO: refactor this change of image embed model to same as text
                self._image_embed_model = self._embed_model  # type: ignore
        else:
            id_to_embed_map = await async_embed_nodes(
                nodes,
                embed_model=self._embed_model,
                show_progress=show_progress,
            )

        results = []
        for node in nodes:
            embedding = id_to_embed_map[node.node_id]
            result = node.model_copy()
            result.embedding = embedding
            if is_image and id_to_text_embed_map:
                assert isinstance(result, ImageNode)
                text_embedding = id_to_text_embed_map[node.node_id]
                result.text_embedding = text_embedding
                result.embedding = (
                    text_embedding  # TODO: re-factor to make use of both embeddings
                )
            results.append(result)
        return results

    async def _async_add_nodes_to_index(
        self,
        index_struct: IndexDict,
        nodes: Sequence[BaseNode],
        show_progress: bool = False,
        **insert_kwargs: Any,
    ) -> None:
        """Asynchronously add nodes to index."""
        if not nodes:
            return

        # Partition incoming nodes: image nodes go to the image store,
        # text-bearing nodes (including image nodes with text) to the text store.
        image_nodes: List[ImageNode] = []
        text_nodes: List[BaseNode] = []
        new_text_ids: List[str] = []
        new_img_ids: List[str] = []

        for node in nodes:
            if isinstance(node, ImageNode):
                image_nodes.append(node)
            if isinstance(node, TextNode) and node.text:
                text_nodes.append(node)

        if len(text_nodes) > 0:
            # embed all nodes as text - include image nodes that have text attached
            text_nodes = await self._aget_node_with_embedding(
                text_nodes, show_progress, is_image=False
            )
            new_text_ids = await self.storage_context.vector_stores[
                DEFAULT_VECTOR_STORE
            ].async_add(text_nodes, **insert_kwargs)
        else:
            self._is_text_vector_store_empty = True

        if len(image_nodes) > 0:
            # embed image nodes as images directly
            image_nodes = await self._aget_node_with_embedding(  # type: ignore
                image_nodes,
                show_progress,
                is_image=True,
            )
            new_img_ids = await self.storage_context.vector_stores[
                self.image_namespace
            ].async_add(image_nodes, **insert_kwargs)
        else:
            self._is_image_vector_store_empty = True

        # if the vector store doesn't store text, we need to add the nodes to the
        # index struct and document store
        all_nodes = text_nodes + image_nodes
        all_new_ids = new_text_ids + new_img_ids
        if not self._vector_store.stores_text or self._store_nodes_override:
            for node, new_id in zip(all_nodes, all_new_ids):
                # NOTE: remove embedding from node to avoid duplication
                node_without_embedding = node.model_copy()
                node_without_embedding.embedding = None

                index_struct.add_node(node_without_embedding, text_id=new_id)
                self._docstore.add_documents(
                    [node_without_embedding], allow_update=True
                )

    def _add_nodes_to_index(
        self,
        index_struct: IndexDict,
        nodes: Sequence[BaseNode],
        show_progress: bool = False,
        **insert_kwargs: Any,
    ) -> None:
        """Add document to index."""
        if not nodes:
            return

        # Synchronous twin of _async_add_nodes_to_index; same partitioning.
        image_nodes: List[ImageNode] = []
        text_nodes: List[TextNode] = []
        new_text_ids: List[str] = []
        new_img_ids: List[str] = []

        for node in nodes:
            if isinstance(node, ImageNode):
                image_nodes.append(node)
            if isinstance(node, TextNode) and node.text:
                text_nodes.append(node)

        if len(text_nodes) > 0:
            # embed all nodes as text - include image nodes that have text attached
            text_nodes = self._get_node_with_embedding(  # type: ignore
                text_nodes, show_progress, is_image=False
            )
            new_text_ids = self.storage_context.vector_stores[DEFAULT_VECTOR_STORE].add(
                text_nodes, **insert_kwargs
            )
        else:
            self._is_text_vector_store_empty = True

        if len(image_nodes) > 0:
            # embed image nodes as images directly
            # check if we should use text embedding for images instead of default
            image_nodes = self._get_node_with_embedding(  # type: ignore
                image_nodes,
                show_progress,
                is_image=True,
            )
            new_img_ids = self.storage_context.vector_stores[self.image_namespace].add(
                image_nodes, **insert_kwargs
            )
        else:
            self._is_image_vector_store_empty = True

        # if the vector store doesn't store text, we need to add the nodes to the
        # index struct and document store
        all_nodes = text_nodes + image_nodes
        all_new_ids = new_text_ids + new_img_ids
        if not self._vector_store.stores_text or self._store_nodes_override:
            for node, new_id in zip(all_nodes, all_new_ids):
                # NOTE: remove embedding from node to avoid duplication
                node_without_embedding = node.model_copy()
                node_without_embedding.embedding = None

                index_struct.add_node(node_without_embedding, text_id=new_id)
                self._docstore.add_documents(
                    [node_without_embedding], allow_update=True
                )

    def delete_ref_doc(
        self, ref_doc_id: str, delete_from_docstore: bool = False, **delete_kwargs: Any
    ) -> None:
        """Delete a document and it's nodes by using ref_doc_id."""
        # delete from all vector stores
        for vector_store in self._storage_context.vector_stores.values():
            vector_store.delete(ref_doc_id)

        if self._store_nodes_override or self._vector_store.stores_text:
            ref_doc_info = self._docstore.get_ref_doc_info(ref_doc_id)
            if ref_doc_info is not None:
                for node_id in ref_doc_info.node_ids:
                    self._index_struct.delete(node_id)
                    self._vector_store.delete(node_id)

        if delete_from_docstore:
            self._docstore.delete_ref_doc(ref_doc_id, raise_error=False)

        self._storage_context.index_store.add_index_struct(self._index_struct)
MultiModalVectorStoreIndex
python
numba__numba
numba/core/typed_passes.py
{ "start": 12867, "end": 13928 }
class ____(FunctionPass): _name = "parfor_fusion_pass" def __init__(self): FunctionPass.__init__(self) def run_pass(self, state): """ Do fusion of parfor nodes. """ # Ensure we have an IR and type information. assert state.func_ir parfor_pass = _parfor_ParforFusionPass(state.func_ir, state.typemap, state.calltypes, state.return_type, state.typingctx, state.targetctx, state.flags.auto_parallel, state.flags, state.metadata, state.parfor_diagnostics) parfor_pass.run() return True @register_pass(mutates_CFG=True, analysis_only=False)
ParforFusionPass
python
fastai__fastai
nbs/examples/distrib_pytorch.py
{ "start": 151, "end": 1275 }
class ____(nn.Sequential): def __init__(self): super().__init__( nn.Conv2d(1, 32, 3, 1), nn.ReLU(), nn.Conv2d(32, 64, 3, 1), nn.MaxPool2d(2), nn.Dropout2d(0.25), Flatten(), nn.Linear(9216, 128), nn.ReLU(), nn.Dropout2d(0.5), nn.Linear(128, 10), nn.LogSoftmax(dim=1) ) batch_size,test_batch_size = 256,512 epochs,lr = 5,1e-2 kwargs = {'num_workers': 1, 'pin_memory': True} transform=transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) train_loader = DataLoader( datasets.MNIST('../data', train=True, download=True, transform=transform), batch_size=batch_size, shuffle=True, **kwargs) test_loader = DataLoader( datasets.MNIST('../data', train=False, transform=transform), batch_size=test_batch_size, shuffle=True, **kwargs) if __name__ == '__main__': data = DataLoaders(train_loader, test_loader) learn = Learner(data, Net(), loss_func=F.nll_loss, opt_func=Adam, metrics=accuracy) with learn.distrib_ctx(): learn.fit_one_cycle(epochs, lr)
Net
python
dagster-io__dagster
python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/core/pex_builder/deploy.py
{ "start": 1301, "end": 8506 }
class ____: deps_cache_from_tag: Optional[str] deps_cache_to_tag: Optional[str] def build_locations( dagster_cloud_url: str, dagster_cloud_api_token: str, locations: list[parse_workspace.Location], output_directory: str, upload_pex: bool, deps_cache_tags: DepsCacheTags, python_version: version.Version, build_method: deps.BuildMethod = deps.BuildMethod.DOCKER_FALLBACK, ) -> list[LocationBuild]: location_builds = [] for location in locations: local_packages, deps_requirements = deps.get_deps_requirements( location.directory, python_version=python_version, ) location_builds.append( LocationBuild( location=location, local_packages=local_packages, deps_requirements=deps_requirements, ) ) # dedup requirements so each is only built once builds_for_requirements_hash: dict[str, list[LocationBuild]] = {} for location_build in location_builds: requirements_hash = location_build.deps_requirements.hash builds_for_requirements_hash.setdefault(requirements_hash, []).append(location_build) # build each deps pex once and assign to all related builds for requirements_hash in builds_for_requirements_hash: builds = builds_for_requirements_hash[requirements_hash] deps_requirements = builds[0].deps_requirements # if a --deps-cache-from is specified, don't build deps.pex files if it is already published if upload_pex and deps_cache_tags.deps_cache_from_tag: published_deps_pex_info = pex_registry.get_cached_deps_details( dagster_cloud_url, dagster_cloud_api_token, deps_requirements.hash, deps_cache_tags.deps_cache_from_tag, ) else: published_deps_pex_info = None if published_deps_pex_info: published_deps_pex = published_deps_pex_info["deps_pex_name"] ui.print( f"Found published deps.pex {published_deps_pex} for requirements_hash {deps_requirements.hash}, cache_tag {deps_cache_tags.deps_cache_from_tag}, " "skipping rebuild.", ) for location_build in builds: location_build.published_deps_pex = published_deps_pex location_build.dagster_version = published_deps_pex_info["dagster_version"] 
else: ui.print( f"No published deps.pex found for requirements_hash {deps_requirements.hash}, cache_tag {deps_cache_tags.deps_cache_from_tag}, will rebuild.", ) try: deps_pex_path, dagster_version = deps.build_deps_from_requirements( deps_requirements, output_directory, build_method=build_method, ) except deps.DepsBuildFailure as err: logging.error("Failed to build dependencies: %s", err.stderr) sys.exit(1) for location_build in builds: location_build.deps_pex_path = deps_pex_path location_build.dagster_version = dagster_version # build each source once for location_build in location_builds: location_build.source_pex_path = source.build_source_pex( location_build.location.directory, location_build.local_packages.local_package_paths, output_directory, python_version, ) # compute pex tags for location_build in location_builds: deps_pex = ( location_build.deps_pex_path if location_build.deps_pex_path else location_build.published_deps_pex ) if not deps_pex or not location_build.source_pex_path: raise ValueError("No deps.pex or source.pex") location_build.pex_tag = util.build_pex_tag([deps_pex, location_build.source_pex_path]) return location_builds def get_user_specified_base_image_for( dagster_cloud_url: str, dagster_cloud_api_token: str, location_build: LocationBuild ) -> Optional[str]: # Full path to base image is supplied base_image = os.getenv("SERVERLESS_BASE_IMAGE") if base_image: return base_image # Tag suffix for base image is supplied - uses custom uploaded image base_image_tag = os.getenv("SERVERLESS_BASE_IMAGE_TAG") if base_image_tag: # Point to user's registry info with this tag suffix with gql.graphql_client_from_url(dagster_cloud_url, dagster_cloud_api_token) as client: registry_info = gql.get_ecr_info(client) return f"{registry_info['registry_url']}:{base_image_tag}" return None def notify(deployment_name: Optional[str], location_name: str, action: str): if github_event: github_context.update_pr_comment( github_event, action=action, 
deployment_name=deployment_name, location_name=location_name, ) github_event: Optional[github_context.GithubEvent] = None def load_github_event(project_dir): global github_event # noqa: PLW0603 github_event = github_context.get_github_event(project_dir) @click.command() @click.argument("dagster_cloud_file", type=click.Path(exists=True)) @click.argument("build_output_dir", type=click.Path(exists=False)) @click.option( "--upload-pex", is_flag=True, show_default=True, default=False, help="Upload PEX files to registry.", ) @click.option( "--deps-cache-from", type=str, required=False, help=( "Try to reuse a pre-existing deps pex file. A deps pex file is reused if it was " "built with a --deps-cache-to value that matches this flag value, AND the requirements.txt " "and setup.py were identical." ), ) @click.option( "--deps-cache-to", type=str, required=False, help=( "Allow reusing the generated deps pex and associate with the given tag. " "See --deps-cache-from for how to reuse deps pex files." ), ) @click.option( "--update-code-location", is_flag=True, show_default=True, default=False, help="Update code location to use new PEX files.", ) @click.option( "--code-location-details", callback=util.parse_kv, help=( "Syntax: --code-location-details deployment=NAME,commit_hash=HASH. " "When not provided, details are inferred from the github action environment." ), ) @util.python_version_option() @click.option( "--build-sdists/--no-build-sdists", is_flag=True, default=False, help="Whether to build source only Python dependencies (sdists).", ) def cli( dagster_cloud_file, build_output_dir, upload_pex, deps_cache_to, deps_cache_from, update_code_location, code_location_details, python_version, build_sdists, ): logging.error( "This entrypoint is obsolete. Please use `dagster-cloud serverless" " deploy-python-executable`." ) sys.exit(1) if __name__ == "__main__": logging.basicConfig(level=logging.INFO) cli()
DepsCacheTags
python
sphinx-doc__sphinx
sphinx/domains/c/_ast.py
{ "start": 13230, "end": 14005 }
class ____(ASTPostfixOp): def __init__(self, expr: ASTExpression) -> None: self.expr = expr def __eq__(self, other: object) -> bool: if not isinstance(other, ASTPostfixArray): return NotImplemented return self.expr == other.expr def __hash__(self) -> int: return hash(self.expr) def _stringify(self, transform: StringifyTransform) -> str: return '[' + transform(self.expr) + ']' def describe_signature( self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol ) -> None: signode += addnodes.desc_sig_punctuation('[', '[') self.expr.describe_signature(signode, mode, env, symbol) signode += addnodes.desc_sig_punctuation(']', ']')
ASTPostfixArray
python
tornadoweb__tornado
tornado/test/tcpclient_test.py
{ "start": 1325, "end": 1880 }
class ____(TCPServer): def __init__(self, family): super().__init__() self.streams = [] # type: List[IOStream] self.queue = Queue() # type: Queue[IOStream] sockets = bind_sockets(0, "localhost", family) self.add_sockets(sockets) self.port = sockets[0].getsockname()[1] def handle_stream(self, stream, address): self.streams.append(stream) self.queue.put(stream) def stop(self): super().stop() for stream in self.streams: stream.close()
TestTCPServer
python
getsentry__sentry
src/sentry_plugins/bitbucket/plugin.py
{ "start": 1148, "end": 7341 }
class ____(CorePluginMixin, IssuePlugin2): description = "Integrate Bitbucket issues by linking a repository to a project." slug = "bitbucket" title: str | _StrPromise = "Bitbucket" conf_title = title conf_key = "bitbucket" auth_provider = "bitbucket" required_field = "repo" feature_descriptions = [ FeatureDescription( """ Track commits and releases (learn more [here](https://docs.sentry.io/learn/releases/)) """, IntegrationFeatures.COMMITS, ), FeatureDescription( """ Create Bitbucket issues from Sentry """, IntegrationFeatures.ISSUE_BASIC, ), FeatureDescription( """ Link Sentry issues to existing Bitbucket issues """, IntegrationFeatures.ISSUE_BASIC, ), ] def get_client(self, user): auth = self.get_auth(user=user) if auth is None: raise PluginError("You still need to associate an identity with Bitbucket.") return BitbucketClient(auth) def get_group_urls(self): return super().get_group_urls() + [ re_path( r"^autocomplete", IssueGroupActionEndpoint.as_view(view_method_name="view_autocomplete", plugin=self), name=f"sentry-api-0-plugins-{self.slug}-autocomplete", ) ] def get_url_module(self) -> str: return "sentry_plugins.bitbucket.urls" def is_configured(self, project) -> bool: return bool(self.get_option("repo", project)) def get_new_issue_fields(self, request: Request, group, event, **kwargs): fields = super().get_new_issue_fields(request, group, event, **kwargs) return [ { "name": "repo", "label": "Bitbucket Repository", "default": self.get_option("repo", group.project), "type": "text", "readonly": True, }, *fields, { "name": "issue_type", "label": "Issue type", "default": ISSUE_TYPES[0][0], "type": "select", "choices": ISSUE_TYPES, }, { "name": "priority", "label": "Priority", "default": PRIORITIES[0][0], "type": "select", "choices": PRIORITIES, }, ] def get_link_existing_issue_fields(self, request: Request, group, event, **kwargs): return [ { "name": "issue_id", "label": "Issue", "default": "", "type": "select", "has_autocomplete": True, }, { "name": "comment", 
"label": "Comment", "default": absolute_uri( group.get_absolute_url(params={"referrer": "bitbucket_plugin"}) ), "type": "textarea", "help": ( "Leave blank if you don't want to " "add a comment to the Bitbucket issue." ), "required": False, }, ] def message_from_error(self, exc: Exception) -> str: if isinstance(exc, ApiError) and exc.code == 404: return ERR_404 return super().message_from_error(exc) def create_issue(self, request: Request, group, form_data): client = self.get_client(request.user) try: response = client.create_issue( repo=self.get_option("repo", group.project), data=form_data ) except Exception as e: self.raise_error(e, identity=client.auth) return response["local_id"] def link_issue(self, request: Request, group, form_data, **kwargs): client = self.get_client(request.user) repo = self.get_option("repo", group.project) try: issue = client.get_issue(repo=repo, issue_id=form_data["issue_id"]) except Exception as e: self.raise_error(e, identity=client.auth) comment = form_data.get("comment") if comment: try: client.create_comment(repo, issue["local_id"], {"content": comment}) except Exception as e: self.raise_error(e, identity=client.auth) return {"title": issue["title"]} def get_issue_label(self, group, issue_id: str) -> str: return "Bitbucket-%s" % issue_id def get_issue_url(self, group, issue_id: str) -> str: repo = self.get_option("repo", group.project) return f"https://bitbucket.org/{repo}/issue/{issue_id}/" def view_autocomplete(self, request: Request, group, **kwargs): field = request.GET.get("autocomplete_field") query = request.GET.get("autocomplete_query") if field != "issue_id" or not query: return Response({"issue_id": []}) repo = self.get_option("repo", group.project) client = self.get_client(request.user) try: response = client.search_issues(repo, query) except Exception as e: return Response( {"error_type": "validation", "errors": [{"__all__": self.message_from_error(e)}]}, status=400, ) issues = [ {"text": "(#{}) 
{}".format(i["local_id"], i["title"]), "id": i["local_id"]} for i in response.get("issues", []) ] return Response({field: issues}) def get_configure_plugin_fields(self, project, **kwargs): return [ { "name": "repo", "label": "Repository Name", "type": "text", "placeholder": "e.g. getsentry/sentry", "help": "Enter your repository name, including the owner.", "required": True, } ] def setup(self, bindings): bindings.add("repository.provider", BitbucketRepositoryProvider, id="bitbucket")
BitbucketPlugin
python
PyCQA__pylint
tests/functional/a/arguments_differ.py
{ "start": 2708, "end": 3031 }
class ____(SuperClass): @staticmethod def impl(*args, **kwargs): """ Acceptable use of vararg in subclass because it does not violate LSP. """ super().impl(*args, **kwargs) @staticmethod def should_have_been_decorated_as_static(arg1, arg2): return arg1 + arg2
MyClass
python
ray-project__ray
python/ray/_private/thirdparty/pynvml/pynvml.py
{ "start": 254616, "end": 255324 }
class ____(_PrintableStructure): _fields_ = [ ('version', c_uint), ('ibGuid', c_char * 16), ('chassisSerialNumber', c_char * 16), ('slotNumber', c_char), ('trayIndex', c_char), ('hostId', c_char), ('peerType', c_char), ('moduleId', c_char) ] def __init__(self): super(c_nvmlPlatformInfo_v2_t, self).__init__(version=nvmlPlatformInfo_v2) nvmlPlatformInfo_v1 = 0x100002c nvmlPlatformInfo_v2 = 0x200002c def nvmlDeviceGetPlatformInfo(device, platformInfo): fn = _nvmlGetFunctionPointer("nvmlDeviceGetPlatformInfo") ret = fn(device, platformInfo) _nvmlCheckReturn(ret) return NVML_SUCCESS
c_nvmlPlatformInfo_v2_t
python
bokeh__bokeh
tests/unit/bokeh/embed/test_elements.py
{ "start": 2712, "end": 3096 }
class ____: pass #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
Test_script_for_render_items
python
ansible__ansible
lib/ansible/executor/process/worker.py
{ "start": 1798, "end": 2074 }
class ____(Queue): """Queue that raises AnsibleError items on get().""" def get(self, *args, **kwargs): result = super(WorkerQueue, self).get(*args, **kwargs) if isinstance(result, AnsibleError): raise result return result
WorkerQueue
python
ray-project__ray
python/ray/experimental/channel/accelerator_context.py
{ "start": 557, "end": 8603 }
class ____: """ Provides a unified interface for managing different accelerator backends This includes stream management, event creation, device context control, and communicator support for distributed communication. """ def __init__(self, torch_module_name: str, communicator_cls: Type[Communicator]): """ Initializes an accelerator context with the specified torch device module and communicator class. Args: torch_module_name: Name of the torch device module (e.g., "cuda", "cpu"). communicator_cls: Class used to handle communication. """ # The name of the torch module (e.g., 'cuda', 'npu') self._torch_module_name: str = torch_module_name # The Communicator class used to manage communication self._communicator_cls: Type[Communicator] = communicator_cls # Import the torch backend module (e.g., torch.cuda) if the device is not 'cpu'. if torch_module_name != "cpu": self._torch_mod = importlib.import_module(f"torch.{torch_module_name}") @staticmethod def get() -> "AcceleratorContext": """ Returns the singleton instance of the accelerator context. If a custom accelerator has been registered, initializes the context based on the registration. Otherwise, selects an appropriate runtime based on the available device (CUDA or CPU) and registers the corresponding default communicator. Returns: AcceleratorContext: A singleton instance of the appropriate runtime context. 
""" global _default_accelerator_context, _global_custom_context with _accelerator_context_lock: if _global_custom_context is not None: return _global_custom_context if _default_accelerator_context is None: if len(ray.get_gpu_ids()) > 0: from ray.experimental.channel.nccl_group import _NcclGroup _default_accelerator_context = AcceleratorContext( "cuda", _NcclGroup ) else: from ray.experimental.channel.cpu_communicator import ( CPUCommunicator, ) _default_accelerator_context = AcceleratorContext( "cpu", CPUCommunicator ) return _default_accelerator_context @staticmethod def set(accelerator_context: "AcceleratorContext") -> None: """ Overwrites the default accelerator context. Args: accelerator_context: The context to register. """ global _global_custom_context # Accelerator context is registered. _global_custom_context = accelerator_context def get_accelerator_devices(self) -> List["torch.device"]: """ Gets the torch device list configured for this process. Returns: List[torch.device]: The torch device list. """ import torch if self._torch_module_name == "cpu": return [torch.device("cpu")] if self._torch_module_name == "cuda": accelerator_ids = [str(id) for id in ray.get_gpu_ids()] accelerator_manager = get_accelerator_manager_for_resource("GPU") else: accelerator_ids = [ str(id) for id in ray.get_runtime_context().get_accelerator_ids()[ self._torch_module_name.upper() ] ] accelerator_manager = get_accelerator_manager_for_resource( self._torch_module_name.upper() ) device_ids = [] if len(accelerator_ids) > 0: accelerator_visible_list = ( accelerator_manager.get_current_process_visible_accelerator_ids() ) if accelerator_visible_list is None: accelerator_visible_list = [] # If there are multiple Accelerators, return a list of devices. # If using fractional Accelerators, these IDs are not guaranteed # to be unique across different processes. 
for accelerator_id in accelerator_ids: try: device_ids.append(accelerator_visible_list.index(accelerator_id)) except ValueError: raise RuntimeError( f"{accelerator_manager.get_visible_accelerator_ids_env_var()} set incorrectly. " f"expected to include {accelerator_id}. " "Did you override this environment" " variable? If not, please help file an issue on Github." ) else: # If called on the driver or outside of Ray Train, return the # 0th device. device_ids.append(0) return [ torch.device(f"{self._torch_module_name}:{device_id}") for device_id in device_ids ] def get_device_context(self, device: "torch.device") -> ContextManager: """ Retrieves the context manager for the specified accelerator device. There is no device context for CPU, returning a nullcontext. Args: device: The target device for which the context manager is required. Returns: ContextManager: A context manager specific to the device type. """ if device.type == "cpu": return nullcontext() return self._torch_mod.device(device) def current_stream(self): """ Retrieves the current execution stream for the accelerator device. """ return self._torch_mod.current_stream() def create_event(self): """ Creates an event object for the accelerator device. """ return self._torch_mod.Event() def generate_communicator_id(self) -> str: """ Generates a communication identifier for communication group. """ return self._communicator_cls.generate_communicator_id() def create_communicator(self, *args, **kwargs) -> Communicator: """ Creates a communication group for collective operations. """ return self._communicator_cls(*args, **kwargs) @property def module_name(self) -> str: """ Gets the name of the torch module backing the accelerator. """ return self._torch_module_name @property def communicator_cls(self) -> Optional[Type[Communicator]]: """ Returns the communicator class. """ return self._communicator_cls @property def accelerator_count(self) -> int: """ Returns the number of accelerators assigned by ray. 
""" if self._torch_module_name == "cuda": return len(ray.get_gpu_ids()) else: accelerator_ids = ray.get_runtime_context().get_accelerator_ids() return len(accelerator_ids.get(self._torch_module_name.upper(), [])) def register_accelerator_context( torch_module_name: str, communicator_cls: Type[Communicator] ): """ Registers the accelerator context with the specified device type and communicator. Args: torch_module_name: The name of the device module under torch. communicator_cls: The communicator class associated with the device. """ accelerator_context = AcceleratorContext(torch_module_name, communicator_cls) AcceleratorContext.set(accelerator_context) def is_accelerator_context_registered(): """ Checks whether a custom accelerator context has been registered. Returns: bool: True if a custom accelerator context is registered (_global_custom_context is not None), False otherwise. """ if _global_custom_context is not None: return True return False
AcceleratorContext
python
numba__numba
numba/tests/test_jit_module.py
{ "start": 462, "end": 720 }
class ____(SerialMixin, unittest.TestCase): source_lines = """ from numba import jit_module def inc(x): return x + 1 def add(x, y): return x + y def inc_add(x): y = inc(x) return add(x, y) import numpy as np mean = np.mean
TestJitModule
python
pallets__click
src/click/types.py
{ "start": 6684, "end": 7474 }
class ____(ParamType): name = "text" def convert( self, value: t.Any, param: Parameter | None, ctx: Context | None ) -> t.Any: if isinstance(value, bytes): enc = _get_argv_encoding() try: value = value.decode(enc) except UnicodeError: fs_enc = sys.getfilesystemencoding() if fs_enc != enc: try: value = value.decode(fs_enc) except UnicodeError: value = value.decode("utf-8", "replace") else: value = value.decode("utf-8", "replace") return value return str(value) def __repr__(self) -> str: return "STRING"
StringParamType
python
google__pytype
pytype/pytd/parse/node_test.py
{ "start": 147, "end": 256 }
class ____(Node): """Simple node for equality testing. Not equal to anything else.""" a: Any b: Any
Node1
python
huggingface__transformers
src/transformers/models/t5gemma/modeling_t5gemma.py
{ "start": 54356, "end": 59569 }
class ____(T5GemmaPreTrainedModel): def __init__(self, config: T5GemmaConfig, is_encoder_decoder: Optional[bool] = None): r""" is_encoder_decoder (`Optional`, *optional*): Whether use encoder_decoder for token classification. When set to False, only encoder is used. """ if is_encoder_decoder is not None: config.is_encoder_decoder = is_encoder_decoder super().__init__(config) self.num_labels = config.num_labels if config.is_encoder_decoder: self.model = T5GemmaModel(config) else: self.model = T5GemmaEncoderModel(config) hidden_size = config.encoder.hidden_size if config.is_encoder_decoder: hidden_size = config.decoder.hidden_size classifier_dropout = getattr(config, "classifier_dropout_rate", 0.1) self.score = T5GemmaClassificationHead(hidden_size, self.num_labels, classifier_dropout) self.post_init() def get_input_embeddings(self): return self.model.get_input_embeddings() def set_input_embeddings(self, value): self.model.set_input_embeddings(value) @can_return_tuple @auto_docstring def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.Tensor] = None, decoder_position_ids: Optional[torch.LongTensor] = None, encoder_outputs: Optional[BaseModelOutput] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, **kwargs: Unpack[TransformersKwargs], ) -> TokenClassifierOutput: r""" decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.decoder.n_positions - 1]`. 
[What are position IDs?](../glossary#position-ids) labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ if self.config.is_encoder_decoder and (input_ids is None and inputs_embeds is not None): raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__} in encoder-decoder mode." ) if self.config.is_encoder_decoder and (decoder_input_ids is None and decoder_inputs_embeds is None): if input_ids is None: raise ValueError( "If no `decoder_input_ids` or `decoder_inputs_embeds` are " "passed, `input_ids` cannot be `None`. Please pass either " "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." ) decoder_input_ids = self._shift_right(input_ids) if self.config.is_encoder_decoder: outputs: Seq2SeqModelOutput = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=False, **kwargs, ) last_hidden_state = outputs.last_hidden_state hidden_states = outputs.decoder_hidden_states attentions = outputs.decoder_attentions else: outputs: BaseModelOutput = self.model( input_ids, attention_mask=attention_mask, position_ids=position_ids, inputs_embeds=inputs_embeds, **kwargs, ) last_hidden_state = outputs.last_hidden_state hidden_states = outputs.hidden_states attentions = outputs.attentions logits = self.score(last_hidden_state) loss = None if labels is not None: loss = self.loss_function(logits, labels, self.config) return TokenClassifierOutput( loss=loss, 
logits=logits, hidden_states=hidden_states, attentions=attentions, ) __all__ = [ "T5GemmaForConditionalGeneration", "T5GemmaModel", "T5GemmaEncoderModel", "T5GemmaPreTrainedModel", "T5GemmaForSequenceClassification", "T5GemmaForTokenClassification", ]
T5GemmaForTokenClassification
python
realpython__materials
build-a-django-content-aggregator/source_code_step_2/podcasts/tests.py
{ "start": 98, "end": 996 }
class ____(TestCase): def setUp(self): self.episode = Episode.objects.create( title="My Awesome Podcast Episode", description="Look mom, I made it!", pub_date=timezone.now(), link="https://myawesomeshow.com", image="https://image.myawesomeshow.com", podcast_name="My Python Podcast", guid="de194720-7b4c-49e2-a05f-432436d3fetr", ) def test_episode_content(self): self.assertEqual(self.episode.description, "Look mom, I made it!") self.assertEqual(self.episode.link, "https://myawesomeshow.com") self.assertEqual( self.episode.guid, "de194720-7b4c-49e2-a05f-432436d3fetr" ) def test_episode_str_representation(self): self.assertEqual( str(self.episode), "My Python Podcast: My Awesome Podcast Episode" )
PodCastsTests
python
ray-project__ray
python/ray/data/tests/test_namespace_expressions.py
{ "start": 3651, "end": 4668 }
class ____: """Tests for string length operations.""" def test_string_length( self, dataset_format, method_name, input_values, expected_results ): """Test string length methods.""" data = [{"name": v} for v in input_values] ds = _create_dataset(data, dataset_format) method = getattr(col("name").str, method_name) result = ds.with_column("result", method()).to_pandas() expected = pd.DataFrame({"name": input_values, "result": expected_results}) assert rows_same(result, expected) @pytest.mark.parametrize("dataset_format", DATASET_FORMATS) @pytest.mark.parametrize( "method_name,input_values,expected_values", [ ("upper", ["alice", "bob"], ["ALICE", "BOB"]), ("lower", ["ALICE", "BOB"], ["alice", "bob"]), ("capitalize", ["alice", "bob"], ["Alice", "Bob"]), ("title", ["alice smith", "bob jones"], ["Alice Smith", "Bob Jones"]), ("swapcase", ["AlIcE"], ["aLiCe"]), ], )
TestStringLength
python
airbytehq__airbyte
airbyte-integrations/connectors/source-iterable/source_iterable/streams.py
{ "start": 1044, "end": 4553 }
class ____(HttpStream, ABC): # in case we get a 401 error (api token disabled or deleted) on a stream slice, do not make further requests within the current stream # to prevent 429 error on other streams ignore_further_slices = False url_base = "https://api.iterable.com/api/" primary_key = "id" def __init__(self, authenticator): self._cred = authenticator self._slice_retry = 0 super().__init__(authenticator) @property def retry_factor(self) -> int: return 20 # With factor 20 it would be from 20 to 400 seconds delay @property def max_retries(self) -> Union[int, None]: return 10 @property @abstractmethod def data_field(self) -> str: """ :return: Default field name to get data from response """ @property def availability_strategy(self) -> Optional["AvailabilityStrategy"]: return None def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]: """ Iterable API does not support pagination """ return None def check_generic_error(self, response: requests.Response) -> bool: """ https://github.com/airbytehq/oncall/issues/1592#issuecomment-1499109251 https://github.com/airbytehq/oncall/issues/1985 """ codes = ["Generic Error", "GenericError"] msg_pattern = "Please try again later" if response.status_code == 500: # I am not sure that all 500 errors return valid json try: response_json = json.loads(response.text) except ValueError: return if response_json.get("code") in codes and msg_pattern in response_json.get("msg", ""): return True def request_kwargs( self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None, ) -> Mapping[str, Any]: """ https://requests.readthedocs.io/en/latest/user/advanced/#timeouts https://github.com/airbytehq/oncall/issues/1985#issuecomment-1559276465 """ return {"timeout": (60, 300)} def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]: response_json = response.json() or {} records = response_json.get(self.data_field, []) for 
record in records: yield record def should_retry(self, response: requests.Response) -> bool: if self.check_generic_error(response): self._slice_retry += 1 if self._slice_retry < 3: return True return False return response.status_code == 429 or 500 <= response.status_code < 600 def read_records( self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_slice: Mapping[str, Any] = None, stream_state: Mapping[str, Any] = None, ) -> Iterable[Mapping[str, Any]]: self._slice_retry = 0 if self.ignore_further_slices: return try: yield from super().read_records(sync_mode, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state) except (HTTPError, UserDefinedBackoffException, DefaultBackoffException) as e: response = e.response if self.check_generic_error(response): return raise e
IterableStream
python
joke2k__faker
faker/providers/date_time/hi_IN/__init__.py
{ "start": 46, "end": 855 }
class ____(DateTimeProvider): def day_of_week(self) -> str: day = self.date("%w") DAY_NAMES = { "0": "सोमवार", "1": "मंगलवार", "2": "बुधवार", "3": "गुरुवार", "4": "जुम्मा", "5": "शनिवार", "6": "रविवार", } return DAY_NAMES[day] def month_name(self) -> str: month = self.month() MONTH_NAMES = { "01": "जनवरी", "02": "फ़रवरी", "03": "मार्च", "04": "अप्रैल", "05": "मई", "06": "जून", "07": "जुलाई", "08": "अगस्त", "09": "सितंबर", "10": "अक्टूबर", "11": "नवंबर", "12": "दिसंबर", } return MONTH_NAMES[month]
Provider
python
ray-project__ray
python/ray/tests/test_output.py
{ "start": 4989, "end": 6272 }
class ____: pass ray.init(address="{address}") # Will hang forever due to infeasible resource. ray.get(A.remote().__ray_ready__.remote()) """.format( address=ray_start_cluster_head_with_env_vars.address ) proc = run_string_as_driver_nonblocking(script, env={"PYTHONUNBUFFERED": "1"}) def _check_for_infeasible_msg(): l = proc.stdout.readline().decode("ascii") if len(l) > 0: print(l) return "(autoscaler" in l and "No available node types can fulfill" in l wait_for_condition(_check_for_infeasible_msg, timeout=30) os.kill(proc.pid, signal.SIGTERM) proc.wait() @pytest.mark.parametrize( "ray_start_cluster_head_with_env_vars", [ { "num_cpus": 1, "env_vars": { "RAY_enable_autoscaler_v2": "0", "RAY_debug_dump_period_milliseconds": "1000", }, }, { "num_cpus": 1, "env_vars": { "RAY_enable_autoscaler_v2": "1", "RAY_debug_dump_period_milliseconds": "1000", }, }, ], indirect=True, ) def test_autoscaler_warn_deadlock(ray_start_cluster_head_with_env_vars): script = """ import ray import time @ray.remote(num_cpus=1)
A
python
huggingface__transformers
tests/pipelines/test_pipelines_table_question_answering.py
{ "start": 924, "end": 16502 }
class ____(unittest.TestCase): # Putting it there for consistency, but TQA do not have fast tokenizer # which are needed to generate automatic tests model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING @require_torch def test_small_model_pt(self, dtype="float32"): model_id = "lysandre/tiny-tapas-random-wtq" model = AutoModelForTableQuestionAnswering.from_pretrained(model_id, dtype=dtype) tokenizer = AutoTokenizer.from_pretrained(model_id) self.assertIsInstance(model.config.aggregation_labels, dict) self.assertIsInstance(model.config.no_aggregation_label_index, int) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer, max_new_tokens=20) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the 
largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @require_torch def test_small_model_pt_fp16(self): self.test_small_model_pt(dtype="float16") @require_torch def test_slow_tokenizer_sqa_pt(self, 
dtype="float32"): model_id = "lysandre/tiny-tapas-random-sqa" model = AutoModelForTableQuestionAnswering.from_pretrained(model_id, dtype=dtype) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer, max_new_tokens=20) inputs = { "table": { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, "query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], } sequential_outputs = table_querier(**inputs, sequential=True) batch_outputs = table_querier(**inputs, sequential=False) self.assertEqual(len(sequential_outputs), 3) self.assertEqual(len(batch_outputs), 3) self.assertEqual(sequential_outputs[0], batch_outputs[0]) self.assertNotEqual(sequential_outputs[1], batch_outputs[1]) # self.assertNotEqual(sequential_outputs[2], batch_outputs[2]) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer, max_new_tokens=20) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, 
{"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], 
"Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @require_torch def test_slow_tokenizer_sqa_pt_fp16(self): self.test_slow_tokenizer_sqa_pt(dtype="float16") @slow @require_torch def test_integration_wtq_pt(self, dtype="float32"): table_querier = pipeline("table-question-answering", dtype=dtype) data = { "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } queries = [ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ] results = table_querier(data, queries) expected_results = [ {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, { "answer": "COUNT > Transformers, Datasets, Tokenizers", "coordinates": [(0, 0), (1, 0), (2, 0)], "cells": ["Transformers", "Datasets", "Tokenizers"], "aggregator": "COUNT", }, { "answer": "AVERAGE > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "AVERAGE", }, { "answer": "SUM > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "SUM", }, ] self.assertListEqual(results, expected_results) @slow @require_torch def test_integration_wtq_pt_fp16(self): self.test_integration_wtq_pt(dtype="float16") 
@slow @require_torch def test_integration_sqa_pt(self, dtype="float32"): table_querier = pipeline( "table-question-answering", model="google/tapas-base-finetuned-sqa", tokenizer="google/tapas-base-finetuned-sqa", dtype=dtype, ) data = { "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Age": ["56", "45", "59"], "Number of movies": ["87", "53", "69"], "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } queries = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"] results = table_querier(data, queries, sequential=True) expected_results = [ {"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]}, {"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]}, {"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]}, ] self.assertListEqual(results, expected_results) @slow @require_torch def test_integration_sqa_pt_fp16(self): self.test_integration_sqa_pt(dtype="float16") @slow @require_torch def test_large_model_pt_tapex(self, dtype="float32"): model_id = "microsoft/tapex-large-finetuned-wtq" table_querier = pipeline( "table-question-answering", model=model_id, dtype=dtype, ) data = { "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Age": ["56", "45", "59"], "Number of movies": ["87", "53", "69"], "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } queries = [ "How many movies has George Clooney played in?", "How old is Mr Clooney ?", "What's the date of birth of Leonardo ?", ] results = table_querier(data, queries, sequential=True) expected_results = [ {"answer": " 69"}, {"answer": " 59"}, {"answer": " 10 june 1996"}, ] self.assertListEqual(results, expected_results)
TQAPipelineTests
python
apache__airflow
providers/amazon/tests/unit/amazon/aws/operators/test_sagemaker_transform.py
{ "start": 3048, "end": 18198 }
class ____: def setup_method(self): self.sagemaker = SageMakerTransformOperator( task_id="test_sagemaker_operator", config=copy.deepcopy(CONFIG), wait_for_completion=False, check_interval=5, check_if_model_exists=False, ) @mock.patch.object(SageMakerHook, "describe_transform_job") @mock.patch.object(SageMakerHook, "create_model") @mock.patch.object(SageMakerHook, "describe_model") @mock.patch.object(SageMakerHook, "create_transform_job") @mock.patch.object(sagemaker, "serialize", return_value="") def test_integer_fields(self, _, mock_create_transform, __, ___, mock_desc): mock_desc.side_effect = [ ClientError({"Error": {"Code": "ValidationException"}}, "op"), {"ModelName": "model_name"}, ] mock_create_transform.return_value = { "TransformJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}, } self.sagemaker.execute(None) assert self.sagemaker.integer_fields == EXPECTED_INTEGER_FIELDS for key1, key2, *key3_org in EXPECTED_INTEGER_FIELDS: if key3_org: (key3,) = key3_org assert self.sagemaker.config[key1][key2][key3] == int(self.sagemaker.config[key1][key2][key3]) else: self.sagemaker.config[key1][key2] == int(self.sagemaker.config[key1][key2]) @mock.patch.object(SageMakerHook, "describe_transform_job") @mock.patch.object(SageMakerHook, "create_model") @mock.patch.object(SageMakerHook, "describe_model") @mock.patch.object(SageMakerHook, "create_transform_job") @mock.patch.object(sagemaker, "serialize", return_value="") def test_execute(self, _, mock_transform, __, mock_model, mock_desc): mock_desc.side_effect = [ ClientError({"Error": {"Code": "ValidationException"}}, "op"), {"ModelName": "model_name"}, ] mock_transform.return_value = { "TransformJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}, } self.sagemaker.execute(None) mock_model.assert_called_once_with(CREATE_MODEL_PARAMS) mock_transform.assert_called_once_with( CREATE_TRANSFORM_PARAMS_INTEGER_FIELDS, wait_for_completion=False, check_interval=5, max_ingestion_time=None, ) 
@mock.patch.object(SageMakerHook, "describe_transform_job") @mock.patch.object(SageMakerHook, "create_model") @mock.patch.object(SageMakerHook, "describe_model") @mock.patch.object(SageMakerHook, "create_transform_job") # @mock.patch.object(sagemaker, "serialize", return_value="") def test_log_correct_url(self, mock_transform, __, ___, mock_desc): region = "us-east-1" job_name = CONFIG["Transform"]["TransformJobName"] mock_desc.side_effect = [ ClientError({"Error": {"Code": "ValidationException"}}, "op"), {"ModelName": "model_name"}, ] mock_transform.return_value = { "TransformJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}, } aws_domain = SageMakerTransformJobLink.get_aws_domain("aws") job_run_url = ( f"https://console.{aws_domain}/sagemaker/home?region={region}#/transform-jobs/{job_name}" ) with mock.patch.object(self.sagemaker.log, "info") as mock_log_info: self.sagemaker.execute(None) # assert job_run_id == JOB_RUN_ID mock_log_info.assert_any_call("You can monitor this SageMaker Transform job at %s", job_run_url) @mock.patch.object(SageMakerHook, "describe_transform_job") @mock.patch.object(SageMakerHook, "create_model") @mock.patch.object(SageMakerHook, "create_transform_job") def test_execute_with_failure(self, mock_transform, _, mock_desc): mock_desc.side_effect = [ ClientError({"Error": {"Code": "ValidationException"}}, "op"), None, ] mock_transform.return_value = { "TransformJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 404}, } with pytest.raises(AirflowException): self.sagemaker.execute(None) @mock.patch.object(SageMakerHook, "describe_transform_job") @mock.patch.object(SageMakerHook, "create_transform_job") @mock.patch.object(SageMakerHook, "create_model") @mock.patch.object(SageMakerHook, "describe_model") @mock.patch.object(sagemaker, "serialize", return_value="") def test_execute_with_check_if_job_exists(self, _, __, ___, mock_transform, mock_desc): mock_desc.side_effect = [ ClientError({"Error": {"Code": 
"ValidationException"}}, "op"), {"ModelName": "model_name"}, ] mock_transform.return_value = { "TransformJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}, } self.sagemaker.execute(None) mock_transform.assert_called_once_with( CREATE_TRANSFORM_PARAMS_INTEGER_FIELDS, wait_for_completion=False, check_interval=5, max_ingestion_time=None, ) @mock.patch.object(SageMakerHook, "describe_transform_job") @mock.patch.object(SageMakerHook, "create_transform_job") @mock.patch.object(SageMakerHook, "create_model") @mock.patch.object(SageMakerHook, "describe_model") @mock.patch.object(sagemaker, "serialize", return_value="") def test_execute_without_check_if_job_exists(self, _, __, ___, mock_transform, ____): mock_transform.return_value = { "TransformJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}, } self.sagemaker.check_if_job_exists = False self.sagemaker.execute(None) mock_transform.assert_called_once_with( CREATE_TRANSFORM_PARAMS_INTEGER_FIELDS, wait_for_completion=False, check_interval=5, max_ingestion_time=None, ) @mock.patch( # since it is divided by 1000000000, the added timestamp should be 1234567890. 
"airflow.providers.amazon.aws.operators.sagemaker.time.time_ns", return_value=MOCK_UNIX_TIME ) @mock.patch.object( SageMakerHook, "describe_transform_job", return_value={"ModelName": "model_name-1234567890"} ) @mock.patch.object( SageMakerHook, "create_transform_job", return_value={ "ResponseMetadata": {"HTTPStatusCode": 200}, }, ) @mock.patch.object(SageMakerHook, "create_model") @mock.patch.object( SageMakerHook, "describe_model", side_effect=[ None, ClientError({"Error": {"Code": "ValidationException"}}, "op"), "model_name-1234567890", ], ) @mock.patch.object(sagemaker, "serialize", return_value="") def test_when_model_already_exists_it_should_add_timestamp_to_model_name( self, _, mock_describe_model, mock_create_model, __, ___, timestamp_mock ): self.sagemaker.check_if_job_exists = False self.sagemaker.check_if_model_exists = True model_config = {"ModelName": "model_name"} self.sagemaker.config["Model"] = model_config self.sagemaker.execute(None) mock_describe_model.assert_has_calls( [mock.call("model_name"), mock.call("model_name-1234567890"), mock.call("model_name-1234567890")] ) mock_create_model.assert_called_once_with({"ModelName": "model_name-1234567890"}) @mock.patch.object(SageMakerHook, "describe_transform_job") @mock.patch.object(SageMakerHook, "create_transform_job") @mock.patch.object(SageMakerHook, "create_model") @mock.patch.object(SageMakerHook, "describe_model") @mock.patch.object(sagemaker, "serialize", return_value="") def test_when_model_already_exists_it_should_raise_airflow_exception( self, _, mock_describe_model, mock_create_model, __, ___ ): mock_describe_model.side_effect = [None] self.sagemaker.check_if_job_exists = False self.sagemaker.check_if_model_exists = True self.sagemaker.action_if_model_exists = "fail" model_config = {"ModelName": "model_name"} self.sagemaker.config["Model"] = model_config with pytest.raises(AirflowException) as context: self.sagemaker.execute(None) assert str(context.value) == "A SageMaker model with name 
model_name already exists." mock_describe_model.assert_called_once_with("model_name") mock_create_model.assert_not_called() @mock.patch.object(SageMakerHook, "describe_transform_job", return_value={"ModelName": "model_name"}) @mock.patch.object( SageMakerHook, "create_transform_job", return_value={ "ResponseMetadata": {"HTTPStatusCode": 200}, }, ) @mock.patch.object(SageMakerHook, "create_model") @mock.patch.object(SageMakerHook, "describe_model") @mock.patch.object(sagemaker, "serialize", return_value="") def test_execute_without_check_if_model_exists(self, _, mock_describe_model, mock_create_model, __, ___): self.sagemaker.check_if_job_exists = False self.sagemaker.check_if_model_exists = False model_config = {"ModelName": "model_name"} self.sagemaker.config["Model"] = model_config self.sagemaker._get_unique_model_name = mock.Mock() self.sagemaker.execute(None) mock_create_model.assert_called_once_with(model_config) mock_describe_model.assert_called_once_with("model_name") self.sagemaker._get_unique_model_name.assert_not_called() @mock.patch.object(SageMakerTransformOperator, "_get_unique_name") def test_get_unique_model_name_calls_get_unique_name_correctly(self, get_unique_name_mock): def describe_func(): pass self.sagemaker._get_unique_model_name("model_name", True, describe_func) get_unique_name_mock.assert_called_once_with( "model_name", True, describe_func, self.sagemaker._check_if_model_exists, "model", ) @mock.patch.object(SageMakerTransformOperator, "_check_if_resource_exists") def test_check_if_model_exists_calls_check_if_resource_exists_correctly(self, check_resource_exists_mock): def describe_func(): pass self.sagemaker._check_if_model_exists("model_name", describe_func) check_resource_exists_mock.assert_called_once_with("model_name", "model", describe_func) @mock.patch("airflow.providers.amazon.aws.operators.sagemaker.SageMakerTransformOperator.defer") @mock.patch.object( SageMakerHook, "describe_transform_job", return_value={"TransformJobStatus": 
"Failed", "FailureReason": "it failed"}, ) @mock.patch.object(SageMakerHook, "create_transform_job") @mock.patch.object(SageMakerHook, "create_model") def test_operator_failed_before_defer(self, _, mock_transform, mock_describe_transform_job, mock_defer): mock_transform.return_value = { "TransformJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}, } self.sagemaker.deferrable = True self.sagemaker.wait_for_completion = True self.sagemaker.check_if_job_exists = False with pytest.raises(AirflowException): self.sagemaker.execute(context=None) assert not mock_defer.called @mock.patch("airflow.providers.amazon.aws.operators.sagemaker.SageMakerTransformOperator.defer") @mock.patch.object(SageMakerHook, "describe_model") @mock.patch.object( SageMakerHook, "describe_transform_job", return_value={"TransformJobStatus": "Completed"} ) @mock.patch.object(SageMakerHook, "create_transform_job") @mock.patch.object(SageMakerHook, "create_model") def test_operator_complete_before_defer( self, _, mock_transform, mock_describe_transform_job, mock_describe_model, mock_defer ): mock_transform.return_value = { "TransformJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}, } mock_describe_model.return_value = {"PrimaryContainer": {"ModelPackageName": "package-name"}} self.sagemaker.deferrable = True self.sagemaker.wait_for_completion = True self.sagemaker.check_if_job_exists = False self.sagemaker.execute(context=None) assert not mock_defer.called @mock.patch.object( SageMakerHook, "describe_transform_job", return_value={"TransformJobStatus": "InProgress"} ) @mock.patch.object(SageMakerHook, "create_transform_job") @mock.patch.object(SageMakerHook, "create_model") def test_operator_defer(self, _, mock_transform, mock_describe_transform_job): mock_transform.return_value = { "TransformJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}, } self.sagemaker.deferrable = True self.sagemaker.wait_for_completion = True self.sagemaker.check_if_job_exists = 
False with pytest.raises(TaskDeferred) as exc: self.sagemaker.execute(context=None) assert isinstance(exc.value.trigger, SageMakerTrigger), "Trigger is not a SagemakerTrigger" @mock.patch.object(SageMakerHook, "describe_transform_job") @mock.patch.object(SageMakerHook, "create_model") @mock.patch.object(SageMakerHook, "describe_model") @mock.patch.object(SageMakerHook, "get_conn") @mock.patch.object(SageMakerHook, "create_transform_job") def test_operator_lineage_data(self, mock_transform, mock_conn, mock_model, _, mock_desc): self.sagemaker.check_if_job_exists = False mock_conn.return_value.describe_model_package.return_value = { "InferenceSpecification": {"Containers": [{"ModelDataUrl": "s3://model-bucket/model-path"}]}, } mock_model.return_value = {"PrimaryContainer": {"ModelPackageName": "package-name"}} mock_desc.return_value = { "TransformInput": {"DataSource": {"S3DataSource": {"S3Uri": "s3://input-bucket/input-path"}}}, "TransformOutput": {"S3OutputPath": "s3://output-bucket/output-path"}, "ModelName": "model_name", } mock_transform.return_value = { "TransformJobArn": "test_arn", "ResponseMetadata": {"HTTPStatusCode": 200}, } self.sagemaker.execute(None) assert self.sagemaker.get_openlineage_facets_on_complete(None) == OperatorLineage( inputs=[ Dataset(namespace="s3://input-bucket", name="input-path"), Dataset(namespace="s3://model-bucket", name="model-path"), ], outputs=[Dataset(namespace="s3://output-bucket", name="output-path")], ) def test_template_fields(self): validate_template_fields(self.sagemaker)
TestSageMakerTransformOperator
python
bokeh__bokeh
src/bokeh/models/glyphs.py
{ "start": 47842, "end": 51732 }
class ____(XYGlyph, TextGlyph): ''' Render text. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) __example__ = "examples/reference/models/Text.py" _args = ('x', 'y', 'text', 'angle', 'x_offset', 'y_offset') x = NumberSpec(default=field("x"), help=""" The x-coordinates to locate the text anchors. """) y = NumberSpec(default=field("y"), help=""" The y-coordinates to locate the text anchors. """) text = StringSpec(default=field("text"), help=""" The text values to render. """) angle = AngleSpec(default=0, help=""" The angles to rotate the text, as measured from the horizontal. """) x_offset = FloatSpec(default=0, help=""" Offset values in pixels to apply to the x-coordinates. This is useful, for instance, if it is desired to "float" text a fixed distance in |screen units| from a given data position. """) y_offset = FloatSpec(default=0, help=""" Offset values in pixels to apply to the y-coordinates. This is useful, for instance, if it is desired to "float" text a fixed distance in |screen units| from a given data position. """) anchor = DataSpec(TextAnchor, default=value("auto"), help=""" Position within the bounding box of this glyph to which ``x`` and ``y`` coordinates are anchored to. This can be a named anchor point like ``top_left`` or ``center``, or a percentage from from left to right and top to bottom, or a combination of those, independently in width and height. If set to ``auto``, then anchor point will be determined from text ``align`` and ``baseline``. .. note:: This property is experimental and may change at any point. """) padding = Padding(default=0, help=""" Extra space between the text of a glyphs and its bounding box (border). .. note:: This property is experimental and may change at any point. """) border_radius = BorderRadius(default=0, help=""" Allows the box to have rounded corners. For the best results, it should be used in combination with ``padding``. .. 
note:: This property is experimental and may change at any point. """) outline_shape = DataSpec(Enum(OutlineShapeName), default="box", help=""" Specify the shape of the outline for the text box. The default outline is of a text box is its bounding box (or rectangle). This can be changed to a selection of pre-defined shapes, like circle, ellipse, diamond, parallelogram, etc. Those shapes are circumscribed onto the bounding box, so that the contents of a box fit inside those shapes. This property is in effect only when either border line, background fill and/or background hatch properties are set. The user can choose ``"none"`` to avoid drawing any shape, even if border or background visuals are set. .. note:: This property is experimental and may change at any point. .. note:: Currently hit testing only uses the bounding box of text contents of the glyph, which is equivalent to using box/rectangle shape. """) text_props = Include(TextProps, help=""" The {prop} values for the text. """) background_fill_props = Include(FillProps, prefix="background", help=""" The {prop} values for the text bounding box. """) background_hatch_props = Include(HatchProps, prefix="background", help=""" The {prop} values for the text bounding box. """) border_line_props = Include(LineProps, prefix="border", help=""" The {prop} values for the text bounding box. """) background_fill_color = Override(default=None) background_hatch_color = Override(default=None) border_line_color = Override(default=None) @abstract
Text
python
doocs__leetcode
solution/1400-1499/1481.Least Number of Unique Integers after K Removals/Solution.py
{ "start": 0, "end": 261 }
class ____: def findLeastNumOfUniqueInts(self, arr: List[int], k: int) -> int: cnt = Counter(arr) for i, v in enumerate(sorted(cnt.values())): k -= v if k < 0: return len(cnt) - i return 0
Solution
python
scipy__scipy
scipy/stats/_stats_py.py
{ "start": 418915, "end": 419878 }
class ____: # A very simple, array-API compatible beta distribution for use in # hypothesis tests. May be replaced by new infrastructure beta # distribution in due time. def __init__(self, a, b, *, loc=None, scale=None): self.a = a self.b = b self.loc = loc self.scale = scale def cdf(self, x): if self.loc is not None or self.scale is not None: loc = 0 if self.loc is None else self.loc scale = 1 if self.scale is None else self.scale return special.betainc(self.a, self.b, (x - loc)/scale) return special.betainc(self.a, self.b, x) def sf(self, x): if self.loc is not None or self.scale is not None: loc = 0 if self.loc is None else self.loc scale = 1 if self.scale is None else self.scale return special.betaincc(self.a, self.b, (x - loc)/scale) return special.betaincc(self.a, self.b, x)
_SimpleBeta
python
streamlit__streamlit
lib/streamlit/testing/v1/element_tree.py
{ "start": 8813, "end": 9141 }
class ____(Element): proto: AlertProto = field(repr=False) icon: str def __init__(self, proto: AlertProto, root: ElementTree) -> None: self.proto = proto self.key = None self.root = root @property def value(self) -> str: return self.proto.body @dataclass(repr=False)
AlertBase
python
pennersr__django-allauth
tests/apps/socialaccount/providers/meetup/tests.py
{ "start": 240, "end": 1630 }
class ____(OAuth2TestsMixin, TestCase): provider_id = MeetupProvider.id def get_mocked_response(self): return MockedResponse( HTTPStatus.OK, """ {"id": 1, "lang": "en_US", "city": "Bhubaneswar", "photo": { "thumb_link":"", "photo_id": 240057062, "highres_link":"", "base_url": "http://photos2.meetupstatic.com", "type": "member", "name": "Abhishek Jaiswal", "other_services": {}, "country": "in", "topics": [{"name": "Open Source", "urlkey": "opensource", "id": 563}, {"name": "Python", "urlkey": "python", "id": 1064}, {"name": "Software Development", "urlkey": "softwaredev", "id": 3833}, {"name": "Computer programming", "urlkey": "computer-programming", "id": 48471}, {"name": "Python Web Development", "urlkey": "python-web-development", "id": 917242}, {"name": "Data Science using Python", "urlkey": "data-science-using-python", "id": 1481522}], "lon": 85.83999633789062, "joined": 1411642310000, "id": 173662372, "status": "active", "link": "http://www.meetup.com/members/173662372", "hometown": "Kolkata", "lat": 20.270000457763672, "visited": 1488829924000, "self": {"common": {}}}}""", ) def get_expected_to_str(self): return "Meetup"
MeetupTests
python
apache__airflow
providers/standard/tests/unit/standard/operators/test_python.py
{ "start": 8165, "end": 15951 }
class ____(BasePythonTest): opcls = PythonOperator @pytest.fixture(autouse=True) def setup_tests(self): self.run = False def do_run(self): self.run = True def is_run(self): return self.run def test_python_operator_run(self): """Tests that the python callable is invoked on task run.""" ti = self.create_ti(self.do_run) assert not self.is_run() ti.run() assert self.is_run() @pytest.mark.parametrize("not_callable", [{}, None]) def test_python_operator_python_callable_is_callable(self, not_callable): """Tests that PythonOperator will only instantiate if the python_callable argument is callable.""" with pytest.raises(AirflowException, match="`python_callable` param must be callable"): PythonOperator(python_callable=not_callable, task_id="python_operator") def test_python_callable_arguments_are_templatized(self): """Test PythonOperator op_args are templatized""" # Create a named tuple and ensure it is still preserved # after the rendering is done Named = namedtuple("Named", ["var1", "var2"]) named_tuple = Named("{{ ds }}", "unchanged") task = self.render_templates( lambda: 0, op_args=[4, date(2019, 1, 1), "dag {{dag.dag_id}} ran on {{ds}}.", named_tuple], ) rendered_op_args = task.op_args assert len(rendered_op_args) == 4 assert rendered_op_args[0] == 4 assert rendered_op_args[1] == date(2019, 1, 1) assert rendered_op_args[2] == f"dag {self.dag_id} ran on {self.ds_templated}." assert rendered_op_args[3] == Named(self.ds_templated, "unchanged") def test_python_callable_keyword_arguments_are_templatized(self): """Test PythonOperator op_kwargs are templatized""" task = self.render_templates( lambda: 0, op_kwargs={ "an_int": 4, "a_date": date(2019, 1, 1), "a_templated_string": "dag {{dag.dag_id}} ran on {{ds}}.", }, ) rendered_op_kwargs = task.op_kwargs assert rendered_op_kwargs["an_int"] == 4 assert rendered_op_kwargs["a_date"] == date(2019, 1, 1) assert rendered_op_kwargs["a_templated_string"] == f"dag {self.dag_id} ran on {self.ds_templated}." 
def test_python_callable_keyword_arguments_callable_not_templatized(self): """Test PythonOperator op_kwargs are not templatized if it's a callable""" def a_fn(): return 4 task = self.render_templates( lambda: 0, op_kwargs={ "a_callable": a_fn, }, ) rendered_op_kwargs = task.op_kwargs assert rendered_op_kwargs["a_callable"] == a_fn def test_python_operator_shallow_copy_attr(self): def not_callable(x): raise RuntimeError("Should not be triggered") original_task = PythonOperator( python_callable=not_callable, op_kwargs={"certain_attrs": ""}, task_id=self.task_id, ) new_task = copy.deepcopy(original_task) # shallow copy op_kwargs assert id(original_task.op_kwargs["certain_attrs"]) == id(new_task.op_kwargs["certain_attrs"]) # shallow copy python_callable assert id(original_task.python_callable) == id(new_task.python_callable) def test_conflicting_kwargs(self): # dag is not allowed since it is a reserved keyword def func(dag): # An ValueError should be triggered since we're using dag as a reserved keyword raise RuntimeError(f"Should not be triggered, dag: {dag}") ti = self.create_ti(func, op_args=[1]) error_message = re.escape("The key 'dag' in args is a part of kwargs and therefore reserved.") with pytest.raises(ValueError, match=error_message): ti.run() def test_provide_context_does_not_fail(self): """Ensures that provide_context doesn't break dags in 2.0.""" def func(custom, dag): assert custom == 1, "custom should be 1" assert dag is not None, "dag should be set" error_message = "Invalid arguments were passed to PythonOperator \\(task_id: task_test-provide-context-does-not-fail\\). 
Invalid arguments were:\n\\*\\*kwargs: {'provide_context': True}" with pytest.raises((TypeError, AirflowException), match=error_message): self.run_as_task(func, op_kwargs={"custom": 1}, provide_context=True) def test_context_with_conflicting_op_args(self): def func(custom, dag): assert custom == 1, "custom should be 1" assert dag is not None, "dag should be set" self.run_as_task(func, op_kwargs={"custom": 1}) def test_context_with_kwargs(self): def func(**context): # check if context is being set assert len(context) > 0, "Context has not been injected" self.run_as_task(func, op_kwargs={"custom": 1}) @pytest.mark.parametrize( ("show_return_value_in_logs", "should_shown"), [ pytest.param(NOTSET, True, id="default"), pytest.param(True, True, id="show"), pytest.param(False, False, id="hide"), ], ) def test_return_value_log(self, show_return_value_in_logs, should_shown, caplog): caplog.set_level(logging.INFO, logger=LOGGER_NAME) def func(): return "test_return_value" if show_return_value_in_logs is NOTSET: self.run_as_task(func) else: self.run_as_task(func, show_return_value_in_logs=show_return_value_in_logs) if should_shown: assert "Done. Returned value was: test_return_value" in caplog.messages assert "Done. Returned value not shown" not in caplog.messages else: assert "Done. Returned value was: test_return_value" not in caplog.messages assert "Done. 
Returned value not shown" in caplog.messages def test_python_operator_templates_exts(self): def func(): return "test_return_value" with self.dag_maker(self.dag_id, template_searchpath=TEMPLATE_SEARCHPATH, serialized=True): python_operator = PythonOperator( task_id="python_operator", python_callable=func, show_return_value_in_logs=False, templates_exts=["test_ext"], ) assert python_operator.template_ext == ["test_ext"] def test_python_operator_has_default_logger_name(self): python_operator = PythonOperator(task_id="task", python_callable=partial(int, 2)) logger_name: str = "airflow.task.operators.airflow.providers.standard.operators.python.PythonOperator" assert python_operator.log.name == logger_name def test_custom_logger_name_is_correctly_set(self): """ Ensure the custom logger name is correctly set when the Operator is created, and when its state is resumed via __setstate__. """ logger_name: str = "airflow.task.operators.custom.logger" python_operator = PythonOperator( task_id="task", python_callable=partial(int, 2), logger_name="custom.logger" ) assert python_operator.log.name == logger_name setstate_operator = pickle.loads(pickle.dumps(python_operator)) assert setstate_operator.log.name == logger_name def test_custom_logger_name_can_be_empty_string(self): python_operator = PythonOperator(task_id="task", python_callable=partial(int, 2), logger_name="") assert python_operator.log.name == "airflow.task.operators"
TestPythonOperator
python
PrefectHQ__prefect
src/integrations/prefect-databricks/prefect_databricks/flows.py
{ "start": 660, "end": 765 }
class ____(Exception): """Raised when Databricks jobs runs submit terminates"""
DatabricksJobTerminated
python
getsentry__sentry
src/sentry/migrations/0952_fix_span_item_event_type_alerts.py
{ "start": 377, "end": 2137 }
class ____(Enum): ERROR = 0 DEFAULT = 1 TRANSACTION = 2 TRACE_ITEM_SPAN = 3 TRACE_ITEM_LOG = 4 def fix_span_item_event_type_alerts( apps: StateApps, schema_editor: BaseDatabaseSchemaEditor ) -> None: SnubaQuery = apps.get_model("sentry", "SnubaQuery") SnubaQueryEventType = apps.get_model("sentry", "SnubaQueryEventType") for snuba_query in RangeQuerySetWrapperWithProgressBar( SnubaQuery.objects.filter(dataset="events_analytics_platform") ): event_type_objects = SnubaQueryEventType.objects.filter(snuba_query=snuba_query) log_event_type: Any | None = None span_event_type: Any | None = None transaction_event_type: Any | None = None for event_type in event_type_objects: if event_type.type == EventType.TRACE_ITEM_SPAN.value: span_event_type = event_type if event_type.type == EventType.TRACE_ITEM_LOG.value: log_event_type = event_type if event_type.type == EventType.TRANSACTION.value: transaction_event_type = event_type # We have always explicitly set event type for logs, so if log event type # exists, we know it's a log alert, so skip the rest of the logic. if log_event_type is not None: continue # If it's not a log alerts and dataset is events_analytics_platform, # we know it's a span alert. if span_event_type is None: SnubaQueryEventType.objects.create( snuba_query=snuba_query, type=EventType.TRACE_ITEM_SPAN.value ) # Always delete transaction event type. if transaction_event_type is not None: transaction_event_type.delete()
EventType
python
google__jax
tests/pallas/tpu_sparsecore_pallas_test.py
{ "start": 60745, "end": 60825 }
class ____(TCTilingMixin, ScalarSubcoreTest): pass
ScalarSubcoreTestWithTCTiling
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 81973, "end": 82859 }
class ____(sgqlc.types.Input): """ See source code for more info. """ __schema__ = graphql_schema __field_names__ = ( "name", "url", "access_token", "type", "owner_id", "github_pat", "client_mutation_id", ) name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name") url = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="url") access_token = sgqlc.types.Field(String, graphql_name="accessToken") type = sgqlc.types.Field( sgqlc.types.non_null(MigrationSourceType), graphql_name="type" ) owner_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="ownerId") github_pat = sgqlc.types.Field(String, graphql_name="githubPat") client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
CreateMigrationSourceInput
python
PyCQA__pylint
tests/functional/i/invalid/invalid_repr_returned.py
{ "start": 595, "end": 726 }
class ____: """ __repr__ returns bytes """ def __repr__(self): # [invalid-repr-returned] return b"123"
FirstBadRepr
python
sqlalchemy__sqlalchemy
test/orm/dml/test_evaluator.py
{ "start": 1494, "end": 12437 }
class ____(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): class LiberalJson(JSON): operator_classes = JSON.operator_classes | OperatorClass.MATH Table( "users", metadata, Column("id", Integer, primary_key=True), Column("name", String(64)), Column("othername", String(64)), Column("json", LiberalJson), ) @classmethod def setup_classes(cls): class User(cls.Basic): pass @classmethod def setup_mappers(cls): users, User = cls.tables.users, cls.classes.User cls.mapper_registry.map_imperatively(User, users) def test_compare_to_value(self): User = self.classes.User eval_eq( User.name == "foo", testcases=[ (User(name="foo"), True), (User(name="bar"), False), (User(name=None), None), ], ) eval_eq( User.id < 5, testcases=[ (User(id=3), True), (User(id=5), False), (User(id=None), None), ], ) def test_compare_to_callable_bind(self): User = self.classes.User eval_eq( User.name == bindparam("x", callable_=lambda: "foo"), testcases=[ (User(name="foo"), True), (User(name="bar"), False), (User(name=None), None), ], ) def test_compare_to_none(self): User = self.classes.User eval_eq( User.name == None, # noqa testcases=[ (User(name="foo"), False), (User(name=None), True), (None, None), ], ) def test_raise_on_unannotated_matched_column(self): """test originally for warning emitted in #4073, now updated for #8656.""" User = self.classes.User compiler = evaluator._EvaluatorCompiler(User) with expect_raises_message( evaluator.UnevaluatableError, "Cannot evaluate column: othername", ): compiler.process(User.name == Column("othername", String)) def test_raise_on_unannotated_unmatched_column(self): User = self.classes.User compiler = evaluator._EvaluatorCompiler(User) assert_raises_message( evaluator.UnevaluatableError, "Cannot evaluate column: foo", compiler.process, User.id == Column("foo", Integer), ) # if we let the above method through as we did # prior to [ticket:3366], we would get # AttributeError: 'User' object has no attribute 'foo' # u1 = User(id=5) # meth(u1) 
def test_true_false(self): User = self.classes.User eval_eq( User.name == False, # noqa testcases=[ (User(name="foo"), False), (User(name=True), False), (User(name=False), True), (None, None), ], ) eval_eq( User.name == True, # noqa testcases=[ (User(name="foo"), False), (User(name=True), True), (User(name=False), False), (None, None), ], ) def test_boolean_ops(self): User = self.classes.User eval_eq( and_(User.name == "foo", User.id == 1), testcases=[ (User(id=1, name="foo"), True), (User(id=2, name="foo"), False), (User(id=1, name="bar"), False), (User(id=2, name="bar"), False), (User(id=1, name=None), None), (None, None), ], ) eval_eq( or_(User.name == "foo", User.id == 1), testcases=[ (User(id=1, name="foo"), True), (User(id=2, name="foo"), True), (User(id=1, name="bar"), True), (User(id=2, name="bar"), False), (User(id=1, name=None), True), (User(id=2, name=None), None), (None, None), ], ) eval_eq( not_(User.id == 1), testcases=[ (User(id=1), False), (User(id=2), True), (User(id=None), None), ], ) @testing.combinations( lambda User: User.name + "_foo" == "named_foo", lambda User: User.name.startswith("nam"), lambda User: User.name.endswith("named"), ) def test_string_ops(self, expr): User = self.classes.User test_expr = testing.resolve_lambda(expr, User=User) eval_eq( test_expr, testcases=[ (User(name="named"), True), (User(name="othername"), False), ], ) def test_in(self): User = self.classes.User eval_eq( User.name.in_(["foo", "bar"]), testcases=[ (User(id=1, name="foo"), True), (User(id=2, name="bat"), False), (User(id=1, name="bar"), True), (User(id=1, name=None), None), (None, None), ], ) eval_eq( User.name.not_in(["foo", "bar"]), testcases=[ (User(id=1, name="foo"), False), (User(id=2, name="bat"), True), (User(id=1, name="bar"), False), (User(id=1, name=None), None), (None, None), ], ) def test_multiple_expressions(self): User = self.classes.User evaluator = compiler.process(User.id > 5, User.name == "ed") is_(evaluator(User(id=7, name="ed")), True) 
is_(evaluator(User(id=7, name="noted")), False) is_(evaluator(User(id=4, name="ed")), False) def test_in_tuples(self): User = self.classes.User eval_eq( tuple_(User.id, User.name).in_([(1, "foo"), (2, "bar")]), testcases=[ (User(id=1, name="foo"), True), (User(id=2, name="bat"), False), (User(id=1, name="bar"), False), (User(id=2, name="bar"), True), (User(id=1, name=None), None), (None, None), ], ) eval_eq( tuple_(User.id, User.name).not_in([(1, "foo"), (2, "bar")]), testcases=[ (User(id=1, name="foo"), False), (User(id=2, name="bat"), True), (User(id=1, name="bar"), True), (User(id=2, name="bar"), False), (User(id=1, name=None), None), (None, None), ], ) def test_null_propagation(self): User = self.classes.User eval_eq( (User.name == "foo") == (User.id == 1), testcases=[ (User(id=1, name="foo"), True), (User(id=2, name="foo"), False), (User(id=1, name="bar"), False), (User(id=2, name="bar"), True), (User(id=None, name="foo"), None), (User(id=None, name=None), None), (None, None), ], ) def test_hybrids(self, registry): @registry.mapped class SomeClass: __tablename__ = "sc" id = Column(Integer, primary_key=True) data = Column(String) @hybrid_property def foo_data(self): return self.data + "_foo" eval_eq( SomeClass.foo_data == "somedata_foo", testcases=[ (SomeClass(data="somedata"), True), (SomeClass(data="otherdata"), False), (SomeClass(data=None), None), ], ) def test_custom_op_no_impl(self): """test #3162""" User = self.classes.User with expect_raises_message( evaluator.UnevaluatableError, r"Custom operator '\^\^' can't be evaluated in " "Python unless it specifies", ): compiler.process(User.name.op("^^")("bar")) def test_custom_op(self): """test #3162""" User = self.classes.User eval_eq( User.name.op("^^", python_impl=lambda a, b: a + "_foo_" + b)("bar") == "name_foo_bar", testcases=[ (User(name="name"), True), (User(name="notname"), False), ], ) @testing.combinations( (lambda User: User.id + 5, "id", 10, 15, None), ( # note this one uses concat_op, not 
operator.add lambda User: User.name + " name", "name", "some value", "some value name", None, ), ( lambda User: User.id + "name", "id", 10, evaluator.UnevaluatableError, r"Cannot evaluate math operator \"add\" for " r"datatypes INTEGER, VARCHAR", ), ( lambda User: User.json + 12, "json", {"foo": "bar"}, evaluator.UnevaluatableError, r"Cannot evaluate math operator \"add\" for " r"datatypes LiberalJson\(\), INTEGER", ), ( lambda User: User.json + {"bar": "bat"}, "json", {"foo": "bar"}, evaluator.UnevaluatableError, r"Cannot evaluate concatenate operator \"concat_op\" for " r"datatypes LiberalJson\(\), LiberalJson\(\)", ), ( lambda User: User.json - 12, "json", {"foo": "bar"}, evaluator.UnevaluatableError, r"Cannot evaluate math operator \"sub\" for " r"datatypes LiberalJson\(\), INTEGER", ), ( lambda User: User.json - "foo", "json", {"foo": "bar"}, evaluator.UnevaluatableError, r"Cannot evaluate math operator \"sub\" for " r"datatypes LiberalJson\(\), VARCHAR", ), ) def test_math_op_type_exclusions( self, expr, attrname, initial_value, expected, message ): """test #8507""" User = self.classes.User expr = testing.resolve_lambda(expr, User=User) if expected is evaluator.UnevaluatableError: with expect_raises_message(evaluator.UnevaluatableError, message): compiler.process(expr) else: obj = User(**{attrname: initial_value}) new_value = compiler.process(expr)(obj) eq_(new_value, expected)
EvaluateTest
python
getsentry__sentry
src/sentry/auth/providers/oauth2.py
{ "start": 2477, "end": 4998 }
class ____(AuthView): access_token_url: str | None = None client_id: str | None = None client_secret: str | None = None def __init__( self, access_token_url: str | None = None, client_id: str | None = None, client_secret: str | None = None, *args: Any, **kwargs: Any, ) -> None: super().__init__(*args, **kwargs) if access_token_url is not None: self.access_token_url = access_token_url if client_id is not None: self.client_id = client_id if client_secret is not None: self.client_secret = client_secret def get_token_params(self, code: str, redirect_uri: str) -> Mapping[str, str | None]: return { "grant_type": "authorization_code", "code": code, "redirect_uri": redirect_uri, "client_id": self.client_id, "client_secret": self.client_secret, } def exchange_token( self, request: HttpRequest, pipeline: AuthHelper, code: str ) -> Mapping[str, Any]: # TODO: this needs the auth yet data = self.get_token_params(code=code, redirect_uri=_get_redirect_url()) req = safe_urlopen(self.access_token_url, data=data) body = safe_urlread(req) if req.headers["Content-Type"].startswith("application/x-www-form-urlencoded"): return dict(parse_qsl(body)) return orjson.loads(body) def dispatch(self, request: HttpRequest, pipeline: AuthHelper) -> HttpResponseBase: error = request.GET.get("error") state = request.GET.get("state") code = request.GET.get("code") if error: return pipeline.error(error) if state != pipeline.fetch_state("state"): return pipeline.error(ERR_INVALID_STATE) if code is None: return pipeline.error("no code was provided") data = self.exchange_token(request, pipeline, code) if "error_description" in data: return pipeline.error(data["error_description"]) if "error" in data: logging.info("Error exchanging token: %s", data["error"]) return pipeline.error("Unable to retrieve your token") # we can either expect the API to be implicit and say "im looking for # blah within state data" or we need to pass implementation + call a # hook here pipeline.bind_state("data", data) return 
pipeline.next_step()
OAuth2Callback
python
pytorch__pytorch
torch/_export/pass_base.py
{ "start": 1435, "end": 18513 }
class ____(PassBase): """ Interpreter-based pass class to help users maintain the IR spec while writing transformations. """ @staticmethod def _create_dummy_node_metadata(): return NodeMetadata({"stack_trace": "".join(traceback.format_stack(limit=1))}) class ExportTracer(PythonKeyTracer): def __init__( self, callback: "_ExportPassBaseDeprecatedDoNotUse", codegen: CodeGen ) -> None: super().__init__() self.callback = callback self.root = torch.nn.Module() self.graph = torch.fx.Graph() self.graph.set_codegen(codegen) self.tensor_attrs: dict[str, torch.Tensor] = {} # type: ignore[assignment] self.fake_tensor_mode: Optional[FakeTensorMode] = None self.submodules: dict[torch.nn.Module, str] = {} def trace(self) -> None: # type: ignore[override] raise ExportPassBaseError("ExportTracer doesn't support trace().") def create_arg(self, a: Argument) -> torch.fx.Node: if isinstance(a, torch.nn.Module): if a not in self.submodules: name_submodule = f"submodule_{len(self.submodules)}" self.root.add_module(name_submodule, a) self.submodules[a] = name_submodule elif isinstance(a, FakeTensor): if not hasattr(a, "constant") or a.constant is None: raise ExportPassBaseError(f"Cannot add {a} to graph.") a = a.constant node = super().create_arg(a) if ( isinstance(a, torch.Tensor) and isinstance(node, torch.fx.Node) and node.op == "get_attr" ): self.set_metadata(node, a) self.callback.on_attr(ProxyValue(a, node)) return node def set_metadata( self, node: torch.fx.Node, value: Argument, ) -> None: # propagate the fake tensor or sym nodes def make_val( x: Argument, ) -> Union[ FakeTensor, torch.SymInt, torch.SymFloat, torch.SymBool, int, float, bool, str, None, ]: if isinstance(x, FakeTensor): return x elif isinstance(x, torch.Tensor): if x.is_quantized: # TODO (tmanlaibaatar) properly support Quantized FakeTensor x = torch.dequantize(x) try: assert self.fake_tensor_mode is not None # TODO we should allocate static shapes # for param/buffer values if isinstance(x, torch.nn.Parameter): 
fake_tensor = self.fake_tensor_mode.from_tensor( x, static_shapes=True ) else: fake_tensor = self.fake_tensor_mode.from_tensor(x) except UnsupportedFakeTensorException: # TODO: This is just a workaround to get over the # x.as_subclass error print( "Fakeifying a Tensor subclass is not supported \ right now. Instead a TensorMetadata is used." ) fake_tensor = None return fake_tensor elif isinstance( x, ( torch.SymInt, torch.SymFloat, torch.SymBool, int, float, bool, str, ), ): return x else: return None node.meta["val"] = pytree.tree_map(make_val, value) # Set the tensor_metadata for values that do not have a corresponding FakeTensor def make_tensor_meta(x: Argument) -> Optional[TensorMetadata]: if not isinstance(x, FakeTensor) and isinstance(x, torch.Tensor): if x.is_quantized: # TODO (tmanlaibaatar) properly support Quantized FakeTensor x = torch.dequantize(x) try: assert self.fake_tensor_mode is not None _ = self.fake_tensor_mode.from_tensor(x) tensor_meta = None except UnsupportedFakeTensorException: # TODO: This is just a workaround to get over the # x.as_subclass error tensor_meta = _extract_tensor_metadata(x) return tensor_meta else: return None node.meta["tensor_meta"] = pytree.tree_map(make_tensor_meta, value) class ExportInterpreter(fx.Interpreter): def __init__( self, callback: "_ExportPassBaseDeprecatedDoNotUse", gm: fx.GraphModule ) -> None: super().__init__(gm) self.callback = callback self.node: torch.fx.Node = next(iter(gm.graph.nodes)) # pyrefly: ignore [bad-override] def placeholder( self, target: str, # type: ignore[override] args: tuple[Argument, ...], kwargs: dict[str, Argument], ) -> ProxyValue: arg = super().placeholder(target, args, kwargs) return self.callback.placeholder(target, arg, NodeMetadata(self.node.meta)) def output( self, target: torch.fx.node.Target, args: tuple[Argument, ...], kwargs: dict[str, Argument], ) -> ProxyValue: return self.callback.output(args[0], NodeMetadata(self.node.meta)).data # type: ignore[return-value] def 
call_function( self, target: torch.fx.node.Target, args: tuple[Argument, ...], kwargs: dict[str, Argument], ) -> ProxyValue: meta = NodeMetadata(self.node.meta) if target is operator.getitem: value, key = args return self.callback.call_getitem(value, key, meta) elif getattr(target, "__module__", None) in { "_operator", "builtins", "math", }: assert callable(target) return self.callback.call_sym(target, args, meta) elif target in _TORCH_SYM_OPS: assert callable(target) return self.callback.call_sym(target, args, meta) elif isinstance( target, (torch._ops.OpOverload, torch._ops.OpOverloadPacket) ): return self.callback.call_operator( target, args, kwargs, meta, ) elif target is torch.ops.higher_order.cond: pred, true_fn, false_fn, inputs = args return self.callback.call_cond(pred, true_fn, false_fn, inputs, meta) elif target is torch.ops.higher_order.map_impl: f, mapped_args, operands = args # type: ignore[assignment] return self.callback.call_map(f, mapped_args, operands, meta) # For other unregistered HigherOrderOps, just interpret them blindly elif isinstance(target, torch._ops.HigherOrderOperator): return self.callback._fx( "call_function", target, args, kwargs, meta, ) else: raise ExportPassBaseError(f"Unsupported target type: {target}") def get_attr( # type: ignore[override] self, target: str, args: tuple[Argument, ...], kwargs: dict[str, Argument], ) -> Argument: return super().get_attr(target, args, kwargs) def call_module( self, target: torch.fx.node.Target, args: tuple[Argument, ...], kwargs: dict[str, Argument], ) -> None: raise ExportPassBaseError("call_module is not supported.") def call_method( # type: ignore[override] self, target: str, args: tuple[Argument, ...], kwargs: dict[str, Argument], ) -> None: raise ExportPassBaseError("call_method is not supported.") def run_node(self, n: torch.fx.Node) -> Argument: self.node = n self.callback.node_debug_str = n.format_node() return super().run_node(n) def __init__(self) -> None: self.interpreter = 
PropagateUnbackedSymInts( torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph()) ) self.tracer = self.ExportTracer(self, CodeGen()) self.fake_tensor_mode: Optional[FakeTensorMode] = None self._initialized = True self.node_debug_str: typing.Optional[str] = None def _fx( self, kind: str, target: torch.fx.node.Target, args: tuple[Argument, ...], kwargs: dict[str, Argument], meta: NodeMetadata, ) -> ProxyValue: args_data, kwargs_data = pytree.tree_map_only( ProxyValue, lambda x: x.data, (args, kwargs) ) res_data = getattr(self.interpreter, kind)(target, args_data, kwargs_data) args_proxy, kwargs_proxy = pytree.tree_map_only( ProxyValue, lambda x: x.proxy, (args, kwargs) ) name = None if isinstance(target, torch._ops.OpOverload): name = self.tracer.graph._target_to_str(target.overloadpacket.__name__) res_proxy = self.tracer.create_proxy( kind, target, args_proxy, kwargs_proxy, name=name ) res_proxy.node.meta.update(meta.data) if self.fake_tensor_mode and (shape_env := self.fake_tensor_mode.shape_env): if symbol_to_path := compute_unbacked_bindings(shape_env, res_data): res_proxy.node.meta["unbacked_bindings"] = symbol_to_path self.tracer.set_metadata(res_proxy.node, res_data) return ProxyValue(res_data, res_proxy) def inputs(self, graph_module: torch.fx.GraphModule) -> list[Argument]: # TODO(angelayi): Update this with what we decide to do for metadata in # the exported graph module if (args := graph_module.meta.get("args", None)) is not None: return list(args) def extract_input(node: torch.fx.Node) -> Optional[FakeTensor]: if "val" in node.meta: fake = node.meta["val"] if hasattr(fake, "constant") and fake.constant is not None: return fake.constant return fake elif tensor_meta := node.meta.get("tensor_meta"): assert self.fake_tensor_mode is not None return FakeTensor( self.fake_tensor_mode, torch.empty( tensor_meta.shape, dtype=tensor_meta.dtype, device="meta", requires_grad=tensor_meta.requires_grad, memory_format=tensor_meta.memory_format, ), torch.device("cpu"), 
) elif len(node.users) == 0: return None raise ExportPassBaseError( f"Cannot construct an input for graph module: {graph_module}.", ) return [ extract_input(node) for node in graph_module.graph.nodes if node.op == "placeholder" ] def on_attr(self, attr: ProxyValue) -> None: pass def placeholder(self, name: str, arg: Argument, meta: NodeMetadata) -> ProxyValue: arg_proxy = self.tracer.create_proxy("placeholder", name, (), {}) arg_proxy.node.meta = meta.data self.tracer.set_metadata(arg_proxy.node, arg) return ProxyValue(arg, arg_proxy) def call_operator( self, op, args: tuple[Argument, ...], kwargs: dict[str, Argument], meta: NodeMetadata, ) -> ProxyValue: return self._fx("call_function", op, args, kwargs, meta) def call_sym( self, target: Fn, args: tuple[Argument, ...], meta: NodeMetadata, ) -> ProxyValue: return self._fx("call_function", target, args, {}, meta) def call_cond( self, pred: ProxyValue, true_fn: torch.fx.GraphModule, false_fn: torch.fx.GraphModule, inputs: list[Argument], meta: NodeMetadata, ) -> ProxyValue: true_branch = self.call_submodule(true_fn, tuple(inputs)) false_branch = self.call_submodule(false_fn, tuple(inputs)) assert true_branch is not None assert false_branch is not None return self._fx( "call_function", torch.ops.higher_order.cond, (pred, true_branch.graph_module, false_branch.graph_module, list(inputs)), {}, meta, ) def call_map( self, f: torch.fx.GraphModule, mapped_args: list[ProxyValue], operands: list[ProxyValue], meta: NodeMetadata, ) -> ProxyValue: xs = _unstack_pytree([arg.data for arg in mapped_args])[0] f_branch = self.call_submodule(f, tuple(xs + [arg.data for arg in operands])) assert f_branch is not None return self._fx( "call_function", torch.ops.higher_order.map_impl, (f_branch.graph_module, mapped_args, operands), {}, meta, ) def call_getitem( self, value: ProxyValue, key: int, meta: NodeMetadata ) -> ProxyValue: return self._fx("call_function", operator.getitem, (value, key), {}, meta) def output(self, results: 
list[Argument], meta: NodeMetadata) -> ProxyValue: return self._fx("output", "output", (results,), {}, meta) def call_submodule( self, graph_module: fx.GraphModule, inputs: tuple[Argument, ...] ) -> PassResult: prev_tracer, self.tracer = ( self.tracer, self.ExportTracer(self, graph_module.graph._codegen), ) self.tracer.fake_tensor_mode = prev_tracer.fake_tensor_mode interpreter = self.ExportInterpreter(self, graph_module) # pyrefly: ignore [bad-assignment] prev_interpreter, self.interpreter = ( self.interpreter, torch.fx.Interpreter( # type: ignore[assignment] torch.fx.GraphModule(torch.nn.Module(), torch.fx.Graph()) ), ) inputs_data = pytree.tree_map_only(ProxyValue, lambda x: x.data, inputs) with fx_traceback.preserve_node_meta(): interpreter.run(*inputs_data) new_graph_module = torch.fx.GraphModule(self.tracer.root, self.tracer.graph) self.tracer = prev_tracer self.interpreter = prev_interpreter return PassResult( new_graph_module, True, ) def call(self, graph_module: fx.GraphModule) -> PassResult: if not getattr(self, "_initialized", False): raise ExportPassBaseError( "ExportPass is not initialized with __init__().", ) inputs = self.inputs(graph_module) fake_tensor_mode = None for i in inputs: if isinstance(i, FakeTensor): assert fake_tensor_mode is None or fake_tensor_mode is i.fake_mode, ( "Multiple fake tensor mode detected." ) fake_tensor_mode = i.fake_mode if fake_tensor_mode is None: self.tracer.fake_tensor_mode = FakeTensorMode(allow_non_fake_inputs=True) fake_tensor_mode = nullcontext() # type: ignore[assignment] dispatcher_mode = nullcontext() # type: ignore[assignment] else: fake_tensor_mode.allow_non_fake_inputs = True self.tracer.fake_tensor_mode = fake_tensor_mode dispatcher_mode = enable_python_dispatcher() # type: ignore[assignment] self.fake_tensor_mode = self.tracer.fake_tensor_mode with fake_tensor_mode, dispatcher_mode: # type: ignore[assignment, union-attr] result = self.call_submodule(graph_module, tuple(inputs)) return result
_ExportPassBaseDeprecatedDoNotUse
python
has2k1__plotnine
plotnine/scales/scale_discrete.py
{ "start": 659, "end": 9431 }
class ____( scale[ RangeDiscrete, DiscreteBreaksUser, DiscreteLimitsUser, Literal["legend"] | None, ] ): """ Base class for all discrete scales """ limits: DiscreteLimitsUser = None """ Limits of the scale. These are the categories (unique values) of the variables. If is only a subset of the values, those that are left out will be treated as missing data and represented with a `na_value`. """ breaks: DiscreteBreaksUser = True """ List of major break points. Or a callable that takes a tuple of limits and returns a list of breaks. If `True`, automatically calculate the breaks. """ drop: bool = True """ Whether to drop unused categories from the scale """ na_translate: bool = True """ If `True` translate missing values and show them. If `False` remove missing values. """ na_value: Any = np.nan """ If `na_translate=True`, what aesthetic value should be assigned to the missing values. This parameter does not apply to position scales where `nan` is always placed on the right. """ guide: Literal["legend"] | None = "legend" def __post_init__(self): super().__post_init__() self._range = RangeDiscrete() @property def final_limits(self) -> Sequence[str]: if self.is_empty(): return ("0", "1") if self.limits is None: return tuple(self._range.range) elif callable(self.limits): return tuple(self.limits(self._range.range)) else: return tuple(self.limits) def train(self, x: AnyArrayLike, drop=False): """ Train scale Parameters ---------- x: A column of data to train over drop : Whether to drop(not include) unused categories A discrete range is stored in a list """ if not len(x): return na_rm = not self.na_translate self._range.train(x, drop, na_rm=na_rm) def dimension(self, expand=(0, 0, 0, 0), limits=None): """ Get the phyical size of the scale Unlike limits, this always returns a numeric vector of length 2 """ if limits is None: limits = self.final_limits return expand_range_distinct((0, len(limits)), expand) def expand_limits( self, limits: Sequence[str], expand: tuple[float, 
float] | tuple[float, float, float, float], coord_limits: tuple[float, float], trans: trans, ) -> range_view: """ Calculate the final range in coordinate space """ # Turn discrete limits into a tuple of continuous limits is_empty = self.is_empty() or len(limits) == 0 climits = (0, 1) if is_empty else (1, len(limits)) if coord_limits is not None: # - Override None in coord_limits # - Expand limits in coordinate space # - Remove any computed infinite values & c0, c1 = coord_limits climits = ( climits[0] if c0 is None else c0, climits[1] if c1 is None else c1, ) return expand_range(climits, expand, trans) def view( self, limits: Optional[Sequence[str]] = None, range: Optional[CoordRange] = None, ) -> scale_view: """ Information about the trained scale """ if limits is None: limits = self.final_limits if range is None: range = self.dimension(limits=limits) breaks_d = self.get_breaks(limits) breaks = self.map(pd.Categorical(breaks_d)) # pyright: ignore[reportArgumentType] minor_breaks = [] labels = self.get_labels(breaks_d) sv = scale_view( scale=self, aesthetics=self.aesthetics, name=self.name, limits=limits, range=range, breaks=breaks, labels=labels, minor_breaks=minor_breaks, ) return sv def default_expansion(self, mult=0, add=0.6, expand=True): """ Get the default expansion for a discrete scale """ return super().default_expansion(mult, add, expand) def palette(self, n: int) -> Sequence[Any]: """ Map integer `n` to `n` values of the scale """ return none_pal()(n) def map(self, x, limits: Optional[Sequence[str]] = None) -> Sequence[Any]: """ Map values in x to a palette """ if limits is None: limits = self.final_limits n = sum(~pd.isna(list(limits))) pal = self.palette(n) if isinstance(pal, dict): # manual palette with specific assignments pal_match = [] for val in x: try: pal_match.append(pal[val]) except KeyError: pal_match.append(self.na_value) else: if not isinstance(pal, np.ndarray): pal = np.asarray(pal, dtype=object) idx = np.asarray(match(x, limits)) try: 
pal_match = [pal[i] if i >= 0 else None for i in idx] except IndexError: # Deal with missing data # - Insert NaN where there is no match pal = np.hstack((pal.astype(object), np.nan)) idx = np.clip(idx, 0, len(pal) - 1) pal_match = list(pal[idx]) if self.na_translate: bool_pal_match = pd.isna(pal_match) if len(bool_pal_match.shape) > 1: # linetypes take tuples, these return 2d bool_pal_match = bool_pal_match.any(axis=1) bool_idx = pd.isna(x) | bool_pal_match if bool_idx.any(): pal_match = [ x if i else self.na_value for x, i in zip(pal_match, ~bool_idx) ] return pal_match def get_breaks( self, limits: Optional[Sequence[str]] = None ) -> Sequence[str]: """ Return an ordered list of breaks The form is suitable for use by the guides e.g. ['fair', 'good', 'very good', 'premium', 'ideal'] """ if self.is_empty(): return [] if limits is None: limits = self.final_limits if self.breaks in (None, False): breaks = [] elif self.breaks is True: breaks = list(limits) elif callable(self.breaks): breaks = self.breaks(limits) else: breaks = list(self.breaks) return breaks def get_bounded_breaks( self, limits: Optional[Sequence[str]] = None ) -> Sequence[str]: """ Return Breaks that are within limits """ if limits is None: limits = self.final_limits lookup_limits = set(limits) return [b for b in self.get_breaks() if b in lookup_limits] def get_labels( self, breaks: Optional[Sequence[str]] = None ) -> Sequence[str]: """ Generate labels for the legend/guide breaks """ if self.is_empty(): return [] if breaks is None: breaks = self.get_breaks() # The labels depend on the breaks if the breaks. 
# No breaks, no labels if breaks in (None, False) or self.labels in (None, False): return [] elif self.labels is True: return [str(b) for b in breaks] elif callable(self.labels): return self.labels(breaks) # if a dict is used to rename some labels elif isinstance(self.labels, dict): return [ str(self.labels[b]) if b in self.labels else str(b) for b in breaks ] else: # Return the labels in the order that they match with # the breaks. label_lookup = dict(zip(self.get_breaks(), self.labels)) return [label_lookup[b] for b in breaks] def transform_df(self, df: pd.DataFrame) -> pd.DataFrame: """ Transform dataframe """ # Discrete scales do not do transformations return df def transform(self, x): """ Transform array|series x """ # Discrete scales do not do transformations return x def inverse_df(self, df: pd.DataFrame) -> pd.DataFrame: """ Inverse Transform dataframe """ # Discrete scales do not do transformations return df
scale_discrete
python
scipy__scipy
scipy/stats/_continuous_distns.py
{ "start": 130082, "end": 134591 }
class ____(rv_continuous): r"""A right-skewed Gumbel continuous random variable. %(before_notes)s See Also -------- gumbel_l, gompertz, genextreme Notes ----- The probability density function for `gumbel_r` is: .. math:: f(x) = \exp(-(x + e^{-x})) for real :math:`x`. The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett distribution. It is also related to the extreme value distribution, log-Weibull and Gompertz distributions. %(after_notes)s %(example)s """ def _shape_info(self): return [] def _pdf(self, x): # gumbel_r.pdf(x) = exp(-(x + exp(-x))) return np.exp(self._logpdf(x)) def _logpdf(self, x): return -x - np.exp(-x) def _cdf(self, x): return np.exp(-np.exp(-x)) def _logcdf(self, x): return -np.exp(-x) def _ppf(self, q): return -np.log(-np.log(q)) def _sf(self, x): return -sc.expm1(-np.exp(-x)) def _isf(self, p): return -np.log(-np.log1p(-p)) def _stats(self): return _EULER, np.pi*np.pi/6.0, 12*np.sqrt(6)/np.pi**3 * _ZETA3, 12.0/5 def _entropy(self): # https://en.wikipedia.org/wiki/Gumbel_distribution return _EULER + 1. @_call_super_mom @inherit_docstring_from(rv_continuous) def fit(self, data, *args, **kwds): data, floc, fscale = _check_fit_input_parameters(self, data, args, kwds) # By the method of maximum likelihood, the estimators of the # location and scale are the roots of the equations defined in # `func` and the value of the expression for `loc` that follows. # The first `func` is a first order derivative of the log-likelihood # equation and the second is from Source: Statistical Distributions, # 3rd Edition. Evans, Hastings, and Peacock (2000), Page 101. def get_loc_from_scale(scale): return -scale * (sc.logsumexp(-data / scale) - np.log(len(data))) if fscale is not None: # if the scale is fixed, the location can be analytically # determined. scale = fscale loc = get_loc_from_scale(scale) else: # A different function is solved depending on whether the location # is fixed. 
if floc is not None: loc = floc # equation to use if the location is fixed. # note that one cannot use the equation in Evans, Hastings, # and Peacock (2000) (since it assumes that the derivative # w.r.t. the log-likelihood is zero). however, it is easy to # derive the MLE condition directly if loc is fixed def func(scale): term1 = (loc - data) * np.exp((loc - data) / scale) + data term2 = len(data) * (loc + scale) return term1.sum() - term2 else: # equation to use if both location and scale are free def func(scale): sdata = -data / scale wavg = _average_with_log_weights(data, logweights=sdata) return data.mean() - wavg - scale # set brackets for `root_scalar` to use when optimizing over the # scale such that a root is likely between them. Use user supplied # guess or default 1. brack_start = kwds.get('scale', 1) lbrack, rbrack = brack_start / 2, brack_start * 2 # if a root is not between the brackets, iteratively expand them # until they include a sign change, checking after each bracket is # modified. def interval_contains_root(lbrack, rbrack): # return true if the signs disagree. return (np.sign(func(lbrack)) != np.sign(func(rbrack))) while (not interval_contains_root(lbrack, rbrack) and (lbrack > 0 or rbrack < np.inf)): lbrack /= 2 rbrack *= 2 res = optimize.root_scalar(func, bracket=(lbrack, rbrack), rtol=1e-14, xtol=1e-14) scale = res.root loc = floc if floc is not None else get_loc_from_scale(scale) return loc, scale gumbel_r = gumbel_r_gen(name='gumbel_r')
gumbel_r_gen