language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | allegroai__clearml | clearml/backend_api/services/v2_23/events.py | {
"start": 120615,
"end": 125660
} | class ____(Request):
"""
Get 'log' events for this task
:param task: Task ID
:type task: str
:param batch_size: The amount of log events to return
:type batch_size: int
:param navigate_earlier: If set then log events are retrieved from the latest
to the earliest ones (in timestamp descending order, unless order='asc').
Otherwise from the earliest to the latest ones (in timestamp ascending order,
unless order='desc'). The default is True
:type navigate_earlier: bool
:param from_timestamp: Epoch time in UTC ms to use as the navigation start.
Optional. If not provided, reference timestamp is determined by the
'navigate_earlier' parameter (if true, reference timestamp is the last
timestamp and if false, reference timestamp is the first timestamp)
:type from_timestamp: float
:param order: If set, changes the order in which log events are returned based
on the value of 'navigate_earlier'
:type order: str
"""
_service = "events"
_action = "get_task_log"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"batch_size": {
"description": "The amount of log events to return",
"type": "integer",
},
"from_timestamp": {
"description": "Epoch time in UTC ms to use as the navigation start. Optional. If not provided, reference timestamp is determined by the 'navigate_earlier' parameter (if true, reference timestamp is the last timestamp and if false, reference timestamp is the first timestamp)",
"type": "number",
},
"navigate_earlier": {
"description": "If set then log events are retrieved from the latest to the earliest ones (in timestamp descending order, unless order='asc'). Otherwise from the earliest to the latest ones (in timestamp ascending order, unless order='desc'). The default is True",
"type": "boolean",
},
"order": {
"description": "If set, changes the order in which log events are returned based on the value of 'navigate_earlier'",
"enum": ["asc", "desc"],
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
batch_size: Optional[int] = None,
navigate_earlier: Optional[bool] = None,
from_timestamp: Optional[float] = None,
order: Optional[str] = None,
**kwargs: Any
) -> None:
super(GetTaskLogRequest, self).__init__(**kwargs)
self.task = task
self.batch_size = batch_size
self.navigate_earlier = navigate_earlier
self.from_timestamp = from_timestamp
self.order = order
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("batch_size")
def batch_size(self) -> Optional[int]:
return self._property_batch_size
@batch_size.setter
def batch_size(self, value: Optional[int]) -> None:
if value is None:
self._property_batch_size = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "batch_size", six.integer_types)
self._property_batch_size = value
@schema_property("navigate_earlier")
def navigate_earlier(self) -> Optional[bool]:
return self._property_navigate_earlier
@navigate_earlier.setter
def navigate_earlier(self, value: Optional[bool]) -> None:
if value is None:
self._property_navigate_earlier = None
return
self.assert_isinstance(value, "navigate_earlier", (bool,))
self._property_navigate_earlier = value
@schema_property("from_timestamp")
def from_timestamp(self) -> Optional[float]:
return self._property_from_timestamp
@from_timestamp.setter
def from_timestamp(self, value: Optional[float]) -> None:
if value is None:
self._property_from_timestamp = None
return
self.assert_isinstance(value, "from_timestamp", six.integer_types + (float,))
self._property_from_timestamp = value
@schema_property("order")
def order(self) -> Optional[str]:
return self._property_order
@order.setter
def order(self, value: Optional[str]) -> None:
if value is None:
self._property_order = None
return
self.assert_isinstance(value, "order", six.string_types)
self._property_order = value
| GetTaskLogRequest |
python | numpy__numpy | numpy/f2py/tests/test_value_attrspec.py | {
"start": 36,
"end": 330
} | class ____(util.F2PyTest):
sources = [util.getpath("tests", "src", "value_attrspec", "gh21665.f90")]
# gh-21665
@pytest.mark.slow
def test_gh21665(self):
inp = 2
out = self.module.fortfuncs.square(inp)
exp_out = 4
assert out == exp_out
| TestValueAttr |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchValue1.py | {
"start": 3315,
"end": 3532
} | class ____(Enum):
bar = auto()
def __str__(self) -> str:
match self:
case Foo.bar:
return "bar"
case x:
reveal_type(x, expected_text="Never")
| Foo |
python | paramiko__paramiko | paramiko/client.py | {
"start": 31919,
"end": 32779
} | class ____:
"""
Interface for defining the policy that `.SSHClient` should use when the
SSH server's hostname is not in either the system host keys or the
application's keys. Pre-made classes implement policies for automatically
adding the key to the application's `.HostKeys` object (`.AutoAddPolicy`),
and for automatically rejecting the key (`.RejectPolicy`).
This function may be used to ask the user to verify the key, for example.
"""
def missing_host_key(self, client, hostname, key):
"""
Called when an `.SSHClient` receives a server key for a server that
isn't in either the system or local `.HostKeys` object. To accept
the key, simply return. To reject, raised an exception (which will
be passed to the calling application).
"""
pass
| MissingHostKeyPolicy |
python | getsentry__sentry | src/sentry/api/endpoints/auth_login.py | {
"start": 756,
"end": 3315
} | class ____(Endpoint, OrganizationMixin):
publish_status = {
"POST": ApiPublishStatus.PRIVATE,
}
owner = ApiOwner.ENTERPRISE
# Disable authentication and permission requirements.
permission_classes = ()
def dispatch(self, request: HttpRequest, *args, **kwargs) -> Response:
self.active_organization = determine_active_organization(request)
return super().dispatch(request, *args, **kwargs)
def post(
self, request: Request, organization: Organization | None = None, *args, **kwargs
) -> Response:
"""
Process a login request via username/password. SSO login is handled
elsewhere.
"""
login_form = AuthenticationForm(request, request.data)
# Rate limit logins
is_limited = ratelimiter.backend.is_limited(
"auth:login:username:{}".format(
md5_text(login_form.clean_username(request.data.get("username"))).hexdigest()
),
limit=10,
window=60, # 10 per minute should be enough for anyone
)
if is_limited:
errors = {"__all__": [login_form.error_messages["rate_limited"]]}
metrics.incr(
"login.attempt", instance="rate_limited", skip_internal=True, sample_rate=1.0
)
return self.respond_with_error(errors)
if not login_form.is_valid():
metrics.incr("login.attempt", instance="failure", skip_internal=True, sample_rate=1.0)
return self.respond_with_error(login_form.errors)
user = login_form.get_user()
auth.login(request, user, organization_id=organization.id if organization else None)
metrics.incr("login.attempt", instance="success", skip_internal=True, sample_rate=1.0)
if not user.is_active:
return Response(
{
"nextUri": "/auth/reactivate/",
"user": serialize(user, user, DetailedSelfUserSerializer()),
}
)
redirect_url = auth.get_org_redirect_url(
request, self.active_organization.organization if self.active_organization else None
)
return Response(
{
"nextUri": auth.get_login_redirect(request, redirect_url),
"user": serialize(user, user, DetailedSelfUserSerializer()),
}
)
def respond_with_error(self, errors):
return Response({"detail": "Login attempt failed", "errors": errors}, status=400)
| AuthLoginEndpoint |
python | python__mypy | mypy/nodes.py | {
"start": 73332,
"end": 74463
} | class ____(Expression):
"""Index expression x[y].
Also wraps type application such as List[int] as a special form.
"""
__slots__ = ("base", "index", "method_type", "analyzed", "as_type")
__match_args__ = ("base", "index")
base: Expression
index: Expression
# Inferred __getitem__ method type
method_type: mypy.types.Type | None
# If not None, this is actually semantically a type application
# Class[type, ...] or a type alias initializer.
analyzed: TypeApplication | TypeAliasExpr | None
# If this value expression can also be parsed as a valid type expression,
# represents the type denoted by the type expression.
# None means "is not a type expression".
as_type: NotParsed | mypy.types.Type | None
def __init__(self, base: Expression, index: Expression) -> None:
super().__init__()
self.base = base
self.index = index
self.method_type = None
self.analyzed = None
self.as_type = NotParsed.VALUE
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_index_expr(self)
| IndexExpr |
python | joke2k__faker | faker/providers/person/de_AT/__init__.py | {
"start": 44,
"end": 30551
} | class ____(PersonProvider):
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}-{{last_name}}",
"{{prefix_male}} {{first_name_male}} {{last_name}}",
)
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}-{{last_name}}",
"{{prefix_female}} {{first_name_female}} {{last_name}}",
)
formats = formats_male + formats_female
# source: https://www.data.gv.at/katalog/dataset/87fc82a0-0042-49c8-b6f9-2602cd3dc17a
first_names_male = (
"Aaron",
"Adam",
"Adrian",
"Adriano",
"Alan",
"Aleksander",
"Alex",
"Alexandar",
"Alexander",
"Andreas",
"Andrej",
"Angelo",
"Anton",
"Antonio",
"Antonius",
"Arda",
"Armin",
"Aron",
"Arthur",
"Aurelio",
"Axel",
"Bastian",
"Ben",
"Benedict",
"Benedikt",
"Beni",
"Benjamin",
"Bernhard",
"Boris",
"Bruno",
"Calvin",
"Carl",
"Carlo",
"Chris",
"Christian",
"Christoph",
"Christopher",
"Clemens",
"Constantin",
"Cornelius",
"Cristiano",
"Damian",
"Daniel",
"Danilo",
"Dario",
"Darius",
"Darko",
"David",
"Dennis",
"Dominik",
"Eduard",
"Elias",
"Elyas",
"Emanuel",
"Emil",
"Emilian",
"Emmanuel",
"Eric",
"Erik",
"Erwin",
"Fabian",
"Fabio",
"Felix",
"Ferdinand",
"Fernando",
"Filip",
"Finn",
"Florentin",
"Florian",
"Florin",
"Franz",
"Frederik",
"Fridolin",
"Friedrich",
"Gabriel",
"Georg",
"Gregor",
"Gustav",
"Heinrich",
"Henri",
"Henrik",
"Henry",
"Hubert",
"Hugo",
"Igor",
"Ilias",
"Isa",
"Ismail",
"Jacob",
"Jakob",
"James",
"Jamie",
"Jan",
"Jannik",
"Jannis",
"Jasper",
"Joel",
"Johann",
"Johannes",
"John",
"Jonas",
"Jonathan",
"Josef",
"Joseph",
"Joshua",
"Julian",
"Julius",
"Justin",
"Justus",
"Kai",
"Karim",
"Karl",
"Kevin",
"Kilian",
"Konrad",
"Konstantin",
"Kristian",
"Lars",
"Laurenz",
"Laurin",
"Lean",
"Leander",
"Lennard",
"Lennart",
"Leo",
"Leon",
"Leonard",
"Leonardo",
"Leonhard",
"Leopold",
"Levi",
"Liam",
"Lino",
"Linus",
"Lionel",
"Lorenz",
"Lorenzo",
"Louis",
"Luca",
"Lucas",
"Luis",
"Luka",
"Lukas",
"Maksim",
"Manuel",
"Marc",
"Marcel",
"Marco",
"Marcus",
"Mario",
"Marius",
"Mark",
"Marko",
"Markus",
"Martin",
"Marvin",
"Mateo",
"Matheo",
"Mathias",
"Matteo",
"Matthias",
"Maurice",
"Max",
"Maximilian",
"Merlin",
"Mert",
"Michael",
"Mika",
"Mike",
"Milan",
"Milo",
"Moritz",
"Natan",
"Nathan",
"Nicholas",
"Nick",
"Nico",
"Nicolai",
"Nicolas",
"Niklas",
"Niko",
"Nikola",
"Nikolai",
"Nikolas",
"Nikolaus",
"Nils",
"Nino",
"Noah",
"Noel",
"Oliver",
"Oscar",
"Oskar",
"Pascal",
"Patrick",
"Patrik",
"Paul",
"Peter",
"Philip",
"Philipp",
"Phillip",
"Raffael",
"Ralph",
"Raphael",
"Rene",
"Ricardo",
"Richard",
"Robert",
"Robin",
"Roman",
"Ruben",
"Sam",
"Samuel",
"Sandro",
"Sascha",
"Sebastian",
"Severin",
"Simon",
"Stefan",
"Stephan",
"Steven",
"Sven",
"Teodor",
"Theo",
"Theodor",
"Thomas",
"Tim",
"Timo",
"Timon",
"Tobias",
"Tom",
"Tristan",
"Valentin",
"Valentino",
"Victor",
"Viktor",
"Vincent",
"Vito",
"William",
"Xavier",
)
# source: https://www.data.gv.at/katalog/dataset/87fc82a0-0042-49c8-b6f9-2602cd3dc17a
first_names_female = (
"Adelina",
"Adriana",
"Ajna",
"Alara",
"Aleksandra",
"Alena",
"Alexa",
"Alexandra",
"Alexia",
"Alice",
"Alma",
"Amanda",
"Amelia",
"Amelie",
"Anabella",
"Anastasia",
"Andjela",
"Andjelina",
"Andrea",
"Angela",
"Angelika",
"Angelina",
"Anika",
"Anita",
"Anja",
"Anna",
"Anna-Lena",
"Anna-Maria",
"Annabell",
"Annabella",
"Annabelle",
"Annalena",
"Anne",
"Annika",
"Antonella",
"Antonia",
"Ariana",
"Ariane",
"Aurelia",
"Aurora",
"Ava",
"Aylin",
"Barbara",
"Beatrice",
"Bernadette",
"Berra",
"Bianca",
"Carina",
"Carla",
"Carlotta",
"Carolina",
"Caroline",
"Catharina",
"Cecilia",
"Charlotte",
"Christina",
"Christine",
"Claire",
"Clara",
"Clarissa",
"Claudia",
"Constanze",
"Cristina",
"Dana",
"Daniela",
"Denise",
"Diana",
"Dilara",
"Domenica",
"Dora",
"Eda",
"Edda",
"Ela",
"Elena",
"Eleonora",
"Elina",
"Elisa",
"Elisabeth",
"Ella",
"Ellie",
"Elma",
"Elona",
"Elsa",
"Elvira",
"Emanuela",
"Emely",
"Emilia",
"Emilie",
"Emilija",
"Emma",
"Erina",
"Estelle",
"Esther",
"Eva",
"Evelyn",
"Felicitas",
"Fiona",
"Florentina",
"Francesca",
"Franziska",
"Frida",
"Gabriela",
"Gloria",
"Hanna",
"Hannah",
"Heidi",
"Helena",
"Helene",
"Ina",
"Ines",
"Irina",
"Iris",
"Irma",
"Isabel",
"Isabell",
"Isabella",
"Isabelle",
"Jana",
"Janine",
"Jasmina",
"Jasmine",
"Jennifer",
"Jessica",
"Johanna",
"Josefine",
"Jovana",
"Julia",
"Juliana",
"Juliane",
"Julijana",
"Juna",
"Kalina",
"Karina",
"Karla",
"Karolina",
"Karoline",
"Katarina",
"Katharina",
"Katja",
"Kerstin",
"Klara",
"Kristina",
"Kyra",
"Laetitia",
"Laila",
"Lana",
"Lara",
"Lara-Sophie",
"Larissa",
"Laura",
"Laureen",
"Lea",
"Lea-Sophie",
"Leah",
"Leandra",
"Lena",
"Leni",
"Leona",
"Leoni",
"Leonie",
"Leonora",
"Leontina",
"Leticia",
"Leyla",
"Lia",
"Lilia",
"Lilian",
"Liliana",
"Liliane",
"Lilli",
"Lilly",
"Lily",
"Lina",
"Linda",
"Linnea",
"Lisa",
"Lisa-Marie",
"Lola",
"Lora",
"Lorena",
"Lotta",
"Lotte",
"Louisa",
"Louise",
"Luana",
"Lucia",
"Lucie",
"Luisa",
"Luise",
"Luna",
"Lydia",
"Madeleine",
"Magdalena",
"Maida",
"Maja",
"Malena",
"Manuela",
"Mara",
"Maria",
"Mariam",
"Mariana",
"Marie",
"Marie-Louise",
"Marie-Sophie",
"Mariella",
"Marijana",
"Marina",
"Marissa",
"Marlene",
"Marta",
"Martha",
"Martina",
"Maryam",
"Mathilda",
"Matilda",
"Maya",
"Melanie",
"Melek",
"Melina",
"Melisa",
"Melissa",
"Mia",
"Michaela",
"Michelle",
"Mila",
"Milica",
"Mina",
"Mira",
"Miriam",
"Mona",
"Nadia",
"Nadin",
"Nadine",
"Nadja",
"Naomi",
"Natalia",
"Natalie",
"Natascha",
"Nathalie",
"Nela",
"Nele",
"Nelly",
"Nicola",
"Nicole",
"Nika",
"Nikita",
"Nikola",
"Nikolina",
"Nina",
"Nisa",
"Nora",
"Norah",
"Olivia",
"Patricia",
"Paula",
"Paulina",
"Pauline",
"Petra",
"Philippa",
"Pia",
"Rachel",
"Raffaela",
"Rana",
"Rayana",
"Rebecca",
"Rita",
"Romy",
"Ronja",
"Ronya",
"Rosa",
"Rosalie",
"Ruth",
"Sabine",
"Sabrina",
"Sahra",
"Salma",
"Sandra",
"Sara",
"Sarah",
"Selena",
"Selin",
"Selina",
"Selma",
"Sena",
"Siena",
"Sigrid",
"Sofia",
"Sofie",
"Sofija",
"Sonja",
"Sophia",
"Sophie",
"Sophie-Marie",
"Soraya",
"Stefanie",
"Stella",
"Stephanie",
"Tamara",
"Tanja",
"Tea",
"Theodora",
"Theresa",
"Therese",
"Tiffany",
"Tina",
"Valentina",
"Vanessa",
"Vera",
"Verena",
"Veronika",
"Victoria",
"Viktoria",
"Viola",
"Violetta",
"Vivian",
"Yasmina",
"Ylvie",
"Yvonne",
"Zara",
"Zoe",
"Zoey",
)
first_names = first_names_male + first_names_female
# about 1000 of the most popular Austrian surnames
# https://de.wiktionary.org/wiki/Verzeichnis:Deutsch/Namen/die_h%C3%A4ufigsten_Nachnamen_%C3%96sterreichs
last_names = (
"Achleitner",
"Ackerl",
"Adam",
"Adler",
"Aichholzer",
"Aichinger",
"Aigner",
"Albrecht",
"Altmann",
"Amann",
"Amon",
"Anderl",
"Angerer",
"Arnold",
"Artner",
"Aschauer",
"Auer",
"Augustin",
"Auinger",
"Bacher",
"Bachinger",
"Bachler",
"Bachmann",
"Bader",
"Baier",
"Baldauf",
"Barth",
"Bartl",
"Bauer",
"Baumann",
"Baumgartner",
"Bayer",
"Beck",
"Becker",
"Beer",
"Berchtold",
"Berger",
"Bergmann",
"Berner",
"Bernhard",
"Berthold",
"Bichler",
"Biedermann",
"Binder",
"Bischof",
"Bitschnau",
"Bittner",
"Blauensteiner",
"Blum",
"Blümel",
"Bock",
"Bodner",
"Bogner",
"Brandl",
"Brandner",
"Brandstetter",
"Brandstätter",
"Brandtner",
"Braun",
"Brenner",
"Breuer",
"Bruckner",
"Brugger",
"Brunner",
"Bräuer",
"Buchberger",
"Buchegger",
"Bucher",
"Buchinger",
"Buchner",
"Burger",
"Burgstaller",
"Burtscher",
"Bäck",
"Böck",
"Böhler",
"Böhm",
"Bösch",
"Bürger",
"Dallinger",
"Dangl",
"Danner",
"Danninger",
"Decker",
"Dengg",
"Denk",
"Deutschmann",
"Dietl",
"Dietrich",
"Dirnberger",
"Dittrich",
"Dobler",
"Doppler",
"Dorfer",
"Dorn",
"Dorner",
"Draxler",
"Dünser",
"Eberhard",
"Eberharter",
"Eberl",
"Ebner",
"Ecker",
"Eder",
"Edlinger",
"Egger",
"Eibl",
"Eichberger",
"Eichhorn",
"Eichinger",
"Eisl",
"Eisner",
"Eller",
"Ender",
"Engel",
"Engl",
"Enzinger",
"Erber",
"Erhart",
"Erlacher",
"Erler",
"Ernst",
"Ertl",
"Fabian",
"Falkner",
"Fankhauser",
"Farkas",
"Fasching",
"Fehringer",
"Feichtenschlager",
"Feichter",
"Feichtinger",
"Feichtner",
"Feigl",
"Felber",
"Felder",
"Fellinger",
"Fellner",
"Fercher",
"Ferstl",
"Fichtinger",
"Fiedler",
"Fink",
"Fischer",
"Fitz",
"Fleck",
"Fleischhacker",
"Fleischmann",
"Foidl",
"Forster",
"Forstner",
"Frank",
"Franz",
"Freitag",
"Freudenthaler",
"Frey",
"Frick",
"Friedl",
"Friedrich",
"Frisch",
"Fritsch",
"Fritz",
"Froschauer",
"Fröhlich",
"Fröschl",
"Frühwirth",
"Fuchs",
"Fuhrmann",
"Füreder",
"Fürst",
"Gabriel",
"Gahleitner",
"Galler",
"Gamsjäger",
"Gangl",
"Gartner",
"Gasser",
"Gassner",
"Gattringer",
"Geier",
"Geiger",
"Geisler",
"Geyer",
"Gindl",
"Glaser",
"Glatz",
"Glück",
"Gmeiner",
"Gollner",
"Gosch",
"Grabher",
"Grabner",
"Graf",
"Grasser",
"Grassl",
"Gratz",
"Gratzer",
"Gratzl",
"Greiner",
"Griesser",
"Grill",
"Gritsch",
"Gross",
"Groß",
"Gruber",
"Grundner",
"Grünberger",
"Grüner",
"Grünwald",
"Gschaider",
"Gschwandtner",
"Gstrein",
"Guggenberger",
"Gutmann",
"Gärtner",
"Göschl",
"Götz",
"Günther",
"Haas",
"Haberl",
"Hacker",
"Hackl",
"Haderer",
"Hafner",
"Hagen",
"Hager",
"Hahn",
"Haid",
"Haiden",
"Haider",
"Haidinger",
"Haindl",
"Hainzl",
"Haller",
"Hammer",
"Hammerer",
"Hammerl",
"Handl",
"Handler",
"Haring",
"Harrer",
"Hartl",
"Hartmann",
"Haslauer",
"Haslinger",
"Hattinger",
"Hauer",
"Haumer",
"Hausberger",
"Hauser",
"Hebenstreit",
"Hechenberger",
"Heger",
"Heigl",
"Heim",
"Heindl",
"Heinrich",
"Heinz",
"Heinzl",
"Heiss",
"Heissenberger",
"Held",
"Hell",
"Heller",
"Helm",
"Hemetsberger",
"Herbst",
"Hermann",
"Herrmann",
"Herzog",
"Himmelbauer",
"Hinterberger",
"Hinteregger",
"Hinterleitner",
"Hirsch",
"Hirschmann",
"Hochleitner",
"Hochreiter",
"Hofbauer",
"Hofer",
"Hoffmann",
"Hofinger",
"Hofmann",
"Hofmeister",
"Hofstetter",
"Hofstätter",
"Holl",
"Hollaus",
"Holler",
"Holzer",
"Holzinger",
"Holzknecht",
"Holzmann",
"Horak",
"Horn",
"Hosp",
"Huber",
"Hubmann",
"Huemer",
"Hufnagl",
"Humer",
"Hummel",
"Hummer",
"Huter",
"Hutter",
"Hutterer",
"Hämmerle",
"Häusler",
"Hödl",
"Höfer",
"Höfler",
"Höglinger",
"Höller",
"Hölzl",
"Hörl",
"Hörmann",
"Hübner",
"Hütter",
"Jahn",
"Jandl",
"Janisch",
"Jank",
"Jauk",
"Jenewein",
"Jost",
"Jovanovic",
"Juen",
"Jung",
"Jungwirth",
"Jäger",
"Jöbstl",
"Kager",
"Kahr",
"Kain",
"Kaindl",
"Kainz",
"Kaiser",
"Kalcher",
"Kaltenbrunner",
"Kaltenböck",
"Kaltenegger",
"Kammerer",
"Kapeller",
"Kappel",
"Kargl",
"Karl",
"Karner",
"Karrer",
"Kaspar",
"Kasper",
"Kastner",
"Kaufmann",
"Keller",
"Kellner",
"Keplinger",
"Kern",
"Kerschbaum",
"Kerschbaumer",
"Kessler",
"Kirchmair",
"Kirchner",
"Kirschner",
"Kiss",
"Kitzler",
"Klammer",
"Klaus",
"Klausner",
"Klein",
"Klement",
"Klinger",
"Klingler",
"Klocker",
"Kloiber",
"Klotz",
"Klug",
"Knapp",
"Knaus",
"Knoll",
"Kober",
"Koch",
"Kocher",
"Kofler",
"Kogler",
"Kohl",
"Kohler",
"Kolar",
"Kolb",
"Koller",
"Kollmann",
"Kolm",
"Konrad",
"Kopf",
"Kopp",
"Koppensteiner",
"Kraft",
"Krainer",
"Krainz",
"Kral",
"Krall",
"Kramer",
"Krammer",
"Kratzer",
"Kraus",
"Kraxner",
"Kreidl",
"Kreiner",
"Kremser",
"Krenn",
"Kreuzer",
"Kriegl",
"Kronberger",
"Kronsteiner",
"Krug",
"Kröll",
"Kucera",
"Kugler",
"Kuhn",
"Kummer",
"Kunz",
"Kurz",
"Kurzmann",
"Käfer",
"Köberl",
"Köck",
"Köhler",
"Kölbl",
"Köll",
"König",
"Kössler",
"Lackner",
"Ladner",
"Lagler",
"Laimer",
"Lammer",
"Lampert",
"Lampl",
"Lamprecht",
"Landl",
"Lang",
"Langer",
"Larcher",
"Lassnig",
"Leber",
"Lechner",
"Lederer",
"Leeb",
"Lehner",
"Leibetseder",
"Leitgeb",
"Leithner",
"Leitner",
"Lengauer",
"Lenz",
"Leonhartsberger",
"Leopold",
"Lerch",
"Lercher",
"Lettner",
"Leutgeb",
"Lichtenegger",
"Linder",
"Lindinger",
"Lindner",
"Lindorfer",
"Lintner",
"Lipp",
"List",
"Loibl",
"Loidl",
"Lorenz",
"Ludwig",
"Luger",
"Luttenberger",
"Lutz",
"Löffler",
"Macher",
"Mader",
"Maier",
"Maierhofer",
"Mair",
"Mairhofer",
"Mandl",
"Mann",
"Margreiter",
"Maringer",
"Mark",
"Markl",
"Marte",
"Martin",
"Marx",
"Mathis",
"Maurer",
"Mayer",
"Mayerhofer",
"Mayr",
"Mayrhofer",
"Meier",
"Meindl",
"Meister",
"Meixner",
"Messner",
"Metzler",
"Meusburger",
"Meyer",
"Mitter",
"Mitteregger",
"Mitterer",
"Mitterlehner",
"Mittermayr",
"Mohr",
"Moosbrugger",
"Moritz",
"Moser",
"Muhr",
"Mörth",
"Mühlbacher",
"Mühlberger",
"Mühlböck",
"Müller",
"Müllner",
"Nagel",
"Nagele",
"Nagl",
"Nemeth",
"Neubacher",
"Neubauer",
"Neugebauer",
"Neuhauser",
"Neuhold",
"Neulinger",
"Neumann",
"Neumayer",
"Neumayr",
"Neumeister",
"Neumüller",
"Neuner",
"Neureiter",
"Neuwirth",
"Niederl",
"Nowak",
"Nussbaumer",
"Nußbaumer",
"Nöbauer",
"Oberhauser",
"Oberhofer",
"Oberleitner",
"Obermayr",
"Obermüller",
"Oberndorfer",
"Ofner",
"Ortner",
"Ostermann",
"Oswald",
"Ott",
"Pacher",
"Pachler",
"Paier",
"Pammer",
"Parzer",
"Pauer",
"Paul",
"Paulitsch",
"Payer",
"Peer",
"Peham",
"Pendl",
"Penz",
"Perner",
"Pertl",
"Pesendorfer",
"Peter",
"Petz",
"Pfeffer",
"Pfeifer",
"Pfeiffer",
"Pfister",
"Pfleger",
"Philipp",
"Pichler",
"Pieber",
"Pilz",
"Pinter",
"Pircher",
"Pirker",
"Plank",
"Plattner",
"Platzer",
"Pock",
"Pohl",
"Pointner",
"Pokorny",
"Pollak",
"Polzer",
"Posch",
"Postl",
"Prager",
"Prantl",
"Praxmarer",
"Prem",
"Prenner",
"Prinz",
"Probst",
"Prohaska",
"Pröll",
"Pucher",
"Puchner",
"Puntigam",
"Punz",
"Putz",
"Pöll",
"Pölzl",
"Pöschl",
"Pühringer",
"Raab",
"Rabitsch",
"Rabl",
"Radl",
"Rainer",
"Ramsauer",
"Rath",
"Rauch",
"Rausch",
"Rauscher",
"Rauter",
"Rechberger",
"Redl",
"Reich",
"Reichel",
"Reicher",
"Reichl",
"Reichmann",
"Reif",
"Reinbacher",
"Reindl",
"Reiner",
"Reinisch",
"Reinprecht",
"Reinthaler",
"Reischl",
"Reisinger",
"Reisner",
"Reitbauer",
"Reiter",
"Reiterer",
"Reithofer",
"Reitinger",
"Renner",
"Resch",
"Rettenbacher",
"Richter",
"Rieder",
"Riedl",
"Riedler",
"Riedmann",
"Rieger",
"Riegler",
"Riener",
"Riepl",
"Rieser",
"Ringhofer",
"Rinner",
"Ritter",
"Rohrer",
"Rohrmoser",
"Rosenberger",
"Rosner",
"Rossmann",
"Roth",
"Rottensteiner",
"Rotter",
"Rudolf",
"Rupp",
"Röck",
"Rössler",
"Sagmeister",
"Sailer",
"Salcher",
"Salzer",
"Salzmann",
"Sammer",
"Santner",
"Sattler",
"Sauer",
"Schachinger",
"Schachner",
"Schaffer",
"Schalk",
"Schaller",
"Schandl",
"Schantl",
"Scharf",
"Scharinger",
"Schartner",
"Schatz",
"Schatzl",
"Schauer",
"Scheer",
"Scheiber",
"Scheidl",
"Schenk",
"Scherer",
"Scherr",
"Scherz",
"Scherzer",
"Scheucher",
"Schiefer",
"Schiestl",
"Schilcher",
"Schiller",
"Schimpl",
"Schinagl",
"Schindler",
"Schinnerl",
"Schlager",
"Schlosser",
"Schlögl",
"Schmid",
"Schmidinger",
"Schmidl",
"Schmidt",
"Schmied",
"Schmuck",
"Schmölzer",
"Schnabl",
"Schneeberger",
"Schneider",
"Schober",
"Scholz",
"Schramm",
"Schrammel",
"Schranz",
"Schreiber",
"Schreiner",
"Schrempf",
"Schrenk",
"Schrittwieser",
"Schröder",
"Schubert",
"Schuh",
"Schuler",
"Schuller",
"Schulz",
"Schuster",
"Schwab",
"Schwaiger",
"Schwaighofer",
"Schwarz",
"Schwarzinger",
"Schwarzl",
"Schweiger",
"Schweighofer",
"Schweitzer",
"Schwendinger",
"Schäfer",
"Schöberl",
"Schöffmann",
"Schöller",
"Schön",
"Schönauer",
"Schönberger",
"Schöpf",
"Schüller",
"Schütz",
"Seebacher",
"Seidl",
"Seifert",
"Seiler",
"Seiser",
"Seitz",
"Seiwald",
"Sieber",
"Sieberer",
"Siegl",
"Sigl",
"Siller",
"Simic",
"Simon",
"Singer",
"Sommer",
"Sonnleitner",
"Sorger",
"Sperl",
"Spiegl",
"Spindler",
"Spitzer",
"Spreitzer",
"Springer",
"Stadlbauer",
"Stadler",
"Stangl",
"Stark",
"Staudacher",
"Staudinger",
"Stecher",
"Stefan",
"Steger",
"Steidl",
"Steiger",
"Steinacher",
"Steinbacher",
"Steinbauer",
"Steinberger",
"Steinböck",
"Steindl",
"Steiner",
"Steininger",
"Steinkellner",
"Steinlechner",
"Steinwender",
"Stelzer",
"Stelzl",
"Stern",
"Steurer",
"Stiegler",
"Stifter",
"Stock",
"Stocker",
"Stockhammer",
"Stockinger",
"Stoiber",
"Stolz",
"Strasser",
"Strauss",
"Strauß",
"Streicher",
"Strobl",
"Strohmaier",
"Strohmayer",
"Strohmeier",
"Stummer",
"Sturm",
"Stöckl",
"Stöger",
"Stückler",
"Stütz",
"Sulzer",
"Suppan",
"Taferner",
"Tanzer",
"Tauber",
"Taucher",
"Teufl",
"Thaler",
"Thalhammer",
"Thaller",
"Thurner",
"Tiefenbacher",
"Tischler",
"Toth",
"Trattner",
"Trauner",
"Traxler",
"Trimmel",
"Trinkl",
"Trummer",
"Uhl",
"Ullmann",
"Ulrich",
"Unger",
"Unterberger",
"Unterweger",
"Urban",
"Varga",
"Veit",
"Vogel",
"Vogl",
"Vogler",
"Vogt",
"Wachter",
"Wagner",
"Walch",
"Walcher",
"Walder",
"Waldner",
"Wallner",
"Walser",
"Walter",
"Waltl",
"Wandl",
"Weber",
"Wechselberger",
"Wegscheider",
"Weidinger",
"Weigl",
"Weinberger",
"Weiser",
"Weiss",
"Weissenböck",
"Weiß",
"Wenger",
"Weninger",
"Wenzl",
"Werner",
"Widhalm",
"Widmann",
"Wiedner",
"Wieland",
"Wiener",
"Wiesbauer",
"Wieser",
"Wiesinger",
"Wiesner",
"Wild",
"Wilfinger",
"Wilhelm",
"Wimmer",
"Windhager",
"Windisch",
"Winkler",
"Winter",
"Wirth",
"Wittmann",
"Wohlmuth",
"Wolf",
"Wurm",
"Wurzer",
"Wurzinger",
"Wögerbauer",
"Wöhrer",
"Yilmaz",
"Zach",
"Zangerl",
"Zauner",
"Zechmeister",
"Zechner",
"Zehetner",
"Zeiler",
"Zeilinger",
"Zeiner",
"Zeller",
"Zenz",
"Zettl",
"Ziegler",
"Zimmermann",
"Zotter",
"Zöchling",
"Zöhrer",
)
prefixes_male = (
"Herr",
"Dr.",
"Ing.",
"Dipl.-Ing.",
"Prof.",
"Univ.Prof.",
)
prefixes_female = (
"Frau",
"Dr.",
"Ing.",
"Dipl.-Ing.",
"Prof.",
"Univ.Prof.",
)
prefixes_male = (
"Herr",
"Dr.",
"Ing.",
"Dipl.-Ing.",
"Prof.",
"Univ.Prof.",
)
prefixes_female = (
"Frau",
"Dr.",
"Ing.",
"Dipl.-Ing.",
"Prof.",
"Univ.Prof.",
)
prefixes = ("Dr.", "Mag.", "Ing.", "Dipl.-Ing.", "Prof.", "Univ.Prof.")
# source:
# https://www.bmbwf.gv.at/dam/jcr:68a61bdd-4fd4-416b-bfb2-4fbb44255574/AKADEMISCHE%20GRADE%202022_M%C3%A4rz%202022.pdf
academic_prefixes = (
"DI",
"DI (FH)",
"Dipl.-Ing.",
"Dipl.-Ing. (FH)",
"Dr. med. univ.",
"Dr. med. dent.",
"Mag.",
"Mag. (FH)",
)
academic_suffixes = (
"BA",
"B.A.",
"BEd",
"BSc",
"B.Sc.",
"Bakk.",
"MA",
"M.A.",
"MBA",
"MEd",
"MSc",
"M.Sc.",
"PhD",
)
"""
:return: Academic prefix
"""
def academic_prefix(self) -> str:
return self.random_element(self.academic_prefixes)
"""
:return: Academic suffix
"""
def academic_suffix(self) -> str:
return self.random_element(self.academic_suffixes)
| Provider |
python | GoogleCloudPlatform__python-docs-samples | logging/redaction/log_redaction_final.py | {
"start": 1838,
"end": 2327
} | class ____(CombineFn):
"""Collect all items in the windowed collection into single batch"""
def create_accumulator(self):
return []
def add_input(self, accumulator, input):
accumulator.append(input)
return accumulator
def merge_accumulators(self, accumulators):
merged = [item for accumulator in accumulators for item in accumulator]
return merged
def extract_output(self, accumulator):
return accumulator
| BatchPayloads |
python | tensorflow__tensorflow | tensorflow/python/ops/parallel_for/gradients_test.py | {
"start": 1894,
"end": 8363
} | class ____:
def __init__(self, activation_size, num_layers):
self._layers = [
tf_layers.Dense(activation_size, activation=nn.relu)
for _ in range(num_layers)
]
def __call__(self, inp):
activation = inp
for layer in self._layers:
activation = layer(activation)
return activation
def fully_connected_model_fn(batch_size, activation_size, num_layers):
model = FullyConnectedModel(activation_size, num_layers)
inp = random_ops.random_normal([batch_size, activation_size])
return inp, model(inp)
def lstm_model_fn(batch_size, state_size, steps, inputs_size=None):
inputs_size = inputs_size or state_size
inputs = [
random_ops.random_normal([batch_size, inputs_size]) for _ in range(steps)
]
cell = rnn_cell.BasicLSTMCell(state_size)
init_state = cell.zero_state(batch_size, dtypes.float32)
state = init_state
for inp in inputs:
_, state = cell(inp, state)
return init_state.c, state.c
def dynamic_lstm_model_fn(batch_size, state_size, max_steps):
# We make inputs and sequence_length constant so that multiple session.run
# calls produce the same result.
inputs = constant_op.constant(
np.random.rand(batch_size, max_steps, state_size), dtype=dtypes.float32)
sequence_length = constant_op.constant(
np.random.randint(0, size=[batch_size], high=max_steps + 1),
dtype=dtypes.int32)
cell = rnn_cell.BasicLSTMCell(state_size)
initial_state = cell.zero_state(batch_size, dtypes.float32)
return inputs, rnn.dynamic_rnn(
cell,
inputs,
sequence_length=sequence_length,
initial_state=initial_state)
def create_fc_batch_jacobian(batch_size, activation_size, num_layers):
inp, output = fully_connected_model_fn(batch_size, activation_size,
num_layers)
pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True)
while_jacobian = gradients.batch_jacobian(output, inp, use_pfor=False)
return pfor_jacobian, while_jacobian
def create_lstm_batch_jacobian(batch_size, state_size, steps, inputs_size=None):
inp, output = lstm_model_fn(batch_size, state_size, steps,
inputs_size=inputs_size)
pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True)
while_jacobian = gradients.batch_jacobian(output, inp, use_pfor=False)
return pfor_jacobian, while_jacobian
def create_dynamic_lstm_batch_jacobian(batch_size, state_size, max_steps):
inp, (_, final_state) = dynamic_lstm_model_fn(batch_size, state_size,
max_steps)
pfor_jacobian = gradients.batch_jacobian(final_state.c, inp, use_pfor=True)
# Note that use_pfor=False does not work above given the current limitations
# on implementation of while_loop. So we statically unroll the looping in the
# jacobian computation.
while_gradients = [
gradient_ops.gradients(array_ops.gather(final_state.c, i, axis=1), inp)[0]
for i in range(state_size)
]
return pfor_jacobian, while_gradients
def create_lstm_batch_hessian(batch_size, state_size, steps):
inp, output = lstm_model_fn(batch_size, state_size, steps)
pfor_jacobian = gradients.batch_jacobian(output, inp, use_pfor=True)
pfor_jacobian = array_ops.reshape(pfor_jacobian, [batch_size, -1])
pfor_hessian = gradients.batch_jacobian(pfor_jacobian, inp, use_pfor=True)
# TODO(agarwal): using two nested while_loop doesn't seem to work here.
# Hence we use pfor_jacobian for computing while_hessian.
while_jacobian = pfor_jacobian
while_hessian = gradients.batch_jacobian(while_jacobian, inp, use_pfor=False)
return pfor_hessian, while_hessian
def create_lstm_hessian(batch_size, state_size, steps):
_, output = lstm_model_fn(batch_size, state_size, steps)
weights = variables.trainable_variables()
pfor_jacobians = gradients.jacobian(output, weights, use_pfor=True)
pfor_hessians = [
gradients.jacobian(x, weights, use_pfor=True) for x in pfor_jacobians
]
# TODO(agarwal): using two nested while_loop doesn't seem to work here.
# Hence we use pfor_jacobians for computing while_hessians.
while_jacobians = pfor_jacobians
while_hessians = [
gradients.jacobian(x, weights, use_pfor=False) for x in while_jacobians
]
return pfor_hessians, while_hessians
def create_fc_per_eg_grad(batch_size, activation_size, num_layers):
  """Builds per-example gradient ops for a small fully connected network.

  Returns a pair of per-example gradient structures: one computed with pfor,
  one with a plain for_loop, each over the trainable variables.
  """
  batch_input = random_ops.random_normal([batch_size, activation_size])
  hidden_layers = [
      tf_layers.Dense(activation_size, activation=nn.relu)
      for _ in range(num_layers)
  ]
  output_layer = tf_layers.Dense(1)

  def per_example_grads(example):
    # Forward pass through the hidden stack, project to a scalar, then take
    # gradients of the l2 loss w.r.t. all trainable variables.
    for hidden in hidden_layers:
      example = hidden(example)
    loss = nn.l2_loss(output_layer(example))
    return gradient_ops.gradients(loss, variables.trainable_variables())

  def loop_fn(i):
    return per_example_grads(
        array_ops.expand_dims(array_ops.gather(batch_input, i), 0))

  pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
  grad_dtypes = [v.dtype for v in variables.trainable_variables()]
  while_outputs = control_flow_ops.for_loop(loop_fn, grad_dtypes, batch_size)
  return pfor_outputs, while_outputs
def create_lstm_per_eg_grad(batch_size, state_size, steps, inputs_size=None):
  """Builds per-example gradient ops for a statically unrolled BasicLSTMCell.

  Args:
    batch_size: number of examples in the batch.
    state_size: LSTM state size.
    steps: number of unrolled time steps.
    inputs_size: width of each per-step input; falls back to state_size
      when falsy (original `or` semantics preserved).

  Returns:
    A pair of per-example gradient structures: one computed with pfor, one
    with a plain for_loop, each over the trainable variables.
  """
  inputs_size = inputs_size or state_size
  step_inputs = [
      random_ops.random_normal([batch_size, inputs_size]) for _ in range(steps)
  ]
  cell = rnn_cell.BasicLSTMCell(state_size)
  init_state = cell.zero_state(batch_size, dtypes.float32)

  def per_example_grads(step_inps, state):
    # Unroll the cell over the steps and differentiate the l2 loss of the
    # final cell state w.r.t. all trainable variables.
    for step_inp in step_inps:
      _, state = cell(step_inp, state)
    loss = nn.l2_loss(state.c)
    return gradient_ops.gradients(loss, variables.trainable_variables())

  def loop_fn(i):
    example_inputs = [
        array_ops.expand_dims(array_ops.gather(x, i), 0) for x in step_inputs
    ]
    example_state = rnn_cell.LSTMStateTuple(
        *[array_ops.expand_dims(array_ops.gather(s, i), 0)
          for s in init_state])
    return per_example_grads(example_inputs, example_state)

  pfor_outputs = control_flow_ops.pfor(loop_fn, batch_size)
  grad_dtypes = [v.dtype for v in variables.trainable_variables()]
  while_outputs = control_flow_ops.for_loop(loop_fn, grad_dtypes, batch_size)
  return pfor_outputs, while_outputs
# Importing the code from tensorflow_models seems to cause errors. Hence we
# duplicate the model definition here.
# TODO(agarwal): Use the version in tensorflow_models/official instead.
| FullyConnectedModel |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_multicolumn_datetime_difference_to_be_less_than_two_months.py | {
"start": 1868,
"end": 7633
} | class ____(MulticolumnMapExpectation):
"""Expect the difference of 2 datetime columns to be less than or equal to 2 months.
This means that for each row, we expect end_datetime - start_datetime <= 2 (in months)
Args:
start_datetime (datetime): The first datetime column to compare.
end_datetime (datetime): The second datetime column to compare.
"""
examples = [
{
"data": {
"start_datetime": [
"2023-01-01",
"2023-02-01",
"2023-03-01",
"2023-04-01",
"2023-05-01",
],
"end_datetime_within_threshold": [
"2023-01-15",
"2023-03-02",
"2023-05-01",
"2023-06-01",
"2023-07-01",
],
"end_datetime_above_threshold": [
"2023-04-15",
"2023-05-02",
"2023-06-01",
"2023-07-01",
"2023-09-01",
],
"end_datetime_with_Nan": [
pd.NaT,
"2023-03-02",
"2023-05-01",
"2023-06-01",
pd.NaT,
],
},
"tests": [
{
"title": "within threshold",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"start_datetime": "start_datetime",
"end_datetime": "end_datetime_within_threshold",
"column_list": [
"start_datetime",
"end_datetime_within_threshold",
],
},
"out": {
"success": True,
},
},
{
"title": "above threshold",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"start_datetime": "start_datetime",
"end_datetime": "end_datetime_above_threshold",
"column_list": [
"start_datetime",
"end_datetime_above_threshold",
],
},
"out": {
"success": False,
},
},
{
"title": "with Nan",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"start_datetime": "start_datetime",
"end_datetime": "end_datetime_with_Nan",
"column_list": ["start_datetime", "end_datetime_with_Nan"],
},
"out": {
"success": True,
},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
map_metric = "multicolumn_values.column_datetime_difference_to_be_less_than_two_months"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("column_list", "start_datetime", "end_datetime")
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {
"result_format": "BASIC",
"catch_exceptions": False,
"base": 2,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
configuration = configuration or self.configuration
start_datetime = configuration.kwargs["start_datetime"]
end_datetime = configuration.kwargs["end_datetime"]
column_list = configuration.kwargs["column_list"]
# Check other things in configuration.kwargs and raise Exceptions if needed
try:
# parameter cannot be less than zero,
assert start_datetime is None or isinstance(start_datetime, str)
assert end_datetime is None or isinstance(end_datetime, str)
assert start_datetime in column_list
assert end_datetime in column_list
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"multi-column expectation",
"multi-column column datetime difference to be less than two months",
],
"contributors": ["@kcs-rohankolappa"],
}
if __name__ == "__main__":
ExpectMulticolumnDatetimeDifferenceToBeLessThanTwoMonths().print_diagnostic_checklist()
| ExpectMulticolumnDatetimeDifferenceToBeLessThanTwoMonths |
python | chroma-core__chroma | chromadb/api/types.py | {
"start": 51787,
"end": 52800
} | class ____(BaseModel):
"""Configuration for SPANN vector index."""
_validate_extra_fields = _create_extra_fields_validator(
[
"search_nprobe",
"search_rng_factor",
"search_rng_epsilon",
"nreplica_count",
"write_nprobe",
"write_rng_factor",
"write_rng_epsilon",
"split_threshold",
"num_samples_kmeans",
"initial_lambda",
"reassign_neighbor_count",
"merge_threshold",
"num_centers_to_merge_to",
"ef_construction",
"ef_search",
"max_neighbors",
]
)
search_nprobe: Optional[int] = None
write_nprobe: Optional[int] = None
ef_construction: Optional[int] = None
ef_search: Optional[int] = None
max_neighbors: Optional[int] = None
reassign_neighbor_count: Optional[int] = None
split_threshold: Optional[int] = None
merge_threshold: Optional[int] = None
| SpannIndexConfig |
python | walkccc__LeetCode | solutions/832. Flipping an Image/832.py | {
"start": 0,
"end": 244
} | class ____:
def flipAndInvertImage(self, A: list[list[int]]) -> list[list[int]]:
n = len(A)
for i in range(n):
for j in range((n + 2) // 2):
A[i][j], A[i][n - j - 2] = A[i][n - j - 1] ^ 2, A[i][j] ^ 1
return A
| Solution |
python | FactoryBoy__factory_boy | tests/test_base.py | {
"start": 2728,
"end": 6857
} | class ____(unittest.TestCase):
def test_base_attrs(self):
class AbstractFactory(base.Factory):
pass
# Declarative attributes
self.assertTrue(AbstractFactory._meta.abstract)
self.assertIsNone(AbstractFactory._meta.model)
self.assertEqual((), AbstractFactory._meta.inline_args)
self.assertEqual((), AbstractFactory._meta.exclude)
self.assertEqual(enums.CREATE_STRATEGY, AbstractFactory._meta.strategy)
# Non-declarative attributes
self.assertEqual({}, AbstractFactory._meta.pre_declarations.as_dict())
self.assertEqual({}, AbstractFactory._meta.post_declarations.as_dict())
self.assertEqual(AbstractFactory, AbstractFactory._meta.factory)
self.assertEqual(base.Factory, AbstractFactory._meta.base_factory)
self.assertEqual(AbstractFactory._meta, AbstractFactory._meta.counter_reference)
def test_declaration_collecting(self):
lazy = declarations.LazyFunction(int)
lazy2 = declarations.LazyAttribute(lambda _o: 1)
postgen = declarations.PostGenerationDeclaration()
class AbstractFactory(base.Factory):
x = 1
y = lazy
y2 = lazy2
z = postgen
# Declarations aren't removed
self.assertEqual(1, AbstractFactory.x)
self.assertEqual(lazy, AbstractFactory.y)
self.assertEqual(lazy2, AbstractFactory.y2)
self.assertEqual(postgen, AbstractFactory.z)
# And are available in class Meta
self.assertEqual(
{'x': 1, 'y': lazy, 'y2': lazy2},
AbstractFactory._meta.pre_declarations.as_dict(),
)
self.assertEqual(
{'z': postgen},
AbstractFactory._meta.post_declarations.as_dict(),
)
def test_inherited_declaration_collecting(self):
lazy = declarations.LazyFunction(int)
lazy2 = declarations.LazyAttribute(lambda _o: 2)
postgen = declarations.PostGenerationDeclaration()
postgen2 = declarations.PostGenerationDeclaration()
class AbstractFactory(base.Factory):
x = 1
y = lazy
z = postgen
class OtherFactory(AbstractFactory):
a = lazy2
b = postgen2
# Declarations aren't removed
self.assertEqual(lazy2, OtherFactory.a)
self.assertEqual(postgen2, OtherFactory.b)
self.assertEqual(1, OtherFactory.x)
self.assertEqual(lazy, OtherFactory.y)
self.assertEqual(postgen, OtherFactory.z)
# And are available in class Meta
self.assertEqual(
{'x': 1, 'y': lazy, 'a': lazy2},
OtherFactory._meta.pre_declarations.as_dict(),
)
self.assertEqual(
{'z': postgen, 'b': postgen2},
OtherFactory._meta.post_declarations.as_dict(),
)
def test_inherited_declaration_shadowing(self):
lazy = declarations.LazyFunction(int)
lazy2 = declarations.LazyAttribute(lambda _o: 2)
postgen = declarations.PostGenerationDeclaration()
postgen2 = declarations.PostGenerationDeclaration()
class AbstractFactory(base.Factory):
x = 1
y = lazy
z = postgen
class OtherFactory(AbstractFactory):
y = lazy2
z = postgen2
# Declarations aren't removed
self.assertEqual(1, OtherFactory.x)
self.assertEqual(lazy2, OtherFactory.y)
self.assertEqual(postgen2, OtherFactory.z)
# And are available in class Meta
self.assertEqual(
{'x': 1, 'y': lazy2},
OtherFactory._meta.pre_declarations.as_dict(),
)
self.assertEqual(
{'z': postgen2},
OtherFactory._meta.post_declarations.as_dict(),
)
def test_factory_as_meta_model_raises_exception(self):
class FirstFactory(base.Factory):
pass
class Meta:
model = FirstFactory
with self.assertRaises(TypeError):
type("SecondFactory", (base.Factory,), {"Meta": Meta})
| OptionsTests |
python | pytorch__pytorch | test/dynamo/test_ctx_manager.py | {
"start": 47457,
"end": 49059
} | class ____(torch.nn.Module):
def forward(self):
_saved_tensors_hooks_disable = torch._C._autograd._saved_tensors_hooks_disable('This is not supported'); _saved_tensors_hooks_disable = None
x: "f32[1]" = torch.ones(1)
y: "f32[1]" = torch.zeros(1)
_saved_tensors_hooks_disable_1 = torch._C._autograd._saved_tensors_hooks_disable('This is not supported inner'); _saved_tensors_hooks_disable_1 = None
add: "f32[1]" = x + y; y = None
_saved_tensors_hooks_disable_2 = torch._C._autograd._saved_tensors_hooks_disable('This is not supported'); _saved_tensors_hooks_disable_2 = None
add_1: "f32[1]" = add + x; add = x = None
_saved_tensors_hooks_disable_3 = torch._C._autograd._saved_tensors_hooks_disable('Previously disabled message'); _saved_tensors_hooks_disable_3 = None
return (add_1,)
""", # NOQA: B950
)
def test_disable_saved_tensors_hooks_graph_break(self):
def fn(x):
with torch.autograd.graph.disable_saved_tensors_hooks(
"This is not supported"
):
y = x + 1
torch._dynamo.graph_break()
return y * 2
eager = EagerAndRecordGraphs()
torch.compile(fn, backend=eager, fullgraph=False)(torch.randn(()))
def check_graph(actual, expected): # noqa: F841
self.assertExpectedInline(actual, expected)
graph = eager.graphs[0]
actual = normalize_gm(graph.print_readable(False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | keras-team__keras | keras/src/losses/losses.py | {
"start": 58024,
"end": 99534
} | class ____(LossFunctionWrapper):
"""Computes the Generalized Cross Entropy loss between `y_true` & `y_pred`.
Generalized Cross Entropy (GCE) is a noise-robust loss function
that provides better robustness against noisy labels than
standard cross entropy.
It generalizes both cross entropy and mean absolute error through
the parameter q, where values closer to 1 make the loss more robust
to noisy labels.
Formula:
```python
loss = (1 - p**q) / q
```
where `p` is the predicted probability for the true class and `q`
is the noise parameter.
Args:
q: Float in range `(0, 1)`. It is the noise parameter.
Controls the behavior of the loss:
- As `q` approaches 0: Behaves more like cross entropy
- As `q` approaches 1: Behaves more like mean absolute error
Defaults to `0.5`
reduction: Type of reduction to apply to the loss. In almost all cases
this should be `"sum_over_batch_size"`. Supported options are
`"sum"`, `"sum_over_batch_size"`, `"mean"`,
`"mean_with_sample_weight"` or `None`. `"sum"` sums the loss,
`"sum_over_batch_size"` and `"mean"` sum the loss and divide by the
sample size, and `"mean_with_sample_weight"` sums the loss and
divides by the sum of the sample weights. `"none"` and `None`
perform no aggregation. Defaults to `"sum_over_batch_size"`.
name: Optional name for the loss instance.
dtype: The dtype of the loss's computations. Defaults to `None`, which
means using `keras.backend.floatx()`. `keras.backend.floatx()` is a
`"float32"` unless set to different value
(via `keras.backend.set_floatx()`). If a `keras.DTypePolicy` is
provided, then the `compute_dtype` will be utilized.
Example:
```python
y_true = np.array([0, 1, 0, 1])
y_pred = np.array([[0.7, 0.3], [0.2, 0.8], [0.6, 0.4], [0.4, 0.6]])
keras.losses.CategoricalGeneralizedCrossEntropy()(y_true, y_pred)
```
References:
- [Zhang, Sabuncu, 2018](https://arxiv.org/abs/1805.07836)
("Generalized Cross Entropy Loss for Training
Deep Neural Networks with Noisy Labels")
"""
def __init__(
self,
q=0.5,
reduction="sum_over_batch_size",
name="categorical_generalized_cross_entropy",
dtype=None,
):
if not 0 < q < 1:
raise ValueError("q must be in the interval (0, 1)")
super().__init__(
categorical_generalized_cross_entropy,
name=name,
reduction=reduction,
dtype=dtype,
q=q,
)
self.q = q
def get_config(self):
config = Loss.get_config(self)
config.update(
{
"q": self.q,
}
)
return config
def convert_binary_labels_to_hinge(y_true):
"""Converts binary labels into -1/1 for hinge loss/metric calculation."""
are_zeros = ops.equal(y_true, 0)
are_ones = ops.equal(y_true, 1)
is_binary = ops.all((ops.logical_or(are_zeros, are_ones)))
def _convert_binary_labels():
# Convert the binary labels to -1 or 1.
return 2.0 * y_true - 1.0
def _return_labels_unconverted():
# Returns the labels unchanged if they are non-binary
return y_true
updated_y_true = ops.cond(
is_binary, _convert_binary_labels, _return_labels_unconverted
)
return updated_y_true
@keras_export(
[
"keras.metrics.hinge",
"keras.losses.hinge",
]
)
def hinge(y_true, y_pred):
"""Computes the hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)
```
Args:
y_true: The ground truth values. `y_true` values are expected to be -1
or 1. If binary (0 or 1) labels are provided they will be converted
to -1 or 1 with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Hinge loss values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.choice([-1, 1], size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras.losses.hinge(y_true, y_pred)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, dtype=y_pred.dtype)
y_true = ops.convert_to_tensor(y_true)
y_true = convert_binary_labels_to_hinge(y_true)
return ops.mean(ops.maximum(1.0 - y_true * y_pred, 0.0), axis=-1)
@keras_export(
[
"keras.metrics.squared_hinge",
"keras.losses.squared_hinge",
]
)
def squared_hinge(y_true, y_pred):
"""Computes the squared hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)
```
Args:
y_true: The ground truth values. `y_true` values are expected to be -1
or 1. If binary (0 or 1) labels are provided we will convert them
to -1 or 1 with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Squared hinge loss values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.choice([-1, 1], size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras.losses.squared_hinge(y_true, y_pred)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
y_true = convert_binary_labels_to_hinge(y_true)
return ops.mean(
ops.square(ops.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1
)
@keras_export(
[
"keras.metrics.categorical_hinge",
"keras.losses.categorical_hinge",
]
)
def categorical_hinge(y_true, y_pred):
"""Computes the categorical hinge loss between `y_true` & `y_pred`.
Formula:
```python
loss = maximum(neg - pos + 1, 0)
```
where `neg=maximum((1-y_true)*y_pred)` and `pos=sum(y_true*y_pred)`
Args:
y_true: The ground truth values. `y_true` values are expected to be
either `{-1, +1}` or `{0, 1}` (i.e. a one-hot-encoded tensor) with
shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Categorical hinge loss values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.randint(0, 3, size=(2,))
>>> y_true = np.eye(np.max(y_true) + 1)[y_true]
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras.losses.categorical_hinge(y_true, y_pred)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
pos = ops.sum(y_true * y_pred, axis=-1)
neg = ops.max((1.0 - y_true) * y_pred, axis=-1)
zero = ops.cast(0.0, y_pred.dtype)
return ops.maximum(neg - pos + 1.0, zero)
@keras_export(
[
"keras.metrics.mean_squared_error",
"keras.losses.mean_squared_error",
# Legacy aliases
"keras._legacy.losses.mse",
"keras._legacy.losses.MSE",
"keras._legacy.metrics.mse",
"keras._legacy.metrics.MSE",
]
)
def mean_squared_error(y_true, y_pred):
"""Computes the mean squared error between labels and predictions.
Formula:
```python
loss = mean(square(y_true - y_pred), axis=-1)
```
Example:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras.losses.mean_squared_error(y_true, y_pred)
Args:
y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared error values with shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
return ops.mean(ops.square(y_true - y_pred), axis=-1)
@keras_export(
[
"keras.metrics.mean_absolute_error",
"keras.losses.mean_absolute_error",
# Legacy aliases
"keras._legacy.losses.MAE",
"keras._legacy.losses.mae",
"keras._legacy.metrics.MAE",
"keras._legacy.metrics.mae",
]
)
def mean_absolute_error(y_true, y_pred):
"""Computes the mean absolute error between labels and predictions.
```python
loss = mean(abs(y_true - y_pred), axis=-1)
```
Args:
y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Mean absolute error values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras.losses.mean_absolute_error(y_true, y_pred)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
return ops.mean(ops.abs(y_true - y_pred), axis=-1)
@keras_export(
[
"keras.metrics.mean_absolute_percentage_error",
"keras.losses.mean_absolute_percentage_error",
# Legacy aliases
"keras._legacy.losses.mape",
"keras._legacy.losses.MAPE",
"keras._legacy.metrics.mape",
"keras._legacy.metrics.MAPE",
]
)
def mean_absolute_percentage_error(y_true, y_pred):
"""Computes the mean absolute percentage error between `y_true` & `y_pred`.
Formula:
```python
loss = 100 * mean(abs((y_true - y_pred) / y_true), axis=-1)
```
Division by zero is prevented by dividing by `maximum(y_true, epsilon)`
where `epsilon = keras.backend.epsilon()`
(default to `1e-7`).
Args:
y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Mean absolute percentage error values with shape = `[batch_size, d0, ..
dN-1]`.
Example:
>>> y_true = np.random.random(size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras.losses.mean_absolute_percentage_error(y_true, y_pred)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
epsilon = ops.convert_to_tensor(backend.epsilon(), dtype=y_pred.dtype)
y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
diff = ops.abs((y_true - y_pred) / ops.maximum(ops.abs(y_true), epsilon))
return 100.0 * ops.mean(diff, axis=-1)
@keras_export(
[
"keras.metrics.mean_squared_logarithmic_error",
"keras.losses.mean_squared_logarithmic_error",
# Legacy aliases
"keras._legacy.losses.msle",
"keras._legacy.losses.MSLE",
"keras._legacy.metrics.msle",
"keras._legacy.metrics.MSLE",
]
)
def mean_squared_logarithmic_error(y_true, y_pred):
"""Computes the mean squared logarithmic error between `y_true` & `y_pred`.
Formula:
```python
loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)
```
Note that `y_pred` and `y_true` cannot be less or equal to 0. Negative
values and 0 values will be replaced with `keras.backend.epsilon()`
(default to `1e-7`).
Args:
y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Mean squared logarithmic error values with shape = `[batch_size, d0, ..
dN-1]`.
Example:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras.losses.mean_squared_logarithmic_error(y_true, y_pred)
"""
epsilon = ops.convert_to_tensor(backend.epsilon())
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
first_log = ops.log(ops.maximum(y_pred, epsilon) + 1.0)
second_log = ops.log(ops.maximum(y_true, epsilon) + 1.0)
return ops.mean(ops.square(first_log - second_log), axis=-1)
@keras_export("keras.losses.cosine_similarity")
def cosine_similarity(y_true, y_pred, axis=-1):
"""Computes the cosine similarity between labels and predictions.
Formula:
```python
loss = -sum(l2_norm(y_true) * l2_norm(y_pred))
```
Note that it is a number between -1 and 1. When it is a negative number
between -1 and 0, 0 indicates orthogonality and values closer to -1
indicate greater similarity. This makes it usable as a loss function in a
setting where you try to maximize the proximity between predictions and
targets. If either `y_true` or `y_pred` is a zero vector, cosine
similarity will be 0 regardless of the proximity between predictions
and targets.
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
axis: Axis along which to determine similarity. Defaults to `-1`.
Returns:
Cosine similarity tensor.
Example:
>>> y_true = [[0., 1.], [1., 1.], [1., 1.]]
>>> y_pred = [[1., 0.], [1., 1.], [-1., -1.]]
>>> loss = keras.losses.cosine_similarity(y_true, y_pred, axis=-1)
[-0., -0.99999994, 0.99999994]
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
y_pred = normalize(y_pred, axis=axis)
y_true = normalize(y_true, axis=axis)
return -ops.sum(y_true * y_pred, axis=axis)
@keras_export(["keras.losses.huber", "keras.metrics.huber"])
def huber(y_true, y_pred, delta=1.0):
"""Computes Huber loss value.
Formula:
```python
for x in error:
if abs(x) <= delta:
loss.append(0.5 * x^2)
elif abs(x) > delta:
loss.append(delta * abs(x) - 0.5 * delta^2)
loss = mean(loss, axis=-1)
```
See: [Huber loss](https://en.wikipedia.org/wiki/Huber_loss).
Example:
>>> y_true = [[0, 1], [0, 0]]
>>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
>>> loss = keras.losses.huber(y_true, y_pred)
0.155
Args:
y_true: tensor of true targets.
y_pred: tensor of predicted targets.
delta: A float, the point where the Huber loss function changes from a
quadratic to linear. Defaults to `1.0`.
Returns:
Tensor with one scalar loss entry per sample.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
delta = ops.convert_to_tensor(delta, dtype=y_pred.dtype)
error = ops.subtract(y_pred, y_true)
abs_error = ops.abs(error)
half = ops.convert_to_tensor(0.5, dtype=abs_error.dtype)
return ops.mean(
ops.where(
abs_error <= delta,
half * ops.square(error),
delta * abs_error - half * ops.square(delta),
),
axis=-1,
)
@keras_export(
[
"keras.losses.log_cosh",
"keras.metrics.log_cosh",
# Legacy aliases
"keras._legacy.losses.logcosh",
"keras._legacy.metrics.logcosh",
]
)
def log_cosh(y_true, y_pred):
"""Logarithm of the hyperbolic cosine of the prediction error.
Formula:
```python
loss = mean(log(cosh(y_pred - y_true)), axis=-1)
```
Note that `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small
`x` and to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works
mostly like the mean squared error, but will not be so strongly affected by
the occasional wildly incorrect prediction.
Example:
>>> y_true = [[0., 1.], [0., 0.]]
>>> y_pred = [[1., 1.], [0., 0.]]
>>> loss = keras.losses.log_cosh(y_true, y_pred)
0.108
Args:
y_true: Ground truth values with shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values with shape = `[batch_size, d0, .. dN]`.
Returns:
Logcosh error values with shape = `[batch_size, d0, .. dN-1]`.
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
y_true, y_pred = squeeze_or_expand_to_same_rank(y_true, y_pred)
log2 = ops.convert_to_tensor(ops.log(2.0), dtype=y_pred.dtype)
def _logcosh(x):
return x + ops.softplus(x * -2.0) - log2
return ops.mean(_logcosh(y_pred - y_true), axis=-1)
@keras_export(
[
"keras.metrics.kl_divergence",
"keras.losses.kl_divergence",
# Legacy aliases
"keras._legacy.losses.KLD",
"keras._legacy.losses.kld",
"keras._legacy.losses.kullback_leibler_divergence",
"keras._legacy.metrics.KLD",
"keras._legacy.metrics.kld",
"keras._legacy.metrics.kullback_leibler_divergence",
]
)
def kl_divergence(y_true, y_pred):
"""Computes Kullback-Leibler divergence loss between `y_true` & `y_pred`.
Formula:
```python
loss = y_true * log(y_true / y_pred)
```
`y_true` and `y_pred` are expected to be probability
distributions, with values between 0 and 1. They will get
clipped to the `[0, 1]` range.
Args:
y_true: Tensor of true targets.
y_pred: Tensor of predicted targets.
Returns:
KL Divergence loss values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float32)
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras.losses.kl_divergence(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_true = ops.clip(y_true, 1e-7, 1)
>>> y_pred = ops.clip(y_pred, 1e-7, 1)
>>> assert np.array_equal(
... loss, np.sum(y_true * np.log(y_true / y_pred), axis=-1))
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, y_pred.dtype)
y_true = ops.clip(y_true, backend.epsilon(), 1)
y_pred = ops.clip(y_pred, backend.epsilon(), 1)
return ops.sum(y_true * ops.log(y_true / y_pred), axis=-1)
@keras_export(
[
"keras.metrics.poisson",
"keras.losses.poisson",
]
)
def poisson(y_true, y_pred):
"""Computes the Poisson loss between y_true and y_pred.
Formula:
```python
loss = y_pred - y_true * log(y_pred)
```
Args:
y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
Returns:
Poisson loss values with shape = `[batch_size, d0, .. dN-1]`.
Example:
>>> y_true = np.random.randint(0, 2, size=(2, 3))
>>> y_pred = np.random.random(size=(2, 3))
>>> loss = keras.losses.poisson(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> y_pred = y_pred + 1e-7
>>> assert np.allclose(
... loss, np.mean(y_pred - y_true * np.log(y_pred), axis=-1),
... atol=1e-5)
"""
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.convert_to_tensor(y_true, dtype=y_pred.dtype)
epsilon = ops.convert_to_tensor(backend.epsilon(), dtype=y_pred.dtype)
return ops.mean(y_pred - y_true * ops.log(y_pred + epsilon), axis=-1)
@keras_export(
[
"keras.metrics.categorical_crossentropy",
"keras.losses.categorical_crossentropy",
]
)
def categorical_crossentropy(
y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1
):
"""Computes the categorical crossentropy loss.
Args:
y_true: Tensor of one-hot true targets.
y_pred: Tensor of predicted targets.
from_logits: Whether `y_pred` is expected to be a logits tensor. By
default, we assume that `y_pred` encodes a probability distribution.
label_smoothing: Float in [0, 1]. If > `0` then smooth the labels. For
example, if `0.1`, use `0.1 / num_classes` for non-target labels
and `0.9 + 0.1 / num_classes` for target labels.
axis: Defaults to `-1`. The dimension along which the entropy is
computed.
Returns:
Categorical crossentropy loss value.
Example:
>>> y_true = [[0, 1, 0], [0, 0, 1]]
>>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
>>> loss = keras.losses.categorical_crossentropy(y_true, y_pred)
>>> assert loss.shape == (2,)
>>> loss
array([0.0513, 2.303], dtype=float32)
"""
if isinstance(axis, bool):
raise ValueError(
"`axis` must be of type `int`. "
f"Received: axis={axis} of type {type(axis)}"
)
y_pred = ops.convert_to_tensor(y_pred)
y_true = ops.cast(y_true, y_pred.dtype)
if y_pred.shape[-1] == 1:
warnings.warn(
"In loss categorical_crossentropy, expected "
"y_pred.shape to be (batch_size, num_classes) "
f"with num_classes > 1. Received: y_pred.shape={y_pred.shape}. "
"Consider using 'binary_crossentropy' if you only have 2 classes.",
SyntaxWarning,
stacklevel=2,
)
if label_smoothing:
num_classes = ops.cast(ops.shape(y_true)[-1], y_pred.dtype)
y_true = y_true * (1.0 - label_smoothing) + (
label_smoothing / num_classes
)
return ops.categorical_crossentropy(
y_true, y_pred, from_logits=from_logits, axis=axis
)
@keras_export(
    [
        "keras.metrics.categorical_focal_crossentropy",
        "keras.losses.categorical_focal_crossentropy",
    ]
)
def categorical_focal_crossentropy(
    y_true,
    y_pred,
    alpha=0.25,
    gamma=2.0,
    from_logits=False,
    label_smoothing=0.0,
    axis=-1,
):
    """Computes the categorical focal crossentropy loss.

    Args:
        y_true: Tensor of one-hot encoded true targets.
        y_pred: Tensor of predicted targets.
        alpha: Weight balancing factor for all classes; `0.25` by default,
            as mentioned in the reference. May be a scalar or a list of
            floats. In the multi-class case it can be set via inverse class
            frequency using `compute_class_weight` from `sklearn.utils`.
        gamma: Focusing parameter; `2.0` by default, as mentioned in the
            reference. Larger values progressively discount easy examples.
            With `gamma == 0` there is no focal effect on the categorical
            crossentropy.
        from_logits: Whether `y_pred` is a tensor of logits. When `False`
            (the default), `y_pred` is assumed to encode a probability
            distribution.
        label_smoothing: Float in [0, 1]. When greater than `0`, labels are
            smoothed: non-target classes receive
            `label_smoothing / num_classes` and target classes receive the
            remaining mass.
        axis: Dimension along which the entropy is computed.
            Defaults to `-1`.

    Returns:
        Categorical focal crossentropy loss value.

    Example:

    >>> y_true = [[0, 1, 0], [0, 0, 1]]
    >>> y_pred = [[0.05, 0.9, 0.05], [0.1, 0.85, 0.05]]
    >>> loss = keras.losses.categorical_focal_crossentropy(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss
    array([2.63401289e-04, 6.75912094e-01], dtype=float32)
    """
    # A boolean would silently be interpreted as axis 0/1; reject it early.
    if isinstance(axis, bool):
        raise ValueError(
            "`axis` must be of type `int`. "
            f"Received: axis={axis} of type {type(axis)}"
        )

    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.cast(y_true, y_pred.dtype)

    # A trailing dimension of 1 usually means binary targets were passed.
    if y_pred.shape[-1] == 1:
        warnings.warn(
            "In loss categorical_focal_crossentropy, expected "
            "y_pred.shape to be (batch_size, num_classes) "
            f"with num_classes > 1. Received: y_pred.shape={y_pred.shape}. "
            "Consider using 'binary_crossentropy' if you only have 2 classes.",
            SyntaxWarning,
            stacklevel=2,
        )

    if label_smoothing:
        class_count = ops.cast(ops.shape(y_true)[-1], y_pred.dtype)
        y_true = (
            y_true * (1.0 - label_smoothing) + label_smoothing / class_count
        )

    if from_logits:
        y_pred = ops.softmax(y_pred, axis=axis)

    # Renormalize so the class probabilities of each sample sum to 1; this
    # keeps the cross entropy well defined, then clip away exact 0/1 values
    # before taking the log.
    probs = y_pred / ops.sum(y_pred, axis=axis, keepdims=True)
    probs = ops.clip(probs, backend.epsilon(), 1.0 - backend.epsilon())

    cross_entropy = -y_true * ops.log(probs)
    # (1 - p)^gamma down-weights well-classified examples; alpha balances
    # class contributions.
    weighting = ops.multiply(ops.power(1.0 - probs, gamma), alpha)
    return ops.sum(ops.multiply(weighting, cross_entropy), axis=axis)
@keras_export(
    [
        "keras.metrics.sparse_categorical_crossentropy",
        "keras.losses.sparse_categorical_crossentropy",
    ]
)
def sparse_categorical_crossentropy(
    y_true, y_pred, from_logits=False, ignore_class=None, axis=-1
):
    """Computes the sparse categorical crossentropy loss.

    Args:
        y_true: Ground truth values (integer class indices).
        y_pred: The predicted values.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability
            distribution.
        ignore_class: Optional integer. The ID of a class to be ignored
            during loss computation. This is useful, for example, in
            segmentation problems featuring a "void" class (commonly -1 or
            255) in segmentation maps. By default (`ignore_class=None`),
            all classes are considered.
        axis: Defaults to `-1`. The dimension along which the entropy is
            computed.

    Returns:
        Sparse categorical crossentropy loss value.

    Examples:

    >>> y_true = [1, 2]
    >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
    >>> loss = keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss
    array([0.0513, 2.303], dtype=float32)
    """
    # Drop a trailing singleton label dimension (e.g. shape (batch, 1) vs
    # predictions of shape (batch, num_classes)) so labels are rank-matched.
    if len(y_true.shape) == len(y_pred.shape) and y_true.shape[-1] == 1:
        y_true = ops.squeeze(y_true, axis=-1)
    if ignore_class is not None:
        # Capture the per-position loss shape before any masking; the mask
        # is reshaped back to this after the loss is computed.
        res_shape = ops.shape(y_pred)[:-1]
        # NOTE(review): `ignore_class` is cast to `y_pred`'s (typically
        # float) dtype before comparison against the integer labels —
        # presumably the backends promote this consistently; confirm.
        valid_mask = ops.not_equal(y_true, ops.cast(ignore_class, y_pred.dtype))
        # Zero out ignored labels/predictions so the crossentropy op sees
        # in-range class indices at those positions.
        y_true = y_true * ops.cast(valid_mask, y_true.dtype)
        y_pred = y_pred * ops.cast(
            ops.expand_dims(valid_mask, -1), y_pred.dtype
        )
    res = ops.sparse_categorical_crossentropy(
        y_true,
        y_pred,
        from_logits=from_logits,
        axis=axis,
    )
    if ignore_class is not None:
        valid_mask = ops.reshape(valid_mask, res_shape)
        # Force the loss at ignored positions to exactly zero, and attach
        # the validity mask so downstream reductions can exclude them.
        res = ops.where(valid_mask, res, 0.0)
        backend.set_keras_mask(res, mask=valid_mask)
    return res
@keras_export(
    [
        "keras.metrics.binary_crossentropy",
        "keras.losses.binary_crossentropy",
    ]
)
def binary_crossentropy(
    y_true, y_pred, from_logits=False, label_smoothing=0.0, axis=-1
):
    """Computes the binary crossentropy loss.

    Args:
        y_true: Ground truth values, of shape `[batch_size, d0, .. dN]`.
        y_pred: Predicted values, of shape `[batch_size, d0, .. dN]`.
        from_logits: Whether `y_pred` is a tensor of logits. When `False`
            (the default), `y_pred` is assumed to encode a probability
            distribution.
        label_smoothing: Float in `[0, 1]`. When greater than `0`, labels
            are squeezed toward 0.5: the target class becomes
            `1. - 0.5 * label_smoothing` and the non-target class
            `0.5 * label_smoothing`.
        axis: The axis along which the mean is computed. Defaults to `-1`.

    Returns:
        Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.

    Example:

    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> loss = keras.losses.binary_crossentropy(y_true, y_pred)
    >>> assert loss.shape == (2,)
    >>> loss
    array([0.916 , 0.714], dtype=float32)
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.cast(y_true, y_pred.dtype)

    if label_smoothing:
        # Squeeze labels toward 0.5: 1 -> 1 - 0.5*s, 0 -> 0.5*s.
        y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing

    bce = ops.binary_crossentropy(y_true, y_pred, from_logits=from_logits)
    return ops.mean(bce, axis=axis)
@keras_export(
    [
        "keras.metrics.binary_focal_crossentropy",
        "keras.losses.binary_focal_crossentropy",
    ]
)
def binary_focal_crossentropy(
    y_true,
    y_pred,
    apply_class_balancing=False,
    alpha=0.25,
    gamma=2.0,
    from_logits=False,
    label_smoothing=0.0,
    axis=-1,
):
    """Computes the binary focal crossentropy loss.

    According to [Lin et al., 2018](https://arxiv.org/pdf/1708.02002.pdf), it
    helps to apply a focal factor to down-weight easy examples and focus more
    on hard examples. By default, the focal tensor is computed as follows:

    `focal_factor = (1 - output) ** gamma` for class 1
    `focal_factor = output ** gamma` for class 0
    where `gamma` is a focusing parameter. When `gamma` = 0, there is no focal
    effect on the binary crossentropy loss.

    If `apply_class_balancing == True`, this function also takes into account
    a weight balancing factor for the binary classes 0 and 1 as follows:

    `weight = alpha` for class 1 (`target == 1`)
    `weight = 1 - alpha` for class 0
    where `alpha` is a float in the range of `[0, 1]`.

    Args:
        y_true: Ground truth values, of shape `(batch_size, d0, .. dN)`.
        y_pred: The predicted values, of shape `(batch_size, d0, .. dN)`.
        apply_class_balancing: A bool, whether to apply weight balancing on
            the binary classes 0 and 1.
        alpha: A weight balancing factor for class 1, default is `0.25` as
            mentioned in the reference. The weight for class 0 is
            `1.0 - alpha`.
        gamma: A focusing parameter, default is `2.0` as mentioned in the
            reference.
        from_logits: Whether `y_pred` is expected to be a logits tensor. By
            default, we assume that `y_pred` encodes a probability
            distribution.
        label_smoothing: Float in `[0, 1]`. If > `0` then smooth the labels
            by squeezing them towards 0.5, that is,
            using `1. - 0.5 * label_smoothing` for the target class
            and `0.5 * label_smoothing` for the non-target class.
        axis: The axis along which the mean is computed. Defaults to `-1`.

    Returns:
        Binary focal crossentropy loss value
        with shape = `[batch_size, d0, .. dN-1]`.

    Example:

    >>> y_true = [[0, 1], [0, 0]]
    >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
    >>> # In this instance, the first sample in the second batch is the
    >>> # 'easier' example.
    >>> focal_loss = keras.losses.binary_focal_crossentropy(
    ...     y_true, y_pred, gamma=2)
    >>> assert focal_loss.shape == (2,)
    >>> focal_loss
    array([0.330, 0.206], dtype=float32)
    >>> # Compare with binary_crossentropy
    >>> bce_loss = keras.losses.binary_focal_crossentropy(
    ...     y_true, y_pred)
    >>> bce_loss
    array([0.916, 0.714], dtype=float32)
    >>> # Binary focal crossentropy loss attributes more importance to the
    >>> # harder example which results in a higher loss for the first batch
    >>> # when normalized by binary cross entropy loss
    >>> focal_loss/bce_loss
    array([0.360, 0.289], dtype=float32)
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.cast(y_true, y_pred.dtype)
    if label_smoothing:
        # Squeeze labels toward 0.5: 1 -> 1 - 0.5*s, 0 -> 0.5*s.
        y_true = y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing
    # Convert logits to probabilities up front so the focal factor is
    # computed from probabilities; the bce below then uses from_logits=False.
    if from_logits:
        y_pred = ops.sigmoid(y_pred)
    bce = ops.binary_crossentropy(
        target=y_true,
        output=y_pred,
        from_logits=False,
    )
    # p_t is the predicted probability of the true class:
    # y_pred where y_true == 1, (1 - y_pred) where y_true == 0.
    p_t = y_true * y_pred + (1 - y_true) * (1 - y_pred)
    # (1 - p_t)^gamma down-weights well-classified (easy) examples.
    focal_factor = ops.power(1.0 - p_t, gamma)
    focal_bce = focal_factor * bce
    if apply_class_balancing:
        # alpha for class 1, (1 - alpha) for class 0.
        weight = y_true * alpha + (1 - y_true) * (1 - alpha)
        focal_bce = weight * focal_bce
    return ops.mean(focal_bce, axis=axis)
@keras_export("keras.losses.ctc")
def ctc(y_true, y_pred):
    """CTC (Connectionist Temporal Classification) loss.

    Args:
        y_true: Integer tensor of shape `(batch_size, max_length)` holding
            the true labels. Index `0` is reserved for the blank/mask
            token and must not be used as a real class.
        y_pred: Tensor of shape `(batch_size, max_length, num_classes)`
            holding logits (the output of your model). They should *not*
            be normalized via softmax.
    """
    if len(ops.shape(y_true)) != 2:
        raise ValueError(
            "Targets `y_true` are expected to be a tensor of shape "
            "`(batch_size, max_length)` in integer format. "
            f"Received: y_true.shape={ops.shape(y_true)}"
        )
    if len(ops.shape(y_pred)) != 3:
        raise ValueError(
            "Logits `y_pred` are expected to be a tensor of shape "
            "`(batch_size, max_length, num_classes)`. "
            f"Received: y_pred.shape={ops.shape(y_pred)}"
        )

    mask_index = 0
    pred_shape = ops.shape(y_pred)
    # Every sample uses the full time dimension as its input length.
    input_length = pred_shape[1] * ops.ones((pred_shape[0],), dtype="int32")
    # Label length is the number of non-blank entries per sample.
    label_length = ops.cast(
        ops.sum(y_true != mask_index, axis=-1), dtype="int32"
    )
    return ops.ctc_loss(
        y_true, y_pred, label_length, input_length, mask_index=mask_index
    )
@keras_export("keras.losses.dice")
def dice(y_true, y_pred, axis=None):
    """Computes the Dice loss value between `y_true` and `y_pred`.

    Formula:
    ```python
    loss = 1 - (2 * sum(y_true * y_pred)) / (sum(y_true) + sum(y_pred))
    ```

    Args:
        y_true: tensor of true targets.
        y_pred: tensor of predicted targets.
        axis: tuple for which dimensions the loss is calculated

    Returns:
        Dice loss value.

    Example:

    >>> y_true = [[[[1.0], [1.0]], [[0.0], [0.0]]],
    ...           [[[1.0], [1.0]], [[0.0], [0.0]]]]
    >>> y_pred = [[[[0.0], [1.0]], [[0.0], [1.0]]],
    ...           [[[0.4], [0.0]], [[0.0], [0.9]]]]
    >>> axis = (1, 2, 3)
    >>> loss = keras.losses.dice(y_true, y_pred, axis=axis)
    >>> assert loss.shape == (2,)
    >>> loss
    array([0.5, 0.75757575], shape=(2,), dtype=float32)

    >>> loss = keras.losses.dice(y_true, y_pred)
    >>> assert loss.shape == ()
    >>> loss
    array(0.6164384, shape=(), dtype=float32)
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.cast(y_true, y_pred.dtype)

    overlap = ops.sum(y_true * y_pred, axis=axis)
    # Epsilon keeps the division finite when both inputs are all-zero.
    denominator = (
        ops.sum(y_true, axis=axis)
        + ops.sum(y_pred, axis=axis)
        + backend.epsilon()
    )
    return 1 - ops.divide(2.0 * overlap, denominator)
@keras_export("keras.losses.tversky")
def tversky(y_true, y_pred, alpha=0.5, beta=0.5, axis=None):
    """Computes the Tversky loss value between `y_true` and `y_pred`.

    The Tversky index generalizes the Dice coefficient by weighting false
    positives and false negatives separately:

    ```python
    TI = TP / (TP + alpha * FP + beta * FN)
    ```

    With `alpha=0.5` and `beta=0.5`, the loss value becomes equivalent to
    Dice Loss.

    Args:
        y_true: tensor of true targets.
        y_pred: tensor of predicted targets.
        alpha: coefficient controlling incidence of false positives.
        beta: coefficient controlling incidence of false negatives.
        axis: tuple for which dimensions the loss is calculated.

    Returns:
        Tversky loss value.

    Reference:

    - [Salehi et al., 2017](https://arxiv.org/abs/1706.05721)
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.cast(y_true, y_pred.dtype)

    true_positives = ops.sum(y_true * y_pred, axis=axis)
    # False positives: predicted mass where the ground truth is negative.
    # (Previously computed as `(1 - y_pred) * y_true`, which is the
    # false-negative mass, so `alpha` and `beta` were applied to the
    # opposite error types from what the docstring and Salehi et al.
    # describe. The default alpha == beta == 0.5 is unaffected.)
    false_positives = ops.sum(y_pred * (1 - y_true), axis=axis)
    # False negatives: ground-truth mass the prediction missed.
    false_negatives = ops.sum((1 - y_pred) * y_true, axis=axis)
    # Epsilon keeps the division finite when all terms are zero.
    tversky_index = ops.divide(
        true_positives,
        true_positives
        + false_positives * alpha
        + false_negatives * beta
        + backend.epsilon(),
    )
    return 1 - tversky_index
@keras_export("keras.losses.circle")
def circle(
    y_true,
    y_pred,
    ref_labels=None,
    ref_embeddings=None,
    remove_diagonal=True,
    gamma=80,
    margin=0.4,
):
    """Computes the Circle loss.

    It is designed to minimize within-class distances and maximize
    between-class distances in L2 normalized embedding space.

    Args:
        y_true: Tensor with ground truth labels in integer format.
        y_pred: Tensor with predicted L2 normalized embeddings.
        ref_labels: Optional integer tensor with labels for reference
            embeddings. If `None`, defaults to `y_true`.
        ref_embeddings: Optional tensor with L2 normalized reference
            embeddings. If `None`, defaults to `y_pred`.
        remove_diagonal: Boolean, whether to remove self-similarities from
            positive mask. Defaults to `True`.
        gamma: Float, scaling factor for the loss. Defaults to `80`.
        margin: Float, relaxation factor for the loss. Defaults to `0.4`.

    Returns:
        Circle loss value.
    """
    y_pred = ops.convert_to_tensor(y_pred)
    y_true = ops.cast(y_true, "int32")
    # Fall back to comparing the batch against itself when no explicit
    # reference embeddings/labels are supplied.
    ref_embeddings = (
        y_pred
        if ref_embeddings is None
        else ops.convert_to_tensor(ref_embeddings)
    )
    ref_labels = y_true if ref_labels is None else ops.cast(ref_labels, "int32")
    # Optimum and margin terms of the Circle loss (O_p, O_n, Delta_p,
    # Delta_n), expressed here in distance (1 - similarity) form.
    optim_pos = margin
    optim_neg = 1 + margin
    delta_pos = margin
    delta_neg = 1 - margin
    # For L2-normalized embeddings the inner product is cosine similarity,
    # so 1 - similarity is the cosine distance.
    pairwise_cosine_distances = 1 - ops.matmul(
        y_pred, ops.transpose(ref_embeddings)
    )
    # Clamp tiny negative values caused by floating-point error.
    pairwise_cosine_distances = ops.maximum(pairwise_cosine_distances, 0.0)
    positive_mask, negative_mask = build_pos_neg_masks(
        y_true,
        ref_labels,
        remove_diagonal=remove_diagonal,
    )
    positive_mask = ops.cast(
        positive_mask, dtype=pairwise_cosine_distances.dtype
    )
    negative_mask = ops.cast(
        negative_mask, dtype=pairwise_cosine_distances.dtype
    )
    # Self-paced weighting: pairs farther from their optimum get larger
    # weights; masked-out pairs get weight 0.
    pos_weights = optim_pos + pairwise_cosine_distances
    pos_weights = pos_weights * positive_mask
    pos_weights = ops.maximum(pos_weights, 0.0)
    neg_weights = optim_neg - pairwise_cosine_distances
    neg_weights = neg_weights * negative_mask
    neg_weights = ops.maximum(neg_weights, 0.0)
    pos_dists = delta_pos - pairwise_cosine_distances
    neg_dists = delta_neg - pairwise_cosine_distances
    pos_wdists = -1 * gamma * pos_weights * pos_dists
    neg_wdists = gamma * neg_weights * neg_dists
    # logsumexp over only the masked entries; entries outside the mask are
    # set to -inf so they contribute nothing to the reduction.
    p_loss = ops.logsumexp(
        ops.where(positive_mask, pos_wdists, float("-inf")),
        axis=1,
    )
    n_loss = ops.logsumexp(
        ops.where(negative_mask, neg_wdists, float("-inf")),
        axis=1,
    )
    circle_loss = ops.softplus(p_loss + n_loss)
    # Attach a mask excluding zero-loss samples — presumably those with no
    # valid positive/negative pairs; confirm against build_pos_neg_masks.
    backend.set_keras_mask(circle_loss, circle_loss > 0)
    return circle_loss
@keras_export("keras.losses.categorical_generalized_cross_entropy")
def categorical_generalized_cross_entropy(y_true, y_pred, q):
    """Computes the Generalized Cross Entropy loss.

    Generalized Cross Entropy (GCE) is a noise-robust loss function that
    provides better robustness against noisy labels than standard cross
    entropy. The parameter `q` interpolates between cross entropy and mean
    absolute error, with values closer to 1 giving more robustness to
    label noise.

    Formula:
    ```python
    loss = (1 - p**q) / q
    ```
    where `p` is the predicted probability for the true class and `q` is
    the noise parameter.

    Args:
        y_true: Ground truth labels. Expected to contain *integer class
            indices* with shape `[batch_size]` or `[batch_size, 1]`.
        y_pred: The predicted class probabilities, with shape
            `[batch_size, num_classes]`.
        q: Float in range `(0, 1)`. It is the noise parameter.
            Controls the behavior of the loss:
            - As `q` approaches 0: Behaves more like cross entropy
            - As `q` approaches 1: Behaves more like mean absolute error

    Returns:
        GCE loss values with shape `[batch_size]`.

    References:
        - [Zhang, Sabuncu, 2018](https://arxiv.org/abs/1805.07836)
          ("Generalized Cross Entropy Loss for Training
          Deep Neural Networks with Noisy Labels")
    """
    # One-hot encode the integer labels over the prediction's class axis.
    one_hot_labels = ops.one_hot(
        ops.cast(y_true, "int"), num_classes=ops.shape(y_pred)[-1]
    )
    one_hot_labels = ops.cast(one_hot_labels, y_pred.dtype)
    # Probability the model assigned to the true class of each sample.
    true_class_prob = ops.sum(y_pred * one_hot_labels, axis=-1)
    # GCE loss for q in (0, 1).
    return (1 - ops.power(true_class_prob, q)) / q
| CategoricalGeneralizedCrossEntropy |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 879493,
"end": 879894
} | class ____(sgqlc.types.Type, ProjectV2FieldCommon, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("options",)
options = sgqlc.types.Field(
sgqlc.types.non_null(
sgqlc.types.list_of(sgqlc.types.non_null(ProjectV2SingleSelectFieldOption))
),
graphql_name="options",
)
| ProjectV2SingleSelectField |
python | facebookresearch__faiss | tests/test_swig_wrapper.py | {
"start": 4733,
"end": 5111
} | class ____(unittest.TestCase):
def test_rev_swig_ptr(self):
index = faiss.IndexFlatL2(4)
xb0 = np.vstack([
i * 10 + np.array([1, 2, 3, 4], dtype='float32')
for i in range(5)])
index.add(xb0)
xb = faiss.rev_swig_ptr(index.get_xb(), 4 * 5).reshape(5, 4)
self.assertEqual(np.abs(xb0 - xb).sum(), 0)
| TestRevSwigPtr |
python | pennersr__django-allauth | allauth/headless/mfa/views.py | {
"start": 2337,
"end": 2577
} | class ____(AuthenticatedAPIView):
def get(self, request, *args, **kwargs):
authenticators = Authenticator.objects.filter(user=request.user)
return response.AuthenticatorsResponse(request, authenticators)
| AuthenticatorsView |
python | python-poetry__poetry | src/poetry/repositories/legacy_repository.py | {
"start": 836,
"end": 4938
} | class ____(HTTPRepository):
def __init__(
self,
name: str,
url: str,
*,
config: Config | None = None,
disable_cache: bool = False,
pool_size: int = requests.adapters.DEFAULT_POOLSIZE,
) -> None:
if name == "pypi":
raise ValueError("The name [pypi] is reserved for repositories")
super().__init__(
name,
url.rstrip("/"),
config=config,
disable_cache=disable_cache,
pool_size=pool_size,
)
def package(self, name: str, version: Version) -> Package:
"""
Retrieve the release information.
This is a heavy task which takes time.
We have to download a package to get the dependencies.
We also need to download every file matching this release
to get the various hashes.
Note that this will be cached so the subsequent operations
should be much faster.
"""
try:
index = self._packages.index(Package(name, version))
return self._packages[index]
except ValueError:
package = super().package(name, version)
package._source_type = "legacy"
package._source_url = self._url
package._source_reference = self.name
return package
def find_links_for_package(self, package: Package) -> list[Link]:
try:
page = self.get_page(package.name)
except PackageNotFoundError:
return []
return list(page.links_for_version(package.name, package.version))
def _find_packages(
self, name: NormalizedName, constraint: VersionConstraint
) -> list[Package]:
"""
Find packages on the remote server.
"""
try:
page = self.get_page(name)
except PackageNotFoundError:
self._log(f"No packages found for {name}", level="debug")
return []
versions = [
(version, page.yanked(name, version))
for version in page.versions(name)
if constraint.allows(version)
]
return [
Package(
name,
version,
source_type="legacy",
source_reference=self.name,
source_url=self._url,
yanked=yanked,
)
for version, yanked in versions
]
def _get_release_info(
self, name: NormalizedName, version: Version
) -> dict[str, Any]:
page = self.get_page(name)
links = list(page.links_for_version(name, version))
yanked = page.yanked(name, version)
return self._links_to_data(
links,
PackageInfo(
name=name,
version=version.text,
summary="",
requires_dist=[],
requires_python=None,
files=[],
yanked=yanked,
cache_version=str(self.CACHE_VERSION),
),
)
def _get_page(self, name: NormalizedName) -> HTMLPage:
if not (response := self._get_response(f"/{name}/")):
raise PackageNotFoundError(f"Package [{name}] not found.")
return HTMLPage(response.url, response.text)
@cached_property
def root_page(self) -> SimpleRepositoryRootPage:
if not (response := self._get_response("/")):
self._log(
f"Unable to retrieve package listing from package source {self.name}",
level="error",
)
return SimpleRepositoryRootPage()
return SimpleRepositoryRootPage(response.text)
def search(self, query: str | list[str]) -> list[Package]:
results: list[Package] = []
for candidate in self.root_page.search(query):
with suppress(PackageNotFoundError):
page = self.get_page(candidate)
for package in page.packages:
results.append(package)
return results
| LegacyRepository |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 233557,
"end": 234696
} | class ____(Response):
"""
Response of tasks.delete_hyper_params endpoint.
:param deleted: Indicates if the task was updated successfully
:type deleted: int
"""
_service = "tasks"
_action = "delete_hyper_params"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"deleted": {
"description": "Indicates if the task was updated successfully",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, deleted=None, **kwargs):
super(DeleteHyperParamsResponse, self).__init__(**kwargs)
self.deleted = deleted
@schema_property("deleted")
def deleted(self):
return self._property_deleted
@deleted.setter
def deleted(self, value):
if value is None:
self._property_deleted = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "deleted", six.integer_types)
self._property_deleted = value
| DeleteHyperParamsResponse |
python | walkccc__LeetCode | solutions/238. Product of Array Except Self/238.py | {
"start": 0,
"end": 384
} | class ____:
def productExceptSelf(self, nums: list[int]) -> list[int]:
n = len(nums)
prefix = [1] * n # prefix product
suffix = [1] * n # suffix product
for i in range(1, n):
prefix[i] = prefix[i - 1] * nums[i - 1]
for i in reversed(range(n - 1)):
suffix[i] = suffix[i + 1] * nums[i + 1]
return [prefix[i] * suffix[i] for i in range(n)]
| Solution |
python | automl__auto-sklearn | autosklearn/experimental/askl2.py | {
"start": 2356,
"end": 4547
} | class ____:
def __init__(self, budget_type, eta, initial_budget, portfolio):
self.budget_type = budget_type
self.eta = eta
self.initial_budget = initial_budget
self.portfolio = portfolio
def __call__(
self,
scenario_dict,
seed,
ta,
ta_kwargs,
metalearning_configurations,
n_jobs,
dask_client,
multi_objective_algorithm,
multi_objective_kwargs,
):
from smac.facade.smac_ac_facade import SMAC4AC
from smac.intensification.successive_halving import SuccessiveHalving
from smac.runhistory.runhistory2epm import RunHistory2EPM4LogCost
from smac.scenario.scenario import Scenario
scenario = Scenario(scenario_dict)
initial_configurations = []
for member in self.portfolio.values():
try:
hp_names = scenario.cs.get_hyperparameter_names()
_member = {key: member[key] for key in member if key in hp_names}
initial_configurations.append(
Configuration(configuration_space=scenario.cs, values=_member)
)
except ValueError:
pass
rh2EPM = RunHistory2EPM4LogCost
ta_kwargs["budget_type"] = self.budget_type
smac4ac = SMAC4AC(
scenario=scenario,
rng=seed,
runhistory2epm=rh2EPM,
tae_runner=ta,
tae_runner_kwargs=ta_kwargs,
initial_configurations=initial_configurations,
run_id=seed,
intensifier=SuccessiveHalving,
intensifier_kwargs={
"initial_budget": self.initial_budget,
"max_budget": 100,
"eta": self.eta,
"min_chall": 1,
},
dask_client=dask_client,
n_jobs=n_jobs,
multi_objective_algorithm=multi_objective_algorithm,
multi_objective_kwargs=multi_objective_kwargs,
)
smac4ac.solver.epm_chooser.min_samples_model = int(
len(scenario.cs.get_hyperparameters()) / 2
)
return smac4ac
| SHObjectCallback |
python | huggingface__transformers | tests/models/whisper/test_modeling_whisper.py | {
"start": 250008,
"end": 251377
} | class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (WhisperDecoder, WhisperForCausalLM) if is_torch_available() else ()
is_encoder_decoder = False
test_missing_keys = False
def setUp(self):
self.model_tester = WhisperStandaloneDecoderModelTester(self, is_training=False)
self.config_tester = ConfigTester(self, config_class=WhisperConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_decoder_model_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
config, inputs_dict = config_and_inputs
self.model_tester.create_and_check_decoder_model_past(config=config, input_ids=inputs_dict["input_ids"])
def test_decoder_model_attn_mask_past(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
config, inputs_dict = config_and_inputs
self.model_tester.create_and_check_decoder_model_attention_mask_past(
config=config, input_ids=inputs_dict["input_ids"]
)
@unittest.skip(reason="Decoder can't keep attention grads")
def test_retain_grad_hidden_states_attentions(self):
return
@unittest.skip(reason="Decoder cannot keep gradients")
def test_flex_attention_with_grads():
return
| WhisperStandaloneDecoderModelTest |
python | pytorch__pytorch | benchmarks/fastrnns/custom_lstms.py | {
"start": 9956,
"end": 11019
} | class ____(jit.ScriptModule):
__constants__ = ["layers"] # Necessary for iterating through self.layers
def __init__(self, num_layers, layer, first_layer_args, other_layer_args):
super().__init__()
self.layers = init_stacked_lstm(
num_layers, layer, first_layer_args, other_layer_args
)
@jit.script_method
def forward(
self, input: Tensor, states: list[list[tuple[Tensor, Tensor]]]
) -> tuple[Tensor, list[list[tuple[Tensor, Tensor]]]]:
# List[List[LSTMState]]: The outer list is for layers,
# inner list is for directions.
output_states = jit.annotate(list[list[tuple[Tensor, Tensor]]], [])
output = input
# XXX: enumerate https://github.com/pytorch/pytorch/issues/14471
i = 0
for rnn_layer in self.layers:
state = states[i]
output, out_state = rnn_layer(output, state)
output_states += [out_state]
i += 1 # noqa: SIM113
return output, output_states
| StackedLSTM2 |
python | astropy__astropy | astropy/modeling/tests/test_models.py | {
"start": 22112,
"end": 22369
} | class ____(Fittable1DModelTester):
pass
@pytest.mark.filterwarnings(r"ignore:Model is linear in parameters.*")
@pytest.mark.parametrize(
("model_class", "test_parameters"),
sorted(models_2D.items(), key=lambda x: str(x[0])),
)
| TestFittable1DModels |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 101350,
"end": 102772
} | class ____(system_info):
section = 'boost_python'
dir_env_var = 'BOOST'
def get_paths(self, section, key):
pre_dirs = system_info.get_paths(self, section, key)
dirs = []
for d in pre_dirs:
dirs.extend([d] + self.combine_paths(d, ['boost*']))
return [d for d in dirs if os.path.isdir(d)]
def calc_info(self):
src_dirs = self.get_src_dirs()
src_dir = ''
for d in src_dirs:
if os.path.isfile(os.path.join(d, 'libs', 'python', 'src',
'module.cpp')):
src_dir = d
break
if not src_dir:
return
py_incl_dirs = [sysconfig.get_path('include')]
py_pincl_dir = sysconfig.get_path('platinclude')
if py_pincl_dir not in py_incl_dirs:
py_incl_dirs.append(py_pincl_dir)
srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
bpl_srcs = glob(os.path.join(srcs_dir, '*.cpp'))
bpl_srcs += glob(os.path.join(srcs_dir, '*', '*.cpp'))
info = {'libraries': [('boost_python_src',
{'include_dirs': [src_dir] + py_incl_dirs,
'sources':bpl_srcs}
)],
'include_dirs': [src_dir],
}
if info:
self.set_info(**info)
return
| boost_python_info |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/diag_op_test.py | {
"start": 44583,
"end": 47388
} | class ____(test.TestCase):
def setUp(self):
np.random.seed(0)
def _diagPartOp(self, tensor, dtype, expected_ans, use_gpu):
with self.cached_session(use_gpu=use_gpu):
tensor = ops.convert_to_tensor(tensor.astype(dtype))
tf_ans_inv = array_ops.diag_part(tensor)
inv_out = self.evaluate(tf_ans_inv)
self.assertAllClose(inv_out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans_inv)
def diagPartOp(self, tensor, dtype, expected_ans):
self._diagPartOp(tensor, dtype, expected_ans, False)
self._diagPartOp(tensor, dtype, expected_ans, True)
def testRankTwoFloatTensor(self):
x = np.random.rand(3, 3)
i = np.arange(3)
expected_ans = x[i, i]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankFourFloatTensorUnknownShape(self):
x = np.random.rand(3, 3)
i = np.arange(3)
expected_ans = x[i, i]
for shape in None, (None, 3), (3, None):
with self.cached_session(use_gpu=False):
t = ops.convert_to_tensor(x.astype(np.float32))
t.set_shape(shape)
tf_ans = array_ops.diag_part(t)
out = self.evaluate(tf_ans)
self.assertAllClose(out, expected_ans)
self.assertShapeEqual(expected_ans, tf_ans)
def testRankFourFloatTensor(self):
x = np.random.rand(2, 3, 2, 3)
i = np.arange(2)[:, None]
j = np.arange(3)
expected_ans = x[i, j, i, j]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankSixFloatTensor(self):
x = np.random.rand(2, 2, 2, 2, 2, 2)
i = np.arange(2)[:, None, None]
j = np.arange(2)[:, None]
k = np.arange(2)
expected_ans = x[i, j, k, i, j, k]
self.diagPartOp(x, np.float32, expected_ans)
self.diagPartOp(x, np.float64, expected_ans)
def testRankEightComplexTensor(self):
x = np.random.rand(2, 2, 2, 3, 2, 2, 2, 3)
i = np.arange(2)[:, None, None, None]
j = np.arange(2)[:, None, None]
k = np.arange(2)[:, None]
l = np.arange(3)
expected_ans = x[i, j, k, l, i, j, k, l]
self.diagPartOp(x, np.complex64, expected_ans)
self.diagPartOp(x, np.complex128, expected_ans)
@test_util.run_deprecated_v1
def testOddRank(self):
w = np.random.rand(2)
x = np.random.rand(2, 2, 2)
self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
with self.assertRaises(ValueError):
array_ops.diag_part(0.0)
@test_util.run_deprecated_v1
def testUnevenDimensions(self):
w = np.random.rand(2, 5)
x = np.random.rand(2, 1, 2, 3)
self.assertRaises(ValueError, self.diagPartOp, w, np.float32, 0)
self.assertRaises(ValueError, self.diagPartOp, x, np.float32, 0)
| DiagPartOpTest |
python | getsentry__sentry | fixtures/page_objects/organization_integration_settings.py | {
"start": 103,
"end": 733
} | class ____(ModalElement):
name_field_selector = "name"
submit_button_selector = '[type="submit"]'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = self.element.find_element(by=By.NAME, value="name")
continue_button_element = self.element.find_element(
by=By.CSS_SELECTOR, value=self.submit_button_selector
)
self.continue_button = ButtonElement(continue_button_element)
def fill_in_setup_form(self, installation_data):
self.name.send_keys(installation_data[self.name_field_selector])
| ExampleIntegrationSetupWindowElement |
python | wandb__wandb | wandb/automations/_generated/fragments.py | {
"start": 3867,
"end": 4491
} | class ____(GQLResult):
typename__: Typename[Literal["Project"]] = "Project"
triggers: List[TriggerFields]
ArtifactPortfolioScopeFields.model_rebuild()
ArtifactSequenceScopeFields.model_rebuild()
FilterEventFields.model_rebuild()
WebhookIntegrationFields.model_rebuild()
GenericWebhookActionFields.model_rebuild()
NoOpActionFields.model_rebuild()
SlackIntegrationFields.model_rebuild()
NotificationActionFields.model_rebuild()
PageInfoFields.model_rebuild()
ProjectScopeFields.model_rebuild()
QueueJobActionFields.model_rebuild()
TriggerFields.model_rebuild()
ProjectTriggersFields.model_rebuild()
| ProjectTriggersFields |
python | pyparsing__pyparsing | pyparsing/helpers.py | {
"start": 25595,
"end": 41636
} | class ____(Enum):
"""Enumeration of operator associativity
- used in constructing InfixNotationOperatorSpec for :class:`infix_notation`"""
LEFT = 1
RIGHT = 2
InfixNotationOperatorArgType = Union[
ParserElement, str, tuple[Union[ParserElement, str], Union[ParserElement, str]]
]
InfixNotationOperatorSpec = Union[
tuple[
InfixNotationOperatorArgType,
int,
OpAssoc,
typing.Optional[ParseAction],
],
tuple[
InfixNotationOperatorArgType,
int,
OpAssoc,
],
]
def infix_notation(
base_expr: ParserElement,
op_list: list[InfixNotationOperatorSpec],
lpar: Union[str, ParserElement] = Suppress("("),
rpar: Union[str, ParserElement] = Suppress(")"),
) -> Forward:
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary
or binary, left- or right-associative. Parse actions can also be
attached to operator expressions. The generated parser will also
recognize the use of parentheses to override operator precedences
(see example below).
Note: if you define a deep operator list, you may see performance
issues when using infix_notation. See
:class:`ParserElement.enable_packrat` for a mechanism to potentially
improve your parser performance.
Parameters:
:param base_expr: expression representing the most basic operand to
be used in the expression
:param op_list: list of tuples, one for each operator precedence level
in the expression grammar; each tuple is of the form ``(op_expr,
num_operands, right_left_assoc, (optional)parse_action)``, where:
- ``op_expr`` is the pyparsing expression for the operator; may also
be a string, which will be converted to a Literal; if ``num_operands``
is 3, ``op_expr`` is a tuple of two expressions, for the two
operators separating the 3 terms
- ``num_operands`` is the number of terms for this operator (must be 1,
2, or 3)
- ``right_left_assoc`` is the indicator whether the operator is right
or left associative, using the pyparsing-defined constants
``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
- ``parse_action`` is the parse action to be associated with
expressions matching this operator expression (the parse action
tuple member may be omitted); if the parse action is passed
a tuple or list of functions, this is equivalent to calling
``set_parse_action(*fn)``
(:class:`ParserElement.set_parse_action`)
:param lpar: expression for matching left-parentheses; if passed as a
str, then will be parsed as ``Suppress(lpar)``. If lpar is passed as
an expression (such as ``Literal('(')``), then it will be kept in
the parsed results, and grouped with them. (default= ``Suppress('(')``)
:param rpar: expression for matching right-parentheses; if passed as a
str, then will be parsed as ``Suppress(rpar)``. If rpar is passed as
an expression (such as ``Literal(')')``), then it will be kept in
the parsed results, and grouped with them. (default= ``Suppress(')')``)
Example:
.. testcode::
# simple example of four-function arithmetic with ints and
# variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infix_notation(integer | varname,
[
('-', 1, OpAssoc.RIGHT),
(one_of('* /'), 2, OpAssoc.LEFT),
(one_of('+ -'), 2, OpAssoc.LEFT),
])
arith_expr.run_tests('''
5+3*6
(5+3)*6
(5+x)*y
-2--11
''', full_dump=False)
prints:
.. testoutput::
:options: +NORMALIZE_WHITESPACE
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
(5+x)*y
[[[5, '+', 'x'], '*', 'y']]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
# captive version of FollowedBy that does not do parse actions or capture results names
class _FB(FollowedBy):
def parseImpl(self, instring, loc, doActions=True):
self.expr.try_parse(instring, loc)
return loc, []
_FB.__name__ = "FollowedBy>"
ret = Forward()
ret.set_name(f"{base_expr.name}_expression")
if isinstance(lpar, str):
lpar = Suppress(lpar)
if isinstance(rpar, str):
rpar = Suppress(rpar)
nested_expr = (lpar + ret + rpar).set_name(f"nested_{base_expr.name}_expression")
# if lpar and rpar are not suppressed, wrap in group
if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)):
lastExpr = base_expr | Group(nested_expr)
else:
lastExpr = base_expr | nested_expr
arity: int
rightLeftAssoc: opAssoc
pa: typing.Optional[ParseAction]
opExpr1: ParserElement
opExpr2: ParserElement
matchExpr: ParserElement
match_lookahead: ParserElement
for operDef in op_list:
opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] # type: ignore[assignment]
if isinstance(opExpr, str_type):
opExpr = ParserElement._literalStringClass(opExpr)
opExpr = typing.cast(ParserElement, opExpr)
if arity == 3:
if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
raise ValueError(
"if numterms=3, opExpr must be a tuple or list of two expressions"
)
opExpr1, opExpr2 = opExpr
term_name = f"{opExpr1}{opExpr2} operations"
else:
term_name = f"{opExpr} operations"
if not 1 <= arity <= 3:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
raise ValueError("operator must indicate right or left associativity")
thisExpr: ParserElement = Forward().set_name(term_name)
thisExpr = typing.cast(Forward, thisExpr)
match_lookahead = And([])
if rightLeftAssoc is OpAssoc.LEFT:
if arity == 1:
match_lookahead = _FB(lastExpr + opExpr)
matchExpr = Group(lastExpr + opExpr[1, ...])
elif arity == 2:
if opExpr is not None:
match_lookahead = _FB(lastExpr + opExpr + lastExpr)
matchExpr = Group(lastExpr + (opExpr + lastExpr)[1, ...])
else:
match_lookahead = _FB(lastExpr + lastExpr)
matchExpr = Group(lastExpr[2, ...])
elif arity == 3:
match_lookahead = _FB(
lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
)
matchExpr = Group(
lastExpr + (opExpr1 + lastExpr + opExpr2 + lastExpr)[1, ...]
)
elif rightLeftAssoc is OpAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Opt):
opExpr = Opt(opExpr)
match_lookahead = _FB(opExpr.expr + thisExpr)
matchExpr = Group(opExpr + thisExpr)
elif arity == 2:
if opExpr is not None:
match_lookahead = _FB(lastExpr + opExpr + thisExpr)
matchExpr = Group(lastExpr + (opExpr + thisExpr)[1, ...])
else:
match_lookahead = _FB(lastExpr + thisExpr)
matchExpr = Group(lastExpr + thisExpr[1, ...])
elif arity == 3:
match_lookahead = _FB(
lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
)
matchExpr = Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
# suppress lookahead expr from railroad diagrams
match_lookahead.show_in_diagram = False
# TODO - determine why this statement can't be included in the following
# if pa block
matchExpr = match_lookahead + matchExpr
if pa:
if isinstance(pa, (tuple, list)):
matchExpr.set_parse_action(*pa)
else:
matchExpr.set_parse_action(pa)
thisExpr <<= (matchExpr | lastExpr).set_name(term_name)
lastExpr = thisExpr
ret <<= lastExpr
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
"""
.. deprecated:: 3.0.0
Use the :class:`IndentedBlock` class instead. Note that `IndentedBlock`
has a difference method signature.
Helper method for defining space-delimited indentation blocks,
such as those used to define block statements in Python source code.
:param blockStatementExpr: expression defining syntax of statement that
is repeated within the indented block
:param indentStack: list created by caller to manage indentation stack
(multiple ``statementWithIndentedBlock`` expressions within a single
grammar should share a common ``indentStack``)
:param indent: boolean indicating whether block must be indented beyond
the current level; set to ``False`` for block of left-most statements
A valid block must contain at least one ``blockStatement``.
(Note that indentedBlock uses internal parse actions which make it
incompatible with packrat parsing.)
Example:
.. testcode::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group(funcDecl + func_body)
rvalue = Forward()
funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << (funcDef | assignment | identifier)
module_body = stmt[1, ...]
parseTree = module_body.parseString(data)
parseTree.pprint()
prints:
.. testoutput::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
warnings.warn(
f"{'indentedBlock'!r} deprecated - use {'IndentedBlock'!r}",
DeprecationWarning,
stacklevel=2,
)
backup_stacks.append(indentStack[:])
def reset_stack():
indentStack[:] = backup_stacks[-1]
def checkPeerIndent(s, l, t):
if l >= len(s):
return
curCol = col(l, s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseException(s, l, "illegal nesting")
raise ParseException(s, l, "not a peer entry")
def checkSubIndent(s, l, t):
curCol = col(l, s)
if curCol > indentStack[-1]:
indentStack.append(curCol)
else:
raise ParseException(s, l, "not a subentry")
def checkUnindent(s, l, t):
if l >= len(s):
return
curCol = col(l, s)
if not (indentStack and curCol in indentStack):
raise ParseException(s, l, "not an unindent")
if curCol < indentStack[-1]:
indentStack.pop()
NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
if indent:
smExpr = Group(
Opt(NL)
+ INDENT
+ OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
+ UNDENT
)
else:
smExpr = Group(
Opt(NL)
+ OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
+ Opt(UNDENT)
)
# add a parse action to remove backup_stack from list of backups
smExpr.add_parse_action(
lambda: backup_stacks.pop(-1) and None if backup_stacks else None
)
smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.set_name("indented block")
# it's easy to get these comment structures wrong - they're very common,
# so may as well make them available
c_style_comment = Regex(r"/\*(?:[^*]|\*(?!/))*\*\/").set_name("C style comment")
"Comment of the form ``/* ... */``"
html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
"Comment of the form ``<!-- ... -->``"
rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
"Comment of the form ``// ... (to end of line)``"
cpp_style_comment = Regex(
r"(?:/\*(?:[^*]|\*(?!/))*\*\/)|(?://(?:\\\n|[^\n])*)"
).set_name("C++ style comment")
"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
java_style_comment = cpp_style_comment
"Same as :class:`cpp_style_comment`"
python_style_comment = Regex(r"#.*").set_name("Python style comment")
"Comment of the form ``# ... (to end of line)``"
# build list of built-in expressions, for future reference if a global default value
# gets updated
_builtin_exprs: list[ParserElement] = [
v for v in vars().values() if isinstance(v, ParserElement)
]
# compatibility function, superseded by DelimitedList class
def delimited_list(
expr: Union[str, ParserElement],
delim: Union[str, ParserElement] = ",",
combine: bool = False,
min: typing.Optional[int] = None,
max: typing.Optional[int] = None,
*,
allow_trailing_delim: bool = False,
) -> ParserElement:
"""
.. deprecated:: 3.1.0
Use the :class:`DelimitedList` class instead.
"""
return DelimitedList(
expr, delim, combine, min, max, allow_trailing_delim=allow_trailing_delim
)
# Compatibility synonyms
# fmt: off
opAssoc = OpAssoc
anyOpenTag = any_open_tag
anyCloseTag = any_close_tag
commonHTMLEntity = common_html_entity
cStyleComment = c_style_comment
htmlComment = html_comment
restOfLine = rest_of_line
dblSlashComment = dbl_slash_comment
cppStyleComment = cpp_style_comment
javaStyleComment = java_style_comment
pythonStyleComment = python_style_comment
delimitedList = replaced_by_pep8("delimitedList", DelimitedList)
delimited_list = replaced_by_pep8("delimited_list", DelimitedList)
countedArray = replaced_by_pep8("countedArray", counted_array)
matchPreviousLiteral = replaced_by_pep8("matchPreviousLiteral", match_previous_literal)
matchPreviousExpr = replaced_by_pep8("matchPreviousExpr", match_previous_expr)
oneOf = replaced_by_pep8("oneOf", one_of)
dictOf = replaced_by_pep8("dictOf", dict_of)
originalTextFor = replaced_by_pep8("originalTextFor", original_text_for)
nestedExpr = replaced_by_pep8("nestedExpr", nested_expr)
makeHTMLTags = replaced_by_pep8("makeHTMLTags", make_html_tags)
makeXMLTags = replaced_by_pep8("makeXMLTags", make_xml_tags)
replaceHTMLEntity = replaced_by_pep8("replaceHTMLEntity", replace_html_entity)
infixNotation = replaced_by_pep8("infixNotation", infix_notation)
# fmt: on
| OpAssoc |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed5.py | {
"start": 192,
"end": 827
} | class ____(TypedDict, extra_items=int):
name: str
extra_str: MovieExtraStr = {"name": "Blade Runner", "summary": ""}
extra_int: MovieExtraInt = {"name": "No Country for Old Men", "year": 2007}
str_mapping: Mapping[str, str] = extra_str
# This should generate an error.
int_mapping: Mapping[str, int] = extra_int
int_str_mapping: Mapping[str, int | str] = extra_int
def func1(movie: MovieExtraStr) -> None:
reveal_type(movie.items(), expected_text="dict_items[str, str]")
reveal_type(movie.keys(), expected_text="dict_keys[str, str]")
reveal_type(movie.values(), expected_text="dict_values[str, str]")
| MovieExtraInt |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 16892,
"end": 17143
} | class ____(_RerankerProvider):
reranker: Union[Rerankers, _EnumLikeStr] = Field(
default=Rerankers.VOYAGEAI, frozen=True, exclude=True
)
model: Optional[Union[RerankerVoyageAIModel, str]] = Field(default=None)
| _RerankerVoyageAIConfig |
python | django__django | tests/model_formsets/models.py | {
"start": 4815,
"end": 5086
} | class ____(models.Model):
team = models.ForeignKey(Team, models.SET_NULL, null=True)
name = models.CharField(max_length=100)
def __str__(self):
return self.name
# Models for testing custom ModelForm save methods in formsets and inline
# formsets
| Player |
python | bokeh__bokeh | src/bokeh/core/property/container.py | {
"start": 4763,
"end": 5671
} | class ____(Seq[T]):
""" Accept Python ``set()`` values.
"""
def __init__(self, item_type: TypeOrInst[Property[T]], *, default: Init[T] = set(), help: str | None = None) -> None:
# TODO: refactor to not use mutable objects as default values.
# Left in place for now because we want to allow None to express
# optional values. Also in Dict.
super().__init__(item_type, default=default, help=help)
def wrap(self, value: set[T]) -> PropertyValueSet[T]:
""" Some property types need to wrap their values in special containers, etc. """
if isinstance(value, set):
if isinstance(value, PropertyValueSet):
return value
else:
return PropertyValueSet(value)
else:
return value
@classmethod
def _is_seq(cls, value: Any) -> bool:
return isinstance(value, set)
| Set |
python | pallets__werkzeug | tests/test_datastructures.py | {
"start": 16807,
"end": 17447
} | class ____:
storage_class = ds.TypeConversionDict
def test_value_conversion(self):
d = self.storage_class(foo="1")
assert d.get("foo", type=int) == 1
def test_return_default_when_conversion_is_not_possible(self):
d = self.storage_class(foo="bar", baz=None)
assert d.get("foo", default=-1, type=int) == -1
assert d.get("baz", default=-1, type=int) == -1
def test_propagate_exceptions_in_conversion(self):
d = self.storage_class(foo="bar")
switch = {"a": 1}
with pytest.raises(KeyError):
d.get("foo", type=lambda x: switch[x])
| TestTypeConversionDict |
python | scrapy__scrapy | tests/test_spidermiddleware_output_chain.py | {
"start": 1192,
"end": 1694
} | class ____(Spider):
name = "RecoverySpider"
custom_settings = {
"SPIDER_MIDDLEWARES_BASE": {},
"SPIDER_MIDDLEWARES": {
RecoveryMiddleware: 10,
},
}
async def start(self):
yield Request(self.mockserver.url("/status?n=200"))
def parse(self, response):
yield {"test": 1}
self.logger.info("DONT_FAIL: %s", response.meta.get("dont_fail"))
if not response.meta.get("dont_fail"):
raise TabError
| RecoverySpider |
python | google__pytype | pytype/compare_test.py | {
"start": 14257,
"end": 14586
} | class ____(CompareTestBase):
def test_compatible_with(self):
pytd_sig = pytd.Signature((), None, None, pytd.AnythingType(), (), ())
sig = abstract.PyTDSignature("f", pytd_sig, self._ctx)
f = abstract.PyTDFunction(
"f", (sig,), pytd.MethodKind.METHOD, (), self._ctx
)
self.assertTruthy(f)
| FunctionTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 113157,
"end": 113633
} | class ____(sgqlc.types.Enum):
"""The different kinds of records that can be featured on a GitHub
Sponsors profile page.
Enumeration Choices:
* `REPOSITORY`: A repository owned by the user or organization
with the GitHub Sponsors profile.
* `USER`: A user who belongs to the organization with the GitHub
Sponsors profile.
"""
__schema__ = github_schema
__choices__ = ("REPOSITORY", "USER")
| SponsorsListingFeaturedItemFeatureableType |
python | openai__openai-python | src/openai/resources/beta/realtime/realtime.py | {
"start": 4390,
"end": 6709
} | class ____(AsyncAPIResource):
@cached_property
def sessions(self) -> AsyncSessions:
return AsyncSessions(self._client)
@cached_property
def transcription_sessions(self) -> AsyncTranscriptionSessions:
return AsyncTranscriptionSessions(self._client)
@cached_property
def with_raw_response(self) -> AsyncRealtimeWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncRealtimeWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncRealtimeWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncRealtimeWithStreamingResponse(self)
def connect(
self,
*,
model: str,
extra_query: Query = {},
extra_headers: Headers = {},
websocket_connection_options: WebsocketConnectionOptions = {},
) -> AsyncRealtimeConnectionManager:
"""
The Realtime API enables you to build low-latency, multi-modal conversational experiences. It currently supports text and audio as both input and output, as well as function calling.
Some notable benefits of the API include:
- Native speech-to-speech: Skipping an intermediate text format means low latency and nuanced output.
- Natural, steerable voices: The models have natural inflection and can laugh, whisper, and adhere to tone direction.
- Simultaneous multimodal output: Text is useful for moderation; faster-than-realtime audio ensures stable playback.
The Realtime API is a stateful, event-based API that communicates over a WebSocket.
"""
return AsyncRealtimeConnectionManager(
client=self._client,
extra_query=extra_query,
extra_headers=extra_headers,
websocket_connection_options=websocket_connection_options,
model=model,
)
| AsyncRealtime |
python | pytorch__pytorch | torchgen/api/types/signatures.py | {
"start": 9065,
"end": 10515
} | class ____:
# The schema this signature is derived from
func: FunctionSchema
symint: bool
prefix: str = ""
def name(self) -> str:
return self.prefix + native.name(self.func)
def decl(self, name: str | None = None) -> str:
args_str = ", ".join(a.decl() for a in self.arguments())
if name is None:
name = self.name()
return f"{native.returns_type(self.func.returns, symint=self.symint).cpp_type()} {name}({args_str})"
def defn(self, name: str | None = None) -> str:
args_str = ", ".join(a.defn() for a in self.arguments())
if name is None:
name = self.name()
return f"{native.returns_type(self.func.returns, symint=self.symint).cpp_type()} {name}({args_str})"
def ptr_type(self) -> str:
# don't include defaults in type signature!
args_str = ", ".join(a.defn() for a in self.arguments())
return f"{native.returns_type(self.func.returns, symint=self.symint).cpp_type()} (*)({args_str})"
def arguments(self) -> list[Binding]:
return native.arguments(self.func, symint=self.symint)
def returns_type(self) -> CType:
return native.returns_type(self.func.returns, symint=self.symint)
def dispatcher_exprs(self) -> list[Expr]:
return translate.translate(
self.arguments(), dispatcher.arguments(self.func), method=False
)
@dataclass(frozen=True)
| NativeSignature |
python | scikit-learn__scikit-learn | sklearn/tests/test_base.py | {
"start": 2066,
"end": 2122
} | class ____(NaNTag, NoNaNTag):
pass
| DiamondOverwriteTag |
python | graphql-python__graphene | graphene/tests/issues/test_425.py | {
"start": 1342,
"end": 2299
} | class ____(InputObjectType):
@classmethod
def __init_subclass_with_meta__(cls, other_attr="default", **options):
_meta = SpecialInputObjectTypeOptions(cls)
_meta.other_attr = other_attr
super(SpecialInputObjectType, cls).__init_subclass_with_meta__(
_meta=_meta, **options
)
def test_special_inputobjecttype_could_be_subclassed():
class MyInputObjectType(SpecialInputObjectType):
class Meta:
other_attr = "yeah!"
assert MyInputObjectType._meta.other_attr == "yeah!"
def test_special_inputobjecttype_could_be_subclassed_default():
class MyInputObjectType(SpecialInputObjectType):
pass
assert MyInputObjectType._meta.other_attr == "default"
def test_special_inputobjecttype_inherit_meta_options():
class MyInputObjectType(SpecialInputObjectType):
pass
assert MyInputObjectType._meta.name == "MyInputObjectType"
# Enum
| SpecialInputObjectType |
python | matplotlib__matplotlib | lib/matplotlib/textpath.py | {
"start": 8863,
"end": 12248
} | class ____(Path):
"""
Create a path from the text.
"""
def __init__(self, xy, s, size=None, prop=None,
_interpolation_steps=1, usetex=False):
r"""
Create a path from the text. Note that it simply is a path,
not an artist. You need to use the `.PathPatch` (or other artists)
to draw this path onto the canvas.
Parameters
----------
xy : tuple or array of two float values
Position of the text. For no offset, use ``xy=(0, 0)``.
s : str
The text to convert to a path.
size : float, optional
Font size in points. Defaults to the size specified via the font
properties *prop*.
prop : `~matplotlib.font_manager.FontProperties`, optional
Font property. If not provided, will use a default
`.FontProperties` with parameters from the
:ref:`rcParams<customizing-with-dynamic-rc-settings>`.
_interpolation_steps : int, optional
(Currently ignored)
usetex : bool, default: False
Whether to use tex rendering.
Examples
--------
The following creates a path from the string "ABC" with Helvetica
font face; and another path from the latex fraction 1/2::
from matplotlib.text import TextPath
from matplotlib.font_manager import FontProperties
fp = FontProperties(family="Helvetica", style="italic")
path1 = TextPath((12, 12), "ABC", size=12, prop=fp)
path2 = TextPath((0, 0), r"$\frac{1}{2}$", size=12, usetex=True)
Also see :doc:`/gallery/text_labels_and_annotations/demo_text_path`.
"""
# Circular import.
from matplotlib.text import Text
prop = FontProperties._from_any(prop)
if size is None:
size = prop.get_size_in_points()
self._xy = xy
self.set_size(size)
self._cached_vertices = None
s, ismath = Text(usetex=usetex)._preprocess_math(s)
super().__init__(
*text_to_path.get_text_path(prop, s, ismath=ismath),
_interpolation_steps=_interpolation_steps,
readonly=True)
self._should_simplify = False
def set_size(self, size):
"""Set the text size."""
self._size = size
self._invalid = True
def get_size(self):
"""Get the text size."""
return self._size
@property
def vertices(self):
"""
Return the cached path after updating it if necessary.
"""
self._revalidate_path()
return self._cached_vertices
@property
def codes(self):
"""
Return the codes
"""
return self._codes
def _revalidate_path(self):
"""
Update the path if necessary.
The path for the text is initially create with the font size of
`.FONT_SCALE`, and this path is rescaled to other size when necessary.
"""
if self._invalid or self._cached_vertices is None:
tr = (Affine2D()
.scale(self._size / text_to_path.FONT_SCALE)
.translate(*self._xy))
self._cached_vertices = tr.transform(self._vertices)
self._cached_vertices.flags.writeable = False
self._invalid = False
| TextPath |
python | pypa__pipenv | pipenv/vendor/click/shell_completion.py | {
"start": 1399,
"end": 5279
} | class ____:
"""Represents a completion value and metadata about the value. The
default metadata is ``type`` to indicate special shell handling,
and ``help`` if a shell supports showing a help string next to the
value.
Arbitrary parameters can be passed when creating the object, and
accessed using ``item.attr``. If an attribute wasn't passed,
accessing it returns ``None``.
:param value: The completion suggestion.
:param type: Tells the shell script to provide special completion
support for the type. Click uses ``"dir"`` and ``"file"``.
:param help: String shown next to the value if supported.
:param kwargs: Arbitrary metadata. The built-in implementations
don't use this, but custom type completions paired with custom
shell support could use it.
"""
__slots__ = ("value", "type", "help", "_info")
def __init__(
self,
value: t.Any,
type: str = "plain",
help: t.Optional[str] = None,
**kwargs: t.Any,
) -> None:
self.value: t.Any = value
self.type: str = type
self.help: t.Optional[str] = help
self._info = kwargs
def __getattr__(self, name: str) -> t.Any:
return self._info.get(name)
# Only Bash >= 4.4 has the nosort option.
_SOURCE_BASH = """\
%(complete_func)s() {
local IFS=$'\\n'
local response
response=$(env COMP_WORDS="${COMP_WORDS[*]}" COMP_CWORD=$COMP_CWORD \
%(complete_var)s=bash_complete $1)
for completion in $response; do
IFS=',' read type value <<< "$completion"
if [[ $type == 'dir' ]]; then
COMPREPLY=()
compopt -o dirnames
elif [[ $type == 'file' ]]; then
COMPREPLY=()
compopt -o default
elif [[ $type == 'plain' ]]; then
COMPREPLY+=($value)
fi
done
return 0
}
%(complete_func)s_setup() {
complete -o nosort -F %(complete_func)s %(prog_name)s
}
%(complete_func)s_setup;
"""
_SOURCE_ZSH = """\
#compdef %(prog_name)s
%(complete_func)s() {
local -a completions
local -a completions_with_descriptions
local -a response
(( ! $+commands[%(prog_name)s] )) && return 1
response=("${(@f)$(env COMP_WORDS="${words[*]}" COMP_CWORD=$((CURRENT-1)) \
%(complete_var)s=zsh_complete %(prog_name)s)}")
for type key descr in ${response}; do
if [[ "$type" == "plain" ]]; then
if [[ "$descr" == "_" ]]; then
completions+=("$key")
else
completions_with_descriptions+=("$key":"$descr")
fi
elif [[ "$type" == "dir" ]]; then
_path_files -/
elif [[ "$type" == "file" ]]; then
_path_files -f
fi
done
if [ -n "$completions_with_descriptions" ]; then
_describe -V unsorted completions_with_descriptions -U
fi
if [ -n "$completions" ]; then
compadd -U -V unsorted -a completions
fi
}
if [[ $zsh_eval_context[-1] == loadautofunc ]]; then
# autoload from fpath, call function directly
%(complete_func)s "$@"
else
# eval/source/. command, register function for later
compdef %(complete_func)s %(prog_name)s
fi
"""
_SOURCE_FISH = """\
function %(complete_func)s;
set -l response (env %(complete_var)s=fish_complete COMP_WORDS=(commandline -cp) \
COMP_CWORD=(commandline -t) %(prog_name)s);
for completion in $response;
set -l metadata (string split "," $completion);
if test $metadata[1] = "dir";
__fish_complete_directories $metadata[2];
else if test $metadata[1] = "file";
__fish_complete_path $metadata[2];
else if test $metadata[1] = "plain";
echo $metadata[2];
end;
end;
end;
complete --no-files --command %(prog_name)s --arguments \
"(%(complete_func)s)";
"""
| CompletionItem |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/cache_key.py | {
"start": 21430,
"end": 33930
} | class ____(HasTraversalDispatch):
# very common elements are inlined into the main _get_cache_key() method
# to produce a dramatic savings in Python function call overhead
visit_has_cache_key = visit_clauseelement = CALL_GEN_CACHE_KEY
visit_clauseelement_list = InternalTraversal.dp_clauseelement_list
visit_annotations_key = InternalTraversal.dp_annotations_key
visit_clauseelement_tuple = InternalTraversal.dp_clauseelement_tuple
visit_memoized_select_entities = (
InternalTraversal.dp_memoized_select_entities
)
visit_string = visit_boolean = visit_operator = visit_plain_obj = (
CACHE_IN_PLACE
)
visit_statement_hint_list = CACHE_IN_PLACE
visit_type = STATIC_CACHE_KEY
visit_anon_name = ANON_NAME
visit_propagate_attrs = PROPAGATE_ATTRS
def visit_compile_state_funcs(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return tuple((fn.__code__, c_key) for fn, c_key in obj)
def visit_inspectable(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return (attrname, inspect(obj)._gen_cache_key(anon_map, bindparams))
def visit_string_list(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return tuple(obj)
def visit_multi(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return (
attrname,
(
obj._gen_cache_key(anon_map, bindparams)
if isinstance(obj, HasCacheKey)
else obj
),
)
def visit_multi_list(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return (
attrname,
tuple(
(
elem._gen_cache_key(anon_map, bindparams)
if isinstance(elem, HasCacheKey)
else elem
)
for elem in obj
),
)
def visit_has_cache_key_tuples(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
if not obj:
return ()
return (
attrname,
tuple(
tuple(
elem._gen_cache_key(anon_map, bindparams)
for elem in tup_elem
)
for tup_elem in obj
),
)
def visit_has_cache_key_list(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
if not obj:
return ()
return (
attrname,
tuple(elem._gen_cache_key(anon_map, bindparams) for elem in obj),
)
def visit_executable_options(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
if not obj:
return ()
return (
attrname,
tuple(
elem._gen_cache_key(anon_map, bindparams)
for elem in obj
if elem._is_has_cache_key
),
)
def visit_inspectable_list(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return self.visit_has_cache_key_list(
attrname, [inspect(o) for o in obj], parent, anon_map, bindparams
)
def visit_clauseelement_tuples(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return self.visit_has_cache_key_tuples(
attrname, obj, parent, anon_map, bindparams
)
def visit_fromclause_ordered_set(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
if not obj:
return ()
return (
attrname,
tuple([elem._gen_cache_key(anon_map, bindparams) for elem in obj]),
)
def visit_clauseelement_unordered_set(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
if not obj:
return ()
cache_keys = [
elem._gen_cache_key(anon_map, bindparams) for elem in obj
]
return (
attrname,
tuple(
sorted(cache_keys)
), # cache keys all start with (id_, class)
)
def visit_named_ddl_element(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return (attrname, obj.name)
def visit_prefix_sequence(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
if not obj:
return ()
return (
attrname,
tuple(
[
(clause._gen_cache_key(anon_map, bindparams), strval)
for clause, strval in obj
]
),
)
def visit_setup_join_tuple(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return tuple(
(
target._gen_cache_key(anon_map, bindparams),
(
onclause._gen_cache_key(anon_map, bindparams)
if onclause is not None
else None
),
(
from_._gen_cache_key(anon_map, bindparams)
if from_ is not None
else None
),
tuple([(key, flags[key]) for key in sorted(flags)]),
)
for (target, onclause, from_, flags) in obj
)
def visit_table_hint_list(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
if not obj:
return ()
return (
attrname,
tuple(
[
(
clause._gen_cache_key(anon_map, bindparams),
dialect_name,
text,
)
for (clause, dialect_name), text in obj.items()
]
),
)
def visit_plain_dict(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return (attrname, tuple([(key, obj[key]) for key in sorted(obj)]))
def visit_dialect_options(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return (
attrname,
tuple(
(
dialect_name,
tuple(
[
(key, obj[dialect_name][key])
for key in sorted(obj[dialect_name])
]
),
)
for dialect_name in sorted(obj)
),
)
def visit_string_clauseelement_dict(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return (
attrname,
tuple(
(key, obj[key]._gen_cache_key(anon_map, bindparams))
for key in sorted(obj)
),
)
def visit_string_multi_dict(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return (
attrname,
tuple(
(
key,
(
value._gen_cache_key(anon_map, bindparams)
if isinstance(value, HasCacheKey)
else value
),
)
for key, value in [(key, obj[key]) for key in sorted(obj)]
),
)
def visit_fromclause_canonical_column_collection(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
# inlining into the internals of ColumnCollection
return (
attrname,
tuple(
col._gen_cache_key(anon_map, bindparams)
for k, col, _ in obj._collection
),
)
def visit_unknown_structure(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
anon_map[NO_CACHE] = True
return ()
def visit_dml_ordered_values(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
return (
attrname,
tuple(
(
(
key._gen_cache_key(anon_map, bindparams)
if hasattr(key, "__clause_element__")
else key
),
value._gen_cache_key(anon_map, bindparams),
)
for key, value in obj
),
)
def visit_dml_values(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
# in py37 we can assume two dictionaries created in the same
# insert ordering will retain that sorting
return (
attrname,
tuple(
(
(
k._gen_cache_key(anon_map, bindparams)
if hasattr(k, "__clause_element__")
else k
),
obj[k]._gen_cache_key(anon_map, bindparams),
)
for k in obj
),
)
def visit_dml_multi_values(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
# multivalues are simply not cacheable right now
anon_map[NO_CACHE] = True
return ()
def visit_params(
self,
attrname: str,
obj: Any,
parent: Any,
anon_map: anon_map,
bindparams: List[BindParameter[Any]],
) -> Tuple[Any, ...]:
if obj:
if CacheConst.PARAMS in anon_map:
to_set = anon_map[CacheConst.PARAMS] | obj
else:
to_set = obj
anon_map[CacheConst.PARAMS] = to_set
return ()
_cache_key_traversal_visitor = _CacheKeyTraversal()
| _CacheKeyTraversal |
python | dagster-io__dagster | python_modules/libraries/dagster-databricks/dagster_databricks/types.py | {
"start": 105,
"end": 429
} | class ____(str, Enum):
"""See https://docs.databricks.com/dev-tools/api/2.0/jobs.html#runresultstate."""
CANCELED = "CANCELED"
FAILED = "FAILED"
SUCCESS = "SUCCESS"
TIMEDOUT = "TIMEDOUT"
def is_successful(self) -> bool:
return self == DatabricksRunResultState.SUCCESS
| DatabricksRunResultState |
python | python-attrs__attrs | tests/test_make.py | {
"start": 68549,
"end": 69221
} | class ____:
@pytest.mark.parametrize(
"meth_name",
[
"__init__",
"__repr__",
"__eq__",
"__lt__",
"__le__",
"__gt__",
"__ge__",
],
)
def test_docs(self, meth_name):
"""
Tests the presence and correctness of the documentation
for the generated methods
"""
@attr.s
class A:
pass
if hasattr(A, "__qualname__"):
method = getattr(A, meth_name)
expected = f"Method generated by attrs for class {A.__qualname__}."
assert expected == method.__doc__
| TestDocs |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/concepts/resources/pythonic_resources.py | {
"start": 8930,
"end": 9909
} | class ____:
def __init__(*args, **kwargs):
pass
def organization(self, name: str) -> GitHubOrganization:
return GitHubOrganization(name)
def raw_github_resource() -> None:
# start_raw_github_resource
import dagster as dg
# `ResourceParam[GitHub]` is treated exactly like `GitHub` for type checking purposes,
# and the runtime type of the github parameter is `GitHub`. The purpose of the
# `ResourceParam` wrapper is to let Dagster know that `github` is a dg.resource and not an
# upstream dg.asset.
@dg.asset
def public_github_repos(github: dg.ResourceParam[GitHub]):
return github.organization("dagster-io").repositories()
# end_raw_github_resource
# start_raw_github_resource_defs
@dg.definitions
def resources():
return dg.Definitions(
resources={"github": GitHub(...)},
)
# end_raw_github_resource_defs
from contextlib import AbstractContextManager
| GitHub |
python | numba__numba | numba/tests/test_nrt.py | {
"start": 13280,
"end": 21073
} | class ____(unittest.TestCase):
sample_llvm_ir = '''
define i32 @"MyFunction"(i8** noalias nocapture %retptr, { i8*, i32 }** noalias nocapture %excinfo, i8* noalias nocapture readnone %env, double %arg.vt.0, double %arg.vt.1, double %arg.vt.2, double %arg.vt.3, double %arg.bounds.0, double %arg.bounds.1, double %arg.bounds.2, double %arg.bounds.3, i8* %arg.xs.0, i8* nocapture readnone %arg.xs.1, i64 %arg.xs.2, i64 %arg.xs.3, double* nocapture readonly %arg.xs.4, i64 %arg.xs.5.0, i64 %arg.xs.6.0, i8* %arg.ys.0, i8* nocapture readnone %arg.ys.1, i64 %arg.ys.2, i64 %arg.ys.3, double* nocapture readonly %arg.ys.4, i64 %arg.ys.5.0, i64 %arg.ys.6.0, i8* %arg.aggs_and_cols.0.0, i8* nocapture readnone %arg.aggs_and_cols.0.1, i64 %arg.aggs_and_cols.0.2, i64 %arg.aggs_and_cols.0.3, i32* nocapture %arg.aggs_and_cols.0.4, i64 %arg.aggs_and_cols.0.5.0, i64 %arg.aggs_and_cols.0.5.1, i64 %arg.aggs_and_cols.0.6.0, i64 %arg.aggs_and_cols.0.6.1) local_unnamed_addr {
entry:
tail call void @NRT_incref(i8* %arg.xs.0)
tail call void @NRT_incref(i8* %arg.ys.0)
tail call void @NRT_incref(i8* %arg.aggs_and_cols.0.0)
%.251 = icmp sgt i64 %arg.xs.5.0, 0
br i1 %.251, label %B42.preheader, label %B160
B42.preheader: ; preds = %entry
%0 = add i64 %arg.xs.5.0, 1
br label %B42
B42: ; preds = %B40.backedge, %B42.preheader
%lsr.iv3 = phi i64 [ %lsr.iv.next, %B40.backedge ], [ %0, %B42.preheader ]
%lsr.iv1 = phi double* [ %scevgep2, %B40.backedge ], [ %arg.xs.4, %B42.preheader ]
%lsr.iv = phi double* [ %scevgep, %B40.backedge ], [ %arg.ys.4, %B42.preheader ]
%.381 = load double, double* %lsr.iv1, align 8
%.420 = load double, double* %lsr.iv, align 8
%.458 = fcmp ole double %.381, %arg.bounds.1
%not..432 = fcmp oge double %.381, %arg.bounds.0
%"$phi82.1.1" = and i1 %.458, %not..432
br i1 %"$phi82.1.1", label %B84, label %B40.backedge
B84: ; preds = %B42
%.513 = fcmp ole double %.420, %arg.bounds.3
%not..487 = fcmp oge double %.420, %arg.bounds.2
%"$phi106.1.1" = and i1 %.513, %not..487
br i1 %"$phi106.1.1", label %B108.endif.endif.endif, label %B40.backedge
B160: ; preds = %B40.backedge, %entry
tail call void @NRT_decref(i8* %arg.ys.0)
tail call void @NRT_decref(i8* %arg.xs.0)
tail call void @NRT_decref(i8* %arg.aggs_and_cols.0.0)
store i8* null, i8** %retptr, align 8
ret i32 0
B108.endif.endif.endif: ; preds = %B84
%.575 = fmul double %.381, %arg.vt.0
%.583 = fadd double %.575, %arg.vt.1
%.590 = fptosi double %.583 to i64
%.630 = fmul double %.420, %arg.vt.2
%.638 = fadd double %.630, %arg.vt.3
%.645 = fptosi double %.638 to i64
tail call void @NRT_incref(i8* %arg.aggs_and_cols.0.0) ; GONE 1
tail call void @NRT_decref(i8* null) ; GONE 2
tail call void @NRT_incref(i8* %arg.aggs_and_cols.0.0), !noalias !0 ; GONE 3
%.62.i.i = icmp slt i64 %.645, 0
%.63.i.i = select i1 %.62.i.i, i64 %arg.aggs_and_cols.0.5.0, i64 0
%.64.i.i = add i64 %.63.i.i, %.645
%.65.i.i = icmp slt i64 %.590, 0
%.66.i.i = select i1 %.65.i.i, i64 %arg.aggs_and_cols.0.5.1, i64 0
%.67.i.i = add i64 %.66.i.i, %.590
%.84.i.i = mul i64 %.64.i.i, %arg.aggs_and_cols.0.5.1
%.87.i.i = add i64 %.67.i.i, %.84.i.i
%.88.i.i = getelementptr i32, i32* %arg.aggs_and_cols.0.4, i64 %.87.i.i
%.89.i.i = load i32, i32* %.88.i.i, align 4, !noalias !3
%.99.i.i = add i32 %.89.i.i, 1
store i32 %.99.i.i, i32* %.88.i.i, align 4, !noalias !3
tail call void @NRT_decref(i8* %arg.aggs_and_cols.0.0), !noalias !0 ; GONE 4
tail call void @NRT_decref(i8* %arg.aggs_and_cols.0.0) ; GONE 5
br label %B40.backedge
B40.backedge: ; preds = %B108.endif.endif.endif, %B84, %B42
%scevgep = getelementptr double, double* %lsr.iv, i64 1
%scevgep2 = getelementptr double, double* %lsr.iv1, i64 1
%lsr.iv.next = add i64 %lsr.iv3, -1
%.294 = icmp sgt i64 %lsr.iv.next, 1
br i1 %.294, label %B42, label %B160
}
''' # noqa
def test_refct_pruning_op_recognize(self):
input_ir = self.sample_llvm_ir
input_lines = list(input_ir.splitlines())
before_increfs = [ln for ln in input_lines if 'NRT_incref' in ln]
before_decrefs = [ln for ln in input_lines if 'NRT_decref' in ln]
# prune
output_ir = nrtopt._remove_redundant_nrt_refct(input_ir)
output_lines = list(output_ir.splitlines())
after_increfs = [ln for ln in output_lines if 'NRT_incref' in ln]
after_decrefs = [ln for ln in output_lines if 'NRT_decref' in ln]
# check
self.assertNotEqual(before_increfs, after_increfs)
self.assertNotEqual(before_decrefs, after_decrefs)
pruned_increfs = set(before_increfs) - set(after_increfs)
pruned_decrefs = set(before_decrefs) - set(after_decrefs)
# the symm difference == or-combined
combined = pruned_increfs | pruned_decrefs
self.assertEqual(combined, pruned_increfs ^ pruned_decrefs)
pruned_lines = '\n'.join(combined)
# all GONE lines are pruned
for i in [1, 2, 3, 4, 5]:
gone = '; GONE {}'.format(i)
self.assertIn(gone, pruned_lines)
# no other lines
self.assertEqual(len(list(pruned_lines.splitlines())), len(combined))
@unittest.skip("Pass removed as it was buggy. Re-enable when fixed.")
def test_refct_pruning_with_branches(self):
'''testcase from #2350'''
@njit
def _append_non_na(x, y, agg, field):
if not np.isnan(field):
agg[y, x] += 1
@njit
def _append(x, y, agg, field):
if not np.isnan(field):
if np.isnan(agg[y, x]):
agg[y, x] = field
else:
agg[y, x] += field
@njit
def append(x, y, agg, field):
_append_non_na(x, y, agg, field)
_append(x, y, agg, field)
# Disable python wrapper to avoid detecting necessary
# refcount inside it
@njit(no_cpython_wrapper=True)
def extend(arr, field):
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
append(j, i, arr, field)
# Compile
extend.compile("(f4[:,::1], f4)")
# Test there are no reference count operations
llvmir = str(extend.inspect_llvm(extend.signatures[0]))
refops = list(re.finditer(r'(NRT_incref|NRT_decref)\([^\)]+\)', llvmir))
self.assertEqual(len(refops), 0)
@linux_only
@x86_only
def test_inline_asm(self):
"""The InlineAsm class from llvmlite.ir has no 'name' attr the refcount
pruning pass should be tolerant to this"""
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()
llvm.initialize_native_asmparser()
@intrinsic
def bar(tyctx, x, y):
def codegen(cgctx, builder, sig, args):
(arg_0, arg_1) = args
fty = ir.FunctionType(ir.IntType(32), [ir.IntType(32),
ir.IntType(32)])
mul = builder.asm(fty, "mov $2, $0; imul $1, $0", "=&r,r,r",
(arg_0, arg_1), name="asm_mul",
side_effect=False)
return impl_ret_untracked(cgctx, builder, sig.return_type, mul)
return signature(types.int32, types.int32, types.int32), codegen
@njit(['int32(int32)'])
def foo(x):
x += 1
z = bar(x, 2)
return z
self.assertEqual(foo(10), 22) # expect (10 + 1) * 2 = 22
@skip_unless_cffi
| TestRefCtPruning |
python | ApeWorX__ape | src/ape/pytest/fixtures.py | {
"start": 22934,
"end": 27202
} | class ____(ManagerAccessMixin):
receipt_map: dict[str, dict[str, "ReceiptAPI"]] = {}
enter_blocks: list[int] = []
def __init__(self, config_wrapper: "ConfigWrapper"):
self.config_wrapper = config_wrapper
def __enter__(self):
block_number = self._get_block_number()
if block_number is not None:
self.enter_blocks.append(block_number)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enter_blocks:
return
start_block = self.enter_blocks.pop()
stop_block = self._get_block_number()
if stop_block is None or start_block > stop_block:
return
self.capture_range(start_block, stop_block)
def capture_range(self, start_block: int, stop_block: int):
blocks = self.chain_manager.blocks.range(start_block, stop_block + 1)
transactions = [t for b in blocks for t in b.transactions]
for txn in transactions:
try:
txn_hash = to_hex(txn.txn_hash)
except Exception:
# Might have been from an impersonated account.
# Those txns need to be added separately, same as tracing calls.
# Likely, it was already accounted before this point.
continue
self.capture(txn_hash)
def capture(self, transaction_hash: str):
try:
receipt = self.chain_manager.history[transaction_hash]
except ChainError:
return
if not receipt:
return
elif not (contract_address := (receipt.receiver or receipt.contract_address)):
return
elif not (contract_type := self.chain_manager.contracts.get(contract_address)):
# Not an invoke-transaction or a known address
return
elif not (source_id := (contract_type.source_id or None)):
# Not a local or known contract type.
return
elif source_id not in self.receipt_map:
self.receipt_map[source_id] = {}
if transaction_hash in self.receipt_map[source_id]:
# Transaction already known.
return
self.receipt_map[source_id][transaction_hash] = receipt
if self.config_wrapper.track_gas:
receipt.track_gas()
if self.config_wrapper.track_coverage:
receipt.track_coverage()
def clear(self):
self.receipt_map = {}
self.enter_blocks = []
@allow_disconnected
def _get_block_number(self) -> Optional[int]:
return self.provider.get_block("latest").number
def _exclude_from_gas_report(
self, contract_name: str, method_name: Optional[str] = None
) -> bool:
"""
Helper method to determine if a certain contract / method combination should be
excluded from the gas report.
"""
for exclusion in self.config_wrapper.gas_exclusions:
# Default to looking at all contracts
contract_pattern = exclusion.contract_name
if not fnmatch(contract_name, contract_pattern) or not method_name:
continue
method_pattern = exclusion.method_name
if not method_pattern or fnmatch(method_name, method_pattern):
return True
return False
def fixture(chain_isolation: Optional[bool], **kwargs):
"""
A thin-wrapper around ``@pytest.fixture`` with extra capabilities.
Set ``chain_isolation`` to ``False`` to signal to Ape that this fixture's
cached result is the same regardless of block number and it does not
need to be invalidated during times or pytest-scoped based chain rebasing.
Usage example::
import ape
from ape_tokens import tokens
@ape.fixture(scope="session", chain_isolation=False, params=("WETH", "DAI", "BAT"))
def token_addresses(request):
return tokens[request].address
"""
def decorator(fixture_function):
if chain_isolation is not None:
name = kwargs.get("name", fixture_function.__name__)
FixtureManager._stateful_fixtures_cache[name] = chain_isolation
return pytest.fixture(fixture_function, **kwargs)
return decorator
| ReceiptCapture |
python | ray-project__ray | doc/source/serve/doc_code/key_concepts.py | {
"start": 650,
"end": 737
} | class ____:
def __call__(self) -> str:
return "Hello"
@serve.deployment
| Hello |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_project_issues_resolved_in_release.py | {
"start": 438,
"end": 4006
} | class ____(APITestCase):
endpoint = "sentry-api-0-project-release-resolved"
method = "get"
def setUp(self) -> None:
super().setUp()
self.user = self.create_user()
self.org = self.create_organization()
self.team = self.create_team(organization=self.org)
self.create_member(organization=self.org, user=self.user, teams=[self.team])
self.project = self.create_project(teams=[self.team])
self.release = self.create_release(project=self.project)
self.group = self.create_group(project=self.project)
self.login_as(self.user)
def build_grouplink(self) -> None:
repo = Repository.objects.create(organization_id=self.org.id, name=self.project.name)
commit = Commit.objects.create(
organization_id=self.org.id, repository_id=repo.id, key=uuid1().hex
)
commit2 = Commit.objects.create(
organization_id=self.org.id, repository_id=repo.id, key=uuid1().hex
)
ReleaseCommit.objects.create(
organization_id=self.org.id, release=self.release, commit=commit, order=1
)
ReleaseCommit.objects.create(
organization_id=self.org.id, release=self.release, commit=commit2, order=0
)
GroupLink.objects.create(
group_id=self.group.id,
project_id=self.group.project_id,
linked_type=GroupLink.LinkedType.commit,
relationship=GroupLink.Relationship.resolves,
linked_id=commit.id,
)
def build_group_resolution(self, group: Group | None = None) -> GroupResolution:
return GroupResolution.objects.create(
group=self.group if group is None else group,
release=self.release,
type=GroupResolution.Type.in_release,
)
def run_test(self, expected_groups: list[Group]) -> None:
response = self.get_success_response(self.org.slug, self.project.slug, self.release.version)
assert len(response.data) == len(expected_groups)
expected = set(map(str, [g.id for g in expected_groups]))
assert {item["id"] for item in response.data} == expected
def test_shows_issues_from_groupresolution(self) -> None:
"""
tests that the endpoint will correctly retrieve issues resolved
in a release from the GroupResolution model
"""
self.build_group_resolution()
self.run_test([self.group])
def test_shows_issues_from_grouplink(self) -> None:
"""
tests that the endpoint will correctly retrieve issues resolved
in a release from the GroupLink model
"""
self.build_grouplink()
self.run_test([self.group])
def test_does_not_return_duplicate_groups(self) -> None:
"""
tests that the endpoint will correctly retrieve issues resolved
in a release from the GroupLink and GroupResolution model
but will not return the groups twice if they appear in both
"""
self.build_grouplink()
self.build_group_resolution()
self.run_test([self.group])
def test_return_groups_from_both_types(self) -> None:
"""
tests that the endpoint will correctly retrieve issues resolved
in a release from both the GroupLink and GroupResolution model
"""
self.build_grouplink()
group_2 = self.create_group(project=self.project)
self.build_group_resolution(group_2)
self.run_test([self.group, group_2])
| ProjectIssuesResolvedInReleaseEndpointTest |
python | django__django | tests/null_fk/models.py | {
"start": 655,
"end": 872
} | class ____(models.Model):
post = models.ForeignKey(Post, models.SET_NULL, null=True)
comment_text = models.CharField(max_length=250)
class Meta:
ordering = ("comment_text",)
# Ticket 15823
| Comment |
python | apache__airflow | airflow-core/src/airflow/providers_manager.py | {
"start": 12298,
"end": 56481
} | class ____(LoggingMixin, metaclass=Singleton):
"""
Manages all provider distributions.
This is a Singleton class. The first time it is
instantiated, it discovers all available providers in installed packages.
"""
resource_version = "0"
_initialized: bool = False
_initialization_stack_trace = None
@staticmethod
def initialized() -> bool:
return ProvidersManager._initialized
@staticmethod
def initialization_stack_trace() -> str | None:
return ProvidersManager._initialization_stack_trace
def __init__(self):
"""Initialize the manager."""
super().__init__()
ProvidersManager._initialized = True
ProvidersManager._initialization_stack_trace = "".join(traceback.format_stack(inspect.currentframe()))
self._initialized_cache: dict[str, bool] = {}
# Keeps dict of providers keyed by module name
self._provider_dict: dict[str, ProviderInfo] = {}
self._fs_set: set[str] = set()
self._asset_uri_handlers: dict[str, Callable[[SplitResult], SplitResult]] = {}
self._asset_factories: dict[str, Callable[..., Asset]] = {}
self._asset_to_openlineage_converters: dict[str, Callable] = {}
self._taskflow_decorators: dict[str, Callable] = LazyDictWithCache()
# keeps mapping between connection_types and hook class, package they come from
self._hook_provider_dict: dict[str, HookClassProvider] = {}
self._dialect_provider_dict: dict[str, DialectInfo] = {}
# Keeps dict of hooks keyed by connection type. They are lazy evaluated at access time
self._hooks_lazy_dict: LazyDictWithCache[str, HookInfo | Callable] = LazyDictWithCache()
# Keeps methods that should be used to add custom widgets tuple of keyed by name of the extra field
self._connection_form_widgets: dict[str, ConnectionFormWidgetInfo] = {}
# Customizations for javascript fields are kept here
self._field_behaviours: dict[str, dict] = {}
self._extra_link_class_name_set: set[str] = set()
self._logging_class_name_set: set[str] = set()
self._auth_manager_class_name_set: set[str] = set()
self._secrets_backend_class_name_set: set[str] = set()
self._executor_class_name_set: set[str] = set()
self._queue_class_name_set: set[str] = set()
self._provider_configs: dict[str, dict[str, Any]] = {}
self._trigger_info_set: set[TriggerInfo] = set()
self._notification_info_set: set[NotificationInfo] = set()
self._provider_schema_validator = _create_provider_info_schema_validator()
self._customized_form_fields_schema_validator = (
_create_customized_form_field_behaviours_schema_validator()
)
# Set of plugins contained in providers
self._plugins_set: set[PluginInfo] = set()
self._init_airflow_core_hooks()
def _init_airflow_core_hooks(self):
"""Initialize the hooks dict with default hooks from Airflow core."""
core_dummy_hooks = {
"generic": "Generic",
"email": "Email",
}
for key, display in core_dummy_hooks.items():
self._hooks_lazy_dict[key] = HookInfo(
hook_class_name=None,
connection_id_attribute_name=None,
package_name=None,
hook_name=display,
connection_type=None,
connection_testable=False,
)
for conn_type, class_name in (
("fs", "airflow.providers.standard.hooks.filesystem.FSHook"),
("package_index", "airflow.providers.standard.hooks.package_index.PackageIndexHook"),
):
self._hooks_lazy_dict[conn_type] = functools.partial(
self._import_hook,
connection_type=None,
package_name="apache-airflow-providers-standard",
hook_class_name=class_name,
provider_info=None,
)
@provider_info_cache("list")
def initialize_providers_list(self):
"""Lazy initialization of providers list."""
# Local source folders are loaded first. They should take precedence over the package ones for
# Development purpose. In production provider.yaml files are not present in the 'airflow" directory
# So there is no risk we are going to override package provider accidentally. This can only happen
# in case of local development
self._discover_all_providers_from_packages()
self._verify_all_providers_all_compatible()
self._provider_dict = dict(sorted(self._provider_dict.items()))
def _verify_all_providers_all_compatible(self):
from packaging import version as packaging_version
for provider_id, info in self._provider_dict.items():
min_version = MIN_PROVIDER_VERSIONS.get(provider_id)
if min_version:
if packaging_version.parse(min_version) > packaging_version.parse(info.version):
log.warning(
"The package %s is not compatible with this version of Airflow. "
"The package has version %s but the minimum supported version "
"of the package is %s",
provider_id,
info.version,
min_version,
)
@provider_info_cache("hooks")
def initialize_providers_hooks(self):
"""Lazy initialization of providers hooks."""
self._init_airflow_core_hooks()
self.initialize_providers_list()
self._discover_hooks()
self._hook_provider_dict = dict(sorted(self._hook_provider_dict.items()))
@provider_info_cache("filesystems")
def initialize_providers_filesystems(self):
"""Lazy initialization of providers filesystems."""
self.initialize_providers_list()
self._discover_filesystems()
@provider_info_cache("asset_uris")
def initialize_providers_asset_uri_resources(self):
"""Lazy initialization of provider asset URI handlers, factories, converters etc."""
self.initialize_providers_list()
self._discover_asset_uri_resources()
@provider_info_cache("hook_lineage_writers")
@provider_info_cache("taskflow_decorators")
def initialize_providers_taskflow_decorator(self):
"""Lazy initialization of providers hooks."""
self.initialize_providers_list()
self._discover_taskflow_decorators()
@provider_info_cache("extra_links")
def initialize_providers_extra_links(self):
"""Lazy initialization of providers extra links."""
self.initialize_providers_list()
self._discover_extra_links()
@provider_info_cache("logging")
def initialize_providers_logging(self):
"""Lazy initialization of providers logging information."""
self.initialize_providers_list()
self._discover_logging()
@provider_info_cache("secrets_backends")
def initialize_providers_secrets_backends(self):
"""Lazy initialization of providers secrets_backends information."""
self.initialize_providers_list()
self._discover_secrets_backends()
@provider_info_cache("executors")
def initialize_providers_executors(self):
"""Lazy initialization of providers executors information."""
self.initialize_providers_list()
self._discover_executors()
@provider_info_cache("queues")
def initialize_providers_queues(self):
"""Lazy initialization of providers queue information."""
self.initialize_providers_list()
self._discover_queues()
@provider_info_cache("notifications")
def initialize_providers_notifications(self):
"""Lazy initialization of providers notifications information."""
self.initialize_providers_list()
self._discover_notifications()
@provider_info_cache("auth_managers")
def initialize_providers_auth_managers(self):
"""Lazy initialization of providers notifications information."""
self.initialize_providers_list()
self._discover_auth_managers()
@provider_info_cache("config")
def initialize_providers_configuration(self):
"""Lazy initialization of providers configuration information."""
self._initialize_providers_configuration()
def _initialize_providers_configuration(self):
"""
Initialize providers configuration information.
Should be used if we do not want to trigger caching for ``initialize_providers_configuration`` method.
In some cases we might want to make sure that the configuration is initialized, but we do not want
to cache the initialization method - for example when we just want to write configuration with
providers, but it is used in the context where no providers are loaded yet we will eventually
restore the original configuration and we want the subsequent ``initialize_providers_configuration``
method to be run in order to load the configuration for providers again.
"""
self.initialize_providers_list()
self._discover_config()
# Now update conf with the new provider configuration from providers
from airflow.configuration import conf
conf.load_providers_configuration()
@provider_info_cache("plugins")
def initialize_providers_plugins(self):
self.initialize_providers_list()
self._discover_plugins()
def _discover_all_providers_from_packages(self) -> None:
"""
Discover all providers by scanning packages installed.
The list of providers should be returned via the 'apache_airflow_provider'
entrypoint as a dictionary conforming to the 'airflow/provider_info.schema.json'
schema. Note that the schema is different at runtime than provider.yaml.schema.json.
The development version of provider schema is more strict and changes together with
the code. The runtime version is more relaxed (allows for additional properties)
and verifies only the subset of fields that are needed at runtime.
"""
for entry_point, dist in entry_points_with_dist("apache_airflow_provider"):
if not dist.metadata:
continue
package_name = canonicalize_name(dist.metadata["name"])
if package_name in self._provider_dict:
continue
log.debug("Loading %s from package %s", entry_point, package_name)
version = dist.version
provider_info = entry_point.load()()
self._provider_schema_validator.validate(provider_info)
provider_info_package_name = provider_info["package-name"]
if package_name != provider_info_package_name:
raise ValueError(
f"The package '{package_name}' from packaging information "
f"{provider_info_package_name} do not match. Please make sure they are aligned"
)
if package_name not in self._provider_dict:
self._provider_dict[package_name] = ProviderInfo(version, provider_info)
else:
log.warning(
"The provider for package '%s' could not be registered from because providers for that "
"package name have already been registered",
package_name,
)
def _discover_hooks_from_connection_types(
    self,
    hook_class_names_registered: set[str],
    already_registered_warning_connection_types: set[str],
    package_name: str,
    provider: ProviderInfo,
):
    """
    Discover hooks from the "connection-types" property.

    This is new, better method that replaces discovery from hook-class-names as it
    allows to lazy import individual Hook classes when they are accessed.
    The "connection-types" keeps information about both - connection type and class
    name so we can discover all connection-types without importing the classes.

    :param hook_class_names_registered: set of registered hook class names for this provider
    :param already_registered_warning_connection_types: set of connections for which warning should be
        printed in logs as they were already registered before
    :param package_name: name of the provider package
    :param provider: class that keeps information about version and details of the provider
    :return: True if the provider declared any "connection-types" entries
    """
    provider_uses_connection_types = False
    connection_types = provider.data.get("connection-types")
    if connection_types:
        for connection_type_dict in connection_types:
            connection_type = connection_type_dict["connection-type"]
            hook_class_name = connection_type_dict["hook-class-name"]
            hook_class_names_registered.add(hook_class_name)
            already_registered = self._hook_provider_dict.get(connection_type)
            if already_registered:
                if already_registered.package_name != package_name:
                    # Cross-package clash: collect and warn once at the end of discovery.
                    already_registered_warning_connection_types.add(connection_type)
                elif already_registered.hook_class_name != hook_class_name:
                    # Consistency fix (mirrors _discover_hooks_from_hook_class_names):
                    # only warn when class names actually differ. Previously a duplicate
                    # entry with the same class produced a misleading
                    # "different class names: 'X' and 'X'" warning.
                    log.warning(
                        "The connection type '%s' is already registered in the"
                        " package '%s' with different class names: '%s' and '%s'. ",
                        connection_type,
                        package_name,
                        already_registered.hook_class_name,
                        hook_class_name,
                    )
            else:
                self._hook_provider_dict[connection_type] = HookClassProvider(
                    hook_class_name=hook_class_name, package_name=package_name
                )
                # Defer importing hook to access time by setting import hook method as dict value
                self._hooks_lazy_dict[connection_type] = functools.partial(
                    self._import_hook,
                    connection_type=connection_type,
                    provider_info=provider,
                )
            provider_uses_connection_types = True
    return provider_uses_connection_types
def _discover_hooks_from_hook_class_names(
    self,
    hook_class_names_registered: set[str],
    already_registered_warning_connection_types: set[str],
    package_name: str,
    provider: ProviderInfo,
    provider_uses_connection_types: bool,
):
    """
    Discover hooks from the "hook-class-names" property.

    This property is deprecated but we should support it in Airflow 2.
    The hook-class-names array contained just Hook names without connection type,
    therefore we need to import all those classes immediately to know which connection types
    are supported. This makes it impossible to selectively only import those hooks that are used.

    :param hook_class_names_registered: hook class names already registered by the
        "connection-types" discovery pass; such hooks are skipped here
    :param already_registered_warning_connection_types: list of connection hooks that we should warn
        about when finished discovery
    :param package_name: name of the provider package
    :param provider: class that keeps information about version and details of the provider
    :param provider_uses_connection_types: determines whether the provider uses "connection-types" new
        form of passing connection types
    :return:
    """
    hook_class_names = provider.data.get("hook-class-names")
    if hook_class_names:
        for hook_class_name in hook_class_names:
            if hook_class_name in hook_class_names_registered:
                # Silently ignore the hook class - it's already marked for lazy-import by
                # connection-types discovery
                continue
            # Eager import is unavoidable here - the connection type is only known
            # after the hook class has been imported.
            hook_info = self._import_hook(
                connection_type=None,
                provider_info=provider,
                hook_class_name=hook_class_name,
                package_name=package_name,
            )
            if not hook_info:
                # Problem importing the class - we ignore it. Log is written at import time
                continue
            already_registered = self._hook_provider_dict.get(hook_info.connection_type)
            if already_registered:
                if already_registered.package_name != package_name:
                    # Cross-package clash: collected here, warned about once at the end.
                    already_registered_warning_connection_types.add(hook_info.connection_type)
                else:
                    if already_registered.hook_class_name != hook_class_name:
                        log.warning(
                            "The hook connection type '%s' is registered twice in the"
                            " package '%s' with different class names: '%s' and '%s'. "
                            " Please fix it!",
                            hook_info.connection_type,
                            package_name,
                            already_registered.hook_class_name,
                            hook_class_name,
                        )
            else:
                self._hook_provider_dict[hook_info.connection_type] = HookClassProvider(
                    hook_class_name=hook_class_name, package_name=package_name
                )
                # Already imported above, so store the resolved HookInfo directly
                # (no lazy partial needed as in the connection-types path).
                self._hooks_lazy_dict[hook_info.connection_type] = hook_info

        if not provider_uses_connection_types:
            warnings.warn(
                f"The provider {package_name} uses `hook-class-names` "
                "property in provider-info and has no `connection-types` one. "
                "The 'hook-class-names' property has been deprecated in favour "
                "of 'connection-types' in Airflow 2.2. Use **both** in case you want to "
                "have backwards compatibility with Airflow < 2.2",
                DeprecationWarning,
                stacklevel=1,
            )
    for already_registered_connection_type in already_registered_warning_connection_types:
        log.warning(
            "The connection_type '%s' has been already registered by provider '%s.'",
            already_registered_connection_type,
            self._hook_provider_dict[already_registered_connection_type].package_name,
        )
def _discover_hooks(self) -> None:
    """Retrieve all connections defined in the providers via Hooks."""
    for package_name, provider in self._provider_dict.items():
        # Connection types that clash across packages - reported by the
        # hook-class-names pass after both discovery passes have run.
        duplicated_connection_types: set[str] = set()
        hook_class_names_registered: set[str] = set()
        self._discover_provider_dialects(package_name, provider)
        # Preferred lazy "connection-types" discovery first ...
        provider_uses_connection_types = self._discover_hooks_from_connection_types(
            hook_class_names_registered, duplicated_connection_types, package_name, provider
        )
        # ... then the deprecated eager "hook-class-names" fallback.
        self._discover_hooks_from_hook_class_names(
            hook_class_names_registered,
            duplicated_connection_types,
            package_name,
            provider,
            provider_uses_connection_types,
        )
    # Rebuild the registry sorted by connection type for deterministic iteration.
    self._hook_provider_dict = dict(sorted(self._hook_provider_dict.items()))
def _discover_provider_dialects(self, provider_name: str, provider: ProviderInfo):
    """Register every dialect declared by a single provider package."""
    declared_dialects = provider.data.get("dialects", [])
    if not declared_dialects:
        return
    for entry in declared_dialects:
        dialect_type = entry["dialect-type"]
        self._dialect_provider_dict[dialect_type] = DialectInfo(
            name=dialect_type,
            dialect_class_name=entry["dialect-class-name"],
            provider_name=provider_name,
        )
@provider_info_cache("import_all_hooks")
def _import_info_from_all_hooks(self):
    """Force-import all hooks and initialize the connections/fields."""
    # Retrieve all hooks to make sure that all of them are imported
    # (accessing values of the lazy dict triggers the deferred imports).
    _ = list(self._hooks_lazy_dict.values())
    self._field_behaviours = dict(sorted(self._field_behaviours.items()))

    # Widgets for connection forms are currently used in two places:
    # 1. In the UI Connections, expected same order that it defined in Hook.
    # 2. cli command - `airflow providers widgets` and expected that it in alphabetical order.
    # It is not possible to recover original ordering after sorting,
    # that the main reason why original sorting moved to cli part:
    # self._connection_form_widgets = dict(sorted(self._connection_form_widgets.items()))
def _discover_filesystems(self) -> None:
    """Retrieve all filesystems defined in the providers."""
    for provider_package, provider in self._provider_dict.items():
        for fs_module_name in provider.data.get("filesystems", []):
            # Only register modules that actually expose the expected ``get_fs`` callable.
            if _correctness_check(provider_package, f"{fs_module_name}.get_fs", provider):
                self._fs_set.add(fs_module_name)
    # NOTE(review): ``set(sorted(...))`` does not order a set (sets are unordered);
    # it only rebuilds an equal set. Ordering happens in ``filesystem_module_names``.
    self._fs_set = set(sorted(self._fs_set))
def _discover_asset_uri_resources(self) -> None:
    """Discovers and registers asset URI handlers, factories, and converters for all providers."""
    from airflow.sdk.definitions.asset import normalize_noop

    def _safe_register_resource(
        provider_package_name: str,
        schemes_list: list[str],
        resource_path: str | None,
        resource_registry: dict,
        default_resource: Any = None,
    ):
        """
        Register a specific resource (handler, factory, or converter) for the given schemes.

        If the resolved resource (either from the path or the default) is valid, it updates
        the resource registry with the appropriate resource for each scheme.
        """
        # NOTE: this closure reads ``provider`` from the enclosing loop; it is only
        # called synchronously inside the loop body, so late binding is safe here.
        resource = (
            _correctness_check(provider_package_name, resource_path, provider)
            if resource_path is not None
            else default_resource
        )
        if resource:
            resource_registry.update((scheme, resource) for scheme in schemes_list)

    for provider_name, provider in self._provider_dict.items():
        for uri_info in provider.data.get("asset-uris", []):
            if "schemes" not in uri_info or "handler" not in uri_info:
                continue  # Both schemas and handler must be explicitly set, handler can be set to null
            common_args = {"schemes_list": uri_info["schemes"], "provider_package_name": provider_name}
            # Handler: an explicit null falls back to the no-op normalizer.
            _safe_register_resource(
                resource_path=uri_info["handler"],
                resource_registry=self._asset_uri_handlers,
                default_resource=normalize_noop,
                **common_args,
            )
            # Factory and OpenLineage converter are optional; absent keys are skipped.
            _safe_register_resource(
                resource_path=uri_info.get("factory"),
                resource_registry=self._asset_factories,
                **common_args,
            )
            _safe_register_resource(
                resource_path=uri_info.get("to_openlineage_converter"),
                resource_registry=self._asset_to_openlineage_converters,
                **common_args,
            )
def _discover_taskflow_decorators(self) -> None:
for name, info in self._provider_dict.items():
for taskflow_decorator in info.data.get("task-decorators", []):
self._add_taskflow_decorator(
taskflow_decorator["name"], taskflow_decorator["class-name"], name
)
def _add_taskflow_decorator(self, name, decorator_class_name: str, provider_package: str) -> None:
    """
    Register a single taskflow decorator under ``name``; the class is imported lazily.

    First registration of a name wins - later duplicates are logged and ignored.
    """
    if not _check_builtin_provider_prefix(provider_package, decorator_class_name):
        return

    if name in self._taskflow_decorators:
        try:
            existing = self._taskflow_decorators[name]
            other_name = f"{existing.__module__}.{existing.__name__}"
        except Exception:
            # If problem importing, then get the value from the functools.partial
            other_name = self._taskflow_decorators._raw_dict[name].args[0]  # type: ignore[attr-defined]
        log.warning(
            "The taskflow decorator '%s' has been already registered (by %s).",
            name,
            other_name,
        )
        return

    # Defer the actual import of the decorator class to first use.
    self._taskflow_decorators[name] = functools.partial(import_string, decorator_class_name)
@staticmethod
def _get_attr(obj: Any, attr_name: str):
"""Retrieve attributes of an object, or warn if not found."""
if not hasattr(obj, attr_name):
log.warning("The object '%s' is missing %s attribute and cannot be registered", obj, attr_name)
return None
return getattr(obj, attr_name)
def _import_hook(
self,
connection_type: str | None,
provider_info: ProviderInfo,
hook_class_name: str | None = None,
package_name: str | None = None,
) -> HookInfo | None:
"""
Import hook and retrieve hook information.
Either connection_type (for lazy loading) or hook_class_name must be set - but not both).
Only needs package_name if hook_class_name is passed (for lazy loading, package_name
is retrieved from _connection_type_class_provider_dict together with hook_class_name).
:param connection_type: type of the connection
:param hook_class_name: name of the hook class
:param package_name: provider package - only needed in case connection_type is missing
: return
"""
if connection_type is None and hook_class_name is None:
raise ValueError("Either connection_type or hook_class_name must be set")
if connection_type is not None and hook_class_name is not None:
raise ValueError(
f"Both connection_type ({connection_type} and "
f"hook_class_name {hook_class_name} are set. Only one should be set!"
)
if connection_type is not None:
class_provider = self._hook_provider_dict[connection_type]
package_name = class_provider.package_name
hook_class_name = class_provider.hook_class_name
else:
if not hook_class_name:
raise ValueError("Either connection_type or hook_class_name must be set")
if not package_name:
raise ValueError(
f"Provider package name is not set when hook_class_name ({hook_class_name}) is used"
)
hook_class: type[BaseHook] | None = _correctness_check(package_name, hook_class_name, provider_info)
if hook_class is None:
return None
try:
from wtforms import BooleanField, IntegerField, PasswordField, StringField
allowed_field_classes = [IntegerField, PasswordField, StringField, BooleanField]
module, class_name = hook_class_name.rsplit(".", maxsplit=1)
# Do not use attr here. We want to check only direct class fields not those
# inherited from parent hook. This way we add form fields only once for the whole
# hierarchy and we add it only from the parent hook that provides those!
if "get_connection_form_widgets" in hook_class.__dict__:
widgets = hook_class.get_connection_form_widgets()
if widgets:
for widget in widgets.values():
if widget.field_class not in allowed_field_classes:
log.warning(
"The hook_class '%s' uses field of unsupported class '%s'. "
"Only '%s' field classes are supported",
hook_class_name,
widget.field_class,
allowed_field_classes,
)
return None
self._add_widgets(package_name, hook_class, widgets)
if "get_ui_field_behaviour" in hook_class.__dict__:
field_behaviours = hook_class.get_ui_field_behaviour()
if field_behaviours:
self._add_customized_fields(package_name, hook_class, field_behaviours)
except ImportError as e:
if e.name in ["flask_appbuilder", "wtforms"]:
log.info(
"The hook_class '%s' is not fully initialized (UI widgets will be missing), because "
"the 'flask_appbuilder' package is not installed, however it is not required for "
"Airflow components to work",
hook_class_name,
)
except Exception as e:
log.warning(
"Exception when importing '%s' from '%s' package: %s",
hook_class_name,
package_name,
e,
)
return None
hook_connection_type = self._get_attr(hook_class, "conn_type")
if connection_type:
if hook_connection_type != connection_type:
log.warning(
"Inconsistency! The hook class '%s' declares connection type '%s'"
" but it is added by provider '%s' as connection_type '%s' in provider info. "
"This should be fixed!",
hook_class,
hook_connection_type,
package_name,
connection_type,
)
connection_type = hook_connection_type
connection_id_attribute_name: str = self._get_attr(hook_class, "conn_name_attr")
hook_name: str = self._get_attr(hook_class, "hook_name")
if not connection_type or not connection_id_attribute_name or not hook_name:
log.warning(
"The hook misses one of the key attributes: "
"conn_type: %s, conn_id_attribute_name: %s, hook_name: %s",
connection_type,
connection_id_attribute_name,
hook_name,
)
return None
return HookInfo(
hook_class_name=hook_class_name,
connection_id_attribute_name=connection_id_attribute_name,
package_name=package_name,
hook_name=hook_name,
connection_type=connection_type,
connection_testable=hasattr(hook_class, "test_connection"),
)
def _add_widgets(self, package_name: str, hook_class: type, widgets: dict[str, Any]):
    """
    Register connection-form widgets for a hook class.

    Field keys are prefixed with ``extra__<conn_type>__`` unless already prefixed,
    so that fields of different connection types cannot collide in the form.
    First registration of a prefixed name wins.
    """
    conn_type = hook_class.conn_type  # type: ignore
    for field_identifier, field in widgets.items():
        if field_identifier.startswith("extra__"):
            prefixed_field_name = field_identifier
        else:
            prefixed_field_name = f"extra__{conn_type}__{field_identifier}"
        if prefixed_field_name in self._connection_form_widgets:
            log.warning(
                "The field %s from class %s has already been added by another provider. Ignoring it.",
                field_identifier,
                hook_class.__name__,
            )
            # In case of inherited hooks this might be happening several times
        else:
            self._connection_form_widgets[prefixed_field_name] = ConnectionFormWidgetInfo(
                hook_class.__name__,
                package_name,
                field,
                field_identifier,
                # Mark the field sensitive when its widget renders as a password input.
                hasattr(field.field_class.widget, "input_type")
                and field.field_class.widget.input_type == "password",
            )
def _add_customized_fields(self, package_name: str, hook_class: type, customized_fields: dict):
    """
    Register customized UI field behaviour for the hook's connection type.

    Validates the structure against the customized-fields schema and prefixes
    placeholder keys with the connection type. First provider wins on conflict.
    """
    try:
        connection_type = getattr(hook_class, "conn_type")

        self._customized_form_fields_schema_validator.validate(customized_fields)

        if connection_type:
            customized_fields = _ensure_prefix_for_placeholders(customized_fields, connection_type)

        if connection_type in self._field_behaviours:
            log.warning(
                "The connection_type %s from package %s and class %s has already been added "
                "by another provider. Ignoring it.",
                connection_type,
                package_name,
                hook_class.__name__,
            )
            return
        self._field_behaviours[connection_type] = customized_fields
    except Exception as e:
        # Deliberately best-effort: a single broken provider must not break discovery.
        log.warning(
            "Error when loading customized fields from package '%s' hook class '%s': %s",
            package_name,
            hook_class.__name__,
            e,
        )
def _discover_auth_managers(self) -> None:
    """Retrieve all auth managers defined in the providers."""
    for pkg, info in self._provider_dict.items():
        class_names = info.data.get("auth-managers")
        if not class_names:
            continue
        self._auth_manager_class_name_set.update(
            name for name in class_names if _correctness_check(pkg, name, info)
        )
def _discover_notifications(self) -> None:
    """Retrieve all notifications defined in the providers."""
    for pkg, info in self._provider_dict.items():
        class_names = info.data.get("notifications")
        if not class_names:
            continue
        self._notification_info_set.update(
            name for name in class_names if _correctness_check(pkg, name, info)
        )
def _discover_extra_links(self) -> None:
    """Retrieve all extra links defined in the providers."""
    for pkg, info in self._provider_dict.items():
        class_names = info.data.get("extra-links")
        if not class_names:
            continue
        self._extra_link_class_name_set.update(
            name for name in class_names if _correctness_check(pkg, name, info)
        )
def _discover_logging(self) -> None:
    """Retrieve all logging defined in the providers."""
    for pkg, info in self._provider_dict.items():
        class_names = info.data.get("logging")
        if not class_names:
            continue
        self._logging_class_name_set.update(
            name for name in class_names if _correctness_check(pkg, name, info)
        )
def _discover_secrets_backends(self) -> None:
    """Retrieve all secrets backends defined in the providers."""
    for pkg, info in self._provider_dict.items():
        class_names = info.data.get("secrets-backends")
        if not class_names:
            continue
        self._secrets_backend_class_name_set.update(
            name for name in class_names if _correctness_check(pkg, name, info)
        )
def _discover_executors(self) -> None:
    """Retrieve all executors defined in the providers."""
    for pkg, info in self._provider_dict.items():
        class_names = info.data.get("executors")
        if not class_names:
            continue
        self._executor_class_name_set.update(
            name for name in class_names if _correctness_check(pkg, name, info)
        )
def _discover_queues(self) -> None:
    """Retrieve all queues defined in the providers."""
    for pkg, info in self._provider_dict.items():
        class_names = info.data.get("queues")
        if not class_names:
            continue
        self._queue_class_name_set.update(
            name for name in class_names if _correctness_check(pkg, name, info)
        )
def _discover_config(self) -> None:
"""Retrieve all configs defined in the providers."""
for provider_package, provider in self._provider_dict.items():
if provider.data.get("config"):
self._provider_configs[provider_package] = provider.data.get("config") # type: ignore[assignment]
def _discover_plugins(self) -> None:
    """Retrieve all plugins defined in the providers."""
    for pkg, info in self._provider_dict.items():
        for plugin_entry in info.data.get("plugins", ()):
            plugin_class = plugin_entry["plugin-class"]
            if not _correctness_check(pkg, plugin_class, info):
                log.warning("Plugin not loaded due to above correctness check problem.")
                continue
            self._plugins_set.add(
                PluginInfo(
                    name=plugin_entry["name"],
                    plugin_class=plugin_class,
                    provider_name=pkg,
                )
            )
@provider_info_cache("triggers")
def initialize_providers_triggers(self):
    """Initialize providers triggers."""
    self.initialize_providers_list()
    for provider_package, provider in self._provider_dict.items():
        for trigger in provider.data.get("triggers", []):
            # Robustness fix: a trigger entry missing "python-modules" previously
            # crashed discovery with TypeError (iterating None); default to [].
            for trigger_class_name in trigger.get("python-modules", []):
                self._trigger_info_set.add(
                    TriggerInfo(
                        package_name=provider_package,
                        trigger_class_name=trigger_class_name,
                        integration_name=trigger.get("integration-name", ""),
                    )
                )
@property
def auth_managers(self) -> list[str]:
    """Return the sorted list of available auth manager class names."""
    self.initialize_providers_auth_managers()
    return sorted(self._auth_manager_class_name_set)
@property
def notification(self) -> list[NotificationInfo]:
    """Return the sorted list of notification classes declared by providers."""
    # NOTE(review): _discover_notifications stores class-name strings, so
    # NotificationInfo is presumably a str alias - confirm against its definition.
    self.initialize_providers_notifications()
    return sorted(self._notification_info_set)
@property
def trigger(self) -> list[TriggerInfo]:
    """Return available provider trigger infos, sorted by package name."""
    self.initialize_providers_triggers()
    return sorted(self._trigger_info_set, key=lambda x: x.package_name)
@property
def providers(self) -> dict[str, ProviderInfo]:
    """Return information about available providers, keyed by package name."""
    self.initialize_providers_list()
    return self._provider_dict
@property
def hooks(self) -> MutableMapping[str, HookInfo | None]:
    """
    Return dictionary of connection_type-to-hook mapping.

    Note that the dict can contain None values if a hook discovered cannot be imported!
    """
    self.initialize_providers_hooks()
    # When we return hooks here it will only be used to retrieve hook information
    # (values registered as functools.partial are imported lazily on first access).
    return self._hooks_lazy_dict
@property
def dialects(self) -> MutableMapping[str, DialectInfo]:
    """Return dictionary of connection_type-to-dialect mapping."""
    self.initialize_providers_hooks()
    # When we return dialects here it will only be used to retrieve dialect information
    return self._dialect_provider_dict
@property
def plugins(self) -> list[PluginInfo]:
    """Return plugins available in providers, sorted by plugin class path."""
    self.initialize_providers_plugins()
    return sorted(self._plugins_set, key=lambda x: x.plugin_class)
@property
def taskflow_decorators(self) -> dict[str, TaskDecorator]:
    """Return mapping of decorator name to (lazily imported) taskflow decorator."""
    self.initialize_providers_taskflow_decorator()
    return self._taskflow_decorators  # type: ignore[return-value]
@property
def extra_links_class_names(self) -> list[str]:
    """Return the sorted list of extra link class names."""
    self.initialize_providers_extra_links()
    return sorted(self._extra_link_class_name_set)
@property
def connection_form_widgets(self) -> dict[str, ConnectionFormWidgetInfo]:
    """
    Returns widgets for connection forms.

    Dictionary keys in the same order that it defined in Hook.
    """
    self.initialize_providers_hooks()
    # Force-import all hooks so every widget is registered before returning.
    self._import_info_from_all_hooks()
    return self._connection_form_widgets
@property
def field_behaviours(self) -> dict[str, dict]:
    """Returns dictionary with field behaviours for connection types."""
    self.initialize_providers_hooks()
    # Force-import all hooks so every customization is registered before returning.
    self._import_info_from_all_hooks()
    return self._field_behaviours
@property
def logging_class_names(self) -> list[str]:
    """Return the sorted list of log task handler class names."""
    self.initialize_providers_logging()
    return sorted(self._logging_class_name_set)
@property
def secrets_backend_class_names(self) -> list[str]:
    """Return the sorted list of secrets backend class names."""
    self.initialize_providers_secrets_backends()
    return sorted(self._secrets_backend_class_name_set)
@property
def executor_class_names(self) -> list[str]:
    """Return the sorted list of executor class names."""
    self.initialize_providers_executors()
    return sorted(self._executor_class_name_set)
@property
def queue_class_names(self) -> list[str]:
    """Return the sorted list of queue class names."""
    self.initialize_providers_queues()
    return sorted(self._queue_class_name_set)
@property
def filesystem_module_names(self) -> list[str]:
    """Return the sorted list of filesystem module names."""
    self.initialize_providers_filesystems()
    return sorted(self._fs_set)
@property
def asset_factories(self) -> dict[str, Callable[..., Asset]]:
    """Return mapping of URI scheme to asset factory callable."""
    self.initialize_providers_asset_uri_resources()
    return self._asset_factories
@property
def asset_uri_handlers(self) -> dict[str, Callable[[SplitResult], SplitResult]]:
    """Return mapping of URI scheme to asset URI normalizer/handler."""
    self.initialize_providers_asset_uri_resources()
    return self._asset_uri_handlers
@property
def asset_to_openlineage_converters(
    self,
) -> dict[str, Callable]:
    """Return mapping of URI scheme to OpenLineage converter callable."""
    self.initialize_providers_asset_uri_resources()
    return self._asset_to_openlineage_converters
@property
def provider_configs(self) -> list[tuple[str, dict[str, Any]]]:
    """Return provider configurations (initializing them if needed), sorted by package name."""
    self.initialize_providers_configuration()
    return sorted(self._provider_configs.items(), key=lambda x: x[0])
@property
def already_initialized_provider_configs(self) -> list[tuple[str, dict[str, Any]]]:
    """Return provider configurations WITHOUT triggering initialization, sorted by package name."""
    return sorted(self._provider_configs.items(), key=lambda x: x[0])
def _cleanup(self):
self._initialized_cache.clear()
self._provider_dict.clear()
self._fs_set.clear()
self._taskflow_decorators.clear()
self._hook_provider_dict.clear()
self._dialect_provider_dict.clear()
self._hooks_lazy_dict.clear()
self._connection_form_widgets.clear()
self._field_behaviours.clear()
self._extra_link_class_name_set.clear()
self._logging_class_name_set.clear()
self._auth_manager_class_name_set.clear()
self._secrets_backend_class_name_set.clear()
self._executor_class_name_set.clear()
self._queue_class_name_set.clear()
self._provider_configs.clear()
self._trigger_info_set.clear()
self._notification_info_set.clear()
self._plugins_set.clear()
self._initialized = False
self._initialization_stack_trace = None
| ProvidersManager |
python | django__django | tests/model_fields/models.py | {
"start": 12605,
"end": 12825
} | class ____(models.Model):
value = models.JSONField(
db_default=models.JSONNull(), encoder=JSONNullCustomEncoder
)
class Meta:
required_db_features = {"supports_json_field"}
| JSONNullDefaultModel |
python | pypa__warehouse | tests/common/db/organizations.py | {
"start": 5252,
"end": 5658
} | class ____(WarehouseFactory):
class Meta:
model = Team
id = factory.Faker("uuid4", cast_to=None)
name = factory.Faker("pystr", max_chars=12)
created = factory.Faker(
"date_time_between_dates",
datetime_start=datetime.datetime(2020, 1, 1),
datetime_end=datetime.datetime(2022, 1, 1),
)
organization = factory.SubFactory(OrganizationFactory)
| TeamFactory |
python | getsentry__sentry | src/sentry/api/analytics.py | {
"start": 440,
"end": 698
} | class ____(analytics.Event):
organization_id: int
project_id: int
group_id: int
hash: str
user_id: int | None
count_over_threshold: int | None = None
@analytics.eventclass("devtoolbar.api_request")
| GroupSimilarIssuesEmbeddingsCountEvent |
python | huggingface__transformers | src/transformers/models/gpt_oss/modeling_gpt_oss.py | {
"start": 8425,
"end": 8833
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.router = GptOssTopKRouter(config)
self.experts = GptOssExperts(config)
def forward(self, hidden_states):
router_scores, router_indices = self.router(hidden_states)
routed_out = self.experts(hidden_states, router_indices, router_scores)
return routed_out, router_scores
| GptOssMLP |
python | pytorch__pytorch | torch/_dynamo/pgo.py | {
"start": 4883,
"end": 6318
} | class ____:
filename: str
firstlineno: int
name: str
# When a job restart, the code can be copied to a different path than the previous attempt. In that case
# self.filename will have a different value, we do not want to consider those differences. Instead we
# hash the content of the file and use it as an identifier of the file.
#
# self.filename is kept in the object to give readable information/pointer to the actual file, in a local
# code state it will refer to the first seen file path.
file_hash: str
# Exclude file name.
def __eq__(self, other: object) -> bool:
if not isinstance(other, CodeId):
return False
return (
self.file_hash == other.file_hash
and self.firstlineno == other.firstlineno
and self.name == other.name
)
# Ensure if two CodeIds are the same, then they have the same hash by excluding filename.
def __hash__(self) -> int:
return hash((self.file_hash, self.name, self.firstlineno))
def __str__(self) -> str:
return f"hash({self.file_hash}){self.filename}:{self.firstlineno}:{self.name}"
@staticmethod
def make(code: types.CodeType) -> CodeId:
return CodeId(
code.co_filename,
code.co_firstlineno,
code.co_name,
_hash_containing_file(code.co_filename),
)
@dataclasses.dataclass
| CodeId |
python | neetcode-gh__leetcode | python/0334-increasing-triplet-subsequence.py | {
"start": 0,
"end": 596
} | class ____:
def increasingTriplet(self, nums: List[int]) -> bool:
first = float('inf') # Initialize first to positive infinity
second = float('inf') # Initialize second to positive infinity
for num in nums:
if num <= first:
first = num # Update first if num is smaller or equal
elif num <= second:
second = num # Update second if num is smaller or equal
else:
return True # We found a triplet: first < second < num
return False # No triplet exists
| Solution |
python | wandb__wandb | wandb/sdk/lib/fsm.py | {
"start": 1836,
"end": 2623
} | class ____(Protocol[T_FsmInputs, T_FsmContext_cov]):
@abstractmethod
def on_exit(self, inputs: T_FsmInputs) -> T_FsmContext_cov: ... # pragma: no cover
# It would be nice if python provided optional protocol members, but it does not as described here:
# https://peps.python.org/pep-0544/#support-optional-protocol-members
# Until then, we can only enforce that a state at least supports one protocol interface. This
# unfortunately will not check the signature of other potential protocols.
FsmState: TypeAlias = Union[
FsmStateCheck[T_FsmInputs],
FsmStateOutput[T_FsmInputs],
FsmStateEnter[T_FsmInputs],
FsmStateEnterWithContext[T_FsmInputs, T_FsmContext],
FsmStateStay[T_FsmInputs],
FsmStateExit[T_FsmInputs, T_FsmContext],
]
@dataclass
| FsmStateExit |
python | getsentry__sentry | src/sentry/api/serializers/models/group.py | {
"start": 33474,
"end": 33723
} | class ____(TypedDict):
culprit: str | None
id: str
isUnhandled: bool | None
issueCategory: str
permalink: str
shortId: str
title: str
latestEvent: dict[str, Any]
project: dict[str, Any]
| SharedGroupSerializerResponse |
python | getsentry__sentry | tests/sentry/relocation/tasks/test_process.py | {
"start": 37969,
"end": 40566
} | class ____(RelocationTaskTestCase):
def setUp(self) -> None:
super().setUp()
self.relocation.step = Relocation.Step.PREPROCESSING.value
self.relocation.latest_task = OrderedTask.PREPROCESSING_SCAN.name
self.relocation.want_usernames = ["importing"]
self.relocation.save()
self.create_user("importing")
self.relocation_storage = get_relocation_storage()
def test_retry_if_attempts_left(
self,
preprocessing_baseline_config_mock: Mock,
fake_message_builder: Mock,
):
RelocationFile.objects.filter(relocation=self.relocation).delete()
self.mock_message_builder(fake_message_builder)
# An exception being raised will trigger a retry task.
with pytest.raises(Exception):
preprocessing_transfer(self.uuid)
assert fake_message_builder.call_count == 0
assert preprocessing_baseline_config_mock.call_count == 0
relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.status == Relocation.Status.IN_PROGRESS.value
assert relocation.latest_notified != Relocation.EmailKind.FAILED.value
assert not relocation.failure_reason
def test_fail_if_no_attempts_left(
self,
preprocessing_baseline_config_mock: Mock,
fake_message_builder: Mock,
):
self.relocation.latest_task = OrderedTask.PREPROCESSING_TRANSFER.name
self.relocation.latest_task_attempts = MAX_FAST_TASK_RETRIES
self.relocation.save()
RelocationFile.objects.filter(relocation=self.relocation).delete()
self.mock_message_builder(fake_message_builder)
with pytest.raises(Exception):
preprocessing_transfer(self.uuid)
assert fake_message_builder.call_count == 1
assert fake_message_builder.call_args.kwargs["type"] == "relocation.failed"
fake_message_builder.return_value.send_async.assert_called_once_with(
to=[self.owner.email, self.superuser.email]
)
assert preprocessing_baseline_config_mock.call_count == 0
relocation = Relocation.objects.get(uuid=self.uuid)
assert relocation.status == Relocation.Status.FAILURE.value
assert relocation.latest_notified == Relocation.EmailKind.FAILED.value
assert relocation.failure_reason == ERR_PREPROCESSING_INTERNAL
@patch("sentry.backup.crypto.KeyManagementServiceClient")
@patch("sentry.relocation.utils.MessageBuilder")
@patch("sentry.relocation.tasks.process.preprocessing_colliding_users.apply_async")
| PreprocessingTransferTest |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 72517,
"end": 72835
} | class ____(_PrintableStructure):
_fields_ = [
("version", c_uint),
("bChannelRepairPending", c_uint),
("bTpcRepairPending", c_uint),
]
def __init__(self):
super(c_nvmlRepairStatus_t, self).__init__(version=nvmlRepairStatus_v1)
nvmlNvLinkInfo_v1 =0x1000008
| c_nvmlRepairStatus_t |
python | django__django | tests/auth_tests/test_auth_backends.py | {
"start": 35018,
"end": 38959
} | class ____(TestCase):
"""
Other backends are not checked once a backend raises PermissionDenied
"""
backend = "auth_tests.test_auth_backends.PermissionDeniedBackend"
@classmethod
def setUpTestData(cls):
cls.user1 = User.objects.create_user("test", "test@example.com", "test")
def setUp(self):
self.user_login_failed = []
signals.user_login_failed.connect(self.user_login_failed_listener)
self.addCleanup(
signals.user_login_failed.disconnect, self.user_login_failed_listener
)
def user_login_failed_listener(self, sender, credentials, **kwargs):
self.user_login_failed.append(credentials)
@modify_settings(AUTHENTICATION_BACKENDS={"prepend": backend})
def test_permission_denied(self):
"""
user is not authenticated after a backend raises permission denied
#2550
"""
self.assertIsNone(authenticate(username="test", password="test"))
# user_login_failed signal is sent.
self.assertEqual(
self.user_login_failed,
[{"password": "********************", "username": "test"}],
)
@modify_settings(AUTHENTICATION_BACKENDS={"prepend": backend})
async def test_aauthenticate_permission_denied(self):
self.assertIsNone(await aauthenticate(username="test", password="test"))
# user_login_failed signal is sent.
self.assertEqual(
self.user_login_failed,
[{"password": "********************", "username": "test"}],
)
@modify_settings(AUTHENTICATION_BACKENDS={"append": backend})
def test_authenticates(self):
self.assertEqual(authenticate(username="test", password="test"), self.user1)
@modify_settings(AUTHENTICATION_BACKENDS={"append": backend})
async def test_aauthenticate(self):
self.assertEqual(
await aauthenticate(username="test", password="test"), self.user1
)
@modify_settings(AUTHENTICATION_BACKENDS={"prepend": backend})
def test_has_perm_denied(self):
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(
name="test", content_type=content_type, codename="test"
)
self.user1.user_permissions.add(perm)
self.assertIs(self.user1.has_perm("auth.test"), False)
self.assertIs(self.user1.has_module_perms("auth"), False)
@modify_settings(AUTHENTICATION_BACKENDS={"prepend": backend})
async def test_ahas_perm_denied(self):
content_type = await sync_to_async(ContentType.objects.get_for_model)(Group)
perm = await Permission.objects.acreate(
name="test", content_type=content_type, codename="test"
)
await self.user1.user_permissions.aadd(perm)
self.assertIs(await self.user1.ahas_perm("auth.test"), False)
self.assertIs(await self.user1.ahas_module_perms("auth"), False)
@modify_settings(AUTHENTICATION_BACKENDS={"append": backend})
def test_has_perm(self):
content_type = ContentType.objects.get_for_model(Group)
perm = Permission.objects.create(
name="test", content_type=content_type, codename="test"
)
self.user1.user_permissions.add(perm)
self.assertIs(self.user1.has_perm("auth.test"), True)
self.assertIs(self.user1.has_module_perms("auth"), True)
@modify_settings(AUTHENTICATION_BACKENDS={"append": backend})
async def test_ahas_perm(self):
content_type = await sync_to_async(ContentType.objects.get_for_model)(Group)
perm = await Permission.objects.acreate(
name="test", content_type=content_type, codename="test"
)
await self.user1.user_permissions.aadd(perm)
self.assertIs(await self.user1.ahas_perm("auth.test"), True)
self.assertIs(await self.user1.ahas_module_perms("auth"), True)
| PermissionDeniedBackendTest |
python | getsentry__sentry | src/sentry/integrations/github_enterprise/integration.py | {
"start": 5780,
"end": 10120
} | class ____(
RepositoryIntegration, GitHubIssuesSpec, CommitContextIntegration
):
codeowners_locations = ["CODEOWNERS", ".github/CODEOWNERS", "docs/CODEOWNERS"]
@property
def integration_name(self) -> str:
return IntegrationProviderSlug.GITHUB_ENTERPRISE.value
def get_client(self):
if not self.org_integration:
raise IntegrationError("Organization Integration does not exist")
base_url = self.model.metadata["domain_name"].split("/")[0]
return GitHubEnterpriseApiClient(
base_url=base_url,
integration=self.model,
private_key=self.model.metadata["installation"]["private_key"],
app_id=self.model.metadata["installation"]["id"],
verify_ssl=self.model.metadata["installation"]["verify_ssl"],
org_integration_id=self.org_integration.id,
)
# IntegrationInstallation methods
def message_from_error(self, exc: Exception) -> str:
if isinstance(exc, ApiError):
if exc.code is None:
message = None
else:
message = API_ERRORS.get(exc.code)
if message is None:
message = exc.json.get("message", "unknown error") if exc.json else "unknown error"
return f"Error Communicating with GitHub Enterprise (HTTP {exc.code}): {message}"
else:
return ERR_INTERNAL
# RepositoryIntegration methods
def get_repositories(
self, query: str | None = None, page_number_limit: int | None = None
) -> list[dict[str, Any]]:
if not query:
all_repos = self.get_client().get_repos(page_number_limit=page_number_limit)
return [
{
"name": i["name"],
"identifier": i["full_name"],
"default_branch": i.get("default_branch"),
}
for i in all_repos
if not i.get("archived")
]
full_query = build_repository_query(self.model.metadata, self.model.name, query)
response = self.get_client().search_repositories(full_query)
return [
{
"name": i["name"],
"identifier": i["full_name"],
"default_branch": i.get("default_branch"),
}
for i in response.get("items", [])
]
def source_url_matches(self, url: str) -> bool:
return url.startswith(f"https://{self.model.metadata["domain_name"]}")
def format_source_url(self, repo: Repository, filepath: str, branch: str | None) -> str:
# Must format the url ourselves since `check_file` is a head request
# "https://github.example.org/octokit/octokit.rb/blob/master/README.md"
return f"{repo.url}/blob/{branch}/{filepath}"
def extract_branch_from_source_url(self, repo: Repository, url: str) -> str:
if not repo.url:
return ""
branch, _ = parse_github_blob_url(repo.url, url)
return branch
def extract_source_path_from_source_url(self, repo: Repository, url: str) -> str:
if not repo.url:
return ""
_, source_path = parse_github_blob_url(repo.url, url)
return source_path
def search_issues(self, query: str | None, **kwargs):
return self.get_client().search_issues(query)
def has_repo_access(self, repo: RpcRepository) -> bool:
# TODO: define this, used to migrate repositories
return False
# CommitContextIntegration methods
def on_create_or_update_comment_error(self, api_error: ApiError, metrics_base: str) -> bool:
if api_error.json:
if ISSUE_LOCKED_ERROR_MESSAGE in api_error.json.get("message", ""):
metrics.incr(
metrics_base.format(integration=self.integration_name, key="error"),
tags={"type": "issue_locked_error"},
)
return True
elif RATE_LIMITED_MESSAGE in api_error.json.get("message", ""):
metrics.incr(
metrics_base.format(integration=self.integration_name, key="error"),
tags={"type": "rate_limited_error"},
)
return True
return False
| GitHubEnterpriseIntegration |
python | PrefectHQ__prefect | src/prefect/server/orchestration/core_policy.py | {
"start": 8241,
"end": 8641
} | class ____(TaskRunOrchestrationPolicy):
@staticmethod
def priority() -> list[
Union[
type[BaseUniversalTransform[orm_models.TaskRun, core.TaskRunPolicy]],
type[BaseOrchestrationRule[orm_models.TaskRun, core.TaskRunPolicy]],
]
]:
return [
ReleaseTaskConcurrencySlots, # always release concurrency slots
]
| MinimalTaskPolicy |
python | tensorflow__tensorflow | tensorflow/python/compiler/tensorrt/test/multi_connection_neighbor_engine_test.py | {
"start": 1082,
"end": 2506
} | class ____(trt_test.TfTrtIntegrationTestBase):
"""Test for multi connection neighboring nodes wiring tests in TF-TRT."""
def GraphFn(self, x):
dtype = x.dtype
e = constant_op.constant(
np.random.normal(.05, .005, [3, 2, 3, 4]), name="weights", dtype=dtype)
conv = nn.conv2d(
input=x,
filter=e,
data_format="NCHW",
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
b = constant_op.constant(
np.random.normal(2.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
t = conv + b
b = constant_op.constant(
np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
q = conv - b
edge = self.trt_incompatible_op(q)
b = constant_op.constant(
np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
d = b + conv
edge3 = self.trt_incompatible_op(d)
edge1 = self.trt_incompatible_op(conv)
t = t - edge1
q = q + edge
t = t + q
t = t + d
t = t - edge3
return array_ops.squeeze(t, name="output_0")
def GetParams(self):
return self.BuildParams(self.GraphFn, dtypes.float32, [[2, 3, 7, 5]],
[[2, 4, 5, 4]])
def ExpectedEnginesToBuild(self, run_params):
"""Return the expected engines to build."""
return ["TRTEngineOp_000", "TRTEngineOp_001"]
if __name__ == "__main__":
test.main()
| MultiConnectionNeighborEngineTest |
python | plotly__plotly.py | plotly/graph_objs/table/legendgrouptitle/_font.py | {
"start": 233,
"end": 9916
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "table.legendgrouptitle"
_path_str = "table.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.table.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.table.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.table.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_kubernetes_engine.py | {
"start": 10033,
"end": 14074
} | class ____:
def setup_method(self):
self.operator = GKEDeleteClusterOperator(
task_id=TEST_TASK_ID,
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
cluster_name=GKE_CLUSTER_NAME,
gcp_conn_id=TEST_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
def test_template_fields(self):
expected_template_fields = {"api_version", "deferrable", "poll_interval"} | set(
GKEOperatorMixin.template_fields
)
assert set(self.operator.template_fields) == expected_template_fields
@pytest.mark.parametrize("missing_parameter", ["project_id", "location", "cluster_name"])
def test_check_input(self, missing_parameter):
setattr(self.operator, missing_parameter, None)
with pytest.raises(AirflowException):
self.operator._check_input()
@mock.patch(GKE_OPERATORS_PATH.format("GKEHook"))
def test_execute(self, mock_cluster_hook):
mock_delete_cluster = mock_cluster_hook.return_value.delete_cluster
mock_operation = mock_delete_cluster.return_value
mock_operation.self_link = TEST_SELF_LINK
result = self.operator.execute(context=mock.MagicMock())
mock_delete_cluster.assert_called_once_with(
name=GKE_CLUSTER_NAME,
project_id=TEST_PROJECT_ID,
wait_to_complete=True,
)
assert result == TEST_SELF_LINK
@mock.patch(GKE_OPERATORS_PATH.format("GKEOperationTrigger"))
@mock.patch(GKE_OPERATORS_PATH.format("GKEDeleteClusterOperator.defer"))
@mock.patch(GKE_OPERATORS_PATH.format("GKEHook"))
def test_deferrable(self, mock_cluster_hook, mock_defer, mock_trigger):
mock_delete_cluster = mock_cluster_hook.return_value.delete_cluster
mock_operation = mock_delete_cluster.return_value
mock_operation.name = TEST_OPERATION_NAME
mock_trigger_instance = mock_trigger.return_value
self.operator.deferrable = True
self.operator.execute(context=mock.MagicMock())
mock_delete_cluster.assert_called_once_with(
name=GKE_CLUSTER_NAME,
project_id=TEST_PROJECT_ID,
wait_to_complete=False,
)
mock_trigger.assert_called_once_with(
operation_name=TEST_OPERATION_NAME,
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
gcp_conn_id=TEST_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
poll_interval=10,
)
mock_defer.assert_called_once_with(
trigger=mock_trigger_instance,
method_name="execute_complete",
)
@mock.patch(GKE_OPERATORS_PATH.format("GKEDeleteClusterOperator.log"))
@mock.patch(GKE_OPERATORS_PATH.format("GKEHook"))
def test_execute_complete(self, cluster_hook, mock_log):
mock_get_operation = cluster_hook.return_value.get_operation
mock_get_operation.return_value.self_link = TEST_SELF_LINK
expected_status, expected_message = "success", "test-message"
event = dict(status=expected_status, message=expected_message, operation_name=TEST_OPERATION_NAME)
result = self.operator.execute_complete(context=mock.MagicMock(), event=event)
mock_log.info.assert_called_once_with(expected_message)
mock_get_operation.assert_called_once_with(operation_name=TEST_OPERATION_NAME)
assert result == TEST_SELF_LINK
@pytest.mark.parametrize("status", ["failed", "error"])
@mock.patch(GKE_OPERATORS_PATH.format("GKEDeleteClusterOperator.log"))
def test_execute_complete_error(self, mock_log, status):
expected_message = "test-message"
event = dict(status=status, message=expected_message)
with pytest.raises(AirflowException):
self.operator.execute_complete(context=mock.MagicMock(), event=event)
mock_log.exception.assert_called_once_with("Trigger ended with one of the failed statuses.")
| TestGKEDeleteClusterOperator |
python | gevent__gevent | src/greentest/3.14/test_socketserver.py | {
"start": 12072,
"end": 12554
} | class ____(socketserver.ThreadingMixIn,
BaseErrorTestServer):
def __init__(self, *pos, **kw):
self.done = threading.Event()
super().__init__(*pos, **kw)
def shutdown_request(self, *pos, **kw):
super().shutdown_request(*pos, **kw)
self.done.set()
def wait_done(self):
self.done.wait()
if HAVE_FORKING:
class ForkingErrorTestServer(socketserver.ForkingMixIn, BaseErrorTestServer):
pass
| ThreadingErrorTestServer |
python | tornadoweb__tornado | tornado/test/gen_test.py | {
"start": 18728,
"end": 20086
} | class ____(AsyncHTTPTestCase):
def get_app(self):
return Application(
[
("/coroutine_sequence", GenCoroutineSequenceHandler),
(
"/coroutine_unfinished_sequence",
GenCoroutineUnfinishedSequenceHandler,
),
("/undecorated_coroutine", UndecoratedCoroutinesHandler),
("/async_prepare_error", AsyncPrepareErrorHandler),
("/native_coroutine", NativeCoroutineHandler),
]
)
def test_coroutine_sequence_handler(self):
response = self.fetch("/coroutine_sequence")
self.assertEqual(response.body, b"123")
def test_coroutine_unfinished_sequence_handler(self):
response = self.fetch("/coroutine_unfinished_sequence")
self.assertEqual(response.body, b"123")
def test_undecorated_coroutines(self):
response = self.fetch("/undecorated_coroutine")
self.assertEqual(response.body, b"123")
def test_async_prepare_error_handler(self):
response = self.fetch("/async_prepare_error")
self.assertEqual(response.code, 403)
def test_native_coroutine_handler(self):
response = self.fetch("/native_coroutine")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"ok")
| GenWebTest |
python | pytorch__pytorch | test/dynamo/test_cudagraphs.py | {
"start": 1373,
"end": 5736
} | class ____(torch._dynamo.test_case.TestCase):
@patch_all()
def test_basic(self):
def model(x, y):
return (x + y) * y
@torch.compile(backend="cudagraphs")
def fn(x, y):
for _ in range(N_ITERS):
loss = model(x, y).sum()
loss.backward()
x = torch.randn(3, device="cuda", requires_grad=True)
y = torch.randn(3, device="cuda")
fn(x, y)
@patch_all()
def test_dtoh(self):
def model(x, y):
a = x + y
b = a.cpu() * 3
return b
@torch.compile(backend="cudagraphs")
def fn(x, y):
for _ in range(N_ITERS):
loss = model(x, y).sum()
loss.backward()
x = torch.randn(3, device="cuda", requires_grad=True)
y = torch.randn(3, device="cuda")
fn(x, y)
@patch_all()
def test_htod(self):
def model(x, y):
a = x + y
return a * 3
@torch.compile(backend="cudagraphs")
def fn(x, y):
for _ in range(N_ITERS):
loss = model(x, y).sum()
loss.backward()
x = torch.randn(3, device="cuda", requires_grad=True)
y = torch.randn((), device="cpu")
fn(x, y)
def test_mutate_input(self):
def model(x, y):
y.add_(3)
return x * y
@torch.compile(backend="cudagraphs")
def fn(x, y):
for i in range(N_ITERS):
with self.subTest(i):
y_orig = y.clone()
loss = model(x, y).sum()
self.assertTrue(same(y, y_orig + 3))
loss.backward()
x = torch.randn(3, device="cuda", requires_grad=True)
y = torch.randn(3, device="cuda")
fn(x, y)
@patch_all()
def test_mutate_constant(self):
def model(x, y):
c = torch.tensor(1)
c.add_(2)
return x * y * 0 + c
@torch.compile(backend="cudagraphs")
def fn(x, y):
for i in range(N_ITERS):
with self.subTest(i):
loss = model(x, y).sum()
self.assertTrue(same(loss, torch.tensor(3.0, device="cuda")))
loss.backward()
x = torch.randn(1, device="cuda", requires_grad=True)
y = torch.randn(1, device="cuda")
fn(x, y)
@patch_all()
def test_factory(self):
def model(y):
x = torch.zeros(3, device="cuda:0")
x.add_(3)
return x * y
@torch.compile(backend="cudagraphs")
def fn(y):
for i in range(N_ITERS):
with self.subTest(i):
loss = model(y).sum()
loss.backward()
y = torch.randn(3, device="cuda:0", requires_grad=True)
fn(y)
@patch_all()
def test_mutated_metadata(self):
# more tortured example at
# https://github.com/pytorch/pytorch/issues/81385
def model(x):
x = x.clone()
x.resize_(20)
x.fill_(2)
return x
@torch.compile(backend="cudagraphs")
def fn(x):
for i in range(N_ITERS):
with self.subTest(i):
rx = model(x)
self.assertTrue(same(rx, torch.full((20,), 2.0, device="cuda:0")))
x = torch.empty(0, device="cuda:0")
fn(x)
@patch_all()
def test_dead_fill(self):
def model(x):
x = x.clone()
y = x[0:0]
x.fill_(2)
y.fill_(3)
return x, y
@torch.compile(backend="cudagraphs")
def fn(x):
for i in range(N_ITERS):
with self.subTest(i):
rx, ry = model(x)
self.assertTrue(same(rx, torch.full((20,), 2.0, device="cuda:0")))
self.assertTrue(same(ry, torch.empty(0, device="cuda:0")))
x = torch.empty(20, device="cuda:0")
fn(x)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
if not TEST_CUDA_GRAPH:
if __name__ == "__main__":
import sys
sys.exit(0)
raise unittest.SkipTest("cuda graph test is skipped")
run_tests()
| TestAotCudagraphs |
python | pydantic__pydantic | tests/test_private_attributes.py | {
"start": 9778,
"end": 15858
} | class ____(BaseModel):
_FIELD_UPDATE_STRATEGY: t.ClassVar[t.dict[str, t.Any]] = {}
"""
)
assert module.BaseConfig._FIELD_UPDATE_STRATEGY == {}
@pytest.mark.skipif(not hasattr(functools, 'cached_property'), reason='cached_property is not available')
def test_private_properties_not_included_in_iter_cached_property() -> None:
class Model(BaseModel):
foo: int
@computed_field
@functools.cached_property
def _foo(self) -> int:
return -self.foo
m = Model(foo=1)
assert '_foo' not in list(k for k, _ in m)
def test_private_properties_not_included_in_iter_property() -> None:
class Model(BaseModel):
foo: int
@computed_field
@property
def _foo(self) -> int:
return -self.foo
m = Model(foo=1)
assert '_foo' not in list(k for k, _ in m)
def test_private_properties_not_included_in_repr_by_default_property() -> None:
class Model(BaseModel):
foo: int
@computed_field
@property
def _private_property(self) -> int:
return -self.foo
m = Model(foo=1)
m_repr = repr(m)
assert '_private_property' not in m_repr
@pytest.mark.skipif(not hasattr(functools, 'cached_property'), reason='cached_property is not available')
def test_private_properties_not_included_in_repr_by_default_cached_property() -> None:
class Model(BaseModel):
foo: int
@computed_field
@functools.cached_property
def _private_cached_property(self) -> int:
return -self.foo
m = Model(foo=1)
m_repr = repr(m)
assert '_private_cached_property' not in m_repr
@pytest.mark.parametrize('base', [ModelPrivateAttr, object])
@pytest.mark.parametrize('use_annotation', [True, False])
def test_private_descriptors(base, use_annotation):
set_name_calls = []
get_calls = []
set_calls = []
delete_calls = []
class MyDescriptor(base):
def __init__(self, fn):
super().__init__()
self.fn = fn
self.name = ''
def __set_name__(self, owner, name):
set_name_calls.append((owner, name))
self.name = name
def __get__(self, obj, type=None):
get_calls.append((obj, type))
return self.fn(obj) if obj else self
def __set__(self, obj, value):
set_calls.append((obj, value))
self.fn = lambda obj: value
def __delete__(self, obj):
delete_calls.append(obj)
def fail(obj):
# I have purposely not used the exact formatting you'd get if the attribute wasn't defined,
# to make it clear this function is being called, while also having sensible behavior
raise AttributeError(f'{self.name!r} is not defined on {obj!r}')
self.fn = fail
class A(BaseModel):
x: int
if use_annotation:
_some_func: MyDescriptor = MyDescriptor(lambda self: self.x)
else:
_some_func = MyDescriptor(lambda self: self.x)
@property
def _double_x(self):
return self.x * 2
assert set(A.__private_attributes__) == {'_some_func'}
assert set_name_calls == [(A, '_some_func')]
a = A(x=2)
assert a._double_x == 4 # Ensure properties with leading underscores work fine and don't become private attributes
assert get_calls == []
assert a._some_func == 2
assert get_calls == [(a, A)]
assert set_calls == []
a._some_func = 3
assert set_calls == [(a, 3)]
assert a._some_func == 3
assert get_calls == [(a, A), (a, A)]
assert delete_calls == []
del a._some_func
assert delete_calls == [a]
with pytest.raises(AttributeError, match=r"'_some_func' is not defined on A\(x=2\)"):
a._some_func
assert get_calls == [(a, A), (a, A), (a, A)]
def test_private_attr_set_name():
class SetNameInt(int):
_owner_attr_name: Optional[str] = None
def __set_name__(self, owner, name):
self._owner_attr_name = f'{owner.__name__}.{name}'
_private_attr_default = SetNameInt(1)
class Model(BaseModel):
_private_attr_1: int = PrivateAttr(default=_private_attr_default)
_private_attr_2: SetNameInt = SetNameInt(2)
assert _private_attr_default._owner_attr_name == 'Model._private_attr_1'
m = Model()
assert m._private_attr_1 == 1
assert m._private_attr_1._owner_attr_name == 'Model._private_attr_1'
assert m._private_attr_2 == 2
assert m._private_attr_2._owner_attr_name == 'Model._private_attr_2'
def test_private_attr_default_descriptor_attribute_error():
class SetNameInt(int):
def __get__(self, obj, cls):
return self
_private_attr_default = SetNameInt(1)
class Model(BaseModel):
_private_attr: int = PrivateAttr(default=_private_attr_default)
assert Model.__private_attributes__['_private_attr'].__get__(None, Model) == _private_attr_default
with pytest.raises(AttributeError, match="'ModelPrivateAttr' object has no attribute 'some_attr'"):
Model.__private_attributes__['_private_attr'].some_attr
def test_private_attr_set_name_do_not_crash_if_not_callable():
class SetNameInt(int):
__set_name__ = None
_private_attr_default = SetNameInt(2)
class Model(BaseModel):
_private_attr: int = PrivateAttr(default=_private_attr_default)
# Checks below are just to ensure that everything is the same as in `test_private_attr_set_name`
# The main check is that model class definition above doesn't crash
assert Model()._private_attr == 2
def test_private_attribute_not_skipped_during_ns_inspection() -> None:
# It is important for the enum name to start with the class name
# (it previously caused issues as we were comparing qualnames without
# taking this into account):
class Fullname(str, Enum):
pass
class Full(BaseModel):
_priv: object = Fullname
assert isinstance(Full._priv, ModelPrivateAttr)
| BaseConfig |
python | django-extensions__django-extensions | django_extensions/management/commands/mail_debug.py | {
"start": 1049,
"end": 3240
} | class ____(BaseCommand):
help = "Starts a test mail server for development."
args = "[optional port number or ippaddr:port]"
requires_system_checks: List[str] = []
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument("addrport", nargs="?")
parser.add_argument(
"--output",
dest="output_file",
default=None,
help="Specifies an output file to send a copy of all messages "
"(not flushed immediately).",
)
parser.add_argument(
"--use-settings",
dest="use_settings",
action="store_true",
default=False,
help="Uses EMAIL_HOST and HOST_PORT from Django settings.",
)
@signalcommand
def handle(self, addrport="", *args, **options):
if not addrport:
if options["use_settings"]:
from django.conf import settings
addr = getattr(settings, "EMAIL_HOST", "")
port = str(getattr(settings, "EMAIL_PORT", "1025"))
else:
addr = ""
port = "1025"
else:
try:
addr, port = addrport.split(":")
except ValueError:
addr, port = "", addrport
if not addr:
addr = "127.0.0.1"
if not port.isdigit():
raise CommandError("%r is not a valid port number." % port)
else:
port = int(port)
# Add console handler
setup_logger(logger, stream=self.stdout, filename=options["output_file"])
def inner_run():
quit_command = (sys.platform == "win32") and "CTRL-BREAK" or "CONTROL-C"
print(
"Now accepting mail at %s:%s -- use %s to quit"
% (addr, port, quit_command)
)
handler = CustomHandler()
controller = Controller(handler, hostname=addr, port=port)
controller.start()
loop = asyncio.get_event_loop()
loop.run_forever()
try:
inner_run()
except KeyboardInterrupt:
pass
| Command |
python | google__pytype | pytype/pyc/opcodes.py | {
"start": 20119,
"end": 20205
} | class ____(OpcodeWithArg):
_FLAGS = HAS_ARGUMENT | HAS_CONST
__slots__ = ()
| KW_NAMES |
python | redis__redis-py | tests/test_connection_pool.py | {
"start": 23066,
"end": 28247
} | class ____:
def test_on_connect_error(self):
"""
An error in Connection.on_connect should disconnect from the server
see for details: https://github.com/andymccurdy/redis-py/issues/368
"""
# this assumes the Redis server being tested against doesn't have
# 9999 databases ;)
bad_connection = redis.Redis(db=9999)
# an error should be raised on connect
with pytest.raises(redis.RedisError):
bad_connection.info()
pool = bad_connection.connection_pool
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise()
def test_busy_loading_disconnects_socket(self, r):
"""
If Redis raises a LOADING error, the connection should be
disconnected and a BusyLoadingError raised
"""
with pytest.raises(redis.BusyLoadingError):
r.execute_command("DEBUG", "ERROR", "LOADING fake message")
assert not r.connection._sock
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise()
def test_busy_loading_from_pipeline_immediate_command(self, r):
"""
BusyLoadingErrors should raise from Pipelines that execute a
command immediately, like WATCH does.
"""
pipe = r.pipeline()
with pytest.raises(redis.BusyLoadingError):
pipe.immediate_execute_command("DEBUG", "ERROR", "LOADING fake message")
pool = r.connection_pool
assert pipe.connection
assert pipe.connection in pool._in_use_connections
assert not pipe.connection._sock
@pytest.mark.onlynoncluster
@skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise()
def test_busy_loading_from_pipeline(self, r):
"""
BusyLoadingErrors should be raised from a pipeline execution
regardless of the raise_on_error flag.
"""
pipe = r.pipeline()
pipe.execute_command("DEBUG", "ERROR", "LOADING fake message")
with pytest.raises(redis.BusyLoadingError):
pipe.execute()
pool = r.connection_pool
assert not pipe.connection
assert len(pool._available_connections) == 1
assert not pool._available_connections[0]._sock
@skip_if_server_version_lt("2.8.8")
@skip_if_redis_enterprise()
def test_read_only_error(self, r):
"READONLY errors get turned into ReadOnlyError exceptions"
with pytest.raises(redis.ReadOnlyError):
r.execute_command("DEBUG", "ERROR", "READONLY blah blah")
def test_oom_error(self, r):
"OOM errors get turned into OutOfMemoryError exceptions"
with pytest.raises(redis.OutOfMemoryError):
# note: don't use the DEBUG OOM command since it's not the same
# as the db being full
r.execute_command("DEBUG", "ERROR", "OOM blah blah")
def test_connect_from_url_tcp(self):
connection = redis.Redis.from_url("redis://localhost:6379?db=0")
pool = connection.connection_pool
assert re.match(
r"< .*?([^\.]+) \( < .*?([^\.]+) \( (.+) \) > \) >", repr(pool), re.VERBOSE
).groups() == (
"ConnectionPool",
"Connection",
"db=0,host=localhost,port=6379",
)
def test_connect_from_url_unix(self):
connection = redis.Redis.from_url("unix:///path/to/socket")
pool = connection.connection_pool
assert re.match(
r"< .*?([^\.]+) \( < .*?([^\.]+) \( (.+) \) > \) >", repr(pool), re.VERBOSE
).groups() == (
"ConnectionPool",
"UnixDomainSocketConnection",
"path=/path/to/socket",
)
@skip_if_redis_enterprise()
def test_connect_no_auth_configured(self, r):
"""
AuthenticationError should be raised when the server is not configured with auth
but credentials are supplied by the user.
"""
# Redis < 6
with pytest.raises(redis.AuthenticationError):
r.execute_command(
"DEBUG", "ERROR", "ERR Client sent AUTH, but no password is set"
)
# Redis >= 6
with pytest.raises(redis.AuthenticationError):
r.execute_command(
"DEBUG",
"ERROR",
"ERR AUTH <password> called without any password "
"configured for the default user. Are you sure "
"your configuration is correct?",
)
@skip_if_redis_enterprise()
def test_connect_invalid_auth_credentials_supplied(self, r):
"""
AuthenticationError should be raised when sending invalid username/password
"""
# Redis < 6
with pytest.raises(redis.AuthenticationError):
r.execute_command("DEBUG", "ERROR", "ERR invalid password")
# Redis >= 6
with pytest.raises(redis.AuthenticationError):
r.execute_command("DEBUG", "ERROR", "WRONGPASS")
@pytest.mark.onlynoncluster
| TestConnection |
python | getsentry__sentry | fixtures/safe_migrations_apps/bad_flow_delete_model_app/migrations/0002_delete_model.py | {
"start": 145,
"end": 362
} | class ____(CheckedMigration):
dependencies = [
("bad_flow_delete_model_app", "0001_initial"),
]
operations = [
migrations.DeleteModel(
name="TestTable",
),
]
| Migration |
python | huggingface__transformers | tests/models/mistral/test_modeling_mistral.py | {
"start": 2189,
"end": 16776
} | class ____(unittest.TestCase):
# This variable is used to determine which accelerator are we using for our runners (e.g. A10 or T4)
# Depending on the hardware we get different logits / generations
device_properties: DeviceProperties = (None, None, None)
@classmethod
def setUpClass(cls):
cls.device_properties = get_device_properties()
def setUp(self):
cleanup(torch_device, gc_collect=True)
def tearDown(self):
cleanup(torch_device, gc_collect=True)
@slow
def test_model_7b_logits(self):
input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto", dtype=torch.float16)
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
with torch.no_grad():
out = model(input_ids).logits.float().cpu()
# Expected mean on dim = -1
EXPECTED_MEAN = torch.tensor([[-2.5548, -2.5737, -3.0600, -2.5906, -2.8478, -2.8118, -2.9325, -2.7694]])
torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2)
# ("cuda", 8) for A100/A10, and ("cuda", 7) 7 for T4.
# considering differences in hardware processing and potential deviations in output.
# fmt: off
EXPECTED_SLICES = Expectations(
{
("cuda", 7): torch.tensor([-5.8828, -5.8633, -0.1042, -4.7266, -5.8828, -5.8789, -5.8789, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -1.0801, 1.7598, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828, -5.8828]),
("cuda", 8): torch.tensor([-5.8711, -5.8555, -0.1050, -4.7148, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -1.0781, 1.7568, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711, -5.8711]),
("rocm", 9): torch.tensor([-5.8750, -5.8594, -0.1047, -4.7188, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -1.0781, 1.7578, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750, -5.8750]),
}
)
# fmt: on
expected_slice = EXPECTED_SLICES.get_expectation()
torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-4, rtol=1e-4)
@slow
@require_bitsandbytes
def test_model_7b_generation(self):
EXPECTED_TEXT_COMPLETION = "My favourite condiment is 100% ketchup. I’m not a fan of mustard, mayo,"
prompt = "My favourite condiment is "
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
model = MistralForCausalLM.from_pretrained(
"mistralai/Mistral-7B-v0.1",
device_map={"": torch_device},
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
# TODO joao, manuel: remove this in v4.62.0
@slow
def test_model_7b_dola_generation(self):
# ground truth text generated with dola_layers="low", repetition_penalty=1.2
EXPECTED_TEXT_COMPLETION = (
"""My favourite condiment is 100% ketchup. I love it on everything, and I’m not ash"""
)
prompt = "My favourite condiment is "
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto", dtype=torch.float16)
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(
input_ids,
max_new_tokens=20,
temperature=0,
dola_layers="low",
repetition_penalty=1.2,
trust_remote_code=True,
custom_generate="transformers-community/dola",
)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
del model
backend_empty_cache(torch_device)
gc.collect()
@require_flash_attn
@require_bitsandbytes
@slow
@pytest.mark.flash_attn_test
def test_model_7b_long_prompt(self):
EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
# An input with 4097 tokens that is above the size of the sliding window
input_ids = [1] + [306, 338] * 2048
model = MistralForCausalLM.from_pretrained(
"mistralai/Mistral-7B-v0.1",
device_map={"": torch_device},
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
attn_implementation="flash_attention_2",
)
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
# Assisted generation
assistant_model = model
assistant_model.generation_config.num_assistant_tokens = 2
assistant_model.generation_config.num_assistant_tokens_schedule = "constant"
generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
@slow
def test_model_7b_long_prompt_sdpa(self):
EXPECTED_OUTPUT_TOKEN_IDS = [306, 338]
# An input with 4097 tokens that is above the size of the sliding window
input_ids = [1] + [306, 338] * 2048
model = MistralForCausalLM.from_pretrained(
"mistralai/Mistral-7B-v0.1", device_map="auto", attn_implementation="sdpa", dtype=torch.float16
)
input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device)
generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
# Assisted generation
assistant_model = model
assistant_model.generation_config.num_assistant_tokens = 2
assistant_model.generation_config.num_assistant_tokens_schedule = "constant"
generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0)
self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist())
del assistant_model
backend_empty_cache(torch_device)
gc.collect()
EXPECTED_TEXT_COMPLETION = """My favourite condiment is 100% ketchup. I love it on everything. I’m not a big"""
prompt = "My favourite condiment is "
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@slow
def test_speculative_generation(self):
EXPECTED_TEXT_COMPLETION = "My favourite condiment is 100% ketchup. I’m not a fan of mustard, relish"
prompt = "My favourite condiment is "
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
model = MistralForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto", dtype=torch.float16)
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device)
# greedy generation outputs
set_seed(0)
generated_ids = model.generate(
input_ids, max_new_tokens=20, do_sample=True, temperature=0.3, assistant_model=model
)
text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
@pytest.mark.torch_compile_test
@slow
def test_compile_static_cache(self):
# `torch==2.2` will throw an error on this test (as in other compilation tests), but torch==2.1.2 and torch>2.2
# work as intended. See https://github.com/pytorch/pytorch/issues/121943
if version.parse(torch.__version__) < version.parse("2.3.0"):
self.skipTest(reason="This test requires torch >= 2.3 to run.")
if self.device_properties[0] == "cuda" and self.device_properties[1] == 7:
self.skipTest(reason="This test is failing (`torch.compile` fails) on Nvidia T4 GPU.")
NUM_TOKENS_TO_GENERATE = 40
EXPECTED_TEXT_COMPLETION = [
"My favourite condiment is 100% ketchup. I love it on everything. "
"I’m not a big fan of mustard, mayo, or relish. I’m not a fan of pickles"
]
prompts = ["My favourite condiment is "]
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", use_fast=False)
tokenizer.pad_token = tokenizer.eos_token
model = MistralForCausalLM.from_pretrained(
"mistralai/Mistral-7B-v0.1", device_map=torch_device, dtype=torch.float16
)
inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
# Dynamic Cache
generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False)
dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text)
# Static Cache
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
)
static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text)
# Sliding Window Cache
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="sliding_window"
)
static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text)
# Static Cache + compile
forward_function = model.__call__
model.__call__ = torch.compile(forward_function, mode="reduce-overhead", fullgraph=True)
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static"
)
static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text)
# Sliding Window Cache + compile
torch._dynamo.reset()
model.__call__ = torch.compile(forward_function, mode="reduce-overhead", fullgraph=True)
generated_ids = model.generate(
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="sliding_window"
)
static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text)
@pytest.mark.flash_attn_test
@parameterized.expand([("flash_attention_2",), ("sdpa",), ("flex_attention",), ("eager",)])
@require_flash_attn
@slow
def test_generation_beyond_sliding_window_dynamic(self, attn_implementation: str):
"""Test that we can correctly generate beyond the sliding window. This is non-trivial as Mistral will use
a DynamicCache with only sliding layers."""
# Impossible to test it with this model (even with < 100 tokens), probably due to the compilation of a large model.
if attn_implementation == "flex_attention":
self.skipTest(
reason="`flex_attention` gives `torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfMemoryError: out of resource: triton_tem_fused_0 Required: 147456 Hardware limit:101376 Reducing block sizes or `num_stages` may help.`"
)
model_id = "mistralai/Mistral-7B-v0.1"
EXPECTED_COMPLETIONS = [
"scenery, scenery, scenery, scenery, scenery,",
", green, yellow, orange, purple, pink, brown, black, white, gray, silver",
]
input_text = [
"This is a nice place. " * 682 + "I really enjoy the scenery,", # This has 4101 tokens, 15 more than 4096
"A list of colors: red, blue", # This will almost all be padding tokens
]
if attn_implementation == "eager":
input_text = input_text[:1]
tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left")
tokenizer.pad_token_id = tokenizer.eos_token_id
inputs = tokenizer(input_text, padding=True, return_tensors="pt").to(torch_device)
model = MistralForCausalLM.from_pretrained(
model_id, attn_implementation=attn_implementation, device_map=torch_device, dtype=torch.float16
)
# Make sure prefill is larger than sliding window
batch_size, input_size = inputs.input_ids.shape
self.assertTrue(input_size > model.config.sliding_window)
# Should already be Dynamic by default, but let's make sure!
out = model.generate(**inputs, max_new_tokens=20, cache_implementation="dynamic", return_dict_in_generate=True)
output_text = tokenizer.batch_decode(out.sequences[:batch_size, input_size:])
self.assertEqual(output_text, EXPECTED_COMPLETIONS[:batch_size])
# Let's check that the dynamic cache has hybrid layers!
dynamic_cache = out.past_key_values
self.assertTrue(isinstance(dynamic_cache, DynamicCache))
for layer in dynamic_cache.layers:
self.assertTrue(isinstance(layer, DynamicSlidingWindowLayer))
self.assertEqual(layer.keys.shape[-2], model.config.sliding_window - 1)
@slow
@require_torch_accelerator
| MistralIntegrationTest |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_ipv4.py | {
"start": 671,
"end": 1652
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_ipv4"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_ipv4(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidIPv4 |
python | chardet__chardet | chardet/codingstatemachine.py | {
"start": 1208,
"end": 3666
} | class ____:
"""
A state machine to verify a byte sequence for a particular encoding. For
each byte the detector receives, it will feed that byte to every active
state machine available, one byte at a time. The state machine changes its
state based on its previous state and the byte it receives. There are 3
states in a state machine that are of interest to an auto-detector:
START state: This is the state to start with, or a legal byte sequence
(i.e. a valid code point) for character has been identified.
ME state: This indicates that the state machine identified a byte sequence
that is specific to the charset it is designed for and that
there is no other possible encoding which can contain this byte
sequence. This will to lead to an immediate positive answer for
the detector.
ERROR state: This indicates the state machine identified an illegal byte
sequence for that encoding. This will lead to an immediate
negative answer for this encoding. Detector will exclude this
encoding from consideration from here on.
"""
def __init__(self, sm: CodingStateMachineDict) -> None:
self._model = sm
self._curr_byte_pos = 0
self._curr_char_len = 0
self._curr_state = MachineState.START
self.active = True
self.logger = logging.getLogger(__name__)
self.reset()
def reset(self) -> None:
self._curr_state = MachineState.START
def next_state(self, c: int) -> int:
# for each byte we get its class
# if it is first byte, we also get byte length
byte_class = self._model["class_table"][c]
if self._curr_state == MachineState.START:
self._curr_byte_pos = 0
self._curr_char_len = self._model["char_len_table"][byte_class]
# from byte's class and state_table, we get its next state
curr_state = self._curr_state * self._model["class_factor"] + byte_class
self._curr_state = self._model["state_table"][curr_state]
self._curr_byte_pos += 1
return self._curr_state
def get_current_charlen(self) -> int:
return self._curr_char_len
def get_coding_state_machine(self) -> str:
return self._model["name"]
@property
def language(self) -> str:
return self._model["language"]
| CodingStateMachine |
python | walkccc__LeetCode | solutions/1564. Put Boxes Into the Warehouse I/1564-2.py | {
"start": 0,
"end": 254
} | class ____:
def maxBoxesInWarehouse(self, boxes: list[int], warehouse: list[int]) -> int:
i = 0 # warehouse's index
for box in sorted(boxes, reverse=True):
if i < len(warehouse) and warehouse[i] >= box:
i += 1
return i
| Solution |
python | ray-project__ray | python/ray/util/check_serialize.py | {
"start": 413,
"end": 707
} | class ____:
def __init__(self, print_file):
self.level = 0
self.print_file = print_file
def indent(self):
return _indent(self)
def print(self, msg):
indent = " " * self.level
print(indent + msg, file=self.print_file)
@DeveloperAPI
| _Printer |
python | gevent__gevent | src/greentest/3.11/test_socket.py | {
"start": 17998,
"end": 18884
} | class ____(SocketTestBase, ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
| ThreadedSocketTestMixin |
python | getsentry__sentry | src/sentry/snuba/metrics/query_builder.py | {
"start": 29256,
"end": 50075
} | class ____:
#: Datasets actually implemented in snuba:
_implemented_datasets = {
"metrics_counters",
"metrics_distributions",
"metrics_sets",
"generic_metrics_counters",
"generic_metrics_distributions",
"generic_metrics_sets",
"generic_metrics_gauges",
}
def __init__(
self,
projects: Sequence[Project],
metrics_query: DeprecatingMetricsQuery,
use_case_id: UseCaseID,
):
self._projects = projects
self._metrics_query = metrics_query
self._org_id = metrics_query.org_id
self._use_case_id = use_case_id
self._alias_to_metric_field = {
field.alias: field for field in self._metrics_query.select if field.alias is not None
}
@overload
@staticmethod
def generate_snql_for_action_by_fields(
metric_action_by_field: MetricOrderByField,
use_case_id: UseCaseID,
org_id: int,
projects: Sequence[Project],
is_column: bool = False,
) -> list[OrderBy]: ...
@overload
@staticmethod
def generate_snql_for_action_by_fields(
metric_action_by_field: MetricActionByField,
use_case_id: UseCaseID,
org_id: int,
projects: Sequence[Project],
is_column: bool = False,
) -> Column | AliasedExpression | Function: ...
@staticmethod
def generate_snql_for_action_by_fields(
metric_action_by_field: MetricActionByField,
use_case_id: UseCaseID,
org_id: int,
projects: Sequence[Project],
is_column: bool = False,
) -> list[OrderBy] | Column | AliasedExpression | Function:
"""
Generates the necessary snql for any action by field which in our case will be group by and order by. This
function has been designed to share as much logic as possible, however, it should be refactored in case
the snql generation starts to diverge significantly.
"""
is_group_by = isinstance(metric_action_by_field, MetricGroupByField)
is_order_by = isinstance(metric_action_by_field, MetricOrderByField)
if not is_group_by and not is_order_by:
raise InvalidParams("The metric action must either be an order by or group by.")
if isinstance(metric_action_by_field.field, str):
# This transformation is currently supported only for group by because OrderBy doesn't support the Function type.
if is_group_by and metric_action_by_field.field == "transaction":
return transform_null_transaction_to_unparameterized(
use_case_id, org_id, metric_action_by_field.alias
)
# Handles the case when we are trying to group or order by `project` for example, but we want
# to translate it to `project_id` as that is what the metrics dataset understands.
if metric_action_by_field.field in FIELD_ALIAS_MAPPINGS:
column_name = FIELD_ALIAS_MAPPINGS[metric_action_by_field.field]
elif metric_action_by_field.field in FIELD_ALIAS_MAPPINGS.values():
column_name = metric_action_by_field.field
else:
# The support for tags in the order by is disabled for now because there is no need to have it. If the
# need arise, we will implement it.
if is_group_by:
assert isinstance(metric_action_by_field.field, str)
column_name = resolve_tag_key(use_case_id, org_id, metric_action_by_field.field)
else:
raise NotImplementedError(
f"Unsupported string field: {metric_action_by_field.field}"
)
exp = (
AliasedExpression(
exp=Column(name=column_name),
alias=metric_action_by_field.alias,
)
if is_group_by and not is_column
else Column(name=column_name)
)
if is_order_by:
# We return a list in order to use the "extend" method and reduce the number of changes across
# the codebase.
exp = [OrderBy(exp=exp, direction=metric_action_by_field.direction)]
return exp
elif isinstance(metric_action_by_field.field, MetricField):
try:
metric_expression = metric_object_factory(
metric_action_by_field.field.op, metric_action_by_field.field.metric_mri
)
if is_group_by:
return metric_expression.generate_groupby_statements(
use_case_id=use_case_id,
alias=metric_action_by_field.field.alias,
params=metric_action_by_field.field.params,
projects=projects,
)[0]
elif is_order_by:
return metric_expression.generate_orderby_clause(
use_case_id=use_case_id,
alias=metric_action_by_field.field.alias,
params=metric_action_by_field.field.params,
projects=projects,
direction=metric_action_by_field.direction,
)
else:
raise NotImplementedError(
f"Unsupported metric field: {metric_action_by_field.field}"
)
except IndexError:
raise InvalidParams(f"Cannot resolve {metric_action_by_field.field} into SnQL")
else:
raise NotImplementedError(
f"Unsupported {"group by" if is_group_by else "order by" if is_order_by else "None"} field: {metric_action_by_field.field} needs to be either a MetricField or a string"
)
def _build_where(self) -> list[BooleanCondition | Condition]:
where: list[BooleanCondition | Condition] = [
Condition(Column("org_id"), Op.EQ, self._org_id),
Condition(Column("project_id"), Op.IN, self._metrics_query.project_ids),
]
where += self._build_timeframe()
if not self._metrics_query.where:
return where
snuba_conditions = []
# Adds filters that do not need to be resolved because they are instances of `MetricConditionField`
metric_condition_filters = []
for condition in self._metrics_query.where:
if isinstance(condition, MetricConditionField):
metric_expression = metric_object_factory(
condition.lhs.op, condition.lhs.metric_mri
)
try:
metric_condition_filters.append(
Condition(
lhs=metric_expression.generate_where_statements(
use_case_id=self._use_case_id,
params=condition.lhs.params,
projects=self._projects,
alias=condition.lhs.alias,
)[0],
op=condition.op,
rhs=(
resolve_tag_value(self._use_case_id, self._org_id, condition.rhs)
if require_rhs_condition_resolution(condition.lhs.op)
else condition.rhs
),
)
)
except IndexError:
raise InvalidParams(f"Cannot resolve {condition.lhs} into SnQL")
else:
snuba_conditions.append(condition)
if metric_condition_filters:
where.extend(metric_condition_filters)
filter_ = resolve_tags(self._use_case_id, self._org_id, snuba_conditions, self._projects)
if filter_:
where.extend(filter_)
return where
def _build_timeframe(self) -> list[BooleanCondition | Condition]:
"""
Builds the timeframe of the query, comprehending the `start` and `end` intervals.
"""
where = []
if self._metrics_query.start:
where.append(
Condition(Column(get_timestamp_column_name()), Op.GTE, self._metrics_query.start)
)
if self._metrics_query.end:
where.append(
Condition(Column(get_timestamp_column_name()), Op.LT, self._metrics_query.end)
)
return where
def _build_groupby(self) -> list[Column] | None:
if self._metrics_query.groupby is None:
return None
groupby_cols = []
for metric_groupby_obj in self._metrics_query.groupby or []:
groupby_cols.append(
self.generate_snql_for_action_by_fields(
metric_action_by_field=metric_groupby_obj,
use_case_id=self._use_case_id,
org_id=self._org_id,
projects=self._projects,
)
)
return groupby_cols
def _build_orderby(self) -> list[OrderBy] | None:
if self._metrics_query.orderby is None:
return None
orderby_fields: list[OrderBy] = []
for metric_order_by_obj in self._metrics_query.orderby:
orderby_fields.extend(
self.generate_snql_for_action_by_fields(
metric_action_by_field=metric_order_by_obj,
use_case_id=self._use_case_id,
org_id=self._org_id,
projects=self._projects,
is_column=True,
)
)
return orderby_fields
def _build_having(self) -> list[BooleanCondition | Condition]:
"""
This function makes a lot of assumptions about what the HAVING clause allows, mostly
because HAVING is not a fully supported function of metrics.
It is assumed that the having clause is a list of simple conditions, where the LHS is an aggregated
metric e.g. p50(duration) and the RHS is a literal value being compared too.
"""
resolved_having = []
if not self._metrics_query.having:
return []
for condition in self._metrics_query.having:
lhs_expression = condition.lhs
if isinstance(lhs_expression, Function):
metric = lhs_expression.parameters[0]
assert isinstance(metric, Column)
metrics_field_obj = metric_object_factory(lhs_expression.function, metric.name)
resolved_lhs = metrics_field_obj.generate_select_statements(
projects=self._projects,
use_case_id=self._use_case_id,
alias=lhs_expression.alias,
params=None,
)
resolved_having.append(Condition(resolved_lhs[0], condition.op, condition.rhs))
else:
resolved_having.append(condition)
return resolved_having
def __build_totals_and_series_queries(
self,
entity,
select,
where,
having,
groupby,
orderby,
limit,
offset,
rollup,
intervals_len,
):
rv = {}
totals_query = Query(
match=Entity(entity),
groupby=groupby,
select=select,
where=where,
having=having,
limit=limit,
offset=offset or None,
granularity=rollup,
orderby=orderby,
)
if self._metrics_query.include_totals:
rv["totals"] = totals_query
if self._metrics_query.include_series:
series_limit = limit.limit * intervals_len
if self._metrics_query.max_limit:
series_limit = self._metrics_query.max_limit
if self._use_case_id in [UseCaseID.TRANSACTIONS, UseCaseID.SPANS]:
time_groupby_column = self.__generate_time_groupby_column_for_discover_queries(
self._metrics_query.interval
)
else:
time_groupby_column = Column(TS_COL_GROUP)
rv["series"] = totals_query.set_limit(series_limit).set_groupby(
list(totals_query.groupby or []) + [time_groupby_column]
)
return rv
@staticmethod
def __generate_time_groupby_column_for_discover_queries(interval: int) -> Function:
return Function(
function="toStartOfInterval",
parameters=[
Column(name=get_timestamp_column_name()),
Function(
function="toIntervalSecond",
parameters=[interval],
alias=None,
),
"Universal",
],
alias=TS_COL_GROUP,
)
def __update_query_dicts_with_component_entities(
self,
component_entities: dict[MetricEntity, Sequence[str]],
metric_mri_to_obj_dict: dict[tuple[str | None, str, str], MetricExpressionBase],
fields_in_entities: dict[MetricEntity, list[tuple[str | None, str, str]]],
parent_alias,
) -> dict[tuple[str | None, str, str], MetricExpressionBase]:
# At this point in time, we are only supporting raw metrics in the metrics attribute of
# any instance of DerivedMetric, and so in this case the op will always be None
# ToDo(ahmed): In future PR, we might want to allow for dependency metrics to also have an
# an aggregate and in this case, we would need to parse the op here
op = None
for entity, metric_mris in component_entities.items():
for metric_mri in metric_mris:
# The constituents of an instance of CompositeEntityDerivedMetric will have a reference to their parent
# alias so that we are able to distinguish the constituents in case we have naming collisions that could
# potentially occur from requesting the same CompositeEntityDerivedMetric multiple times with different
# params. This means that if parent composite metric alias is for example sessions_errored, and it has
# a constituent `e:sessions/error.unique@none` then that constituent will be aliased as
# `e:sessions/error.unique@none__CHILD_OF__sessions_errored`
metric_key = (
op,
metric_mri,
f"{metric_mri}{COMPOSITE_ENTITY_CONSTITUENT_ALIAS}{parent_alias}",
)
if metric_key not in metric_mri_to_obj_dict:
metric_mri_to_obj_dict[metric_key] = metric_object_factory(op, metric_mri)
fields_in_entities.setdefault(entity, []).append(metric_key)
return metric_mri_to_obj_dict
def get_snuba_queries(self):
metric_mri_to_obj_dict: dict[tuple[str | None, str, str], MetricExpressionBase] = {}
fields_in_entities: dict[MetricEntity, list[tuple[str | None, str, str]]] = {}
for select_field in self._metrics_query.select:
metric_field_obj = metric_object_factory(select_field.op, select_field.metric_mri)
# `get_entity` is called the first, to fetch the entities of constituent metrics,
# and validate especially in the case of SingularEntityDerivedMetric that it is
# actually composed of metrics that belong to the same entity
try:
# When we get to an instance of a MetricFieldBase where the entity is an
# instance of dict, we know it is from a composite entity derived metric, and
# we need to traverse down the constituent metrics dependency tree until we get
# to instances of SingleEntityDerivedMetric, and add those to our queries so
# that we are able to generate the original CompositeEntityDerivedMetric later
# on as a result of a post query operation on the results of the constituent
# SingleEntityDerivedMetric instances
component_entities = metric_field_obj.get_entity(
projects=self._projects, use_case_id=self._use_case_id
)
if isinstance(component_entities, dict):
# In this case, component_entities is a dictionary with entity keys and
# lists of metric_mris as values representing all the entities and
# metric_mris combination that this metric_object is composed of, or rather
# the instances of SingleEntityDerivedMetric that it is composed of
metric_mri_to_obj_dict = self.__update_query_dicts_with_component_entities(
component_entities=component_entities,
metric_mri_to_obj_dict=metric_mri_to_obj_dict,
fields_in_entities=fields_in_entities,
parent_alias=select_field.alias,
)
continue
elif isinstance(component_entities, str):
entity = component_entities
else:
raise DerivedMetricParseException("Entity parsed is in incorrect format")
except MetricDoesNotExistException:
# If we get here, it means that one or more of the constituent metrics for a
# derived metric does not exist, and so no further attempts to query that derived
# metric will be made, and the field value will be set to the default value in
# the response
continue
if entity not in self._implemented_datasets:
raise NotImplementedError(f"Dataset not yet implemented: {entity}")
metric_mri_to_obj_dict[
(select_field.op, select_field.metric_mri, select_field.alias)
] = metric_field_obj
fields_in_entities.setdefault(entity, []).append(
(select_field.op, select_field.metric_mri, select_field.alias)
)
where = self._build_where()
groupby = self._build_groupby()
queries_dict = {}
for entity, fields in fields_in_entities.items():
select = []
metric_ids_set = set()
for field in fields:
metric_field_obj = metric_mri_to_obj_dict[field]
try:
params = self._alias_to_metric_field[field[2]].params
except KeyError:
params = None
# In order to support on demand metrics which require an interval (e.g. epm),
# we want to pass the interval down via params so we can pass it to the associated snql_factory
params = {"interval": self._metrics_query.interval, **(params or {})}
select += metric_field_obj.generate_select_statements(
projects=self._projects,
use_case_id=self._use_case_id,
alias=field[2],
params=params,
)
metric_ids_set |= metric_field_obj.generate_metric_ids(
self._projects, self._use_case_id
)
where_for_entity = [
Condition(
Column("metric_id"),
Op.IN,
list(metric_ids_set),
),
]
orderby = self._build_orderby()
having = self._build_having()
# Functionally [] and None will be the same and the same applies for Offset(0) and None.
queries_dict[entity] = self.__build_totals_and_series_queries(
entity=entity,
select=select,
where=where + where_for_entity,
having=having,
groupby=groupby, # Empty group by is set to None.
orderby=orderby, # Empty order by is set to None.
limit=self._metrics_query.limit,
offset=self._metrics_query.offset, # No offset is set to None.
rollup=self._metrics_query.granularity,
intervals_len=get_num_intervals(
self._metrics_query.start,
self._metrics_query.end,
self._metrics_query.granularity.granularity,
interval=self._metrics_query.interval,
),
)
return queries_dict, fields_in_entities
| SnubaQueryBuilder |
python | PyCQA__pycodestyle | testing/data/W29.py | {
"start": 43,
"end": 337
} | class ____(object):
bang = 12
#: W291:2:35
'''multiline
string with trailing whitespace'''
#: W291 W292 noeol
x = 1
#: W191 W292 noeol
if False:
pass # indented with tabs
#: W292:1:5 E225:1:2 noeol
1+ 1
#: W292:1:27 E261:1:12 noeol
import this # no line feed
#: W292:3:22 noeol
| Foo |
python | streamlit__streamlit | lib/tests/streamlit/runtime/caching/cache_errors_test.py | {
"start": 997,
"end": 2876
} | class ____(DeltaGeneratorTestCase):
"""Make sure user-visible error messages look correct.
These errors are a little annoying to test, but they're important! So we
are testing them word-for-word as much as possible. Even though this
*feels* like an antipattern, it isn't: we're making sure the codepaths
that pull useful debug info from the code are working.
"""
maxDiff = None
def test_unhashable_type(self):
@st.cache_data
def unhashable_type_func(lock: threading.Lock):
return str(lock)
with pytest.raises(UnhashableParamError) as cm:
unhashable_type_func(threading.Lock())
ep = ExceptionProto()
exception.marshall(ep, cm.value)
assert ep.type == "UnhashableParamError"
expected_message = """
Cannot hash argument 'lock' (of type `_thread.lock`) in 'unhashable_type_func'.
To address this, you can tell Streamlit not to hash this argument by adding a
leading underscore to the argument's name in the function signature:
```
@st.cache_data
def unhashable_type_func(_lock, ...):
...
```
"""
assert testutil.normalize_md(expected_message) == testutil.normalize_md(
ep.message
)
assert ep.message_is_markdown
assert not ep.is_warning
def test_unserializable_return_value_error(self):
@st.cache_data
def unserializable_return_value_func():
return threading.Lock()
with pytest.raises(UnserializableReturnValueError) as cm:
unserializable_return_value_func()
ep = ExceptionProto()
exception.marshall(ep, cm.value)
assert ep.type == "UnserializableReturnValueError"
assert "Cannot serialize the return value" in ep.message
assert ep.message_is_markdown
assert not ep.is_warning
| CacheErrorsTest |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/kernel_tests/group_by_reducer_test.py | {
"start": 7693,
"end": 8563
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_dataset(self, components):
reducer = grouping.Reducer(
init_func=lambda _: np.int64(0),
reduce_func=lambda x, y: x + y,
finalize_func=lambda x: x)
return dataset_ops.Dataset.from_tensor_slices(components).apply(
grouping.group_by_reducer(lambda x: x % 5, reducer))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def test(self, verify_fn):
components = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.int64)
verify_fn(
self,
lambda: self._build_dataset(components),
num_outputs=5)
if __name__ == "__main__":
test.main()
| GroupByReducerCheckpointTest |
python | dagster-io__dagster | python_modules/libraries/dagster-fivetran/dagster_fivetran_tests/deprecated/test_load_from_instance.py | {
"start": 591,
"end": 3613
} | class ____(DagsterFivetranTranslator):
def get_asset_spec(self, props: FivetranConnectorTableProps) -> AssetSpec:
asset_spec = super().get_asset_spec(props)
return asset_spec.replace_attributes(
key=asset_spec.key.with_prefix("my_prefix"),
metadata={"foo": "bar", **asset_spec.metadata},
group_name="custom_group_name",
)
@responses.activate
@pytest.mark.parametrize(
("translator, custom_prefix, custom_metadata, custom_group_name"),
[
(DagsterFivetranTranslator, [], {}, None),
(CustomDagsterFivetranTranslator, ["my_prefix"], {"foo": "bar"}, "custom_group_name"),
],
)
def test_load_from_instance_with_translator(
translator, custom_prefix, custom_metadata, custom_group_name
) -> None:
with environ({"FIVETRAN_API_KEY": "some_key", "FIVETRAN_API_SECRET": "some_secret"}):
ft_resource = FivetranResource(
api_key=EnvVar("FIVETRAN_API_KEY"), api_secret=EnvVar("FIVETRAN_API_SECRET")
)
mock_responses(ft_resource)
ft_cacheable_assets = load_assets_from_fivetran_instance(
ft_resource,
poll_interval=10,
poll_timeout=600,
translator=translator,
)
ft_assets = ft_cacheable_assets.build_definitions(
ft_cacheable_assets.compute_cacheable_data()
)
# Create set of expected asset keys
tables = {
AssetKey([*custom_prefix, "xyz1", "abc2"]),
AssetKey([*custom_prefix, "xyz1", "abc1"]),
AssetKey([*custom_prefix, "abc", "xyz"]),
}
# Check schema metadata is added correctly to asset def
assets_def = ft_assets[0]
assert any(
metadata.get("dagster/column_schema")
== (
TableSchema(
columns=[
TableColumn(name="column_1", type=""),
TableColumn(name="column_2", type=""),
TableColumn(name="column_3", type=""),
]
)
)
for key, metadata in assets_def.metadata_by_key.items()
), str(assets_def.metadata_by_key)
for key, metadata in assets_def.metadata_by_key.items():
assert metadata.get("dagster/table_name") == (
"example_database." + ".".join(key.path[-2:])
)
assert has_kind(assets_def.tags_by_key[key], "snowflake")
for key, value in custom_metadata.items():
assert all(metadata[key] == value for metadata in assets_def.metadata_by_key.values())
assert ft_assets[0].keys == tables
assert all(
[
ft_assets[0].group_names_by_key.get(t)
== (custom_group_name or "some_service_some_name")
for t in tables
]
), str(ft_assets[0].group_names_by_key)
assert len(ft_assets[0].op.output_defs) == len(tables)
| CustomDagsterFivetranTranslator |
python | doocs__leetcode | solution/2600-2699/2657.Find the Prefix Common Array of Two Arrays/Solution.py | {
"start": 0,
"end": 352
} | class ____:
def findThePrefixCommonArray(self, A: List[int], B: List[int]) -> List[int]:
ans = []
cnt1 = Counter()
cnt2 = Counter()
for a, b in zip(A, B):
cnt1[a] += 1
cnt2[b] += 1
t = sum(min(v, cnt2[x]) for x, v in cnt1.items())
ans.append(t)
return ans
| Solution |
python | sphinx-doc__sphinx | sphinx/domains/python/__init__.py | {
"start": 1597,
"end": 1699
} | class ____(NamedTuple):
docname: str
node_id: str
objtype: str
aliased: bool
| ObjectEntry |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-jinaai-rerank/llama_index/postprocessor/jinaai_rerank/base.py | {
"start": 666,
"end": 3716
} | class ____(BaseNodePostprocessor):
api_url: str = Field(
default=f"{DEFAULT_JINA_AI_API_URL}/rerank",
description="The URL of the JinaAI Rerank API.",
)
api_key: str = Field(default=None, description="The JinaAI API key.")
model: str = Field(
default="jina-reranker-v1-base-en",
description="The model to use when calling Jina AI API",
)
top_n: int = Field(description="Top N nodes to return.")
_session: Any = PrivateAttr()
def __init__(
self,
top_n: int = 2,
model: str = "jina-reranker-v1-base-en",
base_url: str = DEFAULT_JINA_AI_API_URL,
api_key: Optional[str] = None,
):
super().__init__(top_n=top_n, model=model)
self.api_url = f"{base_url}/rerank"
self.api_key = get_from_param_or_env("api_key", api_key, "JINAAI_API_KEY", "")
self.model = model
self._session = requests.Session()
self._session.headers.update(
{"Authorization": f"Bearer {self.api_key}", "Accept-Encoding": "identity"}
)
@classmethod
def class_name(cls) -> str:
return "JinaRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.model,
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.model,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
texts = [
node.node.get_content(metadata_mode=MetadataMode.EMBED)
for node in nodes
]
resp = self._session.post( # type: ignore
self.api_url,
json={
"query": query_bundle.query_str,
"documents": texts,
"model": self.model,
"top_n": self.top_n,
},
).json()
if "results" not in resp:
raise RuntimeError(resp["detail"])
results = resp["results"]
new_nodes = []
for result in results:
new_node_with_score = NodeWithScore(
node=nodes[result["index"]].node, score=result["relevance_score"]
)
new_nodes.append(new_node_with_score)
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
| JinaRerank |
python | google__pytype | pytype/pytd/visitors.py | {
"start": 40115,
"end": 42132
} | class ____(Visitor):
"""Visitor for verifying pytd ASTs. For tests."""
_all_templates: set[pytd.Node]
def __init__(self):
super().__init__()
self._valid_param_name = re.compile(r"[a-zA-Z_]\w*$")
def _AssertNoDuplicates(self, node, attrs):
"""Checks that we don't have duplicate top-level names."""
get_set = lambda attr: {entry.name for entry in getattr(node, attr)}
attr_to_set = {attr: get_set(attr) for attr in attrs}
# Do a quick sanity check first, and a deeper check if that fails.
total1 = len(set.union(*attr_to_set.values())) # all distinct names
total2 = sum(map(len, attr_to_set.values()), 0) # all names
if total1 != total2:
for a1, a2 in itertools.combinations(attrs, 2):
both = attr_to_set[a1] & attr_to_set[a2]
if both:
raise AssertionError(
f"Duplicate name(s) {list(both)} in both {a1} and {a2}"
)
def EnterTypeDeclUnit(self, node):
self._AssertNoDuplicates(
node, ["constants", "type_params", "classes", "functions", "aliases"]
)
self._all_templates = set()
def LeaveTypeDeclUnit(self, node):
declared_type_params = {n.name for n in node.type_params}
for t in self._all_templates:
if t.name not in declared_type_params:
raise AssertionError(
"Type parameter %r used, but not declared. "
"Did you call AdjustTypeParameters?"
% t.name
)
def EnterClass(self, node):
self._AssertNoDuplicates(node, ["methods", "constants"])
def EnterFunction(self, node):
assert node.signatures, node
def EnterSignature(self, node):
assert isinstance(node.has_optional, bool), node
def EnterTemplateItem(self, node):
self._all_templates.add(node)
def EnterParameter(self, node):
assert self._valid_param_name.match(node.name), node.name
def EnterCallableType(self, node):
self.EnterGenericType(node)
def EnterGenericType(self, node):
assert node.parameters, node
| VerifyVisitor |
python | streamlit__streamlit | lib/streamlit/runtime/context.py | {
"start": 2331,
"end": 3085
} | class ____(AttributeDictionary):
"""A dictionary-like object containing theme information.
This class extends the functionality of a standard dictionary to allow items
to be accessed via attribute-style dot notation in addition to the traditional
key-based access. If a dictionary item is accessed and is itself a dictionary,
it is automatically wrapped in another `AttributeDictionary`, enabling recursive
attribute-style access.
"""
type: Literal["dark", "light"] | None
def __init__(self, theme_info: dict[str, str | None]):
super().__init__(theme_info)
@classmethod
def from_context_info(cls, context_dict: dict[str, str | None]) -> StreamlitTheme:
return cls(context_dict)
| StreamlitTheme |
python | pallets__quart | src/quart/ctx.py | {
"start": 4055,
"end": 5936
} | class ____(_BaseRequestWebsocketContext):
"""The context relating to the specific request, bound to the current task.
Do not use directly, prefer the
:func:`~quart.Quart.request_context` or
:func:`~quart.Quart.test_request_context` instead.
Attributes:
_after_request_functions: List of functions to execute after the current
request, see :func:`after_this_request`.
"""
def __init__(
self,
app: Quart,
request: Request,
session: SessionMixin | None = None,
) -> None:
super().__init__(app, request, session)
self.flashes = None
self._after_request_functions: list[AfterRequestCallable] = []
@property
def request(self) -> Request:
return cast(Request, self.request_websocket)
async def push(self) -> None:
await super()._push_appctx(_cv_request.set(self))
await super()._push()
async def pop(self, exc: BaseException | None = _sentinel) -> None: # type: ignore
try:
if len(self._cv_tokens) == 1:
if exc is _sentinel:
exc = sys.exc_info()[1]
await self.app.do_teardown_request(exc, self)
request_close = getattr(self.request_websocket, "close", None)
if request_close is not None:
await request_close()
finally:
ctx = _cv_request.get()
token, app_ctx = self._cv_tokens.pop()
_cv_request.reset(token)
if app_ctx is not None:
await app_ctx.pop(exc)
if ctx is not self:
raise AssertionError(
f"Popped wrong request context. ({ctx!r} instead of {self!r})"
)
async def __aenter__(self) -> RequestContext:
await self.push()
return self
| RequestContext |
python | facebookresearch__faiss | tests/test_local_search_quantizer.py | {
"start": 9741,
"end": 10311
} | class ____(unittest.TestCase):
def test_training(self):
"""check that the error is in the same ballpark as PQ."""
ds = datasets.SyntheticDataset(32, 3000, 3000, 0)
xt = ds.get_train()
xb = ds.get_database()
M = 4
nbits = 4
lsq = faiss.LocalSearchQuantizer(ds.d, M, nbits)
lsq.train(xt)
err_lsq = eval_codec(lsq, xb)
pq = faiss.ProductQuantizer(ds.d, M, nbits)
pq.train(xt)
err_pq = eval_codec(pq, xb)
self.assertLess(err_lsq, err_pq)
| TestLocalSearchQuantizer |
python | allegroai__clearml | clearml/debugging/timer.py | {
"start": 1915,
"end": 3621
} | class ____(object):
def __init__(self) -> None:
self._timers = {}
def add_timers(self, *names: Any) -> None:
for name in names:
self.add_timer(name)
def add_timer(self, name: str, timer: Timer = None) -> Timer:
if name in self._timers:
raise ValueError("timer %s already exists" % name)
timer = timer or Timer()
self._timers[name] = timer
return timer
def get_timer(self, name: str, default: Optional[Timer] = None) -> Optional[Timer]:
return self._timers.get(name, default)
def get_timers(self) -> Dict[str, Timer]:
return self._timers
def _call_timer(
self,
name: str,
callable: Callable[[Timer], Any],
silent_fail: bool = False,
) -> Any:
try:
return callable(self._timers[name])
except KeyError:
if not silent_fail:
six.reraise(*sys.exc_info())
def reset_timers(self, *names: Any) -> None:
for name in names:
self._call_timer(name, lambda t: t.reset())
def reset_average_timers(self, *names: Any) -> None:
for name in names:
self._call_timer(name, lambda t: t.reset_average())
def tic_timers(self, *names: Any) -> None:
for name in names:
self._call_timer(name, lambda t: t.tic())
def toc_timers(self, *names: Any) -> List[float]:
return [self._call_timer(name, lambda t: t.toc()) for name in names]
def toc_with_reset_timer(self, name: str, average: bool = True, reset_if_calls: int = 1000) -> float:
return self._call_timer(name, lambda t: t.toc_with_reset(average, reset_if_calls))
| TimersMixin |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 156183,
"end": 162790
} | class ____(ParserElement):
"""Abstract subclass of ParserElement, for combining and
post-processing parsed tokens.
"""
def __init__(
self, exprs: typing.Iterable[ParserElement], savelist: bool = False
) -> None:
super().__init__(savelist)
self.exprs: list[ParserElement]
if isinstance(exprs, _generatorType):
exprs = list(exprs)
if isinstance(exprs, str_type):
self.exprs = [self._literalStringClass(exprs)]
elif isinstance(exprs, ParserElement):
self.exprs = [exprs]
elif isinstance(exprs, Iterable):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if any(isinstance(expr, str_type) for expr in exprs):
exprs = (
self._literalStringClass(e) if isinstance(e, str_type) else e
for e in exprs
)
self.exprs = list(exprs)
else:
try:
self.exprs = list(exprs)
except TypeError:
self.exprs = [exprs]
self.callPreparse = False
def recurse(self) -> list[ParserElement]:
return self.exprs[:]
def append(self, other) -> ParserElement:
"""
Add an expression to the list of expressions related to this ParseExpression instance.
"""
self.exprs.append(other)
self._defaultName = None
return self
def leave_whitespace(self, recursive: bool = True) -> ParserElement:
"""
Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
all contained expressions.
"""
super().leave_whitespace(recursive)
if recursive:
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
e.leave_whitespace(recursive)
return self
def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
"""
Extends ``ignore_whitespace`` defined in base class, and also invokes ``ignore_whitespace`` on
all contained expressions.
"""
super().ignore_whitespace(recursive)
if recursive:
self.exprs = [e.copy() for e in self.exprs]
for e in self.exprs:
e.ignore_whitespace(recursive)
return self
def ignore(self, other) -> ParserElement:
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance(other, Suppress):
if other not in self.ignoreExprs:
super().ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
else:
super().ignore(other)
for e in self.exprs:
e.ignore(self.ignoreExprs[-1])
return self
def _generateDefaultName(self) -> str:
return f"{type(self).__name__}:({self.exprs})"
def streamline(self) -> ParserElement:
if self.streamlined:
return self
super().streamline()
for e in self.exprs:
e.streamline()
# collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)``
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for :class:`Or`'s and :class:`MatchFirst`'s)
if len(self.exprs) == 2:
other = self.exprs[0]
if (
isinstance(other, self.__class__)
and not other.parseAction
and other.resultsName is None
and not other.debug
):
self.exprs = other.exprs[:] + [self.exprs[1]]
self._defaultName = None
self._may_return_empty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if (
isinstance(other, self.__class__)
and not other.parseAction
and other.resultsName is None
and not other.debug
):
self.exprs = self.exprs[:-1] + other.exprs[:]
self._defaultName = None
self._may_return_empty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = f"Expected {self}"
return self
def validate(self, validateTrace=None) -> None:
warnings.warn(
"ParserElement.validate() is deprecated, and should not be used to check for left recursion",
DeprecationWarning,
stacklevel=2,
)
tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
for e in self.exprs:
e.validate(tmp)
self._checkRecursion([])
def copy(self) -> ParserElement:
"""
Returns a copy of this expression.
Generally only used internally by pyparsing.
"""
ret = super().copy()
ret = typing.cast(ParseExpression, ret)
ret.exprs = [e.copy() for e in self.exprs]
return ret
def _setResultsName(self, name, list_all_matches=False) -> ParserElement:
if not (
__diag__.warn_ungrouped_named_tokens_in_collection
and Diagnostics.warn_ungrouped_named_tokens_in_collection
not in self.suppress_warnings_
):
return super()._setResultsName(name, list_all_matches)
for e in self.exprs:
if (
isinstance(e, ParserElement)
and e.resultsName
and (
Diagnostics.warn_ungrouped_named_tokens_in_collection
not in e.suppress_warnings_
)
):
warning = (
"warn_ungrouped_named_tokens_in_collection:"
f" setting results name {name!r} on {type(self).__name__} expression"
f" collides with {e.resultsName!r} on contained expression"
)
warnings.warn(warning, stacklevel=3)
break
return super()._setResultsName(name, list_all_matches)
# Compatibility synonyms
# fmt: off
leaveWhitespace = replaced_by_pep8("leaveWhitespace", leave_whitespace)
ignoreWhitespace = replaced_by_pep8("ignoreWhitespace", ignore_whitespace)
# fmt: on
| ParseExpression |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/identity.py | {
"start": 830,
"end": 3432
} | class ____:
_wr: weakref.ref[IdentityMap]
_dict: Dict[_IdentityKeyType[Any], Any]
_modified: Set[InstanceState[Any]]
def __init__(self) -> None:
self._dict = {}
self._modified = set()
self._wr = weakref.ref(self)
def _kill(self) -> None:
self._add_unpresent = _killed # type: ignore
def all_states(self) -> List[InstanceState[Any]]:
raise NotImplementedError()
def contains_state(self, state: InstanceState[Any]) -> bool:
raise NotImplementedError()
def __contains__(self, key: _IdentityKeyType[Any]) -> bool:
raise NotImplementedError()
def safe_discard(self, state: InstanceState[Any]) -> None:
raise NotImplementedError()
def __getitem__(self, key: _IdentityKeyType[_O]) -> _O:
raise NotImplementedError()
def get(
self, key: _IdentityKeyType[_O], default: Optional[_O] = None
) -> Optional[_O]:
raise NotImplementedError()
def fast_get_state(
self, key: _IdentityKeyType[_O]
) -> Optional[InstanceState[_O]]:
raise NotImplementedError()
def keys(self) -> Iterable[_IdentityKeyType[Any]]:
return self._dict.keys()
def values(self) -> Iterable[object]:
raise NotImplementedError()
def replace(self, state: InstanceState[_O]) -> Optional[InstanceState[_O]]:
raise NotImplementedError()
def add(self, state: InstanceState[Any]) -> bool:
raise NotImplementedError()
def _fast_discard(self, state: InstanceState[Any]) -> None:
raise NotImplementedError()
def _add_unpresent(
self, state: InstanceState[Any], key: _IdentityKeyType[Any]
) -> None:
"""optional inlined form of add() which can assume item isn't present
in the map"""
self.add(state)
def _manage_incoming_state(self, state: InstanceState[Any]) -> None:
state._instance_dict = self._wr
if state.modified:
self._modified.add(state)
def _manage_removed_state(self, state: InstanceState[Any]) -> None:
del state._instance_dict
if state.modified:
self._modified.discard(state)
def _dirty_states(self) -> Set[InstanceState[Any]]:
return self._modified
def check_modified(self) -> bool:
"""return True if any InstanceStates present have been marked
as 'modified'.
"""
return bool(self._modified)
def has_key(self, key: _IdentityKeyType[Any]) -> bool:
return key in self
def __len__(self) -> int:
return len(self._dict)
| IdentityMap |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/partitions/partitioned_schedule.py | {
"start": 1465,
"end": 12740
} | class ____:
"""Points to an unresolved asset job. The asset selection isn't resolved yet, so we can't resolve
the PartitionsDefinition, so we can't resolve the schedule cadence.
"""
name: str
job: UnresolvedAssetJobDefinition
description: Optional[str]
default_status: DefaultScheduleStatus
minute_of_hour: Optional[int]
hour_of_day: Optional[int]
day_of_week: Optional[int]
day_of_month: Optional[int]
tags: Optional[Mapping[str, str]]
metadata: Optional[Mapping[str, Any]]
def resolve(self, resolved_job: JobDefinition) -> ScheduleDefinition:
partitions_def = resolved_job.partitions_def
if partitions_def is None:
check.failed(
f"Job '{resolved_job.name}' provided to build_schedule_from_partitioned_job must"
" contain partitioned assets or a partitions definition."
)
partitions_def = _check_valid_schedule_partitions_def(partitions_def)
time_partitions_def = check.not_none(get_time_partitions_def(partitions_def))
return schedule(
name=self.name,
cron_schedule=time_partitions_def.get_cron_schedule(
self.minute_of_hour, self.hour_of_day, self.day_of_week, self.day_of_month
),
job=resolved_job,
default_status=self.default_status,
execution_timezone=time_partitions_def.timezone,
description=self.description,
metadata=self.metadata,
)(_get_schedule_evaluation_fn(partitions_def, resolved_job, self.tags))
def with_metadata(self, metadata: RawMetadataMapping) -> Self:
return copy(self, metadata=metadata)
def build_schedule_from_partitioned_job(
    job: Union[JobDefinition, UnresolvedAssetJobDefinition],
    description: Optional[str] = None,
    name: Optional[str] = None,
    minute_of_hour: Optional[int] = None,
    hour_of_day: Optional[int] = None,
    day_of_week: Optional[int] = None,
    day_of_month: Optional[int] = None,
    default_status: DefaultScheduleStatus = DefaultScheduleStatus.STOPPED,
    tags: Optional[Mapping[str, str]] = None,
    cron_schedule: Optional[str] = None,
    execution_timezone: Optional[str] = None,
    metadata: Optional[RawMetadataMapping] = None,
) -> Union[UnresolvedPartitionedAssetScheduleDefinition, ScheduleDefinition]:
    """Creates a schedule from a job that targets
    time window-partitioned or statically-partitioned assets. The job can also be
    multi-partitioned, as long as one of the partition dimensions is time-partitioned.
    The schedule executes at the cadence specified by the time partitioning of the job or assets.

    Args:
        job: The partitioned job (or unresolved asset job) to run on a schedule.
        description: Optional description for the created schedule.
        name: Optional name for the created schedule; defaults to "<job name>_schedule".
        minute_of_hour: Optional minute at which to run, for cadences coarser than hourly.
        hour_of_day: Optional hour at which to run, for cadences coarser than daily.
        day_of_week: Optional day of week at which to run. Mutually exclusive with day_of_month.
        day_of_month: Optional day of month at which to run. Mutually exclusive with day_of_week.
        default_status: Whether the schedule starts RUNNING or STOPPED. Defaults to STOPPED.
        tags: Optional tags attached to each run request.
        cron_schedule: Explicit cron string; only permitted for statically-partitioned jobs
            (for time-partitioned jobs the cadence is derived from the partitioning).
        execution_timezone: Timezone for cron_schedule; only permitted alongside cron_schedule.
        metadata: Optional metadata attached to the schedule.

    **Example:**
    .. code-block:: python
        ######################################
        # Job that targets partitioned assets
        ######################################
        from dagster import (
            DailyPartitionsDefinition,
            asset,
            build_schedule_from_partitioned_job,
            define_asset_job,
            Definitions,
        )
        @asset(partitions_def=DailyPartitionsDefinition(start_date="2020-01-01"))
        def asset1():
            ...
        asset1_job = define_asset_job("asset1_job", selection=[asset1])
        # The created schedule will fire daily
        asset1_job_schedule = build_schedule_from_partitioned_job(asset1_job)
        Definitions(assets=[asset1], schedules=[asset1_job_schedule])
        ################
        # Non-asset job
        ################
        from dagster import DailyPartitionsDefinition, build_schedule_from_partitioned_job, job
        @job(partitions_def=DailyPartitionsDefinition(start_date="2020-01-01"))
        def do_stuff_partitioned():
            ...
        # The created schedule will fire daily
        do_stuff_partitioned_schedule = build_schedule_from_partitioned_job(
            do_stuff_partitioned,
        )
        Definitions(schedules=[do_stuff_partitioned_schedule])
    """
    # day_of_week and day_of_month are mutually exclusive cadence refinements.
    check.invariant(
        not (day_of_week and day_of_month),
        "Cannot provide both day_of_month and day_of_week parameter to"
        " build_schedule_from_partitioned_job.",
    )
    # An explicit cron schedule/timezone conflicts with deriving the cadence from
    # the minute/hour/day parameters — only one mechanism may be used.
    check.invariant(
        not (
            (cron_schedule or execution_timezone)
            and (
                day_of_month is not None
                or day_of_week is not None
                or hour_of_day is not None
                or minute_of_hour is not None
            )
        ),
        "Cannot provide both cron_schedule / execution_timezone parameters and"
        " day_of_month / day_of_week / hour_of_day / minute_of_hour parameters to"
        " build_schedule_from_partitioned_job.",
    )
    if isinstance(job, UnresolvedAssetJobDefinition) and job.partitions_def is None:
        # The job's partitions definition is not yet known, so defer: return an
        # unresolved schedule definition to be resolved later against the asset graph.
        if cron_schedule or execution_timezone:
            check.failed(
                "Cannot provide cron_schedule or execution_timezone to"
                " build_schedule_from_partitioned_job for a time-partitioned job."
            )
        return UnresolvedPartitionedAssetScheduleDefinition(
            job=job,
            default_status=default_status,
            name=check.opt_str_param(name, "name", f"{job.name}_schedule"),
            description=check.opt_str_param(description, "description"),
            minute_of_hour=minute_of_hour,
            hour_of_day=hour_of_day,
            day_of_week=day_of_week,
            day_of_month=day_of_month,
            tags=tags,
            metadata=metadata,
        )
    else:
        partitions_def = job.partitions_def
        if partitions_def is None:
            check.failed("The provided job is not partitioned")
        # Narrows to TimeWindow / MultiPartitions / Static, or raises.
        partitions_def = _check_valid_schedule_partitions_def(partitions_def)
        if isinstance(partitions_def, StaticPartitionsDefinition):
            # Static partitions carry no time cadence of their own, so an explicit
            # cron schedule is required.
            check.not_none(
                cron_schedule,
                "Creating a schedule from a static partitions definition requires a cron schedule",
            )
            should_execute = None
        else:
            if cron_schedule or execution_timezone:
                check.failed(
                    "Cannot provide cron_schedule or execution_timezone to"
                    " build_schedule_from_partitioned_job for a time-partitioned job."
                )
            time_partitions_def = check.not_none(get_time_partitions_def(partitions_def))
            # Derive the cron cadence and timezone from the time partitioning itself.
            cron_schedule = time_partitions_def.get_cron_schedule(
                minute_of_hour, hour_of_day, day_of_week, day_of_month
            )
            execution_timezone = time_partitions_def.timezone
            if time_partitions_def.exclusions:
                # With exclusions, skip ticks whose latest window start is excluded.
                def _should_execute(context: ScheduleEvaluationContext) -> bool:
                    with partition_loading_context(
                        effective_dt=context.scheduled_execution_time,
                        dynamic_partitions_store=context.instance
                        if context.instance_ref is not None
                        else None,
                    ):
                        window = time_partitions_def.get_last_partition_window_ignoring_exclusions()
                        if not window:
                            # No window at all: let the schedule fire (downstream logic skips).
                            return True
                        return not time_partitions_def.is_window_start_excluded(window.start)
                should_execute = _should_execute
            else:
                should_execute = None
        return schedule(
            cron_schedule=cron_schedule,  # type: ignore[arg-type]
            job=job,
            default_status=default_status,
            execution_timezone=execution_timezone,
            name=check.opt_str_param(name, "name", f"{job.name}_schedule"),
            description=check.opt_str_param(description, "description"),
            should_execute=should_execute,
        )(_get_schedule_evaluation_fn(partitions_def, job, tags))
def _get_schedule_evaluation_fn(
    partitions_def: PartitionsDefinition,
    job: Union[JobDefinition, UnresolvedAssetJobDefinition],
    tags: Optional[Mapping[str, str]] = None,
) -> Callable[[ScheduleEvaluationContext], Union[SkipReason, RunRequest, RunRequestIterator]]:
    """Build the evaluation function for a schedule over a partitioned job.

    Dispatches on the partitions-definition type: a single run request for the
    latest time-window partition, one request per key for static partitions, or
    one request per multi-partition key matching the latest time dimension value.
    """

    def _evaluate(context):
        # Only the latest partition is targeted; earlier partitions were handled
        # by earlier schedule ticks.
        store = context.instance if context.instance_ref is not None else None
        with partition_loading_context(
            effective_dt=context._scheduled_execution_time,  # noqa
            dynamic_partitions_store=store,
        ):
            if isinstance(partitions_def, TimeWindowPartitionsDefinition):
                last_key = partitions_def.get_last_partition_key()
                if last_key is None:
                    return SkipReason("The job's PartitionsDefinition has no partitions")
                return job.run_request_for_partition(
                    partition_key=last_key, run_key=last_key, tags=tags
                )
            if isinstance(partitions_def, StaticPartitionsDefinition):
                requests = []
                for key in partitions_def.get_partition_keys():
                    requests.append(
                        job.run_request_for_partition(partition_key=key, run_key=key, tags=tags)
                    )
                return requests
            check.invariant(isinstance(partitions_def, MultiPartitionsDefinition))
            time_dimension = partitions_def.time_window_dimension  # pyright: ignore[reportAttributeAccessIssue]
            last_key = time_dimension.partitions_def.get_last_partition_key()
            if last_key is None:
                return SkipReason("The job's PartitionsDefinition has no partitions")
            multipartition_keys = partitions_def.get_multipartition_keys_with_dimension_value(  # pyright: ignore[reportAttributeAccessIssue]
                time_dimension.name, last_key
            )
            return [
                job.run_request_for_partition(partition_key=key, run_key=key, tags=tags)
                for key in multipartition_keys
            ]

    return _evaluate  # pyright: ignore[reportReturnType]
def _check_valid_schedule_partitions_def(
    partitions_def: PartitionsDefinition,
) -> Union[
    TimeWindowPartitionsDefinition,
    MultiPartitionsDefinition,
    StaticPartitionsDefinition,
]:
    """Validate that *partitions_def* is schedulable and narrow its static type.

    Raises:
        DagsterInvalidDefinitionError: if the definition is neither
            single-dimension time-window-partitioned nor static.
    """
    is_supported = has_one_dimension_time_window_partitioning(partitions_def) or isinstance(
        partitions_def, StaticPartitionsDefinition
    )
    if not is_supported:
        raise DagsterInvalidDefinitionError(
            "Tried to build a partitioned schedule from an asset job, but received an invalid"
            " partitions definition. The permitted partitions definitions are: \n1."
            " TimeWindowPartitionsDefinition\n2. MultiPartitionsDefinition with a single"
            " TimeWindowPartitionsDefinition dimension\n3. StaticPartitionsDefinition"
        )
    return cast(
        "Union[TimeWindowPartitionsDefinition, MultiPartitionsDefinition, StaticPartitionsDefinition]",
        partitions_def,
    )
# Backwards-compatible alias for build_schedule_from_partitioned_job.
schedule_from_partitions = build_schedule_from_partitioned_job
| UnresolvedPartitionedAssetScheduleDefinition |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.