| language (stringclasses: 1 value) | repo (stringclasses: 346 values) | path (stringlengths: 6-201) | class_span (dict) | source (stringlengths: 21-2.38M) | target (stringlengths: 1-96) |
|---|---|---|---|---|---|
python | joke2k__faker | faker/providers/person/es_MX/__init__.py | {"start": 46, "end": 18571} | class ____(PersonProvider):
formats = (
"{{first_name}} {{last_name}} {{last_name}}",
"{{first_name}} {{first_name}} {{last_name}}",
"{{first_name}} {{first_name}} {{last_name}} {{last_name}}",
"{{first_name}} {{last_name}}",
"{{prefix}} {{first_name}} {{last_name}}",
)
first_names = (
"Abel",
"Abelardo",
"Abigail",
"Abraham",
"Abril",
"Adalberto",
"Adán",
"Adela",
"Adriana",
"Aida",
"Alejandra",
"Agustín",
"Alberto",
"Aldonza",
"Alicia",
"Alta Gracia",
"Alonso",
"Aldo",
"Alejandro",
"Alfonso",
"Alfredo",
"Alma",
"Alvaro",
"Amalia",
"Amanda",
"Amador",
"Amelia",
"Ana",
"Anabel",
"Ana Luisa",
"Ana María",
"Anel",
"Andrea",
"Andrés",
"Ángel",
"Ángela",
"Angélica",
"Antonia",
"Antonio",
"Araceli",
"Arcelia",
"Ariadna",
"Armando",
"Arturo",
"Asunción",
"Augusto",
"Aurora",
"Aurelio",
"Barbara",
"Beatriz",
"Berta",
"Benito",
"Benjamín",
"Bernardo",
"Bernabé",
"Bianca",
"Blanca",
"Bruno",
"Camila",
"Camilo",
"Caridad",
"Carla",
"Carlos",
"Carlota",
"Carmen",
"Carolina",
"Catalina",
"César",
"Cecilia",
"Celia",
"Citlali",
"Clara",
"Claudia",
"Claudio",
"Clemente",
"Concepción",
"Conchita",
"Cornelio",
"Cristian",
"Cristal",
"Cristina",
"Cristobal",
"Cynthia",
"Dalia",
"Daniel",
"Daniela",
"Darío",
"David",
"Débora",
"Delia",
"Diana",
"Diego",
"Dolores",
"Dulce",
"Dulce María",
"Eduardo",
"Elena",
"Elias",
"Elisa",
"Eloisa",
"Elsa",
"Elvia",
"Elvira",
"Eloy",
"Emilia",
"Emiliano",
"Emilio",
"Enrique",
"Eric",
"Ernesto",
"Esmeralda",
"Esteban",
"Estefanía",
"Estela",
"Esparta",
"Espartaco",
"Esperanza",
"Estela",
"Esther",
"Eugenia",
"Eugenio",
"Eva",
"Evelio",
"Fabiola",
"Federico",
"Felipe",
"Fernando",
"Felix",
"Fidel",
"Flavio",
"Florencia",
"Francisco",
"Francisco Javier",
"Francisca",
"Frida",
"Gabino",
"Gabriela",
"Gabriel",
"Genaro",
"Georgina",
"Gerardo",
"Gerónimo",
"Germán",
"Gilberto",
"Guillermina",
"Gloria",
"Gonzalo",
"Graciela",
"Gregorio",
"Guillermo",
"Guadalupe",
"Gustavo",
"Héctor",
"Helena",
"Hermelinda",
"Hernán",
"Hilda",
"Homero",
"Horacio",
"Hugo",
"Humberto",
"Ignacio",
"Ilse",
"Indira",
"Inés",
"Irene",
"Irma",
"Itzel",
"Isaac",
"Isabel",
"Isabela",
"Israel",
"Iván",
"Ivonne",
"Jacinto",
"Jacobo",
"Jaime",
"Javier",
"Jaqueline",
"Jerónimo",
"Jesús",
"Joaquín",
"Jonás",
"Jorge",
"Jorge Luis",
"Jos",
"José",
"Josefina",
"José Carlos",
"José Eduardo",
"José Emilio",
"José Luis",
"José Manuél",
"José María",
"Juan",
"Juana",
"Juan Carlos",
"Judith",
"Julia",
"Julio",
"Julio César",
"Laura",
"Leonardo",
"Leonel",
"Leonor",
"Karla",
"Karina",
"Leticia",
"Lorenzo",
"Lucas",
"Lilia",
"Liliana",
"Linda",
"Lorena",
"Lourdes",
"Lucía",
"Luisa",
"Luz",
"Luis",
"Luis Miguel",
"Luis Manuel",
"Magdalena",
"Manuel",
"Marco Antonio",
"Marcela",
"Marcos",
"Margarita",
"María",
"Marisela",
"Marisol",
"María del Carmen",
"María Cristina",
"María Elena",
"María Eugenia",
"María José",
"María Luisa",
"María Teresa",
"Marisol",
"Martha",
"Mayte",
"Mariano",
"Mariana",
"Mario",
"Martín",
"Mateo",
"Mauro",
"Mauricio",
"Maximiliano",
"Mercedes",
"Micaela",
"Minerva",
"Mitzy",
"Miguel",
"Miguel Ángel",
"Miriam",
"Modesto",
"Mónica",
"Nadia",
"Natalia",
"Natividad",
"Nancy",
"Nayeli",
"Nelly",
"Noelia",
"Noemí",
"Norma",
"Nicolás",
"Octavio",
"Ofelia",
"Olivia",
"Óliver",
"Olga",
"Óscar",
"Oswaldo",
"Omar",
"Pablo",
"Paola",
"Patricia",
"Pamela",
"Patricio",
"Pascual",
"Paulina",
"Pedro",
"Perla",
"Pilar",
"Porfirio",
"Rafaél",
"Ramiro",
"Ramón",
"Raúl",
"Raquel",
"Rebeca",
"Reina",
"Renato",
"René",
"Reynaldo",
"Ricardo",
"Roberto",
"Rodolfo",
"Rocío",
"Rodrigo",
"Rolando",
"Rosa",
"Rosalia",
"Rosario",
"Rubén",
"Rufino",
"Ruby",
"Salvador",
"Salma",
"Samuel",
"Sandra",
"Santiago",
"Sara",
"Sessa",
"Sergio",
"Serafín",
"Silvano",
"Silvia",
"Sofía",
"Socorro",
"Soledad",
"Sonia",
"Susana",
"Tania",
"Teresa",
"Teodoro",
"Timoteo",
"Tomás",
"Trinidad",
"Verónica",
"Vicente",
"Violeta",
"Uriel",
"Úrsula",
"Vanesa",
"Víctor",
"Victoria",
"Virginia",
"Wilfrido",
"Wendolin",
"Yeni",
"Yolanda",
"Yuridia",
"Zacarías",
"Zeferino",
"Zoé",
)
last_names = (
"Abrego",
"Abreu",
"Acevedo",
"Acosta",
"Acuña",
"Adame",
"Aguayo",
"Aguilar",
"Aguilera",
"Aguirre",
"Alarcón",
"Alba",
"Alcala",
"Alcántar",
"Alcaraz",
"Alejandro",
"Alemán",
"Alfaro",
"Almanza",
"Almaraz",
"Almonte",
"Alonso",
"Alonzo",
"Altamirano",
"Alva",
"Alvarado",
"Alvarez",
"Amador",
"Amaya",
"Anaya",
"Anguiano",
"Angulo",
"Aparicio",
"Apodaca",
"Aponte",
"Aragón",
"Aranda",
"Arce",
"Archuleta",
"Arellano",
"Arenas",
"Arevalo",
"Arguello",
"Arias",
"Armas",
"Armendáriz",
"Armenta",
"Arredondo",
"Arreola",
"Arriaga",
"Arroyo",
"Arteaga",
"Ávalos",
"Ávila",
"Avilés",
"Ayala",
"Baca",
"Badillo",
"Báez",
"Baeza",
"Bahena",
"Balderas",
"Ballesteros",
"Bañuelos",
"Barajas",
"Barela",
"Barragán",
"Barraza",
"Barrera",
"Barreto",
"Barrientos",
"Barrios",
"Batista",
"Becerra",
"Beltrán",
"Benavides",
"Benavídez",
"Benítez",
"Bermúdez",
"Bernal",
"Berríos",
"Bétancourt",
"Blanco",
"Bonilla",
"Borrego",
"Botello",
"Bravo",
"Briones",
"Briseño",
"Brito",
"Bueno",
"Burgos",
"Bustamante",
"Bustos",
"Caballero",
"Cabán",
"Cabrera",
"Cadena",
"Caldera",
"Calderón",
"Calvillo",
"Camacho",
"Camarillo",
"Campos",
"Canales",
"Candelaria",
"Cano",
"Cantú",
"Caraballo",
"Carbajal",
"Cardenas",
"Cardona",
"Carmona",
"Carranza",
"Carrasco",
"Carreón",
"Carrera",
"Carrero",
"Carrillo",
"Carrión",
"Carvajal",
"Casanova",
"Casares",
"Casárez",
"Casas",
"Casillas",
"Castañeda",
"Castellanos",
"Castillo",
"Castro",
"Cavazos",
"Cazares",
"Ceballos",
"Cedillo",
"Ceja",
"Centeno",
"Cepeda",
"Cervantes",
"Cervántez",
"Chacón",
"Chapa",
"Chavarría",
"Chávez",
"Cintrón",
"Cisneros",
"Collado",
"Collazo",
"Colón",
"Colunga",
"Concepción",
"Contreras",
"Cordero",
"Córdova",
"Cornejo",
"Corona",
"Coronado",
"Corral",
"Corrales",
"Correa",
"Cortés",
"Cortez",
"Cotto",
"Covarrubias",
"Crespo",
"Cruz",
"Cuellar",
"Curiel",
"Dávila",
"de Anda",
"de Jesús",
"de la Crúz",
"de la Fuente",
"de la Garza",
"de la O",
"de la Rosa",
"de la Torre",
"de León",
"Delgadillo",
"Delgado",
"del Río",
"del Valle",
"Díaz",
"Domínguez",
"Duarte",
"Dueñas",
"Durán",
"Echeverría",
"Elizondo",
"Enríquez",
"Escalante",
"Escamilla",
"Escobar",
"Escobedo",
"Esparza",
"Espinal",
"Espino",
"Espinosa",
"Espinoza",
"Esquibel",
"Esquivel",
"Estévez",
"Estrada",
"Fajardo",
"Farías",
"Feliciano",
"Fernández",
"Ferrer",
"Fierro",
"Figueroa",
"Flores",
"Flórez",
"Fonseca",
"Franco",
"Frías",
"Fuentes",
"Gaitán",
"Galarza",
"Galindo",
"Gallardo",
"Gallegos",
"Galván",
"Gálvez",
"Gamboa",
"Gamez",
"Gaona",
"Garay",
"García",
"Garibay",
"Garica",
"Garrido",
"Garza",
"Gastélum",
"Gaytán",
"Gil",
"Girón",
"Godínez",
"Godoy",
"Gómez",
"Gonzales",
"González",
"Gollum",
"Gracia",
"Granado",
"Granados",
"Griego",
"Grijalva",
"Guajardo",
"Guardado",
"Guerra",
"Guerrero",
"Guevara",
"Guillen",
"Gurule",
"Gutiérrez",
"Guzmán",
"Haro",
"Henríquez",
"Heredia",
"Hernádez",
"Hernandes",
"Hernández",
"Herrera",
"Hidalgo",
"Hinojosa",
"Holguín",
"Huerta",
"Hurtado",
"Ibarra",
"Iglesias",
"Irizarry",
"Jaime",
"Jaimes",
"Jáquez",
"Jaramillo",
"Jasso",
"Jiménez",
"Jimínez",
"Juárez",
"Jurado",
"Laboy",
"Lara",
"Laureano",
"Leal",
"Lebrón",
"Ledesma",
"Leiva",
"Lemus",
"León",
"Lerma",
"Leyva",
"Limón",
"Linares",
"Lira",
"Llamas",
"Loera",
"Lomeli",
"Longoria",
"López",
"Lovato",
"Loya",
"Lozada",
"Lozano",
"Lucero",
"Lucio",
"Luevano",
"Lugo",
"Luna",
"Macías",
"Madera",
"Madrid",
"Madrigal",
"Maestas",
"Magaña",
"Malave",
"Maldonado",
"Manzanares",
"Mares",
"Marín",
"Márquez",
"Marrero",
"Marroquín",
"Martínez",
"Mascareñas",
"Mata",
"Mateo",
"Matías",
"Matos",
"Maya",
"Mayorga",
"Medina",
"Medrano",
"Mejía",
"Meléndez",
"Melgar",
"Mena",
"Menchaca",
"Méndez",
"Mendoza",
"Menéndez",
"Meraz",
"Mercado",
"Merino",
"Mesa",
"Meza",
"Miramontes",
"Miranda",
"Mireles",
"Mojica",
"Molina",
"Mondragón",
"Monroy",
"Montalvo",
"Montañez",
"Montaño",
"Montemayor",
"Montenegro",
"Montero",
"Montes",
"Montez",
"Montoya",
"Mora",
"Morales",
"Moreno",
"Mota",
"Moya",
"Munguía",
"Muñiz",
"Muñoz",
"Murillo",
"Muro",
"Nájera",
"Naranjo",
"Narváez",
"Nava",
"Navarrete",
"Navarro",
"Nazario",
"Negrete",
"Negrón",
"Nevárez",
"Nieto",
"Nieves",
"Niño",
"Noriega",
"Núñez",
"Ocampo",
"Ocasio",
"Ochoa",
"Ojeda",
"Olivares",
"Olivárez",
"Olivas",
"Olivera",
"Olivo",
"Olmos",
"Olvera",
"Ontiveros",
"Oquendo",
"Ordóñez",
"Orellana",
"Ornelas",
"Orosco",
"Orozco",
"Orta",
"Ortega",
"Ortiz",
"Osorio",
"Otero",
"Ozuna",
"Pabón",
"Pacheco",
"Padilla",
"Padrón",
"Páez",
"Palacios",
"Palomino",
"Palomo",
"Pantoja",
"Paredes",
"Parra",
"Partida",
"Patiño",
"Paz",
"Pedraza",
"Pedroza",
"Pelayo",
"Peña",
"Perales",
"Peralta",
"Perea",
"Peres",
"Pérez",
"Pichardo",
"Piña",
"Pineda",
"Pizarro",
"Polanco",
"Ponce",
"Porras",
"Portillo",
"Posada",
"Prado",
"Preciado",
"Prieto",
"Puente",
"Puga",
"Pulido",
"Quesada",
"Quezada",
"Quiñones",
"Quiñónez",
"Quintana",
"Quintanilla",
"Quintero",
"Quiroz",
"Rael",
"Ramírez",
"Ramón",
"Ramos",
"Rangel",
"Rascón",
"Raya",
"Razo",
"Regalado",
"Rendón",
"Rentería",
"Reséndez",
"Reyes",
"Reyna",
"Reynoso",
"Rico",
"Rincón",
"Riojas",
"Ríos",
"Rivas",
"Rivera",
"Rivero",
"Robledo",
"Robles",
"Rocha",
"Rodarte",
"Rodrígez",
"Rodríguez",
"Rodríquez",
"Rojas",
"Rojo",
"Roldán",
"Rolón",
"Romero",
"Romo",
"Roque",
"Rosado",
"Rosales",
"Rosario",
"Rosas",
"Roybal",
"Rubio",
"Ruelas",
"Ruiz",
"Saavedra",
"Sáenz",
"Saiz",
"Salas",
"Salazar",
"Salcedo",
"Salcido",
"Saldaña",
"Saldivar",
"Salgado",
"Salinas",
"Samaniego",
"Sanabria",
"Sanches",
"Sánchez",
"Sandoval",
"Santacruz",
"Santana",
"Santiago",
"Santillán",
"Sarabia",
"Sauceda",
"Saucedo",
"Segovia",
"Segura",
"Sepúlveda",
"Serna",
"Serrano",
"Serrato",
"Sevilla",
"Sierra",
"Sisneros",
"Solano",
"Solís",
"Soliz",
"Solorio",
"Solorzano",
"Soria",
"Sosa",
"Sotelo",
"Soto",
"Suárez",
"Tafoya",
"Tamayo",
"Tamez",
"Tapia",
"Tejada",
"Tejeda",
"Téllez",
"Tello",
"Terán",
"Terrazas",
"Tijerina",
"Tirado",
"Toledo",
"Toro",
"Torres",
"Tórrez",
"Tovar",
"Trejo",
"Treviño",
"Trujillo",
"Ulibarri",
"Ulloa",
"Urbina",
"Ureña",
"Urías",
"Uribe",
"Urrutia",
"Vaca",
"Valadez",
"Valdés",
"Valdez",
"Valdivia",
"Valencia",
"Valentín",
"Valenzuela",
"Valladares",
"Valle",
"Vallejo",
"Valles",
"Valverde",
"Vanegas",
"Varela",
"Vargas",
"Vásquez",
"Vázquez",
"Vega",
"Vela",
"Velasco",
"Velásquez",
"Velázquez",
"Vélez",
"Véliz",
"Venegas",
"Vera",
"Verdugo",
"Verduzco",
"Vergara",
"Viera",
"Vigil",
"Villa",
"Villagómez",
"Villalobos",
"Villalpando",
"Villanueva",
"Villareal",
"Villarreal",
"Villaseñor",
"Villegas",
"Yáñez",
"Ybarra",
"Zambrano",
"Zamora",
"Zamudio",
"Zapata",
"Zaragoza",
"Zarate",
"Zavala",
"Zayas",
"Zedillo",
"Zelaya",
"Zepeda",
"Zúñiga",
)
prefixes = ("Sr(a).", "Dr.", "Mtro.", "Lic.", "Ing.")
| Provider |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/convolutional.py | {"start": 45168, "end": 57442} | class ____(Conv2D):
"""Transposed convolution layer (sometimes called Deconvolution).
The need for transposed convolutions generally arises
from the desire to use a transformation going in the opposite direction
of a normal convolution, i.e., from something that has the shape of the
output of some convolution to something that has the shape of its input
while maintaining a connectivity pattern that is compatible with
said convolution.
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers or `None`, does not include the sample axis),
e.g. `input_shape=(128, 128, 3)` for 128x128 RGB pictures
in `data_format="channels_last"`.
Args:
filters: Integer, the dimensionality of the output space
(i.e. the number of output filters in the convolution).
kernel_size: An integer or tuple/list of 2 integers, specifying the
height and width of the 2D convolution window.
Can be a single integer to specify the same value for
all spatial dimensions.
strides: An integer or tuple/list of 2 integers,
specifying the strides of the convolution along the height and width.
Can be a single integer to specify the same value for
all spatial dimensions.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: one of `"valid"` or `"same"` (case-insensitive).
`"valid"` means no padding. `"same"` results in padding with zeros evenly
to the left/right or up/down of the input such that output has the same
height/width dimension as the input.
output_padding: An integer or tuple/list of 2 integers,
specifying the amount of padding along the height and width
of the output tensor.
Can be a single integer to specify the same value for all
spatial dimensions.
The amount of output padding along a given dimension must be
lower than the stride along that same dimension.
If set to `None` (default), the output shape is inferred.
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, height, width, channels)` while `channels_first`
corresponds to inputs with shape
`(batch_size, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
dilation_rate: an integer or tuple/list of 2 integers, specifying
the dilation rate to use for dilated convolution.
Can be a single integer to specify the same value for
all spatial dimensions.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any stride value != 1.
activation: Activation function to use.
If you don't specify anything, no activation is applied (
see `keras.activations`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix (
see `keras.initializers`). Defaults to 'glorot_uniform'.
bias_initializer: Initializer for the bias vector (
see `keras.initializers`). Defaults to 'zeros'.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix (see `keras.regularizers`).
bias_regularizer: Regularizer function applied to the bias vector (
see `keras.regularizers`).
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation") (see `keras.regularizers`).
kernel_constraint: Constraint function applied to the kernel matrix (
see `keras.constraints`).
bias_constraint: Constraint function applied to the bias vector (
see `keras.constraints`).
Input shape:
4D tensor with shape:
`(batch_size, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, rows, cols, channels)` if data_format='channels_last'.
Output shape:
4D tensor with shape:
`(batch_size, filters, new_rows, new_cols)` if data_format='channels_first'
or 4D tensor with shape:
`(batch_size, new_rows, new_cols, filters)` if data_format='channels_last'.
`rows` and `cols` values might have changed due to padding.
If `output_padding` is specified:
```
new_rows = ((rows - 1) * strides[0] + kernel_size[0] - 2 * padding[0] +
output_padding[0])
new_cols = ((cols - 1) * strides[1] + kernel_size[1] - 2 * padding[1] +
output_padding[1])
```
Returns:
A tensor of rank 4 representing
`activation(conv2dtranspose(inputs, kernel) + bias)`.
Raises:
ValueError: if `padding` is "causal".
ValueError: when both `strides` > 1 and `dilation_rate` > 1.
References:
- [A guide to convolution arithmetic for deep
learning](https://arxiv.org/abs/1603.07285v1)
- [Deconvolutional
Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)
"""
def __init__(self,
filters,
kernel_size,
strides=(1, 1),
padding='valid',
output_padding=None,
data_format=None,
dilation_rate=(1, 1),
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(Conv2DTranspose, self).__init__(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=padding,
data_format=data_format,
dilation_rate=dilation_rate,
activation=activations.get(activation),
use_bias=use_bias,
kernel_initializer=initializers.get(kernel_initializer),
bias_initializer=initializers.get(bias_initializer),
kernel_regularizer=regularizers.get(kernel_regularizer),
bias_regularizer=regularizers.get(bias_regularizer),
activity_regularizer=regularizers.get(activity_regularizer),
kernel_constraint=constraints.get(kernel_constraint),
bias_constraint=constraints.get(bias_constraint),
**kwargs)
self.output_padding = output_padding
if self.output_padding is not None:
self.output_padding = conv_utils.normalize_tuple(
self.output_padding, 2, 'output_padding')
for stride, out_pad in zip(self.strides, self.output_padding):
if out_pad >= stride:
raise ValueError('Stride ' + str(self.strides) + ' must be '
'greater than output padding ' +
str(self.output_padding))
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if len(input_shape) != 4:
raise ValueError('Inputs should have rank 4. Received input '
'shape: ' + str(input_shape))
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
input_dim = int(input_shape[channel_axis])
self.input_spec = InputSpec(ndim=4, axes={channel_axis: input_dim})
kernel_shape = self.kernel_size + (self.filters, input_dim)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs_shape = array_ops.shape(inputs)
batch_size = inputs_shape[0]
if self.data_format == 'channels_first':
h_axis, w_axis = 2, 3
else:
h_axis, w_axis = 1, 2
# Use the constant height and width when possible.
# TODO(scottzhu): Extract this into a utility function that can be applied
# to all convolutional layers, which currently lose the static shape
# information due to tf.shape().
height, width = None, None
if inputs.shape.rank is not None:
dims = inputs.shape.as_list()
height = dims[h_axis]
width = dims[w_axis]
height = height if height is not None else inputs_shape[h_axis]
width = width if width is not None else inputs_shape[w_axis]
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
# Infer the dynamic output shape:
out_height = conv_utils.deconv_output_length(height,
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
out_width = conv_utils.deconv_output_length(width,
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
if self.data_format == 'channels_first':
output_shape = (batch_size, self.filters, out_height, out_width)
else:
output_shape = (batch_size, out_height, out_width, self.filters)
output_shape_tensor = array_ops_stack.stack(output_shape)
outputs = backend.conv2d_transpose(
inputs,
self.kernel,
output_shape_tensor,
strides=self.strides,
padding=self.padding,
data_format=self.data_format,
dilation_rate=self.dilation_rate)
if not context.executing_eagerly():
# Infer the static output shape:
out_shape = self.compute_output_shape(inputs.shape)
outputs.set_shape(out_shape)
if self.use_bias:
outputs = nn.bias_add(
outputs,
self.bias,
data_format=conv_utils.convert_data_format(self.data_format, ndim=4))
if self.activation is not None:
return self.activation(outputs)
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = list(input_shape)
if self.data_format == 'channels_first':
c_axis, h_axis, w_axis = 1, 2, 3
else:
c_axis, h_axis, w_axis = 3, 1, 2
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.strides
if self.output_padding is None:
out_pad_h = out_pad_w = None
else:
out_pad_h, out_pad_w = self.output_padding
output_shape[c_axis] = self.filters
output_shape[h_axis] = conv_utils.deconv_output_length(
output_shape[h_axis],
kernel_h,
padding=self.padding,
output_padding=out_pad_h,
stride=stride_h,
dilation=self.dilation_rate[0])
output_shape[w_axis] = conv_utils.deconv_output_length(
output_shape[w_axis],
kernel_w,
padding=self.padding,
output_padding=out_pad_w,
stride=stride_w,
dilation=self.dilation_rate[1])
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Conv2DTranspose, self).get_config()
config['output_padding'] = self.output_padding
return config
| Conv2DTranspose |
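The `Conv2DTranspose` docstring in the row above gives the output-size formula used when `output_padding` is specified. A minimal sketch applying that formula directly, useful for sanity-checking expected shapes, is shown below; the helper name and the symmetric-padding value are illustrative assumptions, not part of the original Keras module.

```python
def transposed_conv_output_size(size, kernel, stride, padding, output_padding):
    # Formula from the docstring: (size - 1) * stride + kernel - 2 * padding + output_padding
    return (size - 1) * stride + kernel - 2 * padding + output_padding

# Example: a 7x7 feature map, 3x3 kernel, stride 2, 1 pixel of symmetric padding,
# and 1 pixel of output padding grows to 14x14.
print(transposed_conv_output_size(7, 3, 2, 1, 1))  # 14
```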
python | sdispater__pendulum | src/pendulum/tz/exceptions.py | {"start": 169, "end": 218} | class ____(TimezoneError):
pass
| InvalidTimezone |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {"start": 14547, "end": 14699} | class ____(BaseModel):
"""
Response for the external log URL endpoint.
"""
url: Annotated[str, Field(title="Url")]
| ExternalLogUrlResponse |
python | sqlalchemy__sqlalchemy | test/base/test_examples.py | {"start": 308, "end": 454} | class ____(
test_versioning.TestVersioning,
fixtures.RemoveORMEventsGlobally,
fixtures.TestBase,
):
pass
| VersionedRowsTestLegacyBase |
python | getsentry__sentry | tests/sentry/api/serializers/test_release.py | {"start": 40475, "end": 55598} | class ____(TestCase):
def test_get_users_for_authors_finds_by_username(self) -> None:
user = self.create_user(email="john@company.com", name="John Smith")
project = self.create_project()
self.create_member(user=user, organization=project.organization)
integration = self.create_provider_integration(provider="github")
self.create_organization_integration(
organization_id=project.organization_id, integration_id=integration.id
)
ExternalActor.objects.create(
external_name="@johnsmith",
user_id=user.id,
organization_id=project.organization_id,
integration_id=integration.id,
provider=200,
)
# CommitAuthor with anonymous email
author = CommitAuthor.objects.create(
email="34950490+johnsmith@users.noreply.github.com",
name="Other",
external_id="github:johnsmith",
organization_id=project.organization_id,
)
users = get_users_for_authors(organization_id=project.organization_id, authors=[author])
assert len(users) == 1
assert users[str(author.id)].get("id", "not present") == str(user.id)
assert users[str(author.id)]["email"] == "john@company.com"
assert users[str(author.id)]["name"] == "John Smith"
def test_get_users_for_authors_by_external_actor_no_user_id(self) -> None:
"""CommitAuthor has an ExternalActor but it's a team mapping"""
project = self.create_project()
integration = self.create_provider_integration(provider="github")
self.create_organization_integration(
organization_id=project.organization_id, integration_id=integration.id
)
team = self.create_team(organization=project.organization)
ExternalActor.objects.create(
external_name="@teamuser",
team_id=team.id,
organization_id=project.organization_id,
integration_id=integration.id,
provider=200,
)
author = CommitAuthor.objects.create(
email="teamuser@company.com",
name="Team User",
external_id="github:teamuser",
organization_id=project.organization_id,
)
users = get_users_for_authors(organization_id=project.organization_id, authors=[author])
assert len(users) == 1
assert users[str(author.id)].get("id", "not present") == "not present"
assert users[str(author.id)]["email"] == "teamuser@company.com"
assert users[str(author.id)]["name"] == "Team User"
def test_get_users_for_authors_no_match(self) -> None:
project = self.create_project()
author = CommitAuthor.objects.create(
email="unknown@company.com",
name="Unknown User",
external_id="github:unknownuser",
organization_id=project.organization_id,
)
users = get_users_for_authors(organization_id=project.organization_id, authors=[author])
assert len(users) == 1
assert users[str(author.id)].get("id", "not present") == "not present"
assert users[str(author.id)]["email"] == "unknown@company.com"
assert users[str(author.id)]["name"] == "Unknown User"
def test_get_users_for_authors_finds_by_email(self) -> None:
user = self.create_user(email="regular@company.com", name="Regular Sentry User")
project = self.create_project()
self.create_member(user=user, organization=project.organization)
author = CommitAuthor.objects.create(
email="regular@company.com",
name="Regular User",
external_id="github:regularuser",
organization_id=project.organization_id,
)
users = get_users_for_authors(organization_id=project.organization_id, authors=[author])
assert len(users) == 1
assert users[str(author.id)].get("id", "not present") == str(user.id)
assert users[str(author.id)]["email"] == "regular@company.com"
assert users[str(author.id)]["name"] == "Regular Sentry User"
def test_get_users_for_authors_external_actor_takes_precedence(self) -> None:
email_user = self.create_user(email="john@company.com", name="Email User")
mapping_user = self.create_user(email="john-external@company.com", name="external User")
project = self.create_project()
self.create_member(user=email_user, organization=project.organization)
self.create_member(user=mapping_user, organization=project.organization)
integration = self.create_provider_integration(provider="github")
self.create_organization_integration(
organization_id=project.organization_id, integration_id=integration.id
)
ExternalActor.objects.create(
external_name="@johnsmith",
user_id=mapping_user.id,
organization_id=project.organization_id,
integration_id=integration.id,
provider=200,
)
author = CommitAuthor.objects.create(
email="john@company.com", # matches email_user
name="John Smith",
external_id="github:johnsmith", # matches ExternalActor
organization_id=project.organization_id,
)
users = get_users_for_authors(organization_id=project.organization_id, authors=[author])
assert len(users) == 1
assert users[str(author.id)].get("id", "not present") == str(mapping_user.id)
assert users[str(author.id)]["email"] == "john-external@company.com"
assert users[str(author.id)]["name"] == "external User"
def test_get_users_for_authors_mixed_authors(self) -> None:
project = self.create_project()
integration = self.create_provider_integration(provider="github")
self.create_organization_integration(
organization_id=project.organization_id, integration_id=integration.id
)
email_user1 = self.create_user(email="direct1@company.com", name="Direct User 1")
self.create_member(user=email_user1, organization=project.organization)
email_user2 = self.create_user(email="direct2@company.com", name="Direct User 2")
self.create_member(user=email_user2, organization=project.organization)
external_user1 = self.create_user(email="external1@company.com", name="External User 1")
self.create_member(user=external_user1, organization=project.organization)
ExternalActor.objects.create(
external_name="@externaluser1",
user_id=external_user1.id,
organization_id=project.organization_id,
integration_id=integration.id,
provider=200,
)
external_user2 = self.create_user(email="external2@company.com", name="External User 2")
self.create_member(user=external_user2, organization=project.organization)
ExternalActor.objects.create(
external_name="@externaluser2",
user_id=external_user2.id,
organization_id=project.organization_id,
integration_id=integration.id,
provider=200,
)
authors = [
CommitAuthor.objects.create(
email="direct1@company.com",
name="Commit Author Name 1",
external_id="github:directuser1",
organization_id=project.organization_id,
),
CommitAuthor.objects.create(
email="direct2@company.com",
name="Commit Author Name 2",
external_id="github:directuser2",
organization_id=project.organization_id,
),
CommitAuthor.objects.create(
email="12345+externaluser1@users.noreply.github.com",
name="Commit Author Name 3",
external_id="github:externaluser1",
organization_id=project.organization_id,
),
CommitAuthor.objects.create(
email="67890+externaluser2@users.noreply.github.com",
name="Commit Author Name 4",
external_id="github:externaluser2",
organization_id=project.organization_id,
),
CommitAuthor.objects.create(
email="unknown1@company.com",
name="Commit Author Name 5",
external_id="github:unknownuser1",
organization_id=project.organization_id,
),
CommitAuthor.objects.create(
email="unknown2@company.com",
name="Commit Author Name 6",
external_id="unknownuser2", # non-GH CommitAuthor
organization_id=project.organization_id,
),
]
users = get_users_for_authors(organization_id=project.organization_id, authors=authors)
assert len(users) == 6
assert users[str(authors[0].id)].get("id", "not present") == str(email_user1.id)
assert users[str(authors[0].id)]["email"] == "direct1@company.com"
assert users[str(authors[0].id)]["name"] == "Direct User 1"
assert users[str(authors[1].id)].get("id", "not present") == str(email_user2.id)
assert users[str(authors[1].id)]["email"] == "direct2@company.com"
assert users[str(authors[1].id)]["name"] == "Direct User 2"
# ExternalActor resolution assertions (takes precedence over email)
assert users[str(authors[2].id)].get("id", "not present") == str(external_user1.id)
assert users[str(authors[2].id)]["email"] == "external1@company.com"
assert users[str(authors[2].id)]["name"] == "External User 1"
assert users[str(authors[3].id)].get("id", "not present") == str(external_user2.id)
assert users[str(authors[3].id)]["email"] == "external2@company.com"
assert users[str(authors[3].id)]["name"] == "External User 2"
# CommitAuthor fallback assertions
assert users[str(authors[4].id)].get("id", "not present") == "not present"
assert users[str(authors[4].id)]["email"] == "unknown1@company.com"
assert users[str(authors[4].id)]["name"] == "Commit Author Name 5"
assert users[str(authors[5].id)].get("id", "not present") == "not present"
assert users[str(authors[5].id)]["email"] == "unknown2@company.com"
assert users[str(authors[5].id)]["name"] == "Commit Author Name 6"
def test_get_users_for_authors_multiple_emails(self) -> None:
user = self.create_user(email="regular@company.com", name="Regular Sentry User")
self.create_useremail(user=user, email="backup_email@gmail.com")
project = self.create_project()
self.create_member(user=user, organization=project.organization)
author = CommitAuthor.objects.create(
email="backup_email@gmail.com",
name="RU",
external_id="github:regularuser",
organization_id=project.organization_id,
)
users = get_users_for_authors(organization_id=project.organization_id, authors=[author])
assert len(users) == 1
assert users[str(author.id)].get("id", "not present") == str(user.id)
assert users[str(author.id)]["email"] == "regular@company.com" # returns primary email
assert users[str(author.id)]["name"] == "Regular Sentry User"
@patch("sentry.users.services.user.service.user_service.serialize_many")
def test_get_users_for_authors_user_dropped(self, mock_serialize) -> None:
"""Edge case: user ID is found but doesn't come back from serialize_many"""
project = self.create_project()
user = self.create_user(email="found@company.com", name="Found User")
self.create_member(user=user, organization=project.organization)
integration = self.create_provider_integration(provider="github")
self.create_organization_integration(
organization_id=project.organization_id, integration_id=integration.id
)
author = CommitAuthor.objects.create(
email="found@company.com",
name="CommitAuthor Fallback Name",
external_id="github:founduser",
organization_id=project.organization_id,
)
mock_serialize.return_value = [] # User ID found but not serialized
users = get_users_for_authors(organization_id=project.organization_id, authors=[author])
# fallback to CommitAuthor fields
assert len(users) == 1
assert users[str(author.id)].get("id", "not present") == "not present"
assert users[str(author.id)]["email"] == "found@company.com"
assert users[str(author.id)]["name"] == "CommitAuthor Fallback Name"
def test_external_actor_duplicate_external_name_prefers_most_recent(self) -> None:
"""Edge case: ExternalActor objects with the same external_name
map to multiple sentry users - select most recently created ExternalActor"""
project = self.create_project()
integration = self.create_provider_integration(provider="github")
self.create_organization_integration(
organization_id=project.organization_id, integration_id=integration.id
)
user0 = self.create_user(email="user0@company.com", name="User 0")
self.create_member(user=user0, organization=project.organization)
ExternalActor.objects.create(
external_name="@duplicate_name",
user_id=user0.id,
organization_id=project.organization_id,
integration_id=integration.id,
provider=200,
)
user1 = self.create_user(email="user1@company.com", name="User 1")
self.create_member(user=user1, organization=project.organization)
ExternalActor.objects.create(
external_name="@duplicate_name",
user_id=user1.id,
organization_id=project.organization_id,
integration_id=integration.id,
provider=200,
)
user2 = self.create_user(email="user2@company.com", name="User 2")
self.create_member(user=user2, organization=project.organization)
ExternalActor.objects.create(
external_name="@duplicate_name",
user_id=user2.id,
organization_id=project.organization_id,
integration_id=integration.id,
provider=200,
)
author = CommitAuthor.objects.create(
email="12345+duplicateuser@users.noreply.github.com",
name="Duplicate User Commit Name",
external_id="github:duplicate_name",
organization_id=project.organization_id,
)
users = get_users_for_authors(organization_id=project.organization_id, authors=[author])
assert len(users) == 1
assert users[str(author.id)].get("id", "not present") == str(user2.id)
assert users[str(author.id)]["email"] == "user2@company.com"
assert users[str(author.id)]["name"] == "User 2"
| GetUsersForAuthorsUserMappingsTest |
python | kamyu104__LeetCode-Solutions | Python/minimum-reverse-operations.py | {"start": 954, "end": 1982} | class ____(object):
def minReverseOperations(self, n, p, banned, k):
"""
:type n: int
:type p: int
:type banned: List[int]
:type k: int
:rtype: List[int]
"""
lookup = [False]*n
for i in banned:
lookup[i] = True
d = 0
result = [-1]*n
result[p] = d
uf = UnionFind(n+2)
uf.union_set(p, p+2)
q = [p]
d += 1
while q:
new_q = []
for p in q:
left, right = 2*max(p-(k-1), 0)+(k-1)-p, 2*min(p+(k-1), n-1)-(k-1)-p
p = uf.right_set(left)
while p <= right:
if not lookup[p]:
result[p] = d
new_q.append(p)
uf.union_set(p, p+2)
p = uf.right_set(p)
q = new_q
d += 1
return result
# Time: O(nlogn)
# Space: O(n)
from sortedcontainers import SortedList
# bfs, sorted list
| Solution |
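The `minReverseOperations` solution in the row above calls a `UnionFind` helper (`union_set`, `right_set`) that lies outside the extracted class span. A minimal sketch consistent with how those calls are used is given below: each visited index is unioned with the index two places to its right, so `right_set(i)` returns the first not-yet-visited index of the same parity at or after `i`. This is an assumed stand-in, not a copy of the helper from the original file.

```python
class UnionFind(object):  # hypothetical stand-in for the helper used above
    def __init__(self, n):
        self.set = list(range(n))    # parent pointers
        self.rank = [0] * n
        self.right = list(range(n))  # right[root] = largest index in the set

    def find_set(self, x):
        stk = []
        while self.set[x] != x:      # find root with path compression
            stk.append(x)
            x = self.set[x]
        while stk:
            self.set[stk.pop()] = x
        return x

    def union_set(self, x, y):
        x, y = self.find_set(x), self.find_set(y)
        if x == y:
            return False
        if self.rank[x] > self.rank[y]:  # union by rank
            x, y = y, x
        self.set[x] = y
        if self.rank[x] == self.rank[y]:
            self.rank[y] += 1
        self.right[y] = max(self.right[x], self.right[y])
        return True

    def right_set(self, x):
        return self.right[self.find_set(x)]
```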
python | joke2k__faker | tests/providers/test_automotive.py | {"start": 2103, "end": 2263} | class ____(_SimpleAutomotiveTestMixin):
"""Test az_AZ automotive provider methods"""
license_plate_pattern = re.compile(r"\d{2}-[A-Z]{2}-\d{3}")
| TestAzAz |
python | pytorch__pytorch | test/fx/test_z3_gradual_types.py | {"start": 87827, "end": 91084} | class ____(unittest.TestCase):
def test_alexnet1(self):
alexnet = models.alexnet()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(alexnet)
for n in symbolic_traced.graph.nodes:
n.type = Dyn
# print(symbolic_traced)
res = alexnet.forward(torch.rand(10, 3, 227, 227)).size()
constraints = transform_all_constraints(symbolic_traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEqual(solver.check(), z3.sat)
input = z3.Const(1, tensor_type)
conv = z3.Const(2, tensor_type)
solver.add(
input == tensor_type.tensor4(D(1, 10), D(1, 3), D(1, 227), D(1, 227))
)
self.assertEqual(solver.check(), z3.sat)
assert solver.model()[conv] == tensor_type.tensor4(
D(1, 10), D(1, 64), D(1, 56), D(1, 56)
)
relu = z3.Const(7, tensor_type)
assert solver.model()[relu] == tensor_type.tensor4(
D(1, 10), D(1, 64), D(1, 56), D(1, 56)
)
maxpool = z3.Const(8, tensor_type)
assert solver.model()[maxpool] == tensor_type.tensor4(
D(1, 10), D(1, 64), D(1, 27), D(1, 27)
)
maxpool2 = z3.Const(42, tensor_type)
assert solver.model()[maxpool2] == tensor_type.tensor4(
D(1, 10), D(1, 256), D(1, 6), D(1, 6)
)
flatten = z3.Const(52, tensor_type)
assert solver.model()[flatten] == tensor_type.tensor2(D(1, 10), D(1, 9216))
linear = z3.Const(64, tensor_type)
assert solver.model()[linear] == tensor_type.tensor2(D(1, 10), D(1, 4096))
linear2 = z3.Const(109, tensor_type)
assert solver.model()[linear2] == tensor_type.tensor2(
D(1, res[0]), D(1, res[1])
)
def test_alexnet2(self):
alexnet = models.alexnet()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(alexnet)
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
n.type = TensorType([Dyn, 4, 227, 227])
constraints = transform_all_constraints(symbolic_traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEqual(solver.check(), z3.unsat)
def test_alexnet3(self):
alexnet = models.alexnet()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(alexnet)
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
n.type = TensorType([Dyn, Dyn, 227, 227])
constraints = transform_all_constraints(symbolic_traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEqual(solver.check(), z3.sat)
def test_alexnet4(self):
alexnet = models.alexnet()
symbolic_traced: torch.fx.GraphModule = symbolic_trace(alexnet)
for n in symbolic_traced.graph.nodes:
if n.op == "placeholder":
n.type = TensorType([Dyn, Dyn, 227])
constraints = transform_all_constraints(symbolic_traced, counter=0)
solver = z3.Solver()
solver.add(constraints)
self.assertEqual(solver.check(), z3.unsat)
if __name__ == "__main__":
unittest.main()
| TestAlexNet |
python | django-haystack__django-haystack | test_haystack/elasticsearch5_tests/test_backend.py | {"start": 50907, "end": 55342} | class ____(TestCase):
fixtures = ["bulk_data.json"]
def setUp(self):
super().setUp()
# Stow.
self.old_ui = connections["elasticsearch"].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = Elasticsearch5AutocompleteMockModelSearchIndex()
self.ui.build(indexes=[self.smmi])
connections["elasticsearch"]._index = self.ui
self.sqs = SearchQuerySet("elasticsearch")
# Wipe it clean.
clear_elasticsearch_index()
# Reboot the schema.
self.sb = connections["elasticsearch"].get_backend()
self.sb.setup()
self.smmi.update(using="elasticsearch")
def tearDown(self):
# Restore.
connections["elasticsearch"]._index = self.old_ui
super().tearDown()
def test_build_schema(self):
self.sb = connections["elasticsearch"].get_backend()
content_name, mapping = self.sb.build_schema(self.ui.all_searchfields())
self.assertEqual(
mapping,
{
"django_id": {
"index": "not_analyzed",
"type": "string",
"include_in_all": False,
},
"django_ct": {
"index": "not_analyzed",
"type": "string",
"include_in_all": False,
},
"name_auto": {"type": "string", "analyzer": "edgengram_analyzer"},
"text": {"type": "string", "analyzer": "snowball"},
"pub_date": {"type": "date"},
"name": {"type": "string", "analyzer": "snowball"},
"text_auto": {"type": "string", "analyzer": "edgengram_analyzer"},
},
)
def test_autocomplete(self):
autocomplete = self.sqs.autocomplete(text_auto="mod")
self.assertEqual(autocomplete.count(), 16)
self.assertEqual(
set([result.pk for result in autocomplete]),
{
"1",
"12",
"6",
"14",
"7",
"4",
"23",
"17",
"13",
"18",
"20",
"22",
"19",
"15",
"10",
"2",
},
)
self.assertTrue("mod" in autocomplete[0].text.lower())
self.assertTrue("mod" in autocomplete[1].text.lower())
self.assertTrue("mod" in autocomplete[6].text.lower())
self.assertTrue("mod" in autocomplete[9].text.lower())
self.assertTrue("mod" in autocomplete[13].text.lower())
self.assertEqual(len([result.pk for result in autocomplete]), 16)
# Test multiple words.
autocomplete_2 = self.sqs.autocomplete(text_auto="your mod")
self.assertEqual(autocomplete_2.count(), 13)
self.assertEqual(
set([result.pk for result in autocomplete_2]),
{"1", "6", "2", "14", "12", "13", "10", "19", "4", "20", "23", "22", "15"},
)
map_results = {result.pk: result for result in autocomplete_2}
self.assertTrue("your" in map_results["1"].text.lower())
self.assertTrue("mod" in map_results["1"].text.lower())
self.assertTrue("your" in map_results["6"].text.lower())
self.assertTrue("mod" in map_results["6"].text.lower())
self.assertTrue("your" in map_results["2"].text.lower())
self.assertEqual(len([result.pk for result in autocomplete_2]), 13)
# Test multiple fields.
autocomplete_3 = self.sqs.autocomplete(text_auto="Django", name_auto="dan")
self.assertEqual(autocomplete_3.count(), 4)
self.assertEqual(
set([result.pk for result in autocomplete_3]), {"12", "1", "22", "14"}
)
self.assertEqual(len([result.pk for result in autocomplete_3]), 4)
# Test numbers in phrases
autocomplete_4 = self.sqs.autocomplete(text_auto="Jen 867")
self.assertEqual(autocomplete_4.count(), 1)
self.assertEqual(set([result.pk for result in autocomplete_4]), {"20"})
# Test numbers alone
autocomplete_4 = self.sqs.autocomplete(text_auto="867")
self.assertEqual(autocomplete_4.count(), 1)
self.assertEqual(set([result.pk for result in autocomplete_4]), {"20"})
| LiveElasticsearch5AutocompleteTestCase |
python | donnemartin__interactive-coding-challenges | recursion_dynamic/longest_inc_subseq/test_longest_increasing_subseq.py | {"start": 18, "end": 611} | class ____(unittest.TestCase):
def test_longest_increasing_subseq(self):
subseq = Subsequence()
self.assertRaises(TypeError, subseq.longest_inc_subseq, None)
self.assertEqual(subseq.longest_inc_subseq([]), [])
seq = [3, 4, -1, 0, 6, 2, 3]
expected = [-1, 0, 2, 3]
self.assertEqual(subseq.longest_inc_subseq(seq), expected)
print('Success: test_longest_increasing_subseq')
def main():
test = TestLongestIncreasingSubseq()
test.test_longest_increasing_subseq()
if __name__ == '__main__':
main()
| TestLongestIncreasingSubseq |
python | openai__openai-python | src/openai/types/beta/realtime/response_done_event.py | {"start": 243, "end": 494} | class ____(BaseModel):
event_id: str
"""The unique ID of the server event."""
response: RealtimeResponse
"""The response resource."""
type: Literal["response.done"]
"""The event type, must be `response.done`."""
| ResponseDoneEvent |
python | kamyu104__LeetCode-Solutions | Python/sort-matrix-by-diagonals.py | {"start": 47, "end": 757} | class ____(object):
def sortMatrix(self, grid):
"""
:type grid: List[List[int]]
:rtype: List[List[int]]
"""
lookup = [[] for _ in xrange((len(grid)-1)+(len(grid[0])-1)-(0-(len(grid[0])-1))+1)]
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
lookup[i-j].append(grid[i][j])
for i in xrange(0-(len(grid[0])-1), (len(grid)-1)+(len(grid[0])-1)+1):
if i < 0:
lookup[i].sort(reverse=True)
else:
lookup[i].sort()
for i in xrange(len(grid)):
for j in xrange(len(grid[0])):
grid[i][j] = lookup[i-j].pop()
return grid
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/overloadOverlap1.py | {"start": 4877, "end": 5394} | class ____:
@overload
def method1(self, x: type) -> bool: ...
@overload
def method1(self, x: Any) -> str | bool: ...
def method1(self, x: Any) -> Any: ...
@overload
def func18(s: Sequence[_T1], extra: Literal[False]) -> list[_T1]: ...
@overload
def func18(s: Sequence[_T1], extra: Literal[True]) -> list[_T1] | tuple[_T1]: ...
@overload
def func18(s: Sequence[_T1], extra: bool) -> list[_T1] | tuple[_T1]: ...
def func18(s: Sequence[_T1], extra: bool) -> list[_T1] | tuple[_T1]: ...
| ClassC |
python | django__django | django/contrib/redirects/admin.py | {"start": 114, "end": 270} | class ____(admin.ModelAdmin):
list_display = ("old_path", "new_path")
list_filter = ("site",)
search_fields = ("old_path", "new_path")
| RedirectAdmin |
python | django__django | tests/generic_views/views.py | {"start": 6469, "end": 6900} | class ____(generic.detail.SingleObjectMixin, generic.View):
model = Book
object = Book(name="dummy")
def get_object(self):
return Book(name="dummy")
def get_context_data(self, **kwargs):
context = {"custom_key": "custom_value"}
context.update(kwargs)
return super().get_context_data(**context)
def get_context_object_name(self, obj):
return "test_name"
| CustomContextView |
python | numba__numba | numba/tests/test_ndarray_subclasses.py | {"start": 2355, "end": 3810} | class ____(types.Array):
def __init__(self, dtype, ndim, layout, readonly=False, aligned=True):
name = f"MyArray({ndim}, {dtype}, {layout})"
super().__init__(dtype, ndim, layout, readonly=readonly,
aligned=aligned, name=name)
def copy(self, *args, **kwargs):
# This is here to future-proof.
# The test here never uses this.
raise NotImplementedError
# Tell Numba typing how to combine MyArrayType with other ndarray types.
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if method == "__call__":
for inp in inputs:
if not isinstance(inp, (types.Array, types.Number)):
return NotImplemented
# Ban if all arguments are MyArrayType
if all(isinstance(inp, MyArrayType) for inp in inputs):
return NotImplemented
return MyArrayType
else:
return NotImplemented
@property
def box_type(self):
return MyArray
@typeof_impl.register(MyArray)
def typeof_ta_ndarray(val, c):
try:
dtype = numpy_support.from_dtype(val.dtype)
except NotImplementedError:
raise ValueError("Unsupported array dtype: %s" % (val.dtype,))
layout = numpy_support.map_layout(val)
readonly = not val.flags.writeable
return MyArrayType(dtype, val.ndim, layout, readonly=readonly)
@register_model(MyArrayType)
| MyArrayType |
python | pytorch__pytorch | torch/export/unflatten.py | {"start": 59848, "end": 70202} | class ____:
"""
Collect the intermediate values of mutations in a graph.
Example: in the following graph, suppose that buf_in and buf_out
are the input and output values of a buffer.
buf_in = placeholder()
...
ival1 = f0(buf_in, ...) # inside self.n0(...)
...
ival2 = f1(ival1, ...) # inside self.n1(...)
...
buf_out = f2(ival2, ...) # inside self.n2(...)
return buf_out, ...
Here ival1 and ival2 are intermediate values created inside
calls to n0 and n1 respectively, and used inside calls to
n1 and n2 respectively.
"""
def __init__(self):
# for each fqn, set of node names corresponding to intermediate values
self.node_names_by_fqn = defaultdict(set)
def _is_mutable(self, target):
if isinstance(target, torch._ops.OpOverload):
return target._schema.is_mutable
return False
def read(self, mf, node):
"""
Read state corresponding to a given intermediate value.
"""
# we can assume that the node must be from a mutation
assert node.op == "call_function"
b = self._is_mutable(node.target)
print("Checking mutability", node.target, b)
if not b:
# so the mutation was functionalized;
# we will apply the original mutation later (see below)
fqn, _ = next(reversed(node.meta["nn_module_stack"].values()))
self.node_names_by_fqn[fqn].add(node.name)
return mf.remap_input(node.args[0])
def update(self, partitions):
"""
Update states corresponding to intermediate values that were read.
"""
for shared_submodules in partitions:
for entry in shared_submodules:
graph = entry.module.graph
node_names = self.node_names_by_fqn[entry.fqn]
nodes = [n for n in graph.nodes if n.name in node_names]
for node in nodes:
# so node must be from a functionalized mutation;
# we perform the original mutation now
with graph.inserting_after(node):
new_node = graph.create_node(
"call_function",
torch.ops.aten.copy_.default,
(node.args[0], node),
)
new_node.meta = copy.copy(node.meta)
def _copy_graph_attrs(
gm: torch.fx.GraphModule,
root_module: UnflattenedModule,
seen_attrs: dict[str, set[str]],
):
for child_fqn, names in seen_attrs.items():
module = _get_attr(root_module, child_fqn) if child_fqn else root_module
for name in names:
val = getattr(gm, name)
setattr(module, name, val)
def _deduplicate_modules(partitions):
redirected_call_indices = {}
for shared_submodules in partitions:
for i, entry in enumerate(shared_submodules):
child_fqn = _call_name(entry.fqn, entry.call_idx)
target = _compute_accessor(entry.parent_fqn, child_fqn)
deduplicated = False
# Iterate over all previously seen modules, and deduplicate if possible
for seen in shared_submodules[:i]:
if _check_graph_equivalence(seen.module, entry.module):
parent = entry.parent_module
# Since graphs are equivalent, we can deduplicate.
# There are two cases.
if seen.fqn == entry.fqn:
# Case 1: The current module has the same fqn as the seen module.
# In this case we have generated a call name that can be optimized away.
# So we remove the current module from the hierarchy and replace
# the current call name with the seen call name in the parent graph.
*prefix, name = target.split(".")
_get_attr_via_attr_list(parent, prefix)._modules.pop(name)
seen_child_fqn = _call_name(seen.fqn, seen.call_idx)
seen_target = _compute_accessor(
entry.parent_fqn, seen_child_fqn
)
entry.parent_call_module.target = seen_target
redirected_call_indices[child_fqn] = seen_child_fqn
break
elif not deduplicated:
# Case 2: The current module has a different fqn than the seen module.
# In this case we replace the current module with the seen module.
# There should be nothing pointing to the current module any more,
# so it can be garbage collected.
# NOTE: We *do not* replace the current call name with the seen call name
# in the parent graph, because this will lose information on which fqn
# was actually called. However, it is possible that the current call name
# will be optimized away when we find another seen module with the same fqn,
# so we do not break out of the loop yet.
parent.set_submodule(target, seen.module)
deduplicated = True
return redirected_call_indices
def _sink_params(
module: torch.nn.Module,
inputs_to_state: dict[str, list[str]],
scope: list[str],
module_id_to_inputs_removed: Optional[dict[int, set[str]]] = None,
):
"""Sink params, buffers, and constants from graph inputs into get_attr nodes.
Exported modules are purely functional, so they pass their parameters and
buffers in as inputs to the graph.
To replicate eager's semantics, we need to get them from the module state
via get_attr instead.
module: GraphModule, potentially containing nested submodules.
inputs_to_state: mapping graph input names to the corresponding key in the state_dict.
scope: tracks where we are in the module hierarchy, so that we can emit the
right `getattr(self, "foo.bar")` calls, etc.
module_id_to_inputs_removed: records inputs removed by child modules, mapping
the module object id to the list of placeholder node names in the child module
that were removed.
"""
if module_id_to_inputs_removed is None:
module_id_to_inputs_removed = defaultdict(set)
if id(module) in module_id_to_inputs_removed:
return {id(module): module_id_to_inputs_removed[id(module)]}
# We need to use _modules here instead of named_children(), because we
# explicitly want duplicate modules to show up in the traversal.
for name, submodule in module._modules.items():
submod_id_to_inputs_removed = _sink_params(
cast("torch.nn.Module", submodule),
inputs_to_state,
scope + [name],
module_id_to_inputs_removed,
)
for k, v in submod_id_to_inputs_removed.items():
module_id_to_inputs_removed[k].update(v)
graph = getattr(module, "graph", None)
if graph is None or len(graph.nodes) == 0:
# Not all modules have graphs defined, if they are empty modules with no operations (like ParameterList)
return module_id_to_inputs_removed
assert isinstance(graph, torch.fx.Graph)
inputs = list(filter(lambda n: n.op == "placeholder", graph.nodes))
the_last_input = None if len(inputs) == 0 else inputs[-1]
# Also remove from call_module nodes
call_module_nodes = filter(lambda n: n.op == "call_module", graph.nodes)
for node in call_module_nodes:
submodule = _get_attr(module, node.target)
# remove placeholder from call_module node arguments, only if we've
# erased the placeholder node in the corresponding _sink_params() call
if submodule is not None and id(submodule) in module_id_to_inputs_removed:
node.args = tuple(
filter(
lambda n: n.name not in module_id_to_inputs_removed[id(submodule)],
node.args,
)
)
# Filter out inputs_to_state corresponding to current scope.
inputs_to_state_of_scope: dict[torch.fx.Node, list[str]] = {}
for node in inputs:
if node.name not in inputs_to_state:
continue
state_name = None
for sn in inputs_to_state[node.name]:
sn_split = sn.split(".")
if sn_split[: len(scope)] == [x.split("@")[0] for x in scope]:
state_name = sn_split
break
# If there's a mismatch between scope name and state name, then
# there must be multiple scopes pointing to the same state name,
# meaning some modules are shared. In such case, we can simply skip
# updating the current node because another later iteration will
# take care of this input node when the unique match between scope
# and state name occurs. To make sure this always happen, we should
# enforce the invariant that no placeholder node in the unflattened
# graph appears in inputs_to_state dict, which means all the extra
# input nodes have been handled.
if state_name is None:
continue
inputs_to_state_of_scope[node] = state_name
# Record name of remove inputs for return purpose.
inputs_removed: set[str] = set()
for node, state_name in inputs_to_state_of_scope.items():
if len(node.users) > 0:
attr_path = state_name[len(scope) :]
state_attr = _get_attr_via_attr_list(module, attr_path)
assert isinstance(state_attr, (torch.Tensor, torch.ScriptObject))
# Make sure the newly created get_attr node is placed after the last placeholder node
with graph.inserting_after(the_last_input):
new_node = graph.create_node("get_attr", ".".join(attr_path))
node.replace_all_uses_with(new_node, propagate_meta=True)
graph.erase_node(node)
inputs_removed.add(node.name)
if isinstance(module, InterpreterModule):
module.finalize()
return {id(module): inputs_removed}
| _IVals |
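`_sink_params` in the row above replaces placeholder inputs that carry parameters with `get_attr` nodes reading the owning module's state. The self-contained `torch.fx` sketch below shows that core move on a toy graph; the module, graph, and names are invented for illustration and are not taken from `torch/export/unflatten.py`.

```python
import torch
import torch.fx as fx

class Linearish(torch.nn.Module):
    """Toy module owning the parameter the graph should read via get_attr."""
    def __init__(self):
        super().__init__()
        self.weight = torch.nn.Parameter(torch.randn(3, 3))

# A "functional" graph that takes the weight as an explicit input,
# mimicking the exported form described in the docstrings above.
graph = fx.Graph()
x = graph.placeholder("x")
w = graph.placeholder("weight")
out = graph.call_function(torch.matmul, (x, w))
graph.output(out)

# Sink the parameter: swap the placeholder for a get_attr node, then drop it.
for node in list(graph.nodes):
    if node.op == "placeholder" and node.name == "weight":
        with graph.inserting_after(node):
            attr = graph.create_node("get_attr", "weight")
        node.replace_all_uses_with(attr)
        graph.erase_node(node)

gm = fx.GraphModule(Linearish(), graph)
print(gm.code)                      # forward(self, x) now reads self.weight
print(gm(torch.randn(2, 3)).shape)  # torch.Size([2, 3])
```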
python | scrapy__scrapy | tests/test_pipeline_files.py | {"start": 12205, "end": 12321} | class ____(TestFilesPipelineFieldsMixin):
item_class = FilesPipelineTestAttrsItem
| TestFilesPipelineFieldsAttrsItem |
python | gevent__gevent | src/greentest/3.11/test_signal.py | {"start": 44229, "end": 51624} | class ____(unittest.TestCase):
"""
Stress signal delivery, especially when a signal arrives in
the middle of recomputing the signal state or executing
previously tripped signal handlers.
"""
def setsig(self, signum, handler):
old_handler = signal.signal(signum, handler)
self.addCleanup(signal.signal, signum, old_handler)
def measure_itimer_resolution(self):
N = 20
times = []
def handler(signum=None, frame=None):
if len(times) < N:
times.append(time.perf_counter())
# 1 µs is the smallest possible timer interval,
# we want to measure what the concrete duration
# will be on this platform
signal.setitimer(signal.ITIMER_REAL, 1e-6)
self.addCleanup(signal.setitimer, signal.ITIMER_REAL, 0)
self.setsig(signal.SIGALRM, handler)
handler()
while len(times) < N:
time.sleep(1e-3)
durations = [times[i+1] - times[i] for i in range(len(times) - 1)]
med = statistics.median(durations)
if support.verbose:
print("detected median itimer() resolution: %.6f s." % (med,))
return med
def decide_itimer_count(self):
# Some systems have poor setitimer() resolution (for example
# measured around 20 ms. on FreeBSD 9), so decide on a reasonable
# number of sequential timers based on that.
reso = self.measure_itimer_resolution()
if reso <= 1e-4:
return 10000
elif reso <= 1e-2:
return 100
else:
self.skipTest("detected itimer resolution (%.3f s.) too high "
"(> 10 ms.) on this platform (or system too busy)"
% (reso,))
@unittest.skipUnless(hasattr(signal, "setitimer"),
"test needs setitimer()")
def test_stress_delivery_dependent(self):
"""
This test uses dependent signal handlers.
"""
N = self.decide_itimer_count()
sigs = []
def first_handler(signum, frame):
# 1e-6 is the minimum non-zero value for `setitimer()`.
# Choose a random delay so as to improve chances of
# triggering a race condition. Ideally the signal is received
# when inside critical signal-handling routines such as
# Py_MakePendingCalls().
signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
def second_handler(signum=None, frame=None):
sigs.append(signum)
# Here on Linux, SIGPROF > SIGALRM > SIGUSR1. By using both
# ascending and descending sequences (SIGUSR1 then SIGALRM,
# SIGPROF then SIGALRM), we maximize chances of hitting a bug.
self.setsig(signal.SIGPROF, first_handler)
self.setsig(signal.SIGUSR1, first_handler)
self.setsig(signal.SIGALRM, second_handler) # for ITIMER_REAL
expected_sigs = 0
deadline = time.monotonic() + support.SHORT_TIMEOUT
while expected_sigs < N:
os.kill(os.getpid(), signal.SIGPROF)
expected_sigs += 1
# Wait for handlers to run to avoid signal coalescing
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
os.kill(os.getpid(), signal.SIGUSR1)
expected_sigs += 1
while len(sigs) < expected_sigs and time.monotonic() < deadline:
time.sleep(1e-5)
# All ITIMER_REAL signals should have been delivered to the
# Python handler
self.assertEqual(len(sigs), N, "Some signals were lost")
@unittest.skipUnless(hasattr(signal, "setitimer"),
"test needs setitimer()")
def test_stress_delivery_simultaneous(self):
"""
This test uses simultaneous signal handlers.
"""
N = self.decide_itimer_count()
sigs = []
def handler(signum, frame):
sigs.append(signum)
self.setsig(signal.SIGUSR1, handler)
self.setsig(signal.SIGALRM, handler) # for ITIMER_REAL
expected_sigs = 0
while expected_sigs < N:
# Hopefully the SIGALRM will be received somewhere during
# initial processing of SIGUSR1.
signal.setitimer(signal.ITIMER_REAL, 1e-6 + random.random() * 1e-5)
os.kill(os.getpid(), signal.SIGUSR1)
expected_sigs += 2
# Wait for handlers to run to avoid signal coalescing
for _ in support.sleeping_retry(support.SHORT_TIMEOUT, error=False):
if len(sigs) >= expected_sigs:
break
# All ITIMER_REAL signals should have been delivered to the
# Python handler
self.assertEqual(len(sigs), N, "Some signals were lost")
@unittest.skipIf(sys.platform == "darwin", "crashes due to system bug (FB13453490)")
@unittest.skipUnless(hasattr(signal, "SIGUSR1"),
"test needs SIGUSR1")
@threading_helper.requires_working_threading()
def test_stress_modifying_handlers(self):
# bpo-43406: race condition between trip_signal() and signal.signal
signum = signal.SIGUSR1
num_sent_signals = 0
num_received_signals = 0
do_stop = False
def custom_handler(signum, frame):
nonlocal num_received_signals
num_received_signals += 1
def set_interrupts():
nonlocal num_sent_signals
while not do_stop:
signal.raise_signal(signum)
num_sent_signals += 1
def cycle_handlers():
while num_sent_signals < 100 or num_received_signals < 1:
for i in range(20000):
# Cycle between a Python-defined and a non-Python handler
for handler in [custom_handler, signal.SIG_IGN]:
signal.signal(signum, handler)
old_handler = signal.signal(signum, custom_handler)
self.addCleanup(signal.signal, signum, old_handler)
t = threading.Thread(target=set_interrupts)
try:
ignored = False
with support.catch_unraisable_exception() as cm:
t.start()
cycle_handlers()
do_stop = True
t.join()
if cm.unraisable is not None:
# An unraisable exception may be printed out when
# a signal is ignored due to the aforementioned
# race condition, check it.
self.assertIsInstance(cm.unraisable.exc_value, OSError)
self.assertIn(
f"Signal {signum:d} ignored due to race condition",
str(cm.unraisable.exc_value))
ignored = True
# bpo-43406: Even if it is unlikely, it's technically possible that
# all signals were ignored because of race conditions.
if not ignored:
# Sanity check that some signals were received, but not all
self.assertGreater(num_received_signals, 0)
self.assertLessEqual(num_received_signals, num_sent_signals)
finally:
do_stop = True
t.join()
| StressTest |
python | tornadoweb__tornado | tornado/websocket.py | {
"start": 51391,
"end": 64092
} | class ____(simple_httpclient._HTTPConnection):
"""WebSocket client connection.
This class should not be instantiated directly; use the
`websocket_connect` function instead.
"""
protocol = None # type: WebSocketProtocol
def __init__(
self,
request: httpclient.HTTPRequest,
on_message_callback: Optional[Callable[[Union[None, str, bytes]], None]] = None,
compression_options: Optional[Dict[str, Any]] = None,
ping_interval: Optional[float] = None,
ping_timeout: Optional[float] = None,
max_message_size: int = _default_max_message_size,
subprotocols: Optional[List[str]] = None,
resolver: Optional[Resolver] = None,
) -> None:
self.connect_future = Future() # type: Future[WebSocketClientConnection]
self.read_queue = Queue(1) # type: Queue[Union[None, str, bytes]]
self.key = base64.b64encode(os.urandom(16))
self._on_message_callback = on_message_callback
self.close_code = None # type: Optional[int]
self.close_reason = None # type: Optional[str]
self.params = _WebSocketParams(
ping_interval=ping_interval,
ping_timeout=ping_timeout,
max_message_size=max_message_size,
compression_options=compression_options,
)
scheme, sep, rest = request.url.partition(":")
scheme = {"ws": "http", "wss": "https"}[scheme]
request.url = scheme + sep + rest
request.headers.update(
{
"Upgrade": "websocket",
"Connection": "Upgrade",
"Sec-WebSocket-Key": to_unicode(self.key),
"Sec-WebSocket-Version": "13",
}
)
if subprotocols is not None:
request.headers["Sec-WebSocket-Protocol"] = ",".join(subprotocols)
if compression_options is not None:
# Always offer to let the server set our max_wbits (and even though
# we don't offer it, we will accept a client_no_context_takeover
# from the server).
# TODO: set server parameters for deflate extension
# if requested in self.compression_options.
request.headers["Sec-WebSocket-Extensions"] = (
"permessage-deflate; client_max_window_bits"
)
# Websocket connection is currently unable to follow redirects
request.follow_redirects = False
self.tcp_client = TCPClient(resolver=resolver)
super().__init__(
None,
request,
lambda: None,
self._on_http_response,
104857600,
self.tcp_client,
65536,
104857600,
)
def __del__(self) -> None:
if self.protocol is not None:
# Unclosed client connections can sometimes log "task was destroyed but
# was pending" warnings if shutdown strikes at the wrong time (such as
# while a ping is being processed due to ping_interval). Log our own
# warning to make it a little more deterministic (although it's still
# dependent on GC timing).
warnings.warn("Unclosed WebSocketClientConnection", ResourceWarning)
def close(self, code: Optional[int] = None, reason: Optional[str] = None) -> None:
"""Closes the websocket connection.
``code`` and ``reason`` are documented under
`WebSocketHandler.close`.
.. versionadded:: 3.2
.. versionchanged:: 4.0
Added the ``code`` and ``reason`` arguments.
"""
if self.protocol is not None:
self.protocol.close(code, reason)
self.protocol = None # type: ignore
def on_connection_close(self) -> None:
if not self.connect_future.done():
self.connect_future.set_exception(StreamClosedError())
self._on_message(None)
self.tcp_client.close()
super().on_connection_close()
def on_ws_connection_close(
self, close_code: Optional[int] = None, close_reason: Optional[str] = None
) -> None:
self.close_code = close_code
self.close_reason = close_reason
self.on_connection_close()
def _on_http_response(self, response: httpclient.HTTPResponse) -> None:
if not self.connect_future.done():
if response.error:
self.connect_future.set_exception(response.error)
else:
self.connect_future.set_exception(
WebSocketError("Non-websocket response")
)
async def headers_received(
self,
start_line: Union[httputil.RequestStartLine, httputil.ResponseStartLine],
headers: httputil.HTTPHeaders,
) -> None:
assert isinstance(start_line, httputil.ResponseStartLine)
if start_line.code != 101:
await super().headers_received(start_line, headers)
return
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
self.headers = headers
self.protocol = self.get_websocket_protocol()
self.protocol._process_server_headers(self.key, self.headers)
self.protocol.stream = self.connection.detach()
IOLoop.current().add_callback(self.protocol._receive_frame_loop)
self.protocol.start_pinging()
# Once we've taken over the connection, clear the final callback
# we set on the http request. This deactivates the error handling
# in simple_httpclient that would otherwise interfere with our
# ability to see exceptions.
self.final_callback = None # type: ignore
future_set_result_unless_cancelled(self.connect_future, self)
def write_message(
self, message: Union[str, bytes, Dict[str, Any]], binary: bool = False
) -> "Future[None]":
"""Sends a message to the WebSocket server.
If the stream is closed, raises `WebSocketClosedError`.
Returns a `.Future` which can be used for flow control.
.. versionchanged:: 5.0
Exception raised on a closed stream changed from `.StreamClosedError`
to `WebSocketClosedError`.
"""
if self.protocol is None:
raise WebSocketClosedError("Client connection has been closed")
return self.protocol.write_message(message, binary=binary)
def read_message(
self,
callback: Optional[Callable[["Future[Union[None, str, bytes]]"], None]] = None,
) -> Awaitable[Union[None, str, bytes]]:
"""Reads a message from the WebSocket server.
If on_message_callback was specified at WebSocket
initialization, this function will never return messages
Returns a future whose result is the message, or None
if the connection is closed. If a callback argument
is given it will be called with the future when it is
ready.
"""
awaitable = self.read_queue.get()
if callback is not None:
self.io_loop.add_future(asyncio.ensure_future(awaitable), callback)
return awaitable
def on_message(self, message: Union[str, bytes]) -> Optional[Awaitable[None]]:
return self._on_message(message)
def _on_message(
self, message: Union[None, str, bytes]
) -> Optional[Awaitable[None]]:
if self._on_message_callback:
self._on_message_callback(message)
return None
else:
return self.read_queue.put(message)
def ping(self, data: bytes = b"") -> None:
"""Send ping frame to the remote end.
The data argument allows a small amount of data (up to 125
bytes) to be sent as a part of the ping message. Note that not
all websocket implementations expose this data to
applications.
Consider using the ``ping_interval`` argument to
`websocket_connect` instead of sending pings manually.
.. versionadded:: 5.1
"""
data = utf8(data)
if self.protocol is None:
raise WebSocketClosedError()
self.protocol.write_ping(data)
def on_pong(self, data: bytes) -> None:
pass
def on_ping(self, data: bytes) -> None:
pass
def get_websocket_protocol(self) -> WebSocketProtocol:
return WebSocketProtocol13(self, mask_outgoing=True, params=self.params)
@property
def selected_subprotocol(self) -> Optional[str]:
"""The subprotocol selected by the server.
.. versionadded:: 5.1
"""
return self.protocol.selected_subprotocol
def log_exception(
self,
typ: "Optional[Type[BaseException]]",
value: Optional[BaseException],
tb: Optional[TracebackType],
) -> None:
assert typ is not None
assert value is not None
app_log.error("Uncaught exception %s", value, exc_info=(typ, value, tb))
def websocket_connect(
url: Union[str, httpclient.HTTPRequest],
callback: Optional[Callable[["Future[WebSocketClientConnection]"], None]] = None,
connect_timeout: Optional[float] = None,
on_message_callback: Optional[Callable[[Union[None, str, bytes]], None]] = None,
compression_options: Optional[Dict[str, Any]] = None,
ping_interval: Optional[float] = None,
ping_timeout: Optional[float] = None,
max_message_size: int = _default_max_message_size,
subprotocols: Optional[List[str]] = None,
resolver: Optional[Resolver] = None,
) -> "Awaitable[WebSocketClientConnection]":
"""Client-side websocket support.
Takes a url and returns a Future whose result is a
`WebSocketClientConnection`.
``compression_options`` is interpreted in the same way as the
return value of `.WebSocketHandler.get_compression_options`.
The connection supports two styles of operation. In the coroutine
style, the application typically calls
`~.WebSocketClientConnection.read_message` in a loop::
conn = yield websocket_connect(url)
while True:
msg = yield conn.read_message()
if msg is None: break
# Do something with msg
In the callback style, pass an ``on_message_callback`` to
``websocket_connect``. In both styles, a message of ``None``
indicates that the connection has been closed.
``subprotocols`` may be a list of strings specifying proposed
subprotocols. The selected protocol may be found on the
``selected_subprotocol`` attribute of the connection object
when the connection is complete.
.. versionchanged:: 3.2
Also accepts ``HTTPRequest`` objects in place of urls.
.. versionchanged:: 4.1
Added ``compression_options`` and ``on_message_callback``.
.. versionchanged:: 4.5
Added the ``ping_interval``, ``ping_timeout``, and ``max_message_size``
arguments, which have the same meaning as in `WebSocketHandler`.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. versionchanged:: 5.1
Added the ``subprotocols`` argument.
.. versionchanged:: 6.3
Added the ``resolver`` argument.
.. deprecated:: 6.5
The ``callback`` argument is deprecated and will be removed in Tornado 7.0.
Use the returned Future instead. Note that ``on_message_callback`` is not
deprecated and may still be used.
"""
if isinstance(url, httpclient.HTTPRequest):
assert connect_timeout is None
request = url
# Copy and convert the headers dict/object (see comments in
# AsyncHTTPClient.fetch)
request.headers = httputil.HTTPHeaders(request.headers)
else:
request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
request = cast(
httpclient.HTTPRequest,
httpclient._RequestProxy(request, httpclient.HTTPRequest._DEFAULTS),
)
conn = WebSocketClientConnection(
request,
on_message_callback=on_message_callback,
compression_options=compression_options,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
max_message_size=max_message_size,
subprotocols=subprotocols,
resolver=resolver,
)
if callback is not None:
warnings.warn(
"The callback argument to websocket_connect is deprecated. "
"Use the returned Future instead.",
DeprecationWarning,
stacklevel=2,
)
IOLoop.current().add_future(conn.connect_future, callback)
return conn.connect_future
| WebSocketClientConnection |
python | huggingface__transformers | src/transformers/models/dac/modeling_dac.py | {
"start": 11153,
"end": 17769
} | class ____(nn.Module):
"""
ResidualVectorQuantize block - Introduced in SoundStream: An end2end neural audio codec (https://huggingface.co/papers/2107.03312)
"""
def __init__(self, config: DacConfig):
super().__init__()
n_codebooks = config.n_codebooks
quantizer_dropout = config.quantizer_dropout
self.n_codebooks = n_codebooks
self.quantizers = nn.ModuleList([DacVectorQuantize(config) for i in range(config.n_codebooks)])
self.quantizer_dropout = quantizer_dropout
def forward(self, hidden_state, n_quantizers: Optional[int] = None):
"""
Quantizes the input tensor using a fixed set of codebooks and returns corresponding codebook vectors.
Args:
hidden_state (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
Input tensor to be quantized.
n_quantizers (`int`, *optional*):
Number of quantizers to use. If specified and `self.quantizer_dropout` is True,
this argument is ignored during training, and a random number of quantizers is used.
Returns:
quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
Quantized continuous representation of input.
audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`):
Codebook indices for each codebook (quantized discrete representation of input).
projected_latents (`torch.Tensor` of shape `(batch_size, num_codebooks * dimension, time_steps)`):
Projected latents (continuous representation of input before quantization).
commitment_loss (`torch.Tensor` of shape `(1)`):
Commitment loss to train the encoder to predict vectors closer to codebook entries.
codebook_loss (`torch.Tensor` of shape `(1)`):
Codebook loss to update the codebook.
"""
quantized_representation = 0
residual = hidden_state
commitment_loss = 0
codebook_loss = 0
audio_codes = []
projected_latents = []
n_quantizers = n_quantizers if n_quantizers is not None else self.n_codebooks
if self.training:
n_quantizers = torch.ones((hidden_state.shape[0],)) * self.n_codebooks + 1
dropout = torch.randint(1, self.n_codebooks + 1, (hidden_state.shape[0],))
n_dropout = int(hidden_state.shape[0] * self.quantizer_dropout)
n_quantizers[:n_dropout] = dropout[:n_dropout]
n_quantizers = n_quantizers.to(hidden_state.device)
for i, quantizer in enumerate(self.quantizers):
if self.training is False and i >= n_quantizers:
break
quantized_representation_i, commitment_loss_i, codebook_loss_i, indices_i, projected_latents_i = quantizer(
residual
)
# Create mask to apply quantizer dropout
mask = torch.full((hidden_state.shape[0],), fill_value=i, device=hidden_state.device) < n_quantizers
quantized_representation = quantized_representation + quantized_representation_i * mask[:, None, None]
residual = residual - quantized_representation_i
# Sum losses
commitment_loss += commitment_loss_i * mask
codebook_loss += codebook_loss_i * mask
audio_codes.append(indices_i)
projected_latents.append(projected_latents_i)
audio_codes = torch.stack(audio_codes, dim=1)
projected_latents = torch.cat(projected_latents, dim=1)
return quantized_representation, audio_codes, projected_latents, commitment_loss, codebook_loss
def from_codes(self, audio_codes: torch.Tensor):
"""
Reconstructs the continuous representation from quantized codes.
Args:
audio_codes (`torch.Tensor` of shape `(batch_size, num_codebooks, time_steps)`):
Quantized discrete representation of input.
Returns:
quantized_representation (`torch.Tensor`):
Quantized continuous representation of input.
projected_latents (`torch.Tensor`):
List of projected latents (continuous representations of input before quantization)
for each codebook.
audio_codes (`torch.Tensor`):
Codebook indices for each codebook.
"""
quantized_representation = 0.0
projected_latents = []
n_codebooks = audio_codes.shape[1]
for i in range(n_codebooks):
projected_latents_i = self.quantizers[i].codebook(audio_codes[:, i, :]).transpose(1, 2)
projected_latents.append(projected_latents_i)
quantized_representation += self.quantizers[i].out_proj(projected_latents_i)
return quantized_representation, torch.cat(projected_latents, dim=1), audio_codes
def from_latents(self, latents: torch.Tensor):
"""Reconstructs the quantized representation from unquantized latents.
Args:
latents (`torch.Tensor` of shape `(batch_size, total_latent_dimension, time_steps)`):
Continuous representation of input after projection.
Returns:
quantized_representation (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
Quantized representation of the full-projected space.
quantized_latents (`torch.Tensor` of shape `(batch_size, dimension, time_steps)`):
Quantized representation of the latent space (continuous representation before quantization).
"""
quantized_representation = 0
quantized_latents = []
codes = []
codebook_dims_tensor = torch.tensor([0] + [q.codebook_dim for q in self.quantizers])
dims = torch.cumsum(codebook_dims_tensor, dim=0)
n_codebooks = np.where(dims <= latents.shape[1])[0].max(axis=0, keepdims=True)[0]
for i in range(n_codebooks):
hidden_dim_j, hidden_dim_k = dims[i], dims[i + 1]
quantized_latents_i, codes_i = self.quantizers[i].decode_latents(latents[:, hidden_dim_j:hidden_dim_k, :])
quantized_latents.append(quantized_latents_i)
codes.append(codes_i)
quantized_representation_i = self.quantizers[i].out_proj(quantized_latents_i)
quantized_representation = quantized_representation + quantized_representation_i
return quantized_representation, torch.cat(quantized_latents, dim=1)
| DacResidualVectorQuantize |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/qr_op_test.py | {
"start": 1742,
"end": 6358
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testWrongDimensions(self):
# The input to qr should be a tensor of at least rank 2.
scalar = constant_op.constant(1.)
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"rank.* 2.*0"):
linalg_ops.qr(scalar)
vector = constant_op.constant([1., 2.])
with self.assertRaisesRegex((ValueError, errors_impl.InvalidArgumentError),
"rank.* 2.*1"):
linalg_ops.qr(vector)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def testConcurrentExecutesWithoutError(self):
seed = [42, 24]
all_ops = []
for full_matrices_ in True, False:
for rows_ in 4, 5:
for cols_ in 4, 5:
matrix_shape = [rows_, cols_]
matrix1 = stateless_random_ops.stateless_random_normal(
matrix_shape, seed)
matrix2 = stateless_random_ops.stateless_random_normal(
matrix_shape, seed)
self.assertAllEqual(matrix1, matrix2)
q1, r1 = linalg_ops.qr(matrix1, full_matrices=full_matrices_)
q2, r2 = linalg_ops.qr(matrix2, full_matrices=full_matrices_)
all_ops += [q1, q2, r1, r2]
val = self.evaluate(all_ops)
for i in range(0, len(val), 2):
self.assertAllClose(val[i], val[i + 1])
def _GetQrOpTest(dtype_, shape_, full_matrices_, use_static_shape_):
is_complex = dtype_ in (np.complex64, np.complex128)
is_single = dtype_ in (np.float32, np.complex64)
def CompareOrthogonal(self, x, y, rank):
if is_single:
atol = 5e-4
else:
atol = 5e-14
# We only compare the first 'rank' orthogonal vectors since the
# remainder form an arbitrary orthonormal basis for the
# (row- or column-) null space, whose exact value depends on
# implementation details. Notice that since we check that the
# matrices of singular vectors are unitary elsewhere, we do
# implicitly test that the trailing vectors of x and y span the
# same space.
x = x[..., 0:rank]
y = y[..., 0:rank]
# Q is only unique up to sign (complex phase factor for complex matrices),
# so we normalize the sign first.
sum_of_ratios = np.sum(np.divide(y, x), -2, keepdims=True)
phases = np.divide(sum_of_ratios, np.abs(sum_of_ratios))
x *= phases
self.assertAllClose(x, y, atol=atol)
def CheckApproximation(self, a, q, r):
if is_single:
tol = 1e-5
else:
tol = 1e-14
# Tests that a ~= q*r.
a_recon = test_util.matmul_without_tf32(q, r)
self.assertAllClose(a_recon, a, rtol=tol, atol=tol)
def CheckUnitary(self, x):
# Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
xx = test_util.matmul_without_tf32(x, x, adjoint_a=True)
identity = array_ops.matrix_band_part(array_ops.ones_like(xx), 0, 0)
if is_single:
tol = 1e-5
else:
tol = 1e-14
self.assertAllClose(identity, xx, atol=tol)
@test_util.run_in_graph_and_eager_modes(use_gpu=True)
def Test(self):
if not use_static_shape_ and context.executing_eagerly():
return
np.random.seed(1)
x_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if is_complex:
x_np += 1j * np.random.uniform(
low=-1.0, high=1.0,
size=np.prod(shape_)).reshape(shape_).astype(dtype_)
if use_static_shape_:
x_tf = constant_op.constant(x_np)
else:
x_tf = array_ops.placeholder(dtype_)
q_tf, r_tf = linalg_ops.qr(x_tf, full_matrices=full_matrices_)
if use_static_shape_:
q_tf_val, r_tf_val = self.evaluate([q_tf, r_tf])
else:
with self.session() as sess:
q_tf_val, r_tf_val = sess.run([q_tf, r_tf], feed_dict={x_tf: x_np})
q_dims = q_tf_val.shape
np_q = np.ndarray(q_dims, dtype_)
np_q_reshape = np.reshape(np_q, (-1, q_dims[-2], q_dims[-1]))
new_first_dim = np_q_reshape.shape[0]
x_reshape = np.reshape(x_np, (-1, x_np.shape[-2], x_np.shape[-1]))
for i in range(new_first_dim):
if full_matrices_:
np_q_reshape[i, :, :], _ = np.linalg.qr(
x_reshape[i, :, :], mode="complete")
else:
np_q_reshape[i, :, :], _ = np.linalg.qr(
x_reshape[i, :, :], mode="reduced")
np_q = np.reshape(np_q_reshape, q_dims)
CompareOrthogonal(self, np_q, q_tf_val, min(shape_[-2:]))
CheckApproximation(self, x_np, q_tf_val, r_tf_val)
CheckUnitary(self, q_tf_val)
return Test
| QrOpTest |
python | ray-project__ray | python/ray/tune/integration/pytorch_lightning.py | {
"start": 2436,
"end": 6224
} | class ____(TuneCallback):
"""PyTorch Lightning report and checkpoint callback
Saves checkpoints after each validation step. Also reports metrics to Tune,
which is needed for checkpoint registration.
Args:
metrics: Metrics to report to Tune. If this is a list,
each item describes the metric key reported to PyTorch Lightning,
and it will reported under the same name to Tune. If this is a
dict, each key will be the name reported to Tune and the respective
value will be the metric key reported to PyTorch Lightning.
filename: Filename of the checkpoint within the checkpoint
directory. Defaults to "checkpoint".
save_checkpoints: If True (default), checkpoints will be saved and
reported to Ray. If False, only metrics will be reported.
on: When to trigger checkpoint creations and metric reports. Must be one of
the PyTorch Lightning event hooks (less the ``on_``), e.g.
"train_batch_start", or "train_end". Defaults to "validation_end".
Example:
.. code-block:: python
import pytorch_lightning as pl
from ray.tune.integration.pytorch_lightning import (
TuneReportCheckpointCallback)
# Save checkpoint after each training batch and after each
# validation epoch.
trainer = pl.Trainer(callbacks=[TuneReportCheckpointCallback(
metrics={"loss": "val_loss", "mean_accuracy": "val_acc"},
filename="trainer.ckpt", on="validation_end")])
"""
def __init__(
self,
metrics: Optional[Union[str, List[str], Dict[str, str]]] = None,
filename: str = "checkpoint",
save_checkpoints: bool = True,
on: Union[str, List[str]] = "validation_end",
):
super(TuneReportCheckpointCallback, self).__init__(on=on)
if isinstance(metrics, str):
metrics = [metrics]
self._save_checkpoints = save_checkpoints
self._filename = filename
self._metrics = metrics
def _get_report_dict(self, trainer: Trainer, pl_module: LightningModule):
# Don't report if just doing initial validation sanity checks.
if trainer.sanity_checking:
return
if not self._metrics:
report_dict = {k: v.item() for k, v in trainer.callback_metrics.items()}
else:
report_dict = {}
for key in self._metrics:
if isinstance(self._metrics, dict):
metric = self._metrics[key]
else:
metric = key
if metric in trainer.callback_metrics:
report_dict[key] = trainer.callback_metrics[metric].item()
else:
logger.warning(
f"Metric {metric} does not exist in "
"`trainer.callback_metrics."
)
return report_dict
@contextmanager
def _get_checkpoint(self, trainer: Trainer) -> Optional[Checkpoint]:
if not self._save_checkpoints:
yield None
return
with tempfile.TemporaryDirectory() as checkpoint_dir:
trainer.save_checkpoint(os.path.join(checkpoint_dir, self._filename))
checkpoint = Checkpoint.from_directory(checkpoint_dir)
yield checkpoint
def _handle(self, trainer: Trainer, pl_module: LightningModule):
if trainer.sanity_checking:
return
report_dict = self._get_report_dict(trainer, pl_module)
if not report_dict:
return
with self._get_checkpoint(trainer) as checkpoint:
ray.tune.report(report_dict, checkpoint=checkpoint)
| TuneReportCheckpointCallback |
python | pytorch__pytorch | torch/_dynamo/utils.py | {
"start": 168577,
"end": 169485
} | class ____:
_counter: int = 0
_id: int = -1
_depth = 0
@classmethod
def start(cls) -> None:
cls._depth = cls._depth + 1
if cls._depth == 1:
cls._id = _instruction_counter.start()
@classmethod
def end(cls) -> None:
cls._depth = cls._depth - 1
if cls._depth == 0:
cls._counter += _instruction_counter.end(cls._id)
cls._id = -1
@classmethod
def clear(cls) -> None:
cls._counter = 0
@classmethod
def value(cls) -> int:
return cls._counter
@classmethod
@contextmanager
def record(cls) -> Generator[None, None, None]:
try:
if config.record_compile_time_instruction_count:
cls.start()
yield
finally:
if config.record_compile_time_instruction_count:
cls.end()
| CompileTimeInstructionCounter |
python | getsentry__sentry | src/sentry/core/endpoints/project_details.py | {
"start": 3680,
"end": 4113
} | class ____(serializers.Serializer):
id = serializers.ChoiceField(required=True, choices=get_supported_biases_ids())
active = serializers.BooleanField(default=False)
def validate(self, data):
if data.keys() != {"id", "active"}:
raise serializers.ValidationError(
"Error: Only 'id' and 'active' fields are allowed for bias."
)
return data
| DynamicSamplingBiasSerializer |
python | realpython__materials | inheritance-and-composition/composition/productivity.py | {
"start": 959,
"end": 1073
} | class ____:
def perform_duties(self, hours):
return f"manufactures gadgets for {hours} hours."
| FactoryRole |
python | PyCQA__pylint | tests/functional/m/mixin_class_rgx.py | {
"start": 880,
"end": 1084
} | class ____:
"""Class that does match the option pattern"""
def set_attribute(self):
"""Set an attribute outside of __init__"""
self.attr = 1
# Tests for no-member
| OutsideInitMixin |
python | streamlit__streamlit | lib/tests/streamlit/elements/markdown_test.py | {
"start": 8707,
"end": 12487
} | class ____(DeltaGeneratorTestCase):
"""Test st.badge API."""
def test_st_badge(self):
"""Test st.badge with all parameters."""
# Test with all parameters
st.badge(
"Badge with all params",
icon=":material/warning:",
color="red",
)
el = self.get_delta_from_queue().new_element
assert (
el.markdown.body == ":red-badge[:material/warning: Badge with all params]"
)
assert not el.markdown.allow_html
# Test with default parameters
st.badge("Simple badge")
el = self.get_delta_from_queue().new_element
assert el.markdown.body == ":blue-badge[Simple badge]"
def test_st_badge_with_width(self):
"""Test st.badge with different width types."""
test_cases = [
(200, WidthConfigFields.PIXEL_WIDTH.value, "pixel_width", 200),
("stretch", WidthConfigFields.USE_STRETCH.value, "use_stretch", True),
("content", WidthConfigFields.USE_CONTENT.value, "use_content", True),
]
for width_value, expected_width_spec, field_name, field_value in test_cases:
with self.subTest(width_value=width_value):
st.badge("test badge", width=width_value)
el = self.get_delta_from_queue().new_element
assert el.markdown.body == ":blue-badge[test badge]"
assert el.width_config.WhichOneof("width_spec") == expected_width_spec
assert getattr(el.width_config, field_name) == field_value
def test_st_badge_with_invalid_width(self):
"""Test st.badge with invalid width values."""
test_cases = [
(
"invalid",
"Invalid width value: 'invalid'. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
-25,
"Invalid width value: -25. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
0,
"Invalid width value: 0. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
(
50.7,
"Invalid width value: 50.7. Width must be either an integer (pixels), 'stretch', or 'content'.",
),
]
for width_value, expected_error_message in test_cases:
with self.subTest(width_value=width_value):
with pytest.raises(StreamlitAPIException) as exc:
st.badge("test badge", width=width_value)
assert str(exc.value) == expected_error_message
def test_st_badge_default_width(self):
"""Test that st.badge defaults to content width."""
st.badge("test badge")
el = self.get_delta_from_queue().new_element
assert el.markdown.body == ":blue-badge[test badge]"
assert (
el.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_CONTENT.value
)
assert el.width_config.use_content is True
def test_st_badge_with_help(self):
"""Test st.badge with help parameter."""
st.badge("Badge with help", help="Tooltip text")
el = self.get_delta_from_queue().new_element
assert el.markdown.body == ":blue-badge[Badge with help]"
assert el.markdown.help == "Tooltip text"
def test_st_badge_help_not_set_when_none(self):
"""Test that st.badge does not set help when help is None."""
st.badge("Badge without help")
el = self.get_delta_from_queue().new_element
assert el.markdown.body == ":blue-badge[Badge without help]"
assert not getattr(el.markdown, "help", None)
| StBadgeAPITest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE796.py | {
"start": 514,
"end": 588
} | class ____(enum.Enum):
A = "A"
B = "B"
C = "C"
@unique
| FakeEnum7 |
python | walkccc__LeetCode | solutions/2975. Maximum Square Area by Removing Fences From a Field/2975.py | {
"start": 0,
"end": 628
} | class ____:
def maximizeSquareArea(
self,
m: int,
n: int,
hFences: list[int],
vFences: list[int],
) -> int:
hFences = sorted(hFences + [1, m])
vFences = sorted(vFences + [1, n])
hGaps = {hFences[i] - hFences[j]
for i in range(len(hFences))
for j in range(i)}
vGaps = {vFences[i] - vFences[j]
for i in range(len(vFences))
for j in range(i)}
maxGap = next((hGap
for hGap in sorted(hGaps, reverse=True)
if hGap in vGaps), -1)
return -1 if maxGap == -1 else maxGap**2 % (10**9 + 7)
| Solution |
python | walkccc__LeetCode | solutions/228. Summary Ranges/228.py | {
"start": 0,
"end": 385
} | class ____:
def summaryRanges(self, nums: list[int]) -> list[str]:
ans = []
i = 0
while i < len(nums):
begin = nums[i]
while i < len(nums) - 1 and nums[i] == nums[i + 1] - 1:
i += 1
end = nums[i]
if begin == end:
ans.append(str(begin))
else:
ans.append(str(begin) + "->" + str(end))
i += 1
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/saved_model/load_test.py | {
"start": 5585,
"end": 97818
} | class ____(test.TestCase, parameterized.TestCase):
def test_structure_import(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.dep_one = autotrackable.AutoTrackable()
root.dep_two = autotrackable.AutoTrackable()
root.dep_two.dep = autotrackable.AutoTrackable()
root.dep_three = root.dep_two.dep
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertIs(imported.dep_three, imported.dep_two.dep)
self.assertIsNot(imported.dep_one, imported.dep_two)
@test_util.run_in_graph_and_eager_modes
def test_variables(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.v1 = variables.Variable(1.0, trainable=True)
root.v2 = variables.Variable(2.0, trainable=False)
self.evaluate([root.v1.initializer, root.v2.initializer])
for _ in range(cycles):
imported = cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
self.evaluate([imported.v1.initializer, imported.v2.initializer])
if not context.executing_eagerly():
self.assertIsInstance(imported.v1.initializer, ops.Operation)
self.assertIsInstance(imported.v2.initializer, ops.Operation)
self.assertEqual(self.evaluate(imported.v1), 1.0)
self.assertTrue(imported.v1.trainable)
self.assertEqual(self.evaluate(imported.v2), 2.0)
self.assertFalse(imported.v2.trainable)
def test_variables_name(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
# Test 2 variables with same name: should work as the checkpoint
# is based on object name and not on variable name.
root.v1 = variables.Variable(1.0, trainable=True, name="v1")
root.v2 = variables.Variable(2.0, trainable=False, name="v1")
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(imported.v1.numpy(), 1.0)
self.assertEqual(imported.v2.numpy(), 2.0)
self.assertEqual(imported.v1.name, root.v1.name)
self.assertEqual(imported.v2.name, root.v2.name)
with variable_scope.variable_scope("foo"):
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertTrue(imported.v1.name.startswith("foo/"))
self.assertTrue(imported.v2.name.startswith("foo/"))
@test_util.disable_xla("This test never passed for XLA")
def test_partially_defined_variable_shape(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class MakeVariable(module.Module):
def __init__(self):
self.v = None
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int64)]
)
def make_variable(self, initial_value):
if self.v is None:
self.v = variables.Variable(initial_value)
m = MakeVariable()
m.make_variable([1, 2, 3])
m = cycle(m, cycles, use_cpp_bindings=use_cpp_bindings)
m.v.assign([1, 2, 3, 4])
self.assertEqual([None], tensor_shape.as_shape(m.v.shape).as_list())
@test_util.run_in_graph_and_eager_modes
def test_capture_variables(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.weights = variables.Variable(2.0)
self.evaluate(root.weights.initializer)
root.f = def_function.function(
lambda x: root.weights * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
)
for _ in range(cycles):
imported = cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
self.evaluate(imported.weights.initializer)
self.assertEqual(4.0, self.evaluate(imported.f(constant_op.constant(2.0))))
self.evaluate(imported.weights.assign(4.0))
self.assertEqual(8.0, self.evaluate(imported.f(constant_op.constant(2.0))))
@test_util.run_in_graph_and_eager_modes
def test_capture_constant(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
captured_constant = constant_op.constant(2.0)
root.f = def_function.function(
lambda x: captured_constant * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(4.0, self.evaluate(imported.f(constant_op.constant(2.0))))
def test_control_outputs(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
exported = autotrackable.AutoTrackable()
exported.v = variables.Variable(1.0)
exported.f = def_function.function(
lambda: exported.v.assign(2.0, name="should_be_control_output")
)
exported_graph = exported.f.get_concrete_function().graph
self.assertIn(
exported_graph.get_operation_by_name("should_be_control_output"),
exported_graph.control_outputs,
)
imported = cycle(exported, cycles, use_cpp_bindings=use_cpp_bindings)
# Calling get_concrete_function wraps in a second call operation; we want to
# inspect the original function body for the control output; digging into
# graph.as_graph_def() and its FunctionDefLibrary is another option.
(imported_concrete,) = imported.f.concrete_functions
imported_graph = imported_concrete.graph
self.assertIn(
imported_graph.get_operation_by_name("should_be_control_output"),
imported_graph.control_outputs,
)
def _make_asset(self, contents):
fd, filename = tempfile.mkstemp(prefix=self.get_temp_dir())
with os.fdopen(fd, "w") as f:
f.write(contents)
return filename
@test_util.run_in_graph_and_eager_modes
def test_assets(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
file1 = self._make_asset("contents 1")
file2 = self._make_asset("contents 2")
root = autotrackable.AutoTrackable()
root.asset1 = asset.Asset(file1)
root.asset2 = asset.Asset(file2)
save_dir = os.path.join(self.get_temp_dir(), "save_dir")
save.save(root, save_dir)
file_io.delete_file(file1)
file_io.delete_file(file2)
load_dir = os.path.join(self.get_temp_dir(), "load_dir")
file_io.rename(save_dir, load_dir)
imported = test_load(load_dir, use_cpp_bindings=use_cpp_bindings)
with open(self.evaluate(imported.asset1.asset_path), "r") as f:
self.assertEqual("contents 1", f.read())
with open(self.evaluate(imported.asset2.asset_path), "r") as f:
self.assertEqual("contents 2", f.read())
def test_cond_prune(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
x_in = []
x_out = []
def f(x, y):
x_in.append(x)
xx = cond_v2.cond_v2(
math_ops.less(1, 2),
lambda: x + 1,
lambda: x + 2,
)
x_out.append(xx)
return xx, 2 * y
f_wrapped = wrap_function.wrap_function(
f, [tensor_spec.TensorSpec((), dtypes.float32)] * 2
)
f_pruned = f_wrapped.prune(x_in[0], [x_out[0]])
class Adder(module.Module):
@def_function.function(
input_signature=[
tensor_spec.TensorSpec(shape=None, dtype=dtypes.float32)
]
)
def add(self, x):
return f_pruned(x)
root = Adder()
root.add(constant_op.constant(1.0))
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
root.add(constant_op.constant(1.0))
def test_capture_assets(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.vocab = asset.Asset(self._make_asset("contents"))
root.f = def_function.function(
lambda: root.vocab.asset_path, input_signature=[]
)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
original_output = root.f().numpy()
imported_output = imported.f().numpy()
self.assertNotEqual(original_output, imported_output)
with open(imported_output, "r") as f:
self.assertEqual("contents", f.read())
def test_capture_assets_in_graph(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.vocab = asset.Asset(self._make_asset("contents"))
root.f = def_function.function(
lambda: root.vocab.asset_path, input_signature=[]
)
original_output = root.f().numpy()
if cycles > 1:
root = cycle(root, cycles - 1, use_cpp_bindings=use_cpp_bindings)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with ops.Graph().as_default():
imported = test_load(path, use_cpp_bindings=use_cpp_bindings)
imported_tensor = imported.f()
with monitored_session.MonitoredSession() as sess:
imported_output = sess.run(imported_tensor)
self.assertLen(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS), 1)
self.assertNotEqual(original_output, imported_output)
with open(imported_output, "r") as f:
self.assertEqual("contents", f.read())
def test_dedup_assets(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
vocab = self._make_asset("contents")
root = autotrackable.AutoTrackable()
root.asset1 = asset.Asset(vocab)
root.asset2 = asset.Asset(vocab)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(
imported.asset1.asset_path.numpy(), imported.asset2.asset_path.numpy()
)
def test_asset_fspath(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
vocab = pathlib.Path(self._make_asset("contents"))
root = autotrackable.AutoTrackable()
root.asset = asset.Asset(vocab)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertTrue(hasattr(imported, "asset"))
def test_implicit_input_signature(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function
def func(x):
return 2 * x
root = autotrackable.AutoTrackable()
root.f = func
# Add two traces.
root.f(constant_op.constant(1.0))
root.f(constant_op.constant(1))
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(4.0, imported.f(constant_op.constant(2.0)).numpy())
self.assertEqual(14, imported.f(constant_op.constant(7)).numpy())
def test_explicit_input_signature(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
)
def func(x):
return 2 * x
root = autotrackable.AutoTrackable()
root.f = func
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(4.0, imported.f(constant_op.constant(2.0)).numpy())
def test_explicit_save_signature(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function
def func(x):
return 2 * x
root = autotrackable.AutoTrackable()
root.f = func
imported = cycle(
root,
cycles,
signatures={
"f": root.f.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32)
)
},
use_cpp_bindings=use_cpp_bindings,
)
self.assertEqual(4.0, imported.f(constant_op.constant(2.0)).numpy())
def test_nested_functions(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
f = def_function.function(
lambda x: x * 2.0,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
)
g = def_function.function(
lambda x: f(x) + 1.0,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
)
root = autotrackable.AutoTrackable()
root.g = g
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
imported.g(constant_op.constant([1.0]))
def test_function_with_default_bool_input(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def func(x, training=False):
if training:
return 2 * x
else:
return 7
root = autotrackable.AutoTrackable()
root.f = def_function.function(func)
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
def test_function_with_defaults_input_tensor(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function(input_signature=[tensor_spec.TensorSpec([])])
def func(x=constant_op.constant(5.0)):
return x
root = autotrackable.AutoTrackable()
root.f = func
self.assertAllEqual(5.0, root.f())
self.assertAllEqual(7.0, root.f(7.0))
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(5.0, imported.f().numpy())
self.assertEqual(7.0, imported.f(constant_op.constant(7.0)).numpy())
# imported.signatures with defaults are not supported.
# TODO(b/277814477) support defaults in loaded.signatures
# self.assertEqual(
# {"output_0": 5.0},
# self.evaluate(
# imported.signatures["serving_default"]()
# ),
# )
def test_function_with_defaults_input_numpy(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function(input_signature=[tensor_spec.TensorSpec([])])
def func(x=np.array(5.0)):
return x
root = autotrackable.AutoTrackable()
root.f = func
self.assertAllEqual(5.0, root.f())
self.assertAllEqual(7.0, root.f(np.array(7.0)))
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(5.0, imported.f().numpy())
self.assertEqual(7.0, imported.f(np.array(7.0)).numpy())
# imported.signatures with defaults are not supported.
# TODO(b/277814477) support defaults in loaded.signatures
# self.assertEqual(
# {"output_0": 5.0},
# self.evaluate(
# imported.signatures["serving_default"]()
# ),
# )
def test_function_with_default_none_input(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def func(x, dtype=None):
if dtype:
return array_ops.zeros(shape=x.shape, dtype=dtype)
else:
return array_ops.zeros(shape=x.shape, dtype=dtypes.float32)
root = autotrackable.AutoTrackable()
root.f = def_function.function(func)
self.assertAllEqual(
[0.0, 0.0, 0.0], root.f(constant_op.constant([1, 2, 3])).numpy()
)
self.assertAllEqual(
[0.0, 0.0, 0.0], root.f(constant_op.constant([1.0, 2.0, 3.0])).numpy()
)
self.assertAllEqual(
[0.0, 0.0, 0.0, 0.0], root.f(constant_op.constant([1, 2, 3, 4])).numpy()
)
self.assertAllEqual(
[0, 0, 0],
root.f(
constant_op.constant([1.0, 2.0, 3.0]), dtype=dtypes.int32
).numpy(),
)
concrete_functions = root.f._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access
self.assertLen(concrete_functions, 4)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
restored_concrete_functions = imported.f._list_all_concrete_functions() # pylint: disable=protected-access
self.assertLen(restored_concrete_functions, 4)
self.assertAllEqual(
[0.0, 0.0, 0.0],
imported.f(constant_op.constant([1, 2, 3]), None).numpy(),
)
self.assertAllEqual(
[0.0, 0.0, 0.0],
imported.f(constant_op.constant([1.0, 2.0, 3.0])).numpy(),
)
self.assertAllEqual(
[0.0, 0.0, 0.0, 0.0],
imported.f(constant_op.constant([1, 2, 3, 4])).numpy(),
)
self.assertAllEqual(
[0, 0, 0],
imported.f(
constant_op.constant([1.0, 2.0, 3.0]), dtype=dtypes.int32
).numpy(),
)
def test_function_with_str_bytes_input(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function
def func(x, y):
return string_ops.string_join([x, y])
root = autotrackable.AutoTrackable()
root.f = func
self.assertAllEqual(b"ab", root.f("a", "b"))
self.assertAllEqual(b"ab", root.f("a", constant_op.constant("b")))
self.assertAllEqual(b"ab", root.f(constant_op.constant("a"), "b"))
concrete_functions = root.f._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access
self.assertLen(concrete_functions, 3)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
restored_concrete_functions = imported.f._list_all_concrete_functions() # pylint: disable=protected-access
self.assertLen(restored_concrete_functions, 3)
self.assertAllEqual(b"ab", imported.f("a", "b"))
self.assertAllEqual(b"ab", imported.f("a", constant_op.constant("b")))
self.assertAllEqual(b"ab", imported.f(constant_op.constant("a"), "b"))
def test_function_no_return(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class TrackableWithOneVariable(autotrackable.AutoTrackable):
def __init__(self, initial_value=0.0):
super(TrackableWithOneVariable, self).__init__()
self.variable = variables.Variable(initial_value)
@def_function.function
def increase(self, by=1.0):
self.variable.assign_add(by)
obj = TrackableWithOneVariable(5.0)
obj.increase(constant_op.constant(10.0))
self.assertEqual(15.0, obj.variable.numpy())
obj.increase()
self.assertEqual(16.0, obj.variable.numpy())
imported = cycle(obj, cycles, use_cpp_bindings=use_cpp_bindings)
imported.increase(constant_op.constant(10.0))
self.assertEqual(26.0, imported.variable.numpy())
imported.increase(constant_op.constant(1.0))
self.assertEqual(27.0, imported.variable.numpy())
def test_structured_inputs(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def func(x, training=True):
# x is a nested structure, we care about one particular tensor.
_, (a, b) = x
if training:
return 2 * a["a"] + b
else:
return 7
root = autotrackable.AutoTrackable()
root.f = def_function.function(func)
x = constant_op.constant(10)
y = constant_op.constant(11)
input1 = [6, ({"a": x}, y)]
input2 = [7, ({"a": x}, y)] # Not compatible with input1 signature.
input3 = [6, ({"a": y}, x)] # Compatible with input1 signature.
# Note: by only calling f(input1) before serialization, only inputs with
# matching signature will be valid on the loaded model.
self.assertEqual(31, root.f(input1).numpy())
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
with self.assertRaisesRegex(
ValueError, "Could not find matching concrete function to call"
):
imported.f(input2)
self.assertEqual(31, imported.f(input1).numpy())
self.assertEqual(32, imported.f(input3).numpy())
def test_structured_inputs_bare_concrete_function(
self, cycles, use_cpp_bindings
):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def func(x, training=True):
# x is a nested structure, we care about one particular tensor.
_, (a, b) = x
if training:
return 2 * a["a"] + b
else:
return 7
x = constant_op.constant(10)
y = constant_op.constant(11)
input1 = [6, ({"a": x}, y)]
input2 = [7, ({"a": x}, y)] # Not compatible with input1 signature.
input3 = [6, ({"a": y}, x)] # Compatible with input1 signature.
root = autotrackable.AutoTrackable()
root.f = def_function.function(func).get_concrete_function(input1)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
with self.assertRaises(TypeError):
imported.f(input2)
self.assertEqual(31, imported.f(input1, True).numpy())
self.assertEqual(32, imported.f(input3, True).numpy())
def test_structured_output(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
# Use fields with non-alphabetical order
named_tuple_type = collections.namedtuple("NamedTupleHello", ["b", "a"])
def func(input1, input2):
named_tuple = named_tuple_type(a=input1 + input2, b=input1 * input2)
return [named_tuple, input2, {"x": 0.5}]
root = autotrackable.AutoTrackable()
root.f = def_function.function(func)
result = root.f(constant_op.constant(2), constant_op.constant(3))
self.assertEqual(5, result[0].a.numpy())
self.assertEqual(6, result[0].b.numpy())
self.assertEqual(["b", "a"], list(result[0]._asdict().keys()))
self.assertEqual(3, result[1].numpy())
self.assertEqual(0.5, result[2]["x"].numpy())
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
result = imported.f(constant_op.constant(2), constant_op.constant(5))
self.assertEqual(7, result[0].a.numpy())
self.assertEqual(10, result[0].b.numpy())
self.assertEqual(["b", "a"], list(result[0]._asdict().keys()))
self.assertEqual(5, result[1].numpy())
self.assertEqual(0.5, result[2]["x"].numpy())
def testConcreteFunctionType(self, cycles, use_cpp_bindings):
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
y = constant_op.constant(1)
@def_function.function
def foo(x):
return {"input": x, "capture": y}
root = autotrackable.AutoTrackable()
root.f = foo.get_concrete_function(tensor_spec.TensorSpec([], dtypes.int32))
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
x = constant_op.constant(2)
output = imported.f(x)
self.assertEqual(set(output.keys()), {"input", "capture"})
self.assertEqual(output["input"].numpy(), 2)
self.assertEqual(output["capture"].numpy(), 1)
parameters = list(imported.f.function_type.parameters.values())
self.assertLen(parameters, 1)
self.assertEqual(parameters[0].name, "x")
self.assertEqual(
parameters[0].type_constraint,
tensor_spec.TensorSpec([], dtypes.int32, name="x"),
)
captures = imported.f.function_type.captures
self.assertLen(captures, 1)
self.assertEqual(
list(captures.values())[0], tensor_spec.TensorSpec([], dtypes.int32)
)
output = imported.f.function_type.output
self.assertEqual(
output.mapping,
{
"input": tensor_spec.TensorSpec(
shape=(), dtype=dtypes.int32, name="input"
),
"capture": tensor_spec.TensorSpec(
shape=(), dtype=dtypes.int32, name="capture"
),
},
)
def test_pretty_print_signature(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
named_tuple_type = collections.namedtuple("NamedTupleHello", ["b", "a"])
def func(input1, input2):
named_tuple = named_tuple_type(a=input1 + input2, b=input1 * input2)
return [named_tuple, input2, {"x": 0.5}]
root = autotrackable.AutoTrackable()
root.f = def_function.function(func).get_concrete_function(
constant_op.constant(2), constant_op.constant(3)
)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(
imported.f.pretty_printed_signature(),
"Input Parameters:\n"
+ " input1 (POSITIONAL_OR_KEYWORD): TensorSpec(shape=(),"
" dtype=tf.int32, name='input1')\n"
+ " input2 (POSITIONAL_OR_KEYWORD): TensorSpec(shape=(),"
" dtype=tf.int32, name='input2')\n"
+ "Output Type:\n"
+ " List[NamedTupleHello[['b', TensorSpec(shape=(), dtype=tf.int32,"
" name='tensor_0_b')], ['a', TensorSpec(shape=(), dtype=tf.int32,"
" name='tensor_0_a')]], TensorSpec(shape=(), dtype=tf.int32,"
" name='tensor_1'), Dict[['x', TensorSpec(shape=(), dtype=tf.float32,"
" name='tensor_2_x')]]]\n"
+ "Captures:\n"
+ " None",
)
def test_positional_arguments(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def func(x, training=False, abc=7.1, defg=7.7):
del abc
if training:
return 2 * x
if defg == 7:
return 6
else:
return 7
root = autotrackable.AutoTrackable()
root.f = def_function.function(func)
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(7, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
self.assertEqual(6, root.f(constant_op.constant(1), defg=7.0).numpy())
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(7, imported.f(constant_op.constant(2)).numpy())
self.assertEqual(6, imported.f(constant_op.constant(1), defg=7.0).numpy())
def test_additional_kwargs(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def func(x, training=False, **options):
del options
if training:
return 2 * x
else:
return 7
root = autotrackable.AutoTrackable()
root.f = def_function.function(func)
x = constant_op.constant(10)
self.assertEqual(7, root.f(x, learning_rate=0.5, epochs=3).numpy())
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
with self.assertRaisesRegex(
ValueError, "Could not find matching concrete function to call.*"
):
imported.f(x, learning_rate=0.5, epochs=4)
self.assertEqual(7, imported.f(x, learning_rate=0.5, epochs=3).numpy())
def test_member_function(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class TrackableWithMember(autotrackable.AutoTrackable):
def __init__(self):
super(TrackableWithMember, self).__init__()
self._some_value = 20
@def_function.function
def f(self, x, training=False):
if training:
return 2 * x
else:
return 7 + self._some_value
root = TrackableWithMember()
self.assertEqual(20, root.f(constant_op.constant(10), True).numpy())
self.assertEqual(27, root.f(constant_op.constant(1)).numpy())
self.assertEqual(2, root.f(constant_op.constant(1), True).numpy())
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(4, imported.f(constant_op.constant(2), True).numpy())
self.assertEqual(27, imported.f(constant_op.constant(2)).numpy())
def test_side_effect_listing(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class M(autotrackable.AutoTrackable):
def __init__(self):
super(M, self).__init__()
self.var = None
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
)
def f(self, x):
if self.var is None:
self.var = variables.Variable(2.0)
return x * self.var
m = M()
cycle(m, cycles)
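# Saving traces `f`, which creates `self.var` as a side effect, so the
# original object can be called afterwards with the variable in place.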
self.assertEqual(4.0, m.f(constant_op.constant(2.0)).numpy())
def test_basic_backprop(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
weight = variables.Variable(1.0, trainable=True)
bias = variables.Variable(0.0, trainable=True)
g = def_function.function(
lambda x: x * weight + bias,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
)
root = autotrackable.AutoTrackable()
root.weight = weight
root.bias = bias
root.g = g
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
with backprop.GradientTape() as t:
x = constant_op.constant([3.5])
loss = imported.g(x)
grad = t.gradient(loss, [imported.weight, imported.bias])
self.assertAllClose(grad, [3.5, 1.0])
def test_nested_backprop(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
weight = variables.Variable(1.0, trainable=True)
bias = variables.Variable(0.0, trainable=True)
# Note: this function gets called from other function defs via a
# "PartitionedCall" op node.
@def_function.function(
input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32),
]
)
def mul(x, y):
return x * y
# Note: this function gets called from other function defs via a
# "StatefulPartitionedCall" op node.
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
)
def f(x):
return mul(weight.read_value(), x)
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
)
def g(x):
return (f(x) + bias,)
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
)
def h(x):
return (g(x) + bias,)
root = autotrackable.AutoTrackable()
root.weight = weight
root.bias = bias
root.g = h
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
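# `bias` is added once in g and once in h, so its gradient is 2; the
# gradient with respect to `weight` is simply x (3.5).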
with backprop.GradientTape() as t:
x = constant_op.constant([3.5])
loss = imported.g(x)
grad = t.gradient(loss, [imported.weight, imported.bias])
self.assertAllClose(grad, [3.5, 2.0])
def test_while_loop_backprop(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
weight = variables.Variable(2.0, trainable=True)
@def_function.function(
input_signature=[
tensor_spec.TensorSpec(dtype=dtypes.float32, shape=(None, None))
]
)
def g(x):
"""Adds rows of matrix x after multiplying each entry by v."""
i_0 = constant_op.constant(0)
s_0 = constant_op.constant([0.0, 0.0])
cond = lambda i, _: i < array_ops.shape(x)[1]
body = lambda i, s: (i + 1, s + weight * x[:, i])
i_end, s_end = while_loop.while_loop(cond, body, (i_0, s_0))
del i_end
return s_end
root = autotrackable.AutoTrackable()
root.weight = weight
root.g = g
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
def get_gradient(obj):
with backprop.GradientTape() as t:
x = constant_op.constant([[1.0, 2.0, 3.0], [1.0, -2, 3.0]])
y = obj.g(x)
self.assertAllClose(y, obj.weight * [6.0, 2.0])
loss = math_ops.reduce_sum(y) # weight * 8.
self.assertAllEqual(t.watched_variables(), [obj.weight])
return t.gradient(loss, obj.weight)
imported_gradient = get_gradient(imported)
original_gradient = get_gradient(root)
self.assertIsNotNone(original_gradient)
self.assertAllClose(original_gradient, 8.0)
self.assertIsNotNone(imported_gradient)
self.assertAllClose(imported_gradient, 8.0)
def _test_restored_func_with_captured_var_backprop(
self, cycles, use_cpp_bindings, dtype
):
weight = variables.Variable(2.0, trainable=True, dtype=dtype)
@def_function.function(
input_signature=[tensor_spec.TensorSpec(dtype=dtype, shape=())]
)
def g(x):
return x * weight
root = autotrackable.AutoTrackable()
root.weight = weight
root.g = g
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
def get_gradient(obj):
with backprop.GradientTape() as t:
x = constant_op.constant(2.0, dtype=dtype)
y = obj.g(x)
self.assertAllClose(y, obj.weight * 2.0)
self.assertAllEqual(t.watched_variables(), [obj.weight])
return t.gradient(y, obj.weight)
imported_gradient = get_gradient(imported)
original_gradient = get_gradient(root)
self.assertIsNotNone(original_gradient)
self.assertAllClose(original_gradient, 2.0)
self.assertIsNotNone(imported_gradient)
self.assertAllClose(imported_gradient, 2.0)
def test_nested_fn_backprop(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
weight = variables.Variable(2.0, trainable=True)
@def_function.function(
input_signature=[
tensor_spec.TensorSpec(dtype=dtypes.float32, shape=(None, None))
]
)
def g(x):
weight.read_value() # Just get the tape to watch the variable
handle = array_ops.identity(weight.handle)
@def_function.function
def launder_var_handle():
return array_ops.identity(handle)
return x + resource_variable_ops.read_variable_op(
launder_var_handle(), dtypes.float32
)
root = autotrackable.AutoTrackable()
root.weight = weight
root.g = g
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
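# g adds the scalar `weight` to every element of the 2x3 input, so the
# gradient of the summed output with respect to `weight` is 6.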
def get_gradient(obj, persistent):
with backprop.GradientTape(persistent=persistent) as t:
x = constant_op.constant([[1.0, 2.0, 3.0], [1.0, -2, 3.0]])
y = obj.g(x)
self.assertAllClose(y, obj.weight + x)
loss = math_ops.reduce_sum(y)
return t.gradient(loss, obj.weight)
imported_gradient = get_gradient(imported, persistent=False)
original_gradient = get_gradient(root, persistent=False)
self.assertIsNotNone(original_gradient)
self.assertAllClose(original_gradient, 6.0)
self.assertIsNotNone(imported_gradient)
self.assertAllClose(imported_gradient, 6.0)
def test_restored_func_with_captured_var_backprop_float32(
self, cycles, use_cpp_bindings
):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
self._test_restored_func_with_captured_var_backprop(
cycles, use_cpp_bindings, dtypes.float32
)
def test_restored_func_with_captured_var_backprop_float64(
self, cycles, use_cpp_bindings
):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
self._test_restored_func_with_captured_var_backprop(
cycles, use_cpp_bindings, dtypes.float64
)
def test_callable(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class M1(autotrackable.AutoTrackable):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
)
def __call__(self, x):
return x
root = autotrackable.AutoTrackable()
root.m1 = M1()
root.m2 = autotrackable.AutoTrackable()
root.m2.__call__ = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
)(lambda x: x * 3.0)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
x = constant_op.constant(1.0)
self.assertTrue(callable(imported.m1))
self.assertAllEqual(root.m1(x), imported.m1(x))
# Note: `root.m2` was not callable because the `__call__` attribute was set
# on the instance rather than on the class. After a serialization cycle,
# however, it becomes callable.
self.assertTrue(callable(imported.m2))
self.assertAllEqual(root.m2.__call__(x), imported.m2(x))
# Verify that user objects without `__call__` attribute are not callable.
self.assertFalse(callable(imported))
def test_chain_callable(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
func = def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
)(lambda x: x * 3.0)
root = autotrackable.AutoTrackable()
root.__call__ = autotrackable.AutoTrackable()
root.__call__.__call__ = autotrackable.AutoTrackable()
root.__call__.__call__.__call__ = func
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertTrue(callable(imported))
x = constant_op.constant(1.0)
self.assertAllEqual(imported(x).numpy(), 3.0)
def test_load_in_graph_mode(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.v1 = variables.Variable(1.0, name="v_one", trainable=False)
root.v2 = variables.Variable(2.0, name="v_two", trainable=True)
root.f = def_function.function(
lambda x: root.v2 * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
)
if cycles > 1:
root = cycle(root, cycles - 1, use_cpp_bindings=use_cpp_bindings)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
with ops.Graph().as_default() as g:
imported = test_load(path, use_cpp_bindings=use_cpp_bindings)
var_v1 = imported.v1
self.assertFalse(var_v1.trainable)
var_v2 = imported.v2
self.assertTrue(var_v2.trainable)
output = imported.f(constant_op.constant(2.0))
with monitored_session.MonitoredSession() as sess:
self.assertEqual(1.0, sess.run(var_v1))
self.assertEqual(4.0, sess.run(output))
self.assertCountEqual(
[var_v1, var_v2], g.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
)
# load() should not add to TRAINABLE_VARIABLES. Higher levels of model
# building control retraining or frozen use of imported SavedModels.
self.assertCountEqual(
[], g.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
)
def test_load_in_func_graph(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.v1 = variables.Variable(1.0)
root.v2 = variables.Variable(2.0)
root.f = def_function.function(
lambda x: root.v2 * x,
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
)
if cycles > 1:
root = cycle(root, cycles - 1, use_cpp_bindings=use_cpp_bindings)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
closure = autotrackable.AutoTrackable()
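# Load the SavedModel lazily inside a tf.function, caching it on `closure`
# so it is only loaded once.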
@def_function.function
def func(x):
if not hasattr(closure, "model"):
closure.model = load.load(path)
return closure.model.f(x)
inputs = constant_op.constant(2.0)
self.assertEqual(4.0, func(inputs).numpy())
def test_soft_matching(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)]
)
def func(x):
return 2 * x
root = autotrackable.AutoTrackable()
root.f = func
self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
self.assertAllEqual([2, 4], root.f(constant_op.constant([1, 2])).numpy())
concrete_functions = root.f._list_all_concrete_functions_for_serialization() # pylint: disable=protected-access
self.assertLen(concrete_functions, 1)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
restored_concrete_functions = imported.f._list_all_concrete_functions() # pylint: disable=protected-access
self.assertLen(restored_concrete_functions, 1)
with self.assertRaisesRegex(
TypeError, "Binding inputs to tf.function failed"
):
# We cannot call the function with a constant of shape ().
imported.f(constant_op.constant(2)).numpy()
# TODO(vbardiovsky): When classes are revived with input_signatures, we
# should also check that the calls below are not generating any more
# concrete functions.
self.assertAllEqual(
[2, 4, 6, 8], imported.f(constant_op.constant([1, 2, 3, 4])).numpy()
)
self.assertAllEqual(
[2, 4, 6], imported.f(constant_op.constant([1, 2, 3])).numpy()
)
def test_jit_compile(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
# It'd be nice to use parameterize here, but the library does not support
# having parameterized test methods inside already-parameterized classes.
for jit_compile in (None, True, False):
@def_function.function(jit_compile=jit_compile)
def f(x):
return x + 1.0
root = module.Module()
root.f = f
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save.save(root, save_dir)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(imported.f._jit_compile, jit_compile)
def test_get_concrete_function(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function
def func(x, training=False):
if training:
return 2 * x
else:
return 3 * x
func.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32), True
)
func.get_concrete_function(tensor_spec.TensorSpec([None], dtypes.float32))
root = autotrackable.AutoTrackable()
root.f = func
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
concrete = imported.f.get_concrete_function(
training=True, x=tensor_spec.TensorSpec([None], dtypes.int32)
)
self.assertAllEqual(
[2, 4, 6, 8], concrete(x=constant_op.constant([1, 2, 3, 4])).numpy()
)
with self.assertRaisesRegex(
ValueError, "Could not find matching concrete function to call"
):
imported.f.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32)
)
imported.f.get_concrete_function(
tensor_spec.TensorSpec([None], dtypes.int32), True
)
def test_concrete_function(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)]
)
def func(x):
return 2 * x
root = autotrackable.AutoTrackable()
root.f = func.get_concrete_function()
self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
self.assertAllEqual([2, 4], root.f(constant_op.constant([1, 2])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = cycle(
root, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
)
self.assertAllEqual(
[2, 4, 6, 8], imported.f(constant_op.constant([1, 2, 3, 4])).numpy()
)
self.assertAllEqual(
[2, 4, 6], imported.f(constant_op.constant([1, 2, 3])).numpy()
)
def test_concrete_function_captures(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class Root(module.Module):
def __init__(self):
self.v = variables.Variable(1.0)
self.v1 = variables.Variable(1.0)
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)]
)
def use_v(self, x):
return self.v + self.v1 + 1.0
root = Root()
self.assertIn(
root.v.handle,
root.use_v.get_concrete_function().graph.external_captures,
)
root = cycle(
root,
cycles,
signatures=root.use_v.get_concrete_function(),
use_cpp_bindings=use_cpp_bindings,
)
func_captures = root.use_v.get_concrete_function().graph.external_captures
self.assertLen(func_captures, 2)
self.assertTrue(any(root.v.handle is t for t in func_captures))
self.assertTrue(any(root.v1.handle is t for t in func_captures))
signature_captures = root.signatures[
"serving_default"
].graph.external_captures
self.assertLen(signature_captures, 2)
self.assertTrue(any(root.v.handle is t for t in signature_captures))
self.assertTrue(any(root.v1.handle is t for t in signature_captures))
def test_concrete_function_arg_names(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)]
)
def func(x):
return 2 * x
root = autotrackable.AutoTrackable()
root.f = func.get_concrete_function()
self.assertAllEqual([2], root.f(constant_op.constant([1])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = cycle(
root, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
)
self.assertAllEqual(
[2, 4, 6], imported.f(x=constant_op.constant([1, 2, 3])).numpy()
)
def test_concrete_function_no_signature(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function
def func(x):
return 2 * x
root = autotrackable.AutoTrackable()
root.f = func.get_concrete_function(constant_op.constant([1]))
self.assertAllEqual([4], root.f(constant_op.constant([2])).numpy())
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = cycle(
root, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
)
self.assertAllEqual([6], imported.f(constant_op.constant([3])).numpy())
@test_util.run_in_graph_and_eager_modes
def test_concrete_function_backprop(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.float32)]
)
def func(x):
return x**2.0
root = autotrackable.AutoTrackable()
root.f = func.get_concrete_function()
def _compute_gradient(function):
with backprop.GradientTape() as tape:
inp = constant_op.constant(1.0)
tape.watch(inp)
output = function(inp)
return tape.gradient(output, inp)
self.assertAllEqual(2.0, _compute_gradient(root.f))
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = cycle(
root, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
)
self.assertAllEqual(2.0, _compute_gradient(imported.f))
def test_revived_concrete_function_kwargs(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function
def func(x, y):
return x * (y + 1.0)
root = autotrackable.AutoTrackable()
root.f = func.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.float32),
)
self.assertEqual(
8.0,
root.f(
y=constant_op.constant(3.0), x=constant_op.constant(2.0)
).numpy(),
)
# TODO(andresp): Fix exporting of loaded concrete functions as signatures.
imported = cycle(
root, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
)
self.assertEqual(
8.0,
imported.f(
y=constant_op.constant(3.0), x=constant_op.constant(2.0)
).numpy(),
)
def test_revived_concrete_function_tensorspec_kwargs(
self, cycles, use_cpp_bindings
):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function
def func(*args):
x, y = args
return x * (y + 1.0)
root = autotrackable.AutoTrackable()
root.f = func.get_concrete_function(
tensor_spec.TensorSpec([], dtypes.float32, name="x"),
tensor_spec.TensorSpec([], dtypes.float32, name="y"),
)
self.assertEqual(
8.0,
root.f(
y=constant_op.constant(3.0), x=constant_op.constant(2.0)
).numpy(),
)
imported = cycle(
root, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
)
self.assertEqual(
8.0,
imported.f(
y=constant_op.constant(3.0), x=constant_op.constant(2.0)
).numpy(),
)
def test_concrete_function_variable_argument(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
capture = variables.Variable(0)
@def_function.function
def func(v):
v.assign_add(1)
capture.assign_sub(1)
vsave = variables.Variable(1)
root = autotrackable.AutoTrackable()
root.f = func.get_concrete_function(vsave)
root.capture = capture
self.assertEqual(1, vsave.numpy())
root.f(vsave)
self.assertEqual(2, vsave.numpy())
self.assertEqual(-1, capture.numpy())
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
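# The restored function updates both the variable passed in and its own
# restored copy of the captured variable; the original `capture` variable
# is left at -1.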
vload = variables.Variable(1)
imported.f(vload)
self.assertEqual(2, vload.numpy())
self.assertEqual(-2, imported.capture.numpy())
imported.f(v=vload)
self.assertEqual(3, vload.numpy())
self.assertEqual(-3, imported.capture.numpy())
self.assertEqual(-1, capture.numpy())
def test_function_and_component(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function
def func(v):
return v + 1
root = autotrackable.AutoTrackable()
root.func = func
root.concrete_func = func.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.int32)
)
one = constant_op.constant(1)
self.assertEqual(2, root.func(one).numpy())
self.assertEqual(2, root.concrete_func(one).numpy())
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(2, imported.func(one).numpy())
self.assertEqual(2, imported.concrete_func(one).numpy())
def test_dict(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.variables = dict(a=variables.Variable(1.0))
root.variables["b"] = variables.Variable(2.0)
root.variables["c"] = 1
root.funcs = dict(
a=def_function.function(lambda: constant_op.constant(100.0))
)
root.funcs["conc"] = root.funcs["a"].get_concrete_function()
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
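# The plain int stored under "c" is not trackable, so it is not restored.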
self.assertEqual(1.0, imported.variables["a"].numpy())
self.assertEqual(2.0, imported.variables["b"].numpy())
self.assertEqual(set(["a", "b"]), set(imported.variables.keys()))
self.assertEqual(100.0, imported.funcs["a"]().numpy())
self.assertEqual(100.0, imported.funcs["conc"]().numpy())
def test_list(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.variables = [variables.Variable(1.0)]
root.variables.append(1)
root.variables.append(variables.Variable(3.0))
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
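# The non-trackable int at index 1 is restored as None so that the
# surrounding variables keep their positions in the list.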
self.assertEqual(1.0, imported.variables[0].numpy())
self.assertEqual(3.0, imported.variables[2].numpy())
self.assertIs(None, imported.variables[1])
self.assertLen(imported.variables, 3)
def test_tuple(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
root.variables = (variables.Variable(1.0), 1, variables.Variable(3.0))
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(1.0, imported.variables[0].numpy())
self.assertEqual(3.0, imported.variables[2].numpy())
self.assertIs(None, imported.variables[1])
self.assertLen(imported.variables, 3)
def test_functions_list(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
v1 = variables.Variable(1.0)
root.losses = [def_function.function(lambda: math_ops.reduce_sum(v1**2))]
root.variables = [v1]
@def_function.function
def _v2_loss():
if len(root.variables) == 1:
v2 = variables.Variable(2.0)
root.variables.append(v2)
return math_ops.reduce_sum(root.variables[1] ** 2)
root.losses.append(_v2_loss)
self.assertAllClose([1.0, 4.0], [loss() for loss in root.losses])
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertAllClose([1.0, 4.0], [loss() for loss in imported.losses])
imported.variables[0].assign(3.0)
imported.variables[1].assign(4.0)
self.assertAllClose([9.0, 16.0], [loss() for loss in imported.losses])
def test_captured_constant(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
const = array_ops.zeros([100])
root = autotrackable.AutoTrackable()
root.f = def_function.function(lambda: const + 1.0)
root.g = def_function.function(lambda: const + 2.0)
self.assertAllClose(array_ops.ones([100]), root.f())
self.assertAllClose(2.0 * array_ops.ones([100]), root.g())
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertAllClose(array_ops.ones([100]), imported.f())
self.assertAllClose(2.0 * array_ops.ones([100]), imported.g())
# TODO(b/123408994): Use the public get_concrete_function.
f_concrete = imported.f._list_all_concrete_functions_for_serialization()[0]
g_concrete = imported.g._list_all_concrete_functions_for_serialization()[0]
self.assertLen(f_concrete.captured_inputs, 1)
self.assertLen(g_concrete.captured_inputs, 1)
# We should be using the same captured EagerTensor in both functions, not
# duplicating the constant.
self.assertIs(f_concrete.captured_inputs[0], g_concrete.captured_inputs[0])
def test_functions_accessed_once(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class Exported(autotrackable.AutoTrackable):
def __init__(self):
self._counter = 0
@property
def make_func(self):
@def_function.function
def f():
return constant_op.constant(self._counter)
f.get_concrete_function() # force a trace
self._counter += 1
return f
exported = Exported()
imported = cycle(exported, cycles, use_cpp_bindings=use_cpp_bindings)
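# Saving should access the `make_func` property exactly once, so the loaded
# function returns the counter value captured at save time (0) and the next
# access on the original object returns 1.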
self.assertEqual(0, imported.make_func().numpy())
self.assertEqual(1, exported.make_func().numpy())
def test_overwritten_signatures_error(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
exported = autotrackable.AutoTrackable()
exported.f = def_function.function(lambda: constant_op.constant(1.0))
imported = cycle(
exported,
cycles,
signatures={"key": exported.f.get_concrete_function()},
use_cpp_bindings=use_cpp_bindings,
)
self.assertEqual(1.0, imported.signatures["key"]()["output_0"].numpy())
imported.signatures = {"key1": imported.signatures["key"]}
with self.assertRaisesRegex(ValueError, "signatures"):
save.save(imported, tempfile.mkdtemp(prefix=self.get_temp_dir()))
def test_signature_loading(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class Exported(autotrackable.AutoTrackable):
def __init__(self):
self.v = variables.Variable(3.0)
@def_function.function
def do(self, x):
return self.v * x
exported = Exported()
imported = cycle(
exported,
cycles,
signatures=exported.do.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32)
),
use_cpp_bindings=use_cpp_bindings,
)
self.assertEqual(["serving_default"], list(imported.signatures.keys()))
imported_function = imported.signatures["serving_default"]
two = constant_op.constant(2.0)
self.assertEqual(6.0, imported_function(x=two)["output_0"].numpy())
imported.v.assign(4.0)
self.assertEqual(8.0, imported_function(x=two)["output_0"].numpy())
self.assertEqual(8.0, imported_function(two)["output_0"].numpy())
with self.assertRaises(TypeError):
# The signatures mapping is immutable
imported.signatures["random_key"] = 3
def test_names_normalized(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class ObjWithFunction(module.Module):
@def_function.function(
input_signature=[
tensor_spec.TensorSpec([], dtype=dtypes.int32, name="A-b"),
tensor_spec.TensorSpec([], dtype=dtypes.int32, name="A/D"),
tensor_spec.TensorSpec([], dtype=dtypes.int32, name="bar"),
tensor_spec.TensorSpec([], dtype=dtypes.int32, name="e"),
]
)
def foo(self, a, b, c, d=10, **options):
del options
return a + b + c + d
exported = ObjWithFunction()
with self.assertLogs(level="INFO") as logs:
imported = cycle(exported, cycles, use_cpp_bindings=use_cpp_bindings)
expected_message = (
"INFO:absl:Function `foo` contains input name(s) A-b, A/D with "
"unsupported characters which will be renamed to a_b, a_d in the "
"SavedModel."
)
self.assertIn(expected_message, logs.output)
loaded_signature = imported.signatures["serving_default"].inputs
self.assertTrue(
{"a_b:0", "a_d:0"}.issubset({arg.name for arg in loaded_signature}),
)
def test_multiple_argument_signatures_no_positional(
self, cycles, use_cpp_bindings
):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class Exported(autotrackable.AutoTrackable):
@def_function.function
def do(self, x, y):
return x + y
exported = Exported()
imported = cycle(
exported,
cycles,
signatures=exported.do.get_concrete_function(
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.float32),
),
use_cpp_bindings=use_cpp_bindings,
)
with self.assertRaises(TypeError):
imported.signatures["serving_default"](
constant_op.constant(1.0), y=constant_op.constant(2.0)
)
self.assertEqual(
{"output_0": 3.0},
self.evaluate(
imported.signatures["serving_default"](
x=constant_op.constant(1.0), y=constant_op.constant(2.0)
)
),
)
def _make_model_with_tables(self):
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1_initializer = lookup_ops.KeyValueTensorInitializer(keys, values)
table1 = lookup_ops.HashTable(table1_initializer, default_val)
table2_file = self._make_asset("test\nfoo\nbrain\n")
table2_initializer = lookup_ops.TextFileIdTableInitializer(table2_file)
table2 = lookup_ops.HashTable(table2_initializer, default_val)
def _make_lookup_function(table):
signature = [tensor_spec.TensorSpec(None, dtypes.string)]
return def_function.function(input_signature=signature)(
lambda x: table.lookup(x)) # pylint: disable=unnecessary-lambda
root = autotrackable.AutoTrackable()
root.table1 = table1
root.lookup1 = _make_lookup_function(table1)
root.table2 = table2
root.lookup2 = _make_lookup_function(table2)
return root
def test_table(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = self._make_model_with_tables()
imported = cycle(root, cycles, signatures={})
keys = constant_op.constant(["brain", "test", "foo", "surgery"])
self.assertAllEqual([0, -1, -1, 2], imported.lookup1(keys).numpy())
self.assertAllEqual([2, 0, 1, -1], imported.lookup2(keys).numpy())
def test_table_collections_untouched_eager(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def _gather_nonempty_collections():
graph = ops.get_default_graph()
gathered = {}
for collection in graph.collections:
collection_contents = graph.get_collection(collection)
if collection_contents:
gathered[collection] = collection_contents
return gathered
root = self._make_model_with_tables()
# Warm up collections to ignore those that don't expand every iteration,
# e.g. the __varscope collection.
cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
original_collections = _gather_nonempty_collections()
cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(original_collections, _gather_nonempty_collections())
def test_table_in_graph(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = self._make_model_with_tables()
if cycles > 1:
root = cycle(root, cycles - 1, use_cpp_bindings=use_cpp_bindings)
path = tempfile.mkdtemp(prefix=self.get_temp_dir())
save.save(root, path)
imported = cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
with ops.Graph().as_default():
imported = test_load(path, use_cpp_bindings=use_cpp_bindings)
keys = constant_op.constant(["brain", "test", "foo", "surgery"])
output1 = imported.lookup1(keys)
output2 = imported.lookup2(keys)
with monitored_session.MonitoredSession() as sess:
self.assertAllEqual([0, -1, -1, 2], sess.run(output1))
self.assertAllEqual([2, 0, 1, -1], sess.run(output2))
def test_preserve_argspec(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def f(a, b, c): # pylint: disable=unused-argument
return None
original_fullargspec = tf_inspect.getfullargspec(f)
root = autotrackable.AutoTrackable()
root.f = def_function.function(f)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
restored_fullargspec = tf_inspect.getfullargspec(imported.f)
self.assertEqual(original_fullargspec, restored_fullargspec)
def test_canonicalize_inputs(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function(autograph=False)
def func(a=1, b=2, c=3, training=True):
if training:
return [a, b, c, training]
else:
return [c, b, a, training]
# TODO(b/123501567): Work-around to trigger generic traces of a function
# with extra non tensor args.
signature = 3 * [tensor_spec.TensorSpec(None, dtypes.float32)]
@def_function.function(input_signature=signature)
def trigger(a, b, c):
func(a, b, c, True)
func(a, b, c, False)
trigger.get_concrete_function()
root = autotrackable.AutoTrackable()
root.f = func
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertAllEqual(root.f(), [1.0, 2.0, 3.0, True])
self.assertAllEqual(root.f(-1.0, training=False), [3.0, 2.0, -1.0, False])
with self.assertRaisesRegex(
ValueError, "Could not find matching concrete function"
):
root.f(["hello", 1.0])
def test_prefer_specific_trace(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function(autograph=False)
def func(a):
if isinstance(a, int):
return a
else:
return a + 1
self.assertAllEqual(2, func(2).numpy())
self.assertAllEqual(3, func(constant_op.constant(2)).numpy())
root = autotrackable.AutoTrackable()
root.f = func
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
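# Calling with the Python int 2 matches the specific trace captured before
# saving and returns it unchanged; other inputs fall back to the tensor
# trace, which adds 1.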
self.assertAllEqual(2, root.f(2).numpy())
self.assertAllEqual(4, root.f(3).numpy())
self.assertAllEqual(3, root.f(constant_op.constant(2)).numpy())
self.assertAllEqual(4, root.f(constant_op.constant(3)).numpy())
def test_partial_with_non_tensor_defaults(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def f(x, y=3):
return x + y
func = def_function.function(functools.partial(f, y=5))
root = autotrackable.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 6)
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertAllEqual(root.f(1), 6)
def test_partial_with_positional(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def f(x, y):
return x + y
func = def_function.function(functools.partial(f, constant_op.constant(5)))
root = autotrackable.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 6)
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertAllEqual(root.f(1), 6)
def test_partial_with_positional_captured_tensors(
self, cycles, use_cpp_bindings
):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def f(x, y):
return x + y
tensor = constant_op.constant(5) + constant_op.constant(7)
func = def_function.function(functools.partial(f, tensor))
root = autotrackable.AutoTrackable()
root.f = func
self.assertAllEqual(root.f(1), 13)
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertAllEqual(root.f(1), 13)
def test_partial_keyword_hiding_default(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def f(x=3, training=True, y=7):
if training:
return x + y
else:
return x + y + 2
func = def_function.function(functools.partial(f, y=6))
root = autotrackable.AutoTrackable()
root.f = func
self.assertEqual(root.f().numpy(), 9)
self.assertEqual(root.f(training=False).numpy(), 11)
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(root.f().numpy(), 9)
self.assertEqual(root.f(training=False).numpy(), 11)
def test_partial_with_kwargs(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def f(a, b, *args, **kwargs):
args_sum = sum(args)
return a + b + kwargs["some_tensor"] * kwargs["learning_rate"] + args_sum
constant_tensor = constant_op.constant(10)
func = def_function.function(
functools.partial(
f, 7, 1, 2, learning_rate=3, some_tensor=constant_tensor
)
)
root = autotrackable.AutoTrackable()
root.f = func
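# With the partial binding a=7, b=1, args=(2, x), some_tensor=10 and
# learning_rate=3, the result is 7 + 1 + 10 * 3 + (2 + x).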
self.assertEqual(root.f(constant_op.constant(4)).numpy(), 44)
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(root.f(constant_op.constant(5)).numpy(), 45)
def test_partial_bind_only_first_argument(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
if sys.version_info[0] < 3:
self.skipTest(
"Test is only valid in python3. Only then we get some more "
"advanced inspection of partials where this is allowed."
)
def f(x, y):
return x + y
partial_func = functools.partial(f, x=5)
tf_func = def_function.function(partial_func)
root = autotrackable.AutoTrackable()
root.f = tf_func
self.assertAllEqual(root.f(y=constant_op.constant(7)), 12)
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertAllEqual(root.f(y=constant_op.constant(9)), 14)
def test_partial_with_passed_fn_as_default(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def f(x, y):
return x(3) + y
def my_func(a):
return 2 * a
func = def_function.function(functools.partial(f, my_func))
root = autotrackable.AutoTrackable()
root.f = func
self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9)
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(root.f(constant_op.constant(3)).numpy(), 9)
def test_partial_with_input_signature(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def full_function(a, b, c=3.0):
return a, b, c
partial = functools.partial(full_function, 1, c=4)
self.assertAllEqual((1, 2.0, 4), partial(2.0))
signature = [tensor_spec.TensorSpec([], dtypes.float32)]
func = def_function.function(partial, input_signature=signature)
root = autotrackable.AutoTrackable()
root.f = func
a, b, c = root.f(2.0)
self.assertAllEqual([a.numpy(), b.numpy(), c.numpy()], (1, 2.0, 4))
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
a, b, c = root.f(3.0)
self.assertAllEqual([a.numpy(), b.numpy(), c.numpy()], (1, 3.0, 4))
def test_convert_to_input_signature(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function(
input_signature=[tensor_spec.TensorSpec([None], dtypes.int32)]
)
def func(x):
return x
root = autotrackable.AutoTrackable()
root.f = func
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual([2], root.f([2]).numpy())
def test_named_tuple(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class NamedTupleType(collections.namedtuple("NamedTupleType", ["a", "b"])):
pass
@def_function.function
def f(x):
return x.a + x.b
f.get_concrete_function(
NamedTupleType(
a=tensor_spec.TensorSpec(None, dtypes.float32, name="a"),
b=tensor_spec.TensorSpec(None, dtypes.float32, name="b"),
)
)
obj = autotrackable.AutoTrackable()
obj.__call__ = f
if sys.version_info.major == 3 and sys.version_info.minor < 5:
# TODO(allenl): figure out why this doesn't work in Python3.4
self.skipTest("Not working in Python 3.4")
imported = cycle(obj, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertAllClose(
3.0,
imported(
NamedTupleType(
a=constant_op.constant(1.0), b=constant_op.constant(2.0)
)
),
)
def test_extra_args(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function
def f(x):
return math_ops.add(x["a"], 1.0)
# Trigger a trace.
f({"a": constant_op.constant(2.0)})
obj = autotrackable.AutoTrackable()
obj.__call__ = f
imported = cycle(obj, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(4.0, imported({"a": 3.0}).numpy())
with self.assertRaisesRegex(
ValueError, "Could not find matching concrete function to call"
):
imported({"a": 2.0, "b": 3.0})
def test_shapes_available(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function(
input_signature=[
tensor_spec.TensorSpec([None, 3], dtypes.int32),
tensor_spec.TensorSpec([None, 2], dtypes.int32),
]
)
def func(x, y):
return array_ops.concat([x, y], axis=1)
root = autotrackable.AutoTrackable()
root.f = func
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
imported_graph = root.f.get_concrete_function().graph
input_x, input_y = imported_graph.inputs
self.assertEqual([None, 3], input_x.shape.as_list())
self.assertEqual([None, 2], input_y.shape.as_list())
(output,) = imported_graph.outputs
self.assertEqual([None, 5], output.shape.as_list())
signature = root.signatures["serving_default"]
self.assertEqual([None, 3], signature.inputs[0].shape.as_list())
self.assertEqual([None, 2], signature.inputs[1].shape.as_list())
self.assertEqual([None, 5], signature.outputs[0].shape.as_list())
def test_variables_destroyed(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
v1 = variables.Variable(1.0)
weak_v1 = weakref.ref(v1)
root = checkpoint.Checkpoint(v=v1)
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
del v1
self.assertIsNone(weak_v1())
weak_v2 = weakref.ref(root.v)
del root
self.assertIsNone(weak_v2())
def test_variable_attributes_preserved(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
v = variables.Variable(
1.0,
trainable=False,
synchronization=variables.VariableSynchronization.NONE,
aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA,
)
self.assertEqual(variables.VariableSynchronization.NONE, v.synchronization)
self.assertEqual(
variables.VariableAggregation.ONLY_FIRST_REPLICA, v.aggregation
)
root = autotrackable.AutoTrackable()
root.v = v
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(False, root.v.trainable)
self.assertEqual(
variables.VariableSynchronization.NONE, root.v.synchronization
)
self.assertEqual(
variables.VariableAggregation.ONLY_FIRST_REPLICA, root.v.aggregation
)
def test_captured_dataset(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
class HasDataset(module.Module):
def __init__(self):
super(HasDataset, self).__init__()
self.dataset = dataset_ops.Dataset.range(5).map(lambda x: x**2)
@def_function.function
def __call__(self, x):
current_sum = array_ops.zeros([], dtype=dtypes.int64)
for element in self.dataset:
current_sum += x * element
return current_sum
root = HasDataset()
self.assertEqual(
3 * (1 + 4 + 9 + 16),
root(constant_op.constant(3, dtype=dtypes.int64)).numpy(),
)
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(
3 * (1 + 4 + 9 + 16),
root(constant_op.constant(3, dtype=dtypes.int64)).numpy(),
)
def test_tuple_signature(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = checkpoint.Checkpoint()
root.f = def_function.function(
lambda: (array_ops.ones([]), array_ops.zeros([])), input_signature=()
)
root = cycle(
root, cycles, signatures=root.f, use_cpp_bindings=use_cpp_bindings
)
self.assertEqual(
({"output_0": 1.0, "output_1": 0.0}),
self.evaluate(root.signatures["serving_default"]()),
)
def test_version_info(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = checkpoint.Checkpoint()
root = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(versions.__version__, root.tensorflow_version)
self.assertEqual(versions.__git_version__, root.tensorflow_git_version)
def test_load_grad_save(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = checkpoint.Checkpoint()
root.v = variables.Variable(2.0)
root.f = def_function.function(lambda x: root.v * x)
root.g = def_function.function(root.f)
for _ in range(cycles):
with backprop.GradientTape() as tape:
inp = constant_op.constant(2.0)
tape.watch(inp)
output = root.g(inp)
self.assertAllClose(4.0, output)
self.assertAllClose(2.0, tape.gradient(output, inp))
root = cycle(root, 1, use_cpp_bindings=use_cpp_bindings)
def test_destroy_resource(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
def get_handle():
return resource_variable_ops.var_handle_op(
shape=tensor_shape.as_shape([]),
dtype=dtypes.float32,
shared_name="my_var_name",
name="my_var",
container="my_container",
)
class MyResource(resource.TrackableResource):
def _create_resource(self):
return get_handle()
def _initialize(self):
resource_variable_ops.assign_variable_op(
self.resource_handle, 1.0, name="assign"
)
def _destroy_resource(self):
handle = get_handle()
resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=True
)
class MyModel(autotrackable.AutoTrackable):
def __init__(self):
super(MyModel, self).__init__()
self.resource = MyResource()
@def_function.function(input_signature=[])
def increase(self):
handle = self.resource.resource_handle
resource_variable_ops.assign_add_variable_op(
handle, 10.0, name="assign_add"
)
return resource_variable_ops.read_variable_op(handle, dtypes.float32)
root = MyModel()
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(11, imported.increase().numpy()) # Create the resource.
handle = imported.resource.resource_handle
# Delete the imported SavedModel. Since we explicitly set the deleter, it
# should destroy the resource automatically.
del imported
# Try to destroy the resource again, should fail.
with self.assertRaisesRegex(
errors.NotFoundError, r"Resource .* does not exist."
):
resource_variable_ops.destroy_resource_op(
handle, ignore_lookup_error=False
)
def test_function_called_as_operation(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@framework_function.Defun(dtypes.float32)
def inner(x):
return x + 1.0
@def_function.function(
input_signature=[tensor_spec.TensorSpec([], dtypes.float32)]
)
def outer(x):
return inner(x)
root = module.Module()
root.f = outer
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertAllClose(2.0, imported.f(constant_op.constant(1.0)))
def test_ragged(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@def_function.function
def f(x, c=1):
"""Returns Tensor x incremented by Python constant c."""
return math_ops.add(x, c)
for c in (1, 2, 3):
_ = f.get_concrete_function(
ragged_tensor.RaggedTensorSpec([None, None], dtype=dtypes.int32), c
)
obj = autotrackable.AutoTrackable()
obj.f = f
imported1 = cycle(
obj, cycles, signatures={}, use_cpp_bindings=use_cpp_bindings
)
rt = ragged_factory_ops.constant([[1, 2], [3]])
self.assertAllEqual(imported1.f(rt), [[2, 3], [4]])
self.assertAllEqual(imported1.f(rt, 2), [[3, 4], [5]])
self.assertAllEqual(imported1.f(rt, 3), [[4, 5], [6]])
imported2 = cycle(obj, cycles, use_cpp_bindings=use_cpp_bindings)
rt = ragged_factory_ops.constant([[1, 2], [3]])
self.assertAllEqual(imported2.f(rt, 1), [[2, 3], [4]])
self.assertAllEqual(imported2.f(rt, 2), [[3, 4], [5]])
self.assertAllEqual(imported2.f(rt, 3), [[4, 5], [6]])
def test_accepts_io_device(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
options = load_options.LoadOptions()
self.assertIsNone(options.experimental_io_device)
options = load_options.LoadOptions(experimental_io_device="/job:localhost")
self.assertEqual("/job:localhost", options.experimental_io_device)
def _custom_saveable_object(self, cycles, use_cpp_bindings):
if context.is_tfrt_enabled():
self.skipTest("Disable due to b/190539415.")
root = autotrackable.AutoTrackable()
root.table = lookup_ops.MutableHashTable(dtypes.string, dtypes.float32, -1)
root.table.insert("foo", 15)
root.table2 = lookup_ops.MutableHashTable(dtypes.string, dtypes.float32, -1)
root.table2.insert("idk", 21)
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.string)]
)
def lookup(key):
return root.table.lookup(key)
root.lookup = lookup
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(self.evaluate(imported.lookup("foo")), 15)
self.assertEqual(self.evaluate(imported.lookup("idk")), -1)
if not saveable_compat.force_checkpoint_conversion_enabled():
self.assertEqual(
{"table"}, imported.table._self_saveable_object_factories.keys()
)
def test_load_custom_saveable_object(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
self._custom_saveable_object(cycles, use_cpp_bindings=use_cpp_bindings)
def test_load_custom_saveable_object_ckpt_conversion(
self, cycles, use_cpp_bindings
):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
# Tests custom saveable object with checkpoint conversion enabled (forces
# Trackable-based checkpoint implementation).
saveable_compat.force_checkpoint_conversion()
self._custom_saveable_object(cycles, use_cpp_bindings=use_cpp_bindings)
def test_load_resource_with_dependency(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
# Test with StaticHashTable, which has an _initializer attribute that tracks
# the Asset vocab table.
class MyLookupModel(autotrackable.AutoTrackable):
def __init__(self, vocab_file):
vocab_initializer = lookup_ops.TextFileInitializer(
vocab_file,
key_dtype=dtypes.string,
key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
value_dtype=dtypes.int64,
value_index=lookup_ops.TextFileIndex.LINE_NUMBER,
)
self._vocab_table = lookup_ops.StaticHashTable(
vocab_initializer, default_value=-1
)
@def_function.function(
input_signature=[tensor_spec.TensorSpec((None,), dtypes.string)]
)
def __call__(self, inputs):
return self._vocab_table.lookup(inputs)
vocab_file = self._make_asset("\n".join(["a", "b", "c", "d"]))
root = MyLookupModel(vocab_file)
imported = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
file_io.delete_file(vocab_file)
self.assertAllEqual(imported(constant_op.constant(["d", "b"])), [3, 1])
def test_custom_gradients(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
@custom_gradient.custom_gradient
def log1pexp(x):
e = math_ops.exp(x)
def grad(dy):
return dy * e # intentionally incorrect, to verify that the custom gradient is respected.
return math_ops.log(1 + e), grad
@def_function.function
def g(x):
y = log1pexp(x)
@def_function.function
def g_nest():
return log1pexp(y)
return g_nest()
@def_function.function
def f(x):
return log1pexp(g(x * x))
v = variables.Variable(1.0)
with backprop.GradientTape() as tape2:
with backprop.GradientTape() as tape:
tape.watch(v)
y = f(v)
expected_grads = tape.gradient(y, v)
expected_grad_grads = tape2.gradient(expected_grads, v)
root = autotrackable.AutoTrackable()
root.f = f
loaded = cycle(
root,
cycles,
save_option=save_options.SaveOptions(
experimental_custom_gradients=True
),
use_cpp_bindings=use_cpp_bindings,
)
with backprop.GradientTape() as tape2:
with backprop.GradientTape() as tape:
tape.watch(v)
y = loaded.f(v)
grads = tape.gradient(y, v)
grad_grads = tape2.gradient(grads, v)
self.assertAllClose(grads, expected_grads)
self.assertAllClose(grad_grads, expected_grad_grads)
def test_custom_gradients_with_none_grad(self, cycles, use_cpp_bindings):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
# https://github.com/google/jax/issues/7123
@custom_gradient.custom_gradient
def f(params, state):
def grad_fn(*args):
return args
return (params, state), grad_fn
@def_function.function(
input_signature=[
tensor_spec.TensorSpec([], dtypes.float32),
tensor_spec.TensorSpec([], dtypes.int32),
]
)
def predict(params, state):
return f(params, state)
params = variables.Variable(1.0)
# None grads only appear when state is an int.
state = constant_op.constant(3, dtype=dtypes.int32)
with backprop.GradientTape() as tape:
tape.watch(params)
y = predict(params, state)
expected_grads = tape.gradient(y, params)
root = autotrackable.AutoTrackable()
root.fn = predict
loaded = cycle(
root,
cycles,
save_option=save_options.SaveOptions(
experimental_custom_gradients=True
),
use_cpp_bindings=use_cpp_bindings,
)
with backprop.GradientTape() as tape:
tape.watch(params)
y = loaded.fn(params, state)
grads = tape.gradient(y, params)
self.assertAllClose(grads, expected_grads)
def test_custom_gradients_with_none_grad_and_partial_shape(
self, cycles, use_cpp_bindings
):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
# https://github.com/google/jax/issues/7123
@custom_gradient.custom_gradient
def f(params, state):
def grad_fn(*args):
return args
return (params, state), grad_fn
@def_function.function(
input_signature=[
tensor_spec.TensorSpec(None, dtypes.float32),
tensor_spec.TensorSpec(None, dtypes.int32),
]
)
def predict(params, state):
return f(params, state)
params = variables.Variable(1.0)
# None grads only appear when state is an int.
state = constant_op.constant(3, dtype=dtypes.int32)
with backprop.GradientTape() as tape:
tape.watch(params)
y = predict(params, state)
expected_grads = tape.gradient(y, params)
root = autotrackable.AutoTrackable()
root.fn = predict
loaded = cycle(
root,
cycles,
save_option=save_options.SaveOptions(
experimental_custom_gradients=True
),
use_cpp_bindings=use_cpp_bindings,
)
with backprop.GradientTape() as tape:
tape.watch(params)
y = loaded.fn(params, state)
grads = tape.gradient(y, params)
self.assertAllClose(grads, expected_grads)
def test_signature_propagates_experimental_attr(
self, cycles, use_cpp_bindings
):
# TODO(b/264869228) Fix LoadTest
if use_cpp_bindings:
self.skipTest("Not implemented for cpp.")
root = autotrackable.AutoTrackable()
experimental_attributes = {"disable_summaries_at_runtime": ["x", True]}
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)],
experimental_attributes=experimental_attributes,
)
def f(x):
return x * 2.0
root.f = f
self.assertEqual(root.f(constant_op.constant(1.0)).numpy(), 2.0)
loaded = cycle(root, cycles, use_cpp_bindings=use_cpp_bindings)
self.assertEqual(loaded.f(constant_op.constant(1.0)).numpy(), 2.0)
self.assertProtoEquals(
r"""
list {
s: 'x',
b: True
}
""",
loaded.signatures["serving_default"].function_def.attr[
"disable_summaries_at_runtime"
],
)
@parameterized.named_parameters(*_test_params())
| LoadTest |
python | plotly__plotly.py | plotly/graph_objs/histogram/unselected/_marker.py | {
"start": 233,
"end": 3335
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram.unselected"
_path_str = "histogram.unselected.marker"
_valid_props = {"color", "opacity"}
@property
def color(self):
"""
Sets the marker color of unselected points, applied only when a
selection exists.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def opacity(self):
"""
Sets the marker opacity of unselected points, applied only when
a selection exists.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the marker color of unselected points, applied
only when a selection exists.
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
"""
def __init__(self, arg=None, color=None, opacity=None, **kwargs):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram.unselected.Marker`
color
Sets the marker color of unselected points, applied
only when a selection exists.
opacity
Sets the marker opacity of unselected points, applied
only when a selection exists.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram.unselected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.unselected.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("opacity", arg, opacity)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
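For context, a short usage sketch of these unselected-marker properties through the public plotly API; the data values are illustrative only.
import plotly.graph_objects as go
fig = go.Figure(
    go.Histogram(
        x=[1, 1, 2, 2, 2, 3],
        selectedpoints=[0, 1],  # having a selection activates selected/unselected styling
        unselected={"marker": {"color": "lightgray", "opacity": 0.3}},
    )
)
fig.show()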
python | huggingface__transformers | src/transformers/models/jetmoe/configuration_jetmoe.py | {
"start": 876,
"end": 7548
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`JetMoeModel`]. It is used to instantiate a
JetMoe model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a configuration of the JetMoe-4B.
[jetmoe/jetmoe-8b](https://huggingface.co/jetmoe/jetmoe-8b)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the JetMoe model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`JetMoeModel`]
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each key and value in the Transformer encoder.
kv_channels (`int`, *optional*, defaults to 128):
Defines the number of channels for the key and value tensors.
intermediate_size (`int`, *optional*, defaults to 5632):
Dimension of the MLP representations.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. JetMoe's attention allows sequences of
up to 4096 tokens.
activation_function (`string`, *optional*, defaults to `"silu"`):
Defines the activation function for MLP experts.
num_local_experts (`int`, *optional*, defaults to 8):
Defines the number of experts in the MoE and MoA.
num_experts_per_tok (`int`, *optional*, defaults to 2):
The number of experts to route per token, for both MoE and MoA.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss.
aux_loss_coef (`float`, *optional*, defaults to 0.01):
The coefficient for the auxiliary loss.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether the model's input and output word embeddings should be tied.
rope_parameters (`RopeParameters`, *optional*):
Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
with longer `max_position_embeddings`.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
initializer_range (`float`, *optional*, defaults to 0.01):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
```python
>>> from transformers import JetMoeModel, JetMoeConfig
>>> # Initializing a JetMoe 4B style configuration
>>> configuration = JetMoeConfig()
>>> # Initializing a model from the JetMoe 4B style configuration
>>> model = JetMoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "jetmoe"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"head_dim": "kv_channels"}
def __init__(
self,
vocab_size: Optional[int] = 32000,
hidden_size: Optional[int] = 2048,
num_hidden_layers: Optional[int] = 12,
num_key_value_heads: Optional[int] = 16,
kv_channels: Optional[int] = 128,
intermediate_size: Optional[int] = 5632,
max_position_embeddings: Optional[int] = 4096,
activation_function: Optional[str] = "silu",
num_local_experts: Optional[int] = 8,
num_experts_per_tok: Optional[int] = 2,
output_router_logits: Optional[bool] = False,
aux_loss_coef: Optional[float] = 0.01,
use_cache: Optional[bool] = True,
bos_token_id: Optional[int] = 1,
eos_token_id: Optional[int] = 2,
tie_word_embeddings: Optional[bool] = True,
rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
rms_norm_eps: Optional[float] = 1e-6,
initializer_range: Optional[float] = 0.01,
attention_dropout: Optional[float] = 0.0,
**kwargs,
):
if num_experts_per_tok > num_local_experts:
raise ValueError("`num_experts_per_tok` must be less than or equal to `num_local_experts`")
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_key_value_heads * num_experts_per_tok
self.num_key_value_heads = num_key_value_heads
self.kv_channels = kv_channels
self.intermediate_size = intermediate_size
self.max_position_embeddings = max_position_embeddings
self.activation_function = activation_function
self.num_local_experts = num_local_experts
self.num_experts_per_tok = num_experts_per_tok
self.output_router_logits = output_router_logits
self.aux_loss_coef = aux_loss_coef
self.use_cache = use_cache
self.initializer_range = initializer_range
self.attention_dropout = attention_dropout
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.rms_norm_eps = rms_norm_eps
self.rope_parameters = rope_parameters
super().__init__(
bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
)
__all__ = ["JetMoeConfig"]
| JetMoeConfig |
python | numpy__numpy | numpy/polynomial/tests/test_laguerre.py | {
"start": 3265,
"end": 5919
} | class ____:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([9., -14., 6.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5)) * 2 - 1
y = polyval(x, [1., 2., 3.])
def test_lagval(self):
# check empty input
assert_equal(lag.lagval([], [1]).size, 0)
# check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Llist]
for i in range(7):
msg = f"At i={i}"
tgt = y[i]
res = lag.lagval(x, [0] * i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
# check that shape is preserved
for i in range(3):
dims = [2] * i
x = np.zeros(dims)
assert_equal(lag.lagval(x, [1]).shape, dims)
assert_equal(lag.lagval(x, [1, 0]).shape, dims)
assert_equal(lag.lagval(x, [1, 0, 0]).shape, dims)
def test_lagval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test exceptions
assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d)
# test values
tgt = y1 * y2
res = lag.lagval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = lag.lagval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_lagval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test exceptions
assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d)
# test values
tgt = y1 * y2 * y3
res = lag.lagval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = lag.lagval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_laggrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test values
tgt = np.einsum('i,j->ij', y1, y2)
res = lag.laggrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = lag.laggrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3) * 2)
def test_laggrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
# test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = lag.laggrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
# test shape
z = np.ones((2, 3))
res = lag.laggrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3) * 3)
| TestEvaluation |
python | python-pillow__Pillow | src/PIL/MpoImagePlugin.py | {
"start": 3096,
"end": 6722
} | class ____(JpegImagePlugin.JpegImageFile):
format = "MPO"
format_description = "MPO (CIPA DC-007)"
_close_exclusive_fp_after_loading = False
def _open(self) -> None:
self.fp.seek(0) # prep the fp in order to pass the JPEG test
JpegImagePlugin.JpegImageFile._open(self)
self._after_jpeg_open()
def _after_jpeg_open(self, mpheader: dict[int, Any] | None = None) -> None:
self.mpinfo = mpheader if mpheader is not None else self._getmp()
if self.mpinfo is None:
msg = "Image appears to be a malformed MPO file"
raise ValueError(msg)
self.n_frames = self.mpinfo[0xB001]
self.__mpoffsets = [
mpent["DataOffset"] + self.info["mpoffset"] for mpent in self.mpinfo[0xB002]
]
self.__mpoffsets[0] = 0
# Note that the following assertion will only be invalid if something
# gets broken within JpegImagePlugin.
assert self.n_frames == len(self.__mpoffsets)
del self.info["mpoffset"] # no longer needed
self.is_animated = self.n_frames > 1
self._fp = self.fp # FIXME: hack
self._fp.seek(self.__mpoffsets[0]) # get ready to read first frame
self.__frame = 0
self.offset = 0
# for now we can only handle reading and individual frame extraction
self.readonly = 1
def load_seek(self, pos: int) -> None:
if isinstance(self._fp, DeferredError):
raise self._fp.ex
self._fp.seek(pos)
def seek(self, frame: int) -> None:
if not self._seek_check(frame):
return
if isinstance(self._fp, DeferredError):
raise self._fp.ex
self.fp = self._fp
self.offset = self.__mpoffsets[frame]
original_exif = self.info.get("exif")
if "exif" in self.info:
del self.info["exif"]
self.fp.seek(self.offset + 2) # skip SOI marker
if not self.fp.read(2):
msg = "No data found for frame"
raise ValueError(msg)
self.fp.seek(self.offset)
JpegImagePlugin.JpegImageFile._open(self)
if self.info.get("exif") != original_exif:
self._reload_exif()
self.tile = [
ImageFile._Tile("jpeg", (0, 0) + self.size, self.offset, self.tile[0][-1])
]
self.__frame = frame
def tell(self) -> int:
return self.__frame
@staticmethod
def adopt(
jpeg_instance: JpegImagePlugin.JpegImageFile,
mpheader: dict[int, Any] | None = None,
) -> MpoImageFile:
"""
Transform the instance of JpegImageFile into
an instance of MpoImageFile.
After the call, the JpegImageFile is extended
to be an MpoImageFile.
This is mainly useful when opening a JPEG
file that reveals itself as an MPO, to avoid
a double call to _open.
"""
jpeg_instance.__class__ = MpoImageFile
mpo_instance = cast(MpoImageFile, jpeg_instance)
mpo_instance._after_jpeg_open(mpheader)
return mpo_instance
# ---------------------------------------------------------------------
# Registry stuff
# Note that since MPO shares a factory with JPEG, we do not need to do a
# separate registration for it here.
# Image.register_open(MpoImageFile.format,
# JpegImagePlugin.jpeg_factory, _accept)
Image.register_save(MpoImageFile.format, _save)
Image.register_save_all(MpoImageFile.format, _save_all)
Image.register_extension(MpoImageFile.format, ".mpo")
Image.register_mime(MpoImageFile.format, "image/mpo")
| MpoImageFile |
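A brief, hedged example of iterating over the frames of a multi-picture file with Pillow's public API; the file name is a placeholder.
from PIL import Image
with Image.open("photo.mpo") as im:  # placeholder path
    print(im.n_frames, im.is_animated)
    for frame in range(im.n_frames):
        im.seek(frame)  # selects the embedded JPEG at that index
        im.load()
        im.convert("RGB").save(f"frame_{frame}.jpg")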
python | getsentry__sentry | tests/sentry/seer/explorer/test_custom_tool_utils.py | {
"start": 4335,
"end": 9699
} | class ____(TestCase):
def setUp(self):
super().setUp()
create_test_regions()
self.organization = self.create_organization()
def test_validate_tool_class_nested(self):
"""Test validation fails for nested classes."""
class OuterClass:
class NestedTool(ExplorerTool):
@classmethod
def get_description(cls):
return "Nested tool"
@classmethod
def get_params(cls):
return []
@classmethod
def execute(cls, organization, **kwargs):
return "test"
with pytest.raises(ValueError) as cm:
extract_tool_schema(OuterClass.NestedTool)
assert "module-level class" in str(cm.value)
def test_extract_tool_schema_basic(self):
"""Test extracting schema from a basic tool class."""
schema = extract_tool_schema(GetUserInfoTool)
assert schema.name == "GetUserInfoTool"
assert "GetUserInfoTool" in schema.module_path
assert schema.description == "Fetches user information"
assert len(schema.parameters) == 1
assert schema.parameters[0]["name"] == "user_id"
assert schema.parameters[0]["type"] == "integer"
assert schema.required == ["user_id"]
def test_extract_tool_schema_with_optional_params(self):
"""Test extracting schema with optional parameters."""
schema = extract_tool_schema(SearchLogsTool)
assert schema.name == "SearchLogsTool"
assert schema.description == "Search application logs"
assert len(schema.parameters) == 3
assert schema.required == ["query"] # Only required param
# Check parameter types
param_types = {p["name"]: p["type"] for p in schema.parameters}
assert param_types["query"] == "string"
assert param_types["limit"] == "integer"
assert param_types["include_archived"] == "boolean"
def test_extract_tool_schema_with_list_params(self):
"""Test extracting schema with list parameters."""
schema = extract_tool_schema(ProcessItemsTool)
assert len(schema.parameters) == 2
# Check list types
items_param = next(p for p in schema.parameters if p["name"] == "items")
assert items_param["type"] == "array"
assert items_param["items"]["type"] == "string"
priorities_param = next(p for p in schema.parameters if p["name"] == "priorities")
assert priorities_param["type"] == "array"
assert priorities_param["items"]["type"] == "integer"
def test_call_custom_tool_success(self):
"""Test calling a custom tool successfully."""
# Use test tool from this test module
module_path = "tests.sentry.seer.explorer.test_custom_tool_utils.TestCustomTool"
# Call via the utility function
result = call_custom_tool(
module_path=module_path,
allowed_prefixes=("sentry.", "tests.sentry."),
organization_id=self.organization.id,
message="Hi",
count=3,
)
assert result == "HiHiHi"
def test_call_custom_tool_with_optional_param(self):
"""Test calling a custom tool with default parameter."""
module_path = "tests.sentry.seer.explorer.test_custom_tool_utils.TestToolWithDefault"
result = call_custom_tool(
module_path=module_path,
allowed_prefixes=("sentry.", "tests.sentry."),
organization_id=self.organization.id,
value="Hello",
)
assert result == "Hello!"
def test_call_custom_tool_security_restriction(self):
"""Test that only allowed prefixes module paths are allowed."""
with pytest.raises(ValueError) as cm:
call_custom_tool(
module_path="os.system",
organization_id=self.organization.id,
command="ls",
)
assert "must start with one of" in str(cm.value)
assert "('sentry.',)" in str(cm.value)
def test_call_custom_tool_invalid_path(self):
"""Test calling with invalid module path."""
with pytest.raises(ValueError) as cm:
call_custom_tool(
module_path="sentry.nonexistent.module.function",
organization_id=self.organization.id,
)
assert "Could not import" in str(cm.value)
def test_call_custom_tool_wrong_return_type(self):
"""Test error when tool returns non-string."""
module_path = "tests.sentry.seer.explorer.test_custom_tool_utils.BadTool"
with pytest.raises(RuntimeError) as cm:
call_custom_tool(
module_path=module_path,
allowed_prefixes=("sentry.", "tests.sentry."),
organization_id=self.organization.id,
)
assert "must return str" in str(cm.value)
def test_tool_with_enum(self):
"""Test that EnumType is converted correctly."""
schema = extract_tool_schema(ToolWithEnum)
assert len(schema.parameters) == 1
unit_param = schema.parameters[0]
assert unit_param["name"] == "unit"
assert unit_param["type"] == "string"
assert unit_param["enum"] == ["celsius", "fahrenheit"]
| CustomToolUtilsTest |
python | astropy__astropy | astropy/visualization/interval.py | {
"start": 7813,
"end": 11601
} | class ____(BaseInterval):
"""
Interval based on IRAF's zscale.
Original implementation:
https://github.com/spacetelescope/stsci.numdisplay/blob/master/lib/stsci/numdisplay/zscale.py
Licensed under a 3-clause BSD style license (see AURA_LICENSE.rst).
Parameters
----------
n_samples : int, optional
The number of points in the array to sample for determining
scaling factors. Defaults to 1000.
.. versionchanged:: 7.0
``nsamples`` parameter is removed.
contrast : float, optional
The scaling factor (between 0 and 1) for determining the minimum
and maximum value. Larger values decrease the difference
between the minimum and maximum values used for display.
Defaults to 0.25.
max_reject : float, optional
If more than ``max_reject * npixels`` pixels are rejected, then
the returned values are the minimum and maximum of the data.
Defaults to 0.5.
min_npixels : int, optional
If there are less than ``min_npixels`` pixels remaining after
the pixel rejection, then the returned values are the minimum
and maximum of the data. Defaults to 5.
krej : float, optional
The number of sigma used for the rejection. Defaults to 2.5.
max_iterations : int, optional
The maximum number of iterations for the rejection. Defaults to
5.
"""
def __init__(
self,
n_samples=1000,
contrast=0.25,
max_reject=0.5,
min_npixels=5,
krej=2.5,
max_iterations=5,
):
self.n_samples = n_samples
self.contrast = contrast
self.max_reject = max_reject
self.min_npixels = min_npixels
self.krej = krej
self.max_iterations = max_iterations
def get_limits(self, values):
values = self._process_values(values)
# Sample the image
stride = int(max(1.0, values.size / self.n_samples))
samples = values[::stride][: self.n_samples]
samples.sort()
npix = len(samples)
vmin = samples[0]
vmax = samples[-1]
# Fit a line to the sorted array of samples
minpix = max(self.min_npixels, int(npix * self.max_reject))
x = np.arange(npix)
ngoodpix = npix
last_ngoodpix = npix + 1
# Bad pixels mask used in k-sigma clipping
badpix = np.zeros(npix, dtype=bool)
# Kernel used to dilate the bad pixels mask
ngrow = max(1, int(npix * 0.01))
kernel = np.ones(ngrow, dtype=bool)
for _ in range(self.max_iterations):
if ngoodpix >= last_ngoodpix or ngoodpix < minpix:
break
fit = np.polyfit(x, samples, deg=1, w=(~badpix).astype(int))
fitted = np.poly1d(fit)(x)
# Subtract fitted line from the data array
flat = samples - fitted
# Compute the k-sigma rejection threshold
threshold = self.krej * flat[~badpix].std()
# Detect and reject pixels further than k*sigma from the
# fitted line
badpix[(flat < -threshold) | (flat > threshold)] = True
# Convolve with a kernel of length ngrow
badpix = np.convolve(badpix, kernel, mode="same")
last_ngoodpix = ngoodpix
ngoodpix = np.sum(~badpix)
if ngoodpix >= minpix:
slope, _ = fit
if self.contrast > 0:
slope = slope / self.contrast
center_pixel = (npix - 1) // 2
median = np.median(samples)
vmin = max(vmin, median - (center_pixel - 1) * slope)
vmax = min(vmax, median + (npix - center_pixel) * slope)
return vmin, vmax
| ZScaleInterval |
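A minimal usage sketch for the interval above, assuming astropy is importable; the synthetic image is illustrative.
import numpy as np
from astropy.visualization import ZScaleInterval
rng = np.random.default_rng(0)
image = rng.normal(loc=100.0, scale=5.0, size=(64, 64))
image[0, 0] = 1e6  # a single hot pixel that plain min/max scaling would latch onto
vmin, vmax = ZScaleInterval(contrast=0.25).get_limits(image)
print(vmin, vmax)  # robust limits close to the bulk of the pixel values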
python | ray-project__ray | python/ray/llm/_internal/common/utils/download_utils.py | {
"start": 609,
"end": 3721
} | class ____(enum.Enum):
"""Defines which files to download from cloud storage."""
MODEL_AND_TOKENIZER = enum.auto()
TOKENIZER_ONLY = enum.auto()
EXCLUDE_SAFETENSORS = enum.auto()
NONE = enum.auto()
def __bool__(self):
return self != NodeModelDownloadable.NONE
def union(self, other: "NodeModelDownloadable") -> "NodeModelDownloadable":
"""Return a NodeModelDownloadable that is a union of this and the other."""
if (
self == NodeModelDownloadable.MODEL_AND_TOKENIZER
or other == NodeModelDownloadable.MODEL_AND_TOKENIZER
):
return NodeModelDownloadable.MODEL_AND_TOKENIZER
if (
self == NodeModelDownloadable.EXCLUDE_SAFETENSORS
or other == NodeModelDownloadable.EXCLUDE_SAFETENSORS
):
return NodeModelDownloadable.EXCLUDE_SAFETENSORS
if (
self == NodeModelDownloadable.TOKENIZER_ONLY
or other == NodeModelDownloadable.TOKENIZER_ONLY
):
return NodeModelDownloadable.TOKENIZER_ONLY
return NodeModelDownloadable.NONE
def get_model_entrypoint(model_id: str) -> str:
"""Get the path to entrypoint of the model on disk if it exists, otherwise return the model id as is.
Entrypoint is typically <TRANSFORMERS_CACHE>/models--<model_id>/
Args:
model_id: Hugging Face model ID.
Returns:
The path to the entrypoint of the model on disk if it exists, otherwise the model id as is.
"""
from transformers.utils.hub import TRANSFORMERS_CACHE
model_dir = Path(
TRANSFORMERS_CACHE, f"models--{model_id.replace('/', '--')}"
).expanduser()
if not model_dir.exists():
return model_id
return str(model_dir.absolute())
def get_model_location_on_disk(model_id: str) -> str:
"""Get the location of the model on disk if exists, otherwise return the model id as is.
Args:
model_id: Hugging Face model ID.
"""
model_dir = Path(get_model_entrypoint(model_id))
model_id_or_path = model_id
model_dir_refs_main = Path(model_dir, "refs", "main")
if model_dir.exists():
if model_dir_refs_main.exists():
# If refs/main exists, use the snapshot hash to find the model
# and check if *config.json (could be config.json for general models
# or adapter_config.json for LoRA adapters) exists to make sure it
# follows HF model repo structure.
with open(model_dir_refs_main, "r") as f:
snapshot_hash = f.read().strip()
snapshot_hash_path = Path(model_dir, "snapshots", snapshot_hash)
if snapshot_hash_path.exists() and list(
Path(snapshot_hash_path).glob("*config.json")
):
model_id_or_path = str(snapshot_hash_path.absolute())
else:
# If it doesn't have refs/main, it is a custom model repo
# and we can just return the model_dir.
model_id_or_path = str(model_dir.absolute())
return model_id_or_path
| NodeModelDownloadable |
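A small sketch of the enum's union and truthiness semantics, assuming the class above is importable as NodeModelDownloadable.
tok = NodeModelDownloadable.TOKENIZER_ONLY
full = NodeModelDownloadable.MODEL_AND_TOKENIZER
assert tok.union(full) is NodeModelDownloadable.MODEL_AND_TOKENIZER  # union keeps the broadest option
assert not NodeModelDownloadable.NONE  # __bool__ makes NONE falsy
assert NodeModelDownloadable.TOKENIZER_ONLY  # every other member is truthy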
python | huggingface__transformers | src/transformers/models/nllb_moe/modeling_nllb_moe.py | {
"start": 26208,
"end": 29855
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: NllbMoeConfig, is_sparse: bool = False, layer_idx: Optional[int] = None):
super().__init__()
self.embed_dim = config.d_model
self.is_sparse = is_sparse
self.self_attn = NllbMoeAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
config=config,
layer_idx=layer_idx,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.attn_dropout = nn.Dropout(config.dropout)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.cross_attention = NllbMoeAttention(
self.embed_dim,
config.decoder_attention_heads,
config.attention_dropout,
is_decoder=True,
config=config,
layer_idx=layer_idx,
)
self.cross_attention_layer_norm = nn.LayerNorm(self.embed_dim)
if not self.is_sparse:
self.ffn = NllbMoeDenseActDense(config, ffn_dim=config.decoder_ffn_dim)
else:
self.ffn = NllbMoeSparseMLP(config, ffn_dim=config.decoder_ffn_dim)
self.ff_layer_norm = nn.LayerNorm(config.d_model)
self.ff_dropout = nn.Dropout(config.activation_dropout)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
past_key_values=past_key_values,
attention_mask=attention_mask,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.attn_dropout(hidden_states)
hidden_states = residual + hidden_states
if encoder_hidden_states is not None:
residual = hidden_states
hidden_states = self.cross_attention_layer_norm(hidden_states)
hidden_states, _ = self.cross_attention(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
past_key_values=past_key_values,
attention_mask=encoder_attention_mask,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.attn_dropout(hidden_states)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.ff_layer_norm(hidden_states)
if self.is_sparse:
hidden_states = self.ffn(hidden_states, attention_mask)
else:
hidden_states = self.ffn(hidden_states)
hidden_states = self.ff_dropout(hidden_states)
hidden_states = residual + hidden_states
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
return hidden_states
@auto_docstring
| NllbMoeDecoderLayer |
python | numba__numba | numba/tests/test_obj_lifetime.py | {
"start": 1054,
"end": 3806
} | class ____(object):
"""
An object which records events when instances created through it
are deleted. Custom events can also be recorded to aid in
diagnosis.
"""
def __init__(self):
self._counts = collections.defaultdict(int)
self._events = []
self._wrs = {}
def make_dummy(self, name):
"""
Make an object whose deletion will be recorded as *name*.
"""
return _Dummy(self, name)
def _add_dummy(self, dummy):
wr = weakref.ref(dummy, self._on_disposal)
self._wrs[wr] = dummy.name
__call__ = make_dummy
def mark(self, event):
"""
Manually append *event* to the recorded events.
*event* can be formatted using format().
"""
count = self._counts[event] + 1
self._counts[event] = count
self._events.append(event.format(count=count))
def _on_disposal(self, wr):
name = self._wrs.pop(wr)
self._events.append(name)
@property
def alive(self):
"""
A list of objects which haven't been deleted yet.
"""
return [wr() for wr in self._wrs]
@property
def recorded(self):
"""
A list of recorded events.
"""
return self._events
def simple_usecase1(rec):
a = rec('a')
b = rec('b')
c = rec('c')
a = b + c
rec.mark('--1--')
d = a + a # b + c + b + c
rec.mark('--2--')
return d
def simple_usecase2(rec):
a = rec('a')
b = rec('b')
rec.mark('--1--')
x = a
y = x
a = None
return y
def looping_usecase1(rec):
a = rec('a')
b = rec('b')
c = rec('c')
x = b
for y in a:
x = x + y
rec.mark('--loop bottom--')
rec.mark('--loop exit--')
x = x + c
return x
def looping_usecase2(rec):
a = rec('a')
b = rec('b')
cum = rec('cum')
for x in a:
rec.mark('--outer loop top--')
cum = cum + x
z = x + x
rec.mark('--inner loop entry #{count}--')
for y in b:
rec.mark('--inner loop top #{count}--')
cum = cum + y
rec.mark('--inner loop bottom #{count}--')
rec.mark('--inner loop exit #{count}--')
if cum:
cum = y + z
else:
# Never gets here, but let the Numba compiler see a `break` opcode
break
rec.mark('--outer loop bottom #{count}--')
else:
rec.mark('--outer loop else--')
rec.mark('--outer loop exit--')
return cum
def generator_usecase1(rec):
a = rec('a')
b = rec('b')
yield a
yield b
def generator_usecase2(rec):
a = rec('a')
b = rec('b')
for x in a:
yield x
yield b
| RefRecorder |
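The recorder is driven entirely from Python, so a short usage sketch may help; it assumes the _Dummy helper referenced by make_dummy is defined alongside the class in the test module.
rec = RefRecorder()
obj = rec.make_dummy('a')      # deletion of this object will be recorded as 'a'
rec.mark('--before delete--')
del obj                        # CPython refcounting fires the weakref callback immediately
print(rec.recorded)            # ['--before delete--', 'a']
print(rec.alive)               # [] once nothing created via rec remains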
python | davidhalter__jedi | jedi/inference/gradual/typing.py | {
"start": 14907,
"end": 15601
} | class ____(Value):
def __init__(self, inference_state, parent_context, tree_node, type_value_set):
super().__init__(inference_state, parent_context)
self._type_value_set = type_value_set
self.tree_node = tree_node
def py__class__(self):
c, = self._type_value_set.py__class__()
return c
def py__call__(self, arguments):
return self._type_value_set.execute_annotation()
@property
def name(self):
from jedi.inference.compiled.value import CompiledValueName
return CompiledValueName(self, 'NewType')
def __repr__(self) -> str:
return '<NewType: %s>%s' % (self.tree_node, self._type_value_set)
| NewType |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 19222,
"end": 19512
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (
GrapheneMessageEvent,
GrapheneDisplayableEvent,
GrapheneStepEvent,
GrapheneMarkerEvent,
)
name = "ResourceInitSuccessEvent"
| GrapheneResourceInitSuccessEvent |
python | python-pillow__Pillow | src/PIL/WebPImagePlugin.py | {
"start": 825,
"end": 10054
} | class ____(ImageFile.ImageFile):
format = "WEBP"
format_description = "WebP image"
__loaded = 0
__logical_frame = 0
def _open(self) -> None:
# Use the newer AnimDecoder API to parse the (possibly) animated file,
# and access muxed chunks like ICC/EXIF/XMP.
self._decoder = _webp.WebPAnimDecoder(self.fp.read())
# Get info from decoder
self._size, loop_count, bgcolor, frame_count, mode = self._decoder.get_info()
self.info["loop"] = loop_count
bg_a, bg_r, bg_g, bg_b = (
(bgcolor >> 24) & 0xFF,
(bgcolor >> 16) & 0xFF,
(bgcolor >> 8) & 0xFF,
bgcolor & 0xFF,
)
self.info["background"] = (bg_r, bg_g, bg_b, bg_a)
self.n_frames = frame_count
self.is_animated = self.n_frames > 1
self._mode = "RGB" if mode == "RGBX" else mode
self.rawmode = mode
# Attempt to read ICC / EXIF / XMP chunks from file
icc_profile = self._decoder.get_chunk("ICCP")
exif = self._decoder.get_chunk("EXIF")
xmp = self._decoder.get_chunk("XMP ")
if icc_profile:
self.info["icc_profile"] = icc_profile
if exif:
self.info["exif"] = exif
if xmp:
self.info["xmp"] = xmp
# Initialize seek state
self._reset(reset=False)
def _getexif(self) -> dict[int, Any] | None:
if "exif" not in self.info:
return None
return self.getexif()._get_merged_dict()
def seek(self, frame: int) -> None:
if not self._seek_check(frame):
return
# Set logical frame to requested position
self.__logical_frame = frame
def _reset(self, reset: bool = True) -> None:
if reset:
self._decoder.reset()
self.__physical_frame = 0
self.__loaded = -1
self.__timestamp = 0
def _get_next(self) -> tuple[bytes, int, int]:
# Get next frame
ret = self._decoder.get_next()
self.__physical_frame += 1
# Check if an error occurred
if ret is None:
self._reset() # Reset just to be safe
self.seek(0)
msg = "failed to decode next frame in WebP file"
raise EOFError(msg)
# Compute duration
data, timestamp = ret
duration = timestamp - self.__timestamp
self.__timestamp = timestamp
# libwebp gives frame end, adjust to start of frame
timestamp -= duration
return data, timestamp, duration
def _seek(self, frame: int) -> None:
if self.__physical_frame == frame:
return # Nothing to do
if frame < self.__physical_frame:
self._reset() # Rewind to beginning
while self.__physical_frame < frame:
self._get_next() # Advance to the requested frame
def load(self) -> Image.core.PixelAccess | None:
if self.__loaded != self.__logical_frame:
self._seek(self.__logical_frame)
# We need to load the image data for this frame
data, timestamp, duration = self._get_next()
self.info["timestamp"] = timestamp
self.info["duration"] = duration
self.__loaded = self.__logical_frame
# Set tile
if self.fp and self._exclusive_fp:
self.fp.close()
self.fp = BytesIO(data)
self.tile = [ImageFile._Tile("raw", (0, 0) + self.size, 0, self.rawmode)]
return super().load()
def load_seek(self, pos: int) -> None:
pass
def tell(self) -> int:
return self.__logical_frame
def _convert_frame(im: Image.Image) -> Image.Image:
# Make sure image mode is supported
if im.mode not in ("RGBX", "RGBA", "RGB"):
im = im.convert("RGBA" if im.has_transparency_data else "RGB")
return im
def _save_all(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
encoderinfo = im.encoderinfo.copy()
append_images = list(encoderinfo.get("append_images", []))
# If total frame count is 1, then save using the legacy API, which
# will preserve non-alpha modes
total = 0
for ims in [im] + append_images:
total += getattr(ims, "n_frames", 1)
if total == 1:
_save(im, fp, filename)
return
background: int | tuple[int, ...] = (0, 0, 0, 0)
if "background" in encoderinfo:
background = encoderinfo["background"]
elif "background" in im.info:
background = im.info["background"]
if isinstance(background, int):
# GifImagePlugin stores a global color table index in
# info["background"]. So it must be converted to an RGBA value
palette = im.getpalette()
if palette:
r, g, b = palette[background * 3 : (background + 1) * 3]
background = (r, g, b, 255)
else:
background = (background, background, background, 255)
duration = im.encoderinfo.get("duration", im.info.get("duration", 0))
loop = im.encoderinfo.get("loop", 0)
minimize_size = im.encoderinfo.get("minimize_size", False)
kmin = im.encoderinfo.get("kmin", None)
kmax = im.encoderinfo.get("kmax", None)
allow_mixed = im.encoderinfo.get("allow_mixed", False)
verbose = False
lossless = im.encoderinfo.get("lossless", False)
quality = im.encoderinfo.get("quality", 80)
alpha_quality = im.encoderinfo.get("alpha_quality", 100)
method = im.encoderinfo.get("method", 0)
icc_profile = im.encoderinfo.get("icc_profile") or ""
exif = im.encoderinfo.get("exif", "")
if isinstance(exif, Image.Exif):
exif = exif.tobytes()
xmp = im.encoderinfo.get("xmp", "")
if allow_mixed:
lossless = False
# Sensible keyframe defaults are from gif2webp.c script
if kmin is None:
kmin = 9 if lossless else 3
if kmax is None:
kmax = 17 if lossless else 5
# Validate background color
if (
not isinstance(background, (list, tuple))
or len(background) != 4
or not all(0 <= v < 256 for v in background)
):
msg = f"Background color is not an RGBA tuple clamped to (0-255): {background}"
raise OSError(msg)
# Convert to packed uint
bg_r, bg_g, bg_b, bg_a = background
background = (bg_a << 24) | (bg_r << 16) | (bg_g << 8) | (bg_b << 0)
# Setup the WebP animation encoder
enc = _webp.WebPAnimEncoder(
im.size,
background,
loop,
minimize_size,
kmin,
kmax,
allow_mixed,
verbose,
)
# Add each frame
frame_idx = 0
timestamp = 0
cur_idx = im.tell()
try:
for ims in [im] + append_images:
# Get number of frames in this image
nfr = getattr(ims, "n_frames", 1)
for idx in range(nfr):
ims.seek(idx)
frame = _convert_frame(ims)
# Append the frame to the animation encoder
enc.add(
frame.getim(),
round(timestamp),
lossless,
quality,
alpha_quality,
method,
)
# Update timestamp and frame index
if isinstance(duration, (list, tuple)):
timestamp += duration[frame_idx]
else:
timestamp += duration
frame_idx += 1
finally:
im.seek(cur_idx)
# Force encoder to flush frames
enc.add(None, round(timestamp), lossless, quality, alpha_quality, 0)
# Get the final output from the encoder
data = enc.assemble(icc_profile, exif, xmp)
if data is None:
msg = "cannot write file as WebP (encoder returned None)"
raise OSError(msg)
fp.write(data)
def _save(im: Image.Image, fp: IO[bytes], filename: str | bytes) -> None:
lossless = im.encoderinfo.get("lossless", False)
quality = im.encoderinfo.get("quality", 80)
alpha_quality = im.encoderinfo.get("alpha_quality", 100)
icc_profile = im.encoderinfo.get("icc_profile") or ""
exif = im.encoderinfo.get("exif", b"")
if isinstance(exif, Image.Exif):
exif = exif.tobytes()
if exif.startswith(b"Exif\x00\x00"):
exif = exif[6:]
xmp = im.encoderinfo.get("xmp", "")
method = im.encoderinfo.get("method", 4)
exact = 1 if im.encoderinfo.get("exact") else 0
im = _convert_frame(im)
data = _webp.WebPEncode(
im.getim(),
lossless,
float(quality),
float(alpha_quality),
icc_profile,
method,
exact,
exif,
xmp,
)
if data is None:
msg = "cannot write file as WebP (encoder returned None)"
raise OSError(msg)
fp.write(data)
Image.register_open(WebPImageFile.format, WebPImageFile, _accept)
if SUPPORTED:
Image.register_save(WebPImageFile.format, _save)
Image.register_save_all(WebPImageFile.format, _save_all)
Image.register_extension(WebPImageFile.format, ".webp")
Image.register_mime(WebPImageFile.format, "image/webp")
| WebPImageFile |
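A hedged example of reading an animated WebP through Pillow's public API; the file name is a placeholder.
from PIL import Image
with Image.open("animation.webp") as im:  # placeholder path
    print(im.n_frames, im.info.get("loop"))
    for frame in range(im.n_frames):
        im.seek(frame)
        im.load()  # fills in info["timestamp"] and info["duration"] for this frame
        print(frame, im.info["duration"])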
python | ray-project__ray | doc/source/ray-core/doc_code/placement_group_example.py | {
"start": 1730,
"end": 3537
} | class ____:
def __init__(self):
pass
def ready(self):
pass
# Create a GPU actor on the first bundle of index 0.
actor2 = Actor.options(
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg,
placement_group_bundle_index=0,
)
).remote()
# Verify that the GPU actor is scheduled.
ray.get(actor2.ready.remote(), timeout=10)
# __schedule_pg_3_end__
# __remove_pg_start__
# This API is asynchronous.
remove_placement_group(pg)
# Wait until placement group is killed.
time.sleep(1)
# Check that the placement group has died.
pprint(placement_group_table(pg))
"""
{'bundles': {0: {'GPU': 1.0}, 1: {'CPU': 1.0}},
'name': 'unnamed_group',
'placement_group_id': '40816b6ad474a6942b0edb45809b39c3',
'state': 'REMOVED',
'strategy': 'PACK'}
"""
# __remove_pg_end__
# __strategy_pg_start__
# Reserve a placement group of 2 bundles
# that have to be packed on the same node.
pg = placement_group([{"CPU": 1}, {"GPU": 1}], strategy="PACK")
# __strategy_pg_end__
remove_placement_group(pg)
# __detached_pg_start__
# driver_1.py
# Create a detached placement group that survives even after
# the job terminates.
pg = placement_group([{"CPU": 1}], lifetime="detached", name="global_name")
ray.get(pg.ready())
# __detached_pg_end__
remove_placement_group(pg)
# __get_pg_start__
# first_driver.py
# Create a placement group with a unique name within a namespace.
# Start Ray or connect to a Ray cluster using: ray.init(namespace="pg_namespace")
pg = placement_group([{"CPU": 1}], name="pg_name")
ray.get(pg.ready())
# second_driver.py
# Retrieve a placement group with a unique name within a namespace.
# Start Ray or connect to a Ray cluster using: ray.init(namespace="pg_namespace")
pg = ray.util.get_placement_group("pg_name")
# __get_pg_end__
| Actor |
python | GoogleCloudPlatform__python-docs-samples | functions/v2/typed/googlechatbot/main.py | {
"start": 997,
"end": 2008
} | class ____:
cardId: str
card: Dict[str, Any]
# Required to serialize the response
def to_dict(self) -> dict:
return {
"cardsV2": {
"cardId": self.cardId,
"card": self.card,
}
}
@functions_framework.typed
def googlechatbot(req: ChatRequest) -> ChatResponse:
displayName = req.message["sender"]["displayName"]
imageUrl = req.message["sender"]["avatarUrl"]
cardHeader = {
"title": f"Hello {displayName}!",
}
avatarWidget = {
"textParagraph": {"text": "Your avatar picture: "},
}
avatarImageWidget = {
"image": {"imageUrl": imageUrl},
}
avatarSection = {
"widgets": [avatarWidget, avatarImageWidget],
}
return ChatResponse(
cardId="avatarCard",
card={
"name": "Avatar Card",
"header": cardHeader,
"sections": [avatarSection],
},
)
# [END functions_typed_googlechatbot]
| ChatResponse |
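As a quick illustration of the serialization helper above, constructing a response and dumping it the same way the handler does; the values are placeholders.
resp = ChatResponse(cardId="avatarCard", card={"header": {"title": "Hello Alice!"}})
print(resp.to_dict())
# {'cardsV2': {'cardId': 'avatarCard', 'card': {'header': {'title': 'Hello Alice!'}}}}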
python | plotly__plotly.py | plotly/graph_objs/icicle/_hoverlabel.py | {
"start": 233,
"end": 11234
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "icicle"
_path_str = "icicle.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within the
hover label box. Has an effect only if the hover label text
spans two or more lines.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.icicle.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.icicle.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within the hover label box. Has an effect only if the
hover label text spans two or more lines.
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.icicle.Hoverlabel`
align
Sets the horizontal alignment of the text content
within the hover label box. Has an effect only if the
hover label text spans two or more lines.
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.icicle.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.icicle.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
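For orientation, a short sketch of setting these hover-label properties on an icicle trace via the public API.
import plotly.graph_objects as go
fig = go.Figure(go.Icicle(labels=["root", "a", "b"], parents=["", "root", "root"]))
fig.update_traces(hoverlabel={"bgcolor": "white", "align": "left", "namelength": -1})
fig.show()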
python | getsentry__sentry | src/sentry/analytics/event_manager.py | {
"start": 157,
"end": 661
} | class ____:
def __init__(self) -> None:
self._event_types: MutableMapping[Any, type[Event]] = {}
def register(self, event_cls: type[Event]) -> None:
event_type = event_cls.type
if event_type in self._event_types:
assert self._event_types[event_type] == event_cls
else:
self._event_types[event_type] = event_cls
def get(self, type: str) -> type[Event]:
return self._event_types[type]
default_manager = EventManager()
| EventManager |
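A tiny round-trip sketch of register/get; the stub event class is a hypothetical stand-in for sentry's real analytics Event base.
class ExampleCreated:              # stand-in exposing the `type` attribute the manager reads
    type = "example.created"
manager = EventManager()
manager.register(ExampleCreated)
assert manager.get("example.created") is ExampleCreated
manager.register(ExampleCreated)   # re-registering the same class is accepted (no-op)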
python | astropy__astropy | astropy/units/tests/test_quantity_erfa_ufuncs.py | {
"start": 626,
"end": 12866
} | class ____:
@classmethod
def setup_class(cls):
cls.pv_unit = u.Unit("AU,AU/day")
cls.pv_value = np.array(
[
([1.0, 0.0, 0.0], [0.0, 0.0125, 0.0]),
([0.0, 1.0, 0.0], [-0.0125, 0.0, 0.0]),
],
dtype=erfa_ufunc.dt_pv,
)
cls.pv = cls.pv_value << cls.pv_unit
def test_cpv(self):
pv_copy = erfa_ufunc.cpv(self.pv)
assert_array_equal(pv_copy, self.pv)
assert not np.may_share_memory(pv_copy, self.pv)
def test_p2pv(self):
p2pv = erfa_ufunc.p2pv(self.pv["p"])
assert_array_equal(p2pv["p"], self.pv["p"])
assert_array_equal(
p2pv["v"], np.zeros(self.pv.shape + (3,), float) << u.m / u.s
)
def test_p2pv_inplace(self):
# TODO: fix np.zeros_like.
out = np.zeros_like(self.pv_value) << self.pv_unit
p2pv = erfa_ufunc.p2pv(self.pv["p"], out=out)
assert out is p2pv
assert_array_equal(p2pv["p"], self.pv["p"])
assert_array_equal(
p2pv["v"], np.zeros(self.pv.shape + (3,), float) << u.m / u.s
)
def test_pv2p(self):
p = erfa_ufunc.pv2p(self.pv)
assert_array_equal(p, self.pv["p"])
out = np.zeros_like(p)
p2 = erfa_ufunc.pv2p(self.pv, out=out)
assert out is p2
assert_array_equal(p2, self.pv["p"])
def test_pv2s(self):
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(self.pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(self.pv.shape)) # latitude
assert r.unit == u.AU
assert_array_equal(r.value, np.ones(self.pv.shape))
assert td.unit == u.radian / u.day
assert_array_equal(td.value, np.array([0.0125] * 2))
assert pd.unit == u.radian / u.day
assert_array_equal(pd.value, np.zeros(self.pv.shape))
assert rd.unit == u.AU / u.day
assert_array_equal(rd.value, np.zeros(self.pv.shape))
def test_pv2s_non_standard_units(self):
pv = self.pv_value << u.Unit("Pa,Pa/m")
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(pv.shape)) # latitude
assert r.unit == u.Pa
assert_array_equal(r.value, np.ones(pv.shape))
assert td.unit == u.radian / u.m
assert_array_equal(td.value, np.array([0.0125] * 2))
assert pd.unit == u.radian / u.m
assert_array_equal(pd.value, np.zeros(pv.shape))
assert rd.unit == u.Pa / u.m
assert_array_equal(rd.value, np.zeros(pv.shape))
@pytest.mark.xfail(
reason=(
"erfa ufuncs cannot take different names; it is not yet clear whether "
"this is changeable; see https://github.com/liberfa/pyerfa/issues/77"
)
)
def test_pv2s_non_standard_names_and_units(self):
pv_value = np.array(self.pv_value, dtype=[("pos", "f8"), ("vel", "f8")])
pv = pv_value << u.Unit("Pa,Pa/m")
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(pv)
assert theta.unit == u.radian
assert_quantity_allclose(theta, [0, 90] * u.deg) # longitude
assert phi.unit == u.radian
assert_array_equal(phi.value, np.zeros(pv.shape)) # latitude
assert r.unit == u.Pa
assert_array_equal(r.value, np.ones(pv.shape))
assert td.unit == u.radian / u.m
assert_array_equal(td.value, np.array([0.0125] * 2))
assert pd.unit == u.radian / u.m
assert_array_equal(pd.value, np.zeros(pv.shape))
assert rd.unit == u.Pa / u.m
assert_array_equal(rd.value, np.zeros(pv.shape))
def test_s2pv(self):
theta, phi, r, td, pd, rd = erfa_ufunc.pv2s(self.pv)
# On purpose change some of the units away from expected by s2pv.
pv = erfa_ufunc.s2pv(
theta.to(u.deg), phi, r.to(u.m), td.to(u.deg / u.day), pd, rd.to(u.m / u.s)
)
assert pv.unit == u.StructuredUnit("m, m/s", names=("p", "v"))
assert_quantity_allclose(pv["p"], self.pv["p"], atol=1 * u.m, rtol=0)
assert_quantity_allclose(pv["v"], self.pv["v"], atol=1 * u.mm / u.s, rtol=0)
def test_s2p_not_all_quantity(self):
# Test for a useful error message - see gh-16873.
# Non-quantity input should be treated as dimensionless and thus cannot
# be converted to radians.
with pytest.raises(
AttributeError,
match=(
"'NoneType' object has no attribute 'get_converter'"
".*\n.*treated as dimensionless"
),
):
erfa_ufunc.s2p(0.5, 0.5, 4 * u.km)
# Except if we have the right equivalency in place.
with u.add_enabled_equivalencies(u.dimensionless_angles()):
result = erfa_ufunc.s2p(0.5, 0.5, 4 * u.km)
expected = erfa_ufunc.s2p(0.5 * u.radian, 0.5 * u.radian, 4 * u.km)
assert_array_equal(result, expected)
def test_pvstar(self):
ra, dec, pmr, pmd, px, rv, stat = erfa_ufunc.pvstar(self.pv)
assert_array_equal(stat, np.zeros(self.pv.shape, dtype="i4"))
assert ra.unit == u.radian
assert_quantity_allclose(ra, [0, 90] * u.deg)
assert dec.unit == u.radian
assert_array_equal(dec.value, np.zeros(self.pv.shape)) # latitude
assert pmr.unit == u.radian / u.year
assert_quantity_allclose(pmr, [0.0125, 0.0125] * u.radian / u.day)
assert pmd.unit == u.radian / u.year
assert_array_equal(pmd.value, np.zeros(self.pv.shape))
assert px.unit == u.arcsec
assert_quantity_allclose(px, 1 * u.radian)
assert rv.unit == u.km / u.s
# RV is non-zero because proper motion induces a small redshift
# due to second order Doppler shift.
assert_quantity_allclose(
rv, np.zeros(self.pv.shape) << (u.km / u.s), atol=1 * u.m / u.s
)
def test_starpv(self):
ra, dec, pmr, pmd, px, rv, stat = erfa_ufunc.pvstar(self.pv)
pv, stat = erfa_ufunc.starpv(
ra.to(u.deg), dec.to(u.deg), pmr, pmd, px, rv.to(u.m / u.s)
)
assert_array_equal(stat, np.zeros(self.pv.shape, dtype="i4"))
assert pv.unit == self.pv.unit
# Roundtrip is not as good as hoped on 32bit, not clear why.
# But proper motions are ridiculously high...
assert_quantity_allclose(pv["p"], self.pv["p"], atol=1 * u.m, rtol=0)
assert_quantity_allclose(pv["v"], self.pv["v"], atol=1 * u.m / u.s, rtol=0)
def test_pvtob(self):
pv = erfa_ufunc.pvtob(
[90, 0] * u.deg,
0.0 * u.deg,
100 * u.km,
0 * u.deg,
0 * u.deg,
0 * u.deg,
90 * u.deg,
)
assert pv.unit == u.StructuredUnit("m, m/s", names=("p", "v"))
assert pv.unit["v"] == u.m / u.s
assert_quantity_allclose(
pv["p"], [[-6478, 0, 0], [0, 6478, 0]] * u.km, atol=2 * u.km
)
assert_quantity_allclose(
pv["v"], [[0, -0.5, 0], [-0.5, 0, 0]] * u.km / u.s, atol=0.1 * u.km / u.s
)
def test_pvdpv(self):
pvdpv = erfa_ufunc.pvdpv(self.pv, self.pv)
assert pvdpv["pdp"].unit == self.pv.unit["p"] ** 2
assert pvdpv["pdv"].unit == self.pv.unit["p"] * self.pv.unit["v"]
assert_array_equal(
pvdpv["pdp"], np.einsum("...i,...i->...", self.pv["p"], self.pv["p"])
)
assert_array_equal(
pvdpv["pdv"], 2 * np.einsum("...i,...i->...", self.pv["p"], self.pv["v"])
)
z_axis = u.Quantity(np.array(([0, 0, 1], [0, 0, 0]), erfa_ufunc.dt_pv), "1,1/s")
pvdpv2 = erfa_ufunc.pvdpv(self.pv, z_axis)
assert pvdpv2["pdp"].unit == self.pv.unit["p"]
assert pvdpv2["pdv"].unit == self.pv.unit["v"]
assert_array_equal(pvdpv2["pdp"].value, np.zeros(self.pv.shape))
assert_array_equal(pvdpv2["pdv"].value, np.zeros(self.pv.shape))
def test_pvxpv(self):
pvxpv = erfa_ufunc.pvxpv(self.pv, self.pv)
assert pvxpv["p"].unit == self.pv.unit["p"] ** 2
assert pvxpv["v"].unit == self.pv.unit["p"] * self.pv.unit["v"]
assert_array_equal(pvxpv["p"].value, np.zeros(self.pv["p"].shape))
assert_array_equal(pvxpv["v"].value, np.zeros(self.pv["v"].shape))
z_axis = u.Quantity(np.array(([0, 0, 1], [0, 0, 0]), erfa_ufunc.dt_pv), "1,1/s")
pvxpv2 = erfa_ufunc.pvxpv(self.pv, z_axis)
assert pvxpv2["p"].unit == self.pv.unit["p"]
assert pvxpv2["v"].unit == self.pv.unit["v"]
assert_array_equal(pvxpv2["p"], [[0.0, -1, 0.0], [1.0, 0.0, 0.0]] * u.AU)
assert_array_equal(
pvxpv2["v"], [[0.0125, 0.0, 0.0], [0.0, 0.0125, 0.0]] * u.AU / u.day
)
def test_pvm(self):
pm, vm = erfa_ufunc.pvm(self.pv)
assert pm.unit == self.pv.unit["p"]
assert vm.unit == self.pv.unit["v"]
assert_array_equal(pm, np.linalg.norm(self.pv["p"], axis=-1))
assert_array_equal(vm, np.linalg.norm(self.pv["v"], axis=-1))
def test_pvmpv(self):
pvmpv = erfa_ufunc.pvmpv(self.pv, self.pv)
assert pvmpv.unit == self.pv.unit
assert_array_equal(pvmpv["p"], 0 * self.pv["p"])
assert_array_equal(pvmpv["v"], 0 * self.pv["v"])
def test_pvppv(self):
pvppv = erfa_ufunc.pvppv(self.pv, self.pv)
assert pvppv.unit == self.pv.unit
assert_array_equal(pvppv["p"], 2 * self.pv["p"])
assert_array_equal(pvppv["v"], 2 * self.pv["v"])
def test_pvu(self):
pvu = erfa_ufunc.pvu(86400 * u.s, self.pv)
assert pvu.unit == self.pv.unit
assert_array_equal(pvu["p"], self.pv["p"] + 1 * u.day * self.pv["v"])
assert_array_equal(pvu["v"], self.pv["v"])
def test_pvup(self):
pvup = erfa_ufunc.pvup(86400 * u.s, self.pv)
assert pvup.unit == self.pv.unit["p"]
assert_array_equal(pvup, self.pv["p"] + 1 * u.day * self.pv["v"])
def test_sxpv(self):
# Not a realistic example!!
sxpv = erfa_ufunc.sxpv(10.0, self.pv)
assert sxpv.unit == self.pv.unit
assert_array_equal(sxpv["p"], self.pv["p"] * 10)
assert_array_equal(sxpv["v"], self.pv["v"] * 10)
sxpv2 = erfa_ufunc.sxpv(30.0 * u.s, self.pv)
assert sxpv2.unit == u.StructuredUnit("AU s,AU s/d", names=("p", "v"))
assert_array_equal(sxpv2["p"], self.pv["p"] * 30 * u.s)
assert_array_equal(sxpv2["v"], self.pv["v"] * 30 * u.s)
def test_s2xpv(self):
# Not a realistic example!!
s2xpv = erfa_ufunc.s2xpv(10.0, 1 * u.s, self.pv)
assert s2xpv.unit == u.StructuredUnit("AU,AU s/d", names=("p", "v"))
assert_array_equal(s2xpv["p"], self.pv["p"] * 10)
assert_array_equal(s2xpv["v"], self.pv["v"] * u.s)
@pytest.mark.parametrize(
"r",
[
np.eye(3),
np.array(
[
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
]
),
np.eye(3) / u.s,
],
)
def test_rxpv(self, r):
result = erfa_ufunc.rxpv(r, self.pv)
assert_array_equal(result["p"], np.einsum("...ij,...j->...i", r, self.pv["p"]))
assert_array_equal(result["v"], np.einsum("...ij,...j->...i", r, self.pv["v"]))
@pytest.mark.parametrize(
"r",
[
np.eye(3),
np.array(
[
[0.0, -1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 1.0],
]
),
np.eye(3) / u.s,
],
)
def test_trxpv(self, r):
result = erfa_ufunc.trxpv(r, self.pv)
assert_array_equal(
result["p"], np.einsum("...ij,...j->...i", r.T, self.pv["p"])
)
assert_array_equal(
result["v"], np.einsum("...ij,...j->...i", r.T, self.pv["v"])
)
| TestPVUfuncs |
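The tests above all operate on a structured position-velocity Quantity (`self.pv`). A minimal sketch of how such an array can be built, assuming the same imports the test module relies on (`astropy.units` and `erfa.ufunc`); the concrete values are illustrative only:

import numpy as np
import astropy.units as u
from erfa import ufunc as erfa_ufunc

# Two hypothetical records: positions of 1 AU and tangential velocities of
# 0.0125 AU/day, stored in ERFA's structured position-velocity dtype.
pv = u.Quantity(
    np.array(
        [([1.0, 0.0, 0.0], [0.0, 0.0125, 0.0]),
         ([0.0, 1.0, 0.0], [-0.0125, 0.0, 0.0])],
        erfa_ufunc.dt_pv,
    ),
    "AU,AU/day",
)
pm, vm = erfa_ufunc.pvm(pv)  # moduli of position and velocity, as in test_pvm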
python | optuna__optuna | optuna/artifacts/_filesystem.py | {
"start": 230,
"end": 2416
} | class ____:
"""An artifact store for file systems.
Args:
base_path:
The base path to a directory to store artifacts.
Example:
.. code-block:: python
import os
import optuna
from optuna.artifacts import FileSystemArtifactStore
from optuna.artifacts import upload_artifact
base_path = "./artifacts"
os.makedirs(base_path, exist_ok=True)
artifact_store = FileSystemArtifactStore(base_path=base_path)
def objective(trial: optuna.Trial) -> float:
... = trial.suggest_float("x", -10, 10)
file_path = generate_example(...)
upload_artifact(
artifact_store=artifact_store,
file_path=file_path,
study_or_trial=trial,
)
return ...
"""
def __init__(self, base_path: str | Path) -> None:
if isinstance(base_path, str):
base_path = Path(base_path)
# TODO(Shinichi): Check if the base_path is valid directory.
self._base_path = base_path
def open_reader(self, artifact_id: str) -> BinaryIO:
filepath = os.path.join(self._base_path, artifact_id)
try:
f = open(filepath, "rb")
except FileNotFoundError as e:
raise ArtifactNotFound("not found") from e
return f
def write(self, artifact_id: str, content_body: BinaryIO) -> None:
filepath = os.path.join(self._base_path, artifact_id)
with open(filepath, "wb") as f:
shutil.copyfileobj(content_body, f)
def remove(self, artifact_id: str) -> None:
filepath = os.path.join(self._base_path, artifact_id)
try:
os.remove(filepath)
except FileNotFoundError as e:
raise ArtifactNotFound("not found") from e
if TYPE_CHECKING:
# A mypy-runtime assertion to ensure that LocalArtifactBackend
# implements all abstract methods in ArtifactBackendProtocol.
from optuna.artifacts._protocol import ArtifactStore
_: ArtifactStore = FileSystemArtifactStore("")
| FileSystemArtifactStore |
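A short usage sketch for the store above; the artifact id is a placeholder chosen only for illustration (Optuna normally generates ids via `upload_artifact`):

import io
import os

base_path = "./artifacts"
os.makedirs(base_path, exist_ok=True)
store = FileSystemArtifactStore(base_path)

store.write("example-artifact-id", io.BytesIO(b"hello"))   # copies the stream under base_path
with store.open_reader("example-artifact-id") as f:
    assert f.read() == b"hello"
store.remove("example-artifact-id")                        # raises ArtifactNotFound if the file is missing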
python | crytic__slither | slither/solc_parsing/slither_compilation_unit_solc.py | {
"start": 3347,
"end": 39496
} | class ____(CallerContextExpression):
# pylint: disable=too-many-instance-attributes
def __init__(self, compilation_unit: SlitherCompilationUnit) -> None:
super().__init__()
self._compilation_unit: SlitherCompilationUnit = compilation_unit
self._contracts_by_id: Dict[int, Contract] = {}
# For top level functions, there should only be one `Function` since they can't be virtual and therefore can't be overridden.
self._functions_by_id: Dict[int, List[Function]] = defaultdict(list)
self.imports_by_id: Dict[int, Import] = {}
self.top_level_events_by_id: Dict[int, EventTopLevel] = {}
self.top_level_errors_by_id: Dict[int, EventTopLevel] = {}
self.top_level_structures_by_id: Dict[int, StructureTopLevel] = {}
self.top_level_variables_by_id: Dict[int, TopLevelVariable] = {}
self.top_level_type_aliases_by_id: Dict[int, TypeAliasTopLevel] = {}
self.top_level_enums_by_id: Dict[int, EnumTopLevel] = {}
self._parsed = False
self._analyzed = False
self._is_compact_ast = False
self._underlying_contract_to_parser: Dict[Contract, ContractSolc] = {}
self._structures_top_level_parser: List[StructureTopLevelSolc] = []
self._custom_error_parser: List[CustomErrorSolc] = []
self._variables_top_level_parser: List[TopLevelVariableSolc] = []
self._functions_top_level_parser: List[FunctionSolc] = []
self._using_for_top_level_parser: List[UsingForTopLevelSolc] = []
self._events_top_level_parser: List[EventTopLevelSolc] = []
self._all_functions_and_modifier_parser: List[FunctionSolc] = []
self._top_level_contracts_counter = 0
@property
def compilation_unit(self) -> SlitherCompilationUnit:
return self._compilation_unit
@property
def all_functions_and_modifiers_parser(self) -> List[FunctionSolc]:
return self._all_functions_and_modifier_parser
def add_function_or_modifier_parser(self, f: FunctionSolc) -> None:
self._all_functions_and_modifier_parser.append(f)
self._functions_by_id[f.underlying_function.id].append(f.underlying_function)
@property
def underlying_contract_to_parser(self) -> Dict[Contract, ContractSolc]:
return self._underlying_contract_to_parser
@property
def slither_parser(self) -> "SlitherCompilationUnitSolc":
return self
@property
def contracts_by_id(self) -> Dict[int, Contract]:
return self._contracts_by_id
@property
def functions_by_id(self) -> Dict[int, List[Function]]:
return self._functions_by_id
###################################################################################
###################################################################################
# region AST
###################################################################################
###################################################################################
def get_key(self) -> str:
if self._is_compact_ast:
return "nodeType"
return "name"
def get_children(self) -> str:
if self._is_compact_ast:
return "nodes"
return "children"
@property
def is_compact_ast(self) -> bool:
return self._is_compact_ast
# endregion
###################################################################################
###################################################################################
# region Parsing
###################################################################################
###################################################################################
def parse_top_level_from_json(self, json_data: str) -> bool:
try:
data_loaded = json.loads(json_data)
# Truffle AST
if "ast" in data_loaded:
self.parse_top_level_items(data_loaded["ast"], data_loaded["sourcePath"])
return True
# solc AST, where the non-json text was removed
if "attributes" in data_loaded:
filename = data_loaded["attributes"]["absolutePath"]
else:
filename = data_loaded["absolutePath"]
self.parse_top_level_items(data_loaded, filename)
return True
except ValueError:
first = json_data.find("{")
if first != -1:
last = json_data.rfind("}") + 1
filename = json_data[0:first]
json_data = json_data[first:last]
data_loaded = json.loads(json_data)
self.parse_top_level_items(data_loaded, filename)
return True
return False
def _parse_enum(self, top_level_data: Dict, filename: str) -> None:
if self.is_compact_ast:
name = top_level_data["name"]
canonicalName = top_level_data["canonicalName"]
else:
name = top_level_data["attributes"][self.get_key()]
if "canonicalName" in top_level_data["attributes"]:
canonicalName = top_level_data["attributes"]["canonicalName"]
else:
canonicalName = name
values = []
children = (
top_level_data["members"]
if "members" in top_level_data
else top_level_data.get("children", [])
)
for child in children:
assert child[self.get_key()] == "EnumValue"
if self.is_compact_ast:
values.append(child["name"])
else:
values.append(child["attributes"][self.get_key()])
scope = self.compilation_unit.get_scope(filename)
enum = EnumTopLevel(name, canonicalName, values, scope)
enum.set_offset(top_level_data["src"], self._compilation_unit)
self._compilation_unit.enums_top_level.append(enum)
scope.enums[name] = enum
refId = top_level_data["id"]
self.top_level_enums_by_id[refId] = enum
# pylint: disable=too-many-branches,too-many-statements,too-many-locals
def parse_top_level_items(self, data_loaded: Dict, filename: str) -> None:
if not data_loaded or data_loaded is None:
logger.error(
"crytic-compile returned an empty AST. "
"If you are trying to analyze a contract from etherscan or similar make sure it has source code available."
)
return
exported_symbols = {}
if "nodeType" in data_loaded:
self._is_compact_ast = True
exported_symbols = data_loaded.get("exportedSymbols", {})
else:
attributes = data_loaded.get("attributes", {})
exported_symbols = attributes.get("exportedSymbols", {})
if "sourcePaths" in data_loaded:
for sourcePath in data_loaded["sourcePaths"]:
if os.path.isfile(sourcePath):
self._compilation_unit.core.add_source_code(sourcePath)
if data_loaded[self.get_key()] == "root":
logger.error("solc <0.4 is not supported")
return
if data_loaded[self.get_key()] == "SourceUnit":
self._parse_source_unit(data_loaded, filename)
else:
logger.error("solc version is not supported")
return
if self.get_children() not in data_loaded:
return
scope = self.compilation_unit.get_scope(filename)
# Exported symbols includes a reference ID to all top-level definitions the file exports,
# including def's brought in by imports (even transitively) and def's local to the file.
for refId in exported_symbols.values():
scope.exported_symbols |= set(refId)
for top_level_data in data_loaded[self.get_children()]:
if top_level_data[self.get_key()] == "ContractDefinition":
contract = Contract(self._compilation_unit, scope)
contract_parser = ContractSolc(self, contract, top_level_data)
scope.contracts[contract.name] = contract
if "src" in top_level_data:
contract.set_offset(top_level_data["src"], self._compilation_unit)
self._underlying_contract_to_parser[contract] = contract_parser
elif top_level_data[self.get_key()] == "PragmaDirective":
if self._is_compact_ast:
pragma = Pragma(top_level_data["literals"], scope)
scope.pragmas.add(pragma)
else:
pragma = Pragma(top_level_data["attributes"]["literals"], scope)
scope.pragmas.add(pragma)
pragma.set_offset(top_level_data["src"], self._compilation_unit)
self._compilation_unit.pragma_directives.append(pragma)
elif top_level_data[self.get_key()] == "UsingForDirective":
scope = self.compilation_unit.get_scope(filename)
usingFor = UsingForTopLevel(scope)
usingFor_parser = UsingForTopLevelSolc(usingFor, top_level_data, self)
usingFor.set_offset(top_level_data["src"], self._compilation_unit)
scope.using_for_directives.add(usingFor)
self._compilation_unit.using_for_top_level.append(usingFor)
self._using_for_top_level_parser.append(usingFor_parser)
elif top_level_data[self.get_key()] == "ImportDirective":
referenceId = top_level_data["id"]
if self.is_compact_ast:
import_directive = Import(
Path(
top_level_data["absolutePath"],
),
scope,
)
scope.imports.add(import_directive)
# TODO investigate unitAlias in version < 0.7 and legacy ast
if "unitAlias" in top_level_data:
import_directive.alias = top_level_data["unitAlias"]
if "symbolAliases" in top_level_data:
symbol_aliases = top_level_data["symbolAliases"]
_handle_import_aliases(symbol_aliases, import_directive, scope)
else:
import_directive = Import(
Path(
top_level_data["attributes"].get("absolutePath", ""),
),
scope,
)
scope.imports.add(import_directive)
# TODO investigate unitAlias in version < 0.7 and legacy ast
if (
"attributes" in top_level_data
and "unitAlias" in top_level_data["attributes"]
):
import_directive.alias = top_level_data["attributes"]["unitAlias"]
import_directive.set_offset(top_level_data["src"], self._compilation_unit)
self._compilation_unit.import_directives.append(import_directive)
self.imports_by_id[referenceId] = import_directive
get_imported_scope = self.compilation_unit.get_scope(import_directive.filename)
scope.accessible_scopes.append(get_imported_scope)
elif top_level_data[self.get_key()] == "StructDefinition":
st = StructureTopLevel(self.compilation_unit, scope)
st.set_offset(top_level_data["src"], self._compilation_unit)
st_parser = StructureTopLevelSolc(st, top_level_data, self)
scope.structures[st.name] = st
self._compilation_unit.structures_top_level.append(st)
self._structures_top_level_parser.append(st_parser)
referenceId = top_level_data["id"]
self.top_level_structures_by_id[referenceId] = st
elif top_level_data[self.get_key()] == "EnumDefinition":
                # Note: enums don't need a complex parser, so everything is done directly
self._parse_enum(top_level_data, filename)
elif top_level_data[self.get_key()] == "VariableDeclaration":
var = TopLevelVariable(scope)
var_parser = TopLevelVariableSolc(var, top_level_data, self)
var.set_offset(top_level_data["src"], self._compilation_unit)
self._compilation_unit.variables_top_level.append(var)
self._variables_top_level_parser.append(var_parser)
scope.variables[var.name] = var
referenceId = top_level_data["id"]
self.top_level_variables_by_id[referenceId] = var
elif top_level_data[self.get_key()] == "FunctionDefinition":
func = FunctionTopLevel(self._compilation_unit, scope)
scope.functions.add(func)
func.set_offset(top_level_data["src"], self._compilation_unit)
func_parser = FunctionSolc(func, top_level_data, None, self)
self._compilation_unit.functions_top_level.append(func)
self._functions_top_level_parser.append(func_parser)
self.add_function_or_modifier_parser(func_parser)
elif top_level_data[self.get_key()] == "ErrorDefinition":
custom_error = CustomErrorTopLevel(self._compilation_unit, scope)
custom_error.set_offset(top_level_data["src"], self._compilation_unit)
custom_error_parser = CustomErrorSolc(custom_error, top_level_data, None, self)
scope.custom_errors.add(custom_error)
self._compilation_unit.custom_errors.append(custom_error)
self._custom_error_parser.append(custom_error_parser)
referenceId = top_level_data["id"]
self.top_level_errors_by_id[referenceId] = custom_error
elif top_level_data[self.get_key()] == "UserDefinedValueTypeDefinition":
assert "name" in top_level_data
alias = top_level_data["name"]
assert "underlyingType" in top_level_data
underlying_type = top_level_data["underlyingType"]
assert (
"nodeType" in underlying_type
and underlying_type["nodeType"] == "ElementaryTypeName"
)
assert "name" in underlying_type
original_type = ElementaryType(underlying_type["name"])
type_alias = TypeAliasTopLevel(original_type, alias, scope)
type_alias.set_offset(top_level_data["src"], self._compilation_unit)
self._compilation_unit.type_aliases[alias] = type_alias
scope.type_aliases[alias] = type_alias
referenceId = top_level_data["id"]
self.top_level_type_aliases_by_id[referenceId] = type_alias
elif top_level_data[self.get_key()] == "EventDefinition":
event = EventTopLevel(scope)
event.set_offset(top_level_data["src"], self._compilation_unit)
event_parser = EventTopLevelSolc(event, top_level_data, self) # type: ignore
self._events_top_level_parser.append(event_parser)
scope.events.add(event)
self._compilation_unit.events_top_level.append(event)
referenceId = top_level_data["id"]
self.top_level_events_by_id[referenceId] = event
else:
raise SlitherException(f"Top level {top_level_data[self.get_key()]} not supported")
def _parse_source_unit(self, data: Dict, filename: str) -> None:
if data[self.get_key()] != "SourceUnit":
return # handle solc prior 0.3.6
# match any char for filename
# filename can contain space, /, -, ..
name_candidates = re.findall("=+ (.+) =+", filename)
if name_candidates:
assert len(name_candidates) == 1
name: str = name_candidates[0]
else:
name = filename
sourceUnit = -1 # handle old solc, or error
if "src" in data:
sourceUnit_candidates = re.findall("[0-9]*:[0-9]*:([0-9]*)", data["src"])
if len(sourceUnit_candidates) == 1:
sourceUnit = int(sourceUnit_candidates[0])
if sourceUnit == -1:
# if source unit is not found
# We can still deduce it, by assigning to the last source_code added
# This works only for crytic compile.
# which used --combined-json ast, rather than --ast-json
# As a result -1 is not used as index
if self._compilation_unit.core.crytic_compile is not None:
sourceUnit = len(self._compilation_unit.core.source_code)
self._compilation_unit.source_units[sourceUnit] = name
if os.path.isfile(name) and not name in self._compilation_unit.core.source_code:
self._compilation_unit.core.add_source_code(name)
else:
lib_name = os.path.join("node_modules", name)
if os.path.isfile(lib_name) and not name in self._compilation_unit.core.source_code:
self._compilation_unit.core.add_source_code(lib_name)
# endregion
###################################################################################
###################################################################################
# region Analyze
###################################################################################
###################################################################################
@property
def parsed(self) -> bool:
return self._parsed
@property
def analyzed(self) -> bool:
return self._analyzed
def parse_contracts(self) -> None: # pylint: disable=too-many-statements,too-many-branches
if not self._underlying_contract_to_parser:
logger.info(
f"No contracts were found in {self._compilation_unit.core.filename}, check the correct compilation"
)
if self._parsed:
# pylint: disable=broad-exception-raised
raise Exception("Contract analysis can be run only once!")
def resolve_remapping_and_renaming(contract_parser: ContractSolc, want: str) -> Contract:
contract_name = contract_parser.remapping[want]
target = None
# For contracts that are imported and aliased e.g. 'import {A as B} from "./C.sol"',
            # we look through the import's (`Import`) renaming to find the original contract name
# and then look up the original contract in the import path's scope (`FileScope`).
for import_ in contract_parser.underlying_contract.file_scope.imports:
if contract_name in import_.renaming:
target = self.compilation_unit.get_scope(
import_.filename
).get_contract_from_name(import_.renaming[contract_name])
# Fallback to the current file scope if the contract is not found in the import path's scope.
            # It is assumed that it isn't possible to define a contract with the same name as "aliased" names.
if target is None:
target = contract_parser.underlying_contract.file_scope.get_contract_from_name(
contract_name
)
if target == contract_parser.underlying_contract:
raise InheritanceResolutionError(
"Could not resolve contract inheritance. This is likely caused by an import renaming that collides with existing names (see https://github.com/crytic/slither/issues/1758)."
f"\n Try changing `contract {target}` ({target.source_mapping}) to a unique name as a workaround."
"\n Please share the source code that caused this error here: https://github.com/crytic/slither/issues/"
)
assert target, f"Contract {contract_name} not found"
return target
# Update of the inheritance
for contract_parser in self._underlying_contract_to_parser.values():
ancestors = []
fathers = []
father_constructors = []
missing_inheritance = None
# Resolve linearized base contracts.
# Remove the first elem in linearizedBaseContracts as it is the contract itself.
for i in contract_parser.linearized_base_contracts[1:]:
if i in contract_parser.remapping:
target = resolve_remapping_and_renaming(contract_parser, i)
ancestors.append(target)
elif i in self._contracts_by_id:
ancestors.append(self._contracts_by_id[i])
else:
missing_inheritance = i
# Resolve immediate base contracts and attach references.
for (i, src) in contract_parser.baseContracts:
if i in contract_parser.remapping:
target = resolve_remapping_and_renaming(contract_parser, i)
fathers.append(target)
target.add_reference_from_raw_source(src, self.compilation_unit)
elif i in self._contracts_by_id:
target = self._contracts_by_id[i]
fathers.append(target)
target.add_reference_from_raw_source(src, self.compilation_unit)
else:
missing_inheritance = i
# Resolve immediate base constructor calls.
for i in contract_parser.baseConstructorContractsCalled:
if i in contract_parser.remapping:
target = resolve_remapping_and_renaming(contract_parser, i)
father_constructors.append(target)
elif i in self._contracts_by_id:
father_constructors.append(self._contracts_by_id[i])
else:
missing_inheritance = i
contract_parser.underlying_contract.set_inheritance(
ancestors, fathers, father_constructors
)
if missing_inheritance:
self._compilation_unit.contracts_with_missing_inheritance.add(
contract_parser.underlying_contract
)
txt = f"Missing inheritance {contract_parser.underlying_contract} ({contract_parser.compilation_unit.crytic_compile_compilation_unit.unique_id})\n"
txt += f"Missing inheritance ID: {missing_inheritance}\n"
if contract_parser.underlying_contract.inheritance:
txt += "Inheritance found:\n"
for contract_inherited in contract_parser.underlying_contract.inheritance:
txt += f"\t - {contract_inherited} (ID {contract_inherited.id})\n"
contract_parser.log_incorrect_parsing(txt)
contract_parser.set_is_analyzed(True)
contract_parser.delete_content()
contracts_to_be_analyzed = list(self._underlying_contract_to_parser.values())
# Any contract can refer another contract enum without need for inheritance
self._analyze_all_enums(contracts_to_be_analyzed)
# pylint: disable=expression-not-assigned
[c.set_is_analyzed(False) for c in self._underlying_contract_to_parser.values()]
libraries = [
c for c in contracts_to_be_analyzed if c.underlying_contract.contract_kind == "library"
]
contracts_to_be_analyzed = [
c for c in contracts_to_be_analyzed if c.underlying_contract.contract_kind != "library"
]
# We first parse the struct/variables/functions/contract
self._analyze_first_part(contracts_to_be_analyzed, libraries)
# pylint: disable=expression-not-assigned
[c.set_is_analyzed(False) for c in self._underlying_contract_to_parser.values()]
# We analyze the struct and parse and analyze the events
# A contract can refer in the variables a struct or a event from any contract
# (without inheritance link)
self._analyze_second_part(contracts_to_be_analyzed, libraries)
[c.set_is_analyzed(False) for c in self._underlying_contract_to_parser.values()]
# Then we analyse state variables, functions and modifiers
self._analyze_third_part(contracts_to_be_analyzed, libraries)
[c.set_is_analyzed(False) for c in self._underlying_contract_to_parser.values()]
self._analyze_using_for(contracts_to_be_analyzed, libraries)
self._parsed = True
def analyze_contracts(self) -> None: # pylint: disable=too-many-statements,too-many-branches
if not self._parsed:
raise SlitherException("Parse the contract before running analyses")
self._convert_to_slithir()
if not self._compilation_unit.core.skip_data_dependency:
compute_dependency(self._compilation_unit)
self._compilation_unit.compute_storage_layout()
self._analyzed = True
def _analyze_all_enums(self, contracts_to_be_analyzed: List[ContractSolc]) -> None:
while contracts_to_be_analyzed:
contract = contracts_to_be_analyzed[0]
contracts_to_be_analyzed = contracts_to_be_analyzed[1:]
all_father_analyzed = all(
self._underlying_contract_to_parser[father].is_analyzed
for father in contract.underlying_contract.inheritance
)
if not contract.underlying_contract.inheritance or all_father_analyzed:
self._analyze_enums(contract)
else:
contracts_to_be_analyzed += [contract]
def _analyze_first_part(
self,
contracts_to_be_analyzed: List[ContractSolc],
libraries: List[ContractSolc],
) -> None:
for lib in libraries:
self._parse_struct_var_modifiers_functions(lib)
# Start with the contracts without inheritance
# Analyze a contract only if all its fathers
# Were analyzed
while contracts_to_be_analyzed:
contract = contracts_to_be_analyzed[0]
contracts_to_be_analyzed = contracts_to_be_analyzed[1:]
all_father_analyzed = all(
self._underlying_contract_to_parser[father].is_analyzed
for father in contract.underlying_contract.inheritance
)
if not contract.underlying_contract.inheritance or all_father_analyzed:
self._parse_struct_var_modifiers_functions(contract)
else:
contracts_to_be_analyzed += [contract]
def _analyze_second_part(
self,
contracts_to_be_analyzed: List[ContractSolc],
libraries: List[ContractSolc],
) -> None:
for lib in libraries:
self._analyze_struct_events(lib)
self._analyze_top_level_variables()
self._analyze_top_level_structures()
self._analyze_top_level_events()
# Start with the contracts without inheritance
# Analyze a contract only if all its fathers
# Were analyzed
while contracts_to_be_analyzed:
contract = contracts_to_be_analyzed[0]
contracts_to_be_analyzed = contracts_to_be_analyzed[1:]
all_father_analyzed = all(
self._underlying_contract_to_parser[father].is_analyzed
for father in contract.underlying_contract.inheritance
)
if not contract.underlying_contract.inheritance or all_father_analyzed:
self._analyze_struct_events(contract)
else:
contracts_to_be_analyzed += [contract]
def _analyze_third_part(
self,
contracts_to_be_analyzed: List[ContractSolc],
libraries: List[ContractSolc],
) -> None:
for lib in libraries:
self._analyze_variables_modifiers_functions(lib)
# Start with the contracts without inheritance
# Analyze a contract only if all its fathers
# Were analyzed
while contracts_to_be_analyzed:
contract = contracts_to_be_analyzed[0]
contracts_to_be_analyzed = contracts_to_be_analyzed[1:]
all_father_analyzed = all(
self._underlying_contract_to_parser[father].is_analyzed
for father in contract.underlying_contract.inheritance
)
if not contract.underlying_contract.inheritance or all_father_analyzed:
self._analyze_variables_modifiers_functions(contract)
else:
contracts_to_be_analyzed += [contract]
def _analyze_using_for(
self, contracts_to_be_analyzed: List[ContractSolc], libraries: List[ContractSolc]
) -> None:
self._analyze_top_level_using_for()
for lib in libraries:
lib.analyze_using_for()
while contracts_to_be_analyzed:
contract = contracts_to_be_analyzed[0]
contracts_to_be_analyzed = contracts_to_be_analyzed[1:]
all_father_analyzed = all(
self._underlying_contract_to_parser[father].is_analyzed
for father in contract.underlying_contract.inheritance
)
if not contract.underlying_contract.inheritance or all_father_analyzed:
contract.analyze_using_for()
contract.set_is_analyzed(True)
else:
contracts_to_be_analyzed += [contract]
def _analyze_enums(self, contract: ContractSolc) -> None:
# Enum must be analyzed first
contract.analyze_enums()
contract.set_is_analyzed(True)
def _parse_struct_var_modifiers_functions(self, contract: ContractSolc) -> None:
contract.parse_structs() # struct can refer another struct
contract.parse_state_variables()
contract.parse_modifiers()
contract.parse_functions()
contract.parse_custom_errors()
contract.parse_type_alias()
contract.set_is_analyzed(True)
def _analyze_struct_events(self, contract: ContractSolc) -> None:
contract.analyze_constant_state_variables()
# Struct can refer to enum, or state variables
contract.analyze_structs()
# Event can refer to struct
contract.analyze_events()
contract.analyze_custom_errors()
contract.set_is_analyzed(True)
def _analyze_top_level_structures(self) -> None:
try:
for struct in self._structures_top_level_parser:
struct.analyze()
except (VariableNotFound, KeyError) as e:
raise SlitherException(f"Missing struct {e} during top level structure analyze") from e
def _analyze_top_level_variables(self) -> None:
try:
for var in self._variables_top_level_parser:
var.analyze(var)
except (VariableNotFound, KeyError) as e:
raise SlitherException(f"Missing {e} during variable analyze") from e
def _analyze_top_level_events(self) -> None:
try:
for event in self._events_top_level_parser:
event.analyze()
except (VariableNotFound, KeyError) as e:
raise SlitherException(f"Missing event {e} during top level event analyze") from e
def _analyze_params_top_level_function(self) -> None:
for func_parser in self._functions_top_level_parser:
func_parser.analyze_params()
self._compilation_unit.add_function(func_parser.underlying_function)
def _analyze_top_level_using_for(self) -> None:
for using_for in self._using_for_top_level_parser:
using_for.analyze()
def _analyze_params_custom_error(self) -> None:
for custom_error_parser in self._custom_error_parser:
custom_error_parser.analyze_params()
def _analyze_content_top_level_function(self) -> None:
try:
for func_parser in self._functions_top_level_parser:
func_parser.analyze_content()
except (VariableNotFound, KeyError) as e:
raise SlitherException(f"Missing {e} during top level function analyze") from e
def _analyze_variables_modifiers_functions(self, contract: ContractSolc) -> None:
# State variables, modifiers and functions can refer to anything
contract.analyze_params_modifiers()
contract.analyze_params_functions()
self._analyze_params_top_level_function()
self._analyze_params_custom_error()
contract.analyze_state_variables()
contract.analyze_content_modifiers()
contract.analyze_content_functions()
self._analyze_content_top_level_function()
contract.set_is_analyzed(True)
def _convert_to_slithir(self) -> None:
for contract in self._compilation_unit.contracts:
contract.add_constructor_variables()
for func in contract.functions + contract.modifiers:
try:
func.generate_slithir_and_analyze()
except AttributeError as e:
                    # This can happen, for example, if there is a call to an interface
# And the interface is redefined due to contract's name reuse
# But the available version misses some functions
self._underlying_contract_to_parser[contract].log_incorrect_parsing(
f"Impossible to generate IR for {contract.name}.{func.name} ({func.source_mapping}):\n {e}"
)
except Exception as e:
func_expressions = "\n".join([f"\t{ex}" for ex in func.expressions])
logger.error(
f"\nFailed to generate IR for {contract.name}.{func.name}. Please open an issue https://github.com/crytic/slither/issues.\n{contract.name}.{func.name} ({func.source_mapping}):\n "
f"{func_expressions}"
)
raise e
try:
contract.convert_expression_to_slithir_ssa()
except Exception as e:
logger.error(
f"\nFailed to convert IR to SSA for {contract.name} contract. Please open an issue https://github.com/crytic/slither/issues.\n "
)
raise e
for func in self._compilation_unit.functions_top_level:
try:
func.generate_slithir_and_analyze()
except AttributeError as e:
logger.error(
f"Impossible to generate IR for top level function {func.name} ({func.source_mapping}):\n {e}"
)
except Exception as e:
func_expressions = "\n".join([f"\t{ex}" for ex in func.expressions])
logger.error(
f"\nFailed to generate IR for top level function {func.name}. Please open an issue https://github.com/crytic/slither/issues.\n{func.name} ({func.source_mapping}):\n "
f"{func_expressions}"
)
raise e
try:
func.generate_slithir_ssa({})
except Exception as e:
func_expressions = "\n".join([f"\t{ex}" for ex in func.expressions])
logger.error(
f"\nFailed to convert IR to SSA for top level function {func.name}. Please open an issue https://github.com/crytic/slither/issues.\n{func.name} ({func.source_mapping}):\n "
f"{func_expressions}"
)
raise e
self._compilation_unit.propagate_function_calls()
for contract in self._compilation_unit.contracts:
contract.fix_phi()
contract.update_read_write_using_ssa()
# endregion
| SlitherCompilationUnitSolc |
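The parser above is driven in two phases, parsing then analysis. A rough sketch of the expected call order, assuming `compilation_unit` is a SlitherCompilationUnit and `ast_json` is the solc AST text produced by crytic-compile:

parser = SlitherCompilationUnitSolc(compilation_unit)
parser.parse_top_level_from_json(ast_json)  # registers contracts, imports, and top-level definitions
parser.parse_contracts()                    # resolves inheritance, structs, variables, functions
parser.analyze_contracts()                  # converts functions to SlithIR and computes dependencies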
python | ray-project__ray | python/ray/autoscaler/_private/spark/spark_job_server.py | {
"start": 7271,
"end": 11357
} | class ____(ThreadingHTTPServer):
"""
High level design:
    1. In Ray on spark autoscaling mode, how are Ray worker nodes started and terminated?
    Each Ray worker node is launched by a spark job, and each spark job contains
    only one spark task; that spark task creates the Ray worker node as a subprocess.
    When the autoscaler requests termination of a specific Ray worker node, the
    corresponding spark job is cancelled, which triggers the worker node termination.
    Because we can only cancel a spark job, not a spark task, when scaling down a
    Ray worker node, we have to have one spark job for each Ray worker node.
    2. How are spark jobs created / cancelled from the spark node provider?
    The spark node provider runs in the autoscaler process, which is a different
    process than the one that executes the "setup_ray_cluster" API. The user calls
    "setup_ray_cluster" on the spark application driver node, and its semantic is to
    request spark resources from this spark application.
    Internally, "setup_ray_cluster" should use the "spark session" instance to
    request spark application resources. But because the spark node provider runs
    in another python process, the spark session has to be shared with the separate
    NodeProvider process: a spark job server is set up inside the spark application
    driver process (the process that calls "setup_ray_cluster"), and the NodeProvider
    process sends RPC requests to that spark job server to create spark jobs in the
    spark application.
    Note that we cannot create another spark session in the NodeProvider process:
    doing so would create another spark application and cause NodeProvider to request
    resources belonging to that new application, whereas all requested spark
    resources must belong to the original spark application that called the
    "setup_ray_cluster" API.
Note:
        The server must inherit from ThreadingHTTPServer because the request handler
        uses the active spark session in the current process to create spark jobs, so
        all request handlers must run in the current process.
"""
def __init__(self, server_address, spark, ray_node_custom_env):
super().__init__(server_address, SparkJobServerRequestHandler)
self.spark = spark
# For ray on spark autoscaling mode,
# for each ray worker node, we create an individual spark job
# to launch it, the corresponding spark job has only one
# spark task that starts ray worker node, and the spark job
# is assigned with a unique spark job group ID that is used
# to cancel this spark job (i.e., kill corresponding ray worker node).
# Each spark task has status of pending, running, or terminated.
# the task_status_dict key is spark job group id,
# and value is the corresponding spark task status.
# each spark task holds a ray worker node.
self.task_status_dict = {}
self.last_worker_error = None
self.ray_node_custom_env = ray_node_custom_env
def shutdown(self) -> None:
super().shutdown()
for spark_job_group_id in list(self.task_status_dict.keys()):
self.spark.sparkContext.cancelJobGroup(spark_job_group_id)
        # Sleep 1 second to wait for all spark job cancellations.
        # Spark job cancellation happens asynchronously in a background thread.
        # On the Databricks platform, detaching a notebook triggers SIGTERM, and the
        # SIGTERM handler triggers the Ray cluster shutdown; without this sleep, the
        # process would be killed right after the SIGTERM handler finishes, killing
        # the background threads that are still cancelling spark jobs.
time.sleep(1)
def _start_spark_job_server(host, port, spark, ray_node_custom_env):
server = SparkJobServer((host, port), spark, ray_node_custom_env)
def run_server():
server.serve_forever()
server_thread = threading.Thread(target=run_server)
server_thread.setDaemon(True)
server_thread.start()
return server
| SparkJobServer |
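A hedged sketch of how the `_start_spark_job_server` helper above is meant to be used on the driver; `spark` is assumed to be an active SparkSession and the host/port values are placeholders:

# Runs the ThreadingHTTPServer in a daemon thread of the driver process.
server = _start_spark_job_server("127.0.0.1", 8900, spark, ray_node_custom_env={})
# ... NodeProvider sends RPC requests here to create/cancel spark jobs ...
server.shutdown()  # cancels all tracked spark job groups before returning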
python | mahmoud__boltons | boltons/dictutils.py | {
"start": 26465,
"end": 30935
} | class ____(dict):
"""Implements a one-to-one mapping dictionary. In addition to
inheriting from and behaving exactly like the builtin
:class:`dict`, all values are automatically added as keys on a
reverse mapping, available as the `inv` attribute. This
arrangement keeps key and value namespaces distinct.
Basic operations are intuitive:
>>> oto = OneToOne({'a': 1, 'b': 2})
>>> print(oto['a'])
1
>>> print(oto.inv[1])
a
>>> len(oto)
2
    Overwrites happen in both directions:
>>> oto.inv[1] = 'c'
>>> print(oto.get('a'))
None
>>> len(oto)
2
For a very similar project, with even more one-to-one
functionality, check out `bidict <https://github.com/jab/bidict>`_.
"""
__slots__ = ('inv',)
def __init__(self, *a, **kw):
raise_on_dupe = False
if a:
if a[0] is _OTO_INV_MARKER:
self.inv = a[1]
dict.__init__(self, [(v, k) for k, v in self.inv.items()])
return
elif a[0] is _OTO_UNIQUE_MARKER:
a, raise_on_dupe = a[1:], True
dict.__init__(self, *a, **kw)
self.inv = self.__class__(_OTO_INV_MARKER, self)
if len(self) == len(self.inv):
# if lengths match, that means everything's unique
return
if not raise_on_dupe:
dict.clear(self)
dict.update(self, [(v, k) for k, v in self.inv.items()])
return
# generate an error message if the values aren't 1:1
val_multidict = {}
for k, v in self.items():
val_multidict.setdefault(v, []).append(k)
dupes = {v: k_list for v, k_list in
val_multidict.items() if len(k_list) > 1}
raise ValueError('expected unique values, got multiple keys for'
' the following values: %r' % dupes)
@classmethod
def unique(cls, *a, **kw):
"""This alternate constructor for OneToOne will raise an exception
when input values overlap. For instance:
>>> OneToOne.unique({'a': 1, 'b': 1})
Traceback (most recent call last):
...
ValueError: expected unique values, got multiple keys for the following values: ...
This even works across inputs:
>>> a_dict = {'a': 2}
>>> OneToOne.unique(a_dict, b=2)
Traceback (most recent call last):
...
ValueError: expected unique values, got multiple keys for the following values: ...
"""
return cls(_OTO_UNIQUE_MARKER, *a, **kw)
def __setitem__(self, key, val):
hash(val) # ensure val is a valid key
if key in self:
dict.__delitem__(self.inv, self[key])
if val in self.inv:
del self.inv[val]
dict.__setitem__(self, key, val)
dict.__setitem__(self.inv, val, key)
def __delitem__(self, key):
dict.__delitem__(self.inv, self[key])
dict.__delitem__(self, key)
def clear(self):
dict.clear(self)
dict.clear(self.inv)
def copy(self):
return self.__class__(self)
def pop(self, key, default=_MISSING):
if key in self:
dict.__delitem__(self.inv, self[key])
return dict.pop(self, key)
if default is not _MISSING:
return default
raise KeyError()
def popitem(self):
key, val = dict.popitem(self)
dict.__delitem__(self.inv, val)
return key, val
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def update(self, dict_or_iterable, **kw):
keys_vals = []
if isinstance(dict_or_iterable, dict):
for val in dict_or_iterable.values():
hash(val)
keys_vals = list(dict_or_iterable.items())
else:
for key, val in dict_or_iterable:
hash(key)
hash(val)
keys_vals = list(dict_or_iterable)
for val in kw.values():
hash(val)
keys_vals.extend(kw.items())
for key, val in keys_vals:
self[key] = val
def __repr__(self):
cn = self.__class__.__name__
dict_repr = dict.__repr__(self)
return f"{cn}({dict_repr})"
# marker for the secret handshake used internally to set up the invert ManyToMany
_PAIRING = object()
| OneToOne |
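A brief sketch of the bidirectional behavior documented above (it mirrors the docstring examples):

from boltons.dictutils import OneToOne

oto = OneToOne({'a': 1, 'b': 2})
oto['c'] = 3               # also creates oto.inv[3] == 'c'
oto.inv[3] = 'd'           # rebinding through the inverse drops 'c'
assert 'c' not in oto and oto['d'] == 3

try:
    OneToOne.unique({'a': 1, 'b': 1})   # duplicate values are rejected
except ValueError:
    pass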
python | huggingface__transformers | tests/models/mm_grounding_dino/test_modeling_mm_grounding_dino.py | {
"start": 3136,
"end": 9847
} | class ____:
def __init__(
self,
parent,
batch_size=4,
is_training=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
num_queries=2,
num_channels=3,
image_size=98,
n_targets=8,
num_labels=2,
num_feature_levels=4,
encoder_n_points=2,
decoder_n_points=6,
max_text_len=7,
):
self.parent = parent
self.batch_size = batch_size
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.num_queries = num_queries
self.num_channels = num_channels
self.image_size = image_size
self.n_targets = n_targets
self.num_labels = num_labels
self.num_feature_levels = num_feature_levels
self.encoder_n_points = encoder_n_points
self.decoder_n_points = decoder_n_points
self.max_text_len = max_text_len
# we also set the expected seq length for both encoder and decoder
self.encoder_seq_length_vision = (
math.ceil(self.image_size / 8) ** 2
+ math.ceil(self.image_size / 16) ** 2
+ math.ceil(self.image_size / 32) ** 2
+ math.ceil(self.image_size / 64) ** 2
)
self.encoder_seq_length_text = self.max_text_len
self.decoder_seq_length = self.num_queries
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device)
        # When using `MMGroundingDino` the text input template is '{label1}. {label2}. {label3}. ... {labelN}.'
# Therefore to avoid errors when running tests with `labels` `input_ids` have to follow this structure.
# Otherwise when running `build_label_maps` it will throw an error when trying to split the input_ids into segments.
input_ids = torch.tensor([101, 3869, 1012, 11420, 3869, 1012, 102], device=torch_device)
input_ids = input_ids.unsqueeze(0).expand(self.batch_size, -1)
labels = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
labels = []
for i in range(self.batch_size):
target = {}
target["class_labels"] = torch.randint(
high=self.num_labels, size=(self.n_targets,), device=torch_device
)
target["boxes"] = generate_fake_bounding_boxes(self.n_targets).to(torch_device)
target["masks"] = torch.rand(self.n_targets, self.image_size, self.image_size, device=torch_device)
labels.append(target)
config = self.get_config()
return config, pixel_values, pixel_mask, input_ids, labels
def get_config(self):
swin_config = SwinConfig(
window_size=7,
embed_dim=8,
depths=[1, 1, 1, 1],
num_heads=[1, 1, 1, 1],
image_size=self.image_size,
out_features=["stage2", "stage3", "stage4"],
out_indices=[2, 3, 4],
)
text_backbone = {
"hidden_size": 8,
"num_hidden_layers": 2,
"num_attention_heads": 2,
"intermediate_size": 8,
"max_position_embeddings": 8,
"model_type": "bert",
}
return MMGroundingDinoConfig(
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
num_queries=self.num_queries,
num_labels=self.num_labels,
num_feature_levels=self.num_feature_levels,
encoder_n_points=self.encoder_n_points,
decoder_n_points=self.decoder_n_points,
use_timm_backbone=False,
backbone_config=swin_config,
max_text_len=self.max_text_len,
text_config=text_backbone,
)
def prepare_config_and_inputs_for_common(self):
config, pixel_values, pixel_mask, input_ids, labels = self.prepare_config_and_inputs()
inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask, "input_ids": input_ids}
return config, inputs_dict
def create_and_check_model(self, config, pixel_values, pixel_mask, input_ids, labels):
model = MMGroundingDinoModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, input_ids=input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_size))
def create_and_check_object_detection_head_model(self, config, pixel_values, pixel_mask, input_ids, labels):
model = MMGroundingDinoForObjectDetection(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, input_ids=input_ids)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, config.max_text_len))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, input_ids=input_ids, labels=labels)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, config.max_text_len))
self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4))
@require_torch
# Copied from tests.models.grounding_dino.test_modeling_grounding_dino.GroundingDinoModelTest with Grounding->MMGrounding
| MMGroundingDinoModelTester |
python | tensorflow__tensorflow | tensorflow/python/autograph/tests/basic_list_test.py | {
"start": 2450,
"end": 4106
} | class ____(reference_test_base.TestCase):
def setUp(self):
super(ReferenceTest, self).setUp()
self.autograph_opts = tf.autograph.experimental.Feature.LISTS
def test_tensor_mutation(self):
self.assertConvertedMatchesNative(mutation, [0] * 10, 10)
def test_basic(self):
self.all_inputs_tensors = True
self.assertFunctionMatchesEager(element_access)
self.assertFunctionMatchesEager(element_update)
# TODO(mdan): This should raise a compilation, not runtime, error.
with self.assertRaisesRegex(
ValueError,
'cannot stack a list without knowing its element type; '
'use set_element_type to annotate it'):
self.function(type_not_annotated)(3)
self.assertFunctionMatchesEager(simple_fill, 5)
self.assertFunctionMatchesEager(nested_fill, 5, 3)
self.assertFunctionMatchesEager(read_write_loop, 4)
self.assertFunctionMatchesEager(simple_empty, 0)
self.assertFunctionMatchesEager(simple_empty, 2)
self.assertFunctionMatchesEager(simple_empty, 4)
# TODO(mdan): Allow explicitly setting the element shape to mitigate these.
# TODO(mdan): This should raise a friendlier runtime error.
# The error should spell out that empty lists cannot be stacked.
# Alternatively, we can also insert conditionals that construct a zero-sized
# Tensor of the appropriate type and shape, but we first want to make sure
# that doesn't degrade performance.
with self.assertRaises(ValueError):
self.function(simple_fill)(0)
with self.assertRaises(ValueError):
self.function(nested_fill)(0, 3)
if __name__ == '__main__':
tf.test.main()
| ReferenceTest |
python | django__django | django/contrib/postgres/search.py | {
"start": 11957,
"end": 12213
} | class ____(Func):
output_field = FloatField()
def __init__(self, expression, string, **extra):
if not hasattr(string, "resolve_expression"):
string = Value(string)
super().__init__(expression, string, **extra)
| TrigramBase |
python | django-extensions__django-extensions | django_extensions/management/commands/dumpscript.py | {
"start": 28804,
"end": 28899
} | class ____(Exception):
"""Value could not be parsed or should simply be skipped."""
| SkipValue |
python | huggingface__transformers | tests/models/instructblipvideo/test_modeling_instructblipvideo.py | {
"start": 8276,
"end": 11263
} | class ____:
def __init__(
self,
parent,
batch_size=12,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
projection_dim=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
dropout=0.1,
attention_dropout=0.1,
max_position_embeddings=512,
initializer_range=0.02,
bos_token_id=0,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.scope = scope
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
qformer_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
qformer_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask, qformer_input_ids, qformer_attention_mask
def get_config(self):
return InstructBlipVideoQFormerConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
projection_dim=self.projection_dim,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
initializer_range=self.initializer_range,
bos_token_id=self.bos_token_id,
)
# this class is based on `OPTModelTester` found in tests/models/opt/test_modeling_opt.py
| InstructBlipVideoQFormerModelTester |
python | huggingface__transformers | src/transformers/models/dpt/modeling_dpt.py | {
"start": 27404,
"end": 29257
} | class ____(nn.Module):
"""
ResidualConvUnit, pre-activate residual unit.
Args:
config (`[DPTConfig]`):
Model configuration class defining the model architecture.
"""
def __init__(self, config: DPTConfig):
super().__init__()
self.use_batch_norm = config.use_batch_norm_in_fusion_residual
use_bias_in_fusion_residual = (
config.use_bias_in_fusion_residual
if config.use_bias_in_fusion_residual is not None
else not self.use_batch_norm
)
self.activation1 = nn.ReLU()
self.convolution1 = nn.Conv2d(
config.fusion_hidden_size,
config.fusion_hidden_size,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias_in_fusion_residual,
)
self.activation2 = nn.ReLU()
self.convolution2 = nn.Conv2d(
config.fusion_hidden_size,
config.fusion_hidden_size,
kernel_size=3,
stride=1,
padding=1,
bias=use_bias_in_fusion_residual,
)
if self.use_batch_norm:
self.batch_norm1 = nn.BatchNorm2d(config.fusion_hidden_size)
self.batch_norm2 = nn.BatchNorm2d(config.fusion_hidden_size)
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
residual = hidden_state
hidden_state = self.activation1(hidden_state)
hidden_state = self.convolution1(hidden_state)
if self.use_batch_norm:
hidden_state = self.batch_norm1(hidden_state)
hidden_state = self.activation2(hidden_state)
hidden_state = self.convolution2(hidden_state)
if self.use_batch_norm:
hidden_state = self.batch_norm2(hidden_state)
return hidden_state + residual
| DPTPreActResidualLayer |
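A minimal forward-pass sketch for the residual unit above; the config values are hypothetical and chosen only so that the channel count matches the input tensor:

import torch
from transformers import DPTConfig

config = DPTConfig(fusion_hidden_size=32)
layer = DPTPreActResidualLayer(config)
out = layer(torch.randn(1, 32, 8, 8))   # the pre-activation residual unit keeps the input shape
assert out.shape == (1, 32, 8, 8)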
python | getsentry__sentry | src/sentry/taskworker/state.py | {
"start": 168,
"end": 1133
} | class ____:
id: str
namespace: str
taskname: str
attempt: int
processing_deadline_duration: int
retries_remaining: bool
def current_task() -> CurrentTaskState | None:
if not hasattr(_current_state, "state"):
_current_state.state = None
return _current_state.state
def set_current_task(activation: TaskActivation) -> None:
retry_state = activation.retry_state
state = CurrentTaskState(
id=activation.id,
namespace=activation.namespace,
taskname=activation.taskname,
attempt=activation.retry_state.attempts,
# We subtract one, as attempts starts at 0, but `max_attempts`
# starts at 1.
retries_remaining=(retry_state.attempts < (retry_state.max_attempts - 1)),
processing_deadline_duration=activation.processing_deadline_duration,
)
_current_state.state = state
def clear_current_task() -> None:
_current_state.state = None
| CurrentTaskState |
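A sketch of the intended lifecycle around one task execution; `activation` is assumed to be a TaskActivation message handed to the worker:

set_current_task(activation)       # records id, namespace, attempt, and deadline for this thread
state = current_task()
if state is not None and not state.retries_remaining:
    pass                           # final attempt: a caller could log or emit a metric here
clear_current_task()               # always reset the thread-local state afterwards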
python | redis__redis-py | tests/test_pubsub.py | {
"start": 33924,
"end": 34440
} | class ____:
@skip_if_server_version_lt("3.0.0")
@skip_if_redis_enterprise()
def test_connection_error_raised_when_connection_dies(self, r):
p = r.pubsub()
p.subscribe("foo")
assert wait_for_message(p) == make_message("subscribe", "foo", 1)
for client in r.client_list():
if client["cmd"] == "subscribe":
r.client_kill_filter(_id=client["id"])
with pytest.raises(ConnectionError):
wait_for_message(p)
| TestPubSubConnectionKilled |
python | plotly__plotly.py | _plotly_utils/png.py | {
"start": 10275,
"end": 10392
} | class ____(Exception):
def __str__(self):
return self.__class__.__name__ + ": " + " ".join(self.args)
| Error |
python | pytorch__pytorch | torch/testing/_internal/common_distributed.py | {
"start": 55644,
"end": 57469
} | class ____(nn.Module):
def __init__(
self,
forward_inputs: dict[nn.Module, torch.Tensor],
cast_forward_inputs: bool,
) -> None:
super().__init__()
self.c1 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs)
self.c2 = SaveForwardInputsModule(forward_inputs, cast_forward_inputs)
self.forward_inputs = forward_inputs
def forward(self, x: torch.Tensor) -> torch.Tensor:
self.forward_inputs[self] = x
return self.c2(self.c1(x))
@contextmanager
def _dynamo_dist_per_rank_init(
rank, world_size, backend=None, init_pg=True, fake_pg=False
):
# To avoid multiple inheritance from _dynamo.test_case.TestCase and MultiProcessTestCase,
# Just manually implement the most important part of the dynamo behavior to reset/clear.
if not fake_pg:
torch.accelerator.set_device_index(rank)
device_type = (
acc.type if (acc := torch.accelerator.current_accelerator()) else "cpu"
)
if backend is None:
backend = c10d.get_default_backend_for_device(device_type)
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "6789"
if init_pg:
if fake_pg:
store = torch.testing._internal.distributed.fake_pg.FakeStore()
c10d.init_process_group(
backend="fake",
world_size=world_size,
rank=rank,
store=store,
)
else:
c10d.init_process_group(backend=backend, rank=rank, world_size=world_size)
torch._dynamo.reset()
torch._dynamo.utils.counters.clear()
try:
yield
finally:
torch._dynamo.reset()
torch._dynamo.utils.counters.clear()
if init_pg:
c10d.destroy_process_group()
| SaveForwardInputsModel |
python | huggingface__transformers | src/transformers/models/exaone4/modeling_exaone4.py | {
"start": 24150,
"end": 24253
} | class ____(GenericForTokenClassification, Exaone4PreTrainedModel):
pass
| Exaone4ForTokenClassification |
python | plotly__plotly.py | plotly/graph_objs/layout/_selection.py | {
"start": 235,
"end": 17147
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout"
_path_str = "layout.selection"
_valid_props = {
"line",
"name",
"opacity",
"path",
"templateitemname",
"type",
"x0",
"x1",
"xref",
"y0",
"y1",
"yref",
}
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.selection.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.layout.selection.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def opacity(self):
"""
Sets the opacity of the selection.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def path(self):
"""
For `type` "path" - a valid SVG path similar to `shapes.path`
in data coordinates. Allowed segments are: M, L and Z.
The 'path' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["path"]
@path.setter
def path(self, val):
self["path"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def type(self):
"""
Specifies the selection type to be drawn. If "rect", a
rectangle is drawn linking (`x0`,`y0`), (`x1`,`y0`),
(`x1`,`y1`) and (`x0`,`y1`). If "path", draw a custom SVG path
using `path`.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['rect', 'path']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def x0(self):
"""
Sets the selection's starting x position.
The 'x0' property accepts values of any type
Returns
-------
Any
"""
return self["x0"]
@x0.setter
def x0(self, val):
self["x0"] = val
@property
def x1(self):
"""
Sets the selection's end x position.
The 'x1' property accepts values of any type
Returns
-------
Any
"""
return self["x1"]
@x1.setter
def x1(self, val):
self["x1"] = val
@property
def xref(self):
"""
Sets the selection's x coordinate axis. If set to a x axis id
(e.g. "x" or "x2"), the `x` position refers to a x coordinate.
If set to "paper", the `x` position refers to the distance from
the left of the plotting area in normalized coordinates where 0
(1) corresponds to the left (right). If set to a x axis ID
followed by "domain" (separated by a space), the position
behaves like for "paper", but refers to the distance in
fractions of the domain length from the left of the domain of
that axis: e.g., *x2 domain* refers to the domain of the second
x axis and a x position of 0.5 refers to the point between the
left and the right of the domain of the second x axis.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['paper']
- A string that matches one of the following regular expressions:
['^x([2-9]|[1-9][0-9]+)?( domain)?$']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
@property
def y0(self):
"""
Sets the selection's starting y position.
The 'y0' property accepts values of any type
Returns
-------
Any
"""
return self["y0"]
@y0.setter
def y0(self, val):
self["y0"] = val
@property
def y1(self):
"""
Sets the selection's end y position.
The 'y1' property accepts values of any type
Returns
-------
Any
"""
return self["y1"]
@y1.setter
def y1(self, val):
self["y1"] = val
@property
def yref(self):
"""
        Sets the selection's y coordinate axis. If set to a y axis id
(e.g. "y" or "y2"), the `y` position refers to a y coordinate.
If set to "paper", the `y` position refers to the distance from
the bottom of the plotting area in normalized coordinates where
0 (1) corresponds to the bottom (top). If set to a y axis ID
followed by "domain" (separated by a space), the position
behaves like for "paper", but refers to the distance in
fractions of the domain length from the bottom of the domain of
that axis: e.g., *y2 domain* refers to the domain of the second
y axis and a y position of 0.5 refers to the point between the
bottom and the top of the domain of the second y axis.
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['paper']
- A string that matches one of the following regular expressions:
['^y([2-9]|[1-9][0-9]+)?( domain)?$']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
@property
def _prop_descriptions(self):
return """\
line
:class:`plotly.graph_objects.layout.selection.Line`
instance or dict with compatible properties
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the selection.
path
For `type` "path" - a valid SVG path similar to
`shapes.path` in data coordinates. Allowed segments
are: M, L and Z.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Specifies the selection type to be drawn. If "rect", a
rectangle is drawn linking (`x0`,`y0`), (`x1`,`y0`),
(`x1`,`y1`) and (`x0`,`y1`). If "path", draw a custom
SVG path using `path`.
x0
Sets the selection's starting x position.
x1
Sets the selection's end x position.
xref
Sets the selection's x coordinate axis. If set to a x
axis id (e.g. "x" or "x2"), the `x` position refers to
a x coordinate. If set to "paper", the `x` position
refers to the distance from the left of the plotting
area in normalized coordinates where 0 (1) corresponds
to the left (right). If set to a x axis ID followed by
"domain" (separated by a space), the position behaves
like for "paper", but refers to the distance in
fractions of the domain length from the left of the
domain of that axis: e.g., *x2 domain* refers to the
domain of the second x axis and a x position of 0.5
refers to the point between the left and the right of
the domain of the second x axis.
y0
Sets the selection's starting y position.
y1
Sets the selection's end y position.
yref
            Sets the selection's y coordinate axis. If set to a y
axis id (e.g. "y" or "y2"), the `y` position refers to
a y coordinate. If set to "paper", the `y` position
refers to the distance from the bottom of the plotting
area in normalized coordinates where 0 (1) corresponds
to the bottom (top). If set to a y axis ID followed by
"domain" (separated by a space), the position behaves
like for "paper", but refers to the distance in
fractions of the domain length from the bottom of the
domain of that axis: e.g., *y2 domain* refers to the
domain of the second y axis and a y position of 0.5
refers to the point between the bottom and the top of
the domain of the second y axis.
"""
def __init__(
self,
arg=None,
line=None,
name=None,
opacity=None,
path=None,
templateitemname=None,
type=None,
x0=None,
x1=None,
xref=None,
y0=None,
y1=None,
yref=None,
**kwargs,
):
"""
Construct a new Selection object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.Selection`
line
:class:`plotly.graph_objects.layout.selection.Line`
instance or dict with compatible properties
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the selection.
path
For `type` "path" - a valid SVG path similar to
`shapes.path` in data coordinates. Allowed segments
are: M, L and Z.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Specifies the selection type to be drawn. If "rect", a
rectangle is drawn linking (`x0`,`y0`), (`x1`,`y0`),
(`x1`,`y1`) and (`x0`,`y1`). If "path", draw a custom
SVG path using `path`.
x0
Sets the selection's starting x position.
x1
Sets the selection's end x position.
xref
Sets the selection's x coordinate axis. If set to a x
axis id (e.g. "x" or "x2"), the `x` position refers to
a x coordinate. If set to "paper", the `x` position
refers to the distance from the left of the plotting
area in normalized coordinates where 0 (1) corresponds
to the left (right). If set to a x axis ID followed by
"domain" (separated by a space), the position behaves
like for "paper", but refers to the distance in
fractions of the domain length from the left of the
domain of that axis: e.g., *x2 domain* refers to the
domain of the second x axis and a x position of 0.5
refers to the point between the left and the right of
the domain of the second x axis.
y0
Sets the selection's starting y position.
y1
Sets the selection's end y position.
yref
            Sets the selection's y coordinate axis. If set to a y
axis id (e.g. "y" or "y2"), the `y` position refers to
a y coordinate. If set to "paper", the `y` position
refers to the distance from the bottom of the plotting
area in normalized coordinates where 0 (1) corresponds
to the bottom (top). If set to a y axis ID followed by
"domain" (separated by a space), the position behaves
like for "paper", but refers to the distance in
fractions of the domain length from the bottom of the
domain of that axis: e.g., *y2 domain* refers to the
domain of the second y axis and a y position of 0.5
refers to the point between the bottom and the top of
the domain of the second y axis.
Returns
-------
Selection
"""
super().__init__("selections")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.Selection
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Selection`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("line", arg, line)
self._set_property("name", arg, name)
self._set_property("opacity", arg, opacity)
self._set_property("path", arg, path)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("type", arg, type)
self._set_property("x0", arg, x0)
self._set_property("x1", arg, x1)
self._set_property("xref", arg, xref)
self._set_property("y0", arg, y0)
self._set_property("y1", arg, y1)
self._set_property("yref", arg, yref)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Selection |
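# Illustrative sketch, not part of the record above: building the Selection class
# shown here and attaching it via layout.selections; assumes a plotly version that
# exposes go.layout.Selection and the layout "selections" array.
import plotly.graph_objects as go

sel = go.layout.Selection(type="rect", x0=1, x1=3, y0=0, y1=2, line={"width": 1})
fig = go.Figure(go.Scatter(x=[1, 2, 3, 4], y=[1, 3, 2, 4], mode="markers"))
fig.update_layout(selections=[sel])  # pre-draws a rectangular selection on the plot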
python | huggingface__transformers | src/transformers/models/blip/modeling_blip_text.py | {
"start": 3299,
"end": 9306
} | class ____(nn.Module):
def __init__(self, config, is_cross_attention, layer_idx=None):
super().__init__()
self.config = config
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention heads (%d)"
% (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.layer_idx = layer_idx
self.query = nn.Linear(config.hidden_size, self.all_head_size)
if is_cross_attention:
self.key = nn.Linear(config.encoder_hidden_size, self.all_head_size)
self.value = nn.Linear(config.encoder_hidden_size, self.all_head_size)
else:
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor]:
batch_size, seq_length, _ = hidden_states.shape
query_layer = (
self.query(hidden_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
attention_mask = encoder_attention_mask if is_cross_attention else attention_mask
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_layer from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_layer = curr_past_key_values.layers[self.layer_idx].keys
value_layer = curr_past_key_values.layers[self.layer_idx].values
else:
key_layer = (
self.key(current_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
value_layer = (
self.value(current_states)
.view(batch_size, -1, self.num_attention_heads, self.attention_head_size)
.transpose(1, 2)
)
if past_key_values is not None:
# save all key/value_layer to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_layer, value_layer = curr_past_key_values.update(
key_layer, value_layer, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (it is precomputed for all layers in BlipTextModel's forward() function)
attention_scores = attention_scores + attention_mask.to(attention_scores.device)
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs_dropped = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs_dropped, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return context_layer, attention_probs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert -> BlipText
| BlipTextSelfAttention |
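# Illustrative sketch, not part of the record above: the core math of the attention
# module shown here as a standalone helper (scaled dot-product over already-projected
# q/k/v), leaving out the cross-attention and cache bookkeeping.
import math
import torch

def scaled_dot_product(q, k, v, mask=None):
    # q, k, v: (batch, heads, seq_len, head_dim)
    scores = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(q.size(-1))
    if mask is not None:
        scores = scores + mask  # additive mask, e.g. large negative values on padding
    probs = scores.softmax(dim=-1)
    return torch.matmul(probs, v), probs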
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec27.py | {
"start": 513,
"end": 1252
} | class ____(Protocol[P]):
def __call__(self, a: int, /, *args: P.args, **kwargs: P.kwargs) -> None: ...
ConcatCallableHandler: TypeAlias = Callable[Concatenate[int, P], None]
handler_callable1: Callable[..., None] = func1
concat_handler_callable1: ConcatCallableHandler[...] = func1
# This should generate an error because the first param of func2 is not int.
concat_handler_callable2: ConcatCallableHandler[...] = func2
handler1: Handler[...] = func1
concat_handler1: ConcatHandler[...] = func1
# This should generate an error because the first param of func2 is not int.
concat_handler2: ConcatHandler[...] = func2
def func0(x: ConcatCallableHandler[str, str]):
assert_type(x, Callable[[int, str, str], None])
| ConcatHandler |
python | sqlalchemy__sqlalchemy | test/orm/test_attributes.py | {
"start": 97728,
"end": 105093
} | class ____(fixtures.ORMTest):
def test_receive_changes(self):
"""test that Listeners can mutate the given value."""
class Foo:
pass
class Bar:
pass
def append(state, child, initiator):
b2 = Bar()
b2.data = b1.data + " appended"
return b2
def on_set(state, value, oldvalue, initiator):
return value + " modified"
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
_register_attribute(Foo, "data", uselist=False, useobject=False)
_register_attribute(Foo, "barlist", uselist=True, useobject=True)
_register_attribute(
Foo, "barset", typecallable=set, uselist=True, useobject=True
)
_register_attribute(Bar, "data", uselist=False, useobject=False)
event.listen(Foo.data, "set", on_set, retval=True)
event.listen(Foo.barlist, "append", append, retval=True)
event.listen(Foo.barset, "append", append, retval=True)
f1 = Foo()
f1.data = "some data"
eq_(f1.data, "some data modified")
b1 = Bar()
b1.data = "some bar"
f1.barlist.append(b1)
assert b1.data == "some bar"
assert f1.barlist[0].data == "some bar appended"
f1.barset.add(b1)
assert f1.barset.pop().data == "some bar appended"
def test_named(self):
canary = Mock()
class Foo:
pass
class Bar:
pass
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
_register_attribute(Foo, "data", uselist=False, useobject=False)
_register_attribute(Foo, "barlist", uselist=True, useobject=True)
event.listen(Foo.data, "set", canary.set, named=True)
event.listen(Foo.barlist, "append", canary.append, named=True)
event.listen(Foo.barlist, "remove", canary.remove, named=True)
f1 = Foo()
b1 = Bar()
f1.data = 5
f1.barlist.append(b1)
f1.barlist.remove(b1)
eq_(
canary.mock_calls,
[
call.set(
oldvalue=attributes.NO_VALUE,
initiator=attributes.AttributeEventToken(
Foo.data.impl, attributes.OP_REPLACE
),
target=f1,
value=5,
),
call.append(
initiator=attributes.AttributeEventToken(
Foo.barlist.impl, attributes.OP_APPEND
),
target=f1,
value=b1,
),
call.remove(
initiator=attributes.AttributeEventToken(
Foo.barlist.impl, attributes.OP_REMOVE
),
target=f1,
value=b1,
),
],
)
def test_collection_link_events(self):
class Foo:
pass
class Bar:
pass
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
_register_attribute(Foo, "barlist", uselist=True, useobject=True)
canary = Mock()
event.listen(Foo.barlist, "init_collection", canary.init)
event.listen(Foo.barlist, "dispose_collection", canary.dispose)
f1 = Foo()
eq_(f1.barlist, [])
adapter_one = f1.barlist._sa_adapter
eq_(canary.init.mock_calls, [call(f1, [], adapter_one)])
b1 = Bar()
f1.barlist.append(b1)
b2 = Bar()
f1.barlist = [b2]
adapter_two = f1.barlist._sa_adapter
eq_(
canary.init.mock_calls,
[
call(f1, [b1], adapter_one), # note the f1.barlist that
# we saved earlier has been mutated
# in place, new as of [ticket:3913]
call(f1, [b2], adapter_two),
],
)
eq_(canary.dispose.mock_calls, [call(f1, [b1], adapter_one)])
def test_none_on_collection_event(self):
"""test that append/remove of None in collections emits events.
This is new behavior as of 0.8.
"""
class Foo:
pass
class Bar:
pass
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
_register_attribute(Foo, "barlist", uselist=True, useobject=True)
canary = []
def append(state, child, initiator):
canary.append((state, child))
def remove(state, child, initiator):
canary.append((state, child))
event.listen(Foo.barlist, "append", append)
event.listen(Foo.barlist, "remove", remove)
b1, b2 = Bar(), Bar()
f1 = Foo()
f1.barlist.append(None)
eq_(canary, [(f1, None)])
canary[:] = []
f1 = Foo()
f1.barlist = [None, b2]
eq_(canary, [(f1, None), (f1, b2)])
canary[:] = []
f1 = Foo()
f1.barlist = [b1, None, b2]
eq_(canary, [(f1, b1), (f1, None), (f1, b2)])
f1.barlist.remove(None)
eq_(canary, [(f1, b1), (f1, None), (f1, b2), (f1, None)])
def test_flag_modified(self):
canary = Mock()
class Foo:
pass
instrumentation.register_class(Foo)
_register_attribute(Foo, "bar")
event.listen(Foo.bar, "modified", canary)
f1 = Foo()
f1.bar = "hi"
attributes.flag_modified(f1, "bar")
eq_(
canary.mock_calls,
[
call(
f1,
attributes.AttributeEventToken(
Foo.bar.impl, attributes.OP_MODIFIED
),
)
],
)
def test_none_init_scalar(self):
canary = Mock()
class Foo:
pass
instrumentation.register_class(Foo)
_register_attribute(Foo, "bar")
event.listen(Foo.bar, "set", canary)
f1 = Foo()
eq_(f1.bar, None)
# reversal of approach in #3061
eq_(canary.mock_calls, [])
def test_none_init_object(self):
canary = Mock()
class Foo:
pass
instrumentation.register_class(Foo)
_register_attribute(Foo, "bar", useobject=True)
event.listen(Foo.bar, "set", canary)
f1 = Foo()
eq_(f1.bar, None)
# reversal of approach in #3061
eq_(canary.mock_calls, [])
def test_none_init_collection(self):
canary = Mock()
class Foo:
pass
class Bar:
pass
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
_register_attribute(Foo, "bar", useobject=True, uselist=True)
event.listen(Foo.bar, "set", canary)
f1 = Foo()
eq_(f1.bar, [])
assert "bar" not in f1.__dict__
adapter = Foo.bar.impl.get_collection(
attributes.instance_state(f1), attributes.instance_dict(f1)
)
assert adapter.empty
# reversal of approach in #3061
eq_(canary.mock_calls, [])
f1.bar.append(Bar())
assert "bar" in f1.__dict__
assert not adapter.empty
| ListenerTest |
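# Illustrative sketch, not part of the record above: the same "set" event with
# retval=True on an ordinary declarative model, which is how the low-level
# instrumentation exercised by these tests is normally used; assumes SQLAlchemy 1.4+.
from sqlalchemy import Column, Integer, String, event
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Thing(Base):
    __tablename__ = "thing"
    id = Column(Integer, primary_key=True)
    data = Column(String)

def on_set(target, value, oldvalue, initiator):
    return value + " modified"  # the returned value replaces the assigned one

event.listen(Thing.data, "set", on_set, retval=True)

t = Thing()
t.data = "some data"
assert t.data == "some data modified"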
python | getsentry__sentry | fixtures/integrations/mock_service.py | {
"start": 295,
"end": 3725
} | class ____(StubService):
"""
A mock is a service that replicates the functionality of a real software
system by implementing the same interface with simplified business logic.
For example, a mocked random dice_roll function might return `hash(time()) % 6`.
Like stubs, mocks can make tests simpler and more reliable.
"""
def __init__(self, mode="memory"):
"""
Initialize the mock instance. Wipe the previous instance's data if it exists.
"""
super().__init__()
self.mode = mode
self._next_error_code = None
self._next_ids = defaultdict(int)
if self.mode == "file":
path = os.path.join(FIXTURE_DIRECTORY, self.service_name, "data")
if os.path.exists(path):
shutil.rmtree(path)
os.makedirs(path)
else:
self._memory: dict[str, dict[str, Any]] = defaultdict(dict)
def add_project(self, project):
"""
Create a new, empty project.
:param project: String name of project
:return: void
"""
        self._next_ids[project]  # touch the defaultdict so the project is registered
if self.mode == "file":
self._get_project_path(project)
def remove_project(self, project):
"""
Totally wipe out a project.
:param project: String name of project
:return: void
"""
del self._next_ids[project]
if self.mode == "file":
path = self._get_project_path(project)
shutil.rmtree(path)
def break_next_api_call(self, error_code=500):
"""
Simulate an outage for a single API call.
"""
self._next_error_code = error_code
def _throw_if_broken(self, message_option=None):
"""
See break_next_api_call.
:param message_option: What should the message be if this raises?
:raises: Generic Exception
"""
        if self._next_error_code:
            error_code = self._next_error_code
            self._next_error_code = None
            message = message_option or f"{self.service_name} is down"
            raise Exception(f"{error_code}: {message}")
def _get_project_names(self):
return self._next_ids.keys()
def _get_new_ticket_name(self, project):
counter = self._next_ids[project]
self._next_ids[project] = counter + 1
return f"{project}-{base32_encode(counter)}"
def _get_project_path(self, project):
path = os.path.join(FIXTURE_DIRECTORY, self.service_name, "data", project)
os.makedirs(path, exist_ok=True)
return path
def _set_data(self, project, name, data):
if self.mode == "memory":
if not self._memory[project]:
self._memory[project] = defaultdict()
self._memory[project][name] = data
return
path = os.path.join(self._get_project_path(project), f"{name}.json")
with open(path, "wb") as f:
f.write(orjson.dumps(data))
def _get_data(self, project, name):
if self.mode == "memory":
if not self._memory[project]:
self._memory[project] = defaultdict()
return self._memory[project].get(name)
path = os.path.join(self._get_project_path(project), f"{name}.json")
if not os.path.exists(path):
return None
with open(path, "rb") as f:
return orjson.loads(f.read())
| MockService |
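# Illustrative sketch, hedged: MockService expects StubService to supply service_name,
# so a hypothetical subclass is assumed here purely to show the in-memory read/write
# flow; it is not part of the sentry fixtures.
class FakeTrackerService(MockService):
    service_name = "fake_tracker"  # hypothetical attribute for this sketch

svc = FakeTrackerService(mode="memory")
svc.add_project("PROJ")
svc._set_data("PROJ", "settings", {"enabled": True})
assert svc._get_data("PROJ", "settings") == {"enabled": True}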
python | pypa__warehouse | tests/unit/admin/views/test_users.py | {
"start": 57353,
"end": 59686
} | class ____:
def test_burns_recovery_codes(self, db_request, monkeypatch, user_service):
user = UserFactory.create()
codes = user_service.generate_recovery_codes(user.id)
user_service._check_ratelimits = pretend.call_recorder(
user_service._check_ratelimits
)
# Burn one code in advance
user.recovery_codes[0].burned = datetime.datetime.now(datetime.UTC)
# Provide all the codes, plus one invalid code
db_request.POST["to_burn"] = "\n".join(codes) + "\ninvalid"
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foobar")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
assert any(not code.burned for code in user.recovery_codes)
result = views.user_burn_recovery_codes(user, db_request)
assert all(code.burned for code in user.recovery_codes)
assert db_request.session.flash.calls == [
pretend.call("Burned 7 recovery code(s)", queue="success")
]
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
assert result.status_code == 303
assert result.location == "/foobar"
assert user_service._check_ratelimits.calls == []
def test_no_recovery_codes_provided(self, db_request, monkeypatch, user_service):
user = UserFactory.create()
user_service.generate_recovery_codes(user.id)
db_request.POST["to_burn"] = ""
db_request.route_path = pretend.call_recorder(lambda *a, **kw: "/foobar")
db_request.session = pretend.stub(
flash=pretend.call_recorder(lambda *a, **kw: None)
)
assert all(not code.burned for code in user.recovery_codes)
result = views.user_burn_recovery_codes(user, db_request)
assert all(not code.burned for code in user.recovery_codes)
assert db_request.session.flash.calls == [
pretend.call("No recovery codes provided", queue="error")
]
assert db_request.route_path.calls == [
pretend.call("admin.user.detail", username=user.username)
]
assert result.status_code == 303
assert result.location == "/foobar"
| TestUserBurnRecoveryCodes |
python | numba__numba | numba/core/bytecode.py | {
"start": 22797,
"end": 25041
} | class ____(serialize.ReduceMixin):
"""
A function's identity and metadata.
Note this typically represents a function whose bytecode is
being compiled, not necessarily the top-level user function
(the two might be distinct).
"""
_unique_ids = itertools.count(1)
@classmethod
def from_function(cls, pyfunc):
"""
Create the FunctionIdentity of the given function.
"""
func = get_function_object(pyfunc)
code = get_code_object(func)
pysig = utils.pysignature(func)
if not code:
raise errors.ByteCodeSupportError(
"%s does not provide its bytecode" % func)
try:
func_qualname = func.__qualname__
except AttributeError:
func_qualname = func.__name__
self = cls()
self.func = func
self.func_qualname = func_qualname
self.func_name = func_qualname.split('.')[-1]
self.code = code
self.module = inspect.getmodule(func)
self.modname = (utils._dynamic_modname
if self.module is None
else self.module.__name__)
self.is_generator = inspect.isgeneratorfunction(func)
self.pysig = pysig
self.filename = code.co_filename
self.firstlineno = code.co_firstlineno
self.arg_count = len(pysig.parameters)
self.arg_names = list(pysig.parameters)
# Even the same function definition can be compiled into
# several different function objects with distinct closure
# variables, so we make sure to disambiguate using an unique id.
uid = next(cls._unique_ids)
self.unique_name = '{}${}'.format(self.func_qualname, uid)
self.unique_id = uid
return self
def derive(self):
"""Copy the object and increment the unique counter.
"""
return self.from_function(self.func)
def _reduce_states(self):
"""
NOTE: part of ReduceMixin protocol
"""
return dict(pyfunc=self.func)
@classmethod
def _rebuild(cls, pyfunc):
"""
NOTE: part of ReduceMixin protocol
"""
return cls.from_function(pyfunc)
| FunctionIdentity |
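# Illustrative sketch, not part of the record above: building a FunctionIdentity for a
# plain function, assuming numba is importable under the record's module path.
from numba.core.bytecode import FunctionIdentity

def add(a, b):
    return a + b

fi = FunctionIdentity.from_function(add)
print(fi.func_qualname, fi.arg_names)  # e.g. add ['a', 'b']
print(fi.unique_name)                  # qualname plus a per-process counter, e.g. add$1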
python | getsentry__sentry | src/sentry/api/permissions.py | {
"start": 1854,
"end": 2609
} | class ____(BasePermission):
"""
This permission class is used for endpoints that should ONLY be accessible
by staff.
"""
def has_permission(self, request: Request, view: object) -> bool:
return is_active_staff(request)
# NOTE(schew2381): This is a temporary permission that does NOT perform an OR
# between SuperuserPermission and StaffPermission. Instead, it uses StaffPermission
# if the option is enabled for the user, and otherwise checks SuperuserPermission. We
# need this to handle the transition for endpoints that will only be accessible to
# staff but not superuser, that currently use SuperuserPermission. Once staff is
# released to everyone, we can delete this permission and use StaffPermission
| StaffPermission |
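# Illustrative sketch, hedged: the transitional behaviour the NOTE above describes --
# prefer the staff check when the option is enabled, otherwise fall back to the
# superuser check. `staff_option_enabled` and `is_active_superuser` are placeholders
# for this sketch, not sentry's actual helpers.
class StaffWithSuperuserFallbackPermission(BasePermission):
    def has_permission(self, request: Request, view: object) -> bool:
        if staff_option_enabled(request):    # placeholder feature/option check
            return is_active_staff(request)
        return is_active_superuser(request)  # placeholder superuser check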
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/one_hot_op_test.py | {
"start": 981,
"end": 12875
} | class ____(test.TestCase, parameterized.TestCase):
def _testOneHot(self,
truth,
use_gpu=False,
expected_err_re=None,
raises=None,
dtype=None,
**inputs):
with self.cached_session(use_gpu=use_gpu):
if raises is not None:
with self.assertRaises(raises):
array_ops.one_hot(dtype=dtype, **inputs)
else:
ans = array_ops.one_hot(dtype=dtype, **inputs)
if expected_err_re is None:
tf_ans = self.evaluate(ans)
self.assertAllEqual(tf_ans, truth)
if dtype:
self.assertEqual(tf_ans.dtype, dtype)
self.assertEqual(tf_ans.shape, ans.get_shape())
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testBothOneHot(self, truth, expected_err_re=None, raises=None, **inputs):
self._testOneHot(truth, True, expected_err_re, raises, **inputs)
self._testOneHot(truth, False, expected_err_re, raises, **inputs)
def _testBasic(self, dtype):
indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.asarray(
[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]],
dtype=dtype)
# axis == -1
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtype,
truth=truth)
# axis == 0
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
axis=0,
dtype=dtype,
truth=truth.T) # Output is transpose version in this case
def _testDefaultBasic(self, dtype):
indices = np.asarray([0, 2, -1, 1], dtype=np.int64)
depth = 3
truth = np.asarray(
[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
dtype=dtype)
# axis == -1
self._testBothOneHot(indices=indices, depth=depth, dtype=dtype, truth=truth)
# axis == 0
self._testBothOneHot(
indices=indices, depth=depth, axis=0, dtype=dtype,
truth=truth.T) # Output is transpose version in this case
def testDefaultNoDtype(self):
self._testDefaultBasic(None)
@parameterized.parameters(
np.float16,
dtypes.bfloat16.as_numpy_dtype,
np.float32,
np.float64,
np.int8,
np.int32,
np.int64,
np.complex64,
np.complex128,
)
def testBasic(self, dtype):
self._testBasic(dtype)
self._testDefaultBasic(dtype)
def _testBatch(self, dtype):
indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.asarray(
[[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]],
dtype=dtype)
# axis == -1
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtype,
truth=truth)
# axis == 1
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
axis=1,
dtype=dtype,
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def _testDefaultValuesBatch(self, dtype):
indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
depth = 3
truth = np.asarray(
[[[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
[[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]],
dtype=dtype)
# axis == -1
self._testBothOneHot(indices=indices, depth=depth, dtype=dtype, truth=truth)
# axis == 1
self._testBothOneHot(
indices=indices,
depth=depth,
axis=1,
dtype=dtype,
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def _testValueTypeBatch(self, dtype):
indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.asarray(
[[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]],
dtype=dtype)
# axis == -1
self._testBothOneHot(
indices=indices,
on_value=on_value,
off_value=off_value,
depth=depth,
dtype=dtype,
truth=truth)
# axis == 1
self._testBothOneHot(
indices=indices,
on_value=on_value,
off_value=off_value,
depth=depth,
axis=1,
dtype=dtype,
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def _testEmpty(self, dtype):
indices = np.zeros((0, 16), dtype=np.int64)
depth = 3
on_value = np.asarray(1.0, dtype=dtype)
off_value = np.asarray(-1.0, dtype=dtype)
truth = np.empty((0, 16, 3), dtype=dtype)
# axis == -1
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
dtype=dtype,
truth=truth)
@parameterized.parameters(
np.float16,
dtypes.bfloat16.as_numpy_dtype,
np.float32,
np.float64,
np.int8,
np.int32,
np.int64,
np.complex64,
)
def testBatch(self, dtype):
self._testEmpty(dtype)
self._testBatch(dtype)
if dtype != np.complex64:
self._testDefaultValuesBatch(dtype)
self._testValueTypeBatch(dtype)
def testSimpleCases(self):
indices = [0, 1, 2]
depth = 3
truth = np.asarray(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float32)
self._testBothOneHot(indices=indices, depth=depth, truth=truth)
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.int32)
self._testBothOneHot(
indices=indices, depth=depth, dtype=np.int32, truth=truth)
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[1, -1, -1], [-1, 1, -1], [-1, -1, 1]], dtype=np.int32)
self._testBothOneHot(
indices=indices, depth=depth, on_value=1, off_value=-1, truth=truth)
def testSingleValueGiven(self):
# Only on_value provided
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.int32)
self._testBothOneHot(indices=indices, depth=depth, on_value=1, truth=truth)
# Only off_value provided
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.float32)
self._testBothOneHot(
indices=indices, depth=depth, off_value=0.0, truth=truth)
def testString(self):
indices = [0, 1, 2]
depth = 3
truth = np.asarray([[b"1.0", b"0.0", b"0.0"], [b"0.0", b"1.0", b"0.0"],
[b"0.0", b"0.0", b"1.0"]])
on_value = np.asarray(b"1.0")
off_value = np.asarray(b"0.0")
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
truth=truth)
on_value = constant_op.constant(b"1.0")
off_value = constant_op.constant(b"0.0")
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
truth=truth)
on_value = b"1.0"
off_value = b"0.0"
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
truth=truth)
def testIndicesTypes(self):
tf_types = [dtypes.uint8, dtypes.int32, dtypes.int64]
np_types = [np.int32, np.int64]
for itype in tf_types + np_types:
      # Note: to keep the tests simple, in the case of uint8 the index -1 below
      # wraps to 255, which is out of the depth range just as -1 is for the signed types.
if itype in tf_types:
indices = constant_op.constant(
[[0, 2, -1, 1], [1, 0, 1, -1]], dtype=itype)
elif itype in np_types:
indices = np.asarray([[0, 2, -1, 1], [1, 0, 1, -1]], dtype=itype)
depth = 3
on_value = np.asarray(1.0, dtype=np.float32)
off_value = np.asarray(-1.0, dtype=np.float32)
truth = np.asarray(
[[[1.0, -1.0, -1.0], [-1.0, -1.0, 1.0], [-1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0]], [[-1.0, 1.0, -1.0], [1.0, -1.0, -1.0],
[-1.0, 1.0, -1.0], [-1.0, -1.0, -1.0]]],
dtype=np.float32)
# axis == -1
self._testBothOneHot(
indices=indices,
on_value=on_value,
off_value=off_value,
depth=depth,
truth=truth)
# axis == 1
self._testBothOneHot(
indices=indices,
on_value=on_value,
off_value=off_value,
depth=depth,
axis=1,
truth=[truth[0].T, truth[1].T]) # Do not transpose the batch
def testPrefixDimOverflow(self):
for itype in [dtypes.int32, dtypes.int64, dtypes.uint8]:
prefix_dim_size = 65536
depth = 2
x = [i % depth for i in range(prefix_dim_size)]
indices = constant_op.constant(x, dtype=itype)
truth = np.zeros((prefix_dim_size, depth), np.float32)
for i in range(prefix_dim_size):
truth[i, x[i]] = 1.0
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=1.0,
off_value=0.0,
truth=truth)
def testOnOffMismatchTypeError(self):
indices = [0, 1, 2]
depth = 3
on_value = np.asarray(1.0, np.float64)
off_value = np.asarray(0.0, np.float32)
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
off_value=off_value,
truth=None,
raises=TypeError)
def testDtypeMismatchTypeError(self):
indices = [0, 1, 2]
depth = 3
on_value = constant_op.constant(1.0, dtypes.float32)
off_value = constant_op.constant(0.0, dtypes.float32)
dtype = np.int32
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=on_value,
dtype=dtype,
truth=None,
raises=TypeError)
self._testBothOneHot(
indices=indices,
depth=depth,
on_value=off_value,
dtype=dtype,
truth=None,
raises=TypeError)
def testConvertToTensorOfCorrectDtype(self):
indices = [0, 1, 2]
depth = 3
dtype = np.float16
truth = np.asarray([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
self._testBothOneHot(
truth=truth,
indices=indices,
depth=depth,
on_value=1.0,
off_value=constant_op.constant(0.0, dtype),
dtype=dtype)
self._testBothOneHot(
truth=truth,
indices=indices,
depth=depth,
on_value=constant_op.constant(1.0, dtype),
off_value=0.,
dtype=dtype)
self._testBothOneHot(
truth=truth,
indices=indices,
depth=depth,
on_value=1.0,
off_value=0.,
dtype=dtype)
def testOneHotUint8WithLargeArray(self):
with self.cached_session(use_gpu=False) as sess:
matrix = np.random.rand(256) * 10
tensor = constant_op.constant(matrix, dtypes.uint8, shape=matrix.shape)
tensor_one_hot = array_ops.one_hot(tensor, depth=10, axis=0)
self.assertEqual(sess.run(tensor_one_hot).shape, (10, 256))
if __name__ == "__main__":
test.main()
| OneHotTest |
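# Illustrative sketch, not part of the record above: the behaviour these tests
# exercise, in one call; this mirrors the `truth` array in _testBasic (the
# out-of-range index -1 yields a row of off_value).
import tensorflow as tf

print(tf.one_hot([0, 2, -1, 1], depth=3, on_value=1.0, off_value=-1.0))
# [[ 1. -1. -1.]
#  [-1. -1.  1.]
#  [-1. -1. -1.]
#  [-1.  1. -1.]]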
python | python-pillow__Pillow | src/PIL/PdfParser.py | {
"start": 7719,
"end": 9318
} | class ____(_DictBase):
def __setattr__(self, key: str, value: Any) -> None:
if key == "data":
collections.UserDict.__setattr__(self, key, value)
else:
self[key.encode("us-ascii")] = value
def __getattr__(self, key: str) -> str | time.struct_time:
try:
value = self[key.encode("us-ascii")]
except KeyError as e:
raise AttributeError(key) from e
if isinstance(value, bytes):
value = decode_text(value)
if key.endswith("Date"):
if value.startswith("D:"):
value = value[2:]
relationship = "Z"
if len(value) > 17:
relationship = value[14]
offset = int(value[15:17]) * 60
if len(value) > 20:
offset += int(value[18:20])
format = "%Y%m%d%H%M%S"[: len(value) - 2]
value = time.strptime(value[: len(format) + 2], format)
if relationship in ["+", "-"]:
offset *= 60
if relationship == "+":
offset *= -1
value = time.gmtime(calendar.timegm(value) + offset)
return value
def __bytes__(self) -> bytes:
out = bytearray(b"<<")
for key, value in self.items():
if value is None:
continue
value = pdf_repr(value)
out.extend(b"\n")
out.extend(bytes(PdfName(key)))
out.extend(b" ")
out.extend(value)
out.extend(b"\n>>")
return bytes(out)
| PdfDict |
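# Illustrative sketch, hedged: exercising the *Date handling in __getattr__ above;
# assumes PdfDict behaves like a UserDict and is importable from PIL.PdfParser as the
# record's path suggests.
from PIL.PdfParser import PdfDict

d = PdfDict()
d.CreationDate = "D:20240131120000+05'30'"  # stored under b"CreationDate" by __setattr__
t = d.CreationDate                          # parsed into a UTC struct_time on access
assert (t.tm_year, t.tm_hour, t.tm_min) == (2024, 6, 30)  # 12:00 at +05:30 is 06:30 UTC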
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 8905,
"end": 9967
} | class ____(VOTableSpecWarning):
r"""Nonstandard XML id.
XML ids must match the following regular expression::
^[A-Za-z_][A-Za-z0-9_\.\-]*$
    The VOTable 1.1 specification says the following:
According to the XML standard, the attribute ``ID`` is a
string beginning with a letter or underscore (``_``), followed
by a sequence of letters, digits, or any of the punctuation
characters ``.`` (dot), ``-`` (dash), ``_`` (underscore), or
``:`` (colon).
However, this is in conflict with the XML standard, which says
colons may not be used. VOTable 1.1's own schema does not allow a
colon here. Therefore, ``astropy.io.votable`` disallows the colon.
VOTable 1.2 corrects this error in the specification.
**References**: `1.1
<http://www.ivoa.net/documents/VOTable/20040811/REC-VOTable-1.1-20040811.html#sec:name>`__,
`XML Names <https://www.w3.org/TR/xml-names/>`__
"""
message_template = "{} attribute '{}' is invalid. Must be a standard XML id"
default_args = ("x", "y")
| W02 |
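# Illustrative sketch, not part of the record above: the XML id rule quoted in the
# docstring, applied directly with the same regular expression.
import re

XML_ID = re.compile(r"^[A-Za-z_][A-Za-z0-9_\.\-]*$")

assert XML_ID.match("_table1")
assert XML_ID.match("col-2.a")
assert not XML_ID.match("1col")    # may not start with a digit
assert not XML_ID.match("ns:col")  # colon rejected, per the VOTable 1.1 schema note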
python | kamyu104__LeetCode-Solutions | Python/distinct-echo-substrings.py | {
"start": 1273,
"end": 1979
} | class ____(object):
def distinctEchoSubstrings(self, text):
"""
:type text: str
:rtype: int
"""
result = set()
for l in xrange(1, len(text)//2+1):
count = sum(text[i] == text[i+l] for i in xrange(l))
for i in xrange(len(text)-2*l):
if count == l:
result.add(text[i:i+l])
count += (text[i+l] == text[i+l+l]) - (text[i] == text[i+l])
if count == l:
result.add(text[len(text)-2*l:len(text)-2*l+l])
return len(result)
# Time: O(n^2 + d), d is the duplicated of result substrings size
# Space: O(r), r is the size of result substrings set
| Solution2 |
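# Illustrative check, not part of the record above: the standard LeetCode 1316 example
# for the solution -- "abcabc", "bcabca" and "cabcab" are the three distinct echo substrings.
assert Solution2().distinctEchoSubstrings("abcabcabc") == 3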
python | kamyu104__LeetCode-Solutions | Python/self-crossing.py | {
"start": 29,
"end": 1050
} | class ____(object):
def isSelfCrossing(self, x):
"""
:type x: List[int]
:rtype: bool
"""
if len(x) >= 5 and x[3] == x[1] and x[4] + x[0] >= x[2]:
# Crossing in a loop:
# 2
# 3 ┌────┐
# └─══>┘1
# 4 0 (overlapped)
return True
for i in xrange(3, len(x)):
if x[i] >= x[i - 2] and x[i - 3] >= x[i - 1]:
# Case 1:
# i-2
# i-1┌─┐
# └─┼─>i
# i-3
return True
elif i >= 5 and x[i - 4] <= x[i - 2] and x[i] + x[i - 4] >= x[i - 2] and \
x[i - 1] <= x[i - 3] and x[i - 5] + x[i - 1] >= x[i - 3]:
# Case 2:
# i-4
# ┌──┐
# │i<┼─┐
# i-3│ i-5│i-1
# └────┘
# i-2
return True
return False
| Solution |
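# Illustrative check, not part of the record above: the classic LeetCode 335 cases.
assert Solution().isSelfCrossing([2, 1, 1, 2])      # fourth move crosses the first
assert not Solution().isSelfCrossing([1, 2, 3, 4])  # outward spiral never crosses
assert Solution().isSelfCrossing([1, 1, 2, 1, 1])   # fifth move touches the first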
python | tensorflow__tensorflow | tensorflow/python/distribute/sharded_variable.py | {
"start": 6423,
"end": 8878
} | class ____(Partitioner):
"""Partitioner that keeps shards below `max_shard_bytes`.
This partitioner ensures each shard has at most `max_shard_bytes`, and tries
to allocate as few shards as possible, i.e., keeping shard size as large
as possible.
If the partitioner hits the `max_shards` limit, then each shard may end up
larger than `max_shard_bytes`. By default `max_shards` equals `None` and no
limit on the number of shards is enforced.
Examples:
  >>> partitioner = MaxSizePartitioner(max_shard_bytes=4)
  >>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32)
  >>> partitions
  [6, 1]
  >>> partitioner = MaxSizePartitioner(max_shard_bytes=4, max_shards=2)
  >>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32)
  >>> partitions
  [2, 1]
  >>> partitioner = MaxSizePartitioner(max_shard_bytes=1024)
  >>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32)
  >>> partitions
  [1, 1]
>>>
>>> # use in ParameterServerStrategy
>>> # strategy = tf.distribute.experimental.ParameterServerStrategy(
>>> # cluster_resolver=cluster_resolver, variable_partitioner=partitioner)
"""
def __init__(self, max_shard_bytes, max_shards=None, bytes_per_string=16):
"""Creates a new `MaxSizePartitioner`.
Args:
max_shard_bytes: The maximum size any given shard is allowed to be.
max_shards: The maximum number of shards in `int` created taking
precedence over `max_shard_bytes`.
bytes_per_string: If the partition value is of type string, this provides
an estimate of how large each string is.
"""
if max_shard_bytes < 1:
raise ValueError(
'Argument `max_shard_bytes` must be positive. '
f'Received {max_shard_bytes}'
)
if max_shards and max_shards < 1:
raise ValueError(
f'Argument `max_shards` must be positive. Received {max_shards}'
)
if bytes_per_string < 1:
raise ValueError(
'Argument `bytes_per_string` must be positive. '
f'Received: {bytes_per_string}'
)
self._max_shard_bytes = max_shard_bytes
self._max_shards = max_shards
self._bytes_per_string = bytes_per_string
def __call__(self, shape, dtype, axis=0):
return partitioned_variables.variable_axis_size_partitioner(
max_shard_bytes=self._max_shard_bytes,
max_shards=self._max_shards,
bytes_per_string_element=self._bytes_per_string,
axis=axis,
)(shape, dtype)
| MaxSizePartitioner |
python | jazzband__django-oauth-toolkit | tests/test_auth_backends.py | {
"start": 5881,
"end": 7543
} | class ____(BaseTest):
def dummy_get_response(self, request):
return HttpResponse()
def test_middleware_wrong_headers(self):
m = OAuth2ExtraTokenMiddleware(self.dummy_get_response)
request = self.factory.get("/a-resource")
m(request)
self.assertFalse(hasattr(request, "access_token"))
auth_headers = {
"HTTP_AUTHORIZATION": "Beerer " + "badstring", # a Beer token for you!
}
request = self.factory.get("/a-resource", **auth_headers)
m(request)
self.assertFalse(hasattr(request, "access_token"))
def test_middleware_token_does_not_exist(self):
m = OAuth2ExtraTokenMiddleware(self.dummy_get_response)
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + "badtokstr",
}
request = self.factory.get("/a-resource", **auth_headers)
m(request)
self.assertFalse(hasattr(request, "access_token"))
def test_middleware_success(self):
m = OAuth2ExtraTokenMiddleware(self.dummy_get_response)
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + "tokstr",
}
request = self.factory.get("/a-resource", **auth_headers)
m(request)
self.assertEqual(request.access_token, self.token)
def test_middleware_response(self):
m = OAuth2ExtraTokenMiddleware(self.dummy_get_response)
auth_headers = {
"HTTP_AUTHORIZATION": "Bearer " + "tokstr",
}
request = self.factory.get("/a-resource", **auth_headers)
response = m(request)
self.assertIsInstance(response, HttpResponse)
| TestOAuth2ExtraTokenMiddleware |
python | matplotlib__matplotlib | lib/matplotlib/testing/compare.py | {
"start": 2639,
"end": 2941
} | class ____:
def __call__(self, orig, dest):
try:
subprocess.run(
[mpl._get_executable_info("magick").executable, orig, dest],
check=True)
except subprocess.CalledProcessError as e:
raise _ConverterError() from e
| _MagickConverter |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/binary_test.py | {
"start": 3299,
"end": 4672
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, in_one, in_two, dtype, device, op_func):
self.inputs = {
"in_one": torch.bernoulli(0.5 * torch.ones(in_one, device=device)).to(
dtype=dtype
),
"in_two": torch.bernoulli(0.5 * torch.ones(in_two, device=device)).to(
dtype=dtype
),
}
self.op_func = op_func
def forward(self, in_one, in_two):
return self.op_func(in_one, in_two)
op_bench.generate_pt_tests_from_op_list(
binary_ops_bcast_list, binary_configs_broadcast, BinaryOpBcastBenchmark
)
# Benchmark ops performance without broadcast
binary_ops_list = op_bench.op_list(
attr_names=["op_name", "op_func"],
attrs=[["logical_and", torch.logical_and]],
)
binary_short_configs = op_bench.config_list(
attr_names=["M", "N", "K"],
attrs=[
[1, 1, 1],
[64, 64, 64],
[64, 64, 128],
],
cross_product_configs={
"device": ["cpu", "cuda"],
"dtype_one": [torch.bool],
"dtype_two": [torch.bool],
},
tags=["short"],
)
binary_long_configs = op_bench.cross_product_configs(
M=[8, 128],
N=[32, 64],
K=[256, 512],
device=["cpu", "cuda"],
dtype_one=[torch.bool, torch.bool],
dtype_two=[torch.bool, torch.bool],
tags=["long"],
)
| BinaryOpBcastBenchmark |
python | kamyu104__LeetCode-Solutions | Python/distribute-candies-to-people.py | {
"start": 1031,
"end": 1966
} | class ____(object):
def distributeCandies(self, candies, num_people):
"""
:type candies: int
:type num_people: int
:rtype: List[int]
"""
# find max integer p s.t. sum(1 + 2 + ... + p) <= C
left, right = 1, candies
while left <= right:
mid = left + (right-left)//2
if not ((mid <= candies*2 // (mid+1))):
right = mid-1
else:
left = mid+1
p = right
remaining = candies - (p+1)*p//2
rows, cols = divmod(p, num_people)
result = [0]*num_people
for i in xrange(num_people):
result[i] = (i+1)*(rows+1) + (rows*(rows+1)//2)*num_people if i < cols else \
(i+1)*rows + ((rows-1)*rows//2)*num_people
result[cols] += remaining
return result
# Time: O(sqrt(c)), c is the number of candies
# Space: O(1)
| Solution2 |
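# Illustrative check, not part of the record above: the standard LeetCode 1103 examples.
assert Solution2().distributeCandies(7, 4) == [1, 2, 3, 1]
assert Solution2().distributeCandies(10, 3) == [5, 2, 3]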
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/roles.py | {
"start": 3878,
"end": 4330
} | class ____(TypedColumnsClauseRole[_T_co]):
# note when using generics for ExpressionElementRole,
# the generic type needs to be in
# sqlalchemy.sql.coercions._impl_lookup mapping also.
# these are set up for basic types like int, bool, str, float
# right now
__slots__ = ()
_role_name = "SQL expression element"
def label(self, name: Optional[str]) -> Label[_T]:
raise NotImplementedError()
| ExpressionElementRole |
python | apache__airflow | providers/standard/src/airflow/providers/standard/utils/weekday.py | {
"start": 961,
"end": 2685
} | class ____(enum.IntEnum):
"""Python Enum containing Days of the Week."""
MONDAY = 1
TUESDAY = 2
WEDNESDAY = 3
THURSDAY = 4
FRIDAY = 5
SATURDAY = 6
SUNDAY = 7
@classmethod
def get_weekday_number(cls, week_day_str: str):
"""
Return the ISO Week Day Number for a Week Day.
:param week_day_str: Full Name of the Week Day. Example: "Sunday"
:return: ISO Week Day Number corresponding to the provided Weekday
"""
sanitized_week_day_str = week_day_str.upper()
if sanitized_week_day_str not in cls.__members__:
raise AttributeError(f'Invalid Week Day passed: "{week_day_str}"')
return cls[sanitized_week_day_str]
@classmethod
def convert(cls, day: str | WeekDay) -> int:
"""Return the day number in the week."""
if isinstance(day, WeekDay):
return day
return cls.get_weekday_number(week_day_str=day)
@classmethod
def validate_week_day(
cls,
week_day: str | WeekDay | Iterable[str] | Iterable[WeekDay],
) -> set[int]:
"""Validate each item of iterable and create a set to ease compare of values."""
if not isinstance(week_day, Iterable):
if isinstance(week_day, WeekDay):
week_day = {week_day}
else:
raise TypeError(
f"Unsupported Type for week_day parameter: {type(week_day)}."
"Input should be iterable type:"
"str, set, list, dict or Weekday enum type"
)
if isinstance(week_day, str):
week_day = {week_day}
return {cls.convert(item) for item in week_day}
| WeekDay |
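# Illustrative check, not part of the record above: how the helpers behave for strings,
# enum members and mixed iterables.
assert WeekDay.get_weekday_number("Sunday") is WeekDay.SUNDAY
assert WeekDay.convert("monday") == 1
assert sorted(WeekDay.validate_week_day({"Tuesday", WeekDay.FRIDAY})) == [2, 5]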
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/exceptions.py | {
"start": 987,
"end": 1334
} | class ____(Exception):
"""Raise when ECS tasks fail to start AFTER processing the request."""
def __init__(self, message: str):
self.message = message
super().__init__(message)
def __reduce__(self):
"""Return ECSTask state and its message."""
        return EcsTaskFailToStart, (self.message,)
| EcsTaskFailToStart |
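# Illustrative check, not part of the record above: __reduce__ exists so the exception
# survives pickling across process boundaries; a quick round-trip.
import pickle

err = EcsTaskFailToStart("ECS task failed to start")
restored = pickle.loads(pickle.dumps(err))
assert isinstance(restored, EcsTaskFailToStart)
assert restored.message == "ECS task failed to start"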
python | plotly__plotly.py | plotly/graph_objs/scatter3d/_marker.py | {
"start": 233,
"end": 28903
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d"
_path_str = "scatter3d.marker"
_valid_props = {
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"colorsrc",
"line",
"opacity",
"reversescale",
"showscale",
"size",
"sizemin",
"sizemode",
"sizeref",
"sizesrc",
"symbol",
"symbolsrc",
}
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in `marker.color` is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
bounds set in `marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color` is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.color` is set to a numerical array. Value should
have the same units as in `marker.color` and if set,
`marker.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.color` is set to a numerical array. Value should
have the same units as in `marker.color` and if set,
`marker.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def color(self):
"""
Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A number that will be interpreted as a color
according to scatter3d.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter3d.marker.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.scatter3d.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in `marker.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space, use
`marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Blackbody,B
luered,Blues,Cividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic
,Portland,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter3d.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.scatter3d.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def opacity(self):
"""
Sets the marker opacity. Note that the marker opacity for
scatter3d traces must be a scalar value for performance
reasons. To set a blending opacity value (i.e. which is not
transparent), set "marker.color" to an rgba color and use its
alpha channel.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.color` is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `marker.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizemin(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sizemin"]
@sizemin.setter
def sizemin(self, val):
self["sizemin"] = val
@property
def sizemode(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule for which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
"""
return self["sizemode"]
@sizemode.setter
def sizemode(self, val):
self["sizemode"] = val
@property
def sizeref(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["sizeref"]
@sizeref.setter
def sizeref(self, val):
self["sizeref"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def symbol(self):
"""
Sets the marker symbol type.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
['circle', 'circle-open', 'cross', 'diamond',
'diamond-open', 'square', 'square-open', 'x']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
@property
def symbolsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `symbol`.
The 'symbolsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["symbolsrc"]
@symbolsrc.setter
def symbolsrc(self, val):
self["symbolsrc"] = val
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scatter3d.marker.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
line
:class:`plotly.graph_objects.scatter3d.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity. Note that the marker opacity
for scatter3d traces must be a scalar value for
performance reasons. To set a blending opacity value
(i.e. which is not transparent), set "marker.color" to
an rgba color and use its alpha channel.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
symbol
Sets the marker symbol type.
symbolsrc
Sets the source reference on Chart Studio Cloud for
`symbol`.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
line=None,
opacity=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
symbol=None,
symbolsrc=None,
**kwargs,
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter3d.Marker`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.scatter3d.marker.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
line
:class:`plotly.graph_objects.scatter3d.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity. Note that the marker opacity
for scatter3d traces must be a scalar value for
performance reasons. To set a blending opacity value
(i.e. which is not transparent), set "marker.color" to
an rgba color and use its alpha channel.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
symbol
Sets the marker symbol type.
symbolsrc
Sets the source reference on Chart Studio Cloud for
`symbol`.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("cauto", arg, cauto)
self._set_property("cmax", arg, cmax)
self._set_property("cmid", arg, cmid)
self._set_property("cmin", arg, cmin)
self._set_property("color", arg, color)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("line", arg, line)
self._set_property("opacity", arg, opacity)
self._set_property("reversescale", arg, reversescale)
self._set_property("showscale", arg, showscale)
self._set_property("size", arg, size)
self._set_property("sizemin", arg, sizemin)
self._set_property("sizemode", arg, sizemode)
self._set_property("sizeref", arg, sizeref)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("symbol", arg, symbol)
self._set_property("symbolsrc", arg, symbolsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
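The Marker source above documents how `color`, `colorscale`, `cmin`/`cmax`, `size`, `opacity`, and `showscale` interact, so a short usage sketch may help. This is an illustrative example against the public plotly API with made-up data; it is not part of the original source above.

import numpy as np
import plotly.graph_objects as go

# Illustrative sketch: map a numeric array onto scatter3d marker colors (data is made up).
rng = np.random.default_rng(0)
x, y, z = rng.normal(size=(3, 100))

fig = go.Figure(
    go.Scatter3d(
        x=x, y=y, z=z,
        mode="markers",
        marker=dict(
            size=4,                # scalar size in px
            color=z,               # numeric array, mapped through the colorscale
            colorscale="Viridis",  # named colorscale; append "_r" to reverse it
            cmin=-2, cmax=2,       # fix the color domain instead of relying on cauto
            showscale=True,        # draw the colorbar for this trace
            opacity=0.8,           # scatter3d marker opacity must be a scalar
        ),
    )
)
fig.show()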
python | GoogleCloudPlatform__python-docs-samples | containeranalysis/snippets/samples_test.py | {
"start": 1991,
"end": 2653
} | class ____:
"""Custom class to handle incoming Pub/Sub messages."""
def __init__(self, expected_msg_nums: int, done_event: threading.Event) -> None:
# initialize counter to 0 on initialization
self.msg_count = 0
self.expected_msg_nums = expected_msg_nums
self.done_event = done_event
def pubsub_callback(self, message: Message) -> None:
# every time a pubsub message comes in, print it and count it
self.msg_count += 1
print(f"Message {self.msg_count}: {message.data}")
message.ack()
if self.msg_count == self.expected_msg_nums:
self.done_event.set()
| MessageReceiver |
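As a rough sketch of how a callback class like this is typically wired up, the snippet below subscribes it to a Pub/Sub subscription. The project and subscription names are hypothetical, and the standard google-cloud-pubsub client API is assumed.

import threading
from google.cloud import pubsub_v1

# Hypothetical wiring of the receiver to a subscription (names are made up).
done = threading.Event()
receiver = MessageReceiver(expected_msg_nums=3, done_event=done)

subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path("my-project", "my-subscription")

# Messages arrive on background threads; each one invokes pubsub_callback.
streaming_pull = subscriber.subscribe(subscription_path, callback=receiver.pubsub_callback)
done.wait(timeout=60)    # block until the expected number of messages has been counted
streaming_pull.cancel()  # stop the background consumer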
python | numpy__numpy | numpy/distutils/tests/test_fcompiler_gnu.py | {
"start": 1168,
"end": 1643
} | class ____:
def test_g77_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
for vs, version in g77_version_strings:
v = fc.version_match(vs)
assert_(v == version, (vs, v))
def test_not_g77(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
for vs, _ in gfortran_version_strings:
v = fc.version_match(vs)
assert_(v is None, (vs, v))
| TestG77Versions |
python | optuna__optuna | optuna/visualization/_terminator_improvement.py | {
"start": 869,
"end": 7458
} | class ____(NamedTuple):
trial_numbers: list[int]
improvements: list[float]
errors: list[float] | None
@experimental_func("3.2.0")
def plot_terminator_improvement(
study: Study,
plot_error: bool = False,
improvement_evaluator: BaseImprovementEvaluator | None = None,
error_evaluator: BaseErrorEvaluator | None = None,
min_n_trials: int = DEFAULT_MIN_N_TRIALS,
) -> "go.Figure":
"""Plot the potentials for future objective improvement.
This function visualizes the objective improvement potentials, evaluated
with ``improvement_evaluator``.
It helps to determine whether we should continue the optimization or not.
You can also plot the error evaluated with
``error_evaluator`` if the ``plot_error`` argument is set to :obj:`True`.
Note that this function may take some time to compute
the improvement potentials.
Args:
study:
A :class:`~optuna.study.Study` object whose trials are plotted
for their improvement.
plot_error:
A flag to show the error. If it is set to :obj:`True`, errors
evaluated by ``error_evaluator`` are also plotted as line graph.
Defaults to :obj:`False`.
improvement_evaluator:
An object that evaluates the improvement of the objective function.
Defaults to :class:`~optuna.terminator.RegretBoundEvaluator`.
error_evaluator:
An object that evaluates the error inherent in the objective function.
Defaults to :class:`~optuna.terminator.CrossValidationErrorEvaluator`.
min_n_trials:
The minimum number of trials before termination is considered.
Terminator improvements for trials below this value are
shown in a lighter color. Defaults to ``20``.
Returns:
A :class:`plotly.graph_objects.Figure` object.
"""
_imports.check()
info = _get_improvement_info(study, plot_error, improvement_evaluator, error_evaluator)
return _get_improvement_plot(info, min_n_trials)
def _get_improvement_info(
study: Study,
get_error: bool = False,
improvement_evaluator: BaseImprovementEvaluator | None = None,
error_evaluator: BaseErrorEvaluator | None = None,
) -> _ImprovementInfo:
if study._is_multi_objective():
raise ValueError("This function does not support multi-objective optimization study.")
if improvement_evaluator is None:
improvement_evaluator = RegretBoundEvaluator()
if error_evaluator is None:
if isinstance(improvement_evaluator, BestValueStagnationEvaluator):
error_evaluator = StaticErrorEvaluator(constant=0)
else:
error_evaluator = CrossValidationErrorEvaluator()
trial_numbers = []
completed_trials = []
improvements = []
errors = []
for trial in tqdm.tqdm(study.trials):
if trial.state == optuna.trial.TrialState.COMPLETE:
completed_trials.append(trial)
if len(completed_trials) == 0:
continue
trial_numbers.append(trial.number)
improvement = improvement_evaluator.evaluate(
trials=completed_trials, study_direction=study.direction
)
improvements.append(improvement)
if get_error:
error = error_evaluator.evaluate(
trials=completed_trials, study_direction=study.direction
)
errors.append(error)
if len(errors) == 0:
return _ImprovementInfo(
trial_numbers=trial_numbers, improvements=improvements, errors=None
)
else:
return _ImprovementInfo(
trial_numbers=trial_numbers, improvements=improvements, errors=errors
)
def _get_improvement_scatter(
trial_numbers: list[int],
improvements: list[float],
opacity: float = 1.0,
showlegend: bool = True,
) -> "go.Scatter":
plotly_blue_with_opacity = f"rgba(99, 110, 250, {opacity})"
return go.Scatter(
x=trial_numbers,
y=improvements,
mode="markers+lines",
marker=dict(color=plotly_blue_with_opacity),
line=dict(color=plotly_blue_with_opacity),
name="Terminator Improvement",
showlegend=showlegend,
legendgroup="improvement",
)
def _get_error_scatter(
trial_numbers: list[int],
errors: list[float] | None,
) -> "go.Scatter":
if errors is None:
return go.Scatter()
plotly_red = "rgb(239, 85, 59)"
return go.Scatter(
x=trial_numbers,
y=errors,
mode="markers+lines",
name="Error",
marker=dict(color=plotly_red),
line=dict(color=plotly_red),
)
def _get_y_range(info: _ImprovementInfo, min_n_trials: int) -> tuple[float, float]:
min_value = min(info.improvements)
if info.errors is not None:
min_value = min(min_value, min(info.errors))
# Determine the display range based on trials after min_n_trials.
if len(info.trial_numbers) > min_n_trials:
max_value = max(info.improvements[min_n_trials:])
# If there are no trials after min_trials, determine the display range based on all trials.
else:
max_value = max(info.improvements)
if info.errors is not None:
max_value = max(max_value, max(info.errors))
padding = (max_value - min_value) * PADDING_RATIO_Y
return min_value - padding, max_value + padding
def _get_improvement_plot(info: _ImprovementInfo, min_n_trials: int) -> "go.Figure":
n_trials = len(info.trial_numbers)
fig = go.Figure(
layout=go.Layout(
title="Terminator Improvement Plot",
xaxis=dict(title="Trial"),
yaxis=dict(title="Terminator Improvement"),
)
)
if n_trials == 0:
_logger.warning("There are no complete trials.")
return fig
fig.add_trace(
_get_improvement_scatter(
info.trial_numbers[: min_n_trials + 1],
info.improvements[: min_n_trials + 1],
# Plot line with a lighter color until the number of trials reaches min_n_trials.
OPACITY,
n_trials <= min_n_trials, # Avoid showing legend twice.
)
)
if n_trials > min_n_trials:
fig.add_trace(
_get_improvement_scatter(
info.trial_numbers[min_n_trials:],
info.improvements[min_n_trials:],
)
)
fig.add_trace(_get_error_scatter(info.trial_numbers, info.errors))
fig.update_yaxes(range=_get_y_range(info, min_n_trials))
return fig
| _ImprovementInfo |
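A minimal sketch of calling this plot on a toy study is shown below. The objective and trial count are made up; plotly must be installed, and depending on the optuna version the default RegretBoundEvaluator may pull in extra optional dependencies (e.g. a GP backend). Passing plot_error=True additionally requires an error evaluator whose inputs, such as reported cross-validation scores, are available.

import optuna

# Toy objective, only to produce a study with complete trials.
def objective(trial):
    x = trial.suggest_float("x", -10, 10)
    return (x - 2) ** 2

study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=30)

# Improvement potential per trial; uses RegretBoundEvaluator by default.
fig = optuna.visualization.plot_terminator_improvement(study)
fig.show()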
python | PrefectHQ__prefect | tests/cli/test_work_pool.py | {
"start": 21228,
"end": 27827
} | class ____:
async def test_update_description(self, prefect_client, work_pool):
assert work_pool.description is None
assert work_pool.type is not None
assert work_pool.base_job_template is not None
assert work_pool.is_paused is not None
assert work_pool.concurrency_limit is None
metamorphosis = (
"One morning, as Gregor Samsa was waking up from anxious dreams, he"
" discovered that in bed he had been changed into a monstrous verminous"
" bug."
)
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"work-pool",
"update",
work_pool.name,
"--description",
metamorphosis,
],
expected_code=0,
expected_output=f"Updated work pool '{work_pool.name}'",
)
client_res = await prefect_client.read_work_pool(work_pool.name)
assert client_res.description == metamorphosis
# assert all other fields unchanged
assert client_res.name == work_pool.name
assert client_res.type == work_pool.type
assert client_res.base_job_template == work_pool.base_job_template
assert client_res.is_paused == work_pool.is_paused
assert client_res.concurrency_limit == work_pool.concurrency_limit
async def test_update_concurrency_limit(self, prefect_client, work_pool):
assert work_pool.description is None
assert work_pool.type is not None
assert work_pool.base_job_template is not None
assert work_pool.is_paused is not None
assert work_pool.concurrency_limit is None
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"work-pool",
"update",
work_pool.name,
"--concurrency-limit",
123456,
],
expected_code=0,
expected_output=f"Updated work pool '{work_pool.name}'",
)
client_res = await prefect_client.read_work_pool(work_pool.name)
assert client_res.concurrency_limit == 123456
# assert all other fields unchanged
assert client_res.name == work_pool.name
assert client_res.description == work_pool.description
assert client_res.type == work_pool.type
assert client_res.base_job_template == work_pool.base_job_template
assert client_res.is_paused == work_pool.is_paused
# Verify that the concurrency limit is unmodified when changing another
# setting
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"work-pool",
"update",
work_pool.name,
"--description",
"Hello world lorem ipsum",
],
expected_code=0,
expected_output=f"Updated work pool '{work_pool.name}'",
)
client_res = await prefect_client.read_work_pool(work_pool.name)
assert client_res.concurrency_limit == 123456
assert client_res.description == "Hello world lorem ipsum"
# assert all other fields unchanged
assert client_res.name == work_pool.name
assert client_res.type == work_pool.type
assert client_res.base_job_template == work_pool.base_job_template
assert client_res.is_paused == work_pool.is_paused
async def test_update_base_job_template(self, prefect_client, work_pool):
assert work_pool.description is None
assert work_pool.type is not None
assert work_pool.base_job_template is not None
assert work_pool.is_paused is not None
assert work_pool.concurrency_limit is None
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"work-pool",
"update",
work_pool.name,
"--base-job-template",
Path(__file__).parent / "base-job-templates" / "process-worker.json",
],
expected_code=0,
expected_output=f"Updated work pool '{work_pool.name}'",
)
client_res = await prefect_client.read_work_pool(work_pool.name)
assert client_res.base_job_template != work_pool.base_job_template
assert client_res.base_job_template == {
"job_configuration": {"command": "{{ command }}", "name": "{{ name }}"},
"variables": {
"type": "object",
"properties": {
"name": {
"title": "Name",
"description": "Description.",
"type": "string",
},
"command": {
"title": "Command",
"description": "Command to run.",
"type": "string",
},
},
},
}
# assert all other fields unchanged
assert client_res.name == work_pool.name
assert client_res.description == work_pool.description
assert client_res.type == work_pool.type
assert client_res.is_paused == work_pool.is_paused
assert client_res.concurrency_limit == work_pool.concurrency_limit
async def test_update_multi(self, prefect_client, work_pool):
assert work_pool.description is None
assert work_pool.type is not None
assert work_pool.base_job_template is not None
assert work_pool.is_paused is not None
assert work_pool.concurrency_limit is None
await run_sync_in_worker_thread(
invoke_and_assert,
command=[
"work-pool",
"update",
work_pool.name,
"--description",
"Foo bar baz",
"--concurrency-limit",
300,
],
expected_code=0,
expected_output=f"Updated work pool '{work_pool.name}'",
)
client_res = await prefect_client.read_work_pool(work_pool.name)
assert client_res.description == "Foo bar baz"
assert client_res.concurrency_limit == 300
# assert all other fields unchanged
assert client_res.name == work_pool.name
assert client_res.type == work_pool.type
assert client_res.base_job_template == work_pool.base_job_template
assert client_res.is_paused == work_pool.is_paused
| TestUpdate |
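The same update flags exercised by these tests can be driven from the command line. The snippet below is a hypothetical direct invocation: the pool name is made up, and it assumes the prefect CLI is installed and pointed at a running server or configured profile.

import subprocess

# Hypothetical CLI call mirroring the flags covered by TestUpdate (pool name is made up).
subprocess.run(
    [
        "prefect", "work-pool", "update", "my-pool",
        "--description", "Foo bar baz",
        "--concurrency-limit", "300",
    ],
    check=True,
)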
python | TheAlgorithms__Python | machine_learning/astar.py | {
"start": 687,
"end": 1395
} | class ____:
"""
Class cell represents a cell in the world which have the properties:
position: represented by tuple of x and y coordinates initially set to (0,0).
parent: Contains the parent cell object visited before we arrived at this cell.
g, h, f: Parameters used when calling our heuristic function.
"""
def __init__(self):
self.position = (0, 0)
self.parent = None
self.g = 0
self.h = 0
self.f = 0
"""
Overrides equals method because otherwise cell assign will give
wrong results.
"""
def __eq__(self, cell):
return self.position == cell.position
def showcell(self):
print(self.position)
| Cell |
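The snippet below illustrates how the Cell fields are typically filled in during an A* expansion; the goal coordinates and step cost are made up for illustration.

# Tiny illustration of Cell equality and the f = g + h bookkeeping used by A*.
start = Cell()
start.position = (0, 0)

neighbour = Cell()
neighbour.position = (0, 1)
neighbour.parent = start
neighbour.g = start.g + 1                # path cost from the start cell
neighbour.h = abs(4 - 0) + abs(4 - 1)    # e.g. Manhattan distance to a made-up (4, 4) goal
neighbour.f = neighbour.g + neighbour.h

same_spot = Cell()
same_spot.position = (0, 1)
print(neighbour == same_spot)            # True: __eq__ compares positions only
neighbour.showcell()                     # prints (0, 1)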
python | mlflow__mlflow | dev/clint/tests/rules/test_no_class_based_tests.py | {
"start": 304,
"end": 508
} | class ____:
def test_feature_a(self):
assert True
def test_feature_b(self):
assert True
def helper_method(self):
return 42
# Bad - another class-based test
| TestSomething |
python | aimacode__aima-python | probabilistic_learning.py | {
"start": 122,
"end": 5334
} | class ____:
"""
A probability distribution formed by observing and counting examples.
If p is an instance of this class and o is an observed value, then
there are 3 main operations:
p.add(o) increments the count for observation o by 1.
p.sample() returns a random element from the distribution.
p[o] returns the probability for o (as in a regular ProbDist).
"""
def __init__(self, observations=None, default=0):
"""
Create a distribution, and optionally add in some observations.
By default this is an unsmoothed distribution, but saying default=1,
for example, gives you add-one smoothing.
"""
if observations is None:
observations = []
self.dictionary = {}
self.n_obs = 0
self.default = default
self.sampler = None
for o in observations:
self.add(o)
def add(self, o):
"""Add an observation o to the distribution."""
self.smooth_for(o)
self.dictionary[o] += 1
self.n_obs += 1
self.sampler = None
def smooth_for(self, o):
"""
Include o among the possible observations, whether or not
it's been observed yet.
"""
if o not in self.dictionary:
self.dictionary[o] = self.default
self.n_obs += self.default
self.sampler = None
def __getitem__(self, item):
"""Return an estimate of the probability of item."""
self.smooth_for(item)
return self.dictionary[item] / self.n_obs
# (top() and sample() are not used in this module, but elsewhere.)
def top(self, n):
"""Return (count, obs) tuples for the n most frequent observations."""
return heapq.nlargest(n, [(v, k) for (k, v) in self.dictionary.items()])
def sample(self):
"""Return a random sample from the distribution."""
if self.sampler is None:
self.sampler = weighted_sampler(list(self.dictionary.keys()), list(self.dictionary.values()))
return self.sampler()
def NaiveBayesLearner(dataset, continuous=True, simple=False):
if simple:
return NaiveBayesSimple(dataset)
if continuous:
return NaiveBayesContinuous(dataset)
else:
return NaiveBayesDiscrete(dataset)
def NaiveBayesSimple(distribution):
"""
A simple naive bayes classifier that takes as input a dictionary of
CountingProbDist objects and classifies items according to these distributions.
The input dictionary is in the following form:
(ClassName, ClassProb): CountingProbDist
"""
target_dist = {c_name: prob for c_name, prob in distribution.keys()}
attr_dists = {c_name: count_prob for (c_name, _), count_prob in distribution.items()}
def predict(example):
"""Predict the target value for example. Calculate probabilities for each
class and pick the max."""
def class_probability(target_val):
attr_dist = attr_dists[target_val]
return target_dist[target_val] * product(attr_dist[a] for a in example)
return max(target_dist.keys(), key=class_probability)
return predict
def NaiveBayesDiscrete(dataset):
"""
Just count how many times each value of each input attribute
occurs, conditional on the target value. Count the different
target values too.
"""
target_vals = dataset.values[dataset.target]
target_dist = CountingProbDist(target_vals)
attr_dists = {(gv, attr): CountingProbDist(dataset.values[attr]) for gv in target_vals for attr in dataset.inputs}
for example in dataset.examples:
target_val = example[dataset.target]
target_dist.add(target_val)
for attr in dataset.inputs:
attr_dists[target_val, attr].add(example[attr])
def predict(example):
"""
Predict the target value for example. Consider each possible value,
and pick the most likely by looking at each attribute independently.
"""
def class_probability(target_val):
return (target_dist[target_val] * product(attr_dists[target_val, attr][example[attr]]
for attr in dataset.inputs))
return max(target_vals, key=class_probability)
return predict
def NaiveBayesContinuous(dataset):
"""
Count how many times each target value occurs.
Also, find the means and deviations of input attribute values for each target value.
"""
means, deviations = dataset.find_means_and_deviations()
target_vals = dataset.values[dataset.target]
target_dist = CountingProbDist(target_vals)
def predict(example):
"""Predict the target value for example. Consider each possible value,
and pick the most likely by looking at each attribute independently."""
def class_probability(target_val):
prob = target_dist[target_val]
for attr in dataset.inputs:
prob *= gaussian(means[target_val][attr], deviations[target_val][attr], example[attr])
return prob
return max(target_vals, key=class_probability)
return predict
| CountingProbDist |
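A small illustration of the CountingProbDist behaviour described above follows; the observations are made up, and sample() is left out because it relies on the module's weighted_sampler helper.

# Add-one smoothing via default=1; counts update as observations arrive.
p = CountingProbDist(["spam", "spam", "eggs"], default=1)
p.add("spam")
print(p["spam"])   # relative frequency of "spam", smoothing pseudo-counts included
print(p["ham"])    # unseen value: querying it smooths it in with the default count
print(p.top(2))    # [(count, observation), ...] for the two most frequent values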
python | huggingface__transformers | src/transformers/models/emu3/modeling_emu3.py | {
"start": 19105,
"end": 21207
} | class ____(nn.Module):
def __init__(
self,
in_channels: int,
out_channels: Optional[int] = None,
quant_channels: Optional[int] = None,
):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.quant_channels = quant_channels
if quant_channels is None:
self.norm1 = nn.GroupNorm(num_channels=in_channels, num_groups=32, eps=1e-6, affine=True)
self.norm2 = nn.GroupNorm(num_channels=out_channels, num_groups=32, eps=1e-6, affine=True)
else:
self.norm1 = Emu3VQVAESpatialNorm(quant_channels, in_channels)
self.norm2 = Emu3VQVAESpatialNorm(quant_channels, out_channels)
self.conv1 = nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
)
if self.in_channels != self.out_channels:
self.nin_shortcut = nn.Conv2d(
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0,
)
def forward(self, hidden_states: torch.Tensor, quant_channels: Optional[torch.Tensor] = None):
norm_args = () if self.quant_channels is None else (quant_channels,)
residual = hidden_states
hidden_states = self.norm1(hidden_states, *norm_args)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv1(hidden_states)
hidden_states = self.norm2(hidden_states, *norm_args)
hidden_states *= torch.sigmoid(hidden_states)
hidden_states = self.conv2(hidden_states)
if self.in_channels != self.out_channels:
residual = self.nin_shortcut(residual)
return residual + hidden_states
| Emu3VQVAEResnetBlock |
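A quick shape check of the residual block is sketched below. The channel counts and input size are made up; both channel counts are kept divisible by 32 because the non-quantized path uses GroupNorm with 32 groups.

import torch

# Hypothetical shape check (channel counts are made up).
block = Emu3VQVAEResnetBlock(in_channels=64, out_channels=128)  # quant_channels=None -> GroupNorm path
x = torch.randn(2, 64, 32, 32)
out = block(x)
print(out.shape)  # torch.Size([2, 128, 32, 32]); the 1x1 nin_shortcut matches the residual channels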
python | kamyu104__LeetCode-Solutions | Python/super-ugly-number.py | {
"start": 808,
"end": 1553
} | class ____(object):
def nthSuperUglyNumber(self, n, primes):
"""
:type n: int
:type primes: List[int]
:rtype: int
"""
uglies, idx, heap, ugly_set = [0] * n, [0] * len(primes), [], set([1])
uglies[0] = 1
for k, p in enumerate(primes):
heapq.heappush(heap, (p, k))
ugly_set.add(p)
for i in xrange(1, n):
uglies[i], k = heapq.heappop(heap)
while (primes[k] * uglies[idx[k]]) in ugly_set:
idx[k] += 1
heapq.heappush(heap, (primes[k] * uglies[idx[k]], k))
ugly_set.add(primes[k] * uglies[idx[k]])
return uglies[-1]
# Time: O(n * logk) ~ O(n * klogk)
# Space: O(n + k)
| Solution2 |
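For reference, the snippet below runs the heap-based solution on the standard LeetCode example. Note the class is written in Python 2 style (xrange) and expects heapq to be imported at module scope.

import heapq  # the class body assumes heapq is already available at module scope

# Standard LeetCode example; under Python 3, replace xrange with range inside the class.
solver = Solution2()
print(solver.nthSuperUglyNumber(12, [2, 7, 13, 19]))  # 32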
python | PyCQA__pylint | doc/data/messages/s/super-init-not-called/bad.py | {
"start": 118,
"end": 224
} | class ____(Fruit):
def __init__(self): # [super-init-not-called]
print("Creating an apple")
| Apple |
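For contrast, the compliant counterpart that satisfies super-init-not-called simply delegates to the parent initializer first; this mirrors the rule's documented fix, assuming Fruit.__init__ takes no arguments.

class Apple(Fruit):
    def __init__(self):
        super().__init__()  # parent initializer is called, so the checker is satisfied
        print("Creating an apple")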
python | Textualize__textual | src/textual/widgets/_button.py | {
"start": 946,
"end": 1052
} | class ____(Exception):
"""Exception raised if an invalid button variant is used."""
| InvalidButtonVariant |
python | scikit-learn__scikit-learn | benchmarks/bench_plot_nmf.py | {
"start": 6555,
"end": 15395
} | class ____(NMF):
"""Non-Negative Matrix Factorization (NMF) with projected gradient solver.
This class is private and for comparison purpose only.
It may change or disappear without notice.
"""
def __init__(
self,
n_components=None,
solver="pg",
init=None,
tol=1e-4,
max_iter=200,
random_state=None,
alpha=0.0,
l1_ratio=0.0,
nls_max_iter=10,
):
super().__init__(
n_components=n_components,
init=init,
solver=solver,
tol=tol,
max_iter=max_iter,
random_state=random_state,
alpha_W=alpha,
alpha_H=alpha,
l1_ratio=l1_ratio,
)
self.nls_max_iter = nls_max_iter
def fit(self, X, y=None, **params):
self.fit_transform(X, **params)
return self
def transform(self, X):
check_is_fitted(self)
H = self.components_
W, _, self.n_iter_ = self._fit_transform(X, H=H, update_H=False)
return W
def inverse_transform(self, W):
check_is_fitted(self)
return np.dot(W, self.components_)
def fit_transform(self, X, y=None, W=None, H=None):
W, H, self.n_iter = self._fit_transform(X, W=W, H=H, update_H=True)
self.components_ = H
return W
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
X = check_array(X, accept_sparse=("csr", "csc"))
check_non_negative(X, "NMF (input X)")
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
if not isinstance(n_components, numbers.Integral) or n_components <= 0:
raise ValueError(
"Number of components must be a positive integer; got (n_components=%r)"
% n_components
)
if not isinstance(self.max_iter, numbers.Integral) or self.max_iter < 0:
raise ValueError(
"Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % self.max_iter
)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError(
"Tolerance for stopping criteria must be positive; got (tol=%r)"
% self.tol
)
# check W and H, or initialize them
if self.init == "custom" and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(
X, n_components, init=self.init, random_state=self.random_state
)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(
X,
W,
H,
self.tol,
self.max_iter,
self.nls_max_iter,
self.alpha,
self.l1_ratio,
)
else: # transform
Wt, _, n_iter = _nls_subproblem(
X.T,
H.T,
W.T,
self.tol,
self.nls_max_iter,
alpha=self.alpha,
l1_ratio=self.l1_ratio,
)
W = Wt.T
if n_iter == self.max_iter and self.tol > 0:
warnings.warn(
"Maximum number of iteration %d reached. Increase it"
" to improve convergence." % self.max_iter,
ConvergenceWarning,
)
return W, H, n_iter
#################
# End of _PGNMF #
#################
def plot_results(results_df, plot_name):
if results_df is None:
return None
plt.figure(figsize=(16, 6))
colors = "bgr"
markers = "ovs"
ax = plt.subplot(1, 3, 1)
for i, init in enumerate(np.unique(results_df["init"])):
plt.subplot(1, 3, i + 1, sharex=ax, sharey=ax)
for j, method in enumerate(np.unique(results_df["method"])):
mask = np.logical_and(
results_df["init"] == init, results_df["method"] == method
)
selected_items = results_df[mask]
plt.plot(
selected_items["time"],
selected_items["loss"],
color=colors[j % len(colors)],
ls="-",
marker=markers[j % len(markers)],
label=method,
)
plt.legend(loc=0, fontsize="x-small")
plt.xlabel("Time (s)")
plt.ylabel("loss")
plt.title("%s" % init)
plt.suptitle(plot_name, fontsize=16)
@ignore_warnings(category=ConvergenceWarning)
# use joblib to cache the results.
# X_shape is specified in arguments for avoiding hashing X
@mem.cache(ignore=["X", "W0", "H0"])
def bench_one(
name, X, W0, H0, X_shape, clf_type, clf_params, init, n_components, random_state
):
W = W0.copy()
H = H0.copy()
clf = clf_type(**clf_params)
st = time()
W = clf.fit_transform(X, W=W, H=H)
end = time()
H = clf.components_
this_loss = _beta_divergence(X, W, H, 2.0, True)
duration = end - st
return this_loss, duration
def run_bench(X, clfs, plot_name, n_components, tol, alpha, l1_ratio):
start = time()
results = []
for name, clf_type, iter_range, clf_params in clfs:
print("Training %s:" % name)
for rs, init in enumerate(("nndsvd", "nndsvdar", "random")):
print(" %s %s: " % (init, " " * (8 - len(init))), end="")
W, H = _initialize_nmf(X, n_components, init, 1e-6, rs)
for max_iter in iter_range:
clf_params["alpha"] = alpha
clf_params["l1_ratio"] = l1_ratio
clf_params["max_iter"] = max_iter
clf_params["tol"] = tol
clf_params["random_state"] = rs
clf_params["init"] = "custom"
clf_params["n_components"] = n_components
this_loss, duration = bench_one(
name, X, W, H, X.shape, clf_type, clf_params, init, n_components, rs
)
init_name = "init='%s'" % init
results.append((name, this_loss, duration, init_name))
# print("loss: %.6f, time: %.3f sec" % (this_loss, duration))
print(".", end="")
sys.stdout.flush()
print(" ")
# Use a panda dataframe to organize the results
results_df = pandas.DataFrame(results, columns="method loss time init".split())
print("Total time = %0.3f sec\n" % (time() - start))
# plot the results
plot_results(results_df, plot_name)
return results_df
def load_20news():
print("Loading 20 newsgroups dataset")
print("-----------------------------")
from sklearn.datasets import fetch_20newsgroups
dataset = fetch_20newsgroups(
shuffle=True, random_state=1, remove=("headers", "footers", "quotes")
)
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, stop_words="english")
tfidf = vectorizer.fit_transform(dataset.data)
return tfidf
def load_faces():
print("Loading Olivetti face dataset")
print("-----------------------------")
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces(shuffle=True)
return faces.data
def build_clfs(cd_iters, pg_iters, mu_iters):
clfs = [
("Coordinate Descent", NMF, cd_iters, {"solver": "cd"}),
("Projected Gradient", _PGNMF, pg_iters, {"solver": "pg"}),
("Multiplicative Update", NMF, mu_iters, {"solver": "mu"}),
]
return clfs
if __name__ == "__main__":
alpha = 0.0
l1_ratio = 0.5
n_components = 10
tol = 1e-15
# first benchmark on 20 newsgroup dataset: sparse, shape(11314, 39116)
plot_name = "20 Newsgroups sparse dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 6)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_20news = load_20news()
run_bench(X_20news, clfs, plot_name, n_components, tol, alpha, l1_ratio)
# second benchmark on Olivetti faces dataset: dense, shape(400, 4096)
plot_name = "Olivetti Faces dense dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 12)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_faces = load_faces()
run_bench(
X_faces,
clfs,
plot_name,
n_components,
tol,
alpha,
l1_ratio,
)
plt.show()
| _PGNMF |
python | pyca__cryptography | src/cryptography/hazmat/primitives/hashes.py | {
"start": 4349,
"end": 4725
} | class ____(HashAlgorithm):
name = "blake2b"
_max_digest_size = 64
_min_digest_size = 1
block_size = 128
def __init__(self, digest_size: int):
if digest_size != 64:
raise ValueError("Digest size must be 64")
self._digest_size = digest_size
@property
def digest_size(self) -> int:
return self._digest_size
| BLAKE2b |
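Usage mirrors the other hash algorithms in this module; the sketch below feeds data through a one-shot Hash context. The input bytes are made up, and 64 is the only digest size this class accepts.

from cryptography.hazmat.primitives import hashes

# Streaming-style hashing with BLAKE2b; finalize() returns the 64-byte digest.
digest = hashes.Hash(hashes.BLAKE2b(digest_size=64))
digest.update(b"hello ")
digest.update(b"world")
print(digest.finalize().hex())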