| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96) |
|---|---|---|---|---|---|
python | pytorch__pytorch | torch/compiler/_cache.py | {
"start": 1768,
"end": 3419
} | class ____:
"""
Factory for creating CacheArtifact objects based on their type
"""
_artifact_types: dict[str, type[CacheArtifact]] = {}
@classmethod
def register(cls, artifact_cls: type[CacheArtifact]) -> type[CacheArtifact]:
artifact_type_key = artifact_cls.type()
assert artifact_cls.type() not in cls._artifact_types, (
f"Artifact of type={artifact_type_key} already registered in mega-cache artifact factory"
)
cls._artifact_types[artifact_type_key] = artifact_cls
setattr(
CacheInfo,
f"{artifact_type_key}_artifacts",
property(lambda self: self.artifacts[artifact_type_key]),
)
return artifact_cls
@classmethod
def _get_artifact_type(cls, artifact_type_key: str) -> type[CacheArtifact]:
assert artifact_type_key in cls._artifact_types, (
f"Artifact of type={artifact_type_key} not registered in mega-cache artifact factory"
)
return cls._artifact_types[artifact_type_key]
@classmethod
def create(cls, artifact_type_key: str, key: str, content: bytes) -> CacheArtifact:
artifact_cls = cls._get_artifact_type(artifact_type_key)
# pyrefly: ignore [bad-instantiation]
return artifact_cls(key, content)
@classmethod
def encode_create(
cls, artifact_type_key: str, key: str, content: Any
) -> CacheArtifact:
artifact_cls = cls._get_artifact_type(artifact_type_key)
# pyrefly: ignore [bad-instantiation]
return artifact_cls(key, artifact_cls.encode(content))
@dataclasses.dataclass
| CacheArtifactFactory |
python | huggingface__transformers | src/transformers/models/codegen/modeling_codegen.py | {
"start": 9216,
"end": 10037
} | class ____(nn.Module):
def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * embed_dim
super().__init__()
embed_dim = config.n_embd
self.fc_in = nn.Linear(embed_dim, intermediate_size)
self.fc_out = nn.Linear(intermediate_size, embed_dim)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, hidden_states: Optional[torch.FloatTensor]) -> torch.FloatTensor:
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
# Copied from transformers.models.gptj.modeling_gptj.GPTJBlock with GPTJ->CodeGen
| CodeGenMLP |
python | huggingface__transformers | src/transformers/models/siglip2/modular_siglip2.py | {
"start": 4866,
"end": 4912
} | class ____(SiglipConfig):
pass
| Siglip2Config |
python | wandb__wandb | wandb/sdk/artifacts/_generated/add_artifact_collection_tags.py | {
"start": 222,
"end": 322
} | class ____(GQLResult):
result: Optional[AddArtifactCollectionTagsResult]
| AddArtifactCollectionTags |
python | numba__numba | numba/tests/test_nrt_refct.py | {
"start": 240,
"end": 2911
} | class ____(EnableNRTStatsMixin, TestCase):
def setUp(self):
# Clean up any NRT-backed objects hanging in a dead reference cycle
gc.collect()
super(TestNrtRefCt, self).setUp()
def test_no_return(self):
"""
Test issue #1291
"""
@njit
def foo(n):
for i in range(n):
temp = np.zeros(2)
return 0
n = 10
init_stats = rtsys.get_allocation_stats()
foo(n)
cur_stats = rtsys.get_allocation_stats()
self.assertEqual(cur_stats.alloc - init_stats.alloc, n)
self.assertEqual(cur_stats.free - init_stats.free, n)
def test_escaping_var_init_in_loop(self):
"""
Test issue #1297
"""
@njit
def g(n):
x = np.zeros((n, 2))
for i in range(n):
y = x[i]
for i in range(n):
y = x[i]
return 0
init_stats = rtsys.get_allocation_stats()
g(10)
cur_stats = rtsys.get_allocation_stats()
self.assertEqual(cur_stats.alloc - init_stats.alloc, 1)
self.assertEqual(cur_stats.free - init_stats.free, 1)
def test_invalid_computation_of_lifetime(self):
"""
Test issue #1573
"""
@njit
def if_with_allocation_and_initialization(arr1, test1):
tmp_arr = np.zeros_like(arr1)
for i in range(tmp_arr.shape[0]):
pass
if test1:
np.zeros_like(arr1)
return tmp_arr
arr = np.random.random((5, 5)) # the values are not consumed
init_stats = rtsys.get_allocation_stats()
if_with_allocation_and_initialization(arr, False)
cur_stats = rtsys.get_allocation_stats()
self.assertEqual(cur_stats.alloc - init_stats.alloc,
cur_stats.free - init_stats.free)
def test_del_at_beginning_of_loop(self):
"""
Test issue #1734
"""
@njit
def f(arr):
res = 0
for i in (0, 1):
# `del t` is issued here before defining t. It must be
# correctly handled by the lowering phase.
t = arr[i]
if t[i] > 1:
res += t[i]
return res
arr = np.ones((2, 2))
init_stats = rtsys.get_allocation_stats()
f(arr)
cur_stats = rtsys.get_allocation_stats()
self.assertEqual(cur_stats.alloc - init_stats.alloc,
cur_stats.free - init_stats.free)
if __name__ == '__main__':
unittest.main()
| TestNrtRefCt |
python | huggingface__transformers | src/transformers/models/trocr/processing_trocr.py | {
"start": 989,
"end": 3565
} | class ____(ProcessorMixin):
r"""
Constructs a TrOCR processor which wraps a vision image processor and a TrOCR tokenizer into a single processor.
[`TrOCRProcessor`] offers all the functionalities of [`ViTImageProcessor`/`DeiTImageProcessor`] and
[`RobertaTokenizer`/`XLMRobertaTokenizer`]. See the [`~TrOCRProcessor.__call__`] and [`~TrOCRProcessor.decode`] for
more information.
Args:
image_processor ([`ViTImageProcessor`/`DeiTImageProcessor`], *optional*):
An instance of [`ViTImageProcessor`/`DeiTImageProcessor`]. The image processor is a required input.
tokenizer ([`RobertaTokenizer`/`XLMRobertaTokenizer`], *optional*):
An instance of [`RobertaTokenizer`/`XLMRobertaTokenizer`]. The tokenizer is a required input.
"""
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
super().__init__(image_processor, tokenizer)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
**kwargs: Unpack[TrOCRProcessorKwargs],
) -> BatchFeature:
"""
When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
[`~AutoImageProcessor.__call__`] and returns its output. If used in the context
[`~TrOCRProcessor.as_target_processor`] this method forwards all its arguments to TrOCRTokenizer's
[`~TrOCRTokenizer.__call__`]. Please refer to the docstring of the above two methods for more information.
"""
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process.")
output_kwargs = self._merge_kwargs(
TrOCRProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if images is not None:
inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
if text is not None:
encodings = self.tokenizer(text, **output_kwargs["text_kwargs"])
if text is None:
return inputs
elif images is None:
return encodings
else:
inputs["labels"] = encodings["input_ids"]
return inputs
@property
def model_input_names(self):
image_processor_input_names = self.image_processor.model_input_names
return image_processor_input_names + ["labels"]
__all__ = ["TrOCRProcessor"]
| TrOCRProcessor |
python | huggingface__transformers | src/transformers/models/git/modeling_git.py | {
"start": 36236,
"end": 46673
} | class ____(GitPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = GitEmbeddings(config)
self.image_encoder = GitVisionModel(config.vision_config)
self.encoder = GitEncoder(config)
self.visual_projection = GitProjection(config)
if config.num_image_with_embedding is not None:
self.img_temporal_embedding = nn.ParameterList(
nn.Parameter(torch.zeros(1, 1, config.vision_config.hidden_size))
for _ in range(config.num_image_with_embedding)
)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _generate_future_mask(self, size: int, dtype: torch.dtype, device: torch.device) -> torch.Tensor:
# Default mask is for forward direction. Flip for backward direction.
mask = torch.triu(torch.ones(size, size, device=device, dtype=dtype), diagonal=1)
mask = mask.masked_fill(mask == 1, float("-inf"))
return mask
def create_attention_mask(self, tgt, memory, tgt_mask, past_key_values_length, memory_key_padding_mask=None):
num_tgt = tgt.shape[1]
num_memory = memory.shape[1]
device = tgt.device
dtype = tgt.dtype
top_left = torch.zeros((num_memory, num_memory), device=device, dtype=dtype)
top_right = torch.full(
(num_memory, num_tgt + past_key_values_length),
float("-inf"),
device=tgt.device,
dtype=dtype,
)
bottom_left = torch.zeros(
(num_tgt, num_memory),
dtype=dtype,
device=tgt_mask.device,
)
if past_key_values_length > 0:
tgt_mask = torch.zeros(
(tgt_mask.shape[0], tgt_mask.shape[0] + past_key_values_length),
dtype=dtype,
device=tgt_mask.device,
)
left = torch.cat((top_left, bottom_left), dim=0)
right = torch.cat((top_right, tgt_mask.to(dtype)), dim=0)
full_attention_mask = torch.cat((left, right), dim=1)[None, :]
if memory_key_padding_mask is None:
memory_key_padding_mask = torch.full((memory.shape[0], memory.shape[1]), fill_value=False, device=device)
# if it is False, it means valid. That is, it is not a padding
if memory_key_padding_mask.dtype != torch.bool:
raise ValueError("Memory key padding mask must be a boolean tensor.")
zero_negative_infinity = torch.zeros_like(memory_key_padding_mask, dtype=tgt.dtype)
zero_negative_infinity[memory_key_padding_mask] = float("-inf")
full_attention_mask = full_attention_mask.expand(
(memory_key_padding_mask.shape[0], num_memory + num_tgt, num_memory + past_key_values_length + num_tgt)
)
full_attention_mask = full_attention_mask.clone()
origin_left = full_attention_mask[:, :, :num_memory]
update = zero_negative_infinity[:, None, :]
full_attention_mask[:, :, :num_memory] = origin_left + update
# add axis for multi-head
full_attention_mask = full_attention_mask[:, None, :, :]
return full_attention_mask
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
pixel_values: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPooling]:
r"""
Examples:
```python
>>> from transformers import AutoProcessor, AutoModel
>>> import requests
>>> from PIL import Image
>>> processor = AutoProcessor.from_pretrained("microsoft/git-base")
>>> model = AutoModel.from_pretrained("microsoft/git-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "this is an image of two cats"
>>> inputs = processor(images=image, text=text, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
seq_length = input_shape[1]
# past_key_values_length
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = (
past_key_values.get_seq_length()
if not isinstance(past_key_values, Cache)
else past_key_values.get_seq_length()
)
projected_visual_features = None
if pixel_values is not None:
if pixel_values.ndim == 4:
# here we assume pixel_values is of shape (batch_size, num_channels, height, width)
visual_features = self.image_encoder(
pixel_values, interpolate_pos_encoding=interpolate_pos_encoding
).last_hidden_state
elif pixel_values.ndim == 5:
# here we assume pixel_values is of shape (batch_size, num_frames, num_channels, height, width)
visual_features = []
for frame_idx in range(pixel_values.shape[1]):
visual_features_frame = self.image_encoder(
pixel_values[:, frame_idx, :, :], interpolate_pos_encoding=interpolate_pos_encoding
).last_hidden_state
visual_features_frame += self.img_temporal_embedding[frame_idx]
visual_features.append(visual_features_frame)
# finally, concatenate all features along sequence dimension
visual_features = torch.cat(visual_features, dim=1)
else:
raise ValueError("pixel_values must be of rank 4 or 5")
projected_visual_features = self.visual_projection(visual_features)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
if projected_visual_features is None:
projected_visual_features = torch.zeros(
(embedding_output.shape[0], 0, embedding_output.shape[2]),
dtype=embedding_output.dtype,
device=embedding_output.device,
)
# Repeat visual features to match embedding batch size.
projected_visual_features = projected_visual_features.repeat(
embedding_output.size(0) // projected_visual_features.size(0), 1, 1
)
# concatenate patch token and text token embeddings
hidden_states = torch.cat((projected_visual_features, embedding_output), dim=1)
# By default, an additive causal mask is created
# for masking the future (one direction).
tgt_mask = self._generate_future_mask(seq_length, embedding_output.dtype, embedding_output.device)
# Create an attention mask of shape (batch_size, 1, tgt_seq_len, src_seq_len)
combined_attention_mask = self.create_attention_mask(
tgt=embedding_output,
memory=projected_visual_features,
tgt_mask=tgt_mask,
past_key_values_length=past_key_values_length,
)
if attention_mask is not None:
# if the user provides an attention mask, we add it to the default one
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _prepare_4d_attention_mask(
attention_mask, embedding_output.dtype, tgt_len=input_shape[-1]
).to(embedding_output.device)
if past_key_values_length > 0:
expanded_attn_mask = expanded_attn_mask[:, :, -past_key_values_length:, :]
else:
combined_attention_mask[:, :, -input_shape[1] :, -input_shape[1] :] += expanded_attn_mask
encoder_outputs = self.encoder(
hidden_states,
attention_mask=combined_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
pixel_values_present=pixel_values is not None,
)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return BaseModelOutputWithPast(
last_hidden_state=sequence_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
GIT Model with a `language modeling` head on top for autoregressive language modeling.
"""
)
| GitModel |
python | allegroai__clearml | clearml/backend_api/services/v2_9/workers.py | {
"start": 56579,
"end": 58040
} | class ____(Request):
"""
Returns worker statistics metric keys grouped by categories.
:param worker_ids: List of worker ids to collect metrics for. If not provided
or empty then all the company workers metrics are analyzed.
:type worker_ids: Sequence[str]
"""
_service = "workers"
_action = "get_metric_keys"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"worker_ids": {
"description": "List of worker ids to collect metrics for. If not provided or empty then all the company workers metrics are analyzed.",
"items": {"type": "string"},
"type": ["array", "null"],
}
},
"type": "object",
}
def __init__(self, worker_ids: Optional[List[str]] = None, **kwargs: Any) -> None:
super(GetMetricKeysRequest, self).__init__(**kwargs)
self.worker_ids = worker_ids
@schema_property("worker_ids")
def worker_ids(self) -> Optional[List[str]]:
return self._property_worker_ids
@worker_ids.setter
def worker_ids(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_worker_ids = None
return
self.assert_isinstance(value, "worker_ids", (list, tuple))
self.assert_isinstance(value, "worker_ids", six.string_types, is_array=True)
self._property_worker_ids = value
| GetMetricKeysRequest |
python | django__django | tests/app_loading/tests.py | {
"start": 127,
"end": 2607
} | class ____(SimpleTestCase):
def setUp(self):
self.egg_dir = "%s/eggs" % os.path.dirname(__file__)
self.addCleanup(apps.clear_cache)
def test_egg1(self):
"""Models module can be loaded from an app in an egg"""
egg_name = "%s/modelapp.egg" % self.egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=["app_with_models"]):
models_module = apps.get_app_config("app_with_models").models_module
self.assertIsNotNone(models_module)
del apps.all_models["app_with_models"]
def test_egg2(self):
"""
Loading an app from an egg that has no models returns no models (and no
error).
"""
egg_name = "%s/nomodelapp.egg" % self.egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=["app_no_models"]):
models_module = apps.get_app_config("app_no_models").models_module
self.assertIsNone(models_module)
del apps.all_models["app_no_models"]
def test_egg3(self):
"""
Models module can be loaded from an app located under an egg's
top-level package.
"""
egg_name = "%s/omelet.egg" % self.egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=["omelet.app_with_models"]):
models_module = apps.get_app_config("app_with_models").models_module
self.assertIsNotNone(models_module)
del apps.all_models["app_with_models"]
def test_egg4(self):
"""
Loading an app with no models from under the top-level egg package
generates no error.
"""
egg_name = "%s/omelet.egg" % self.egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=["omelet.app_no_models"]):
models_module = apps.get_app_config("app_no_models").models_module
self.assertIsNone(models_module)
del apps.all_models["app_no_models"]
def test_egg5(self):
"""
Loading an app from an egg that has an import error in its models
module raises that error.
"""
egg_name = "%s/brokenapp.egg" % self.egg_dir
with extend_sys_path(egg_name):
with self.assertRaisesMessage(ImportError, "modelz"):
with self.settings(INSTALLED_APPS=["broken_app"]):
pass
| EggLoadingTest |
python | getsentry__sentry | tests/sentry/workflow_engine/processors/test_data_condition_group.py | {
"start": 4122,
"end": 5051
} | class ____(TestCase):
def setUp(self) -> None:
self.data_condition_group = self.create_data_condition_group(
logic_type=DataConditionGroup.Type.ANY
)
self.data_condition = self.create_data_condition(
type=Condition.GREATER,
comparison=5,
condition_result=DetectorPriorityLevel.HIGH,
condition_group=self.data_condition_group,
)
self.data_condition_two = self.create_data_condition(
type=Condition.GREATER,
comparison=3,
condition_result=DetectorPriorityLevel.LOW,
condition_group=self.data_condition_group,
)
self.conditions = [self.data_condition, self.data_condition_two]
def get_conditions_to_evaluate(self, value: int) -> list[tuple[DataCondition, int]]:
return [(condition, value) for condition in self.conditions]
| TestEvaluationConditionCase |
python | joke2k__faker | faker/providers/address/es_CL/__init__.py | {
"start": 144,
"end": 19785
} | class ____(AddressProvider):
# Source for regions, provinces and communes
# https://www.subdere.gov.cl/documentacion/c%C3%B3digos-%C3%BAnicos-
# territoriales-actualizados-al-06-de-septiembre-2018
regions: Dict[str, str] = {
"TA": "Región de Tarapacá",
"AN": "Región de Antofagasta",
"AT": "Región de Atacama",
"CO": "Región de Coquimbo",
"VA": "Región de Valparaíso",
"LI": "Región del Libertador General Bernardo O'Higgins",
"ML": "Región del Maule",
"BI": "Región del Biobío",
"AR": "Región de La Araucanía",
"LL": "Región de Los Lagos",
"AI": "Región de Aysén del General Carlos Ibáñez del Campo",
"MA": "Región de Magallanes y de la Antártica Chilena",
"RM": "Región Metropolitana",
"LR": "Región de Los Ríos",
"AP": "Región de Arica y Parinacota",
"NB": "Región de Ñuble",
}
provinces: Dict[str, str] = {
"011": "Iquique",
"014": "Tamarugal",
"021": "Antofagasta",
"022": "El Loa",
"023": "Tocopilla",
"031": "Copiapó",
"032": "Chañaral",
"033": "Huasco",
"041": "Elqui",
"042": "Choapa",
"043": "Limarí",
"051": "Valparaíso",
"052": "Isla de Pascua",
"053": "Los Andes",
"054": "Petorca",
"055": "Quillota",
"056": "San Antonio",
"057": "San Felipe de Aconcagua",
"058": "Marga Marga",
"061": "Cachapoal",
"062": "Cardenal Caro",
"063": "Colchagua",
"071": "Talca",
"072": "Cauquenes",
"073": "Curicó",
"074": "Linares",
"081": "Concepción",
"082": "Arauco",
"083": "Biobío",
"091": "Cautín",
"092": "Malleco",
"101": "Llanquihue",
"102": "Chiloé",
"103": "Osorno",
"104": "Palena",
"111": "Coyhaique",
"112": "Aysén",
"113": "Capitán Prat",
"114": "General Carrera",
"121": "Magallanes",
"122": "Antártica Chilena",
"123": "Tierra del Fuego",
"124": "Última Esperanza",
"131": "Santiago",
"132": "Cordillera",
"133": "Chacabuco",
"134": "Maipo",
"135": "Melipilla",
"136": "Talagante",
"141": "Valdivia",
"142": "Ranco",
"151": "Arica",
"152": "Parinacota",
"161": "Diguillín",
"162": "Itata",
"163": "Punilla",
}
communes: Dict[str, str] = {
"15101": "Arica",
"15102": "Camarones",
"15201": "Putre",
"15202": "General Lagos",
"01101": "Iquique",
"01402": "Camiña",
"01403": "Colchane",
"01404": "Huara",
"01405": "Pica",
"01401": "Pozo Almonte",
"01107": "Alto Hospicio",
"02101": "Antofagasta",
"02102": "Mejillones",
"02103": "Sierra Gorda",
"02104": "Taltal",
"02201": "Calama",
"02202": "Ollagüe",
"02203": "San Pedro de Atacama",
"02301": "Tocopilla",
"02302": "María Elena",
"03101": "Copiapó",
"03102": "Caldera",
"03103": "Tierra Amarilla",
"03201": "Chañaral",
"03202": "Diego de Almagro",
"03301": "Vallenar",
"03302": "Alto del Carmen",
"03303": "Freirina",
"03304": "Huasco",
"04101": "La Serena",
"04102": "Coquimbo",
"04103": "Andacollo",
"04104": "La Higuera",
"04105": "Paiguano",
"04106": "Vicuña",
"04201": "Illapel",
"04202": "Canela",
"04203": "Los Vilos",
"04204": "Salamanca",
"04301": "Ovalle",
"04302": "Combarbalá",
"04303": "Monte Patria",
"04304": "Punitaqui",
"04305": "Río Hurtado",
"05101": "Valparaíso",
"05102": "Casablanca",
"05103": "Concón",
"05104": "Juan Fernández",
"05105": "Puchuncaví",
"05801": "Quilpué",
"05107": "Quintero",
"05804": "Villa Alemana",
"05109": "Viña del Mar",
"05201": "Isla de Pascua",
"05301": "Los Andes",
"05302": "Calle Larga",
"05303": "Rinconada",
"05304": "San Esteban",
"05401": "La Ligua",
"05402": "Cabildo",
"05403": "Papudo",
"05404": "Petorca",
"05405": "Zapallar",
"05501": "Quillota",
"05502": "Calera",
"05503": "Hijuelas",
"05504": "La Cruz",
"05802": "Limache",
"05506": "Nogales",
"05803": "Olmué",
"05601": "San Antonio",
"05602": "Algarrobo",
"05603": "Cartagena",
"05604": "El Quisco",
"05605": "El Tabo",
"05606": "Santo Domingo",
"05701": "San Felipe",
"05702": "Catemu",
"05703": "Llaillay",
"05704": "Panquehue",
"05705": "Putaendo",
"05706": "Santa María",
"06101": "Rancagua",
"06102": "Codegua",
"06103": "Coinco",
"06104": "Coltauco",
"06105": "Doñihue",
"06106": "Graneros",
"06107": "Las Cabras",
"06108": "Machalí",
"06109": "Malloa",
"06110": "Mostazal",
"06111": "Olivar",
"06112": "Peumo",
"06113": "Pichidegua",
"06114": "Quinta de Tilcoco",
"06115": "Rengo",
"06116": "Requínoa",
"06117": "San Vicente",
"06201": "Pichilemu",
"06202": "La Estrella",
"06203": "Litueche",
"06204": "Marchihue",
"06205": "Navidad",
"06206": "Paredones",
"06301": "San Fernando",
"06302": "Chépica",
"06303": "Chimbarongo",
"06304": "Lolol",
"06305": "Nancagua",
"06306": "Palmilla",
"06307": "Peralillo",
"06308": "Placilla",
"06309": "Pumanque",
"06310": "Santa Cruz",
"07101": "Talca",
"07102": "Constitución",
"07103": "Curepto",
"07104": "Empedrado",
"07105": "Maule",
"07106": "Pelarco",
"07107": "Pencahue",
"07108": "Río Claro",
"07109": "San Clemente",
"07110": "San Rafael",
"07201": "Cauquenes",
"07202": "Chanco",
"07203": "Pelluhue",
"07301": "Curicó",
"07302": "Hualañé",
"07303": "Licantén",
"07304": "Molina",
"07305": "Rauco",
"07306": "Romeral",
"07307": "Sagrada Familia",
"07308": "Teno",
"07309": "Vichuquén",
"07401": "Linares",
"07402": "Colbún",
"07403": "Longaví",
"07404": "Parral",
"07405": "Retiro",
"07406": "San Javier",
"07407": "Villa Alegre",
"07408": "Yerbas Buenas",
"08101": "Concepción",
"08102": "Coronel",
"08103": "Chiguayante",
"08104": "Florida",
"08105": "Hualqui",
"08106": "Lota",
"08107": "Penco",
"08108": "San Pedro de la Paz",
"08109": "Santa Juana",
"08110": "Talcahuano",
"08111": "Tomé",
"08112": "Hualpén",
"08201": "Lebu",
"08202": "Arauco",
"08203": "Cañete",
"08204": "Contulmo",
"08205": "Curanilahue",
"08206": "Los Álamos",
"08207": "Tirúa",
"08301": "Los Ángeles",
"08302": "Antuco",
"08303": "Cabrero",
"08304": "Laja",
"08305": "Mulchén",
"08306": "Nacimiento",
"08307": "Negrete",
"08308": "Quilaco",
"08309": "Quilleco",
"08310": "San Rosendo",
"08311": "Santa Bárbara",
"08312": "Tucapel",
"08313": "Yumbel",
"08314": "Alto Biobío",
"16101": "Chillán",
"16102": "Bulnes",
"16202": "Cobquecura",
"16203": "Coelemu",
"16302": "Coihueco",
"16103": "Chillán Viejo",
"16104": "El Carmen",
"16204": "Ninhue",
"16303": "Ñiquén",
"16105": "Pemuco",
"16106": "Pinto",
"16205": "Portezuelo",
"16107": "Quillón",
"16201": "Quirihue",
"16206": "Ránquil",
"16301": "San Carlos",
"16304": "San Fabián",
"16108": "San Ignacio",
"16305": "San Nicolás",
"16207": "Treguaco",
"16109": "Yungay",
"09101": "Temuco",
"09102": "Carahue",
"09103": "Cunco",
"09104": "Curarrehue",
"09105": "Freire",
"09106": "Galvarino",
"09107": "Gorbea",
"09108": "Lautaro",
"09109": "Loncoche",
"09110": "Melipeuco",
"09111": "Nueva Imperial",
"09112": "Padre Las Casas",
"09113": "Perquenco",
"09114": "Pitrufquén",
"09115": "Pucón",
"09116": "Saavedra",
"09117": "Teodoro Schmidt",
"09118": "Toltén",
"09119": "Vilcún",
"09120": "Villarrica",
"09121": "Cholchol",
"09201": "Angol",
"09202": "Collipulli",
"09203": "Curacautín",
"09204": "Ercilla",
"09205": "Lonquimay",
"09206": "Los Sauces",
"09207": "Lumaco",
"09208": "Purén",
"09209": "Renaico",
"09210": "Traiguén",
"09211": "Victoria",
"14101": "Valdivia",
"14102": "Corral",
"14202": "Futrono",
"14201": "La Unión",
"14203": "Lago Ranco",
"14103": "Lanco",
"14104": "Los Lagos",
"14105": "Máfil",
"14106": "Mariquina",
"14107": "Paillaco",
"14108": "Panguipulli",
"14204": "Río Bueno",
"10101": "Puerto Montt",
"10102": "Calbuco",
"10103": "Cochamó",
"10104": "Fresia",
"10105": "Frutillar",
"10106": "Los Muermos",
"10107": "Llanquihue",
"10108": "Maullín",
"10109": "Puerto Varas",
"10201": "Castro",
"10202": "Ancud",
"10203": "Chonchi",
"10204": "Curaco de Vélez",
"10205": "Dalcahue",
"10206": "Puqueldón",
"10207": "Queilén",
"10208": "Quellón",
"10209": "Quemchi",
"10210": "Quinchao",
"10301": "Osorno",
"10302": "Puerto Octay",
"10303": "Purranque",
"10304": "Puyehue",
"10305": "Río Negro",
"10306": "San Juan de la Costa",
"10307": "San Pablo",
"10401": "Chaitén",
"10402": "Futaleufú",
"10403": "Hualaihué",
"10404": "Palena",
"11101": "Coihaique",
"11102": "Lago Verde",
"11201": "Aisén",
"11202": "Cisnes",
"11203": "Guaitecas",
"11301": "Cochrane",
"11302": "O'Higgins",
"11303": "Tortel",
"11401": "Chile Chico",
"11402": "Río Ibáñez",
"12101": "Punta Arenas",
"12102": "Laguna Blanca",
"12103": "Río Verde",
"12104": "San Gregorio",
"12201": "Cabo de Hornos",
"12202": "Antártica",
"12301": "Porvenir",
"12302": "Primavera",
"12303": "Timaukel",
"12401": "Natales",
"12402": "Torres del Paine",
"13101": "Santiago",
"13102": "Cerrillos",
"13103": "Cerro Navia",
"13104": "Conchalí",
"13105": "El Bosque",
"13106": "Estación Central",
"13107": "Huechuraba",
"13108": "Independencia",
"13109": "La Cisterna",
"13110": "La Florida",
"13111": "La Granja",
"13112": "La Pintana",
"13113": "La Reina",
"13114": "Las Condes",
"13115": "Lo Barnechea",
"13116": "Lo Espejo",
"13117": "Lo Prado",
"13118": "Macul",
"13119": "Maipú",
"13120": "Ñuñoa",
"13121": "Pedro Aguirre Cerda",
"13122": "Peñalolén",
"13123": "Providencia",
"13124": "Pudahuel",
"13125": "Quilicura",
"13126": "Quinta Normal",
"13127": "Recoleta",
"13128": "Renca",
"13129": "San Joaquín",
"13130": "San Miguel",
"13131": "San Ramón",
"13132": "Vitacura",
"13201": "Puente Alto",
"13202": "Pirque",
"13203": "San José de Maipo",
"13301": "Colina",
"13302": "Lampa",
"13303": "Tiltil",
"13401": "San Bernardo",
"13402": "Buin",
"13403": "Calera de Tango",
"13404": "Paine",
"13501": "Melipilla",
"13502": "Alhué",
"13503": "Curacaví",
"13504": "María Pinto",
"13505": "San Pedro",
"13601": "Talagante",
"13602": "El Monte",
"13603": "Isla de Maipo",
"13604": "Padre Hurtado",
"13605": "Peñaflor",
}
street_prefixes = OrderedDict(
[
("Calle", 0.6),
("Avenida", 0.1),
("Avda.", 0.1),
("Av.", 0.1),
("Pasaje", 0.04),
("Psje.", 0.04),
("Camino", 0.02),
]
)
street_suffixes = (
"Norte",
"Sur",
)
city_formats = ("{{city}}",)
street_name_formats = (
"{{street_prefix}} {{common_street_name}}",
"{{street_prefix}} {{historic_people_street_name}}",
"{{street_prefix}} {{first_name_male}} {{last_name}}",
"{{street_prefix}} {{first_name_female}} {{last_name}}",
"{{street_prefix}} {{plant_street_name}}",
"{{common_street_name}}",
"{{historic_people_street_name}}",
"{{plant_street_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
)
building_number_formats = OrderedDict(
[
("%###", 0.35),
("%##", 0.35),
("%#", 0.25),
("%", 0.05),
]
)
street_address_formats = (
"{{street_name}} {{building_number}}",
"{{street_name}} {{building_number}} {{secondary_address}}",
)
address_formats = OrderedDict(
[
("{{street_address}}\n{{commune_and_region}}, {{postcode}}", 0.4),
("{{street_address}}\n{{commune_and_region}}", 0.4),
("{{highway_name}}, km {{random_int:big_kilometer}}", 0.1),
("{{road_name}}, km {{random_int:kilometer}}, {{region}}", 0.1),
]
)
secondary_address_formats = ("Dpto. @@##", "Piso @#", "Of. %##@")
common_street_names = OrderedDict(
[
("Arturo Prat", 0.118812),
("Esmeralda", 0.107261),
("Manuel Rodríguez", 0.105611),
("Gabriela Mistral", 0.104785),
("Los Aromos", 0.104785),
("Las Rosas", 0.098185),
("Caupolicán", 0.094884),
("Lautaro", 0.094059),
("Los Alerces", 0.086634),
("Los Copihues", 0.084983),
]
)
# Some chilean historic people. Full names come first, then its variants
historic_people_street_names = (
("Alonso de Ercilla",),
("Alonso de Ribera",),
("Álvaro Casanova", "Casanova"),
("Aníbal Pinto Garmendia", "Aníbal Pinto"),
("Antonio Varas",),
("Arturo Alessandri Palma", "Arturo Alessandri"),
("Benjamín Vicuña Mackenna", "Vicuña Mackenna", "Mackenna"),
("Bernardo O'Higgins", "O'Higgins"),
("Camilo Henríquez",),
("Caupolicán",),
("Colo Colo",),
("Diego Barros Arana", "Barros Arana"),
("Diego Portales", "Portales"),
("Domingo Santa María", "Santa María"),
("Eliodoro Yáñez",),
("Enrique Mac Iver", "Mac Iver"),
("Eusebio Lillo",),
("Francisco Bilbao", "Bilbao"),
("José de San Martín", "San Martín"),
("José Manuel Balmaceda", "Balmaceda"),
("José Miguel Carrera",),
("José Victorino Lastarria", "Lastarria"),
("Juan Mackenna",),
("Lord Thomas Cochrane", "Lord Cochrane", "Cochrane"),
("Los Carrera",),
("Manuel Antonio Matta", "Matta"),
("Manuel Bulnes", "Bulnes"),
("Manuel José Irarrázaval", "Irarrázabal"),
("Manuel Montt",),
("Manuel Rodríguez",),
("Manuel Baquedano", "Baquedano"),
("Michimalonco",),
("Padre Alberto Hurtado", "Alberto Hurtado"),
("Patricio Lynch", "Lynch"),
("Paula Jaraquemada",),
("Pedro Aguirre Cerda",),
("Pedro de Valdivia",),
("Pedro Montt",),
("Ramón Barros Luco", "Barros Luco"),
("Ramón Carnicer",),
("Ramón Freire", "Freire"),
("Ramón Picarte", "Picarte"),
("Salvador Allende Gossens", "Salvador Allende"),
("Santa Rosa",),
)
# Some streets are named by plants
plant_street_names: ElementsType[str] = (
"Los Cactus",
"Los Laureles",
"Los Piñones",
"Los Helechos",
"Los Higos",
"Los Abedules",
"Los Encinos",
"Los Palmitos",
"Los Naranjos",
"Los Robles",
"Los Pinos",
"Los Coihues",
"Los Calafates",
"Los Digitales",
"Los Lirios",
"Los Tilos",
"Los Girasoles",
"Las Azucenas",
"Las Lilas",
"Las Hortensias",
"Las Margaritas",
"Las Maravillas",
"Las Manzanillas",
"Las Mandarinas",
"Las Araucarias",
"Las Mosquetas",
"Las Malvas",
"Las Mosquetas",
)
road_names = ("Ruta T-%#", "Ruta U-%##", "Ruta %##-CH")
highway_names = ("Ruta 5 Norte", "Ruta 5 Sur")
def commune(self) -> str:
return self.random_element(self.communes.values())
def province(self) -> str:
return self.random_element(self.provinces.values())
def region(self) -> str:
return self.random_element(self.regions.values())
def commune_code(self) -> str:
return self.random_element(self.communes.keys())
def province_code(self) -> str:
return self.random_element(self.provinces.keys())
def region_code(self) -> str:
return self.random_element(self.regions.keys())
def common_street_name(self) -> str:
return self.random_element(self.common_street_names)
def plant_street_name(self) -> str:
return self.random_element(self.plant_street_names)
def historic_people_street_name(self) -> str:
person_names: Tuple[str, ...] = self.random_element(self.historic_people_street_names)
return self.random_element(person_names)
def street_prefix(self) -> str:
return self.random_element(self.street_prefixes)
def secondary_address(self) -> str:
return self.numerify(self.random_element(self.secondary_address_formats))
def commune_and_region(self) -> str:
commune_code = self.commune_code()
commune_name = self.communes[commune_code]
region_index = int(commune_code[0:2]) - 1
region_name = tuple(self.regions.values())[region_index]
return f"{commune_name:s}, {region_name:s}"
def road_name(self) -> str:
self.generator.set_arguments("kilometer", {"min": 1, "max": 35})
return self.numerify(self.generator.parse(self.random_element(self.road_names)))
def highway_name(self) -> str:
self.generator.set_arguments("big_kilometer", {"min": 1, "max": 1000})
return self.numerify(self.generator.parse(self.random_element(self.highway_names)))
def postcode(self) -> str:
return self.numerify("######0")
administrative_unit = region
city = commune
| Provider |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-braintree/source_braintree/schemas/cards.py | {
"start": 1722,
"end": 2085
} | class ____(CreditCard):
"""
https://developer.paypal.com/braintree/docs/reference/response/android-pay-card
"""
google_transaction_id: str
source_card_type: str
source_description: str
is_network_tokenized: bool
source_card_last_4: str
source_card_type: str
virtual_card_last_4: str
virtual_card_type: str
| AndroidPayCard |
python | allegroai__clearml | clearml/utilities/requests_toolbelt/multipart/decoder.py | {
"start": 850,
"end": 1990
} | class ____(object):
"""
The ``BodyPart`` object is a ``Response``-like interface to an individual
subpart of a multipart response. It is expected that these will
generally be created by objects of the ``MultipartDecoder`` class.
Like ``Response``, there is a ``CaseInsensitiveDict`` object named headers,
``content`` to access bytes, ``text`` to access unicode, and ``encoding``
to access the unicode codec.
"""
def __init__(self, content, encoding):
self.encoding = encoding
headers = {}
# Split into header section (if any) and the content
if b'\r\n\r\n' in content:
first, self.content = _split_on_find(content, b'\r\n\r\n')
if first != b'':
headers = _header_parser(first.lstrip(), encoding)
else:
raise ImproperBodyPartContentException(
'content does not contain CR-LF-CR-LF'
)
self.headers = CaseInsensitiveDict(headers)
@property
def text(self):
"""Content of the ``BodyPart`` in unicode."""
return self.content.decode(self.encoding)
| BodyPart |
python | getsentry__sentry | src/sentry/analytics/events/api_token_created.py | {
"start": 74,
"end": 183
} | class ____(analytics.Event):
user_id: int | None = None
analytics.register(ApiTokenCreated)
| ApiTokenCreated |
python | joblib__joblib | joblib/numpy_pickle_compat.py | {
"start": 4163,
"end": 5505
} | class ____(NDArrayWrapper):
"""An object to be persisted instead of numpy arrays.
This object store the Zfile filename in which
the data array has been persisted, and the meta information to
retrieve it.
The reason that we store the raw buffer data of the array and
the meta information, rather than array representation routine
(tobytes) is that it enables us to use completely the strided
model to avoid memory copies (a and a.T store as fast). In
addition saving the heavy information separately can avoid
creating large temporary buffers when unpickling data with
large arrays.
"""
def __init__(self, filename, init_args, state):
"""Constructor. Store the useful information for later."""
self.filename = filename
self.state = state
self.init_args = init_args
def read(self, unpickler):
"""Reconstruct the array from the meta-information and the z-file."""
# Here we are simply reproducing the unpickling mechanism for numpy
# arrays
filename = os.path.join(unpickler._dirname, self.filename)
array = _reconstruct(*self.init_args)
with open(filename, "rb") as f:
data = read_zfile(f)
state = self.state + (data,)
array.__setstate__(state)
return array
| ZNDArrayWrapper |
python | spack__spack | lib/spack/spack/vendor/markupsafe/__init__.py | {
"start": 7825,
"end": 8934
} | class ____:
"""Helper for :meth:`Markup.__mod__`."""
__slots__ = ("obj", "escape")
def __init__(self, obj: t.Any, escape: t.Callable[[t.Any], Markup]) -> None:
self.obj = obj
self.escape = escape
def __getitem__(self, item: t.Any) -> "_MarkupEscapeHelper":
return _MarkupEscapeHelper(self.obj[item], self.escape)
def __str__(self) -> str:
return str(self.escape(self.obj))
def __repr__(self) -> str:
return str(self.escape(repr(self.obj)))
def __int__(self) -> int:
return int(self.obj)
def __float__(self) -> float:
return float(self.obj)
# circular import
try:
from ._speedups import escape as escape
from ._speedups import escape_silent as escape_silent
from ._speedups import soft_str as soft_str
from ._speedups import soft_unicode
except ImportError:
from ._native import escape as escape
from ._native import escape_silent as escape_silent # noqa: F401
from ._native import soft_str as soft_str # noqa: F401
from ._native import soft_unicode # noqa: F401
| _MarkupEscapeHelper |
python | readthedocs__readthedocs.org | readthedocs/api/v3/serializers.py | {
"start": 30425,
"end": 30808
} | class ____(ProjectSerializer):
"""
Serializer to render a Project when listed under ProjectRelationship.
It's exactly the same as ``ProjectSerializer`` but without some fields.
"""
class Meta(ProjectSerializer.Meta):
fields = [
field for field in ProjectSerializer.Meta.fields if field not in ["subproject_of"]
]
| ChildProjectSerializer |
python | qdrant__qdrant-client | qdrant_client/local/sparse_distances.py | {
"start": 2449,
"end": 10351
} | class ____:
def __init__(self, context_pairs: list[SparseContextPair]):
self.context_pairs = context_pairs
def transform_sparse(
self, foo: Callable[["SparseVector"], "SparseVector"]
) -> "SparseContextQuery":
return SparseContextQuery(
context_pairs=[
SparseContextPair(foo(pair.positive), foo(pair.negative))
for pair in self.context_pairs
]
)
SparseQueryVector = Union[
SparseVector,
SparseDiscoveryQuery,
SparseContextQuery,
SparseRecoQuery,
]
def calculate_distance_sparse(
query: SparseVector, vectors: list[SparseVector], empty_is_zero: bool = False
) -> types.NumpyArray:
"""Calculate distances between a query sparse vector and a list of sparse vectors.
Args:
query (SparseVector): The query sparse vector.
vectors (list[SparseVector]): A list of sparse vectors to compare against.
empty_is_zero (bool): If True, distance between vectors with no overlap is treated as zero.
Otherwise, it is treated as negative infinity.
Simple nearest search requires `empty_is_zero` to be False, while methods like
recommend, discovery, and context search require True.
"""
scores = []
for vector in vectors:
score = sparse_dot_product(query, vector)
if score is not None:
scores.append(score)
elif not empty_is_zero:
# means no overlap
scores.append(np.float32("-inf"))
else:
scores.append(np.float32(0.0))
return np.array(scores, dtype=np.float32)
# Expects sorted indices
# Returns None if no overlap
def sparse_dot_product(vector1: SparseVector, vector2: SparseVector) -> Optional[np.float32]:
result = 0.0
i, j = 0, 0
overlap = False
assert is_sorted(vector1), "Query sparse vector must be sorted"
assert is_sorted(vector2), "Sparse vector to compare with must be sorted"
while i < len(vector1.indices) and j < len(vector2.indices):
if vector1.indices[i] == vector2.indices[j]:
overlap = True
result += vector1.values[i] * vector2.values[j]
i += 1
j += 1
elif vector1.indices[i] < vector2.indices[j]:
i += 1
else:
j += 1
if overlap:
return np.float32(result)
else:
return None
def calculate_sparse_discovery_ranks(
context: list[SparseContextPair],
vectors: list[SparseVector],
) -> types.NumpyArray:
overall_ranks: types.NumpyArray = np.zeros(len(vectors), dtype=np.int32)
for pair in context:
# Get distances to positive and negative vectors
pos = calculate_distance_sparse(pair.positive, vectors, empty_is_zero=True)
neg = calculate_distance_sparse(pair.negative, vectors, empty_is_zero=True)
pair_ranks = np.array(
[
1 if is_bigger else 0 if is_equal else -1
for is_bigger, is_equal in zip(pos > neg, pos == neg)
]
)
overall_ranks += pair_ranks
return overall_ranks
def calculate_sparse_discovery_scores(
query: SparseDiscoveryQuery, vectors: list[SparseVector]
) -> types.NumpyArray:
ranks = calculate_sparse_discovery_ranks(query.context, vectors)
# Get distances to target
distances_to_target = calculate_distance_sparse(query.target, vectors, empty_is_zero=True)
sigmoided_distances = np.fromiter(
(scaled_fast_sigmoid(xi) for xi in distances_to_target), np.float32
)
return ranks + sigmoided_distances
def calculate_sparse_context_scores(
query: SparseContextQuery, vectors: list[SparseVector]
) -> types.NumpyArray:
overall_scores: types.NumpyArray = np.zeros(len(vectors), dtype=np.float32)
for pair in query.context_pairs:
# Get distances to positive and negative vectors
pos = calculate_distance_sparse(pair.positive, vectors, empty_is_zero=True)
neg = calculate_distance_sparse(pair.negative, vectors, empty_is_zero=True)
difference = pos - neg - EPSILON
pair_scores = np.fromiter(
(fast_sigmoid(xi) for xi in np.minimum(difference, 0.0)), np.float32
)
overall_scores += pair_scores
return overall_scores
def calculate_sparse_recommend_best_scores(
query: SparseRecoQuery, vectors: list[SparseVector]
) -> types.NumpyArray:
def get_best_scores(examples: list[SparseVector]) -> types.NumpyArray:
vector_count = len(vectors)
# Get scores to all examples
scores: list[types.NumpyArray] = []
for example in examples:
score = calculate_distance_sparse(example, vectors, empty_is_zero=True)
scores.append(score)
# Keep only max for each vector
if len(scores) == 0:
scores.append(np.full(vector_count, -np.inf))
best_scores = np.array(scores, dtype=np.float32).max(axis=0)
return best_scores
pos = get_best_scores(query.positive)
neg = get_best_scores(query.negative)
# Choose from best positive or best negative,
# in both cases we apply sigmoid and then negate depending on the order
return np.where(
pos > neg,
np.fromiter((scaled_fast_sigmoid(xi) for xi in pos), pos.dtype),
np.fromiter((-scaled_fast_sigmoid(xi) for xi in neg), neg.dtype),
)
def calculate_sparse_recommend_sum_scores(
query: SparseRecoQuery, vectors: list[SparseVector]
) -> types.NumpyArray:
def get_sum_scores(examples: list[SparseVector]) -> types.NumpyArray:
vector_count = len(vectors)
scores: list[types.NumpyArray] = []
for example in examples:
score = calculate_distance_sparse(example, vectors, empty_is_zero=True)
scores.append(score)
if len(scores) == 0:
scores.append(np.zeros(vector_count))
sum_scores = np.array(scores, dtype=np.float32).sum(axis=0)
return sum_scores
pos = get_sum_scores(query.positive)
neg = get_sum_scores(query.negative)
return pos - neg
# Expects sorted indices
def combine_aggregate(vector1: SparseVector, vector2: SparseVector, op: Callable) -> SparseVector:
result = empty_sparse_vector()
i, j = 0, 0
while i < len(vector1.indices) and j < len(vector2.indices):
if vector1.indices[i] == vector2.indices[j]:
result.indices.append(vector1.indices[i])
result.values.append(op(vector1.values[i], vector2.values[j]))
i += 1
j += 1
elif vector1.indices[i] < vector2.indices[j]:
result.indices.append(vector1.indices[i])
result.values.append(op(vector1.values[i], 0.0))
i += 1
else:
result.indices.append(vector2.indices[j])
result.values.append(op(0.0, vector2.values[j]))
j += 1
while i < len(vector1.indices):
result.indices.append(vector1.indices[i])
result.values.append(op(vector1.values[i], 0.0))
i += 1
while j < len(vector2.indices):
result.indices.append(vector2.indices[j])
result.values.append(op(0.0, vector2.values[j]))
j += 1
return result
# Expects sorted indices
def sparse_avg(vectors: Sequence[SparseVector]) -> SparseVector:
result = empty_sparse_vector()
if len(vectors) == 0:
return result
sparse_count = 0
for vector in vectors:
sparse_count += 1
result = combine_aggregate(result, vector, lambda v1, v2: v1 + v2)
result.values = np.divide(result.values, sparse_count).tolist()
return result
# Expects sorted indices
def merge_positive_and_negative_avg(
positive: SparseVector, negative: SparseVector
) -> SparseVector:
return combine_aggregate(positive, negative, lambda pos, neg: pos + pos - neg)
| SparseContextQuery |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/snap/dep_snapshot.py | {
"start": 6651,
"end": 7744
} | class ____(
NamedTuple(
"_InputDependencySnap",
[
("input_name", str),
("upstream_output_snaps", Sequence[OutputHandleSnap]),
("is_dynamic_collect", bool),
],
)
):
def __new__(
cls,
input_name: str,
upstream_output_snaps: Sequence[OutputHandleSnap],
is_dynamic_collect: bool = False,
):
return super().__new__(
cls,
input_name=check.str_param(input_name, "input_name"),
upstream_output_snaps=check.sequence_param(
upstream_output_snaps, "upstream_output_snaps", of_type=OutputHandleSnap
),
# Could be derived from a dependency type enum as well
# if we wanted to persist that
is_dynamic_collect=check.bool_param(is_dynamic_collect, "is_dynamic_collect"),
)
# Use old names in storage for backcompat
@whitelist_for_serdes(
storage_name="SolidInvocationSnap",
storage_field_names={"node_name": "solid_name", "node_def_name": "solid_def_name"},
)
| InputDependencySnap |
python | tensorflow__tensorflow | tensorflow/python/util/deprecation_test.py | {
"start": 40271,
"end": 40975
} | class ____(test.TestCase):
def testSingleDeprecatedEndpoint(self):
@deprecation.deprecated_endpoints("foo1")
def foo():
pass
self.assertEqual(("foo1",), foo._tf_deprecated_api_names)
def testMultipleDeprecatedEndpoint(self):
@deprecation.deprecated_endpoints("foo1", "foo2")
def foo():
pass
self.assertEqual(("foo1", "foo2"), foo._tf_deprecated_api_names)
def testCannotSetDeprecatedEndpointsTwice(self):
with self.assertRaises(deprecation.DeprecatedNamesAlreadySetError):
@deprecation.deprecated_endpoints("foo1")
@deprecation.deprecated_endpoints("foo2")
def foo(): # pylint: disable=unused-variable
pass
| DeprecatedEndpointsTest |
python | astropy__astropy | astropy/units/tests/test_logarithmic.py | {
"start": 7638,
"end": 11708
} | class ____:
@pytest.mark.parametrize("physical_unit", pu_sample)
@pytest.mark.parametrize("lu_unit", lu_units)
def test_physical_unit_conversion(self, lu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to their non-log counterparts."""
lu1 = lu_unit(physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(physical_unit, 0.0) == 1.0
assert physical_unit.is_equivalent(lu1)
assert physical_unit.to(lu1, 1.0) == 0.0
pu = u.Unit(8.0 * physical_unit)
assert lu1.is_equivalent(physical_unit)
assert lu1.to(pu, 0.0) == 0.125
assert pu.is_equivalent(lu1)
assert_allclose(pu.to(lu1, 0.125), 0.0, atol=1.0e-15)
# Check we round-trip.
value = np.linspace(0.0, 10.0, 6)
assert_allclose(pu.to(lu1, lu1.to(pu, value)), value, atol=1.0e-15)
# And that we're not just returning True all the time.
pu2 = u.g
assert not lu1.is_equivalent(pu2)
with pytest.raises(u.UnitsError):
lu1.to(pu2)
assert not pu2.is_equivalent(lu1)
with pytest.raises(u.UnitsError):
pu2.to(lu1)
@pytest.mark.parametrize("lu_unit", lu_units)
def test_container_unit_conversion(self, lu_unit):
"""Check that conversion to logarithmic units (u.mag, u.dB, u.dex)
is only possible when the physical unit is dimensionless."""
values = np.linspace(0.0, 10.0, 6)
lu1 = lu_unit(u.dimensionless_unscaled)
assert lu1.is_equivalent(lu1.function_unit)
assert_allclose(lu1.to(lu1.function_unit, values), values)
lu2 = lu_unit(u.Jy)
assert not lu2.is_equivalent(lu2.function_unit)
with pytest.raises(u.UnitsError):
lu2.to(lu2.function_unit, values)
@pytest.mark.parametrize("physical_unit", pu_sample)
@pytest.mark.parametrize("tlu_unit", lu_units)
@pytest.mark.parametrize("flu_unit", lu_units)
def test_subclass_conversion(self, flu_unit, tlu_unit, physical_unit):
"""Check various LogUnit subclasses are equivalent and convertible
to each other if they correspond to equivalent physical units."""
values = np.linspace(0.0, 10.0, 6)
flu = flu_unit(physical_unit)
tlu = tlu_unit(physical_unit)
assert flu.is_equivalent(tlu)
assert_allclose(flu.to(tlu), flu.function_unit.to(tlu.function_unit))
assert_allclose(
flu.to(tlu, values), values * flu.function_unit.to(tlu.function_unit)
)
tlu2 = tlu_unit(u.Unit(100.0 * physical_unit))
assert flu.is_equivalent(tlu2)
# Check that we round-trip.
assert_allclose(flu.to(tlu2, tlu2.to(flu, values)), values, atol=1.0e-15)
tlu3 = tlu_unit(physical_unit.to_system(u.si)[0])
assert flu.is_equivalent(tlu3)
assert_allclose(flu.to(tlu3, tlu3.to(flu, values)), values, atol=1.0e-15)
tlu4 = tlu_unit(u.g)
assert not flu.is_equivalent(tlu4)
with pytest.raises(u.UnitsError):
flu.to(tlu4, values)
def test_unit_decomposition(self):
lu = u.mag(u.Jy)
assert lu.decompose() == u.mag(u.Jy.decompose())
assert lu.decompose().physical_unit.bases == [u.kg, u.s]
assert lu.si == u.mag(u.Jy.si)
assert lu.si.physical_unit.bases == [u.kg, u.s]
assert lu.cgs == u.mag(u.Jy.cgs)
assert lu.cgs.physical_unit.bases == [u.g, u.s]
def test_unit_multiple_possible_equivalencies(self):
lu = u.mag(u.Jy)
assert lu.is_equivalent(pu_sample)
def test_magnitude_conversion_fails_message(self):
"""Check that "dimensionless" magnitude units include a message in their
exception text suggesting a possible cause of the problem.
"""
with pytest.raises(
u.UnitConversionError,
match="Did you perhaps subtract magnitudes so the unit got lost?",
):
(10 * u.ABmag - 2 * u.ABmag).to(u.nJy)
| TestLogUnitConversion |
python | python__mypy | mypyc/ir/ops.py | {
"start": 45649,
"end": 46782
} | class ____(RegisterOp):
"""Binary float arithmetic op (e.g., r1 = r2 + r3).
These ops are low-level and are similar to the corresponding C
operations (and somewhat different from Python operations).
The left and right values must be floats.
"""
error_kind = ERR_NEVER
ADD: Final = 0
SUB: Final = 1
MUL: Final = 2
DIV: Final = 3
MOD: Final = 4
op_str: Final = {ADD: "+", SUB: "-", MUL: "*", DIV: "/", MOD: "%"}
def __init__(self, lhs: Value, rhs: Value, op: int, line: int = -1) -> None:
super().__init__(line)
self.type = float_rprimitive
self.lhs = lhs
self.rhs = rhs
self.op = op
def sources(self) -> list[Value]:
return [self.lhs, self.rhs]
def set_sources(self, new: list[Value]) -> None:
(self.lhs, self.rhs) = new
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_float_op(self)
# We can't have this in the FloatOp class body, because of
# https://github.com/mypyc/mypyc/issues/932.
float_op_to_id: Final = {op: op_id for op_id, op in FloatOp.op_str.items()}
@final
| FloatOp |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 11861,
"end": 12048
} | class ____(SubclassSelectorAbstractBaseModel):
abstract_field = models.CharField(max_length=30, default="test_af")
class Meta:
abstract = True
| SubclassSelectorAbstractModel |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/idna/codec.py | {
"start": 1805,
"end": 2885
} | class ____(codecs.BufferedIncrementalDecoder):
def _buffer_decode(self, data: Any, errors: str, final: bool) -> Tuple[str, int]:
if errors != 'strict':
raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
if not data:
return ('', 0)
if not isinstance(data, str):
data = str(data, 'ascii')
labels = _unicode_dots_re.split(data)
trailing_dot = ''
if labels:
if not labels[-1]:
trailing_dot = '.'
del labels[-1]
elif not final:
# Keep potentially unfinished label until the next call
del labels[-1]
if labels:
trailing_dot = '.'
result = []
size = 0
for label in labels:
result.append(ulabel(label))
if size:
size += 1
size += len(label)
result_str = '.'.join(result) + trailing_dot
size += len(trailing_dot)
return (result_str, size)
| IncrementalDecoder |
python | walkccc__LeetCode | solutions/149. Max Points on a Line/149.py | {
"start": 0,
"end": 899
} | class ____:
def maxPoints(self, points: list[list[int]]) -> int:
ans = 0
def gcd(a: int, b: int) -> int:
return a if b == 0 else gcd(b, a % b)
def getSlope(p: list[int], q: list[int]) -> tuple[int, int]:
dx = p[0] - q[0]
dy = p[1] - q[1]
if dx == 0:
return (0, p[0])
if dy == 0:
return (p[1], 0)
d = gcd(dx, dy)
return (dx // d, dy // d)
for i, p in enumerate(points):
slopeCount = collections.defaultdict(int)
samePoints = 1
maxPoints = 0 # the maximum number of points with the same slope
for j in range(i + 1, len(points)):
q = points[j]
if p == q:
samePoints += 1
else:
slope = getSlope(p, q)
slopeCount[slope] += 1
maxPoints = max(maxPoints, slopeCount[slope])
ans = max(ans, samePoints + maxPoints)
return ans
| Solution |
python | skorch-dev__skorch | skorch/_version.py | {
"start": 7833,
"end": 14342
} | class ____(_BaseVersion):
_regex = re.compile(
r"^\s*" + VERSION_PATTERN + r"\s*$",
re.VERBOSE | re.IGNORECASE,
)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(
match.group("pre_l"),
match.group("pre_n"),
),
post=_parse_letter_version(
match.group("post_l"),
match.group("post_n1") or match.group("post_n2"),
),
dev=_parse_letter_version(
match.group("dev_l"),
match.group("dev_n"),
),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
# Pre-release
if self._version.pre is not None:
parts.append("".join(str(x) for x in self._version.pre))
# Post-release
if self._version.post is not None:
parts.append(".post{0}".format(self._version.post[1]))
# Development release
if self._version.dev is not None:
parts.append(".dev{0}".format(self._version.dev[1]))
# Local version segment
if self._version.local is not None:
parts.append(
"+{0}".format(".".join(str(x) for x in self._version.local))
)
return "".join(parts)
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self._version.epoch != 0:
parts.append("{0}!".format(self._version.epoch))
# Release segment
parts.append(".".join(str(x) for x in self._version.release))
return "".join(parts)
@property
def local(self):
version_string = str(self)
if "+" in version_string:
return version_string.split("+", 1)[1]
@property
def is_prerelease(self):
return bool(self._version.dev or self._version.pre)
@property
def is_postrelease(self):
return bool(self._version.post)
def _parse_letter_version(letter, number):
if letter:
# We assume there is an implicit 0 in a pre-release if there is
# no numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower-case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume that if we are given a number but not given a letter,
# then this is using the implicit post release syntax (e.g., 1.0-1)
letter = "post"
return letter, int(number)
_local_version_seperators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_seperators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we'll use a reverse the list, drop all the now
# leading zeros until we come to something non-zero, then take the rest,
# re-reverse it back into the correct order, and make it a tuple and use
# that for our sorting key.
release = tuple(
reversed(list(
itertools.dropwhile(
lambda x: x == 0,
reversed(release),
)
))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre-segment, but we _only_ want to do this
# if there is no pre- or a post-segment. If we have one of those, then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post-segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alphanumeric segments sort before numeric segments
# - Alphanumeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple(
(i, "") if isinstance(i, int) else (-Infinity, i)
for i in local
)
return epoch, release, pre, post, dev, local
| Version |
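A hedged usage sketch for the PEP 440 `Version` class above, assuming the module-level pieces this record does not show (`VERSION_PATTERN`, `_Version`, `_BaseVersion`, `InvalidVersion`, `Infinity`, plus the `re` and `itertools` imports) are present as in the original vendored packaging module:

v = Version("1.2.3.dev1+local.7")
print(v.public)          # "1.2.3.dev1" -- everything before the "+"
print(v.base_version)    # "1.2.3" -- epoch and release segment only
print(v.local)           # "local.7"
print(v.is_prerelease)   # True, because of the .dev segment

# _cmpkey orders dev releases before pre-releases before final releases.
print(Version("1.0.dev0") < Version("1.0a1") < Version("1.0"))  # True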
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 58418,
"end": 59535
} | class ____(Operation):
def __init__(self, shape, *, name=None):
super().__init__(name=name)
self.shape = shape
def call(self, x):
return backend.numpy.broadcast_to(x, self.shape)
def compute_output_spec(self, x):
# Catch broadcasting errors for clear error messages.
broadcast_shapes(x.shape, self.shape)
return KerasTensor(self.shape, dtype=x.dtype)
@keras_export(
[
"keras.ops.broadcast_to",
"keras.ops.numpy.broadcast_to",
]
)
def broadcast_to(x, shape):
"""Broadcast a tensor to a new shape.
Args:
x: The tensor to broadcast.
shape: The shape of the desired tensor. A single integer `i` is
interpreted as `(i,)`.
Returns:
A tensor with the desired shape.
Examples:
>>> x = keras.ops.array([1, 2, 3])
>>> keras.ops.broadcast_to(x, (3, 3))
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
"""
if any_symbolic_tensors((x,)):
return BroadcastTo(shape=shape).symbolic_call(x)
return backend.numpy.broadcast_to(x, shape)
| BroadcastTo |
python | sqlalchemy__sqlalchemy | test/sql/test_compiler.py | {
"start": 223003,
"end": 224317
} | class ____(fixtures.TestBase):
@classmethod
def setup_test_class(cls):
class CatchCol(ColumnClause):
pass
class CatchTable(TableClause):
pass
cls.column = CatchCol("x")
cls.table = CatchTable("y")
cls.criterion = cls.column == CatchCol("y")
@compiles(CatchCol)
def compile_col(element, compiler, **kw):
assert "canary" in kw
return compiler.visit_column(element)
@compiles(CatchTable)
def compile_table(element, compiler, **kw):
assert "canary" in kw
return compiler.visit_table(element)
def _do_test(self, element):
d = default.DefaultDialect()
d.statement_compiler(d, element, compile_kwargs={"canary": True})
def test_binary(self):
self._do_test(self.column == 5)
def test_select(self):
s = (
select(self.column)
.select_from(self.table)
.where(self.column == self.criterion)
.order_by(self.column)
)
self._do_test(s)
def test_case(self):
c = case((self.criterion, self.column), else_=self.column)
self._do_test(c)
def test_cast(self):
c = cast(self.column, Integer)
self._do_test(c)
| KwargPropagationTest |
python | getsentry__sentry | src/sentry/api/endpoints/rule_snooze.py | {
"start": 2838,
"end": 3050
} | class ____(CamelSnakeSerializer):
target = serializers.CharField(required=True, allow_null=False)
until = serializers.DateTimeField(required=False, allow_null=True)
@register(RuleSnooze)
| RuleSnoozeValidator |
python | oauthlib__oauthlib | tests/oauth2/rfc6749/clients/test_service_application.py | {
"start": 229,
"end": 7569
} | class ____(TestCase):
gt = ServiceApplicationClient.grant_type
private_key = """
-----BEGIN RSA PRIVATE KEY-----
MIICXgIBAAKBgQDk1/bxyS8Q8jiheHeYYp/4rEKJopeQRRKKpZI4s5i+UPwVpupG
AlwXWfzXwSMaKPAoKJNdu7tqKRniqst5uoHXw98gj0x7zamu0Ck1LtQ4c7pFMVah
5IYGhBi2E9ycNS329W27nJPWNCbESTu7snVlG8V8mfvGGg3xNjTMO7IdrwIDAQAB
AoGBAOQ2KuH8S5+OrsL4K+wfjoCi6MfxCUyqVU9GxocdM1m30WyWRFMEz2nKJ8fR
p3vTD4w8yplTOhcoXdQZl0kRoaDzrcYkm2VvJtQRrX7dKFT8dR8D/Tr7dNQLOXfC
DY6xveQczE7qt7Vk7lp4FqmxBsaaEuokt78pOOjywZoInjZhAkEA9wz3zoZNT0/i
rf6qv2qTIeieUB035N3dyw6f1BGSWYaXSuerDCD/J1qZbAPKKhyHZbVawFt3UMhe
542UftBaxQJBAO0iJy1I8GQjGnS7B3yvyH3CcLYGy296+XO/2xKp/d/ty1OIeovx
C60pLNwuFNF3z9d2GVQAdoQ89hUkOtjZLeMCQQD0JO6oPHUeUjYT+T7ImAv7UKVT
Suy30sKjLzqoGw1kR+wv7C5PeDRvscs4wa4CW9s6mjSrMDkDrmCLuJDtmf55AkEA
kmaMg2PNrjUR51F0zOEFycaaqXbGcFwe1/xx9zLmHzMDXd4bsnwt9kk+fe0hQzVS
JzatanQit3+feev1PN3QewJAWv4RZeavEUhKv+kLe95Yd0su7lTLVduVgh4v5yLT
Ga6FHdjGPcfajt+nrpB1n8UQBEH9ZxniokR/IPvdMlxqXA==
-----END RSA PRIVATE KEY-----
"""
public_key = """
-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDk1/bxyS8Q8jiheHeYYp/4rEKJ
opeQRRKKpZI4s5i+UPwVpupGAlwXWfzXwSMaKPAoKJNdu7tqKRniqst5uoHXw98g
j0x7zamu0Ck1LtQ4c7pFMVah5IYGhBi2E9ycNS329W27nJPWNCbESTu7snVlG8V8
mfvGGg3xNjTMO7IdrwIDAQAB
-----END PUBLIC KEY-----
"""
subject = 'resource-owner@provider.com'
issuer = 'the-client@provider.com'
audience = 'https://provider.com/token'
client_id = "someclientid"
scope = ["/profile"]
kwargs = {
"some": "providers",
"require": "extra arguments"
}
body = "isnot=empty"
body_up = "not=empty&grant_type=%s" % gt
body_kwargs = body_up + "&some=providers&require=extra+arguments"
token_json = ('{ "access_token":"2YotnFZFEjr1zCsicMWpAA",'
' "token_type":"example",'
' "expires_in":3600,'
' "scope":"/profile",'
' "example_parameter":"example_value"}')
token = {
"access_token": "2YotnFZFEjr1zCsicMWpAA",
"token_type": "example",
"expires_in": 3600,
"scope": ["/profile"],
"example_parameter": "example_value"
}
@patch('time.time')
def test_request_body(self, t):
t.return_value = time()
self.token['expires_at'] = self.token['expires_in'] + t.return_value
client = ServiceApplicationClient(
self.client_id, private_key=self.private_key)
# Basic with min required params
body = client.prepare_request_body(issuer=self.issuer,
subject=self.subject,
audience=self.audience,
body=self.body)
r = Request('https://a.b', body=body)
self.assertEqual(r.isnot, 'empty')
self.assertEqual(r.grant_type, ServiceApplicationClient.grant_type)
claim = jwt.decode(r.assertion, self.public_key, audience=self.audience, algorithms=['RS256'])
self.assertEqual(claim['iss'], self.issuer)
# audience verification is handled during decode now
self.assertEqual(claim['sub'], self.subject)
self.assertEqual(claim['iat'], int(t.return_value))
self.assertNotIn('nbf', claim)
self.assertNotIn('jti', claim)
# Missing issuer parameter
self.assertRaises(ValueError, client.prepare_request_body,
issuer=None, subject=self.subject, audience=self.audience, body=self.body)
# Missing subject parameter
self.assertRaises(ValueError, client.prepare_request_body,
issuer=self.issuer, subject=None, audience=self.audience, body=self.body)
# Missing audience parameter
self.assertRaises(ValueError, client.prepare_request_body,
issuer=self.issuer, subject=self.subject, audience=None, body=self.body)
# Optional kwargs
not_before = time() - 3600
jwt_id = '8zd15df4s35f43sd'
body = client.prepare_request_body(issuer=self.issuer,
subject=self.subject,
audience=self.audience,
body=self.body,
not_before=not_before,
jwt_id=jwt_id)
r = Request('https://a.b', body=body)
self.assertEqual(r.isnot, 'empty')
self.assertEqual(r.grant_type, ServiceApplicationClient.grant_type)
claim = jwt.decode(r.assertion, self.public_key, audience=self.audience, algorithms=['RS256'])
self.assertEqual(claim['iss'], self.issuer)
# audience verification is handled during decode now
self.assertEqual(claim['sub'], self.subject)
self.assertEqual(claim['iat'], int(t.return_value))
self.assertEqual(claim['nbf'], not_before)
self.assertEqual(claim['jti'], jwt_id)
@patch('time.time')
def test_request_body_no_initial_private_key(self, t):
t.return_value = time()
self.token['expires_at'] = self.token['expires_in'] + t.return_value
client = ServiceApplicationClient(
self.client_id, private_key=None)
# Basic with private key provided
body = client.prepare_request_body(issuer=self.issuer,
subject=self.subject,
audience=self.audience,
body=self.body,
private_key=self.private_key)
r = Request('https://a.b', body=body)
self.assertEqual(r.isnot, 'empty')
self.assertEqual(r.grant_type, ServiceApplicationClient.grant_type)
claim = jwt.decode(r.assertion, self.public_key, audience=self.audience, algorithms=['RS256'])
self.assertEqual(claim['iss'], self.issuer)
# audience verification is handled during decode now
self.assertEqual(claim['sub'], self.subject)
self.assertEqual(claim['iat'], int(t.return_value))
# No private key provided
self.assertRaises(ValueError, client.prepare_request_body,
issuer=self.issuer, subject=self.subject, audience=self.audience, body=self.body)
@patch('time.time')
def test_parse_token_response(self, t):
t.return_value = time()
self.token['expires_at'] = self.token['expires_in'] + round(t.return_value)
client = ServiceApplicationClient(self.client_id)
# Parse code and state
response = client.parse_request_body_response(self.token_json, scope=self.scope)
self.assertEqual(response, self.token)
self.assertEqual(client.access_token, response.get("access_token"))
self.assertEqual(client.refresh_token, response.get("refresh_token"))
self.assertEqual(client.token_type, response.get("token_type"))
# Mismatching state
self.assertRaises(Warning, client.parse_request_body_response, self.token_json, scope="invalid")
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '2'
token = client.parse_request_body_response(self.token_json, scope="invalid")
self.assertTrue(token.scope_changed)
del os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE']
| ServiceApplicationClientTest |
python | apache__airflow | devel-common/src/tests_common/_internals/capture_warnings.py | {
"start": 4809,
"end": 10462
} | class ____:
"""Internal plugin for capture warnings during the tests run."""
node_key: str = "capture_warnings_node"
def __init__(self, config: pytest.Config, output_path: str | None = None):
output_path = output_path or os.environ.get("CAPTURE_WARNINGS_OUTPUT") or "warnings.txt"
warning_output_path = Path(os.path.expandvars(os.path.expandvars(output_path)))
if not warning_output_path.is_absolute():
warning_output_path = TESTS_DIR.joinpath(output_path)
self.warning_output_path = warning_output_path
self.config = config
self.root_path = config.rootpath
self.is_worker_node = hasattr(config, "workerinput")
self.captured_warnings: dict[CapturedWarning, int] = {}
def add_captured_warnings(self, cap_warning: list[CapturedWarning]) -> None:
for cw in cap_warning:
if cw not in self.captured_warnings:
self.captured_warnings[cw] = 1
else:
self.captured_warnings[cw] += 1
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_collection(self, session: pytest.Session):
with CapturedWarning.capture_warnings("collect", self.root_path, None) as records:
yield
self.add_captured_warnings(records)
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_load_initial_conftests(self, early_config: pytest.Config):
with CapturedWarning.capture_warnings("collect", self.root_path, None) as records:
yield
self.add_captured_warnings(records)
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_runtest_protocol(self, item: pytest.Item):
with CapturedWarning.capture_warnings("runtest", self.root_path, item.nodeid) as records:
yield
self.add_captured_warnings(records)
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_sessionfinish(self, session: pytest.Session, exitstatus: int):
"""Save warning captures in the session finish on xdist worker node."""
with CapturedWarning.capture_warnings("config", self.root_path, None) as records:
yield
self.add_captured_warnings(records)
if self.is_worker_node and self.captured_warnings and hasattr(self.config, "workeroutput"):
self.config.workeroutput[self.node_key] = tuple(
[(cw.dumps(), count) for cw, count in self.captured_warnings.items()]
)
@pytest.hookimpl(optionalhook=True)
def pytest_testnodedown(self, node, error):
"""Get warning captures from the xdist worker node."""
if not (workeroutput := getattr(node, "workeroutput", {})):
return
node_captured_warnings: tuple[tuple[str, int]] = workeroutput.get(self.node_key)
if not node_captured_warnings:
return
for cw_ser, count in node_captured_warnings:
if (cw := CapturedWarning.loads(cw_ser)) in self.captured_warnings:
self.captured_warnings[cw] += count
else:
self.captured_warnings[cw] = count
@staticmethod
def sorted_groupby(it, grouping_key: Callable):
"""Sort and group by items by the grouping_key."""
for group, grouped_data in itertools.groupby(sorted(it, key=grouping_key), key=grouping_key):
yield group, list(grouped_data)
@pytest.hookimpl(hookwrapper=True, trylast=True)
def pytest_terminal_summary(self, terminalreporter, exitstatus: int, config: pytest.Config):
with CapturedWarning.capture_warnings("collect", self.root_path, None) as records:
yield
self.add_captured_warnings(records)
if self.is_worker_node: # No need to print/write file on worker node
return
if self.warning_output_path.exists(): # Cleanup file.
self.warning_output_path.open("w").close()
if not self.captured_warnings:
return
if not self.warning_output_path.parent.exists():
self.warning_output_path.parent.mkdir(parents=True, exist_ok=True)
terminalreporter.section(
f"Warning summary. Total: {sum(self.captured_warnings.values()):,}, "
f"Unique: {len({cw.uniq_key for cw in self.captured_warnings}):,}",
yellow=True,
bold=True,
)
for group, grouped_data in self.sorted_groupby(self.captured_warnings.items(), lambda x: x[0].group):
color = {}
if group in ("airflow", "providers"):
color["red"] = True
elif group == "tests":
color["yellow"] = True
else:
color["white"] = True
terminalreporter.write(group, bold=True, **color)
terminalreporter.write(
f": total {sum(item[1] for item in grouped_data):,}, "
f"unique {len({item[0].uniq_key for item in grouped_data}):,}\n"
)
for when, when_data in self.sorted_groupby(grouped_data, lambda x: x[0].when):
terminalreporter.write(
f" {when}: total {sum(item[1] for item in when_data):,}, "
f"unique {len({item[0].uniq_key for item in when_data}):,}\n"
)
with self.warning_output_path.open("w") as fp:
for cw, count in self.captured_warnings.items():
fp.write(f"{cw.output(count)}\n")
terminalreporter.write("Warnings saved into ")
terminalreporter.write(os.fspath(self.warning_output_path), yellow=True)
terminalreporter.write(" file.\n")
| CaptureWarningsPlugin |
python | pypa__pip | src/pip/_internal/network/session.py | {
"start": 10698,
"end": 19188
} | class ____(requests.Session):
timeout: int | None = None
def __init__(
self,
*args: Any,
retries: int = 0,
cache: str | None = None,
trusted_hosts: Sequence[str] = (),
index_urls: list[str] | None = None,
ssl_context: SSLContext | None = None,
**kwargs: Any,
) -> None:
"""
:param trusted_hosts: Domains not to emit warnings for when not using
HTTPS.
"""
super().__init__(*args, **kwargs)
# Namespace the attribute with "pip_" just in case to prevent
# possible conflicts with the base class.
self.pip_trusted_origins: list[tuple[str, int | None]] = []
self.pip_proxy = None
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth(index_urls=index_urls)
# Create our urllib3.Retry instance which will allow us to customize
# how we handle retries.
retries = urllib3.Retry(
# Set the total number of retries that a particular request can
# have.
total=retries,
# A 503 error from PyPI typically means that the Fastly -> Origin
# connection got interrupted in some way. A 503 error in general
# is typically considered a transient error so we'll go ahead and
# retry it.
# A 500 may indicate transient error in Amazon S3
# A 502 may be a transient error from a CDN like CloudFlare or CloudFront
# A 520 or 527 - may indicate transient error in CloudFlare
status_forcelist=[500, 502, 503, 520, 527],
# Add a small amount of back off between failed requests in
# order to prevent hammering the service.
backoff_factor=0.25,
) # type: ignore
# Our Insecure HTTPAdapter disables HTTPS validation. It does not
# support caching so we'll use it for all http:// URLs.
# If caching is disabled, we will also use it for
# https:// hosts that we've marked as ignoring
# TLS errors for (trusted-hosts).
insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
# We want to _only_ cache responses on securely fetched origins or when
# the host is specified as trusted. We do this because
# we can't validate the response of an insecurely/untrusted fetched
# origin, and we don't want someone to be able to poison the cache and
# require manual eviction from the cache to fix it.
if cache:
secure_adapter = CacheControlAdapter(
cache=SafeFileCache(cache),
max_retries=retries,
ssl_context=ssl_context,
)
self._trusted_host_adapter = InsecureCacheControlAdapter(
cache=SafeFileCache(cache),
max_retries=retries,
)
else:
secure_adapter = HTTPAdapter(max_retries=retries, ssl_context=ssl_context)
self._trusted_host_adapter = insecure_adapter
self.mount("https://", secure_adapter)
self.mount("http://", insecure_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
for host in trusted_hosts:
self.add_trusted_host(host, suppress_logging=True)
def update_index_urls(self, new_index_urls: list[str]) -> None:
"""
:param new_index_urls: New index urls to update the authentication
handler with.
"""
self.auth.index_urls = new_index_urls
def add_trusted_host(
self, host: str, source: str | None = None, suppress_logging: bool = False
) -> None:
"""
:param host: It is okay to provide a host that has previously been
added.
:param source: An optional source string, for logging where the host
string came from.
"""
if not suppress_logging:
msg = f"adding trusted host: {host!r}"
if source is not None:
msg += f" (from {source})"
logger.info(msg)
parsed_host, parsed_port = parse_netloc(host)
if parsed_host is None:
raise ValueError(f"Trusted host URL must include a host part: {host!r}")
if (parsed_host, parsed_port) not in self.pip_trusted_origins:
self.pip_trusted_origins.append((parsed_host, parsed_port))
self.mount(
build_url_from_netloc(host, scheme="http") + "/", self._trusted_host_adapter
)
self.mount(build_url_from_netloc(host) + "/", self._trusted_host_adapter)
if not parsed_port:
self.mount(
build_url_from_netloc(host, scheme="http") + ":",
self._trusted_host_adapter,
)
# Mount wildcard ports for the same host.
self.mount(build_url_from_netloc(host) + ":", self._trusted_host_adapter)
def iter_secure_origins(self) -> Generator[SecureOrigin, None, None]:
yield from SECURE_ORIGINS
for host, port in self.pip_trusted_origins:
yield ("*", host, "*" if port is None else port)
def is_secure_origin(self, location: Link) -> bool:
# Determine if this url used a secure transport mechanism
parsed = urllib.parse.urlparse(str(location))
origin_protocol, origin_host, origin_port = (
parsed.scheme,
parsed.hostname,
parsed.port,
)
# The protocol to use to see if the protocol matches.
# Don't count the repository type as part of the protocol: in
# cases such as "git+ssh", only use "ssh". (I.e., Only verify against
# the last scheme.)
origin_protocol = origin_protocol.rsplit("+", 1)[-1]
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in self.iter_secure_origins():
secure_protocol, secure_host, secure_port = secure_origin
if origin_protocol != secure_protocol and secure_protocol != "*":
continue
try:
addr = ipaddress.ip_address(origin_host or "")
network = ipaddress.ip_network(secure_host)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if (
origin_host
and origin_host.lower() != secure_host.lower()
and secure_host != "*"
):
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
# Check to see if the port matches.
if (
origin_port != secure_port
and secure_port != "*"
and secure_port is not None
):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS we "
"recommend you use HTTPS instead, otherwise you may silence "
"this warning and allow it anyway with '--trusted-host %s'.",
origin_host,
origin_host,
)
return False
def request(self, method: str, url: str, *args: Any, **kwargs: Any) -> Response:
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Allow setting a default proxies on a session
kwargs.setdefault("proxies", self.proxies)
# Dispatch the actual request
return super().request(method, url, *args, **kwargs)
| PipSession |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_metaclass.py | {
"start": 1179,
"end": 1265
} | class ____(metaclass=invalid_metaclass_2): # [invalid-metaclass]
pass
| InvalidSecond |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/unbatch_test.py | {
"start": 1632,
"end": 9969
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testUnbatchWithUnknownRankInput(self):
dataset = dataset_ops.Dataset.from_tensors([0, 1, 2, 3]).unbatch()
self.assertDatasetProduces(dataset, range(4))
@combinations.generate(test_base.default_test_combinations())
def testUnbatchScalarDataset(self):
data = tuple([math_ops.range(10) for _ in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
expected_types = (dtypes.int32,) * 3
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(data, [(i,) * 3 for i in range(10)])
@combinations.generate(test_base.default_test_combinations())
def testUnbatchNestedDataset(self):
data = dataset_ops.Dataset.from_tensors(
[dataset_ops.Dataset.range(10) for _ in range(10)])
data = data.unbatch().flat_map(lambda x: x)
self.assertDatasetProduces(data, list(range(10)) * 10)
@combinations.generate(test_base.default_test_combinations())
def testUnbatchDatasetWithStrings(self):
data = tuple([math_ops.range(10) for _ in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(
data, [(i, compat.as_bytes(str(i)), i) for i in range(10)])
@combinations.generate(test_base.default_test_combinations())
def testUnbatchDatasetWithSparseTensor(self):
st = sparse_tensor.SparseTensorValue(
indices=[[i, i] for i in range(10)],
values=list(range(10)),
dense_shape=[10, 10])
data = dataset_ops.Dataset.from_tensors(st)
data = data.unbatch()
data = data.batch(5)
data = data.unbatch()
expected_output = [
sparse_tensor.SparseTensorValue([[i]], [i], [10]) for i in range(10)
]
self.assertDatasetProduces(data, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testUnbatchDatasetWithDenseSparseAndRaggedTensor(self):
st = sparse_tensor.SparseTensorValue(
indices=[[i, i] for i in range(10)],
values=list(range(10)),
dense_shape=[10, 10])
rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
[[5]], [[6]], [[7]], [[8]], [[9]]])
data = dataset_ops.Dataset.from_tensors((list(range(10)), st, rt))
data = data.unbatch()
data = data.batch(5)
data = data.unbatch()
expected_output = [(i, sparse_tensor.SparseTensorValue([[i]], [i], [10]),
ragged_factory_ops.constant_value([[i]]))
for i in range(10)]
self.assertDatasetProduces(
data, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testUnbatchDatasetWithRaggedTensor(self):
rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
[[5]], [[6]], [[7]], [[8]], [[9]]])
data = dataset_ops.Dataset.from_tensors(rt)
data = data.unbatch()
data = data.batch(5)
data = data.batch(2)
data = data.unbatch()
expected_output = [
ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]]]),
ragged_factory_ops.constant_value([[[5]], [[6]], [[7]], [[8]], [[9]]]),
]
self.assertDatasetProduces(
data, expected_output=expected_output)
@combinations.generate(test_base.default_test_combinations())
def testUnbatchSingleElementTupleDataset(self):
data = tuple([(math_ops.range(10),) for _ in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
expected_types = ((dtypes.int32,),) * 3
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(data, [((i,),) * 3 for i in range(10)])
@combinations.generate(test_base.default_test_combinations())
def testUnbatchMultiElementTupleDataset(self):
data = tuple([(math_ops.range(10 * i, 10 * i + 10),
array_ops.fill([10], "hi")) for i in range(3)])
data = dataset_ops.Dataset.from_tensor_slices(data)
expected_types = ((dtypes.int32, dtypes.string),) * 3
data = data.batch(2)
self.assertAllEqual(expected_types,
dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertAllEqual(expected_types,
dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(
data,
[((i, b"hi"), (10 + i, b"hi"), (20 + i, b"hi")) for i in range(10)])
@combinations.generate(test_base.default_test_combinations())
def testUnbatchEmpty(self):
data = dataset_ops.Dataset.from_tensors(
(constant_op.constant([]), constant_op.constant([], shape=[0, 4]),
constant_op.constant([], shape=[0, 4, 0])))
data = data.unbatch()
self.assertDatasetProduces(data, [])
@combinations.generate(test_base.default_test_combinations())
def testUnbatchStaticShapeMismatch(self):
data = dataset_ops.Dataset.from_tensors((np.arange(7), np.arange(8),
np.arange(9)))
with self.assertRaises(ValueError):
data.unbatch()
@combinations.generate(test_base.graph_only_combinations())
def testUnbatchDynamicShapeMismatch(self):
ph1 = array_ops.placeholder(dtypes.int32, shape=[None])
ph2 = array_ops.placeholder(dtypes.int32, shape=None)
data = dataset_ops.Dataset.from_tensors((ph1, ph2))
data = data.unbatch()
iterator = dataset_ops.make_initializable_iterator(data)
next_element = iterator.get_next()
with self.cached_session() as sess:
# Mismatch in the 0th dimension.
sess.run(
iterator.initializer,
feed_dict={
ph1: np.arange(7).astype(np.int32),
ph2: np.arange(8).astype(np.int32)
})
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element)
# No 0th dimension (i.e. scalar value) for one component.
sess.run(
iterator.initializer,
feed_dict={
ph1: np.arange(7).astype(np.int32),
ph2: 7
})
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element)
@combinations.generate(test_base.default_test_combinations())
def testUnbatchDatasetWithUintDtypes(self):
components = (
np.tile(np.array([[0], [1], [2], [3]], dtype=np.uint8), 2),
np.tile(np.array([[1], [2], [3], [256]], dtype=np.uint16), 2),
np.tile(np.array([[2], [3], [4], [65536]], dtype=np.uint32), 2),
np.tile(np.array([[3], [4], [5], [4294967296]], dtype=np.uint64), 2),
)
expected_types = (dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64)
expected_output = [tuple([c[i] for c in components]) for i in range(4)]
data = dataset_ops.Dataset.from_tensor_slices(components)
data = data.batch(2)
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
data = data.unbatch()
self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
self.assertDatasetProduces(data, expected_output)
@combinations.generate(test_base.default_test_combinations())
def testNoneComponent(self):
dataset = dataset_ops.Dataset.from_tensors(
(list(range(10)), None)).unbatch().map(lambda x, y: x)
self.assertDatasetProduces(dataset, expected_output=range(10))
@combinations.generate(test_base.default_test_combinations())
def testName(self):
dataset = dataset_ops.Dataset.from_tensors([42]).unbatch(name="unbatch")
self.assertDatasetProduces(dataset, [42])
| UnbatchTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_rich_string07.py | {
"start": 315,
"end": 1361
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("rich_string07.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({"bold": 1})
italic = workbook.add_format({"italic": 1})
worksheet.write("A1", "Foo", bold)
worksheet.write("A2", "Bar", italic)
worksheet.write_rich_string("A3", "a", bold, "bc", "defg")
worksheet.write_rich_string("B4", "abc", italic, "de", "fg")
worksheet.write_rich_string("C5", "a", bold, "bc", "defg")
worksheet.write_rich_string("D6", "abc", italic, "de", "fg")
worksheet.write_rich_string("E7", "a", bold, "bcdef", "g")
worksheet.write_rich_string("F8", italic, "abcd", "efg")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pallets__werkzeug | examples/couchy/utils.py | {
"start": 1516,
"end": 2718
} | class ____:
def __init__(self, results, per_page, page, endpoint):
self.results = results
self.per_page = per_page
self.page = page
self.endpoint = endpoint
@cached_property
def count(self):
return len(self.results)
@cached_property
def entries(self):
return self.results[
((self.page - 1) * self.per_page) : (
((self.page - 1) * self.per_page) + self.per_page
)
]
@property
def has_previous(self):
"""Return True if there are pages before the current one."""
return self.page > 1
@property
def has_next(self):
"""Return True if there are pages after the current one."""
return self.page < self.pages
@property
def previous(self):
"""Return the URL for the previous page."""
return url_for(self.endpoint, page=self.page - 1)
@property
def next(self):
"""Return the URL for the next page."""
return url_for(self.endpoint, page=self.page + 1)
@property
def pages(self):
"""Return the number of pages."""
return max(0, self.count - 1) // self.per_page + 1
| Pagination |
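A small usage sketch, assuming `cached_property` and `url_for` are imported as in the rest of the couchy example; a plain list stands in for the query results:

results = list(range(23))                     # 23 fake result rows
page = Pagination(results, per_page=10, page=2, endpoint="index")
print(page.count)                             # 23
print(page.pages)                             # 3 (two full pages plus a partial one)
print(page.entries)                           # rows 10..19 shown on page 2
print(page.has_previous, page.has_next)       # True True
# page.previous / page.next build URLs via url_for and need a request context.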
python | openai__openai-python | tests/api_resources/fine_tuning/test_jobs.py | {
"start": 13404,
"end": 27279
} | class ____:
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)
@parametrize
async def test_method_create(self, async_client: AsyncOpenAI) -> None:
job = await async_client.fine_tuning.jobs.create(
model="gpt-4o-mini",
training_file="file-abc123",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
job = await async_client.fine_tuning.jobs.create(
model="gpt-4o-mini",
training_file="file-abc123",
hyperparameters={
"batch_size": "auto",
"learning_rate_multiplier": "auto",
"n_epochs": "auto",
},
integrations=[
{
"type": "wandb",
"wandb": {
"project": "my-wandb-project",
"entity": "entity",
"name": "name",
"tags": ["custom-tag"],
},
}
],
metadata={"foo": "string"},
method={
"type": "supervised",
"dpo": {
"hyperparameters": {
"batch_size": "auto",
"beta": "auto",
"learning_rate_multiplier": "auto",
"n_epochs": "auto",
}
},
"reinforcement": {
"grader": {
"input": "input",
"name": "name",
"operation": "eq",
"reference": "reference",
"type": "string_check",
},
"hyperparameters": {
"batch_size": "auto",
"compute_multiplier": "auto",
"eval_interval": "auto",
"eval_samples": "auto",
"learning_rate_multiplier": "auto",
"n_epochs": "auto",
"reasoning_effort": "default",
},
},
"supervised": {
"hyperparameters": {
"batch_size": "auto",
"learning_rate_multiplier": "auto",
"n_epochs": "auto",
}
},
},
seed=42,
suffix="x",
validation_file="file-abc123",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
response = await async_client.fine_tuning.jobs.with_raw_response.create(
model="gpt-4o-mini",
training_file="file-abc123",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
async with async_client.fine_tuning.jobs.with_streaming_response.create(
model="gpt-4o-mini",
training_file="file-abc123",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = await response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
job = await async_client.fine_tuning.jobs.retrieve(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
response = await async_client.fine_tuning.jobs.with_raw_response.retrieve(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
async with async_client.fine_tuning.jobs.with_streaming_response.retrieve(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = await response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
await async_client.fine_tuning.jobs.with_raw_response.retrieve(
"",
)
@parametrize
async def test_method_list(self, async_client: AsyncOpenAI) -> None:
job = await async_client.fine_tuning.jobs.list()
assert_matches_type(AsyncCursorPage[FineTuningJob], job, path=["response"])
@parametrize
async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
job = await async_client.fine_tuning.jobs.list(
after="string",
limit=0,
metadata={"foo": "string"},
)
assert_matches_type(AsyncCursorPage[FineTuningJob], job, path=["response"])
@parametrize
async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
response = await async_client.fine_tuning.jobs.with_raw_response.list()
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(AsyncCursorPage[FineTuningJob], job, path=["response"])
@parametrize
async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
async with async_client.fine_tuning.jobs.with_streaming_response.list() as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = await response.parse()
assert_matches_type(AsyncCursorPage[FineTuningJob], job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_method_cancel(self, async_client: AsyncOpenAI) -> None:
job = await async_client.fine_tuning.jobs.cancel(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
async def test_raw_response_cancel(self, async_client: AsyncOpenAI) -> None:
response = await async_client.fine_tuning.jobs.with_raw_response.cancel(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
async def test_streaming_response_cancel(self, async_client: AsyncOpenAI) -> None:
async with async_client.fine_tuning.jobs.with_streaming_response.cancel(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = await response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_cancel(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
await async_client.fine_tuning.jobs.with_raw_response.cancel(
"",
)
@parametrize
async def test_method_list_events(self, async_client: AsyncOpenAI) -> None:
job = await async_client.fine_tuning.jobs.list_events(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(AsyncCursorPage[FineTuningJobEvent], job, path=["response"])
@parametrize
async def test_method_list_events_with_all_params(self, async_client: AsyncOpenAI) -> None:
job = await async_client.fine_tuning.jobs.list_events(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
after="string",
limit=0,
)
assert_matches_type(AsyncCursorPage[FineTuningJobEvent], job, path=["response"])
@parametrize
async def test_raw_response_list_events(self, async_client: AsyncOpenAI) -> None:
response = await async_client.fine_tuning.jobs.with_raw_response.list_events(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(AsyncCursorPage[FineTuningJobEvent], job, path=["response"])
@parametrize
async def test_streaming_response_list_events(self, async_client: AsyncOpenAI) -> None:
async with async_client.fine_tuning.jobs.with_streaming_response.list_events(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = await response.parse()
assert_matches_type(AsyncCursorPage[FineTuningJobEvent], job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_list_events(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
await async_client.fine_tuning.jobs.with_raw_response.list_events(
"",
)
@parametrize
async def test_method_pause(self, async_client: AsyncOpenAI) -> None:
job = await async_client.fine_tuning.jobs.pause(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
async def test_raw_response_pause(self, async_client: AsyncOpenAI) -> None:
response = await async_client.fine_tuning.jobs.with_raw_response.pause(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
async def test_streaming_response_pause(self, async_client: AsyncOpenAI) -> None:
async with async_client.fine_tuning.jobs.with_streaming_response.pause(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = await response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_pause(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
await async_client.fine_tuning.jobs.with_raw_response.pause(
"",
)
@parametrize
async def test_method_resume(self, async_client: AsyncOpenAI) -> None:
job = await async_client.fine_tuning.jobs.resume(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
async def test_raw_response_resume(self, async_client: AsyncOpenAI) -> None:
response = await async_client.fine_tuning.jobs.with_raw_response.resume(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
@parametrize
async def test_streaming_response_resume(self, async_client: AsyncOpenAI) -> None:
async with async_client.fine_tuning.jobs.with_streaming_response.resume(
"ft-AF1WoRqd3aJAHsqc9NY7iL8F",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
job = await response.parse()
assert_matches_type(FineTuningJob, job, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
async def test_path_params_resume(self, async_client: AsyncOpenAI) -> None:
with pytest.raises(ValueError, match=r"Expected a non-empty value for `fine_tuning_job_id` but received ''"):
await async_client.fine_tuning.jobs.with_raw_response.resume(
"",
)
| TestAsyncJobs |
python | huggingface__transformers | src/transformers/models/phi/modeling_phi.py | {
"start": 1473,
"end": 7958
} | class ____(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: PhiConfig, device=None):
super().__init__()
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_type = self.config.rope_parameters["rope_type"]
rope_init_fn: Callable = self.compute_default_rope_parameters
if self.rope_type != "default":
rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = inv_freq
@staticmethod
def compute_default_rope_parameters(
config: Optional[PhiConfig] = None,
device: Optional["torch.device"] = None,
seq_len: Optional[int] = None,
) -> tuple["torch.Tensor", float]:
"""
Computes the inverse frequencies according to the original RoPE implementation
Args:
config ([`~transformers.PreTrainedConfig`]):
The model configuration.
device (`torch.device`):
The device to use for initialization of the inverse frequencies.
seq_len (`int`, *optional*):
The current sequence length. Unused for this type of RoPE.
Returns:
Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
"""
base = config.rope_parameters["rope_theta"]
partial_rotary_factor = config.rope_parameters.get("partial_rotary_factor", 1.0)
head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
dim = int(head_dim * partial_rotary_factor)
attention_factor = 1.0 # Unused in this type of RoPE
# Compute the inverse frequencies
inv_freq = 1.0 / (
base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
)
return inv_freq, attention_factor
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
@use_kernel_func_from_hub("rotary_pos_emb")
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| PhiRotaryEmbedding |
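A small numeric sketch of the rotary-embedding helpers above (not specific to Phi), assuming `torch` is imported as in the original module:

import torch

# rotate_half swaps the two halves of the last dimension and negates the new first half.
x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
print(rotate_half(x))        # tensor([[-3., -4.,  1.,  2.]])

# With cos = 1 and sin = 0 the rotary formula (q * cos) + (rotate_half(q) * sin)
# reduces to q itself, so the rotation leaves the values untouched.
q = torch.ones(1, 2, 3, 4)                           # (batch, heads, seq, head_dim)
cos, sin = torch.ones(1, 3, 4), torch.zeros(1, 3, 4)
q_rot = (q * cos.unsqueeze(1)) + (rotate_half(q) * sin.unsqueeze(1))
assert torch.equal(q_rot, q)

# repeat_kv expands 2 key/value heads to 6 by repeating each head 3 times.
kv = torch.randn(1, 2, 5, 4)
print(repeat_kv(kv, n_rep=3).shape)                  # torch.Size([1, 6, 5, 4])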
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 339022,
"end": 339871
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of
UpdateEnterpriseMembersCanDeleteIssuesSetting
"""
__schema__ = github_schema
__field_names__ = ("enterprise_id", "setting_value", "client_mutation_id")
enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
"""The ID of the enterprise on which to set the members can delete
issues setting.
"""
setting_value = sgqlc.types.Field(sgqlc.types.non_null(EnterpriseEnabledDisabledSettingValue), graphql_name="settingValue")
"""The value for the members can delete issues setting on the
enterprise.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateEnterpriseMembersCanDeleteIssuesSettingInput |
python | jina-ai__jina | tests/integration/streaming/test_streaming.py | {
"start": 223,
"end": 1520
} | class ____(Executor):
@requests(on='/hello')
async def task(self, doc: Document, **kwargs):
for i in range(100):
yield Document(text=f'{doc.text} {i}')
@requests(on='/world')
async def non_gen_task(self, docs: DocumentArray, **kwargs):
return docs
@pytest.mark.asyncio
@pytest.mark.parametrize('protocol', ['http', 'grpc'])
@pytest.mark.parametrize('include_gateway', [False, True])
@pytest.mark.parametrize('reuse_session', [False, True])
async def test_streaming_deployment(protocol, include_gateway, reuse_session):
if reuse_session and protocol != 'http':
return
port = random_port()
docs = []
with Deployment(
uses=MyExecutor,
timeout_ready=-1,
protocol=protocol,
port=port,
include_gateway=include_gateway,
):
client = Client(port=port, protocol=protocol, asyncio=True, reuse_session=reuse_session)
i = 0
async for doc in client.stream_doc(
on='/hello',
inputs=Document(text='hello world'),
return_type=Document,
input_type=Document,
):
docs.append(doc.text)
i += 1
assert docs == [f'hello world {i}' for i in range(100)]
assert len(docs) == 100
| MyExecutor |
python | django__django | tests/utils_tests/test_inspect.py | {
"start": 260,
"end": 652
} | class ____:
def no_arguments(self):
return None
def one_argument(self, something):
return something
def just_args(self, *args):
return args
def all_kinds(self, name, address="home", age=25, *args, **kwargs):
return kwargs
@classmethod
def cls_all_kinds(cls, name, address="home", age=25, *args, **kwargs):
return kwargs
| Person |
python | numba__numba | numba/tests/test_dyn_array.py | {
"start": 22541,
"end": 22800
} | class ____(TestNdZeros):
def setUp(self):
super(TestNdOnes, self).setUp()
self.pyfunc = np.ones
@unittest.expectedFailure
def test_1d_dtype_str_structured_dtype(self):
super().test_1d_dtype_str_structured_dtype()
| TestNdOnes |
python | pytorch__pytorch | torch/_dynamo/output_graph.py | {
"start": 9608,
"end": 12254
} | class ____:
"""
A base class containing fields that are considered "persistent" when we
want to save all the important state for reconstructing guards in a different
process. Normally we don't need to add states here, but we may have to when
the information is needed to serialize the guards, so the fields here are
supposed to be serializable as a requirement.
"""
local_scope: Scope
global_scope: Scope
# This records the initial torch function mode stack for guarding
torch_function_mode_stack: list[torch.overrides.TorchFunctionMode]
guard_on_key_order: set[Source]
# Map from graph input's `Source` to sizes / strides metadata
input_source_to_sizes_strides: dict[Source, dict[str, Any]]
dual_level: int
functorch_layers: list[torch._functorch.pyfunctorch.FuncTorchInterpreter]
current_device: Optional[torch.device]
global_state_guard: torch._C._dynamo.guards.GlobalStateGuard
_guards: torch._guards.GuardsSet
_aotautograd_guards: list[torch._guards.GuardEnvExpr]
# Whether or not the guards should be checked for correctness
export: bool = False
skip_guards_check: bool = False
export_constraints: bool = False
name_of_builtins_dict_key_in_fglobals: Optional[str] = None
@property
def shape_env(self) -> ShapeEnv:
raise AssertionError(f"shape_env shouldn't be accessed from {type(self)}")
@property
def guards(self) -> torch._guards.GuardsSet:
return self._guards
@property
def aotautograd_guards(self) -> list[torch._guards.GuardEnvExpr]:
return self._aotautograd_guards
def dump_guards_state(self) -> "OutputGraphGuardsState":
# Dump a serializable version of self without extras
return OutputGraphGuardsState(
local_scope=self.local_scope,
global_scope=self.global_scope,
torch_function_mode_stack=self.torch_function_mode_stack,
guard_on_key_order=self.guard_on_key_order,
input_source_to_sizes_strides=self.input_source_to_sizes_strides,
dual_level=self.dual_level,
functorch_layers=self.functorch_layers,
current_device=self.current_device,
global_state_guard=self.global_state_guard,
name_of_builtins_dict_key_in_fglobals=self.name_of_builtins_dict_key_in_fglobals,
export=self.export,
export_constraints=self.export_constraints,
_guards=self.guards,
_aotautograd_guards=self.aotautograd_guards,
skip_guards_check=self.skip_guards_check,
)
@dataclass
| OutputGraphGuardsState |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 97220,
"end": 98692
} | class ____:
def test_format_timeframe(self):
assert self.locale._format_timeframe("now", 0) == "dabar"
assert self.locale._format_timeframe("second", 1) == "sekundės"
assert self.locale._format_timeframe("seconds", 3) == "3 sekundžių"
assert self.locale._format_timeframe("seconds", 30) == "30 sekundžių"
assert self.locale._format_timeframe("minute", 1) == "minutės"
assert self.locale._format_timeframe("minutes", 4) == "4 minučių"
assert self.locale._format_timeframe("minutes", 40) == "40 minučių"
assert self.locale._format_timeframe("hour", 1) == "valandos"
assert self.locale._format_timeframe("hours", 23) == "23 valandų"
assert self.locale._format_timeframe("day", 1) == "dieną"
assert self.locale._format_timeframe("days", 12) == "12 dienų"
assert self.locale._format_timeframe("month", 1) == "mėnesio"
assert self.locale._format_timeframe("months", 2) == "2 mėnesių"
assert self.locale._format_timeframe("months", 11) == "11 mėnesių"
assert self.locale._format_timeframe("year", 1) == "metų"
assert self.locale._format_timeframe("years", 2) == "2 metų"
def test_weekday(self):
dt = arrow.Arrow(2015, 4, 11, 17, 30, 00)
assert self.locale.day_name(dt.isoweekday()) == "šeštadienis"
assert self.locale.day_abbreviation(dt.isoweekday()) == "še"
@pytest.mark.usefixtures("lang_locale")
| TestLithuanianLocale |
python | gevent__gevent | src/gevent/resolver/dnspython.py | {
"start": 7592,
"end": 8137
} | class ____(dns.resolver.Answer):
# Answer class for HostsResolver object
def __init__(self, qname, rdtype, rdclass, rrset, raise_on_no_answer=True):
self.response = None
self.qname = qname
self.rdtype = rdtype
self.rdclass = rdclass
self.canonical_name = qname
if not rrset and raise_on_no_answer:
raise dns.resolver.NoAnswer()
self.rrset = rrset
self.expiration = (time.time() +
rrset.ttl if hasattr(rrset, 'ttl') else 0)
| _HostsAnswer |
python | astropy__astropy | astropy/utils/data.py | {
"start": 70403,
"end": 84778
} | class ____(ValueError):
"""Record the URL or file that was a problem.
Using clear_download_cache on the .bad_file or .bad_url attribute,
whichever is not None, should resolve this particular problem.
"""
def __init__(self, *args, bad_urls=None, bad_files=None, **kwargs):
super().__init__(*args, **kwargs)
self.bad_urls = bad_urls if bad_urls is not None else []
self.bad_files = bad_files if bad_files is not None else []
def check_download_cache(pkgname="astropy"):
"""Do a consistency check on the cache.
.. note::
Since v5.0, this function no longer returns anything.
Because the cache is shared by all versions of ``astropy`` in all virtualenvs
run by your user, possibly concurrently, it could accumulate problems.
This could lead to hard-to-debug problems or wasted space. This function
detects a number of incorrect conditions, including nonexistent files that
are indexed, files that are indexed but in the wrong place, and, if you
request it, files whose content does not match the hash that is indexed.
This function also returns a list of non-indexed files. A few will be
associated with the shelve object; their exact names depend on the backend
used but will probably be based on ``urlmap``. The presence of other files
probably indicates that something has gone wrong and inaccessible files
have accumulated in the cache. These can be removed with
:func:`clear_download_cache`, either passing the filename returned here, or
with no arguments to empty the entire cache and return it to a
reasonable, if empty, state.
Parameters
----------
pkgname : str, optional
The package name to use to locate the download cache, i.e., for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Raises
------
`~astropy.utils.data.CacheDamaged`
To indicate a problem with the cache contents; the exception contains
a ``.bad_files`` attribute containing a set of filenames to allow the
user to use :func:`clear_download_cache` to remove the offending items.
OSError, RuntimeError
To indicate some problem with the cache structure. This may need a full
:func:`clear_download_cache` to resolve, or may indicate some kind of
misconfiguration.
"""
bad_files = set()
messages = set()
dldir = _get_download_cache_loc(pkgname=pkgname)
with os.scandir(dldir) as it:
for entry in it:
f = os.path.abspath(os.path.join(dldir, entry.name))
if entry.name.startswith("rmtree-"):
if f not in _tempfilestodel:
bad_files.add(f)
messages.add(f"Cache entry {entry.name} not scheduled for deletion")
elif entry.is_dir():
for sf in os.listdir(f):
if sf in ["url", "contents"]:
continue
sf = os.path.join(f, sf)
bad_files.add(sf)
messages.add(f"Unexpected file f{sf}")
urlf = os.path.join(f, "url")
url = None
if not os.path.isfile(urlf):
bad_files.add(urlf)
messages.add(f"Problem with URL file f{urlf}")
else:
url = get_file_contents(urlf, encoding="utf-8")
if not _is_url(url):
bad_files.add(f)
messages.add(f"Malformed URL: {url}")
else:
hashname = _url_to_dirname(url)
if entry.name != hashname:
bad_files.add(f)
messages.add(
f"URL hashes to {hashname} but is stored in"
f" {entry.name}"
)
if not os.path.isfile(os.path.join(f, "contents")):
bad_files.add(f)
if url is None:
messages.add(f"Hash {entry.name} is missing contents")
else:
messages.add(
f"URL {url} with hash {entry.name} is missing contents"
)
else:
bad_files.add(f)
messages.add(f"Left-over non-directory {f} in cache")
if bad_files:
raise CacheDamaged("\n".join(messages), bad_files=bad_files)
def _rmtree(path, replace=None):
"""More-atomic rmtree. Ignores missing directory."""
with TemporaryDirectory(
prefix="rmtree-", dir=os.path.dirname(os.path.abspath(path))
) as d:
try:
os.rename(path, os.path.join(d, "to-zap"))
except FileNotFoundError:
pass
except PermissionError:
warn(
CacheMissingWarning(
f"Unable to remove directory {path} because a file in it "
"is in use and you are on Windows",
path,
)
)
raise
except OSError as e:
if e.errno == errno.EXDEV:
warn(e.strerror, AstropyWarning)
shutil.move(path, os.path.join(d, "to-zap"))
else:
raise
if replace is not None:
try:
os.rename(replace, path)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
elif e.errno == errno.EXDEV:
warn(e.strerror, AstropyWarning)
shutil.move(replace, path)
else:
raise
def import_file_to_cache(
url_key, filename, remove_original=False, pkgname="astropy", *, replace=True
):
"""Import the on-disk file specified by filename to the cache.
The provided ``url_key`` will be the name used in the cache. The file
should contain the contents of this URL, at least notionally (the URL may
be temporarily or permanently unavailable). It is using ``url_key`` that
users will request these contents from the cache. See :func:`download_file` for
details.
If ``url_key`` already exists in the cache, it will be updated to point to
these imported contents, and its old contents will be deleted from the
cache.
Parameters
----------
url_key : str
The key to index the file under. This should probably be
the URL where the file was located, though if you obtained
it from a mirror you should use the URL of the primary
location.
filename : str
The file whose contents you want to import.
remove_original : bool
Whether to remove the original file (``filename``) once import is
complete.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
replace : boolean, optional
Whether or not to replace an existing object in the cache, if one exists.
If replacement is not requested but the object exists, silently pass.
"""
cache_dir = _get_download_cache_loc(pkgname=pkgname)
cache_dirname = _url_to_dirname(url_key)
local_dirname = os.path.join(cache_dir, cache_dirname)
local_filename = os.path.join(local_dirname, "contents")
with TemporaryDirectory(
prefix="temp_dir", dir=cache_dir, ignore_cleanup_errors=True
) as temp_dir:
temp_filename = os.path.join(temp_dir, "contents")
# Make sure we're on the same filesystem
# This will raise an exception if the url_key doesn't turn into a valid filename
shutil.copy(filename, temp_filename)
with open(os.path.join(temp_dir, "url"), "w", encoding="utf-8") as f:
f.write(url_key)
if replace:
_rmtree(local_dirname, replace=temp_dir)
else:
try:
os.rename(temp_dir, local_dirname)
except FileExistsError:
# already there, fine
pass
except OSError as e:
if e.errno == errno.ENOTEMPTY:
# already there, fine
pass
else:
raise
if remove_original:
os.remove(filename)
return os.path.abspath(local_filename)
def get_cached_urls(pkgname="astropy"):
"""
Get the list of URLs in the cache. Especially useful for looking up what
files are stored in your cache when you don't have internet access.
The listed URLs are the keys programs should use to access the file
contents, but those contents may have actually been obtained from a mirror.
See `~download_file` for details.
Parameters
----------
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
Returns
-------
cached_urls : list
List of cached URLs.
See Also
--------
cache_contents : obtain a dictionary listing everything in the cache
"""
return sorted(cache_contents(pkgname=pkgname).keys())
def cache_contents(pkgname="astropy"):
"""Obtain a dict mapping cached URLs to filenames.
This dictionary is a read-only snapshot of the state of the cache when this
function was called. If other processes are actively working with the
cache, it is possible for them to delete files that are listed in this
dictionary. Use with some caution if you are working on a system that is
busy with many running astropy processes, although the same issues apply to
most functions in this module.
"""
r = {}
try:
dldir = _get_download_cache_loc(pkgname=pkgname)
except OSError:
return _NOTHING
with os.scandir(dldir) as it:
for entry in it:
if entry.is_dir():
url = get_file_contents(
os.path.join(dldir, entry.name, "url"), encoding="utf-8"
)
r[url] = os.path.abspath(os.path.join(dldir, entry.name, "contents"))
return MappingProxyType(r)
def export_download_cache(
filename_or_obj, urls=None, overwrite=False, pkgname="astropy"
):
"""Exports the cache contents as a ZIP file.
Parameters
----------
filename_or_obj : str or file-like
Where to put the created ZIP file. Must be something the zipfile
module can write to.
urls : iterable of str or None
The URLs to include in the exported cache. The default is all
URLs currently in the cache. If a URL is included in this list
but is not currently in the cache, a KeyError will be raised.
To ensure that all are in the cache use `~download_file`
or `~download_files_in_parallel`.
overwrite : bool, optional
If filename_or_obj is a filename that exists, it will only be
overwritten if this is True.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
import_download_cache : import the contents of such a ZIP file
import_file_to_cache : import a single file directly
"""
if urls is None:
urls = get_cached_urls(pkgname)
with zipfile.ZipFile(filename_or_obj, "w" if overwrite else "x") as z:
for u in urls:
fn = download_file(u, cache=True, sources=[], pkgname=pkgname)
# Do not use os.path.join because ZIP files want
# "/" on all platforms
z_fn = urllib.parse.quote(u, safe="")
z.write(fn, z_fn)
def import_download_cache(
filename_or_obj, urls=None, update_cache=False, pkgname="astropy"
):
"""Imports the contents of a ZIP file into the cache.
Each member of the ZIP file should be named by a quoted version of the
URL whose contents it stores. These names are decoded with
:func:`~urllib.parse.unquote`.
Parameters
----------
filename_or_obj : str or file-like
Where the stored ZIP file is. Must be something the :mod:`~zipfile`
module can read from.
urls : set of str or list of str or None
The URLs to import from the ZIP file. The default is all
URLs in the file.
update_cache : bool, optional
If True, any entry in the ZIP file will overwrite the value in the
cache; if False, leave untouched any entry already in the cache.
pkgname : `str`, optional
The package name to use to locate the download cache. i.e. for
``pkgname='astropy'`` the default cache location is
``~/.astropy/cache``.
See Also
--------
export_download_cache : export the contents of the cache to such a ZIP file
import_file_to_cache : import a single file directly
"""
with zipfile.ZipFile(filename_or_obj, "r") as z, TemporaryDirectory() as d:
for i, zf in enumerate(z.infolist()):
url = urllib.parse.unquote(zf.filename)
# FIXME(aarchiba): do we want some kind of validation on this URL?
# urllib.parse might do something sensible...but what URLs might
# they have?
# is_url in this file is probably a good check, not just here
# but throughout this file.
if urls is not None and url not in urls:
continue
if not update_cache and is_url_in_cache(url, pkgname=pkgname):
continue
f_temp_name = os.path.join(d, str(i))
with z.open(zf) as f_zip, open(f_temp_name, "wb") as f_temp:
block = f_zip.read(conf.download_block_size)
while block:
f_temp.write(block)
block = f_zip.read(conf.download_block_size)
import_file_to_cache(
url, f_temp_name, remove_original=True, pkgname=pkgname
)
| CacheDamaged |
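A hedged usage sketch of the cache export/import round trip described in the docstrings above (astropy/utils/data.py); the URL and file names are placeholders, and it assumes astropy is installed and the placeholder URL is reachable.
from astropy.utils.data import (
    download_file, export_download_cache, import_download_cache, get_cached_urls)
url = "https://data.example.org/catalog.fits"  # placeholder URL
download_file(url, cache=True)                 # populate the cache
export_download_cache("cache_backup.zip", overwrite=True)
# Later, possibly on another machine, restore the same entries:
import_download_cache("cache_backup.zip", update_cache=True)
print(url in get_cached_urls())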
python | coleifer__peewee | tests/regressions.py | {
"start": 27301,
"end": 28399
} | class ____(ModelTestCase):
@requires_models(RS, RD)
def test_regression_count_distinct(self):
rs = RS.create(name='rs')
nums = [0, 1, 2, 3, 2, 1, 0]
RD.insert_many([('k%s' % i, i, rs) for i in nums]).execute()
query = RD.select(RD.key).distinct()
self.assertEqual(query.count(), 4)
# Try re-selecting using the id/key, which are all distinct.
query = query.select(RD.id, RD.key)
self.assertEqual(query.count(), 7)
# Re-select the key/value, of which there are 4 distinct.
query = query.select(RD.key, RD.value)
self.assertEqual(query.count(), 4)
query = rs.rds.select(RD.key).distinct()
self.assertEqual(query.count(), 4)
query = rs.rds.select(RD.key, RD.value).distinct()
self.assertEqual(query.count(), 4) # Was returning 7!
@requires_models(RKV)
def test_regression_count_distinct_cpk(self):
RKV.insert_many([('k%s' % i, i, i) for i in range(5)]).execute()
self.assertEqual(RKV.select().distinct().count(), 5)
| TestRegressionCountDistinct |
python | run-llama__llama_index | llama-index-integrations/postprocessor/llama-index-postprocessor-bedrock-rerank/llama_index/postprocessor/bedrock_rerank/base.py | {
"start": 626,
"end": 8949
} | class ____(BaseNodePostprocessor):
top_n: int = Field(default=2, description="Top N nodes to return.")
rerank_model_name: str = Field(
default=Models.COHERE_RERANK_V3_5.value,
description="The modelId of the Bedrock model to use.",
)
rerank_model_arn: Optional[str] = Field(
default=None,
description="Optional custom model ARN to use.",
)
profile_name: Optional[str] = Field(
default=None,
description=(
"The name of AWS profile to use. "
"If not given, then the default profile is used."
),
)
aws_access_key_id: Optional[str] = Field(
default=None, description="AWS Access Key ID to use."
)
aws_secret_access_key: Optional[str] = Field(
default=None, description="AWS Secret Access Key to use."
)
aws_session_token: Optional[str] = Field(
default=None, description="AWS Session Token to use."
)
region_name: Optional[str] = Field(
default=None,
description=(
"AWS region name to use. Uses region configured in AWS CLI if not passed."
),
)
botocore_session: Optional[Any] = Field(
default=None,
description="Use this Botocore session instead of creating a new default one.",
exclude=True,
)
botocore_config: Optional[Any] = Field(
default=None,
description=(
"Custom configuration object to use instead of the default generated one."
),
exclude=True,
)
max_retries: int = Field(
default=10,
description="The maximum number of API retries.",
gt=0,
)
timeout: float = Field(
default=60.0,
description=(
"The timeout for the Bedrock API request in seconds. "
"It will be used for both connect and read timeouts."
),
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional kwargs for the Bedrock client.",
)
_client: Any = PrivateAttr()
_model_package_arn: str = PrivateAttr()
def __init__(
self,
top_n: int = 2,
rerank_model_name: str = Models.COHERE_RERANK_V3_5.value,
rerank_model_arn: Optional[str] = None,
profile_name: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
region_name: Optional[str] = None,
client: Optional[Any] = None,
botocore_session: Optional[Any] = None,
botocore_config: Optional[Any] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
timeout: float = 60.0,
**kwargs: Any,
):
super().__init__(**kwargs)
self.top_n = top_n
self.rerank_model_name = rerank_model_name
self.rerank_model_arn = rerank_model_arn
self.profile_name = profile_name
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.aws_session_token = aws_session_token
self.region_name = region_name
self.botocore_session = botocore_session
self.botocore_config = botocore_config
self.max_retries = max_retries
self.timeout = timeout
self.additional_kwargs = additional_kwargs or {}
session_kwargs = {
"profile_name": self.profile_name,
"region_name": self.region_name,
"aws_access_key_id": self.aws_access_key_id,
"aws_secret_access_key": self.aws_secret_access_key,
"aws_session_token": self.aws_session_token,
"botocore_session": self.botocore_session,
}
try:
import boto3
from botocore.config import Config
config = (
Config(
retries={"max_attempts": self.max_retries, "mode": "standard"},
connect_timeout=self.timeout,
read_timeout=self.timeout,
)
if self.botocore_config is None
else self.botocore_config
)
session = boto3.Session(**session_kwargs)
except ImportError:
raise ImportError(
"The 'boto3' package was not found. Install it with 'pip install boto3'"
)
self.region_name = self.region_name or session.region_name
if client is not None:
self._client = client
else:
try:
self._client = session.client("bedrock-agent-runtime", config=config)
except Exception as e:
raise ValueError(f"Failed to create Bedrock Agent Runtime client: {e}")
if self.rerank_model_arn:
self._model_package_arn = self.rerank_model_arn
else:
self._model_package_arn = f"arn:aws:bedrock:{self.region_name}::foundation-model/{self.rerank_model_name}"
@classmethod
def class_name(cls) -> str:
return "AWSBedrockRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
if dispatcher:
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.rerank_model_name,
)
)
if query_bundle is None:
raise ValueError("Missing query bundle in extra info.")
if len(nodes) == 0:
return []
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.rerank_model_name,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
texts = [
node.node.get_content(metadata_mode=MetadataMode.EMBED)
for node in nodes
]
# Prepare the text sources for AWS Bedrock
text_sources = []
for text in texts:
text_sources.append(
{
"type": "INLINE",
"inlineDocumentSource": {
"type": "TEXT",
"textDocument": {"text": text},
},
}
)
# change top_n if the number of nodes is less than top_n
top_n = min(self.top_n, len(nodes))
queries = [
{
"type": "TEXT",
"textQuery": {"text": query_bundle.query_str},
}
]
reranking_configuration = {
"type": "BEDROCK_RERANKING_MODEL",
"bedrockRerankingConfiguration": {
"numberOfResults": top_n,
"modelConfiguration": {
"modelArn": self._model_package_arn,
},
},
}
try:
response = self._client.rerank(
queries=queries,
sources=text_sources,
rerankingConfiguration=reranking_configuration,
)
results = response["results"]
except Exception as e:
raise RuntimeError(f"Failed to invoke AWS Bedrock model: {e}")
new_nodes = []
for result in results:
index = result["index"]
relevance_score = result.get("relevanceScore", 0.0)
new_node_with_score = NodeWithScore(
node=nodes[index].node,
score=relevance_score,
)
new_nodes.append(new_node_with_score)
event.on_end(payload={EventPayload.NODES: new_nodes})
dispatcher.event(ReRankEndEvent(nodes=new_nodes))
return new_nodes
# backwards compatibility name change
AWSBedrockRerank = BedrockRerank
| BedrockRerank |
python | automl__auto-sklearn | autosklearn/pipeline/components/classification/bernoulli_nb.py | {
"start": 492,
"end": 2942
} | class ____(AutoSklearnClassificationAlgorithm):
def __init__(self, alpha, fit_prior, random_state=None, verbose=0):
self.alpha = alpha
self.fit_prior = fit_prior
self.random_state = random_state
self.verbose = int(verbose)
self.estimator = None
def fit(self, X, y):
import sklearn.naive_bayes
self.fit_prior = check_for_bool(self.fit_prior)
self.estimator = sklearn.naive_bayes.BernoulliNB(
alpha=self.alpha, fit_prior=self.fit_prior
)
self.classes_ = np.unique(y.astype(int))
# Fallback for multilabel classification
if len(y.shape) > 1 and y.shape[1] > 1:
import sklearn.multiclass
self.estimator = sklearn.multiclass.OneVsRestClassifier(
self.estimator, n_jobs=1
)
self.estimator.fit(X, y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "BernoulliNB",
"name": "Bernoulli Naive Bayes classifier",
"handles_regression": False,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
# the smoothing parameter is a non-negative float
# I will limit it to 1000 and put it on a logarithmic scale. (SF)
# Please adjust that, if you know a proper range, this is just a guess.
alpha = UniformFloatHyperparameter(
name="alpha", lower=1e-2, upper=100, default_value=1, log=True
)
fit_prior = CategoricalHyperparameter(
name="fit_prior", choices=["True", "False"], default_value="True"
)
cs.add_hyperparameters([alpha, fit_prior])
return cs
| BernoulliNB |
python | pytorch__pytorch | test/fx/test_partitioner_order.py | {
"start": 787,
"end": 922
} | class ____(torch.nn.Module):
def forward(self, x):
y = torch.add(x, x)
z = torch.add(y, x)
return z
| AddModule |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/source_google_ads/config_migrations.py | {
"start": 710,
"end": 4405
} | class ____:
"""
This class stands for migrating the config at runtime.
This migration is backwards compatible with the previous version, as a new property will be created.
When falling back to the previous source version, the connector will use the old property `custom_queries`.
Add `segments.date` to all queries where it was previously added by the IncrementalCustomQuery class.
"""
message_repository: MessageRepository = InMemoryMessageRepository()
@classmethod
def should_migrate(cls, config: Mapping[str, Any]) -> bool:
"""
Determines if a configuration requires migration.
Args:
- config (Mapping[str, Any]): The configuration data to check.
Returns:
- True: If the configuration requires migration.
- False: Otherwise.
"""
return "custom_queries_array" not in config
@classmethod
def update_custom_queries(cls, config: Mapping[str, Any], source: Source = None) -> Mapping[str, Any]:
"""
Update custom queries with segments.date field.
Args:
- config (Mapping[str, Any]): The configuration from which the key should be removed.
- source (Source, optional): The data source. Defaults to None.
Returns:
- Mapping[str, Any]: The configuration after removing the key.
"""
custom_queries = []
for query in config.get("custom_queries", []):
new_query = query.copy()
try:
query_object = GAQL.parse(query["query"])
except ValueError:
message = f"The custom GAQL query {query['table_name']} failed. Validate your GAQL query with the Google Ads query validator. https://developers.google.com/google-ads/api/fields/v13/query_validator"
raise AirbyteTracedException(message=message, failure_type=FailureType.config_error)
if query_object.resource_name not in FULL_REFRESH_CUSTOM_TABLE and "segments.date" not in query_object.fields:
query_object = query_object.append_field("segments.date")
new_query["query"] = str(query_object)
custom_queries.append(new_query)
config["custom_queries_array"] = custom_queries
return config
@classmethod
def modify_and_save(cls, config_path: str, source: Source, config: Mapping[str, Any]) -> Mapping[str, Any]:
"""
Modifies the configuration and then saves it back to the source.
Args:
- config_path (str): The path where the configuration is stored.
- source (Source): The data source.
- config (Mapping[str, Any]): The current configuration.
Returns:
- Mapping[str, Any]: The updated configuration.
"""
migrated_config = cls.update_custom_queries(config, source)
source.write_config(migrated_config, config_path)
return migrated_config
@classmethod
def migrate(cls, args: List[str], source: Source) -> None:
"""
Orchestrates the configuration migration process.
It first checks if the `--config` argument is provided, and if so,
determines whether migration is needed, and then performs the migration
if required.
Args:
- args (List[str]): List of command-line arguments.
- source (Source): The data source.
"""
config_path = AirbyteEntrypoint(source).extract_config(args)
if config_path:
config = source.read_config(config_path)
if cls.should_migrate(config):
emit_configuration_as_airbyte_control_message(cls.modify_and_save(config_path, source, config))
| MigrateCustomQuery |
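For illustration, a hedged before/after of the config reshaping performed by the migration above; the table name and query text are hypothetical, and the real code rewrites the query via GAQL.parse / append_field rather than plain string edits.
before = {
    "custom_queries": [
        {"table_name": "happy_table", "query": "SELECT campaign.id FROM campaign"},
    ],
}
# update_custom_queries keeps the old key and adds a new one with
# segments.date appended to each query (roughly):
after = {
    "custom_queries": before["custom_queries"],
    "custom_queries_array": [
        {"table_name": "happy_table",
         "query": "SELECT campaign.id, segments.date FROM campaign"},
    ],
}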
python | pytest-dev__pytest-xdist | src/xdist/remote.py | {
"start": 860,
"end": 1812
} | class ____:
"""
Simplified implementation of the same interface as py.log, for backward compatibility
since we dropped the dependency on pylib.
Note: this is defined here because this module can't depend on xdist, so we need
to have the other way around.
"""
def __init__(self, name: str, *, enabled: bool = True) -> None:
self.name = name
self.enabled = enabled
def __repr__(self) -> str:
return f"{type(self).__name__}({self.name!r}, enabled={self.enabled})"
def __call__(self, *a: Any, **k: Any) -> None:
if self.enabled:
print(f"[{self.name}]", *a, **k, file=sys.stderr)
def __getattr__(self, name: str) -> Producer:
return type(self)(name, enabled=self.enabled)
def worker_title(title: str) -> None:
try:
setproctitle(title)
except Exception:
# changing the process name is very optional, no errors please
pass
| Producer |
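A short hedged sketch of how the producer above is used: calling it prints to stderr with the name as a prefix, and attribute access creates a child producer named after the attribute.
log = Producer("worker")
log("starting")                  # -> "[worker] starting" on stderr
debug = log.debug                # __getattr__ builds Producer("debug")
debug("collected", 3, "items")   # -> "[debug] collected 3 items"
Producer("quiet", enabled=False)("never shown")  # disabled producers stay silent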
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 6126,
"end": 6738
} | class ____(GreatExpectationsError):
def __init__(self, result_dict) -> None:
template = """\
Invalid result values were found when trying to instantiate an ExpectationValidationResult.
- Invalid result values are likely caused by inconsistent cache values.
- Great Expectations enables caching by default.
- Please ensure that caching behavior is consistent between the underlying Dataset (e.g. Spark) and Great Expectations.
Result: {}
""" # noqa: E501 # FIXME CoP
self.message = template.format(json.dumps(result_dict, indent=2))
super().__init__(self.message)
| InvalidCacheValueError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-mixpanel/source_mixpanel/streams.py | {
"start": 9727,
"end": 12934
} | class ____(HttpStatusErrorHandler):
"""
Custom error handler for handling export errors specific to Mixpanel streams.
This handler addresses:
- 400 status code with "to_date cannot be later than today" message, indicating a potential timezone mismatch.
- ConnectionResetError during response parsing, indicating a need to retry the request.
If the response does not match these specific cases, the handler defers to the parent class's implementation.
Attributes:
stream (HttpStream): The HTTP stream associated with this error handler.
"""
def __init__(self, stream: HttpStream, **kwargs): # type: ignore # noqa
self.stream = stream
super().__init__(**kwargs)
def interpret_response(self, response_or_exception: Optional[Union[requests.Response, Exception]] = None) -> ErrorResolution:
if isinstance(response_or_exception, requests.Response):
if response_or_exception.status_code == requests.codes.bad_request:
if "to_date cannot be later than today" in response_or_exception.text:
self.stream._timezone_mismatch = True
message = (
"Your project timezone must be misconfigured. Please set it to the one defined in your Mixpanel project settings. "
"Stopping current stream sync."
)
return ErrorResolution(
response_action=ResponseAction.IGNORE,
failure_type=FailureType.config_error,
error_message=message,
)
if "Unable to authenticate request" in response_or_exception.text:
message = (
f"Your credentials might have expired. Please update your config with valid credentials."
f" See more details: {response_or_exception.text}"
)
return ErrorResolution(
response_action=ResponseAction.FAIL,
failure_type=FailureType.config_error,
error_message=message,
)
if response_or_exception.status_code == 402:
message = f"Unable to perform a request. Payment Required: {response_or_exception.json()['error']}"
return ErrorResolution(
response_action=ResponseAction.FAIL,
failure_type=FailureType.transient_error,
error_message=message,
)
try:
# trying to parse response to avoid ConnectionResetError and retry if it occurs
self.stream.iter_dicts(response_or_exception.iter_lines(decode_unicode=True))
except ConnectionResetError:
return ErrorResolution(
response_action=ResponseAction.RETRY,
failure_type=FailureType.transient_error,
error_message=f"Response status code: {response_or_exception.status_code}. Retrying...",
)
return super().interpret_response(response_or_exception)
| ExportErrorHandler |
python | pallets__quart | src/quart/ctx.py | {
"start": 5936,
"end": 7657
} | class ____(_BaseRequestWebsocketContext):
"""The context relating to the specific websocket, bound to the current task.
Do not use directly, prefer the
:func:`~quart.Quart.websocket_context` or
:func:`~quart.Quart.test_websocket_context` instead.
Attributes:
_after_websocket_functions: List of functions to execute after the current
websocket, see :func:`after_this_websocket`.
"""
def __init__(
self,
app: Quart,
request: Websocket,
session: SessionMixin | None = None,
) -> None:
super().__init__(app, request, session)
self._after_websocket_functions: list[AfterWebsocketCallable] = []
@property
def websocket(self) -> Websocket:
return cast(Websocket, self.request_websocket)
async def push(self) -> None:
await super()._push_appctx(_cv_websocket.set(self))
await super()._push()
async def pop(self, exc: BaseException | None = _sentinel) -> None: # type: ignore
try:
if len(self._cv_tokens) == 1:
if exc is _sentinel:
exc = sys.exc_info()[1]
await self.app.do_teardown_websocket(exc, self)
finally:
ctx = _cv_websocket.get()
token, app_ctx = self._cv_tokens.pop()
_cv_websocket.reset(token)
if app_ctx is not None:
await app_ctx.pop(exc)
if ctx is not self:
raise AssertionError(
f"Popped wrong request context. ({ctx!r} instead of {self!r})"
)
async def __aenter__(self) -> WebsocketContext:
await self.push()
return self
| WebsocketContext |
python | Textualize__textual | docs/examples/styles/scrollbar_corner_color.py | {
"start": 385,
"end": 633
} | class ____(App):
CSS_PATH = "scrollbar_corner_color.tcss"
def compose(self):
yield Label(TEXT.replace("\n", " ") + "\n" + TEXT * 10)
if __name__ == "__main__":
app = ScrollbarCornerColorApp()
app.run()
| ScrollbarCornerColorApp |
python | pytorch__pytorch | torch/fx/experimental/symbolic_shapes.py | {
"start": 83000,
"end": 83621
} | class ____(StatefulSymbolicContext):
"""
The correct symbolic context for a given inner tensor of a traceable tensor subclass
may differ from that of the outer symbolic context. This structure allows for this
flexibility, with inner symbolic contexts mapped via attr -> symbolic context.
"""
inner_contexts: dict[str, SymbolicContext] = None # type: ignore[assignment]
def __post_init__(self) -> None:
super().__post_init__()
if self.inner_contexts is None:
# pyrefly: ignore [bad-assignment]
self.inner_contexts = {}
@dataclass
| SubclassSymbolicContext |
python | sqlalchemy__sqlalchemy | test/orm/test_manytomany.py | {
"start": 542,
"end": 12072
} | class ____(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"place",
metadata,
Column(
"place_id",
Integer,
test_needs_autoincrement=True,
primary_key=True,
),
Column("name", String(30), nullable=False),
test_needs_acid=True,
)
Table(
"transition",
metadata,
Column(
"transition_id",
Integer,
test_needs_autoincrement=True,
primary_key=True,
),
Column("name", String(30), nullable=False),
test_needs_acid=True,
)
Table(
"place_thingy",
metadata,
Column(
"thingy_id",
Integer,
test_needs_autoincrement=True,
primary_key=True,
),
Column(
"place_id",
Integer,
ForeignKey("place.place_id"),
nullable=False,
),
Column("name", String(30), nullable=False),
test_needs_acid=True,
)
# association table #1
Table(
"place_input",
metadata,
Column("place_id", Integer, ForeignKey("place.place_id")),
Column(
"transition_id",
Integer,
ForeignKey("transition.transition_id"),
),
test_needs_acid=True,
)
# association table #2
Table(
"place_output",
metadata,
Column("place_id", Integer, ForeignKey("place.place_id")),
Column(
"transition_id",
Integer,
ForeignKey("transition.transition_id"),
),
test_needs_acid=True,
)
Table(
"place_place",
metadata,
Column("pl1_id", Integer, ForeignKey("place.place_id")),
Column("pl2_id", Integer, ForeignKey("place.place_id")),
test_needs_acid=True,
)
@classmethod
def setup_classes(cls):
class Place(cls.Basic):
def __init__(self, name):
self.name = name
class PlaceThingy(cls.Basic):
def __init__(self, name):
self.name = name
class Transition(cls.Basic):
def __init__(self, name):
self.name = name
def test_overlapping_attribute_error(self):
place, Transition, place_input, Place, transition = (
self.tables.place,
self.classes.Transition,
self.tables.place_input,
self.classes.Place,
self.tables.transition,
)
self.mapper_registry.map_imperatively(
Place,
place,
properties={
"transitions": relationship(
Transition, secondary=place_input, backref="places"
)
},
)
self.mapper_registry.map_imperatively(
Transition,
transition,
properties={
"places": relationship(
Place, secondary=place_input, backref="transitions"
)
},
)
assert_raises_message(
sa.exc.ArgumentError,
"property of that name exists",
sa.orm.configure_mappers,
)
def test_self_referential_roundtrip(self):
place, Place, place_place = (
self.tables.place,
self.classes.Place,
self.tables.place_place,
)
self.mapper_registry.map_imperatively(
Place,
place,
properties={
"places": relationship(
Place,
secondary=place_place,
primaryjoin=place.c.place_id == place_place.c.pl1_id,
secondaryjoin=place.c.place_id == place_place.c.pl2_id,
order_by=place_place.c.pl2_id,
)
},
)
sess = fixture_session()
p1 = Place("place1")
p2 = Place("place2")
p3 = Place("place3")
p4 = Place("place4")
p5 = Place("place5")
p6 = Place("place6")
p7 = Place("place7")
sess.add_all((p1, p2, p3, p4, p5, p6, p7))
p1.places.append(p2)
p1.places.append(p3)
p5.places.append(p6)
p6.places.append(p1)
p7.places.append(p1)
p1.places.append(p5)
p4.places.append(p3)
p3.places.append(p4)
sess.commit()
eq_(p1.places, [p2, p3, p5])
eq_(p5.places, [p6])
eq_(p7.places, [p1])
eq_(p6.places, [p1])
eq_(p4.places, [p3])
eq_(p3.places, [p4])
eq_(p2.places, [])
def test_self_referential_bidirectional_mutation(self):
place, Place, place_place = (
self.tables.place,
self.classes.Place,
self.tables.place_place,
)
self.mapper_registry.map_imperatively(
Place,
place,
properties={
"child_places": relationship(
Place,
secondary=place_place,
primaryjoin=place.c.place_id == place_place.c.pl1_id,
secondaryjoin=place.c.place_id == place_place.c.pl2_id,
order_by=place_place.c.pl2_id,
backref="parent_places",
)
},
)
sess = fixture_session()
p1 = Place("place1")
p2 = Place("place2")
p2.parent_places = [p1]
sess.add_all([p1, p2])
p1.parent_places.append(p2)
sess.commit()
assert p1 in p2.parent_places
assert p2 in p1.parent_places
def test_joinedload_on_double(self):
"""test that a mapper can have two eager relationships to the same
table, via two different association tables. aliases are required.
"""
(
place_input,
transition,
Transition,
PlaceThingy,
place,
place_thingy,
Place,
place_output,
) = (
self.tables.place_input,
self.tables.transition,
self.classes.Transition,
self.classes.PlaceThingy,
self.tables.place,
self.tables.place_thingy,
self.classes.Place,
self.tables.place_output,
)
self.mapper_registry.map_imperatively(PlaceThingy, place_thingy)
self.mapper_registry.map_imperatively(
Place,
place,
properties={"thingies": relationship(PlaceThingy, lazy="joined")},
)
self.mapper_registry.map_imperatively(
Transition,
transition,
properties=dict(
inputs=relationship(Place, place_output, lazy="joined"),
outputs=relationship(Place, place_input, lazy="joined"),
),
)
tran = Transition("transition1")
tran.inputs.append(Place("place1"))
tran.outputs.append(Place("place2"))
tran.outputs.append(Place("place3"))
sess = fixture_session()
sess.add(tran)
sess.commit()
r = sess.query(Transition).all()
self.assert_unordered_result(
r,
Transition,
{
"name": "transition1",
"inputs": (Place, [{"name": "place1"}]),
"outputs": (Place, [{"name": "place2"}, {"name": "place3"}]),
},
)
def test_bidirectional(self):
place_input, transition, Transition, Place, place, place_output = (
self.tables.place_input,
self.tables.transition,
self.classes.Transition,
self.classes.Place,
self.tables.place,
self.tables.place_output,
)
self.mapper_registry.map_imperatively(Place, place)
self.mapper_registry.map_imperatively(
Transition,
transition,
properties=dict(
inputs=relationship(
Place,
place_output,
backref=backref(
"inputs", order_by=transition.c.transition_id
),
order_by=Place.place_id,
),
outputs=relationship(
Place,
place_input,
backref=backref(
"outputs", order_by=transition.c.transition_id
),
order_by=Place.place_id,
),
),
)
t1 = Transition("transition1")
t2 = Transition("transition2")
t3 = Transition("transition3")
p1 = Place("place1")
p2 = Place("place2")
p3 = Place("place3")
sess = fixture_session()
sess.add_all([p3, p1, t1, t2, p2, t3])
t1.inputs.append(p1)
t1.inputs.append(p2)
t1.outputs.append(p3)
t2.inputs.append(p1)
p2.inputs.append(t2)
p3.inputs.append(t2)
p1.outputs.append(t1)
sess.commit()
self.assert_result(
[t1],
Transition,
{"outputs": (Place, [{"name": "place3"}, {"name": "place1"}])},
)
self.assert_result(
[p2],
Place,
{
"inputs": (
Transition,
[{"name": "transition1"}, {"name": "transition2"}],
)
},
)
@testing.requires.updateable_autoincrement_pks
@testing.requires.sane_multi_rowcount
def test_stale_conditions(self):
Place, Transition, place_input, place, transition = (
self.classes.Place,
self.classes.Transition,
self.tables.place_input,
self.tables.place,
self.tables.transition,
)
self.mapper_registry.map_imperatively(
Place,
place,
properties={
"transitions": relationship(
Transition, secondary=place_input, passive_updates=False
)
},
)
self.mapper_registry.map_imperatively(Transition, transition)
p1 = Place("place1")
t1 = Transition("t1")
p1.transitions.append(t1)
sess = fixture_session()
sess.add_all([p1, t1])
sess.commit()
p1.place_id
p1.transitions
sess.execute(place_input.delete())
p1.place_id = 7
assert_raises_message(
orm_exc.StaleDataError,
r"UPDATE statement on table 'place_input' expected to "
r"update 1 row\(s\); Only 0 were matched.",
sess.commit,
)
sess.rollback()
p1.place_id
p1.transitions
sess.execute(place_input.delete())
p1.transitions.remove(t1)
assert_raises_message(
orm_exc.StaleDataError,
r"DELETE statement on table 'place_input' expected to "
r"delete 1 row\(s\); Only 0 were matched.",
sess.commit,
)
| M2MTest |
python | facebookresearch__faiss | tests/test_fast_scan.py | {
"start": 2374,
"end": 4130
} | class ____(unittest.TestCase):
def do_test_rounding(self, implem=4, metric=faiss.METRIC_L2):
ds = datasets.SyntheticDataset(32, 2000, 5000, 200)
index = faiss.index_factory(32, 'PQ16x4', metric)
index.train(ds.get_train())
index.add(ds.get_database())
Dref, Iref = index.search(ds.get_queries(), 10)
nq = Iref.shape[0]
index2 = faiss.IndexPQFastScan(index)
# simply repro normal search
index2.implem = 2
D2, I2 = index2.search(ds.get_queries(), 10)
np.testing.assert_array_equal(I2, Iref)
np.testing.assert_array_equal(D2, Dref)
# rounded LUT with correction
index2.implem = implem
D4, I4 = index2.search(ds.get_queries(), 10)
# check accuracy of indexes
recalls = {}
for rank in 1, 10:
recalls[rank] = (Iref[:, :1] == I4[:, :rank]).sum() / nq
min_r1 = 0.98 if metric == faiss.METRIC_INNER_PRODUCT else 0.99
self.assertGreaterEqual(recalls[1], min_r1)
self.assertGreater(recalls[10], 0.995)
# check accuracy of distances
# err3 = ((D3 - D2) ** 2).sum()
err4 = ((D4 - D2) ** 2).sum()
nf = (D2 ** 2).sum()
self.assertLess(err4, nf * 1e-4)
def test_implem_4(self):
self.do_test_rounding(4)
def test_implem_4_ip(self):
self.do_test_rounding(4, faiss.METRIC_INNER_PRODUCT)
def test_implem_12(self):
self.do_test_rounding(12)
def test_implem_12_ip(self):
self.do_test_rounding(12, faiss.METRIC_INNER_PRODUCT)
def test_implem_14(self):
self.do_test_rounding(14)
def test_implem_14_ip(self):
self.do_test_rounding(14, faiss.METRIC_INNER_PRODUCT)
| TestRounding |
python | getsentry__sentry | fixtures/integrations/jira/stub_client.py | {
"start": 144,
"end": 2122
} | class ____(StubService):
service_name = "jira"
def get_create_meta_for_project(self, project):
response = self._get_stub_data("createmeta_response.json")
if project == "10001":
response["projects"][0]["id"] = "10001"
return response["projects"][0]
def get_issue_fields(self, project_id, issue_type_id):
return self._get_stub_data("issue_fields_response.json")
def get_issue_types(self, project_id):
return self._get_stub_data("issue_types_response.json")
def get_priorities(self):
return self._get_stub_data("priorities_response.json")
def get_versions(self, project_id):
return self._get_stub_data("versions_response.json")
def get_projects_paginated(self, params: dict[str, Any] | None = None):
return self._get_stub_data("projects_paginated.json")
def get_projects_list(self, cached: bool = True):
return self._get_stub_data("project_list_response.json")
def get_issue(self, issue_key):
return self._get_stub_data("get_issue_response.json")
def create_comment(self, issue_id, comment):
return comment
def update_comment(self, issue_key, comment_id, comment):
return comment
def create_issue(self, raw_form_data):
return {"key": "APP-123"}
def get_transitions(self, issue_key):
return self._get_stub_data("transition_response.json")["transitions"]
def transition_issue(self, issue_key, transition_id):
pass
def user_id_field(self) -> str:
return "accountId"
def get_user(self, user_id):
user = self._get_stub_data("user.json")
if user["accountId"] == user_id:
return user
raise ApiError("no user found")
def get_valid_statuses(self):
return self._get_stub_data("status_response.json")
def search_users_for_project(self, project, username):
return [self._get_stub_data("user.json")]
| StubJiraApiClient |
python | apache__airflow | airflow-core/tests/unit/serialization/test_serde.py | {
"start": 5535,
"end": 5638
} | class ____(BaseModel):
__version__: ClassVar[int] = 1
x: int
v: V
u: tuple
@attr.define
| U |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/eq_without_hash.py | {
"start": 957,
"end": 1063
} | class ____:
try:
...
except Exception:
def __eq__(self, other): ...
| MaybeEqTryExcept |
python | getsentry__sentry | src/sentry/workflow_engine/handlers/condition/event_frequency_handlers.py | {
"start": 1994,
"end": 3326
} | class ____(DataConditionHandler[list[int]]):
group = DataConditionHandler.Group.ACTION_FILTER
subgroup = DataConditionHandler.Subgroup.FREQUENCY
comparison_json_schema = {
"type": "object",
"properties": {
"interval": {"type": "string", "enum": list(STANDARD_INTERVALS.keys())},
"value": {"type": "integer", "minimum": 0},
"comparison_interval": {"type": "string", "enum": list(COMPARISON_INTERVALS.keys())},
"filters": {
"type": "array",
"items": {
"anyOf": [
TaggedEventConditionHandler.comparison_json_schema,
EventAttributeConditionHandler.comparison_json_schema,
],
},
},
},
"required": ["interval", "value", "comparison_interval"],
"additionalProperties": False,
}
@staticmethod
def evaluate_value(value: list[int], comparison: Any) -> DataConditionResult:
if not isinstance(value, list) or len(value) != 2:
return False
return percent_increase(value[0], value[1]) > comparison["value"]
# Percent sessions values must be between 0-100 (%)
@condition_handler_registry.register(Condition.PERCENT_SESSIONS_COUNT)
| EventFrequencyPercentHandler |
python | kamyu104__LeetCode-Solutions | Python/lexicographically-smallest-string-after-reverse.py | {
"start": 2714,
"end": 2927
} | class ____(object):
def lexSmallest(self, s):
"""
:type s: str
:rtype: str
"""
return min(min(s[:k][::-1]+s[k:], s[:-k]+s[-k:][::-1]) for k in xrange(1, len(s)+1))
| Solution3 |
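The one-liner above is dense, so here is a hedged, spelled-out Python 3 equivalent: for every length k, reverse either the k-character prefix or the k-character suffix and keep the lexicographically smallest result.
def lex_smallest(s):
    best = None
    for k in range(1, len(s) + 1):
        prefix_reversed = s[:k][::-1] + s[k:]    # reverse the first k characters
        suffix_reversed = s[:-k] + s[-k:][::-1]  # reverse the last k characters
        candidate = min(prefix_reversed, suffix_reversed)
        if best is None or candidate < best:
            best = candidate
    return best
print(lex_smallest("dcab"))  # "acdb", obtained by reversing the 3-char prefix "dca"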
python | tensorflow__tensorflow | tensorflow/examples/speech_commands/accuracy_utils.py | {
"start": 779,
"end": 6049
} | class ____(object):
"""Get streaming accuracy statistics every time a new command is founded.
Attributes:
_how_many_gt: How many ground truths.
_how_many_gt_matched: How many ground truths have been matched.
_how_many_fp: How many commands have been fired as false positive.
_how_many_c: How many commands have been fired correctly.
_how_many_w: How many commands have been fired wrongly.
_gt_occurrence: A list to record which commands and when it occurs in the
input audio stream.
_previous_c: A variable to record the last status of _how_many_c.
_previous_w: A variable to record the last status of _how_many_w.
_previous_fp: A variable to record the last status of _how_many_fp.
"""
def __init__(self):
"""Init StreamingAccuracyStats with void or zero values."""
self._how_many_gt = 0
self._how_many_gt_matched = 0
self._how_many_fp = 0
self._how_many_c = 0
self._how_many_w = 0
self._gt_occurrence = []
self._previous_c = 0
self._previous_w = 0
self._previous_fp = 0
def read_ground_truth_file(self, file_name):
"""Load ground truth and timestamp pairs and store it in time order."""
with open(file_name, 'r') as f:
for line in f:
line_split = line.strip().split(',')
if len(line_split) != 2:
continue
timestamp = round(float(line_split[1]))
label = line_split[0]
self._gt_occurrence.append([label, timestamp])
self._gt_occurrence = sorted(self._gt_occurrence, key=lambda item: item[1])
def delta(self):
"""Compute delta of StreamingAccuracyStats against last status."""
fp_delta = self._how_many_fp - self._previous_fp
w_delta = self._how_many_w - self._previous_w
c_delta = self._how_many_c - self._previous_c
if fp_delta == 1:
recognition_state = '(False Positive)'
elif c_delta == 1:
recognition_state = '(Correct)'
elif w_delta == 1:
recognition_state = '(Wrong)'
else:
raise ValueError('Unexpected state in statistics')
# Update the previous status
self._previous_c = self._how_many_c
self._previous_w = self._how_many_w
self._previous_fp = self._how_many_fp
return recognition_state
def calculate_accuracy_stats(self, found_words, up_to_time_ms,
time_tolerance_ms):
"""Calculate accuracy statistics when a new commands is founded.
Given ground truth and corresponding predictions founded by
model, figure out how many were correct. Take a tolerance time, so that only
predictions up to a point in time are considered.
Args:
found_words: A list of all found commands up to now.
up_to_time_ms: End timestamp of this audio piece.
time_tolerance_ms: The tolerance milliseconds before and after
up_to_time_ms to match a ground truth.
"""
if up_to_time_ms == -1:
latest_possible_time = np.inf
else:
latest_possible_time = up_to_time_ms + time_tolerance_ms
self._how_many_gt = 0
for ground_truth in self._gt_occurrence:
ground_truth_time = ground_truth[1]
if ground_truth_time > latest_possible_time:
break
self._how_many_gt += 1
self._how_many_fp = 0
self._how_many_c = 0
self._how_many_w = 0
has_gt_matched = []
for found_word in found_words:
found_label = found_word[0]
found_time = found_word[1]
earliest_time = found_time - time_tolerance_ms
latest_time = found_time + time_tolerance_ms
has_matched_been_found = False
for ground_truth in self._gt_occurrence:
ground_truth_time = ground_truth[1]
if (ground_truth_time > latest_time or
ground_truth_time > latest_possible_time):
break
if ground_truth_time < earliest_time:
continue
ground_truth_label = ground_truth[0]
if (ground_truth_label == found_label and
has_gt_matched.count(ground_truth_time) == 0):
self._how_many_c += 1
else:
self._how_many_w += 1
has_gt_matched.append(ground_truth_time)
has_matched_been_found = True
break
if not has_matched_been_found:
self._how_many_fp += 1
self._how_many_gt_matched = len(has_gt_matched)
def print_accuracy_stats(self):
"""Write a human-readable description of the statistics to stdout."""
if self._how_many_gt == 0:
tf.compat.v1.logging.info('No ground truth yet, {} false positives'.format(
self._how_many_fp))
else:
any_match_percentage = self._how_many_gt_matched / self._how_many_gt * 100
correct_match_percentage = self._how_many_c / self._how_many_gt * 100
wrong_match_percentage = self._how_many_w / self._how_many_gt * 100
false_positive_percentage = self._how_many_fp / self._how_many_gt * 100
tf.compat.v1.logging.info(
'{:.1f}% matched, {:.1f}% correct, {:.1f}% wrong, '
'{:.1f}% false positive'.format(any_match_percentage,
correct_match_percentage,
wrong_match_percentage,
false_positive_percentage))
| StreamingAccuracyStats |
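A hedged end-to-end sketch of the matching flow described in the docstrings above; the import path is an assumption based on the module location, and all labels and timestamps are made up.
from accuracy_utils import StreamingAccuracyStats  # assumed import path
with open("ground_truth.txt", "w") as f:
    f.write("yes,1200\nno,4000\n")                 # "label,timestamp_ms" lines
stats = StreamingAccuracyStats()
stats.read_ground_truth_file("ground_truth.txt")
found_words = [["yes", 1230], ["no", 4100]]        # [label, time_ms] predictions
stats.calculate_accuracy_stats(found_words, up_to_time_ms=5000, time_tolerance_ms=750)
stats.print_accuracy_stats()  # expect 100% matched / correct with these numbers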
python | eventlet__eventlet | eventlet/green/thread.py | {
"start": 1313,
"end": 4964
} | class ____:
def __init__(self, greenthread=None):
self._greenthread = greenthread
self._done = False
def _set_done(self):
self._done = True
def is_done(self):
if self._greenthread is not None:
return self._greenthread.dead
return self._done
@property
def ident(self):
return get_ident(self._greenthread)
def join(self, timeout=None):
if not hasattr(self._greenthread, "wait"):
return
if timeout is not None:
return with_timeout(timeout, self._greenthread.wait)
return self._greenthread.wait()
def _make_thread_handle(ident):
greenthread = greenlet.getcurrent()
assert ident == get_ident(greenthread)
return _ThreadHandle(greenthread=greenthread)
def __spawn_green(function, args=(), kwargs=None, joinable=False):
if ((3, 4) <= sys.version_info < (3, 13)
and getattr(function, '__module__', '') == 'threading'
and hasattr(function, '__self__')):
# In Python 3.4-3.12, threading.Thread uses an internal lock
# automatically released when the python thread state is deleted.
# With monkey patching, eventlet uses green threads without python
# thread state, so the lock is not automatically released.
#
# Wrap _bootstrap_inner() to release explicitly the thread state lock
# when the thread completes.
thread = function.__self__
bootstrap_inner = thread._bootstrap_inner
def wrap_bootstrap_inner():
try:
bootstrap_inner()
finally:
# The lock can be cleared (ex: by a fork())
if getattr(thread, "_tstate_lock", None) is not None:
thread._tstate_lock.release()
thread._bootstrap_inner = wrap_bootstrap_inner
kwargs = kwargs or {}
spawn_func = greenthread.spawn if joinable else greenthread.spawn_n
return spawn_func(__thread_body, function, args, kwargs)
def start_joinable_thread(function, handle=None, daemon=True):
g = __spawn_green(function, joinable=True)
if handle is None:
handle = _ThreadHandle(greenthread=g)
else:
handle._greenthread = g
return handle
def start_new_thread(function, args=(), kwargs=None):
g = __spawn_green(function, args=args, kwargs=kwargs)
return get_ident(g)
start_new = start_new_thread
def _get_main_thread_ident():
greenthread = greenlet.getcurrent()
while greenthread.parent is not None:
greenthread = greenthread.parent
return get_ident(greenthread)
def allocate_lock(*a):
return LockType(1)
allocate = allocate_lock
def exit():
raise greenlet.GreenletExit
exit_thread = __thread.exit_thread
def interrupt_main():
curr = greenlet.getcurrent()
if curr.parent and not curr.parent.dead:
curr.parent.throw(KeyboardInterrupt())
else:
raise KeyboardInterrupt()
if hasattr(__thread, 'stack_size'):
__original_stack_size__ = __thread.stack_size
def stack_size(size=None):
if size is None:
return __original_stack_size__()
if size > __original_stack_size__():
return __original_stack_size__(size)
else:
pass
# not going to decrease stack_size, because otherwise other greenlets in
# this thread will suffer
from eventlet.corolocal import local as _local
if hasattr(__thread, 'daemon_threads_allowed'):
daemon_threads_allowed = __thread.daemon_threads_allowed
if hasattr(__thread, '_shutdown'):
_shutdown = __thread._shutdown
| _ThreadHandle |
python | google__jax | docs/autodidax.py | {
"start": 87104,
"end": 106360
} | class ____(NamedTuple):
aval: ShapedArray
register_pytree_node(UndefPrimal,
lambda u: (u.aval, ()),
lambda aval, _: UndefPrimal(aval))
# -
# We use `UndefPrimal` instances to indicate which arguments with respect to
# which we want to transpose. These arise because in general, being explicit
# about closed-over values, we want to transpose functions of type
# `a -> b -o c` to functions of type `a -> c -o b`. Even more generally, the
# inputs with respect to which the function is linear could be scattered through
# the argument list. So we indicate the linear positions using `UndefPrimal`.
# We register `UndefPrimal` as a pytree node because the pytree mechanism gives
# a handy way to prune these placeholders out of argument lists.
#
# Next, we can write `eval_jaxpr_transposed`, along with transpose rules for
# all primitives which can be linear in at least one argument:
# +
# NB: the analogous function in JAX is called 'backward_pass'
def eval_jaxpr_transposed(jaxpr: Jaxpr, args: list[Any], cotangents: list[Any]
) -> list[Any]:
primal_env: dict[Var, Any] = {}
ct_env: dict[Var, Any] = {}
def read_primal(x: Atom) -> Any:
return primal_env.get(x, UndefPrimal(x.aval)) if type(x) is Var else x.val
def write_primal(v: Var, val: Any) -> None:
if type(val) is not UndefPrimal:
primal_env[v] = val
def read_cotangent(v: Var) -> Any:
return ct_env.pop(v, np.zeros(v.aval.shape, v.aval.dtype))
def write_cotangent(x: Atom, val: Any):
if type(x) is Var and val is not None:
ct_env[x] = add(ct_env[x], val) if x in ct_env else val
map(write_primal, jaxpr.in_binders, args)
map(write_cotangent, jaxpr.outs, cotangents)
for eqn in jaxpr.eqns[::-1]:
primals_in = map(read_primal, eqn.inputs)
cts_in = map(read_cotangent, eqn.out_binders)
rule = transpose_rules[eqn.primitive]
cts_out = rule(cts_in, *primals_in, **eqn.params)
map(write_cotangent, eqn.inputs, cts_out)
return [read_cotangent(v) for v, x in zip(jaxpr.in_binders, args)
if type(x) is UndefPrimal]
transpose_rules = {}
# +
def mul_transpose_rule(cts, x, y):
z_bar, = cts
assert (type(x) is UndefPrimal) ^ (type(y) is UndefPrimal)
return [mul(z_bar, y), None] if type(x) is UndefPrimal else [None, mul(x, z_bar)]
transpose_rules[mul_p] = mul_transpose_rule
def neg_transpose_rule(cts, x):
ybar, = cts
assert type(x) is UndefPrimal
return [neg(ybar)]
transpose_rules[neg_p] = neg_transpose_rule
def add_transpose_rule(cts, x, y):
z_bar, = cts
return [z_bar, z_bar]
transpose_rules[add_p] = add_transpose_rule
def reduce_sum_transpose_rule(cts, x, *, axis):
y_bar, = cts
return [broadcast(y_bar, x.aval.shape, axis)]
transpose_rules[reduce_sum_p] = reduce_sum_transpose_rule
def xla_call_transpose_rule(cts, *invals, jaxpr, num_consts):
del num_consts # Unused
undef_primals = [type(x) is UndefPrimal for x in invals]
transposed_jaxpr, new_consts = transpose_jaxpr(jaxpr, tuple(undef_primals))
residuals, _ = partition_list(undef_primals, invals)
outs = bind(xla_call_p, *new_consts, *residuals, *cts,
jaxpr=transposed_jaxpr, num_consts=len(new_consts))
outs = iter(outs)
return [next(outs) if undef else None for undef in undef_primals]
transpose_rules[xla_call_p] = xla_call_transpose_rule
@lru_cache
def transpose_jaxpr(jaxpr: Jaxpr, undef_primals: tuple[bool, ...]
) -> tuple[Jaxpr, list[Any]]:
avals_in, avals_out = typecheck_jaxpr(jaxpr)
traceable = partial(eval_jaxpr_transposed, jaxpr)
args = [UndefPrimal(a) if u else a for a, u in zip(avals_in, undef_primals)]
trans_jaxpr, consts, _ = make_jaxpr(traceable, tuple(args), tuple(avals_out))
typecheck_jaxpr(trans_jaxpr)
return trans_jaxpr, consts
# -
# Now that we can linearize and transpose, we can finally write `grad`:
def grad(f):
def gradfun(x, *xs):
y, f_vjp = vjp(f, x, *xs)
if np.shape(y) != (): raise TypeError
x_bar, *_ = f_vjp(np.ones(np.shape(y), np.result_type(y)))
return x_bar
return gradfun
y, f_vjp = vjp(sin, 3.)
print(f_vjp(1.), cos(3.))
# +
def f(x):
y = sin(x) * 2.
z = - y + x
return z
print(grad(f)(3.))
# +
@jit
def f(x):
y = x * 2.
z = g(y)
return z
@jit
def g(x):
return cos(x) * 2.
print(grad(f)(3.))
# -
# Here's something of a compositionality stress test:
# +
# from core_test.py fun_with_nested_calls_2
def foo(x):
@jit
def bar(y):
def baz(w):
q = jit(lambda x: y)(x)
q = q + jit(lambda: y)()
q = q + jit(lambda y: w + y)(y)
q = jit(lambda w: jit(sin)(x) * y)(1.0) + q
return q
p, t = jvp(baz, (x + 1.0,), (y,))
return t + (x * p)
return bar(x)
def assert_allclose(*vals):
for v1, v2 in zip(vals[:-1], vals[1:]):
np.testing.assert_allclose(v1, v2)
ans1 = f(3.)
ans2 = jit(f)(3.)
ans3, _ = jvp(f, (3.,), (5.,))
ans4, _ = jvp(jit(f), (3.,), (5.,))
assert_allclose(ans1, ans2, ans3, ans4)
deriv1 = grad(f)(3.)
deriv2 = grad(jit(f))(3.)
deriv3 = jit(grad(jit(f)))(3.)
_, deriv4 = jvp(f, (3.,), (1.,))
_, deriv5 = jvp(jit(f), (3.,), (1.,))
assert_allclose(deriv1, deriv2, deriv3, deriv4, deriv5)
hess1 = grad(grad(f))(3.)
hess2 = grad(grad(jit(f)))(3.)
hess3 = grad(jit(grad(f)))(3.)
hess4 = jit(grad(grad(f)))(3.)
_, hess5 = jvp(grad(f), (3.,), (1.,))
_, hess6 = jvp(jit(grad(f)), (3.,), (1.,))
_, hess7 = jvp(jit(grad(f)), (3.,), (1.,))
assert_allclose(hess1, hess2, hess3, hess4, hess5, hess6, hess7)
# -
# ## Part 5: the control flow primitives `cond`
#
# Next we'll add higher-order primitives for staged-out control flow. These
# resemble `jit` from Part 3, another higher-order primitive, but differ in that
# they are parameterized by multiple callables rather than just one.
# ### Adding `cond`
#
# We introduce a `cond` primitive to represent conditional application of one
# function or another inside a jaxpr. We write the type of `cond` as
# `Bool -> (a -> b) -> (a -> b) -> a -> b`. In words, `cond` takes a boolean
# representing the predicate and two functions of equal types. Depending on the
# value of the predicate, it applies one function or the other to its final
# argument.
#
# In Python, we represent it as a function which itself takes two functions as
# arguments. As with `jit`, the first step is to call `make_jaxpr` on its
# callable arguments to turn them into jaxprs:
# +
def cond(pred, true_fn, false_fn, *operands):
avals_in = [raise_to_shaped(get_aval(x)) for x in operands]
true_jaxpr, true_consts, out_tree = make_jaxpr(true_fn, *avals_in)
false_jaxpr, false_consts, out_tree_ = make_jaxpr(false_fn, *avals_in)
if out_tree != out_tree_: raise TypeError
true_jaxpr, false_jaxpr = _join_jaxpr_consts(
true_jaxpr, false_jaxpr, len(true_consts), len(false_consts))
if typecheck_jaxpr(true_jaxpr) != typecheck_jaxpr(false_jaxpr):
raise TypeError
outs = bind_cond(pred, *true_consts, *false_consts, *operands,
true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
return tree_unflatten(out_tree, outs)
cond_p = Primitive('cond')
def _join_jaxpr_consts(jaxpr1: Jaxpr, jaxpr2: Jaxpr, n1: int, n2: int
) -> tuple[Jaxpr, Jaxpr]:
jaxpr1_type, jaxpr2_type = typecheck_jaxpr(jaxpr1), typecheck_jaxpr(jaxpr2)
assert jaxpr1_type.in_types[n1:] == jaxpr2_type.in_types[n2:]
consts1, rest1 = split_list(jaxpr1.in_binders, n1)
consts2, rest2 = split_list(jaxpr2.in_binders, n2)
new_jaxpr1 = Jaxpr(consts1 + consts2 + rest1, jaxpr1.eqns, jaxpr1.outs)
new_jaxpr2 = Jaxpr(consts1 + consts2 + rest2, jaxpr2.eqns, jaxpr2.outs)
return new_jaxpr1, new_jaxpr2
def bind_cond(pred, *args, true_jaxpr, false_jaxpr):
assert len(args) == len(true_jaxpr.in_binders) == len(false_jaxpr.in_binders)
return bind(cond_p, pred, *args, true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
# -
# We require `true_jaxpr` and `false_jaxpr` to have the same type, but because
# they might close over different constants (and because jaxprs can only
# represent closed terms, i.e. can't have free variables and are instead
# closure-converted) we need the helper `_join_jaxpr_consts` to make the input
# binder lists of the two jaxprs consistent. (To be more economical we
# could try to identify pairs of constants with the same shapes, but instead we
# just concatenate the lists of constants.)
#
# Next we can turn to adding interpreter rules for `cond`. Its evaluation rule
# is simple:
def cond_impl(pred, *operands, true_jaxpr, false_jaxpr):
if pred:
return eval_jaxpr(true_jaxpr, operands)
else:
return eval_jaxpr(false_jaxpr, operands)
impl_rules[cond_p] = cond_impl
out = cond(True, lambda: 3, lambda: 4)
print(out)
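# The two branches may also close over *different* constants; `_join_jaxpr_consts`
# pads each branch's binders with the other's so that both jaxprs share a single
# input signature. A small sketch (the arrays `w` and `b` are ad-hoc example data):
w = np.ones(3) # captured only by the true branch
b = np.arange(3.) # captured only by the false branch
out = cond(True, lambda x: x * w, lambda x: x + b, np.ones(3))
print(out)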
# For its JVP and vmap rules, we only need to call the same `jvp_jaxpr` and
# `vmap_jaxpr` utilities we created for `jit`, followed by another pass of
# `_join_jaxpr_consts`:
def cond_jvp_rule(primals, tangents, *, true_jaxpr, false_jaxpr):
pred, *primals = primals
_ , *tangents = tangents
true_jaxpr , true_consts = jvp_jaxpr(true_jaxpr)
false_jaxpr, false_consts = jvp_jaxpr(false_jaxpr)
true_jaxpr, false_jaxpr = _join_jaxpr_consts(
true_jaxpr, false_jaxpr, len(true_consts), len(false_consts))
assert typecheck_jaxpr(true_jaxpr) == typecheck_jaxpr(false_jaxpr)
outs = bind_cond(pred, *true_consts, *false_consts, *primals, *tangents,
true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
primals_out, tangents_out = split_half(outs)
return primals_out, tangents_out
jvp_rules[cond_p] = cond_jvp_rule
out, out_tan = jvp(lambda x: cond(True, lambda: x * x, lambda: 0.), (1.,), (1.,))
print(out_tan)
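# The false branch goes through the same rule. Here the tangent should be the
# derivative of x*x, i.e. 2*x = 4.0 at x = 2.0 (a quick check):
_, out_tan = jvp(lambda x: cond(False, lambda: x * 3., lambda: x * x), (2.,), (1.,))
print(out_tan)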
def cond_vmap_rule(axis_size, vals_in, dims_in, *, true_jaxpr, false_jaxpr):
pred , *vals_in = vals_in
pred_dim, *dims_in = dims_in
if pred_dim is not not_mapped: raise NotImplementedError # TODO
true_jaxpr, true_consts = vmap_jaxpr(true_jaxpr, axis_size, tuple(dims_in))
false_jaxpr, false_consts = vmap_jaxpr(false_jaxpr, axis_size, tuple(dims_in))
true_jaxpr, false_jaxpr = _join_jaxpr_consts(
true_jaxpr, false_jaxpr, len(true_consts), len(false_consts))
assert typecheck_jaxpr(true_jaxpr) == typecheck_jaxpr(false_jaxpr)
outs = bind_cond(pred, *true_consts, *false_consts, *vals_in,
true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
return outs, [0] * len(outs)
vmap_rules[cond_p] = cond_vmap_rule
xs = np.array([1., 2., 3])
out = vmap(lambda x: cond(True, lambda: x + 1., lambda: 0.), (0,))(xs)
print(out)
# Notice that we're not currently supporting the case where the predicate value
# itself is batched. In mainline JAX, we handle this case by transforming the
# conditional to a [select primitive](https://docs.jax.dev/en/latest/_autosummary/jax.lax.select.html).
# That transformation is semantically correct so long as `true_fun` and
# `false_fun` do not involve any side-effecting primitives.
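#
# To make the intended rewrite concrete, here is its semantics sketched with plain
# numpy (only an illustration; our interpreter has no `select` primitive):
preds = np.array([True, False, True])
true_outs = np.array([1., 2., 3.])
false_outs = np.array([10., 20., 30.])
print(np.where(preds, true_outs, false_outs)) # element-wise choice, like select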
#
# Another thing not represented here, but present in mainline JAX, is that
# applying transformations to two jaxprs of equal type might result in jaxprs of
# different types. For example, applying the mainline JAX version of
# `vmap_jaxpr` to the identity-function jaxpr
#
# ```
# { lambda a:float32[] .
# let
# in ( a ) }
# ```
#
# would result in a jaxpr with a batched output, of type
# `[float32[10]] -> [float32[10]]` if the batch size were 10, while applying it
# to the zero-function jaxpr
#
# ```
# { lambda a:float32[] .
# let
# in ( 0. ) }
# ```
#
# would result in a jaxpr with an unbatched output, of type
# `[float32[10]] -> [float32[]]`. This is an optimization, aimed at not batching
# values unnecessarily. But it means that in `cond` we'd need an extra step of
# joining the two transformed jaxprs to have consistent output types. We don't
# need this step here because we chose `vmap_jaxpr` always to batch all outputs
# over the leading axis.
# Next we can turn to abstract evaluation and XLA lowering rules:
# +
def cond_abstract_eval(pred_type, *in_types, true_jaxpr, false_jaxpr):
if pred_type != ShapedArray((), np.dtype('bool')): raise TypeError
jaxpr_type = typecheck_jaxpr(true_jaxpr)
if jaxpr_type != typecheck_jaxpr(false_jaxpr):
raise TypeError
if not all(t1 == t2 for t1, t2 in zip(jaxpr_type.in_types, in_types)):
raise TypeError
return jaxpr_type.out_types
abstract_eval_rules[cond_p] = cond_abstract_eval
def cond_translation(c, in_avals, out_avals, in_vals, *, true_jaxpr, false_jaxpr):
del in_avals # Unused
pred, *in_vals = in_vals
op = hlo.IfOp([aval_to_ir_type(aval) for aval in out_avals], pred)
with ir.InsertionPoint(op.true_branch.blocks.append()):
hlo.return_(jaxpr_subcomp(c, true_jaxpr, in_vals))
with ir.InsertionPoint(op.false_branch.blocks.append()):
hlo.return_(jaxpr_subcomp(c, false_jaxpr, in_vals))
return op.results
hlo_translations[cond_p] = cond_translation
# -
out = jit(lambda: cond(False, lambda: 1, lambda: 2))()
print(out)
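# Operands are threaded through the lowered conditional as well; another small
# check (we expect 2.0 from the false branch here):
out = jit(lambda x: cond(False, lambda x: x + 1., lambda x: x - 1., x))(3.)
print(out)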
# Finally, to support reverse-mode automatic differentiation, we need partial
# evaluation and transposition rules. For partial evaluation, we need to
# introduce another jaxpr-munging utility, `_join_jaxpr_res`, to handle the fact
# that applying partial evaluation to `true_fun` and `false_fun` will in general
# result in distinct residuals. We use `_join_jaxpr_res` to make the output
# types of the transformed jaxprs consistent (while `_join_jaxpr_consts` dealt
# with input types).
# +
def cond_partial_eval(trace, tracers, *, true_jaxpr, false_jaxpr):
pred_tracer, *tracers = tracers
assert pred_tracer.pval.is_known
pred = pred_tracer.pval.const
in_uks = [not t.pval.is_known for t in tracers]
*jaxprs, out_uks, num_res = _cond_partial_eval(true_jaxpr, false_jaxpr, in_uks)
t_jaxpr1, f_jaxpr1, t_jaxpr2, f_jaxpr2 = jaxprs
known_tracers, unknown_tracers = partition_list(in_uks, tracers)
known_vals = [t.pval.const for t in known_tracers]
outs1_res = bind_cond(pred, *known_vals,
true_jaxpr=t_jaxpr1, false_jaxpr=f_jaxpr1)
outs1, res = split_list(outs1_res, len(outs1_res) - num_res)
pred_tracer_ = trace.instantiate_const(full_raise(trace, pred_tracer))
res_tracers = [trace.instantiate_const(full_raise(trace, x)) for x in res]
outs2 = [PartialEvalTracer(trace, PartialVal.unknown(v.aval), None)
for v in t_jaxpr2.outs]
eqn = JaxprEqnRecipe(cond_p, [pred_tracer_, *res_tracers, *unknown_tracers],
dict(true_jaxpr=t_jaxpr2, false_jaxpr=f_jaxpr2),
[v.aval for v in t_jaxpr2.outs], map(ref, outs2))
for t in outs2: t.recipe = eqn
return merge_lists(out_uks, outs1, outs2)
partial_eval_rules[cond_p] = cond_partial_eval
def _cond_partial_eval(true_jaxpr: Jaxpr, false_jaxpr: Jaxpr, in_uks: list[bool]
) -> tuple[Jaxpr, Jaxpr, Jaxpr, Jaxpr, list[bool], int]:
_, _, t_out_uks, _ = partial_eval_jaxpr(true_jaxpr , in_uks)
_, _, f_out_uks, _ = partial_eval_jaxpr(false_jaxpr, in_uks)
out_uks = map(op.or_, t_out_uks, f_out_uks)
t_jaxpr1, t_jaxpr2, _, t_nres = partial_eval_jaxpr(true_jaxpr , in_uks, out_uks)
f_jaxpr1, f_jaxpr2, _, f_nres = partial_eval_jaxpr(false_jaxpr, in_uks, out_uks)
t_jaxpr1, f_jaxpr1 = _join_jaxpr_res(t_jaxpr1, f_jaxpr1, t_nres, f_nres)
t_jaxpr2, f_jaxpr2 = _join_jaxpr_consts(t_jaxpr2, f_jaxpr2, t_nres, f_nres)
assert typecheck_jaxpr(t_jaxpr1) == typecheck_jaxpr(f_jaxpr1)
assert typecheck_jaxpr(t_jaxpr2) == typecheck_jaxpr(f_jaxpr2)
num_res = t_nres + f_nres
return t_jaxpr1, f_jaxpr1, t_jaxpr2, f_jaxpr2, out_uks, num_res
def _join_jaxpr_res(jaxpr1: Jaxpr, jaxpr2: Jaxpr, n1: int, n2: int
) -> tuple[Jaxpr, Jaxpr]:
jaxpr1_type, jaxpr2_type = typecheck_jaxpr(jaxpr1), typecheck_jaxpr(jaxpr2)
out_types1, _ = split_list(jaxpr1_type.out_types, len(jaxpr1.outs) - n1)
out_types2, _ = split_list(jaxpr2_type.out_types, len(jaxpr2.outs) - n2)
assert out_types1 == out_types2
outs1, res1 = split_list(jaxpr1.outs, len(jaxpr1.outs) - n1)
outs2, res2 = split_list(jaxpr2.outs, len(jaxpr2.outs) - n2)
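# pad each jaxpr's outputs with zero-valued literals standing in for the *other*
# jaxpr's residuals, so the two transformed jaxprs end up with identical output types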
zeros_like1 = [Lit(np.zeros(v.aval.shape, v.aval.dtype)) for v in res1]
zeros_like2 = [Lit(np.zeros(v.aval.shape, v.aval.dtype)) for v in res2]
new_jaxpr1 = Jaxpr(jaxpr1.in_binders, jaxpr1.eqns, outs1 + res1 + zeros_like2)
new_jaxpr2 = Jaxpr(jaxpr2.in_binders, jaxpr2.eqns, outs2 + zeros_like1 + res2)
return new_jaxpr1, new_jaxpr2
# -
_, f_lin = linearize(lambda x: cond(True, lambda: x, lambda: 0.), 1.)
out = f_lin(3.14)
print(out)
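# The function being linearized is the identity on its argument, so the resulting
# linear map should pass tangents straight through (we expect 1.0 and 2.0):
print(f_lin(1.), f_lin(2.))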
def cond_peval_eqn(unks_in: list[bool], eqn: JaxprEqn,
) -> tuple[JaxprEqn, JaxprEqn, list[bool], list[Atom]]:
pred_unk, *unks_in = unks_in
assert not pred_unk
true_jaxpr, false_jaxpr = eqn.params['true_jaxpr'], eqn.params['false_jaxpr']
*jaxprs, unks_out, num_res = _cond_partial_eval(true_jaxpr, false_jaxpr, unks_in)
t_jaxpr1, f_jaxpr1, t_jaxpr2, f_jaxpr2 = jaxprs
ins1, ins2 = partition_list(unks_in, eqn.inputs[1:])
outs1, outs2 = partition_list(unks_out, eqn.out_binders)
residuals, _ = split_list(t_jaxpr2.in_binders, num_res)
eqn1 = JaxprEqn(cond_p, [eqn.inputs[0], *ins1],
dict(true_jaxpr=t_jaxpr1, false_jaxpr=f_jaxpr1),
outs1 + residuals)
eqn2 = JaxprEqn(cond_p, [eqn.inputs[0], *residuals, *ins2],
dict(true_jaxpr=t_jaxpr2, false_jaxpr=f_jaxpr2),
outs2)
res = [eqn.inputs[0], *residuals] if type(eqn.inputs[0]) is Var else residuals
return eqn1, eqn2, unks_out, res
partial_eval_jaxpr_rules[cond_p] = cond_peval_eqn
_, f_lin = linearize(jit(lambda x: cond(True, lambda: x, lambda: 0.)), 1.)
out = f_lin(3.14)
print(out)
# Transposition is a fairly straightforward application of `transpose_jaxpr`:
def cond_transpose_rule(cts, pred, *invals, true_jaxpr, false_jaxpr):
undef_primals = tuple(type(x) is UndefPrimal for x in invals)
true_jaxpr, true_consts = transpose_jaxpr(true_jaxpr, undef_primals)
false_jaxpr, false_consts = transpose_jaxpr(false_jaxpr, undef_primals)
true_jaxpr, false_jaxpr = _join_jaxpr_consts(
true_jaxpr, false_jaxpr, len(true_consts), len(false_consts))
res = [x for x in invals if type(x) is not UndefPrimal]
outs = bind_cond(pred, *true_consts, *false_consts, *res, *cts,
true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)
outs = iter(outs)
return [None] + [next(outs) if type(x) is UndefPrimal else None for x in invals]
transpose_rules[cond_p] = cond_transpose_rule
out = grad(lambda x: cond(True, lambda: x * x, lambda: 0.))(1.)
print(out)
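# The same machinery differentiates through the other branch too; here we expect
# the derivative of 3*x, i.e. 3.0 (a quick extra check):
print(grad(lambda x: cond(False, lambda: x * x, lambda: x * 3.))(2.))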
# + tags=["hide-input"]
def pprint_cond(names: defaultdict[Var, str], eqn: JaxprEqn) -> PPrint:
true_jaxpr, false_jaxpr = eqn.params['true_jaxpr'], eqn.params['false_jaxpr']
new_params = {k:v for k, v in eqn.params.items() if not k.endswith('jaxpr')}
lhs = pp(' '.join(var_str(names, v) for v in eqn.out_binders))
rhs = (pp(eqn.primitive.name) >> pp_params(new_params) >>
pp(' '.join(names[x] if isinstance(x, Var) else str(x.val)
for x in eqn.inputs)))
return vcat([lhs >> pp(' = ') >> rhs,
pp_jaxpr(true_jaxpr).indent(2),
pp_jaxpr(false_jaxpr).indent(2)])
pp_rules[cond_p] = pprint_cond
| UndefPrimal |
python | joke2k__faker | faker/providers/person/ro_RO/__init__.py | {
"start": 44,
"end": 14167
} | class ____(PersonProvider):
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}",
"{{first_name_female}} {{first_name_female}} {{last_name}}",
)
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{first_name_male}} {{last_name}}",
)
# sources: https://ro.wikipedia.org/wiki/List%C4%83_de_prenume_rom%C3%A2ne%C8%99ti
first_names_female = (
"Ada",
"Adela",
"Adelaida",
"Adelina",
"Adina",
"Adriana",
"Agata",
"Aglaia",
"Agripina",
"Aida",
"Alberta",
"Albertina",
"Alexandra",
"Alexandrina",
"Alice",
"Alida",
"Alina",
"Alis",
"Alma",
"Amalia",
"Amanda",
"Amelia",
"Ana",
"Anabela",
"Anaida",
"Anamaria",
"Anastasia",
"Anca",
"Ancuța",
"Anda",
"Andra",
"Andrada",
"Andreea",
"Anemona",
"Aneta",
"Angela",
"Anghelina",
"Anica",
"Anișoara",
"Antoaneta",
"Antonela",
"Antonia",
"Anuța",
"Ariadna",
"Ariana",
"Arina",
"Aristița",
"Artemisa",
"Astrid",
"Atena",
"Augustina",
"Aura",
"Aurelia",
"Aureliana",
"Aurica",
"Aurora",
"Axenia",
"Beatrice",
"Betina",
"Bianca",
"Blanduzia",
"Bogdana",
"Brândușa",
"Camelia",
"Carina",
"Carla",
"Carmen",
"Carmina",
"Carolina",
"Casandra",
"Casiana",
"Caterina",
"Catinca",
"Catrina",
"Catrinel",
"Cătălina",
"Cecilia",
"Celia",
"Cerasela",
"Cezara",
"Cipriana",
"Clara",
"Clarisa",
"Claudia",
"Clementina",
"Cleopatra",
"Codrina",
"Codruța",
"Constanța",
"Constantina",
"Consuela",
"Coralia",
"Corina",
"Cornelia",
"Cosmina",
"Crenguța",
"Crina",
"Cristina",
"Daciana",
"Dafina",
"Daiana",
"Dalia",
"Dana",
"Daniela",
"Daria",
"Dariana",
"Delia",
"Demetra",
"Denisa",
"Despina",
"Diana",
"Dida",
"Didina",
"Dimitrina",
"Dina",
"Dochia",
"Doina",
"Domnica",
"Dora",
"Doriana",
"Dorina",
"Dorli",
"Draga",
"Dumitra",
"Dumitrana",
"Ecaterina",
"Eftimia",
"Elena",
"Eleonora",
"Eliana",
"Elisabeta",
"Elisaveta",
"Eliza",
"Elodia",
"Elvira",
"Emanuela",
"Emilia",
"Erica",
"Estera",
"Eufrosina",
"Eugenia",
"Eusebia",
"Eva",
"Evanghelina",
"Evelina",
"Fabia",
"Fabiana",
"Felicia",
"Filofteia",
"Fiona",
"Flavia",
"Floare",
"Floarea",
"Flora",
"Florența",
"Florentina",
"Floriana",
"Florica",
"Florina",
"Francesca",
"Frusina",
"Gabriela",
"Geanina",
"Gențiana",
"Georgeta",
"Georgia",
"Georgiana",
"Geta",
"Gherghina",
"Gianina",
"Gina",
"Giorgiana",
"Grațiana",
"Grațiela",
"Henrieta",
"Heracleea",
"Hortensia",
"Iasmina",
"Ica",
"Ileana",
"Ilinca",
"Ilona",
"Ina",
"Ioana",
"Ioanina",
"Iolanda",
"Ionela",
"Ionelia",
"Iosefina",
"Iridenta",
"Irina",
"Iris",
"Isabela",
"Iulia",
"Iuliana",
"Iustina",
"Ivona",
"Izabela",
"Jana",
"Janeta",
"Janina",
"Jasmina",
"Jeana",
"Julia",
"Julieta",
"Larisa",
"Laura",
"Laurenția",
"Lavinia",
"Lăcrămioara",
"Leana",
"Lelia",
"Leontina",
"Leopoldina",
"Letiția",
"Lia",
"Liana",
"Lidia",
"Ligia",
"Lili",
"Liliana",
"Lioara",
"Livia",
"Loredana",
"Lorelei",
"Lorena",
"Luana",
"Lucia",
"Luciana",
"Lucreția",
"Ludmila",
"Ludovica",
"Luiza",
"Luminița",
"Magdalena",
"Maia",
"Malvina",
"Manuela",
"Mara",
"Marcela",
"Marcheta",
"Marga",
"Margareta",
"Maria",
"Mariana",
"Maricica",
"Marilena",
"Marina",
"Marinela",
"Marioara",
"Marta",
"Matilda",
"Mădălina",
"Mălina",
"Mărioara",
"Măriuca",
"Melania",
"Melina",
"Mihaela",
"Milena",
"Mina",
"Minodora",
"Mioara",
"Mirabela",
"Mirela",
"Mirona",
"Miruna",
"Mona",
"Monalisa",
"Monica",
"Nadia",
"Narcisa",
"Natalia",
"Natașa",
"Nicoleta",
"Niculina",
"Nidia",
"Noemi",
"Nora",
"Norica",
"Oana",
"Octavia",
"Octaviana",
"Ofelia",
"Olga",
"Olimpia",
"Olivia",
"Ortansa",
"Otilia",
"Ozana",
"Pamela",
"Paraschiva",
"Patricia",
"Paula",
"Paulica",
"Paulina",
"Petronela",
"Petruța",
"Pompilia",
"Profira",
"Rada",
"Rafila",
"Raluca",
"Ramona",
"Rebeca",
"Renata",
"Rica",
"Roberta",
"Robertina",
"Rodica",
"Romanița",
"Romina",
"Roxana",
"Roxelana",
"Roza",
"Rozalia",
"Ruxanda",
"Ruxandra",
"Sabina",
"Sabrina",
"Safta",
"Salomea",
"Sanda",
"Saveta",
"Savina",
"Sânziana",
"Semenica",
"Severina",
"Sidonia",
"Silvana",
"Silvia",
"Silviana",
"Simina",
"Simona",
"Smaranda",
"Sofia",
"Sonia",
"Sorana",
"Sorina",
"Speranța",
"Stana",
"Stanca",
"Stela",
"Steliana",
"Steluța",
"Suzana",
"Svetlana",
"Ștefana",
"Ștefania",
"Tamara",
"Tania",
"Tatiana",
"Teea",
"Teodora",
"Teodosia",
"Teona",
"Tiberia",
"Timea",
"Tinca",
"Tincuța",
"Tudora",
"Tudorița",
"Tudosia",
"Valentina",
"Valeria",
"Vanesa",
"Varvara",
"Vasilica",
"Venera",
"Vera",
"Veronica",
"Veta",
"Vicenția",
"Victoria",
"Violeta",
"Viorela",
"Viorica",
"Virginia",
"Viviana",
"Vlădelina",
"Voichița",
"Xenia",
"Zaharia",
"Zamfira",
"Zaraza",
"Zenobia",
"Zenovia",
"Zina",
"Zoe",
)
first_names_male = (
"Achim",
"Adam",
"Adelin",
"Adi",
"Adonis",
"Adrian",
"Agnos",
"Albert",
"Aleodor",
"Alex",
"Alexandru",
"Alexe",
"Alin",
"Alistar",
"Amedeu",
"Amza",
"Anatolie",
"Andrei",
"Andrian",
"Angel",
"Anghel",
"Antim",
"Anton",
"Antonie",
"Antoniu",
"Arian",
"Aristide",
"Arsenie",
"Augustin",
"Aurel",
"Aurelian",
"Aurică",
"Avram",
"Axinte",
"Barbu",
"Bartolomeu",
"Basarab",
"Bănel",
"Bebe",
"Beniamin",
"Benone",
"Bernard",
"Bogdan",
"Brăduț",
"Bucur",
"Caius",
"Camil",
"Cantemir",
"Carol",
"Casian",
"Cazimir",
"Călin",
"Cătălin",
"Cedrin",
"Cezar",
"Ciprian",
"Claudiu",
"Codin",
"Codrin",
"Codruț",
"Constantin",
"Cornel",
"Corneliu",
"Corvin",
"Cosmin",
"Costache",
"Costel",
"Costin",
"Crin",
"Cristea",
"Cristian",
"Cristobal",
"Cristofor",
"Dacian",
"Damian",
"Dan",
"Daniel",
"Darius",
"David",
"Decebal",
"Denis",
"Dinu",
"Dominic",
"Dorel",
"Dorian",
"Dorin",
"Dorinel",
"Doru",
"Dragoș",
"Ducu",
"Dumitru",
"Edgar",
"Edmond",
"Eduard",
"Eftimie",
"Emanoil",
"Emanuel",
"Emanuil",
"Emil",
"Emilian",
"Eremia",
"Eric",
"Ernest",
"Eugen",
"Eusebiu",
"Eustațiu",
"Fabian",
"Felix",
"Filip",
"Fiodor",
"Flaviu",
"Florea",
"Florentin",
"Florian",
"Florin",
"Francisc",
"Frederic",
"Gabi",
"Gabriel",
"Gelu",
"George",
"Georgel",
"Georgian",
"Ghenadie",
"Gheorghe",
"Gheorghiță",
"Ghiță",
"Gică",
"Gicu",
"Giorgian",
"Grațian",
"Gregorian",
"Grigore",
"Haralamb",
"Haralambie",
"Horațiu",
"Horea",
"Horia",
"Iacob",
"Iancu",
"Ianis",
"Ieremia",
"Ilarie",
"Ilarion",
"Ilie",
"Inocențiu",
"Ioan",
"Ion",
"Ionel",
"Ionică",
"Ionuț",
"Iosif",
"Irinel",
"Iulian",
"Iuliu",
"Iurie",
"Iustin",
"Iustinian",
"Ivan",
"Jan",
"Jean",
"Jenel",
"Ladislau",
"Lascăr",
"Laurențiu",
"Laurian",
"Lazăr",
"Leonard",
"Leontin",
"Leordean",
"Lică",
"Liviu",
"Lorin",
"Luca",
"Lucențiu",
"Lucian",
"Lucrețiu",
"Ludovic",
"Manole",
"Marcel",
"Marcu",
"Marian",
"Marin",
"Marius",
"Martin",
"Matei",
"Maxim",
"Maximilian",
"Mădălin",
"Mihai",
"Mihail",
"Mihnea",
"Mircea",
"Miron",
"Mitică",
"Mitruț",
"Mugur",
"Mugurel",
"Nae",
"Narcis",
"Nechifor",
"Nelu",
"Nichifor",
"Nicoară",
"Nicodim",
"Nicolae",
"Nicolaie",
"Nicu",
"Niculiță",
"Nicușor",
"Nicuță",
"Norbert",
"Norman",
"Octav",
"Octavian",
"Octaviu",
"Olimpian",
"Olimpiu",
"Oliviu",
"Ovidiu",
"Pamfil",
"Panagachie",
"Panait",
"Paul",
"Pavel",
"Pătru",
"Petre",
"Petrică",
"Petrișor",
"Petru",
"Petruț",
"Pleșu",
"Pompiliu",
"Radu",
"Rafael",
"Rareș",
"Raul",
"Răducu",
"Răzvan",
"Relu",
"Remus",
"Robert",
"Romeo",
"Romulus",
"Sabin",
"Sandu",
"Sandu",
"Sava",
"Sebastian",
"Sergiu",
"Sever",
"Severin",
"Silvian",
"Silviu",
"Simi",
"Simion",
"Sinică",
"Sorin",
"Stan",
"Stancu",
"Stelian",
"Șerban",
"Ștefan",
"Teodor",
"Teofil",
"Teohari",
"Theodor",
"Tiberiu",
"Timotei",
"Titus",
"Todor",
"Toma",
"Traian",
"Tudor",
"Valentin",
"Valeriu",
"Valter",
"Vasile",
"Vasilică",
"Veniamin",
"Vicențiu",
"Victor",
"Vincențiu",
"Viorel",
"Visarion",
"Vlad",
"Vladimir",
"Vlaicu",
"Voicu",
"Zamfir",
"Zeno",
)
first_names = first_names_female + first_names_male
# sources: https://ro.wikipedia.org/wiki/Lista_celor_mai_uzuale_nume_de_familie#Rom%C3%A2nia
last_names = (
"Aanei",
"Ababei",
"Albu",
"Ardelean",
"Barbu",
"Cristea",
"Diaconescu",
"Diaconu",
"Dima",
"Dinu",
"Dobre",
"Dochioiu",
"Dumitrescu",
"Eftimie",
"Ene",
"Florea",
"Georgescu",
"Gheorghiu",
"Ionescu",
"Ioniță",
"Manole",
"Marin",
"Mazilescu",
"Mocanu",
"Nemeș",
"Nistor",
"Nistor",
"Niță",
"Oprea",
"Pop",
"Popa",
"Popescu",
"Preda",
"Pușcașu",
"Stan",
"Stancu",
"Stoica",
"Stănescu",
"Suciu",
"Tabacu",
"Toma",
"Tomescu",
"Tudor",
"Voinea",
)
| Provider |
python | pytorch__pytorch | torch/nn/modules/pooling.py | {
"start": 26516,
"end": 30710
} | class ____(_AvgPoolNd):
r"""Applies a 2D average pooling over an input signal composed of several input planes.
In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
can be precisely described as:
.. math::
out(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
input(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)
If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
for :attr:`padding` number of points.
Note:
When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
or the input. Sliding windows that would start in the right padded region are ignored.
.. note::
pad should be at most half of effective kernel size.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can either be:
- a single ``int`` or a single-element tuple -- in which case the same value is used for the height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
Args:
kernel_size: the size of the window
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on both sides
ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
count_include_pad: when True, will include the zero-padding in the averaging calculation
divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used.
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
- Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] -
\text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] -
\text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
Per the note above, if ``ceil_mode`` is True and :math:`(H_{out} - 1)\times \text{stride}[0]\geq H_{in}
+ \text{padding}[0]`, we skip the last window as it would start in the bottom padded region,
resulting in :math:`H_{out}` being reduced by one.
The same applies for :math:`W_{out}`.
Examples::
>>> # pool of square window of size=3, stride=2
>>> m = nn.AvgPool2d(3, stride=2)
>>> # pool of non-square window
>>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
>>> input = torch.randn(20, 16, 50, 32)
>>> output = m(input)
"""
__constants__ = [
"kernel_size",
"stride",
"padding",
"ceil_mode",
"count_include_pad",
"divisor_override",
]
kernel_size: _size_2_t
stride: _size_2_t
padding: _size_2_t
ceil_mode: bool
count_include_pad: bool
def __init__(
self,
kernel_size: _size_2_t,
stride: Optional[_size_2_t] = None,
padding: _size_2_t = 0,
ceil_mode: bool = False,
count_include_pad: bool = True,
divisor_override: Optional[int] = None,
) -> None:
super().__init__()
self.kernel_size = kernel_size
self.stride = stride if (stride is not None) else kernel_size
self.padding = padding
self.ceil_mode = ceil_mode
self.count_include_pad = count_include_pad
self.divisor_override = divisor_override
def forward(self, input: Tensor) -> Tensor:
"""Runs the forward pass."""
return F.avg_pool2d(
input,
self.kernel_size,
self.stride,
self.padding,
self.ceil_mode,
self.count_include_pad,
self.divisor_override,
)
| AvgPool2d |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-s3/source_s3/v4/legacy_config_transformer.py | {
"start": 588,
"end": 7840
} | class ____:
"""
Class that takes in S3 source configs in the legacy format and transforms them into
configs that can be used by the new S3 source built with the file-based CDK.
"""
@classmethod
def convert(cls, legacy_config: SourceS3Spec) -> Mapping[str, Any]:
transformed_config = {
"bucket": legacy_config.provider.bucket,
"streams": [
{
"name": legacy_config.dataset,
"globs": cls._create_globs(legacy_config.path_pattern),
"legacy_prefix": legacy_config.provider.path_prefix,
"validation_policy": "Emit Record",
}
],
}
if legacy_config.provider.start_date:
transformed_config["start_date"] = cls._transform_seconds_to_micros(legacy_config.provider.start_date)
if legacy_config.provider.aws_access_key_id:
transformed_config["aws_access_key_id"] = legacy_config.provider.aws_access_key_id
if legacy_config.provider.aws_secret_access_key:
transformed_config["aws_secret_access_key"] = legacy_config.provider.aws_secret_access_key
if legacy_config.provider.endpoint:
transformed_config["endpoint"] = legacy_config.provider.endpoint
if legacy_config.user_schema and legacy_config.user_schema != "{}":
transformed_config["streams"][0]["input_schema"] = legacy_config.user_schema
if legacy_config.format:
transformed_config["streams"][0]["format"] = cls._transform_file_format(legacy_config.format)
return transformed_config
@classmethod
def _create_globs(cls, path_pattern: str) -> List[str]:
if "|" in path_pattern:
return path_pattern.split("|")
else:
return [path_pattern]
@classmethod
def _transform_seconds_to_micros(cls, datetime_str: str) -> str:
try:
parsed_datetime = datetime.strptime(datetime_str, SECONDS_FORMAT)
return parsed_datetime.strftime(MICROS_FORMAT)
except ValueError as e:
raise ValueError("Timestamp could not be parsed when transforming legacy connector config") from e
@classmethod
def _transform_file_format(cls, format_options: Union[CsvFormat, ParquetFormat, AvroFormat, JsonlFormat]) -> Mapping[str, Any]:
if isinstance(format_options, AvroFormat):
return {"filetype": "avro"}
elif isinstance(format_options, CsvFormat):
additional_reader_options = cls.parse_config_options_str("additional_reader_options", format_options.additional_reader_options)
advanced_options = cls.parse_config_options_str("advanced_options", format_options.advanced_options)
csv_options = {
"filetype": "csv",
"delimiter": format_options.delimiter,
"quote_char": format_options.quote_char,
"double_quote": format_options.double_quote,
# values taken from https://github.com/apache/arrow/blob/43c05c56b37daa93e76b94bc3e6952d56d1ea3f2/cpp/src/arrow/csv/options.cc#L41-L45
"null_values": additional_reader_options.pop(
"null_values",
[
"",
"#N/A",
"#N/A N/A",
"#NA",
"-1.#IND",
"-1.#QNAN",
"-NaN",
"-nan",
"1.#IND",
"1.#QNAN",
"N/A",
"NA",
"NULL",
"NaN",
"n/a",
"nan",
"null",
],
),
"true_values": additional_reader_options.pop("true_values", ["1", "True", "TRUE", "true"]),
"false_values": additional_reader_options.pop("false_values", ["0", "False", "FALSE", "false"]),
"inference_type": "Primitive Types Only" if format_options.infer_datatypes else "None",
"strings_can_be_null": additional_reader_options.pop("strings_can_be_null", False),
}
if format_options.escape_char:
csv_options["escape_char"] = format_options.escape_char
if format_options.encoding:
csv_options["encoding"] = format_options.encoding
if skip_rows := advanced_options.pop("skip_rows", None):
csv_options["skip_rows_before_header"] = skip_rows
if skip_rows_after_names := advanced_options.pop("skip_rows_after_names", None):
csv_options["skip_rows_after_header"] = skip_rows_after_names
if column_names := advanced_options.pop("column_names", None):
csv_options["header_definition"] = {
"header_definition_type": "User Provided",
"column_names": column_names,
}
advanced_options.pop("autogenerate_column_names", None)
elif advanced_options.pop("autogenerate_column_names", None):
csv_options["header_definition"] = {"header_definition_type": "Autogenerated"}
else:
csv_options["header_definition"] = {"header_definition_type": "From CSV"}
cls._filter_legacy_noops(advanced_options)
if advanced_options or additional_reader_options:
raise ValueError(
"The config options you selected are no longer supported.\n" + f"advanced_options={advanced_options}"
if advanced_options
else "" + f"additional_reader_options={additional_reader_options}"
if additional_reader_options
else ""
)
return csv_options
elif isinstance(format_options, JsonlFormat):
return {"filetype": "jsonl"}
elif isinstance(format_options, ParquetFormat):
return {"filetype": "parquet", "decimal_as_float": True}
else:
# This should never happen because it would fail schema validation
raise ValueError(f"Format filetype {format_options} is not a supported file type")
@classmethod
def parse_config_options_str(cls, options_field: str, options_value: Optional[str]) -> Dict[str, Any]:
options_str = options_value or "{}"
try:
return json.loads(options_str)
except json.JSONDecodeError as error:
raise ValueError(f"Malformed {options_field} config json: {error}. Please ensure that it is a valid JSON.")
@staticmethod
def _filter_legacy_noops(advanced_options: Dict[str, Any]):
ignore_all = ("auto_dict_encode", "timestamp_parsers", "block_size")
ignore_by_value = (("check_utf8", False),)
for option in ignore_all:
advanced_options.pop(option, None)
for option, value_to_ignore in ignore_by_value:
if advanced_options.get(option) == value_to_ignore:
advanced_options.pop(option)
| LegacyConfigTransformer |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 55349,
"end": 56816
} | class ____(ASTOperator):
def __init__(self, op: str) -> None:
self.op = op
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTOperatorBuildIn):
return NotImplemented
return self.op == other.op
def __hash__(self) -> int:
return hash(self.op)
def get_id(self, version: int) -> str:
if version == 1:
ids = _id_operator_v1
if self.op not in ids:
raise NoOldIdError
else:
ids = _id_operator_v2
if self.op not in ids:
raise Exception(
'Internal error: Built-in operator "%s" can not '
'be mapped to an id.' % self.op
)
return ids[self.op]
def _stringify(self, transform: StringifyTransform) -> str:
if self.op in {'new', 'new[]', 'delete', 'delete[]'} or self.op[0] in 'abcnox':
return 'operator ' + self.op
else:
return 'operator' + self.op
def _describe_identifier(
self,
signode: TextElement,
identnode: TextElement,
env: BuildEnvironment,
symbol: Symbol,
) -> None:
signode += addnodes.desc_sig_keyword('operator', 'operator')
if self.op in {'new', 'new[]', 'delete', 'delete[]'} or self.op[0] in 'abcnox':
signode += addnodes.desc_sig_space()
identnode += addnodes.desc_sig_operator(self.op, self.op)
| ASTOperatorBuildIn |
python | coleifer__peewee | peewee.py | {
"start": 78269,
"end": 80145
} | class ____(Query):
def __init__(self, table, returning=None, **kwargs):
self.table = table
self._returning = returning
self._return_cursor = True if returning else False
super(_WriteQuery, self).__init__(**kwargs)
def cte(self, name, recursive=False, columns=None, materialized=None):
return CTE(name, self, recursive=recursive, columns=columns,
materialized=materialized)
@Node.copy
def returning(self, *returning):
self._returning = returning
self._return_cursor = True if returning else False
def apply_returning(self, ctx):
if self._returning:
with ctx.scope_source():
ctx.literal(' RETURNING ').sql(CommaNodeList(self._returning))
return ctx
def _execute(self, database):
if self._returning:
cursor = self.execute_returning(database)
else:
cursor = database.execute(self)
return self.handle_result(database, cursor)
def execute_returning(self, database):
if self._cursor_wrapper is None:
cursor = database.execute(self)
self._cursor_wrapper = self._get_cursor_wrapper(cursor)
return self._cursor_wrapper
def handle_result(self, database, cursor):
if self._return_cursor:
return cursor
return database.rows_affected(cursor)
def _set_table_alias(self, ctx):
ctx.alias_manager[self.table] = self.table.__name__
def __sql__(self, ctx):
super(_WriteQuery, self).__sql__(ctx)
# We explicitly set the table alias to the table's name, which ensures
# that if a sub-select references a column on the outer table, we won't
# assign it a new alias (e.g. t2) but will refer to it as table.column.
self._set_table_alias(ctx)
return ctx
| _WriteQuery |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-azure-inference/llama_index/embeddings/azure_inference/base.py | {
"start": 782,
"end": 7211
} | class ____(BaseEmbedding):
"""
Azure AI model inference for embeddings.
Examples:
```python
from llama_index.core import Settings
from llama_index.embeddings.azure_inference import AzureAIEmbeddingsModel
llm = AzureAIEmbeddingsModel(
endpoint="https://[your-endpoint].inference.ai.azure.com",
credential="your-api-key",
)
# # If using Microsoft Entra ID authentication, you can create the
# # client as follows
#
# from azure.identity import DefaultAzureCredential
#
# embed_model = AzureAIEmbeddingsModel(
# endpoint="https://[your-endpoint].inference.ai.azure.com",
# credential=DefaultAzureCredential()
# )
#
# # If you plan to use asynchronous calling, make sure to use the async
# # credentials as follows
#
# from azure.identity.aio import DefaultAzureCredential as DefaultAzureCredentialAsync
#
# embed_model = AzureAIEmbeddingsModel(
# endpoint="https://[your-endpoint].inference.ai.azure.com",
# credential=DefaultAzureCredentialAsync()
# )
# Once the client is instantiated, you can set the context to use the model
Settings.embed_model = embed_model
documents = SimpleDirectoryReader("./data").load_data()
index = VectorStoreIndex.from_documents(documents)
```
"""
model_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs model parameters."
)
_client: EmbeddingsClient = PrivateAttr()
_async_client: EmbeddingsClientAsync = PrivateAttr()
def __init__(
self,
endpoint: str = None,
credential: Union[str, AzureKeyCredential, "TokenCredential"] = None,
model_name: str = None,
api_version: str = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
num_workers: Optional[int] = None,
client_kwargs: Optional[Dict[str, Any]] = None,
**kwargs: Any,
):
client_kwargs = client_kwargs or {}
endpoint = get_from_param_or_env(
"endpoint", endpoint, "AZURE_INFERENCE_ENDPOINT", None
)
credential = get_from_param_or_env(
"credential", credential, "AZURE_INFERENCE_CREDENTIAL", None
)
credential = (
AzureKeyCredential(credential)
if isinstance(credential, str)
else credential
)
if not endpoint:
raise ValueError(
"You must provide an endpoint to use the Azure AI model inference LLM."
"Pass the endpoint as a parameter or set the AZURE_INFERENCE_ENDPOINT"
"environment variable."
)
if not credential:
raise ValueError(
"You must provide an credential to use the Azure AI model inference LLM."
"Pass the credential as a parameter or set the AZURE_INFERENCE_CREDENTIAL"
)
if api_version:
client_kwargs["api_version"] = api_version
client = EmbeddingsClient(
endpoint=endpoint,
credential=credential,
user_agent="llamaindex",
**client_kwargs,
)
async_client = EmbeddingsClientAsync(
endpoint=endpoint,
credential=credential,
user_agent="llamaindex",
**client_kwargs,
)
if not model_name:
try:
# Get model info from the endpoint. This method may not be supported by all
# endpoints.
model_info = client.get_model_info()
model_name = model_info.get("model_name", None)
except HttpResponseError:
logger.warning(
f"Endpoint '{self._client._config.endpoint}' does not support model metadata retrieval. "
"Unable to populate model attributes."
)
super().__init__(
model_name=model_name or "unknown",
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
num_workers=num_workers,
**kwargs,
)
self._client = client
self._async_client = async_client
@classmethod
def class_name(cls) -> str:
return "AzureAIEmbeddingsModel"
@property
def _model_kwargs(self) -> Dict[str, Any]:
additional_kwargs = {}
if self.model_name and self.model_name != "unknown":
additional_kwargs["model"] = self.model_name
if self.model_kwargs:
# pass any extra model parameters
additional_kwargs.update(self.model_kwargs)
return additional_kwargs
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._client.embed(input=[query], **self._model_kwargs).data[0].embedding
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return (
(await self._async_client.embed(input=[query], **self._model_kwargs))
.data[0]
.embedding
)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._client.embed(input=[text], **self._model_kwargs).data[0].embedding
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return (
(await self._async_client.embed(input=[text], **self._model_kwargs))
.data[0]
.embedding
)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
embedding_response = self._client.embed(input=texts, **self._model_kwargs).data
return [embed.embedding for embed in embedding_response]
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
embedding_response = await self._async_client.embed(
input=texts, **self._model_kwargs
)
return [embed.embedding for embed in embedding_response.data]
| AzureAIEmbeddingsModel |
python | huggingface__transformers | src/transformers/models/d_fine/modeling_d_fine.py | {
"start": 69893,
"end": 75642
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~DFineImageProcessor.post_process_object_detection`] to retrieve the
unnormalized (absolute) bounding boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`):
Stacked intermediate hidden states (output of each layer of the decoder).
intermediate_logits (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, config.num_labels)`):
Stacked intermediate logits (logits of each layer of the decoder).
intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked intermediate reference points (reference points of each layer of the decoder).
intermediate_predicted_corners (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked intermediate predicted corners (predicted corners of each layer of the decoder).
initial_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`):
Stacked initial reference points (initial reference points of each layer of the decoder).
init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Initial reference points sent through the Transformer decoder.
enc_topk_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
Logits of predicted bounding boxes coordinates in the encoder.
enc_topk_bboxes (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
Logits of predicted bounding boxes coordinates in the encoder.
enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
picked as region proposals in the first stage. Output of bounding box binary classification (i.e.
foreground and background).
enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
Logits of predicted bounding boxes coordinates in the first stage.
denoising_meta_values (`dict`):
Extra dictionary for the denoising related values
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
auxiliary_outputs: Optional[list[dict]] = None
last_hidden_state: Optional[torch.FloatTensor] = None
intermediate_hidden_states: Optional[torch.FloatTensor] = None
intermediate_logits: Optional[torch.FloatTensor] = None
intermediate_reference_points: Optional[torch.FloatTensor] = None
intermediate_predicted_corners: Optional[torch.FloatTensor] = None
initial_reference_points: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
init_reference_points: Optional[tuple[torch.FloatTensor]] = None
enc_topk_logits: Optional[torch.FloatTensor] = None
enc_topk_bboxes: Optional[torch.FloatTensor] = None
enc_outputs_class: Optional[torch.FloatTensor] = None
enc_outputs_coord_logits: Optional[torch.FloatTensor] = None
denoising_meta_values: Optional[dict] = None
@auto_docstring(
custom_intro="""
RT-DETR Model (consisting of a backbone and encoder-decoder) outputting bounding boxes and logits to be further
decoded into scores and classes.
"""
)
| DFineObjectDetectionOutput |
python | ray-project__ray | python/ray/tune/experimental/output.py | {
"start": 6518,
"end": 6615
} | class ____:
trial_infos: List[List[str]]
more_info: str
@dataclass
| _PerStatusTrialTableData |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 41514,
"end": 42251
} | class ____(ObjectBaseModel):
"""An ORM representation of a block schema reference."""
parent_block_schema_id: UUID = Field(
default=..., description="ID of block schema the reference is nested within"
)
parent_block_schema: Optional[BlockSchema] = Field(
default=None, description="The block schema the reference is nested within"
)
reference_block_schema_id: UUID = Field(
default=..., description="ID of the nested block schema"
)
reference_block_schema: Optional[BlockSchema] = Field(
default=None, description="The nested block schema"
)
name: str = Field(
default=..., description="The name that the reference is nested under"
)
| BlockSchemaReference |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-recharge/unit_tests/integration/pagination.py | {
"start": 271,
"end": 562
} | class ____(PaginationStrategy):
def __init__(self, request: HttpRequest, next_page_token: str) -> None:
self._next_page_token = next_page_token
def update(self, response: Dict[str, Any]) -> None:
response["next_cursor"] = self._next_page_token
| RechargePaginationStrategy |
python | python-poetry__poetry | src/poetry/console/commands/install.py | {
"start": 372,
"end": 8429
} | class ____(InstallerCommand):
name = "install"
description = "Installs the project dependencies."
options: ClassVar[list[Option]] = [
*InstallerCommand._group_dependency_options(),
option(
"sync",
None,
"Synchronize the environment with the locked packages and the specified"
" groups. (<warning>Deprecated</warning>)",
),
option(
"no-root", None, "Do not install the root package (the current project)."
),
option(
"no-directory",
None,
"Do not install any directory path dependencies; useful to install"
" dependencies without source code, e.g. for caching of Docker layers)",
flag=True,
multiple=False,
),
option(
"dry-run",
None,
"Output the operations but do not execute anything "
"(implicitly enables --verbose).",
),
option(
"extras",
"E",
"Extra sets of dependencies to install.",
flag=False,
multiple=True,
),
option("all-extras", None, "Install all extra dependencies."),
option("all-groups", None, "Install dependencies from all groups."),
option("only-root", None, "Exclude all dependencies."),
option(
"compile",
None,
"Compile Python source files to bytecode.",
),
]
help = """\
The <info>install</info> command reads the <comment>poetry.lock</> file from
the current directory, processes it, and downloads and installs all the
libraries and dependencies outlined in that file. If the file does not
exist it will look for <comment>pyproject.toml</> and do the same.
<info>poetry install</info>
By default, the above command will also install the current project. To install only the
dependencies and not including the current project, run the command with the
<info>--no-root</info> option like below:
<info> poetry install --no-root</info>
If you want to use Poetry only for dependency management but not for packaging,
you can set the "package-mode" to false in your pyproject.toml file.
"""
_loggers: ClassVar[list[str]] = [
"poetry.repositories.pypi_repository",
"poetry.inspection.info",
]
@property
def activated_groups(self) -> set[NormalizedName]:
if self.option("only-root"):
return set()
else:
return super().activated_groups
@property
def _alternative_sync_command(self) -> str:
return "poetry sync"
@property
def _with_synchronization(self) -> bool:
with_synchronization = self.option("sync")
if with_synchronization:
self.line_error(
"<warning>The `<fg=yellow;options=bold>--sync</>` option is"
" deprecated and slated for removal, use the"
f" `<fg=yellow;options=bold>{self._alternative_sync_command}</>`"
" command instead.</warning>"
)
return bool(with_synchronization)
def handle(self) -> int:
from poetry.core.masonry.utils.module import ModuleOrPackageNotFoundError
from poetry.masonry.builders.editable import EditableBuilder
if not self.option("no-plugins"):
PluginManager.ensure_project_plugins(self.poetry, self.io)
if self.option("extras") and self.option("all-extras"):
self.line_error(
"<error>You cannot specify explicit"
" `<fg=yellow;options=bold>--extras</>` while installing"
" using `<fg=yellow;options=bold>--all-extras</>`.</error>"
)
return 1
if self.option("only-root") and any(
self.option(key) for key in {"with", "without", "only", "all-groups"}
):
self.line_error(
"<error>The `<fg=yellow;options=bold>--with</>`,"
" `<fg=yellow;options=bold>--without</>`,"
" `<fg=yellow;options=bold>--only</>` and"
" `<fg=yellow;options=bold>--all-groups</>`"
" options cannot be used with"
" the `<fg=yellow;options=bold>--only-root</>`"
" option.</error>"
)
return 1
if self.option("only-root") and self.option("no-root"):
self.line_error(
"<error>You cannot specify `<fg=yellow;options=bold>--no-root</>`"
" when using `<fg=yellow;options=bold>--only-root</>`.</error>"
)
return 1
if (
self.option("only") or self.option("with") or self.option("without")
) and self.option("all-groups"):
self.line_error(
"<error>You cannot specify `<fg=yellow;options=bold>--with</>`,"
" `<fg=yellow;options=bold>--without</>`, or"
" `<fg=yellow;options=bold>--only</>` when using"
" `<fg=yellow;options=bold>--all-groups</>`.</error>"
)
return 1
extras: list[str]
if self.option("all-extras"):
extras = list(self.poetry.package.extras.keys())
else:
extras = []
for extra in self.option("extras", []):
extras += extra.split()
self.installer.extras(extras)
self.installer.only_groups(self.activated_groups)
self.installer.skip_directory(self.option("no-directory"))
self.installer.dry_run(self.option("dry-run"))
self.installer.requires_synchronization(self._with_synchronization)
self.installer.executor.enable_bytecode_compilation(self.option("compile"))
self.installer.verbose(self.io.is_verbose())
return_code = self.installer.run()
if return_code != 0:
return return_code
if self.option("no-root") or not self.poetry.is_package_mode:
return 0
log_install = (
"<b>Installing</> the current project:"
f" <c1>{self.poetry.package.pretty_name}</c1>"
f" (<{{tag}}>{self.poetry.package.pretty_version}</>)"
)
overwrite = self.io.output.is_decorated() and not self.io.is_debug()
self.line("")
self.write(log_install.format(tag="c2"))
if not overwrite:
self.line("")
if self.option("dry-run"):
self.line("")
return 0
# Prior to https://github.com/python-poetry/poetry-core/pull/629
# the existence of a module/package was checked when creating the
# EditableBuilder. Afterwards, the existence is checked after
# executing the build script (if there is one),
# i.e. during EditableBuilder.build().
try:
builder = EditableBuilder(self.poetry, self.env, self.io)
builder.build()
except (ModuleOrPackageNotFoundError, FileNotFoundError) as e:
# This is likely due to the fact that the project is an application
# not following the structure expected by Poetry.
# No need for an editable install in this case.
self.line("")
self.line_error(
f"Error: The current project could not be installed: {e}\n"
"If you do not want to install the current project"
" use <c1>--no-root</c1>.\n"
"If you want to use Poetry only for dependency management"
" but not for packaging, you can disable package mode by setting"
" <c1>package-mode = false</> in your pyproject.toml file.\n"
"If you did intend to install the current project, you may need"
" to set `packages` in your pyproject.toml file.\n",
style="error",
)
return 1
if overwrite:
self.overwrite(log_install.format(tag="success"))
self.line("")
return 0
| InstallCommand |
python | huggingface__transformers | src/transformers/models/camembert/modeling_camembert.py | {
"start": 38687,
"end": 43396
} | class ____(CamembertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.roberta = CamembertModel(config, add_pooling_layer=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], MultipleChoiceModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
token_type_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
This parameter can only be used when the model is initialized with `type_vocab_size` parameter with value
>= 2. All the value in this tensor should be always < type_vocab_size.
[What are token type IDs?](../glossary#token-type-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
position_ids (`torch.LongTensor` of shape `(batch_size, num_choices, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
"""
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
flat_inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.roberta(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
inputs_embeds=flat_inputs_embeds,
return_dict=True,
**kwargs,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
# move labels to correct device
labels = labels.to(reshaped_logits.device)
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
| CamembertForMultipleChoice |
python | pandas-dev__pandas | pandas/io/formats/format.py | {
"start": 49890,
"end": 50294
} | class ____(_GenericArrayFormatter):
def _format_strings(self) -> list[str]:
if self.leading_space is False:
formatter_str = lambda x: f"{x:d}".format(x=x)
else:
formatter_str = lambda x: f"{x: d}".format(x=x)
formatter = self.formatter or formatter_str
fmt_values = [formatter(x) for x in self.values]
return fmt_values
| _IntArrayFormatter |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/linear_discriminant_analysis.py | {
"start": 152,
"end": 1395
} | class ____():
"""The Linear Discriminant Analysis classifier, also known as Fisher's linear discriminant.
Besides classification, it can also be used to reduce the dimensionality of the dataset.
"""
def __init__(self):
self.w = None
def transform(self, X, y):
self.fit(X, y)
# Project data onto vector
X_transform = X.dot(self.w)
return X_transform
def fit(self, X, y):
# Separate data by class
X1 = X[y == 0]
X2 = X[y == 1]
# Calculate the covariance matrices of the two datasets
cov1 = calculate_covariance_matrix(X1)
cov2 = calculate_covariance_matrix(X2)
cov_tot = cov1 + cov2
# Calculate the mean of the two datasets
mean1 = X1.mean(0)
mean2 = X2.mean(0)
mean_diff = np.atleast_1d(mean1 - mean2)
# Determine the vector which when X is projected onto it best separates the
# data by class. w = (mean1 - mean2) / (cov1 + cov2)
self.w = np.linalg.pinv(cov_tot).dot(mean_diff)
def predict(self, X):
y_pred = []
for sample in X:
h = sample.dot(self.w)
y = 1 * (h < 0)
y_pred.append(y)
return y_pred
| LDA |
python | getsentry__sentry | tests/symbolicator/test_payload_full.py | {
"start": 2744,
"end": 14819
} | class ____(RelayStoreHelper, TransactionTestCase):
@pytest.fixture(autouse=True)
def initialize(self, live_server):
self.project.update_option("sentry:builtin_symbol_sources", [])
self.min_ago = before_now(minutes=1).isoformat()
with (
patch("sentry.auth.system.is_internal_ip", return_value=True),
self.options({"system.url-prefix": live_server.url}),
):
# Run test case
yield
def get_event(self, event_id):
return eventstore.backend.get_event_by_id(self.project.id, event_id)
def test_real_resolving(self) -> None:
url = reverse(
"sentry-api-0-dsym-files",
kwargs={
"organization_id_or_slug": self.project.organization.slug,
"project_id_or_slug": self.project.slug,
},
)
self.login_as(user=self.user)
out = BytesIO()
f = zipfile.ZipFile(out, "w")
f.write(get_fixture_path("native", "hello.dsym"), "dSYM/hello")
f.close()
response = self.client.post(
url,
{
"file": SimpleUploadedFile(
"symbols.zip", out.getvalue(), content_type="application/zip"
)
},
format="multipart",
)
assert response.status_code == 201, response.content
assert len(response.json()) == 1
event = self.post_and_retrieve_event(REAL_RESOLVING_EVENT_DATA)
assert event.data["culprit"] == "main"
candidates = event.data["debug_meta"]["images"][0]["candidates"]
redact_location(candidates)
event.data["debug_meta"]["images"][0]["candidates"] = candidates
insta_snapshot_native_stacktrace_data(self, event.data)
def test_debug_id_resolving(self) -> None:
file = File.objects.create(
name="crash.pdb", type="default", headers={"Content-Type": "text/x-breakpad"}
)
path = get_fixture_path("native", "windows.sym")
with open(path, "rb") as f:
file.putfile(f)
ProjectDebugFile.objects.create(
file=file,
object_name="crash.pdb",
cpu_name="x86",
project_id=self.project.id,
debug_id="3249d99d-0c40-4931-8610-f4e4fb0b6936-1",
code_id="5AB380779000",
)
self.login_as(user=self.user)
event_data = {
"contexts": {
"device": {"arch": "x86"},
"os": {"build": "", "name": "Windows", "type": "os", "version": "10.0.14393"},
},
"debug_meta": {
"images": [
{
"id": "3249d99d-0c40-4931-8610-f4e4fb0b6936-1",
"image_addr": "0x2a0000",
"image_size": 36864,
"name": "C:\\projects\\breakpad-tools\\windows\\Release\\crash.exe",
"type": "symbolic",
}
]
},
"exception": {
"stacktrace": {
"frames": [
{
"function": "<unknown>",
"instruction_addr": "0x2a2a3d",
"package": "C:\\projects\\breakpad-tools\\windows\\Release\\crash.exe",
}
]
},
"thread_id": 1636,
"type": "EXCEPTION_ACCESS_VIOLATION_WRITE",
"value": "Fatal Error: EXCEPTION_ACCESS_VIOLATION_WRITE",
},
"platform": "native",
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
assert event.data["culprit"] == "main"
candidates = event.data["debug_meta"]["images"][0]["candidates"]
redact_location(candidates)
event.data["debug_meta"]["images"][0]["candidates"] = candidates
insta_snapshot_native_stacktrace_data(self, event.data)
@pytest.mark.skip(reason="flaky: #93040")
def test_missing_dsym(self) -> None:
self.login_as(user=self.user)
event = self.post_and_retrieve_event(REAL_RESOLVING_EVENT_DATA)
assert event.data["culprit"] == "unknown"
insta_snapshot_native_stacktrace_data(self, event.data)
def test_missing_debug_images(self) -> None:
self.login_as(user=self.user)
payload = dict(project=self.project.id, **REAL_RESOLVING_EVENT_DATA)
del payload["debug_meta"]
event = self.post_and_retrieve_event(payload)
assert event.data["culprit"] == "unknown"
insta_snapshot_native_stacktrace_data(self, event.data)
def test_resolving_with_candidates_sentry_source(self) -> None:
# Checks the candidates with a sentry source URI for location
file = File.objects.create(
name="crash.pdb", type="default", headers={"Content-Type": "text/x-breakpad"}
)
path = get_fixture_path("native", "windows.sym")
with open(path, "rb") as f:
file.putfile(f)
ProjectDebugFile.objects.create(
file=file,
object_name="crash.pdb",
cpu_name="x86",
project_id=self.project.id,
debug_id="3249d99d-0c40-4931-8610-f4e4fb0b6936-1",
code_id="5AB380779000",
)
self.login_as(user=self.user)
event_data = {
"contexts": {
"device": {"arch": "x86"},
},
"debug_meta": {
"images": [
{
"id": "3249d99d-0c40-4931-8610-f4e4fb0b6936-1",
"image_addr": "0x2a0000",
"image_size": 36864,
"name": "C:\\projects\\breakpad-tools\\windows\\Release\\crash.exe",
"type": "symbolic",
}
]
},
"exception": {
"stacktrace": {
"frames": [
{
"instruction_addr": "0x2a2a3d",
}
]
},
"type": "EXCEPTION_ACCESS_VIOLATION_WRITE",
"value": "Fatal Error: EXCEPTION_ACCESS_VIOLATION_WRITE",
},
"platform": "native",
"timestamp": before_now(seconds=1).isoformat(),
}
event = self.post_and_retrieve_event(event_data)
assert event.data["culprit"] == "main"
candidates = event.data["debug_meta"]["images"][0]["candidates"]
redact_location(candidates)
self.insta_snapshot(candidates)
def test_resolve_mixed_stack_trace(self) -> None:
# JS debug files:
debug_id = "c941d872-af1f-4f0c-a7ff-ad3d295fe153"
compressed = BytesIO(b"SYSB")
with zipfile.ZipFile(compressed, "a") as zip_file:
zip_file.writestr("files/_/_/test.min.js", load_fixture("test.min.js"))
zip_file.writestr("files/_/_/test.map", load_fixture("test.map"))
zip_file.writestr(
"manifest.json",
json.dumps(
{
"files": {
"files/_/_/test.min.js": {
"url": "~/test.min.js",
"type": "minified_source",
"headers": {
"debug-id": debug_id,
"sourcemap": "test.map",
},
},
"files/_/_/test.map": {
"url": "~/file.wc.sourcemap.js",
"type": "source_map",
"headers": {
"debug-id": debug_id,
},
},
},
}
),
)
compressed.seek(0)
bundle_file = File.objects.create(name="bundle.zip", type="artifact.bundle")
bundle_file.putfile(compressed)
artifact_bundle = ArtifactBundle.objects.create(
organization_id=self.organization.id,
bundle_id=uuid4(),
file=bundle_file,
artifact_count=2,
)
ProjectArtifactBundle.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
artifact_bundle=artifact_bundle,
)
DebugIdArtifactBundle.objects.create(
organization_id=self.organization.id,
debug_id=debug_id,
artifact_bundle=artifact_bundle,
source_file_type=SourceFileType.MINIFIED_SOURCE.value,
)
DebugIdArtifactBundle.objects.create(
organization_id=self.organization.id,
debug_id=debug_id,
artifact_bundle=artifact_bundle,
source_file_type=SourceFileType.SOURCE_MAP.value,
)
# native debug files:
wasm_file = File.objects.create(
name="test.wasm", type="default", headers={"Content-Type": "application/wasm"}
)
with open(get_local_fixture_path("a18fd85d4a4eb893022d6bfad846b1.debug"), "rb") as f:
wasm_file.putfile(f)
ProjectDebugFile.objects.create(
file=wasm_file,
object_name="test.wasm",
cpu_name="wasm32",
project_id=self.project.id,
debug_id="bda18fd8-5d4a-4eb8-9302-2d6bfad846b1",
code_id="bda18fd85d4a4eb893022d6bfad846b1",
)
data = {
"timestamp": self.min_ago,
"message": "hello",
"platform": "javascript",
"release": "abc",
"exception": {
"values": [
{
"type": "Error",
"stacktrace": {
"frames": [
{
"abs_path": "http://example.com/test.min.js",
"lineno": 1,
"colno": 183,
},
{
"platform": "native",
"instruction_addr": "0x8c",
"addr_mode": "rel:0",
},
]
},
}
]
},
"debug_meta": {
"images": [
{
"type": "sourcemap",
"debug_id": debug_id,
"code_file": "http://example.com/test.min.js",
},
{
"type": "wasm",
"debug_id": "bda18fd8-5d4a-4eb8-9302-2d6bfad846b1",
"code_id": "bda18fd85d4a4eb893022d6bfad846b1",
"debug_file": "file://foo.invalid/demo.wasm",
},
]
},
}
event = self.post_and_retrieve_event(data)
exception = event.interfaces["exception"]
frames = exception.values[0].stacktrace.frames
assert frames[0].abs_path == "http://example.com/test.js"
assert frames[0].lineno == 20
assert frames[0].colno == 5
assert frames[0].context_line == " invoke(data);"
assert frames[1].abs_path == "/Users/mitsuhiko/Development/wasm-example/simple/src/lib.rs"
assert frames[1].lineno == 19
assert frames[1].function == "internal_func"
images = event.data["debug_meta"]["images"]
assert images[1]["debug_status"] == "found"
| SymbolicatorResolvingIntegrationTest |
python | google__jax | tests/tree_util_test.py | {
"start": 39124,
"end": 41007
} | class ____(parameterized.TestCase):
@parameterized.parameters(
(StaticInt(2),),
(StaticTuple((2, None)),),
(StaticDict(foo=2),),
)
def test_trace_just_once_with_same_static(self, y):
num_called = 0
@jax.jit
def fn(x: int, static_y: StaticInt):
nonlocal num_called
num_called += 1
unstatic_y = type(static_y).__base__(static_y)
[y] = tree_util.tree_leaves(unstatic_y)
return x + y
fn(1, y)
fn(3, y)
self.assertEqual(num_called, 1)
def test_name(self):
self.assertEqual(StaticInt.__name__, "StaticInt")
self.assertEqual(BlackBox.__name__, "BlackBox")
@parameterized.parameters(
(StaticInt(2), StaticInt(4)),
(StaticTuple((2, None)), StaticTuple((4, None))),
(StaticDict(foo=2), StaticDict(foo=4)),
)
def test_trace_twice_with_different_static(self, y1, y2):
num_called = 0
@jax.jit
def fn(x: int, static_y: StaticInt):
nonlocal num_called
num_called += 1
unstatic_y = type(static_y).__base__(static_y)
[y] = tree_util.tree_leaves(unstatic_y)
return x + y
fn(1, y1)
fn(3, y2)
self.assertEqual(num_called, 2)
def test_trace_just_once_if_static_looks_constant(self):
num_called = 0
@jax.jit
def fn(x: int, static_y: BlackBox):
nonlocal num_called
num_called += 1
return x + static_y.value
self.assertEqual(fn(1, BlackBox(2)), 3)
self.assertEqual(fn(3, BlackBox(1)), 5)
self.assertEqual(num_called, 1)
def test_serialize_treedef(self):
tree_structure = jax.tree_util.tree_structure([1, [2], (3,), {'a': 4, 'b': 5}])
serialized = tree_structure.serialize_using_proto()
new_structure = jax.tree_util.PyTreeDef.deserialize_using_proto(
jax.tree_util.default_registry,
serialized
)
self.assertEqual(tree_structure, new_structure)
| StaticTest |
python | huggingface__transformers | src/transformers/models/vits/modeling_vits.py | {
"start": 30473,
"end": 31272
} | class ____(nn.Module):
def __init__(self, config: VitsConfig):
super().__init__()
self.channels = config.depth_separable_channels
self.translate = nn.Parameter(torch.zeros(self.channels, 1))
self.log_scale = nn.Parameter(torch.zeros(self.channels, 1))
def forward(self, inputs, padding_mask, global_conditioning=None, reverse=False):
if not reverse:
outputs = self.translate + torch.exp(self.log_scale) * inputs
outputs = outputs * padding_mask
log_determinant = torch.sum(self.log_scale * padding_mask, [1, 2])
return outputs, log_determinant
else:
outputs = (inputs - self.translate) * torch.exp(-self.log_scale) * padding_mask
return outputs, None
| VitsElementwiseAffine |
python | coleifer__peewee | tests/libs/mock.py | {
"start": 60130,
"end": 60628
} | class ____(MagicMixin, NonCallableMock):
"""A version of `MagicMock` that isn't callable."""
def mock_add_spec(self, spec, spec_set=False):
"""Add a spec to a mock. `spec` can either be an object or a
list of strings. Only attributes on the `spec` can be fetched as
attributes from the mock.
If `spec_set` is True then only attributes on the spec can be set."""
self._mock_add_spec(spec, spec_set)
self._mock_set_magics()
| NonCallableMagicMock |
python | pallets__jinja | tests/test_imports.py | {
"start": 629,
"end": 4523
} | class ____:
def test_context_imports(self, test_env):
t = test_env.from_string('{% import "module" as m %}{{ m.test() }}')
assert t.render(foo=42) == "[|23]"
t = test_env.from_string(
'{% import "module" as m without context %}{{ m.test() }}'
)
assert t.render(foo=42) == "[|23]"
t = test_env.from_string(
'{% import "module" as m with context %}{{ m.test() }}'
)
assert t.render(foo=42) == "[42|23]"
t = test_env.from_string('{% from "module" import test %}{{ test() }}')
assert t.render(foo=42) == "[|23]"
t = test_env.from_string(
'{% from "module" import test without context %}{{ test() }}'
)
assert t.render(foo=42) == "[|23]"
t = test_env.from_string(
'{% from "module" import test with context %}{{ test() }}'
)
assert t.render(foo=42) == "[42|23]"
def test_import_needs_name(self, test_env):
test_env.from_string('{% from "foo" import bar %}')
test_env.from_string('{% from "foo" import bar, baz %}')
with pytest.raises(TemplateSyntaxError):
test_env.from_string('{% from "foo" import %}')
def test_no_trailing_comma(self, test_env):
with pytest.raises(TemplateSyntaxError):
test_env.from_string('{% from "foo" import bar, %}')
with pytest.raises(TemplateSyntaxError):
test_env.from_string('{% from "foo" import bar,, %}')
with pytest.raises(TemplateSyntaxError):
test_env.from_string('{% from "foo" import, %}')
def test_trailing_comma_with_context(self, test_env):
test_env.from_string('{% from "foo" import bar, baz with context %}')
test_env.from_string('{% from "foo" import bar, baz, with context %}')
test_env.from_string('{% from "foo" import bar, with context %}')
test_env.from_string('{% from "foo" import bar, with, context %}')
test_env.from_string('{% from "foo" import bar, with with context %}')
with pytest.raises(TemplateSyntaxError):
test_env.from_string('{% from "foo" import bar,, with context %}')
with pytest.raises(TemplateSyntaxError):
test_env.from_string('{% from "foo" import bar with context, %}')
def test_exports(self, test_env):
m = test_env.from_string(
"""
{% macro toplevel() %}...{% endmacro %}
{% macro __private() %}...{% endmacro %}
{% set variable = 42 %}
{% for item in [1] %}
{% macro notthere() %}{% endmacro %}
{% endfor %}
"""
).module
assert m.toplevel() == "..."
assert not hasattr(m, "__missing")
assert m.variable == 42
assert not hasattr(m, "notthere")
def test_not_exported(self, test_env):
t = test_env.from_string("{% from 'module' import nothing %}{{ nothing() }}")
with pytest.raises(UndefinedError, match="does not export the requested name"):
t.render()
def test_import_with_globals(self, test_env):
t = test_env.from_string(
'{% import "module" as m %}{{ m.test() }}', globals={"foo": 42}
)
assert t.render() == "[42|23]"
t = test_env.from_string('{% import "module" as m %}{{ m.test() }}')
assert t.render() == "[|23]"
def test_import_with_globals_override(self, test_env):
t = test_env.from_string(
'{% set foo = 41 %}{% import "module" as m %}{{ m.test() }}',
globals={"foo": 42},
)
assert t.render() == "[42|23]"
def test_from_import_with_globals(self, test_env):
t = test_env.from_string(
'{% from "module" import test %}{{ test() }}',
globals={"foo": 42},
)
assert t.render() == "[42|23]"
| TestImports |
python | getsentry__sentry | src/sentry/notifications/platform/templates/sample.py | {
"start": 9408,
"end": 9682
} | class ____(NotificationData):
source = "performance-monitoring"
metric_name: str
threshold: str
current_value: str
project_name: str
chart_url: str
investigation_url: str
@template_registry.register(PerformanceAlertData.source)
| PerformanceAlertData |
python | django__django | tests/apps/query_performing_app/apps.py | {
"start": 728,
"end": 818
} | class ____(ModelQueryAppConfig):
database = "default"
| QueryDefaultDatabaseModelAppConfig |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/tokens.py | {
"start": 9389,
"end": 9452
} | class ____(Token):
__slots__ = ()
id = ','
| FlowEntryToken |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride4.py | {
"start": 1044,
"end": 1107
} | class ____:
def method1[T: BaseC](self, x: T) -> T: ...
| BaseC |
python | getsentry__sentry | src/sentry/apidocs/examples/issue_examples.py | {
"start": 4088,
"end": 4598
} | class ____:
ORGANIZATION_GROUP_INDEX_GET = [
OpenApiExample(
"Return a list of issues for an organization",
value=[SIMPLE_ISSUE],
response_only=True,
status_codes=["200"],
)
]
ORGANIZATION_GROUP_INDEX_PUT = [
OpenApiExample(
"Return the update results for issues in an organization",
value=MUTATE_ISSUE_RESULT,
response_only=True,
status_codes=["200"],
)
]
| IssueExamples |
python | google__jax | tests/mosaic/gpu_test.py | {
"start": 26893,
"end": 27248
} | class ____:
"""A type that represents a 8-bit signed integer.
This is a workaround to bypass the fact that we don't have a proper 8-bit
integer type class available in MLIR, and can't instantiate types without a
MLIR context.
"""
@staticmethod
def get(): # pylint: disable=no-method-argument
return ir.IntegerType.get_signless(8)
| I8Type |
python | sqlalchemy__sqlalchemy | test/engine/test_execute.py | {
"start": 107330,
"end": 118565
} | class ____(fixtures.TestBase):
__requires__ = ("sqlite",)
def setup_test(self):
e = create_engine("sqlite://")
connection = Mock(get_server_version_info=Mock(return_value="5.0"))
def connect(*args, **kwargs):
return connection
dbapi = Mock(
sqlite_version_info=(99, 9, 9),
version_info=(99, 9, 9),
sqlite_version="99.9.9",
paramstyle="named",
connect=Mock(side_effect=connect),
)
sqlite3 = e.dialect.dbapi
dbapi.Error = (sqlite3.Error,)
dbapi.ProgrammingError = sqlite3.ProgrammingError
self.dbapi = dbapi
self.ProgrammingError = sqlite3.ProgrammingError
def test_wraps_connect_in_dbapi(self):
dbapi = self.dbapi
dbapi.connect = Mock(side_effect=self.ProgrammingError("random error"))
try:
create_engine("sqlite://", module=dbapi).connect()
assert False
except tsa.exc.DBAPIError as de:
assert not de.connection_invalidated
def test_handle_error_event_connect(self):
dbapi = self.dbapi
dbapi.connect = Mock(side_effect=self.ProgrammingError("random error"))
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://", module=dbapi)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.engine is eng
assert ctx.connection is None
raise MySpecialException("failed operation")
assert_raises(MySpecialException, eng.connect)
def test_handle_error_event_revalidate(self):
dbapi = self.dbapi
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://", module=dbapi, _initialize=False)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.engine is eng
assert ctx.connection is conn
assert isinstance(
ctx.sqlalchemy_exception, tsa.exc.ProgrammingError
)
raise MySpecialException("failed operation")
conn = eng.connect()
conn.invalidate()
dbapi.connect = Mock(side_effect=self.ProgrammingError("random error"))
assert_raises(MySpecialException, getattr, conn, "connection")
def test_handle_error_event_implicit_revalidate(self):
dbapi = self.dbapi
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://", module=dbapi, _initialize=False)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.engine is eng
assert ctx.connection is conn
assert isinstance(
ctx.sqlalchemy_exception, tsa.exc.ProgrammingError
)
raise MySpecialException("failed operation")
conn = eng.connect()
conn.invalidate()
dbapi.connect = Mock(side_effect=self.ProgrammingError("random error"))
assert_raises(MySpecialException, conn.execute, select(1))
def test_handle_error_custom_connect(self):
dbapi = self.dbapi
class MySpecialException(Exception):
pass
def custom_connect():
raise self.ProgrammingError("random error")
eng = create_engine("sqlite://", module=dbapi, creator=custom_connect)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.engine is eng
assert ctx.connection is None
raise MySpecialException("failed operation")
assert_raises(MySpecialException, eng.connect)
def test_handle_error_event_connect_invalidate_flag(self):
dbapi = self.dbapi
dbapi.connect = Mock(
side_effect=self.ProgrammingError(
"Cannot operate on a closed database."
)
)
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://", module=dbapi)
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.is_disconnect
ctx.is_disconnect = False
try:
eng.connect()
assert False
except tsa.exc.DBAPIError as de:
assert not de.connection_invalidated
def test_cant_connect_stay_invalidated(self):
class MySpecialException(Exception):
pass
eng = create_engine("sqlite://")
@event.listens_for(eng, "handle_error")
def handle_error(ctx):
assert ctx.is_disconnect
conn = eng.connect()
conn.invalidate()
eng.pool._creator = Mock(
side_effect=self.ProgrammingError(
"Cannot operate on a closed database."
)
)
try:
conn.connection
assert False
except tsa.exc.DBAPIError:
assert conn.invalidated
def test_dont_touch_non_dbapi_exception_on_connect(self):
dbapi = self.dbapi
dbapi.connect = Mock(side_effect=TypeError("I'm not a DBAPI error"))
e = create_engine("sqlite://", module=dbapi)
e.dialect.is_disconnect = is_disconnect = Mock()
assert_raises_message(TypeError, "I'm not a DBAPI error", e.connect)
eq_(is_disconnect.call_count, 0)
def test_ensure_dialect_does_is_disconnect_no_conn(self):
"""test that is_disconnect() doesn't choke if no connection,
cursor given."""
dialect = testing.db.dialect
dbapi = dialect.dbapi
assert not dialect.is_disconnect(
dbapi.OperationalError("test"), None, None
)
def test_dont_create_transaction_on_initialize(self):
"""test that engine init doesn't invoke autobegin.
this happened implicitly in 1.4 due to use of a non-future
connection for initialize.
to fix for 2.0 we added a new flag _allow_autobegin=False
for init purposes only.
"""
e = create_engine("sqlite://")
init_connection = None
def mock_initialize(connection):
# definitely trigger what would normally be an autobegin
connection.execute(select(1))
nonlocal init_connection
init_connection = connection
with (
mock.patch.object(e._connection_cls, "begin") as mock_begin,
mock.patch.object(
e.dialect, "initialize", Mock(side_effect=mock_initialize)
) as mock_init,
):
conn = e.connect()
eq_(mock_begin.mock_calls, [])
is_not(init_connection, None)
is_not(conn, init_connection)
is_false(init_connection._allow_autobegin)
eq_(mock_init.mock_calls, [mock.call(init_connection)])
# assert the mock works too
conn.begin()
eq_(mock_begin.mock_calls, [mock.call()])
conn.close()
def test_invalidate_on_connect(self):
"""test that is_disconnect() is called during connect.
interpretation of connection failures are not supported by
every backend.
"""
dbapi = self.dbapi
dbapi.connect = Mock(
side_effect=self.ProgrammingError(
"Cannot operate on a closed database."
)
)
e = create_engine("sqlite://", module=dbapi)
try:
e.connect()
assert False
except tsa.exc.DBAPIError as de:
assert de.connection_invalidated
@testing.only_on("sqlite+pysqlite")
def test_initialize_connect_calls(self):
"""test for :ticket:`5497`, on_connect not called twice"""
m1 = Mock()
cls_ = testing.db.dialect.__class__
class SomeDialect(cls_):
def initialize(self, connection):
super().initialize(connection)
m1.initialize(connection)
def on_connect(self):
oc = super().on_connect()
def my_on_connect(conn):
if oc:
oc(conn)
m1.on_connect(conn)
return my_on_connect
u1 = Mock(
username=None,
password=None,
host=None,
port=None,
query={},
database=None,
_instantiate_plugins=lambda kw: (u1, [], kw),
_get_entrypoint=Mock(
return_value=Mock(get_dialect_cls=lambda u: SomeDialect)
),
)
eng = create_engine(u1, poolclass=QueuePool)
# make sure other dialects aren't getting pulled in here
eq_(eng.name, "sqlite")
c = eng.connect()
dbapi_conn_one = c.connection.dbapi_connection
c.close()
eq_(
m1.mock_calls,
[call.on_connect(dbapi_conn_one), call.initialize(mock.ANY)],
)
c = eng.connect()
eq_(
m1.mock_calls,
[call.on_connect(dbapi_conn_one), call.initialize(mock.ANY)],
)
c2 = eng.connect()
dbapi_conn_two = c2.connection.dbapi_connection
is_not(dbapi_conn_one, dbapi_conn_two)
eq_(
m1.mock_calls,
[
call.on_connect(dbapi_conn_one),
call.initialize(mock.ANY),
call.on_connect(dbapi_conn_two),
],
)
c.close()
c2.close()
@testing.only_on("sqlite+pysqlite")
def test_initialize_connect_race(self):
"""test for :ticket:`6337` fixing the regression in :ticket:`5497`,
dialect init is mutexed"""
m1 = []
cls_ = testing.db.dialect.__class__
class SomeDialect(cls_):
supports_statement_cache = True
def initialize(self, connection):
super().initialize(connection)
m1.append("initialize")
def on_connect(self):
oc = super().on_connect()
def my_on_connect(conn):
if oc:
oc(conn)
m1.append("on_connect")
return my_on_connect
u1 = Mock(
username=None,
password=None,
host=None,
port=None,
query={},
database=None,
_instantiate_plugins=lambda kw: (u1, [], kw),
_get_entrypoint=Mock(
return_value=Mock(get_dialect_cls=lambda u: SomeDialect)
),
)
for j in range(5):
m1[:] = []
eng = create_engine(
u1,
poolclass=NullPool,
connect_args={"check_same_thread": False},
)
def go():
c = eng.connect()
c.execute(text("select 1"))
c.close()
threads = [threading.Thread(target=go) for i in range(10)]
for t in threads:
t.start()
for t in threads:
t.join()
eq_(m1, ["on_connect", "initialize"] + ["on_connect"] * 9)
| OnConnectTest |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/provider_dependencies.py | {
"start": 7533,
"end": 7605
} | class ____(NamedTuple):
package_name: str
version: str
| PackageInfo |
python | google__jax | tests/batching_test.py | {
"start": 49338,
"end": 51241
} | class ____:
name: str | None
axis: int | None
def __init__(self, name: str, axis: int | None):
assert (name is None) == (axis is None)
self.name = name
self.axis = axis
def named_mul(x: NamedArray, y: NamedArray) -> NamedArray:
if x.names != y.names: raise Exception
return NamedArray(x.names, lax.mul(x.data, y.data))
# TODO(mattjj): don't make this a pytree
register_pytree_node(NamedArray,
lambda x: ((x.data,), x.names),
lambda names, xs: NamedArray(names, xs[0]))
def named_to_elt(cont: Callable[[Array, int | None], ArrayElt],
_: Int, val: NamedArray, spec: NamedMapSpec) -> NamedArray:
if spec.name is None:
return val
else:
elt_names, mapped_name = list_pop(val.names, spec.axis)
if mapped_name != spec.name: raise Exception
elt = cont(val.data, spec.axis)
return NamedArray(elt_names, elt)
def named_from_elt(cont: Callable[[int, ArrayElt, int | None], Array],
axis_size: int, elt: NamedArray, annotation: NamedMapSpec
) -> NamedArray:
data = cont(axis_size, elt.data, annotation.axis)
if annotation.axis is None:
return NamedArray(elt.names, data)
else:
names = list_insert(elt.names, annotation.axis, annotation.name)
return NamedArray(names, data)
@contextmanager
def temporarily_register_named_array_vmappable():
batching.register_vmappable(NamedArray, NamedMapSpec, int,
named_to_elt, named_from_elt, None)
try:
yield
finally:
batching.unregister_vmappable(NamedArray)
a = TypeVar('a')
def list_pop(lst: list[a], idx: int) -> a:
lst = list(lst)
return lst, lst.pop(idx)
def list_insert(lst: list[a], idx: int, val: a) -> list[a]:
lst = list(lst)
lst.insert(idx, val)
return lst
@jtu.thread_unsafe_test_class() # temporary registration isn't thread-safe
| NamedMapSpec |
python | django__django | tests/admin_inlines/models.py | {
"start": 2681,
"end": 3042
} | class ____(models.Model):
dummy = models.IntegerField(help_text="Awesome stacked help text is awesome.")
holder = models.ForeignKey(Holder4, models.CASCADE)
class Meta:
constraints = [
models.UniqueConstraint(
fields=["dummy", "holder"], name="unique_stacked_dummy_per_holder"
)
]
| Inner4Stacked |
python | pypa__pipenv | pipenv/vendor/click/types.py | {
"start": 17978,
"end": 19490
} | class ____(_NumberRangeBase, FloatParamType):
"""Restrict a :data:`click.FLOAT` value to a range of accepted
values. See :ref:`ranges`.
If ``min`` or ``max`` are not passed, any value is accepted in that
direction. If ``min_open`` or ``max_open`` are enabled, the
corresponding boundary is not included in the range.
If ``clamp`` is enabled, a value outside the range is clamped to the
boundary instead of failing. This is not supported if either
boundary is marked ``open``.
.. versionchanged:: 8.0
Added the ``min_open`` and ``max_open`` parameters.
"""
name = "float range"
def __init__(
self,
min: t.Optional[float] = None,
max: t.Optional[float] = None,
min_open: bool = False,
max_open: bool = False,
clamp: bool = False,
) -> None:
super().__init__(
min=min, max=max, min_open=min_open, max_open=max_open, clamp=clamp
)
if (min_open or max_open) and clamp:
raise TypeError("Clamping is not supported for open bounds.")
def _clamp(self, bound: float, dir: "te.Literal[1, -1]", open: bool) -> float:
if not open:
return bound
# Could use Python 3.9's math.nextafter here, but clamping an
# open float range doesn't seem to be particularly useful. It's
# left up to the user to write a callback to do it if needed.
raise RuntimeError("Clamping is not supported for open bounds.")
| FloatRange |
python | pydantic__pydantic | tests/test_forward_ref.py | {
"start": 30762,
"end": 31400
} | class ____(BaseModel):
bar: Bar
"""
)
extras_schema = module_2.Foo.__pydantic_core_schema__['schema']['fields']['bar']['schema']['schema'][
'extras_schema'
]
assert extras_schema == {'type': 'int'}
def test_pydantic_extra_forward_ref_separate_module_subclass(create_module: Any) -> None:
@create_module
def module_1():
from pydantic import BaseModel
MyDict = dict
class Bar(BaseModel, extra='allow'):
__pydantic_extra__: 'MyDict[str, int]'
module_2 = create_module(
f"""
from pydantic import BaseModel
from {module_1.__name__} import Bar
| Foo |
python | sympy__sympy | sympy/plotting/pygletplot/plot_axes.py | {
"start": 5859,
"end": 8417
} | class ____(PlotAxesBase):
def __init__(self, parent_axes):
super().__init__(parent_axes)
def draw_axis(self, axis, color):
ticks = self._p._axis_ticks[axis]
radius = self._p._tick_length / 2.0
if len(ticks) < 2:
return
# calculate the vector for this axis
axis_lines = [[0, 0, 0], [0, 0, 0]]
axis_lines[0][axis], axis_lines[1][axis] = ticks[0], ticks[-1]
axis_vector = vec_sub(axis_lines[1], axis_lines[0])
# calculate angle to the z direction vector
pos_z = get_direction_vectors()[2]
d = abs(dot_product(axis_vector, pos_z))
d = d / vec_mag(axis_vector)
# don't draw labels if we're looking down the axis
labels_visible = abs(d - 1.0) > 0.02
# draw the ticks and labels
for tick in ticks:
self.draw_tick_line(axis, color, radius, tick, labels_visible)
# draw the axis line and labels
self.draw_axis_line(axis, color, ticks[0], ticks[-1], labels_visible)
def draw_axis_line(self, axis, color, a_min, a_max, labels_visible):
axis_line = [[0, 0, 0], [0, 0, 0]]
axis_line[0][axis], axis_line[1][axis] = a_min, a_max
self.draw_line(axis_line, color)
if labels_visible:
self.draw_axis_line_labels(axis, color, axis_line)
def draw_axis_line_labels(self, axis, color, axis_line):
if not self._p._label_axes:
return
axis_labels = [axis_line[0][::], axis_line[1][::]]
axis_labels[0][axis] -= 0.3
axis_labels[1][axis] += 0.3
a_str = ['X', 'Y', 'Z'][axis]
self.draw_text("-" + a_str, axis_labels[0], color)
self.draw_text("+" + a_str, axis_labels[1], color)
def draw_tick_line(self, axis, color, radius, tick, labels_visible):
tick_axis = {0: 1, 1: 0, 2: 1}[axis]
tick_line = [[0, 0, 0], [0, 0, 0]]
tick_line[0][axis] = tick_line[1][axis] = tick
tick_line[0][tick_axis], tick_line[1][tick_axis] = -radius, radius
self.draw_line(tick_line, color)
if labels_visible:
self.draw_tick_line_label(axis, color, radius, tick)
def draw_tick_line_label(self, axis, color, radius, tick):
if not self._p._label_axes:
return
tick_label_vector = [0, 0, 0]
tick_label_vector[axis] = tick
tick_label_vector[{0: 1, 1: 0, 2: 1}[axis]] = [-1, 1, 1][
axis] * radius * 3.5
self.draw_text(str(tick), tick_label_vector, color, scale=0.5)
| PlotAxesOrdinate |
python | getsentry__sentry | tests/sentry/middleware/test_access_log_middleware.py | {
"start": 7846,
"end": 8509
} | class ____(LogCaptureAPITestCase):
endpoint = "ratelimit-endpoint"
def test_access_log_rate_limited(self) -> None:
self._caplog.set_level(logging.INFO, logger="sentry")
self.get_error_response(status_code=429)
self.assert_access_log_recorded()
# no token because the endpoint was not hit
assert not hasattr(self.captured_logs[0], "token_type")
assert self.captured_logs[0].limit == "0"
assert self.captured_logs[0].remaining == "0"
assert self.captured_logs[0].group == RateLimitedEndpoint.rate_limits.group
@all_silo_test
@override_settings(SENTRY_SELF_HOSTED=False)
| TestAccessLogRateLimited |