| language (string, 1 class) | repo (string, 346 values) | path (string, 6–201 chars) | class_span (dict) | source (string, 21–2.38M chars) | target (string, 1–96 chars) |
|---|---|---|---|---|---|
python | joke2k__faker | faker/providers/company/es_CL/__init__.py | {
"start": 74,
"end": 11553
} | class ____(CompanyProvider):
formats = (
"{{last_name}} {{company_suffix}}",
"{{company_prefix}} {{last_name}} y {{last_name}} {{company_suffix}}",
"{{company_prefix}} {{last_name}}, {{last_name}} y {{last_name}} {{company_suffix}}",
"{{company_prefix}} {{last_name}} y Asociados {{company_suffix}}",
"{{last_name}}, {{last_name}} y {{last_name}} {{company_suffix}}",
"{{last_name}} y {{last_name}} {{company_suffix}}",
"{{name}} E.I.R.L.",
"{{name}} EIRL",
)
catch_phrase_words = (
(
"habilidad",
"acceso",
"adaptador",
"algoritmo",
"alianza",
"analista",
"aplicación",
"enfoque",
"arquitectura",
"archivo",
"inteligencia artificial",
"array",
"actitud",
"medición",
"gestión presupuestaria",
"capacidad",
"desafío",
"circuito",
"colaboración",
"complejidad",
"concepto",
"conglomeración",
"contingencia",
"núcleo",
"fidelidad",
"base de datos",
"data-warehouse",
"definición",
"emulación",
"codificar",
"encriptar",
"extranet",
"firmware",
"flexibilidad",
"focus group",
"previsión",
"base de trabajo",
"función",
"funcionalidad",
"interfaz gráfica",
"groupware",
"interfaz gráfico de usuario",
"hardware",
"soporte",
"jerarquía",
"conjunto",
"implementación",
"infraestructura",
"iniciativa",
"instalación",
"conjunto de instrucciones",
"interfaz",
"intranet",
"base del conocimiento",
"red de area local",
"aprovechar",
"matrices",
"metodologías",
"middleware",
"migración",
"modelo",
"moderador",
"monitorizar",
"arquitectura abierta",
"sistema abierto",
"orquestar",
"paradigma",
"paralelismo",
"política",
"portal",
"estructura de precios",
"proceso de mejora",
"producto",
"productividad",
"proyecto",
"proyección",
"protocolo",
"línea segura",
"software",
"solución",
"estandarización",
"estrategia",
"estructura",
"éxito",
"superestructura",
"soporte",
"sinergia",
"mediante",
"marco de tiempo",
"caja de herramientas",
"utilización",
"website",
"fuerza de trabajo",
),
(
"24 horas",
"24/7",
"3ra generación",
"4ta generación",
"5ta generación",
"6ta generación",
"analizada",
"asimétrica",
"asíncrona",
"monitorizada por red",
"bidireccional",
"bifurcada",
"generada por el cliente",
"cliente-servidor",
"coherente",
"cohesiva",
"compuesto",
"sensible al contexto",
"basado en el contexto",
"basado en contenido",
"dedicada",
"generado por la demanda",
"didáctica",
"direccional",
"discreta",
"dinámica",
"potenciada",
"acompasada",
"ejecutiva",
"explícita",
"tolerante a fallos",
"innovadora",
"amplio abanico",
"global",
"heurística",
"alto nivel",
"holística",
"homogénea",
"híbrida",
"incremental",
"intangible",
"interactiva",
"intermedia",
"local",
"logística",
"maximizada",
"metódica",
"misión crítica",
"móvil",
"modular",
"motivadora",
"multimedia",
"multiestado",
"multitarea",
"nacional",
"basado en necesidades",
"neutral",
"nueva generación",
"no-volátil",
"orientado a objetos",
"óptima",
"optimizada",
"radical",
"tiempo real",
"recíproca",
"regional",
"escalable",
"secundaria",
"orientada a soluciones",
"estable",
"estática",
"sistemática",
"sistémica",
"tangible",
"terciaria",
"transicional",
"uniforme",
"valor añadido",
"vía web",
"defectos cero",
"tolerancia cero",
),
(
"adaptativo",
"avanzado",
"asimilado",
"automatizado",
"balanceado",
"enfocado al negocio",
"centralizado",
"clonado",
"compatible",
"configurable",
"multiplataforma",
"enfocado al cliente",
"personalizable",
"descentralizado",
"digitalizado",
"distribuido",
"diverso",
"mejorado",
"en toda la empresa",
"ergonómico",
"exclusivo",
"expandido",
"extendido",
"cara a cara",
"enfocado",
"de primera línea",
"totalmente configurable",
"basado en funcionalidad",
"fundamental",
"horizontal",
"implementado",
"innovador",
"integrado",
"intuitivo",
"inverso",
"administrado",
"mandatorio",
"monitoreado",
"multicanal",
"multilateral",
"multi-capas",
"en red",
"basado en objetos",
"de arquitectura abierta",
"open-source",
"operativo",
"optimizado",
"opcional",
"orgánico",
"organizado",
"perseverante",
"persistente",
"polarizado",
"preventivo",
"proactivo",
"enfocado a ganancias",
"programable",
"progresivo",
"llave pública",
"enfocado a la calidad",
"reactivo",
"realineado",
"recontextualizado",
"reducido",
"con ingeniería inversa",
"de tamaño adecuado",
"robusto",
"seguro",
"compartible",
"sincronizado",
"orientado a equipos",
"total",
"universal",
"actualizable",
"centrado en el usuario",
"versátil",
"virtual",
"visionario",
),
)
bsWords = (
(
"implementa",
"utiliza",
"integra",
"optimiza",
"evoluciona",
"transforma",
"abraza",
"habilita",
"orquesta",
"reinventa",
"agrega",
"mejora",
"incentiva",
"modifica",
"empodera",
"monetiza",
"fortalece",
"facilita",
"sinergiza",
"crea marca",
"crece",
"sintetiza",
"entrega",
"mezcla",
"incuba",
"compromete",
"maximiza",
"visualiza",
"innova",
"escala",
"libera",
"maneja",
"extiende",
"revoluciona",
"genera",
"explota",
"transiciona",
"itera",
"cultiva",
"redefine",
"recontextualiza",
),
(
"sinergias",
"paradigmas",
"marcados",
"socios",
"infraestructuras",
"plataformas",
"iniciativas",
"canales",
"communidades",
"ROI",
"soluciones",
"portales",
"nichos",
"tecnologías",
"contenido",
"cadena de producción",
"convergencia",
"relaciones",
"arquitecturas",
"interfaces",
"comercio electrónico",
"sistemas",
"ancho de banda",
"modelos",
"entregables",
"usuarios",
"esquemas",
"redes",
"aplicaciones",
"métricas",
"funcionalidades",
"experiencias",
"servicios web",
"metodologías",
),
(
"valor agregado",
"verticales",
"proactivas",
"robustas",
"revolucionarias",
"escalables",
"de punta",
"innovadoras",
"intuitivas",
"estratégicas",
"e-business",
"de misión crítica",
"uno-a-uno",
"24/7",
"end-to-end",
"globales",
"B2B",
"B2C",
"granulares",
"sin fricciones",
"virtuales",
"virales",
"dinámicas",
"24/365",
"magnéticas",
"listo para la web",
"interactivas",
"punto-com",
"sexi",
"en tiempo real",
"eficientes",
"front-end",
"distribuidas",
"extensibles",
"llave en mano",
"de clase mundial",
"open-source",
"plataforma cruzada",
"de paquete",
"empresariales",
"integrado",
"impacto total",
"inalámbrica",
"transparentes",
"de siguiente generación",
"lo último",
"centrado al usuario",
"visionarias",
"personalizado",
"ubicuas",
"plug-and-play",
"colaborativas",
"holísticas",
"ricas",
),
)
company_prefixes: ElementsType[str] = (
"Corporación",
"Compañía",
"Comercial",
"Despacho",
"Grupo",
"Holding",
"Club",
"Industrias",
"Laboratorio",
"Proyectos",
)
company_suffixes: ElementsType[str] = (
"Sociedad Anónima",
"Limitada",
"S.A.",
"S.p.A.",
"SPA",
"Ltda.",
)
def company_prefix(self) -> str:
"""
:example: 'Grupo'
"""
return self.random_element(self.company_prefixes)
| Provider |
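A minimal usage sketch for the es_CL company provider in the row above (not part of the dataset row). It assumes only the public Faker API: the provider is picked up automatically when the `es_CL` locale is requested, and `company()` fills the `formats` templates shown above. Outputs are random.

```python
from faker import Faker

fake = Faker("es_CL")

# company() picks one of the `formats` templates above and fills in last names,
# the company_prefix() defined in this class, and a company_suffix.
print(fake.company())         # e.g. "Comercial Rojas y Asociados Ltda." (random)
print(fake.company_prefix())  # one element of company_prefixes, e.g. "Grupo"
print(fake.company_suffix())  # one element of company_suffixes, e.g. "S.A."
```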
python | ansible__ansible | lib/ansible/plugins/action/service.py | {
"start": 878,
"end": 4298
} | class ____(ActionBase):
TRANSFERS_FILES = False
UNUSED_PARAMS = {
'systemd': ['pattern', 'runlevel', 'sleep', 'arguments', 'args'],
}
# HACK: list of unqualified service manager names that are/were built-in, we'll prefix these with `ansible.legacy` to
# avoid collisions with collections search
BUILTIN_SVC_MGR_MODULES = set(['openwrt_init', 'service', 'systemd', 'sysvinit'])
def run(self, tmp=None, task_vars=None):
""" handler for package operations """
self._supports_check_mode = True
self._supports_async = True
super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
module = self._task.args.get('use', 'auto').lower()
if module == 'auto':
try:
# if we delegate, we should use delegated host's facts
expr = "hostvars[delegate_to].ansible_facts.service_mgr" if self._task.delegate_to else "ansible_facts.service_mgr"
module = self._templar.resolve_variable_expression(expr, local_variables=dict(delegate_to=self._task.delegate_to))
except Exception:
pass # could not get it from template!
try:
if module == 'auto':
facts = self._execute_module(
module_name='ansible.legacy.setup',
module_args=dict(gather_subset='!all', filter='ansible_service_mgr'), task_vars=task_vars)
self._display.debug("Facts %s" % facts)
module = facts.get('ansible_facts', {}).get('ansible_service_mgr', 'auto')
if not module or module == 'auto' or not self._shared_loader_obj.module_loader.has_plugin(module):
module = 'ansible.legacy.service'
if module != 'auto':
# run the 'service' module
new_module_args = self._task.args.copy()
if 'use' in new_module_args:
del new_module_args['use']
if module in self.UNUSED_PARAMS:
for unused in self.UNUSED_PARAMS[module]:
if unused in new_module_args:
del new_module_args[unused]
self._display.warning('Ignoring "%s" as it is not used in "%s"' % (unused, module))
# get defaults for specific module
context = self._shared_loader_obj.module_loader.find_plugin_with_context(module, collection_list=self._task.collections)
new_module_args = _apply_action_arg_defaults(context.resolved_fqcn, self._task, new_module_args, self._templar)
# collection prefix known internal modules to avoid collisions from collections search, while still allowing library/ overrides
if module in self.BUILTIN_SVC_MGR_MODULES:
module = 'ansible.legacy.' + module
self._display.vvvv("Running %s" % module)
return self._execute_module(module_name=module, module_args=new_module_args, task_vars=task_vars, wrap_async=self._task.async_val)
else:
raise AnsibleActionFail('Could not detect which service manager to use. Try gathering facts or setting the "use" option.')
finally:
if not self._task.async_val:
self._remove_tmp_path(self._connection._shell.tmpdir)
| ActionModule |
python | tiangolo__fastapi | docs_src/body_multiple_params/tutorial004_an.py | {
"start": 282,
"end": 703
} | class ____(BaseModel):
username: str
full_name: Union[str, None] = None
@app.put("/items/{item_id}")
async def update_item(
*,
item_id: int,
item: Item,
user: User,
importance: Annotated[int, Body(gt=0)],
q: Union[str, None] = None,
):
results = {"item_id": item_id, "item": item, "user": user, "importance": importance}
if q:
results.update({"q": q})
return results
| User |
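A hedged sketch of the request the `update_item` endpoint above expects (not part of the dataset row). With several body parameters, FastAPI nests each one under its own key; the `Item` fields used here (`name`, `description`, `price`, `tax`) are assumed from the surrounding tutorial, since `Item` is not shown in this row, and `httpx` is only an illustrative client.

```python
import httpx  # any HTTP client would do; httpx is used only for illustration

payload = {
    "item": {"name": "Foo", "description": "A thing", "price": 42.0, "tax": 3.2},
    "user": {"username": "dave", "full_name": "Dave Grohl"},
    "importance": 5,  # singular body value; must be > 0 because of Body(gt=0)
}

# item_id comes from the path, q from the query string, everything else from the body.
resp = httpx.put("http://127.0.0.1:8000/items/7", params={"q": "hello"}, json=payload)
print(resp.json())  # echoes item_id, item, user, importance and q
```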
python | dagster-io__dagster | python_modules/libraries/dagster-shared/dagster_shared_tests/test_check.py | {
"start": 52364,
"end": 52393
} | class ____(Generic[T]): ...
| Gen |
python | huggingface__transformers | tests/utils/test_feature_extraction_utils.py | {
"start": 2139,
"end": 5784
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._token = TOKEN
def test_push_to_hub(self):
with TemporaryHubRepo(token=self._token) as tmp_repo:
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
feature_extractor.push_to_hub(tmp_repo.repo_id, token=self._token)
new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(tmp_repo.repo_id)
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v, getattr(new_feature_extractor, k))
def test_push_to_hub_via_save_pretrained(self):
with TemporaryHubRepo(token=self._token) as tmp_repo:
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token
)
new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(tmp_repo.repo_id)
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v, getattr(new_feature_extractor, k))
def test_push_to_hub_in_organization(self):
with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo:
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
feature_extractor.push_to_hub(tmp_repo.repo_id, token=self._token)
new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(tmp_repo.repo_id)
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v, getattr(new_feature_extractor, k))
def test_push_to_hub_in_organization_via_save_pretrained(self):
with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo:
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token
)
new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(tmp_repo.repo_id)
for k, v in feature_extractor.__dict__.items():
self.assertEqual(v, getattr(new_feature_extractor, k))
def test_push_to_hub_dynamic_feature_extractor(self):
with TemporaryHubRepo(token=self._token) as tmp_repo:
CustomFeatureExtractor.register_for_auto_class()
feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
feature_extractor.push_to_hub(tmp_repo.repo_id, token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map,
{"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
)
new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_repo.repo_id, trust_remote_code=True)
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| FeatureExtractorPushToHubTester |
python | PrefectHQ__prefect | tests/server/orchestration/api/test_variables.py | {
"start": 21857,
"end": 27114
} | class ____:
async def test_duplicate_variable_basic(
self,
client: AsyncClient,
variable,
):
duplicate_data = {
"name": f"{variable.name}_copy",
"value": variable.value,
"tags": variable.tags,
}
res = await client.post(
"/variables/",
json=duplicate_data,
)
assert res.status_code == 201
res_data = res.json()
assert res_data["id"] != str(variable.id)
assert res_data["name"] == duplicate_data["name"]
assert res_data["value"] == duplicate_data["value"]
assert res_data["tags"] == duplicate_data["tags"]
async def test_duplicate_variable_fails_same_name(
self,
client: AsyncClient,
variable,
):
duplicate_data = {
"name": variable.name,
"value": variable.value,
"tags": variable.tags,
}
res = await client.post(
"/variables/",
json=duplicate_data,
)
assert res.status_code == 409
async def test_duplicate_variable_preserves_original(
self,
client: AsyncClient,
variable,
):
duplicate_data = {
"name": f"{variable.name}_copy",
"value": variable.value,
"tags": variable.tags,
}
await client.post("/variables/", json=duplicate_data)
original_res = await client.get(f"/variables/{variable.id}")
original_data = original_res.json()
assert original_data["id"] == str(variable.id)
assert original_data["name"] == variable.name
assert original_data["value"] == variable.value
assert original_data["tags"] == variable.tags
async def test_duplicate_variable_multiple_copies(
self,
client: AsyncClient,
variable,
):
copies = []
for i in range(3):
duplicate_data = {
"name": f"{variable.name}_copy_{i}",
"value": variable.value,
"tags": variable.tags,
}
res = await client.post(
"/variables/",
json=duplicate_data,
)
assert res.status_code == 201
copies.append(res.json())
for i, copy in enumerate(copies):
assert copy["name"] == f"{variable.name}_copy_{i}"
assert copy["id"] != str(variable.id)
for j, other_copy in enumerate(copies):
if i != j:
assert copy["id"] != other_copy["id"]
async def test_duplicate_variable_chain_duplication(
self,
client: AsyncClient,
variable,
):
first_duplicate_data = {
"name": f"{variable.name}_copy",
"value": variable.value,
"tags": variable.tags,
}
first_res = await client.post(
"/variables/",
json=first_duplicate_data,
)
assert first_res.status_code == 201
first_duplicate = first_res.json()
second_duplicate_data = {
"name": f"{variable.name}_copy_of_copy",
"value": first_duplicate["value"],
"tags": first_duplicate["tags"],
}
second_res = await client.post(
"/variables/",
json=second_duplicate_data,
)
assert second_res.status_code == 201
second_duplicate = second_res.json()
assert second_duplicate["id"] != first_duplicate["id"]
assert second_duplicate["id"] != str(variable.id)
assert second_duplicate["name"] == f"{variable.name}_copy_of_copy"
async def test_duplicate_variable_with_json_value(
self,
client: AsyncClient,
session: AsyncSession,
):
json_value = {"config": {"timeout": 300, "retries": 3}, "enabled": True}
await create_variable(
session,
VariableCreate(name="json_variable", value=json_value, tags=["json"]),
)
await session.commit()
duplicate_data = {
"name": "json_variable_copy",
"value": json_value,
"tags": ["json", "duplicated"],
}
res = await client.post(
"/variables/",
json=duplicate_data,
)
assert res.status_code == 201
res_data = res.json()
assert res_data["value"] == json_value
assert res_data["tags"] == ["json", "duplicated"]
async def test_duplicate_variable_with_null_value(
self,
client: AsyncClient,
session: AsyncSession,
):
await create_variable(
session,
VariableCreate(name="null_variable", value=None, tags=["null"]),
)
await session.commit()
duplicate_data = {
"name": "null_variable_copy",
"value": None,
"tags": ["null", "duplicated"],
}
res = await client.post(
"/variables/",
json=duplicate_data,
)
assert res.status_code == 201
res_data = res.json()
assert res_data["value"] is None
assert res_data["name"] == "null_variable_copy"
| TestDuplicateVariable |
python | Lightning-AI__lightning | tests/tests_pytorch/callbacks/test_weight_averaging.py | {
"start": 1111,
"end": 1816
} | class ____(BoringModel):
def __init__(self, batch_norm: bool = True) -> None:
super().__init__()
layers = [nn.Linear(32, 32)]
if batch_norm:
layers.append(nn.BatchNorm1d(32))
layers += [nn.ReLU(), nn.Linear(32, 2)]
self.layer = nn.Sequential(*layers)
self.crash_on_epoch = None
def training_step(self, batch: Tensor, batch_idx: int) -> None:
if self.crash_on_epoch and self.trainer.current_epoch >= self.crash_on_epoch:
raise Exception("CRASH")
return super().training_step(batch, batch_idx)
def configure_optimizers(self) -> None:
return torch.optim.SGD(self.layer.parameters(), lr=0.1)
| TestModel |
python | pydantic__pydantic | tests/mypy/modules/root_models.py | {
"start": 526,
"end": 770
} | class ____(BaseModel, Generic[V]):
m1: Maybe[int]
m2: Maybe[V]
m3: Maybe
Model[str](m1=1, m2='dog', m3=[])
m = Model[str](m1=Maybe(None), m2=Maybe('dog'), m3=Maybe([]))
Model(m1=None, m2={}, m3=[])
assert_type(m.m1, Maybe[int])
| Model |
python | catalyst-team__catalyst | catalyst/contrib/layers/pooling.py | {
"start": 190,
"end": 985
} | class ____(nn.Module):
"""Applies a 2D global average pooling operation over an input signal
composed of several input planes.
@TODO: Docs (add `Example`). Contribution is welcome.
"""
def __init__(self):
"""Constructor method for the ``GlobalAvgPool2d`` class."""
super().__init__()
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward call."""
h, w = x.shape[2:]
return F.avg_pool2d(input=x, kernel_size=(h, w))
@staticmethod
def out_features(in_features):
"""Returns number of channels produced by the pooling.
Args:
in_features: number of channels in the input sample
Returns:
number of output features
"""
return in_features
| GlobalAvgPool2d |
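The `GlobalAvgPool2d` docstring above still carries `@TODO: Docs (add Example)`, so here is a hedged usage sketch (not part of the dataset row). The import path is inferred from the row's file path and may differ between catalyst versions.

```python
import torch
from catalyst.contrib.layers.pooling import GlobalAvgPool2d  # path assumed from the row

pool = GlobalAvgPool2d()
x = torch.randn(8, 64, 7, 7)             # (batch, channels, height, width)
y = pool(x)                              # averages over the whole 7x7 spatial grid
print(y.shape)                           # torch.Size([8, 64, 1, 1])
print(GlobalAvgPool2d.out_features(64))  # 64 -- channel count is unchanged
```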
python | getsentry__sentry-python | sentry_sdk/integrations/gql.py | {
"start": 1067,
"end": 4801
} | class ____(Integration):
identifier = "gql"
@staticmethod
def setup_once():
# type: () -> None
gql_version = parse_version(gql.__version__)
_check_minimum_version(GQLIntegration, gql_version)
_patch_execute()
def _data_from_document(document):
# type: (DocumentNode) -> EventDataType
try:
operation_ast = get_operation_ast(document)
data = {"query": print_ast(document)} # type: EventDataType
if operation_ast is not None:
data["variables"] = operation_ast.variable_definitions
if operation_ast.name is not None:
data["operationName"] = operation_ast.name.value
return data
except (AttributeError, TypeError):
return dict()
def _transport_method(transport):
# type: (Union[Transport, AsyncTransport]) -> str
"""
The RequestsHTTPTransport allows defining the HTTP method; all
other transports use POST.
"""
try:
return transport.method
except AttributeError:
return "POST"
def _request_info_from_transport(transport):
# type: (Union[Transport, AsyncTransport, None]) -> Dict[str, str]
if transport is None:
return {}
request_info = {
"method": _transport_method(transport),
}
try:
request_info["url"] = transport.url
except AttributeError:
pass
return request_info
def _patch_execute():
# type: () -> None
real_execute = gql.Client.execute
@ensure_integration_enabled(GQLIntegration, real_execute)
def sentry_patched_execute(self, document_or_request, *args, **kwargs):
# type: (gql.Client, DocumentNode, Any, Any) -> Any
scope = sentry_sdk.get_isolation_scope()
scope.add_event_processor(_make_gql_event_processor(self, document_or_request))
try:
return real_execute(self, document_or_request, *args, **kwargs)
except TransportQueryError as e:
event, hint = event_from_exception(
e,
client_options=sentry_sdk.get_client().options,
mechanism={"type": "gql", "handled": False},
)
sentry_sdk.capture_event(event, hint)
raise e
gql.Client.execute = sentry_patched_execute
def _make_gql_event_processor(client, document_or_request):
# type: (gql.Client, Union[DocumentNode, gql.GraphQLRequest]) -> EventProcessor
def processor(event, hint):
# type: (Event, dict[str, Any]) -> Event
try:
errors = hint["exc_info"][1].errors
except (AttributeError, KeyError):
errors = None
request = event.setdefault("request", {})
request.update(
{
"api_target": "graphql",
**_request_info_from_transport(client.transport),
}
)
if should_send_default_pii():
if GraphQLRequest is not None and isinstance(
document_or_request, GraphQLRequest
):
# In v4.0.0, gql moved to using GraphQLRequest instead of
# DocumentNode in execute
# https://github.com/graphql-python/gql/pull/556
document = document_or_request.document
else:
document = document_or_request
request["data"] = _data_from_document(document)
contexts = event.setdefault("contexts", {})
response = contexts.setdefault("response", {})
response.update(
{
"data": {"errors": errors},
"type": response,
}
)
return event
return processor
| GQLIntegration |
python | cherrypy__cherrypy | cherrypy/__init__.py | {
"start": 8367,
"end": 9204
} | class ____(_local):
"""A container for thread-specific data."""
thread_data = _ThreadData()
# Monkeypatch pydoc to allow help() to go through the threadlocal proxy.
# Jan 2007: no Googleable examples of anyone else replacing pydoc.resolve.
# The only other way would be to change what is returned from type(request)
# and that's not possible in pure Python (you'd have to fake ob_type).
def _cherrypy_pydoc_resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, _ThreadLocalProxy):
thing = getattr(serving, thing.__attrname__)
return _pydoc._builtin_resolve(thing, forceload)
try:
import pydoc as _pydoc
_pydoc._builtin_resolve = _pydoc.resolve
_pydoc.resolve = _cherrypy_pydoc_resolve
except ImportError:
pass
| _ThreadData |
python | streamlit__streamlit | lib/tests/streamlit/elements/link_button_test.py | {
"start": 846,
"end": 3123
} | class ____(DeltaGeneratorTestCase):
"""Test ability to marshall link_button protos."""
def test_just_label(self):
"""Test that it can be called with label and string or bytes data."""
st.link_button("the label", url="https://streamlit.io")
c = self.get_delta_from_queue().new_element.link_button
assert c.label == "the label"
assert c.type == "secondary"
assert not c.disabled
def test_just_disabled(self):
"""Test that it can be called with disabled param."""
st.link_button("the label", url="https://streamlit.io", disabled=True)
c = self.get_delta_from_queue().new_element.link_button
assert c.disabled
def test_url_exist(self):
"""Test that file url exist in proto."""
st.link_button("the label", url="https://streamlit.io")
c = self.get_delta_from_queue().new_element.link_button
assert "https://streamlit.io" in c.url
@parameterized.expand(["primary", "secondary", "tertiary"])
def test_type(self, type):
"""Test that it can be called with type param."""
st.link_button("the label", url="https://streamlit.io", type=type)
c = self.get_delta_from_queue().new_element.link_button
assert c.type == type
def test_emoji_icon(self):
"""Test that it can be called with an emoji icon."""
st.link_button("the label", url="https://streamlit.io", icon="🎈")
c = self.get_delta_from_queue().new_element.link_button
assert c.icon == "🎈"
def test_material_icon(self):
"""Test that it can be called with a material icon."""
st.link_button("the label", url="https://streamlit.io", icon=":material/bolt:")
c = self.get_delta_from_queue().new_element.link_button
assert c.icon == ":material/bolt:"
def test_invalid_icon(self):
"""Test that an error is raised if an invalid icon is provided."""
with pytest.raises(StreamlitAPIException) as e:
st.link_button("the label", url="https://streamlit.io", icon="invalid")
assert str(e.value) == (
'The value "invalid" is not a valid emoji. '
"Shortcodes are not allowed, please use a single character instead."
)
| LinkButtonTest |
python | astropy__astropy | astropy/utils/masked/tests/test_function_helpers.py | {
"start": 54734,
"end": 64427
} | class ____:
"""Tests based on those from numpy.ma.tests.test_extras.
Adjusted to take into account that comparing masked values should
result in masked equality.
"""
@classmethod
def setup_class(cls):
# Setup for unique (names as in unique_all NamedTuple)
# input data, unique values, indices in data to those,
# inverse indices in values to reconstruct data, counts.
cls.data = Masked([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0])
cls.values = Masked([1, 2, 3, 1, 2], mask=[0, 0, 0, 1, 1])
cls.indices = np.array([0, 3, 5, 2, 4])
cls.inverse_indices = np.array([0, 0, 3, 1, 4, 2])
cls.counts = np.array([2, 1, 1, 1, 1])
@pytest.mark.parametrize("dtype", [int, float, object])
def test_unique(self, dtype):
values, indices, inverse_indices = np.unique(
self.data.astype(dtype), return_index=True, return_inverse=True
)
assert_masked_equal(values, self.values.astype(dtype))
assert_array_equal(indices, self.indices)
assert_array_equal(inverse_indices, self.inverse_indices)
# All masked
data2 = Masked([2, 1, 3], mask=True)
values2, indices2, inverse_indices2 = np.unique(
data2.astype(dtype), return_index=True, return_inverse=True
)
expected_values2 = Masked([1, 2, 3], mask=True)
assert_masked_equal(values2, expected_values2.astype(dtype))
assert_array_equal(indices2, [1, 0, 2])
assert_array_equal(inverse_indices2, [1, 0, 2])
@pytest.mark.skipif(NUMPY_LT_2_0, reason="new in numpy 2.0")
def check_unique(self, test):
for name in test._fields:
assert_array_equal(getattr(test, name), getattr(self, name))
@pytest.mark.skipif(NUMPY_LT_2_0, reason="new in numpy 2.0")
def test_unique_all(self):
test = np.unique_all(self.data)
assert len(test) == 4
self.check_unique(test)
@pytest.mark.skipif(NUMPY_LT_2_0, reason="new in numpy 2.0")
def test_unique_counts(self):
test = np.unique_counts(self.data)
assert len(test) == 2
self.check_unique(test)
@pytest.mark.skipif(NUMPY_LT_2_0, reason="new in numpy 2.0")
def test_unique_inverse(self):
test = np.unique_inverse(self.data)
assert len(test) == 2
self.check_unique(test)
@pytest.mark.skipif(NUMPY_LT_2_0, reason="new in numpy 2.0")
def test_unique_values(self):
test = np.unique_values(self.data)
assert isinstance(test, Masked)
assert_array_equal(test, self.values)
def test_ediff1d(self):
x = Masked(np.arange(5), mask=[1, 0, 0, 0, 1])
control = Masked([1, 1, 1, 1], mask=[1, 0, 0, 1])
test = np.ediff1d(x)
assert_masked_equal(test, control)
# Test ediff1d w/ to_begin
test2 = np.ediff1d(x, to_begin=Masked(10, mask=True))
control2 = Masked([10, 1, 1, 1, 1], mask=[1, 1, 0, 0, 1])
assert_masked_equal(test2, control2)
test3 = np.ediff1d(x, to_begin=[1, 2, 3])
control3 = Masked([1, 2, 3, 1, 1, 1, 1], mask=[0, 0, 0, 1, 0, 0, 1])
assert_masked_equal(test3, control3)
# Test ediff1d w/ to_end
test4 = np.ediff1d(x, to_end=Masked(10, mask=True))
control4 = Masked([1, 1, 1, 1, 10], mask=[1, 0, 0, 1, 1])
assert_masked_equal(test4, control4)
test5 = np.ediff1d(x, to_end=[1, 2, 3])
control5 = Masked([1, 1, 1, 1, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0])
assert_masked_equal(test5, control5)
# Test ediff1d w/ to_begin and to_end
test6 = np.ediff1d(
x, to_end=Masked(10, mask=True), to_begin=Masked(20, mask=True)
)
control6 = Masked([20, 1, 1, 1, 1, 10], mask=[1, 1, 0, 0, 1, 1])
assert_masked_equal(test6, control6)
test7 = np.ediff1d(x, to_end=[1, 2, 3], to_begin=Masked(10, mask=True))
control7 = Masked([10, 1, 1, 1, 1, 1, 2, 3], mask=[1, 1, 0, 0, 1, 0, 0, 0])
assert_masked_equal(test7, control7)
# Test ediff1d w/ a ndarray
test8 = np.ediff1d(
np.arange(5), to_end=Masked(10, mask=True), to_begin=Masked(20, mask=True)
)
control8 = Masked([20, 1, 1, 1, 1, 10], mask=[1, 0, 0, 0, 0, 1])
assert_masked_equal(test8, control8)
def test_intersect1d(self):
x = Masked([1, 3, 3, 3, 4], mask=[0, 0, 0, 1, 1])
y = Masked([3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 1])
test = np.intersect1d(x, y)
control = Masked([1, 3, 4], mask=[0, 0, 1])
assert_masked_equal(test, control)
def test_setxor1d(self):
a = Masked([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
b = Masked([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
test = np.setxor1d(a, b)
assert_masked_equal(test, Masked([3, 4, 7]))
a = Masked([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
b = [1, 2, 3, 4, 5]
test = np.setxor1d(a, b)
assert_masked_equal(test, Masked([3, 4, 7, -1], mask=[0, 0, 0, 1]))
a = Masked([1, 8, 2, 3], mask=[0, 1, 0, 0])
b = Masked([6, 5, 4, 8], mask=[0, 0, 0, 1])
test = np.setxor1d(a, b)
assert_masked_equal(test, Masked([1, 2, 3, 4, 5, 6]))
assert_masked_equal(np.setxor1d(Masked([]), []), Masked([]))
@pytest.mark.parametrize("dtype", [int, float, object])
def test_isin(self, dtype):
a = np.arange(24).reshape((2, 3, 4))
mask = np.zeros(a.shape, bool)
mask[1, 2, 0] = 1 # 20
mask[1, 2, 1] = 1 # 21
a = Masked(a, mask=mask)
b = Masked([0, 10, 20, 30, 1, 3, 11, 21, 33], mask=[0, 1, 0, 1, 0, 1, 0, 1, 0])
# unmasked 0, 20, 1, 11, 33, masked 10, 30, 3, 21
ec = np.zeros((2, 3, 4), dtype=bool)
ec[0, 0, 0] = True # 0
ec[0, 0, 1] = True # 1
ec[0, 2, 3] = True # 11
ec[1, 2, 1] = True # masked 21
ec = Masked(ec, mask)
c = np.isin(a.astype(dtype), b.astype(dtype))
assert_masked_equal(c, ec)
@pytest.mark.skipif(not NUMPY_LT_2_4, reason="np.in1d was removed in numpy 2.4")
@pytest.mark.filterwarnings("ignore:in1d.*deprecated") # not NUMPY_LT_2_0
def test_in1d(self):
# Once we require numpy>=2.0, these tests should be joined with np.isin.
a = Masked([1, 2, 5, -2, -1], mask=[0, 0, 0, 1, 1])
b = Masked([1, 2, 3, 4, 5, -2], mask=[0, 0, 0, 0, 0, 1])
test = np.in1d(a, b) # noqa: NPY201
assert_masked_equal(test, Masked([True, True, True, True, False], mask=a.mask))
assert_array_equal(np.in1d(a, b, invert=True), ~test) # noqa: NPY201
a = Masked([5, 5, 2, -2, -1], mask=[0, 0, 0, 1, 1])
b = Masked([1, 5, -1], mask=[0, 0, 1])
test = np.in1d(a, b) # noqa: NPY201
assert_masked_equal(test, Masked([True, True, False, False, True], mask=a.mask))
assert_masked_equal(np.in1d(Masked([]), []), Masked([])) # noqa: NPY201
assert_masked_equal(np.in1d(Masked([]), [], invert=True), Masked([])) # noqa: NPY201
@pytest.mark.skipif(not NUMPY_LT_2_4, reason="np.in1d was removed in numpy 2.4")
def test_in1d_kind_table_error(self):
with pytest.raises(ValueError, match="'table' method is not supported"):
np.in1d(Masked([1, 2, 3]), [4, 5], kind="table") # noqa: NPY201
@pytest.mark.parametrize("dtype", [int, float, object])
def test_union1d(self, dtype):
a = Masked([1, 2, 5, 7, 5, 5], mask=[0, 0, 0, 0, 0, 1])
b = Masked([1, 2, 3, 4, 5, 6], mask=[0, 0, 0, 0, 0, 1])
control = Masked([1, 2, 3, 4, 5, 7, 5, 6], mask=[0, 0, 0, 0, 0, 0, 1, 1])
test = np.union1d(a.astype(dtype), b.astype(dtype))
assert_masked_equal(test, control.astype(dtype))
assert_masked_equal(np.union1d(Masked([]), []), Masked([]))
def test_setdiff1d(self):
a = Masked([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1])
b = np.array([2, 4, 3, 3, 2, 1, 5])
test = np.setdiff1d(a, b)
assert_masked_equal(test, Masked([6, 7, 1], mask=[0, 0, 1]))
b2 = Masked(b, mask=[1, 1, 1, 1, 0, 0, 0])
test2 = np.setdiff1d(a, b2)
assert_masked_equal(test2, Masked([4, 6, 7, 1], mask=[0, 0, 0, 1]))
a = Masked(np.array([], dtype=np.uint32), mask=[])
assert np.setdiff1d(a, []).dtype == np.uint32
a = Masked(["a", "b", "c"], mask=[0, 1, 1])
b = Masked(["a", "b", "s"], mask=[0, 1, 1])
test3 = np.setdiff1d(a, b, assume_unique=True)
assert_masked_equal(test3, Masked(["c"], True))
# Get wrapped and covered functions.
all_wrapped_functions = get_wrapped_functions(np)
tested_functions = get_covered_functions(locals())
# Create set of untested functions.
untested_functions = set()
deprecated_functions = set()
untested_functions |= deprecated_functions
io_functions = {np.save, np.savez, np.savetxt, np.savez_compressed}
untested_functions |= io_functions
poly_functions = {
np.poly, np.polyadd, np.polyder, np.polydiv, np.polyfit, np.polyint,
np.polymul, np.polysub, np.polyval, np.roots, np.vander,
} # fmt: skip
untested_functions |= poly_functions
def test_basic_testing_completeness():
declared_functions = tested_functions | IGNORED_FUNCTIONS | UNSUPPORTED_FUNCTIONS
if NUMPY_LT_2_2:
declared_functions |= SUPPORTED_NEP35_FUNCTIONS
assert declared_functions == all_wrapped_functions
@pytest.mark.xfail(reason="coverage not completely set up yet")
def test_testing_completeness():
assert not tested_functions.intersection(untested_functions)
assert all_wrapped_functions == (tested_functions | untested_functions)
| TestArraySetOps |
python | getsentry__sentry | src/sentry/api/endpoints/project_performance_general_settings.py | {
"start": 755,
"end": 2634
} | class ____(ProjectEndpoint):
owner = ApiOwner.DATA_BROWSING
publish_status = {
"DELETE": ApiPublishStatus.PRIVATE,
"GET": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
permission_classes = (ProjectSettingPermission,)
def get(self, request: Request, project) -> Response:
if not self.has_feature(project, request):
return self.respond(status=status.HTTP_404_NOT_FOUND)
if not project:
return Response(status=status.HTTP_404_NOT_FOUND)
project_option_settings = self.get_current_settings(project)
return Response(project_option_settings)
def post(self, request: Request, project: Project) -> Response:
if not self.has_feature(project, request):
return self.respond(status=status.HTTP_404_NOT_FOUND)
if not project:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = ProjectPerformanceGeneralSettingsSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
self.update_settings(project, request.data)
return Response(status=status.HTTP_204_NO_CONTENT)
def has_feature(self, project, request) -> bool:
return features.has(
"organizations:performance-view", project.organization, actor=request.user
)
def get_current_settings(self, project: Project):
return project.get_option(
SETTINGS_PROJECT_OPTION_KEY, DEFAULT_PROJECT_PERFORMANCE_GENERAL_SETTINGS
)
def update_settings(self, project: Project, new_settings: dict):
current_settings = self.get_current_settings(project)
project.update_option(SETTINGS_PROJECT_OPTION_KEY, {**current_settings, **new_settings})
| ProjectPerformanceGeneralSettingsEndpoint |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/notifications/test_sqs.py | {
"start": 1303,
"end": 4542
} | class ____:
def test_class_and_notifier_are_same(self):
assert send_sqs_notification is SqsNotifier
@pytest.mark.parametrize("aws_conn_id", ["aws_test_conn_id", None, PARAM_DEFAULT_VALUE])
@pytest.mark.parametrize("region_name", ["eu-west-2", None, PARAM_DEFAULT_VALUE])
def test_parameters_propagate_to_hook(self, aws_conn_id, region_name):
"""Test notifier attributes propagate to SqsHook."""
notifier_kwargs = {}
if aws_conn_id is not NOTSET:
notifier_kwargs["aws_conn_id"] = aws_conn_id
if region_name is not NOTSET:
notifier_kwargs["region_name"] = region_name
notifier = SqsNotifier(**notifier_kwargs, **SEND_MSG_KWARGS)
with mock.patch("airflow.providers.amazon.aws.notifications.sqs.SqsHook") as mock_hook:
hook = notifier.hook
assert hook is notifier.hook, "Hook property not cached"
mock_hook.assert_called_once_with(
aws_conn_id=(aws_conn_id if aws_conn_id is not NOTSET else "aws_default"),
region_name=(region_name if region_name is not NOTSET else None),
)
# Basic check for notifier
notifier.notify({})
mock_hook.return_value.send_message.assert_called_once_with(**SEND_MSG_KWARGS)
def test_sqs_notifier_templated(self, create_dag_without_db):
notifier = SqsNotifier(
aws_conn_id="{{ dag.dag_id }}",
queue_url="https://sqs.{{ var_region }}.amazonaws.com/{{ var_account }}/{{ var_queue }}",
message_body="The {{ var_username|capitalize }} Show",
message_attributes={"bar": "{{ dag.dag_id }}"},
message_group_id="{{ var_group_id }}",
region_name="{{ var_region }}",
)
with mock.patch("airflow.providers.amazon.aws.notifications.sqs.SqsHook") as m:
notifier(
{
"dag": create_dag_without_db("test_sqs_notifier_templated"),
"var_username": "truman",
"var_region": "ca-central-1",
"var_account": "123321123321",
"var_queue": "AwesomeQueue",
"var_group_id": "spam",
}
)
# Hook initialisation
m.assert_called_once_with(aws_conn_id="test_sqs_notifier_templated", region_name="ca-central-1")
# Send message
m.return_value.send_message.assert_called_once_with(
queue_url="https://sqs.ca-central-1.amazonaws.com/123321123321/AwesomeQueue",
message_body="The Truman Show",
message_group_id="spam",
message_attributes={"bar": "test_sqs_notifier_templated"},
delay_seconds=0,
)
@pytest.mark.asyncio
async def test_async_notify(self):
notifier = SqsNotifier(**SEND_MSG_KWARGS)
with mock.patch("airflow.providers.amazon.aws.notifications.sqs.SqsHook") as mock_hook:
mock_hook.return_value.asend_message = mock.AsyncMock()
await notifier.async_notify({})
mock_hook.return_value.asend_message.assert_called_once_with(**SEND_MSG_KWARGS)
| TestSqsNotifier |
python | great-expectations__great_expectations | scripts/cleanup/cleanup_databricks.py | {
"start": 706,
"end": 2648
} | class ____(BaseSettings):
"""Environment variables for Databricks connection.
    These are injected via CI, but when running locally, you may use your own credentials.

"""
DATABRICKS_TOKEN: str
DATABRICKS_HOST: str
DATABRICKS_HTTP_PATH: str
@property
def connection_string(self) -> str:
return f"databricks://token:{self.DATABRICKS_TOKEN}@{self.DATABRICKS_HOST}?http_path={self.DATABRICKS_HTTP_PATH}&catalog=ci"
def cleanup_databricks(config: DatabricksConnectionConfig) -> None:
engine = create_engine(url=config.connection_string)
with engine.connect() as conn, conn.begin():
results = conn.execute(
TextClause(
"""
SELECT catalog_name, schema_name, created
FROM information_schema.schemata
WHERE catalog_name = :catalog_name
AND schema_name REGEXP :schema_pattern
AND created < CURRENT_TIMESTAMP() - INTERVAL 1 HOUR
ORDER BY created DESC
"""
),
{"catalog_name": CATALOG_NAME, "schema_pattern": SCHEMA_PATTERN},
).fetchall()
if not results:
logger.info("No old schemas found to clean up")
return
for row in results:
catalog_name, schema_name, _ = row
full_schema_name = f"{catalog_name}.{schema_name}"
try:
conn.execute(TextClause(f"DROP SCHEMA IF EXISTS {full_schema_name} CASCADE"))
logger.info(f"Dropped schema: {full_schema_name}")
except Exception:
logger.exception(f"Failed to drop schema {full_schema_name}")
logger.info(f"Cleaned up {len(results)} Databricks schema(s)")
engine.dispose()
if __name__ == "__main__":
config = DatabricksConnectionConfig() # type: ignore[call-arg] # pydantic populates from env vars
cleanup_databricks(config)
| DatabricksConnectionConfig |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/components/reward_providers/curiosity_reward_provider.py | {
"start": 889,
"end": 988
} | class ____(NamedTuple):
continuous: torch.Tensor
discrete: torch.Tensor
| ActionPredictionTuple |
python | facebook__pyre-check | scripts/callgraph_utilities.py | {
"start": 9033,
"end": 10573
} | class ____:
call_graph: Dict[str, Set[str]]
entrypoints: Entrypoints
def __init__(self, call_graph: InputFormat, entrypoints: Entrypoints) -> None:
self.call_graph = call_graph.call_graph
self.entrypoints = entrypoints
def get_transitive_callees_and_traces(self) -> Dict[str, Trace]:
transitive_callees = {}
queue: Deque[Tuple[str, Trace]] = deque(
[(entrypoint, [entrypoint]) for entrypoint in self.entrypoints.entrypoints]
)
while queue:
callable, trace = queue.popleft()
if callable in transitive_callees:
continue
transitive_callees[callable] = trace
if callable in self.call_graph:
queue += [
(next_callable, trace + [next_callable])
for next_callable in self.call_graph[callable]
]
return transitive_callees
def get_union_callgraph_format(
call_graph_kind_and_path: Tuple[Tuple[str, TextIO], ...],
) -> UnionCallGraphFormat:
union_call_graph_format = UnionCallGraphFormat()
for call_graph_kind, call_graph_file in call_graph_kind_and_path:
call_graph_data = load_json_from_file(call_graph_file, "CALL_GRAPH_FILE")
current_input_format_type = InputType[call_graph_kind.upper()].value
current_input_format = current_input_format_type(call_graph_data)
union_call_graph_format.union_call_graph(current_input_format.call_graph)
return union_call_graph_format
| CallGraph |
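A dependency-free sketch (not from the row) of what `CallGraph.get_transitive_callees_and_traces` above computes: a breadth-first walk from each entrypoint that records, for every reachable callable, the first call chain that reached it. Plain dicts and lists stand in for the pyre-specific `Entrypoints`/`InputFormat` wrappers.

```python
from collections import deque

call_graph = {"main": ["a", "b"], "a": ["c"], "b": ["c"], "c": []}
entrypoints = ["main"]

traces = {}
queue = deque((entry, [entry]) for entry in entrypoints)
while queue:
    callable_name, trace = queue.popleft()
    if callable_name in traces:
        continue  # keep the first trace found; BFS guarantees it is no longer than any later one
    traces[callable_name] = trace
    for callee in call_graph.get(callable_name, []):
        queue.append((callee, trace + [callee]))

print(traces)
# {'main': ['main'], 'a': ['main', 'a'], 'b': ['main', 'b'], 'c': ['main', 'a', 'c']}
```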
python | ray-project__ray | rllib/utils/tests/test_tf_utils.py | {
"start": 872,
"end": 1707
} | class ____:
def __init__(self, use_loss=True):
# Uses a separate graph for each network.
with tf.Graph().as_default():
# Create the network.
var = [tf.Variable(1)]
loss, init, _, _ = make_linear_network()
sess = tf.Session()
# Additional code for setting and getting the weights.
weights = tf_utils.TensorFlowVariables(
loss if use_loss else None, sess, input_variables=var
)
# Return all of the data needed to use the network.
self.values = [weights, init, sess]
sess.run(init)
def set_and_get_weights(self, weights):
self.values[0].set_weights(weights)
return self.values[0].get_weights()
def get_weights(self):
return self.values[0].get_weights()
| LossActor |
python | more-itertools__more-itertools | tests/test_recipes.py | {
"start": 13626,
"end": 14327
} | class ____(TestCase):
def test_basic(self):
iterable = [0, 1, 1, 8, 9, 9, 9, 8, 8, 1, 9, 9]
actual = list(mi.unique(iterable))
expected = [0, 1, 8, 9]
self.assertEqual(actual, expected)
def test_key(self):
iterable = ['1', '1', '10', '10', '2', '2', '20', '20']
actual = list(mi.unique(iterable, key=int))
expected = ['1', '2', '10', '20']
self.assertEqual(actual, expected)
def test_reverse(self):
iterable = ['1', '1', '10', '10', '2', '2', '20', '20']
actual = list(mi.unique(iterable, key=int, reverse=True))
expected = ['20', '10', '2', '1']
self.assertEqual(actual, expected)
| UniqueTests |
python | walkccc__LeetCode | solutions/2136. Earliest Possible Day of Full Bloom/2136.py | {
"start": 0,
"end": 301
} | class ____:
def earliestFullBloom(self, plantTime: list[int], growTime: list[int]) -> int:
ans = 0
time = 0
for p, g in sorted(
[(p, g) for (p, g) in zip(plantTime, growTime)],
key=lambda x: -x[1]):
time += p
ans = max(ans, time + g)
return ans
| Solution |
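A small worked check of the greedy in the row above (not part of the original file). Planting in descending grow time lets the long grow phases overlap the remaining planting work, so the answer is the maximum over each seed's plant-finish time plus its grow time.

```python
plant_time, grow_time = [1, 4, 3], [2, 3, 1]   # LeetCode 2136, example 1
finish = 0   # running end of the sequential planting work
answer = 0
for p, g in sorted(zip(plant_time, grow_time), key=lambda pg: -pg[1]):
    finish += p                        # plant this seed right after the previous one
    answer = max(answer, finish + g)   # it blooms g days after its planting ends
print(answer)  # 9 -- matches the expected output for this example
```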
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/dynamic_rendezvous.py | {
"start": 10918,
"end": 16034
} | class ____(_RendezvousStateHolder):
"""Hold the rendezvous state synced with other nodes via a backend.
Args:
backend:
The rendezvous backend to use.
settings:
The rendezvous settings.
cache_duration:
The amount of time, in seconds, to cache the last rendezvous state
before requesting it from the backend again.
"""
_backend: RendezvousBackend
_state: _RendezvousState
_settings: RendezvousSettings
_cache_duration: int
_token: Token
_dirty: bool
_last_sync_time: float
_dead_nodes: list[_NodeDesc]
def __init__(
self,
backend: RendezvousBackend,
settings: RendezvousSettings,
cache_duration: int = 1,
) -> None:
self._backend = backend
self._state = _RendezvousState()
self._settings = settings
self._cache_duration = cache_duration
self._token = None
self._dirty = False
self._last_sync_time = -1
self._dead_nodes = []
def _record(self, message: str, node_state: NodeState = NodeState.RUNNING):
construct_and_record_rdzv_event(
name=f"{self.__class__.__name__}.{get_method_name()}",
run_id=self._settings.run_id,
message=message,
node_state=node_state,
)
@property
def state(self) -> _RendezvousState:
"""See base class."""
return self._state
def sync(self) -> bool | None:
"""See base class."""
state_bits: bytes | None = None
token = None
has_set: bool | None
if self._dirty:
has_set = False
state_bits = pickle.dumps(self._state)
set_response = self._backend.set_state(state_bits, self._token)
if set_response is not None:
state_bits, token, has_set = set_response
else:
has_set = None
if self._cache_duration > 0:
# Avoid overloading the backend if we are asked to retrieve the
# state repeatedly. Try to serve the cached state.
if self._last_sync_time >= max(
time.monotonic() - self._cache_duration, 0
):
return None
get_response = self._backend.get_state()
if get_response is not None:
state_bits, token = get_response
if state_bits is not None:
try:
self._state = pickle.loads(state_bits)
except pickle.PickleError as exc:
raise RendezvousStateError(
"The rendezvous state is corrupt. See inner exception for details."
) from exc
else:
self._state = _RendezvousState()
if has_set and self._dead_nodes and logger.isEnabledFor(logging.DEBUG):
node_list = ", ".join(f"'{dead_node}'" for dead_node in self._dead_nodes)
msg = (
f"As part of the sync operation the node(s) {node_list} have been removed from the "
f"rendezvous '{self._settings.run_id}' since they had no heartbeat."
)
self._record(message=msg)
logger.debug(msg)
self._token = token
self._dirty = False
self._last_sync_time = time.monotonic()
self._sanitize()
return has_set
def _sanitize(self) -> None:
state = self._state
expire_time = datetime.now(timezone.utc) - (
self._settings.keep_alive_interval * self._settings.keep_alive_max_attempt
)
# Filter out the dead nodes.
self._dead_nodes = [
node
for node, last_heartbeat in state.last_heartbeats.items()
if last_heartbeat < expire_time
]
participant_removed = False
for dead_node in self._dead_nodes:
msg = f"Detected dead node '{dead_node}', removing it from the rendezvous"
logger.debug(msg)
del state.last_heartbeats[dead_node]
try:
del state.participants[dead_node]
participant_removed = True
except KeyError:
pass
try:
state.wait_list.remove(dead_node)
except KeyError:
pass
try:
state.redundancy_list.remove(dead_node)
except KeyError:
pass
if participant_removed:
# Common epilogue shared with the _remove_from_participants()
# function of _DistributedRendezvousOpExecutor.
_remove_participant_epilogue(state, self._settings)
def mark_dirty(self) -> None:
"""See base class.
If the local rendezvous state is dirty, the next sync call will try to
write the changes back to the backend. However this attempt might fail
if another node, which had the same state, also made changes and wrote
them before us.
"""
self._dirty = True
| _BackendRendezvousStateHolder |
python | openai__openai-python | src/openai/types/images_response.py | {
"start": 488,
"end": 926
} | class ____(BaseModel):
input_tokens: int
"""The number of tokens (images and text) in the input prompt."""
input_tokens_details: UsageInputTokensDetails
"""The input tokens detailed information for the image generation."""
output_tokens: int
"""The number of output tokens generated by the model."""
total_tokens: int
"""The total number of tokens (images and text) used for the image generation."""
| Usage |
python | google__pytype | pytype/tests/test_protocol_inference.py | {
"start": 111,
"end": 13926
} | class ____(test_base.BaseTest):
"""Tests for protocol implementation."""
def setUp(self):
super().setUp()
self.options.tweak(check=False, protocols=True)
def test_multiple_signatures_with_type_parameter(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import List, TypeVar
T = TypeVar("T")
def f(x: T, y: int) -> List[T]: ...
def f(x: List[T], y: str) -> List[T]: ...
""",
)
ty = self.Infer(
"""
import foo
def f(x, y):
return foo.f(x, y)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Union
def f(x, y: Union[int, str]) -> list: ...
""",
)
def test_unknown_single_signature(self):
# Test that the right signature is picked in the presence of an unknown
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import List, TypeVar
T = TypeVar("T")
def f(x: T, y: int) -> List[T]: ...
def f(x: List[T], y: str) -> List[T]: ...
""",
)
ty = self.Infer(
"""
import foo
def f(y):
return foo.f("", y)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import List
def f(y: int) -> List[str]: ...
""",
)
def test_multiple_signatures_with_unknown(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(arg1: str) -> float: ...
def f(arg2: int) -> bool: ...
""",
)
ty = self.Infer(
"""
import foo
def f(x):
return foo.f(x)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Union
def f(x: Union[int, str]) -> Union[float, bool]: ...
""",
)
def test_multiple_signatures_with_optional_arg(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(x: str) -> int: ...
def f(x = ...) -> float: ...
""",
)
ty = self.Infer(
"""
import foo
def f(x):
return foo.f(x)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Union
def f(x: str) -> Union[int, float]: ...
""",
)
def test_multiple_signatures_with_kwarg(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
def f(*, y: int) -> bool: ...
def f(y: str) -> float: ...
""",
)
ty = self.Infer(
"""
import foo
def f(x):
return foo.f(y=x)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Union
def f(x: Union[int, str]) -> Union[bool, float]: ...
""",
)
def test_pow2(self):
ty = self.Infer("""
def t_testPow2(x, y):
# pow(int, int) returns int, or float if the exponent is negative.
# Hence, it's a handy function for testing UnionType returns.
return pow(x, y)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Union
def t_testPow2(x: Union[complex, float, int], y: Union[complex, float, int]) -> Union[complex, float, int]: ...
""",
)
@test_base.skip("Moving to protocols.")
def test_slices(self):
ty = self.Infer("""
def trim(docstring):
lines = docstring.splitlines()
for line in lines[1:]:
len(line)
return lines
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import List, Union
def trim(docstring: Union[bytearray, str, unicode]) -> List[Union[bytearray, str, unicode], ...]: ...
""",
)
def test_match_unknown_against_container(self):
ty = self.Infer("""
a = {1}
def f(x):
return a & x
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Set
a = ... # type: Set[int]
def f(x) -> Set[int]: ...
""",
)
def test_supports_lower(self):
ty = self.Infer("""
def f(x):
return x.lower()
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any
def f(x: protocols.SupportsLower) -> Any: ...
""",
)
def test_container(self):
ty = self.Infer("""
def f(x, y):
return y in x
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Container
def f(x: Container, y:Any) -> bool: ...
""",
)
def test_supports_int(self):
ty = self.Infer("""
def f(x):
return x.__int__()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, SupportsInt
def f(x: SupportsInt) -> Any: ...
""",
)
def test_supports_float(self):
ty = self.Infer("""
def f(x):
return x.__float__()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, SupportsFloat
def f(x: SupportsFloat) -> Any: ...
""",
)
def test_supports_complex(self):
ty = self.Infer("""
def f(x):
return x.__complex__()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, SupportsComplex
def f(x: SupportsComplex) -> Any: ...
""",
)
def test_sized(self):
ty = self.Infer("""
def f(x):
return x.__len__()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Sized
def f(x: Sized) -> Any: ...
""",
)
def test_supports_abs(self):
ty = self.Infer("""
def f(x):
y = abs(x)
return y.__len__()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, SupportsAbs, Sized
def f(x: SupportsAbs[Sized]) -> Any: ...
""",
)
@test_base.skip("doesn't match arguments correctly")
def test_supports_round(self):
ty = self.Infer("""
def f(x):
y = x.__round__()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, SupportsRound
def f(x: SupportsRound) -> Any: ...
""",
)
def test_reversible(self):
ty = self.Infer("""
def f(x):
y = x.__reversed__()
return y
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Iterator, Reversible
def f(x: Reversible) -> Iterator: ...
""",
)
def test_iterable(self):
ty = self.Infer("""
def f(x):
return x.__iter__()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Iterable, Iterator
def f(x: Iterable) -> Iterator: ...
""",
)
@test_base.skip("Iterator not implemented, breaks other functionality")
def test_iterator(self):
ty = self.Infer("""
def f(x):
return x.next()
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Iterator
def f(x: Iterator) -> Any: ...
""",
)
def test_callable(self):
ty = self.Infer("""
def f(x):
return x().lower()
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any, Callable
def f(x: Callable[..., protocols.SupportsLower]) -> Any: ...
""",
)
@test_base.skip("Matches Mapping[int, Any] but not Sequence")
def test_sequence(self):
ty = self.Infer("""
def f(x):
x.index(6)
x.count(7)
return x.__getitem__(5) + x[1:5]
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any, Sequence
def f(x: Sequence) -> Any: ...
""",
)
@test_base.skip("doesn't match arguments correctly on exit")
def test_context_manager(self):
ty = self.Infer("""
def f(x):
x.__enter__()
x.__exit__(None, None, None)
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any, ContextManager
def f(x: ContextManager) -> Any: ...
""",
)
def test_protocol_needs_parameter(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import Sized, SupportsAbs
def f(x: SupportsAbs[Sized]) -> None: ...
""",
)
ty = self.Infer(
"""
import foo
def g(y):
return foo.f(y)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import Sized, SupportsAbs
def g(y: SupportsAbs[Sized]) -> None: ...
""",
)
def test_protocol_needs_parameter_builtin(self):
with test_utils.Tempdir() as d:
d.create_file(
"foo.pyi",
"""
from typing import SupportsAbs
def f(x: SupportsAbs[int]) -> None: ...
""",
)
ty = self.Infer(
"""
import foo
def g(y):
return foo.f(y)
""",
pythonpath=[d.path],
)
self.assertTypesMatchPytd(
ty,
"""
import foo
from typing import SupportsAbs
def g(y: SupportsAbs[int]) -> None: ...
""",
)
@test_base.skip("Unexpectedly assumes returned result is sequence")
def test_mapping_abstractmethod(self):
ty = self.Infer("""
def f(x, y):
return x.__getitem__(y)
""")
self.assertTypesMatchPytd(
ty,
"""
from typing import Any, Mapping
def f(x: Mapping, y) -> Any: ...
""",
)
def test_supports_upper(self):
ty = self.Infer("""
def f(x):
return x.upper()
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any
def f(x: protocols.SupportsUpper) -> Any: ...
""",
)
def test_supports_startswith(self):
ty = self.Infer("""
def f(x):
return x.startswith("foo")
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any
def f(x: protocols.SupportsStartswith) -> Any: ...
""",
)
def test_supports_endswith(self):
ty = self.Infer("""
def f(x):
return x.endswith("foo")
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any
def f(x: protocols.SupportsEndswith) -> Any: ...
""",
)
def test_supports_lstrip(self):
ty = self.Infer("""
def f(x):
return x.lstrip()
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any
def f(x: protocols.SupportsLstrip) -> Any: ...
""",
)
def test_supports_replace(self):
ty = self.Infer("""
def f(x):
return x.replace("foo", "bar")
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any
def f(x: protocols.SupportsReplace) -> Any: ...
""",
)
def test_supports_encode(self):
ty = self.Infer("""
def f(x):
return x.encode()
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any
def f(x: protocols.SupportsEncode) -> Any: ...
""",
)
def test_supports_decode(self):
ty = self.Infer("""
def f(x):
return x.decode()
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any
def f(x: protocols.SupportsDecode) -> Any: ...
""",
)
def test_supports_splitlines(self):
ty = self.Infer("""
def f(x):
return x.splitlines()
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any
def f(x: protocols.SupportsSplitlines) -> Any: ...
""",
)
def test_supports_split(self):
ty = self.Infer("""
def f(x):
return x.split()
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any
def f(x: protocols.SupportsSplit) -> Any: ...
""",
)
def test_supports_strip(self):
ty = self.Infer("""
def f(x):
return x.strip()
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any
def f(x: protocols.SupportsStrip) -> Any: ...
""",
)
def test_supports_find(self):
ty = self.Infer("""
def f(x):
return x.find("foo")
""")
self.assertTypesMatchPytd(
ty,
"""
import protocols
from typing import Any
def f(x: protocols.SupportsFind) -> Any: ...
""",
)
def test_signature_template(self):
# Regression test for https://github.com/google/pytype/issues/410
self.assertNoCrash(
self.Infer,
"""
def rearrange_proc_table(val):
procs = val['procs']
val['procs'] = dict((ix, procs[ix]) for ix in range(0, len(procs)))
del val['fields']
""",
)
if __name__ == "__main__":
test_base.main()
| ProtocolInferenceTest |
python | scikit-learn__scikit-learn | sklearn/base.py | {
"start": 36826,
"end": 37928
} | class ____:
"""Mixin class for all density estimators in scikit-learn.
This mixin defines the following functionality:
- sets estimator type to `"density_estimator"` through the `estimator_type` tag;
- a `score` method that defaults to a no-op.
Examples
--------
>>> from sklearn.base import DensityMixin
>>> class MyEstimator(DensityMixin):
... def fit(self, X, y=None):
... self.is_fitted_ = True
... return self
>>> estimator = MyEstimator()
>>> hasattr(estimator, "score")
True
"""
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
tags.estimator_type = "density_estimator"
return tags
def score(self, X, y=None):
"""Return the score of the model on the data `X`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
score : float
"""
pass
| DensityMixin |
python | spyder-ide__spyder | spyder/plugins/debugger/confpage.py | {
"start": 413,
"end": 3954
} | class ____(PluginConfigPage):
def setup_page(self):
newcb = self.create_checkbox
# ---- Debug ----
# Pdb run lines Group
pdb_run_lines_group = QGroupBox(_("Run code while debugging"))
pdb_run_lines_label = QLabel(_(
"You can run several lines of code on each new prompt while "
"debugging. Please introduce each one separated by semicolons and "
"a space, for example:<br>"
"<i>import matplotlib.pyplot as plt; import numpy as np</i>")
)
pdb_run_lines_label.setWordWrap(True)
pdb_run_lines_edit = self.create_lineedit(
_("Lines:"), 'startup/pdb_run_lines', '', alignment=Qt.Horizontal)
pdb_run_lines_layout = QVBoxLayout()
pdb_run_lines_layout.addWidget(pdb_run_lines_label)
pdb_run_lines_layout.addWidget(pdb_run_lines_edit)
pdb_run_lines_group.setLayout(pdb_run_lines_layout)
# Debug Group
debug_group = QGroupBox(_("Interaction"))
debug_layout = QVBoxLayout()
prevent_closing_box = newcb(
_("Prevent editor from closing files while debugging"),
'pdb_prevent_closing',
tip=_("This option prevents the user from closing a file while"
" it is debugged."))
debug_layout.addWidget(prevent_closing_box)
continue_box = newcb(
_("Stop debugging on first line of files without breakpoints"),
'pdb_stop_first_line',
tip=_("This option lets you decide if the debugger should"
" stop on the first line while debugging if no breakpoints"
" are present."))
debug_layout.addWidget(continue_box)
libraries_box = newcb(
_("Ignore Python libraries while debugging"), 'pdb_ignore_lib',
tip=_("This option lets you decide if the debugger should "
"ignore the system libraries while debugging."))
debug_layout.addWidget(libraries_box)
execute_events_box = newcb(
_("Process execute events while debugging"), 'pdb_execute_events',
tip=_("This option lets you decide if the debugger should "
"process the 'execute events' after each prompt, such as "
"matplotlib <tt>show</tt> command."))
debug_layout.addWidget(execute_events_box)
exclamation_mark_box = newcb(
_("Use exclamation mark prefix for Pdb commands"),
'pdb_use_exclamation_mark',
tip=_("This option lets you decide if the Pdb commands should "
"be prefixed by an exclamation mark. This helps in "
"separating Pdb commands from Python code."))
debug_layout.addWidget(exclamation_mark_box)
debug_group.setLayout(debug_layout)
filter_group = QGroupBox(_("Execution Inspector"))
filter_data = [
('exclude_internal', _("Exclude internal frames when inspecting execution")),
]
filter_boxes = [self.create_checkbox(text, option)
for option, text in filter_data]
filter_layout = QVBoxLayout()
for box in filter_boxes:
filter_layout.addWidget(box)
filter_group.setLayout(filter_layout)
vlayout = QVBoxLayout()
vlayout.addWidget(debug_group)
vlayout.addWidget(pdb_run_lines_group)
vlayout.addWidget(filter_group)
vlayout.addStretch(1)
self.setLayout(vlayout)
| DebuggerConfigPage |
python | lxml__lxml | src/lxml/html/tests/test_html5parser.py | {
"start": 6924,
"end": 10263
} | class ____(unittest.TestCase):
def call_it(self, *args, **kwargs):
if html5lib is None:
raise unittest.SkipTest("html5lib is not installed")
from lxml.html.html5parser import fromstring
return fromstring(*args, **kwargs)
def test_returns_whole_doc_if_input_contains_html_tag(self):
parser = DummyParser(root='the doc')
self.assertEqual(self.call_it('<html></html>', parser=parser),
'the doc')
def test_returns_whole_doc_if_input_contains_doctype(self):
parser = DummyParser(root='the doc')
self.assertEqual(self.call_it('<!DOCTYPE html>', parser=parser),
'the doc')
def test_returns_whole_doc_if_input_is_encoded(self):
parser = DummyParser(root='the doc')
input = b'<!DOCTYPE html>'
self.assertEqual(self.call_it(input, parser=parser),
'the doc')
def test_returns_whole_doc_if_head_not_empty(self, use_ns=True):
E = HTMLElementMaker(namespaceHTMLElements=use_ns)
root = E.html(E.head(E.title()))
parser = DummyParser(root=root)
self.assertEqual(self.call_it('', parser=parser), root)
def test_returns_whole_doc_if_head_not_empty_no_ns(self):
self.test_returns_whole_doc_if_head_not_empty(use_ns=False)
def test_returns_unwraps_body_if_single_element(self):
E = HTMLElementMaker()
elem = E.p('test')
root = E.html(E.head(), E.body(elem))
parser = DummyParser(root=root)
self.assertEqual(self.call_it('', parser=parser), elem)
def test_returns_body_if_has_text(self):
E = HTMLElementMaker()
elem = E.p('test')
body = E.body('text', elem)
root = E.html(E.head(), body)
parser = DummyParser(root=root)
self.assertEqual(self.call_it('', parser=parser), body)
def test_returns_body_if_single_element_has_tail(self):
E = HTMLElementMaker()
elem = E.p('test')
elem.tail = 'tail'
body = E.body(elem)
root = E.html(E.head(), body)
parser = DummyParser(root=root)
self.assertEqual(self.call_it('', parser=parser), body)
def test_wraps_multiple_fragments_in_div_no_ns(self):
E = HTMLElementMaker(namespaceHTMLElements=False)
parser = DummyParser(root=E.html(E.head(), E.body(E.h1(), E.p())),
namespaceHTMLElements=False)
elem = self.call_it('', parser=parser)
self.assertEqual(elem.tag, 'div')
def test_wraps_multiple_fragments_in_span_no_ns(self):
E = HTMLElementMaker(namespaceHTMLElements=False)
parser = DummyParser(root=E.html(E.head(), E.body('foo', E.a('link'))),
namespaceHTMLElements=False)
elem = self.call_it('', parser=parser)
self.assertEqual(elem.tag, 'span')
def test_raises_type_error_on_nonstring_input(self):
not_a_string = None
self.assertRaises(TypeError, self.call_it, not_a_string)
def test_integration_whole_doc(self):
elem = self.call_it(XHTML_TEST_DOCUMENT)
self.assertEqual(elem.tag, xhtml_tag('html'))
def test_integration_single_fragment(self):
elem = self.call_it('<p></p>')
self.assertEqual(elem.tag, xhtml_tag('p'))
| Test_fromstring |
python | astropy__astropy | astropy/wcs/wcsapi/tests/test_high_level_api.py | {
"start": 1986,
"end": 3634
} | class ____(DoubleLowLevelWCS, HighLevelWCSMixin):
"""
This example WCS returns two SkyCoord objects, which triggers a
different path in the high level WCS code.
"""
@property
def pixel_n_dim(self):
return 4
@property
def world_n_dim(self):
return 4
@property
def world_axis_physical_types(self):
return ["pos.eq.ra", "pos.eq.dec", "pos.galactic.lon", "pos.galactic.lat"]
@property
def world_axis_units(self):
return ["deg", "deg", "deg", "deg"]
@property
def world_axis_object_components(self):
# Deliberately use 'ra'/'dec' here to make sure that string argument
# names work properly.
return [
("test1", "ra", "spherical.lon.degree"),
("test1", "dec", "spherical.lat.degree"),
("test2", 0, "spherical.lon.degree"),
("test2", 1, "spherical.lat.degree"),
]
@property
def world_axis_object_classes(self):
return {
"test1": (SkyCoord, (), {"unit": "deg"}),
"test2": (SkyCoord, (), {"unit": "deg", "frame": "galactic"}),
}
def test_skycoord_duplicate():
# Make sure that things work properly when the low-level WCS uses the same
# class, and specifically a SkyCoord for two of the coordinates.
wcs = SkyCoordDuplicateWCS()
c1, c2 = wcs.pixel_to_world(1, 2, 3, 4)
assert isinstance(c1, SkyCoord)
assert isinstance(c2, SkyCoord)
x, y, z, a = wcs.world_to_pixel(c1, c2)
assert_allclose(x, 1)
assert_allclose(y, 2)
assert_allclose(z, 3)
assert_allclose(a, 4)
| SkyCoordDuplicateWCS |
python | pytorch__pytorch | test/quantization/core/test_quantized_tensor.py | {
"start": 642,
"end": 5006
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.qscheme = torch.per_tensor_symmetric
def _calculate_dynamic_qparams(X, dtype, reduce_range=False):
"""Calculate the dynamic quantization parameters (scale, zero_point)
according to the min and max element of the tensor"""
if isinstance(X, torch.Tensor):
X = X.cpu().data.numpy()
if dtype == torch.qint8:
if reduce_range:
qmin, qmax = -64, 63
else:
qmin, qmax = -128, 127
else: # dtype == torch.quint8
if reduce_range:
qmin, qmax = 0, 127
else:
qmin, qmax = 0, 255
min_val = X.min().astype(dtype=np.float32)
max_val = X.max().astype(dtype=np.float32)
min_val = min(0.0, min_val)
max_val = max(0.0, max_val)
scale = (np.float64(max_val) - min_val) / (qmax - qmin)
if scale == 0.0 or math.isinf(1.0 / scale):
scale = np.float64(0.1)
zero_point = 0
zero_point_from_min = qmin - min_val / float(scale)
zero_point_from_max = qmax - max_val / float(scale)
zero_point_from_min_error = abs(qmin) - abs(min_val / float(scale))
zero_point_from_max_error = abs(qmax) - abs(max_val / float(scale))
if zero_point_from_min_error < zero_point_from_max_error:
initial_zero_point = zero_point_from_min
else:
initial_zero_point = zero_point_from_max
nudged_zero_point = 0
if initial_zero_point < qmin:
nudged_zero_point = qmin
elif initial_zero_point > qmax:
nudged_zero_point = qmax
else:
nudged_zero_point = int(round(initial_zero_point))
return [scale.astype(np.float32), int(nudged_zero_point)]
# Note we explicitly cast variables to np.float32 in a couple of places to avoid
# the default casting in Python often resulting in double precision and to make
# sure we're doing the same numerics as C++ code.
def param_search_greedy(x, bit_rate, n_bins=200, ratio=0.16):
xmin, xmax = np.min(x), np.max(x)
stepsize = (xmax - xmin) / np.float32(n_bins)
min_bins = np.float32(n_bins) * (np.float32(1) - np.float32(ratio))
xq, loss = _compress_uniform_simplified(x, bit_rate, xmin, xmax)
solutions = [] # [(left, right, loss)] # local optima solution
cur_min, cur_max, cur_loss = xmin, xmax, loss
thr = min_bins * stepsize
while cur_min + thr < cur_max:
# move left
xq, loss1 = _compress_uniform_simplified(
x, bit_rate, cur_min + stepsize, cur_max
)
# move right
xq, loss2 = _compress_uniform_simplified(
x, bit_rate, cur_min, cur_max - stepsize
)
if cur_loss < loss1 and cur_loss < loss2:
# found a local optima
solutions.append((cur_min, cur_max, cur_loss))
if loss1 < loss2:
cur_min, cur_loss = cur_min + stepsize, loss1
else:
cur_max, cur_loss = cur_max - stepsize, loss2
if solutions:
best = solutions[0]
for solution in solutions:
if solution[-1] < best[-1]:
best = solution
return best[1], best[0] # xmax, xmin
return xmax, xmin
def _compress_uniform_simplified(X, bit_rate, xmin, xmax, fp16_scale_bias=True):
# affine transform to put Xq in [0,2**bit_rate - 1]
# Xq = (2 ** bit_rate - 1) * (X - xmin) / data_range
if fp16_scale_bias:
xmin = xmin.astype(np.float16).astype(np.float32)
data_range = xmax - xmin
scale = np.where(
data_range == 0, np.float32(1), data_range / np.float32(2 ** bit_rate - 1)
)
if fp16_scale_bias:
scale = scale.astype(np.float16).astype(np.float32)
inverse_scale = np.float32(1) / scale
Xq = np.clip(np.round((X - xmin) * inverse_scale), 0, np.float32(2 ** bit_rate - 1))
Xq = Xq * scale + xmin
# Manually compute loss instead of using np.linalg.norm to use the same
# accumulation order used by C++ code
vlen = 8
loss_v = np.zeros(vlen).astype(np.float32)
for i in range(len(Xq) // vlen * vlen):
loss_v[i % vlen] += (X[i] - Xq[i]) * (X[i] - Xq[i])
loss = np.float32(0)
for i in range(vlen):
loss += loss_v[i]
for i in range(len(Xq) // vlen * vlen, len(Xq)):
loss += (X[i] - Xq[i]) * (X[i] - Xq[i])
loss = np.sqrt(loss)
return Xq, loss
| Foo |
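A small worked example for the `_calculate_dynamic_qparams` helper in the row above; it assumes the test module's own numpy/torch/math imports are in place, and the printed values are approximate, so treat it as an illustration rather than part of the source file.
import numpy as np
import torch
# Data spanning [-1, 1] with torch.qint8 (qmin = -128, qmax = 127):
# scale = (1 - (-1)) / (127 - (-128)) ~= 0.00784 and the nudged zero_point is 0.
X = np.array([-1.0, 0.0, 1.0], dtype=np.float32)
scale, zero_point = _calculate_dynamic_qparams(X, torch.qint8)
print(scale, zero_point)  # ~0.00784, 0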
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/expectations/expect_profile_numeric_columns_percent_diff_between_exclusive_threshold_range.py | {
"start": 947,
"end": 7869
} | class ____(
DataProfilerProfileMetricProvider
):
metric_name = "data_profiler.profile_numeric_columns_percent_diff_between_threshold_range"
value_keys = (
"profile_path",
"limit_check_report_keys",
"numerical_diff_statistics",
)
@metric_value(engine=PandasExecutionEngine)
def _pandas( # noqa: C901 - 22
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[str, Any],
runtime_configuration: Dict,
):
profile_percent_diff = metrics.get("data_profiler.profile_percent_diff")
numeric_columns = metrics.get("data_profiler.profile_numeric_columns")
limit_check_report_keys = metric_value_kwargs["limit_check_report_keys"]
numerical_diff_statistics = metric_value_kwargs["numerical_diff_statistics"]
columns = list(profile_percent_diff["global_stats"]["profile_schema"][1].keys())
data_stats = profile_percent_diff["data_stats"]
requested_columns = {}
unavailable_stats = {}
# Adds columns if generic column key is provided
# Note: Copy is required for all metric arguments to ensure metric_value_id is identified correctly
limit_check_report_keys_copy = copy.deepcopy(limit_check_report_keys)
limit_check_report_keys_copy = replace_generic_operator_in_report_keys(
limit_check_report_keys_copy, numeric_columns
)
for col, stats in limit_check_report_keys_copy.items():
if col not in numeric_columns: # Makes sure column requested is numeric
requested_columns[col] = "Column is Non-Numeric"
continue
# adds stats if generic stat key is provided
numerical_diff_statistics_copy = copy.deepcopy(numerical_diff_statistics)
stats = replace_generic_operator_in_report_keys(stats, numerical_diff_statistics_copy)
if col not in columns: # Makes sure column exists within profile schema
requested_columns[col] = "Column requested was not found."
continue
col_data_stats = {}
for data_stat in data_stats:
if data_stat["column_name"] == col:
col_data_stats = data_stat["statistics"]
break
requested_columns[col] = {}
unavailable_stats[col] = {}
for stat, bounds in stats.items():
if stat not in col_data_stats:
requested_columns[col][stat] = "Statistic requested was not found."
continue
diff_val = col_data_stats[stat]
if diff_val == "ERR_divide_by_zero" or diff_val == "ERR_no_original_value":
unavailable_stats[col][stat] = diff_val
requested_columns[col][stat] = diff_val
continue
if diff_val == "unchanged": # In the case there is no delta
diff_val = 0
between_bounds = is_value_between_bounds(
diff_val, bounds["lower"], bounds["upper"], inclusive=False
)
if not between_bounds:
requested_columns[col][stat] = {
"lower_bound": bounds["lower"],
"upper_bound": bounds["upper"],
"value_found": diff_val,
}
else:
requested_columns[col][stat] = True
for column in list(unavailable_stats.keys()):
if unavailable_stats[column] == {}:
unavailable_stats.pop(column, None)
if unavailable_stats != {}:
div_by_zero_stats = []
no_original_value = []
for column, stats in unavailable_stats.items():
current_col = copy.deepcopy(limit_check_report_keys_copy[column])
for stat, val in stats.items():
if val == "ERR_divide_by_zero":
div_by_zero_stats.append(column + ": " + stat)
current_col.pop(stat, None)
elif val == "ERR_no_original_value":
no_original_value.append(column + ": " + stat)
current_col.pop(stat, None)
limit_check_report_keys_copy[column] = current_col
warning = "\nWARNING:\n"
if len(div_by_zero_stats) > 0:
warning += "Div By Zero ERROR:\nValue in profile report was 0 for the following column: stat\n"
for div_by_zero_stat in div_by_zero_stats:
warning += " " + div_by_zero_stat + "\n"
if len(no_original_value) > 0:
warning += "Value not Found ERROR:\nStatistic was not found in profile report for the following column: stat\n"
for no_original_value_string in no_original_value:
warning += " " + no_original_value_string + "\n"
warning += "\nTo avoid these errors, you should use the replace 'limit_check_report_keys' with the following:\n"
warning += r"" + json.dumps(limit_check_report_keys_copy, indent=2)
warning += "\n"
warnings.warn(warning)
return requested_columns
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""
Returns a dictionary of given metric names and their corresponding configuration, specifying
the metric types and their respective domains"""
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
if (
metric.metric_name
== "data_profiler.profile_numeric_columns_percent_diff_between_threshold_range"
):
dependencies["data_profiler.profile_percent_diff"] = MetricConfiguration(
metric_name="data_profiler.profile_percent_diff",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
dependencies["data_profiler.profile_numeric_columns"] = MetricConfiguration(
metric_name="data_profiler.profile_numeric_columns",
metric_domain_kwargs=metric.metric_domain_kwargs,
metric_value_kwargs=metric.metric_value_kwargs,
)
return dependencies
| DataProfilerProfileNumericColumnsPercentDiffBetweenThresholdRange |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_init.py | {
"start": 56410,
"end": 57335
} | class ____(FSDPTestMultiThread):
@property
def world_size(self) -> int:
return 2
@skip_if_lt_x_gpu(2)
def test_mixed_dtypes_no_grad_param(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
# no grad params with different dtypes
self.w_fp8 = torch.nn.Parameter(
torch.empty((256, 256), dtype=torch.float8_e4m3fn),
requires_grad=False,
)
self.w_fp32 = torch.nn.Parameter(
torch.empty((256, 256), dtype=torch.float32)
)
def forward(self, input):
return
mesh = init_device_mesh(device_type.type, (self.world_size,))
model = Model()
fully_shard(model, mesh=mesh)
model(0)
if __name__ == "__main__":
run_tests()
| TestFullyShardMixedDtypeParam |
python | getsentry__sentry | tests/sentry/rules/actions/test_create_ticket_utils.py | {
"start": 218,
"end": 1656
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.rule = self.create_project_rule()
def test_build_description(self) -> None:
installation = MagicMock()
installation.get_group_description.return_value = "Test description"
def generate_footer(url) -> str:
return f"\n\nThis issue was created by a rule: {url}"
description = build_description(self.event, self.rule.id, installation, generate_footer)
expected_url = f"/organizations/{self.organization.slug}/alerts/rules/{self.project.slug}/{self.rule.id}/"
assert (
description == f"Test description\n\nThis issue was created by a rule: {expected_url}"
)
def test_build_description_workflow_engine_ui(self) -> None:
installation = MagicMock()
installation.get_group_description.return_value = "Test description"
workflow_id = 123
def generate_footer(url) -> str:
return f"\n\nThis issue was created by a workflow: {url}"
description = build_description_workflow_engine_ui(
self.event, workflow_id, installation, generate_footer
)
expected_url = f"/organizations/{self.organization.id}/monitors/alerts/{workflow_id}/"
assert (
description
== f"Test description\n\nThis issue was created by a workflow: {expected_url}"
)
| CreateTicketUtilsTest |
python | gevent__gevent | src/gevent/tests/test__greenlet.py | {
"start": 1836,
"end": 2278
} | class ____(gevent.Greenlet):
killed = False
joined = False
raise_on_join = True
def join(self, timeout=None):
self.joined += 1
if self.raise_on_join:
raise ExpectedJoinError
return gevent.Greenlet.join(self, timeout)
def kill(self, *args, **kwargs): # pylint:disable=signature-differs
self.killed += 1
return gevent.Greenlet.kill(self, *args, **kwargs)
| GreenletRaisesJoin |
python | doocs__leetcode | solution/1700-1799/1793.Maximum Score of a Good Subarray/Solution.py | {
"start": 0,
"end": 766
} | class ____:
def maximumScore(self, nums: List[int], k: int) -> int:
n = len(nums)
left = [-1] * n
right = [n] * n
stk = []
for i, v in enumerate(nums):
while stk and nums[stk[-1]] >= v:
stk.pop()
if stk:
left[i] = stk[-1]
stk.append(i)
stk = []
for i in range(n - 1, -1, -1):
v = nums[i]
while stk and nums[stk[-1]] > v:
stk.pop()
if stk:
right[i] = stk[-1]
stk.append(i)
ans = 0
for i, v in enumerate(nums):
if left[i] + 1 <= k <= right[i] - 1:
ans = max(ans, v * (right[i] - left[i] - 1))
return ans
| Solution |
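A quick sanity check of the monotonic-stack solution in the row above, assuming the masked class is named `Solution` as the target column indicates and that the module-level `from typing import List` import is present.
# For nums = [1, 4, 3, 7, 4, 5] and k = 3, the best good subarray is nums[1..5]
# (minimum value 3, length 5), so the expected score is 3 * 5 = 15.
assert Solution().maximumScore([1, 4, 3, 7, 4, 5], 3) == 15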
python | ray-project__ray | python/ray/serve/batching.py | {
"start": 1327,
"end": 2848
} | class ____:
start_times: List[float]
@property
def min_start_time(self) -> Optional[float]:
return min(self.start_times) if self.start_times else None
@property
def mean_start_time(self) -> Optional[float]:
return (
sum(self.start_times) / len(self.start_times) if self.start_times else None
)
@property
def max_start_time(self) -> Optional[float]:
return max(self.start_times) if self.start_times else None
@property
def num_requests(self) -> int:
return len(self.start_times)
def _batch_args_kwargs(
list_of_flattened_args: List[List[Any]],
) -> Tuple[Tuple[Any], Dict[Any, Any]]:
"""Batch a list of flatten args and returns regular args and kwargs"""
# Ray's flattened arg format is a list of alternating keys and values
# e.g. args=(1, 2), kwargs={"key": "val"} got turned into
# [None, 1, None, 2, "key", "val"]
arg_lengths = {len(args) for args in list_of_flattened_args}
assert (
len(arg_lengths) == 1
), "All batch requests should have the same number of parameters."
arg_length = arg_lengths.pop()
batched_flattened_args = []
for idx in range(arg_length):
if idx % 2 == 0:
batched_flattened_args.append(list_of_flattened_args[0][idx])
else:
batched_flattened_args.append(
[item[idx] for item in list_of_flattened_args]
)
return recover_args(batched_flattened_args)
| _RuntimeSummaryStatistics |
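A minimal sketch of the flattened-argument convention that the comments in `_batch_args_kwargs` describe; the helper name below is illustrative and not part of Ray's API.
# Positional args are keyed by None, keyword args keep their key, so
# args=(1, 2), kwargs={"key": "val"} flattens to [None, 1, None, 2, "key", "val"].
def flatten_args_sketch(args, kwargs):
    flattened = []
    for value in args:
        flattened.extend([None, value])
    for key, value in kwargs.items():
        flattened.extend([key, value])
    return flattened
assert flatten_args_sketch((1, 2), {"key": "val"}) == [None, 1, None, 2, "key", "val"]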
python | ray-project__ray | rllib/examples/envs/classes/multi_agent/bandit_envs_discrete.py | {
"start": 1256,
"end": 3555
} | class ____(gym.Env):
"""Samples data from linearly parameterized arms.
The reward for context X and arm i is given by X^T * theta_i, for some
latent set of parameters {theta_i : i = 1, ..., k}.
The thetas are sampled uniformly at random, the contexts are Gaussian,
and Gaussian noise is added to the rewards.
"""
DEFAULT_CONFIG_LINEAR = {
"feature_dim": 8,
"num_actions": 4,
"reward_noise_std": 0.01,
}
def __init__(self, config=None):
self.config = copy.copy(self.DEFAULT_CONFIG_LINEAR)
if config is not None and type(config) is dict:
self.config.update(config)
self.feature_dim = self.config["feature_dim"]
self.num_actions = self.config["num_actions"]
self.sigma = self.config["reward_noise_std"]
self.action_space = Discrete(self.num_actions)
self.observation_space = Box(low=-10, high=10, shape=(self.feature_dim,))
self.thetas = np.random.uniform(-1, 1, (self.num_actions, self.feature_dim))
self.thetas /= np.linalg.norm(self.thetas, axis=1, keepdims=True)
self._elapsed_steps = 0
self._current_context = None
def _sample_context(self):
return np.random.normal(scale=1 / 3, size=(self.feature_dim,))
def reset(self, *, seed=None, options=None):
self._current_context = self._sample_context()
return self._current_context, {}
def step(self, action):
assert (
self._elapsed_steps is not None
), "Cannot call env.step() beforecalling reset()"
assert action < self.num_actions, "Invalid action."
action = int(action)
context = self._current_context
rewards = self.thetas.dot(context)
opt_action = rewards.argmax()
regret = rewards.max() - rewards[action]
# Add Gaussian noise
rewards += np.random.normal(scale=self.sigma, size=rewards.shape)
reward = rewards[action]
self._current_context = self._sample_context()
return (
self._current_context,
reward,
True,
False,
{"regret": regret, "opt_action": opt_action},
)
def render(self, mode="human"):
raise NotImplementedError
| LinearDiscreteEnv |
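A brief usage sketch for the bandit environment in the row above, assuming the masked class is `LinearDiscreteEnv` (per the target column) and the module's gymnasium/numpy imports are available.
# Each episode is a single pull: reset() samples a context, step() returns the
# reward for the chosen arm plus the regret and optimal arm in the info dict.
env = LinearDiscreteEnv({"feature_dim": 8, "num_actions": 4})
obs, info = env.reset()
obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
print(reward, info["regret"], info["opt_action"])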
python | langchain-ai__langchain | libs/langchain/langchain_classic/evaluation/qa/generate_chain.py | {
"start": 615,
"end": 1117
} | class ____(LLMChain):
"""LLM Chain for generating examples for question answering."""
output_parser: BaseLLMOutputParser = Field(default=_QA_OUTPUT_PARSER)
output_key: str = "qa_pairs"
@classmethod
@override
def is_lc_serializable(cls) -> bool:
return False
@classmethod
def from_llm(cls, llm: BaseLanguageModel, **kwargs: Any) -> QAGenerateChain:
"""Load QA Generate Chain from LLM."""
return cls(llm=llm, prompt=PROMPT, **kwargs)
| QAGenerateChain |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 129634,
"end": 131756
} | class ____(ASTBase):
def __init__(
self,
name: ASTNestedName,
scoped: str,
underlyingType: ASTType,
attrs: ASTAttributeList,
) -> None:
self.name = name
self.scoped = scoped
self.underlyingType = underlyingType
self.attrs = attrs
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTEnum):
return NotImplemented
return (
self.name == other.name
and self.scoped == other.scoped
and self.underlyingType == other.underlyingType
and self.attrs == other.attrs
)
def __hash__(self) -> int:
return hash((self.name, self.scoped, self.underlyingType, self.attrs))
def get_id(self, version: int, objectType: str, symbol: Symbol) -> str:
if version == 1:
raise NoOldIdError
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
res: list[str] = []
if self.scoped:
res.extend((self.scoped, ' '))
res.append(transform(self.attrs))
if len(self.attrs) != 0:
res.append(' ')
res.append(transform(self.name))
if self.underlyingType:
res.extend((' : ', transform(self.underlyingType)))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
# self.scoped has been done by the CPPEnumObject
self.attrs.describe_signature(signode)
if len(self.attrs) != 0:
signode += addnodes.desc_sig_space()
self.name.describe_signature(signode, mode, env, symbol=symbol)
if self.underlyingType:
signode += addnodes.desc_sig_space()
signode += addnodes.desc_sig_punctuation(':', ':')
signode += addnodes.desc_sig_space()
self.underlyingType.describe_signature(
signode, 'noneIsName', env, symbol=symbol
)
| ASTEnum |
python | PyCQA__pylint | tests/functional/r/regression_02/regression_too_many_arguments_2335.py | {
"start": 249,
"end": 400
} | class ____(ABCMeta):
def __new__(mcs, name, bases, namespace, **kwargs):
return ABCMeta.__new__(mcs, name, bases, namespace)
| NodeCheckMetaClass |
python | Farama-Foundation__Gymnasium | gymnasium/wrappers/transform_observation.py | {
"start": 17707,
"end": 19556
} | class ____(
TransformObservation[WrapperObsType, ActType, ObsType],
gym.utils.RecordConstructorArgs,
):
"""Affinely (linearly) rescales a ``Box`` observation space of the environment to within the range of ``[min_obs, max_obs]``.
For unbounded components in the original observation space, the corresponding target bounds must also be infinite and vice versa.
A vector version of the wrapper exists, :class:`gymnasium.wrappers.vector.RescaleObservation`.
Example:
>>> import gymnasium as gym
>>> from gymnasium.wrappers import RescaleObservation
>>> env = gym.make("Pendulum-v1")
>>> env.observation_space
Box([-1. -1. -8.], [1. 1. 8.], (3,), float32)
>>> env = RescaleObservation(env, np.array([-2, -1, -10], dtype=np.float32), np.array([1, 0, 1], dtype=np.float32))
>>> env.observation_space
Box([ -2. -1. -10.], [1. 0. 1.], (3,), float32)
Change logs:
* v1.0.0 - Initially added
"""
def __init__(
self,
env: gym.Env[ObsType, ActType],
min_obs: np.floating | np.integer | np.ndarray,
max_obs: np.floating | np.integer | np.ndarray,
):
"""Constructor that requires the env observation spaces to be a :class:`Box`.
Args:
env: The environment to wrap
min_obs: The new minimum observation bound
max_obs: The new maximum observation bound
"""
assert isinstance(env.observation_space, spaces.Box)
gym.utils.RecordConstructorArgs.__init__(self, min_obs=min_obs, max_obs=max_obs)
obs_space, func, _ = rescale_box(env.observation_space, min_obs, max_obs)
TransformObservation.__init__(
self,
env=env,
func=func,
observation_space=obs_space,
)
| RescaleObservation |
python | wandb__wandb | tests/unit_tests/test_retry.py | {
"start": 181,
"end": 3237
} | class ____:
now: datetime.datetime
sleep: mock.Mock
@pytest.fixture(autouse=True)
def mock_time() -> Iterator[MockTime]:
"""Mock out the now()/sleep() funcs used by the retry logic."""
now = datetime.datetime.now()
def _sleep(seconds):
nonlocal now
now += datetime.timedelta(
seconds=seconds
) # let the event loop shuffle stuff around
with mock.patch(
"wandb.sdk.lib.retry.NOW_FN",
wraps=lambda: now,
) as mock_now, mock.patch(
"wandb.sdk.lib.retry.SLEEP_FN", side_effect=_sleep
) as mock_sleep:
yield MockTime(now=mock_now, sleep=mock_sleep)
def test_retry_respects_num_retries():
func = mock.Mock()
func.side_effect = ValueError
num_retries = 7
retrier = retry.Retry(
func,
num_retries=num_retries,
retryable_exceptions=(ValueError,),
)
with pytest.raises(ValueError):
retrier()
assert func.call_count == num_retries + 1
def test_retry_call_num_retries_overrides_default_num_retries():
func = mock.Mock()
func.side_effect = ValueError
retrier = retry.Retry(
func,
retryable_exceptions=(ValueError,),
)
num_retries = 4
with pytest.raises(ValueError):
retrier(num_retries=num_retries)
assert func.call_count == num_retries + 1
def test_retry_respects_num_retries_across_multiple_calls():
func = mock.Mock()
func.side_effect = ValueError
num_retries = 7
retrier = retry.Retry(
func,
num_retries=num_retries,
retryable_exceptions=(ValueError,),
)
with pytest.raises(ValueError):
retrier()
with pytest.raises(ValueError):
retrier()
assert func.call_count == 2 * (num_retries + 1)
def test_retry_respects_retryable_exceptions():
func = mock.Mock()
func.side_effect = ValueError
retrier = retry.Retry(
func,
retryable_exceptions=(ValueError,),
num_retries=3,
)
with pytest.raises(ValueError):
retrier()
assert func.call_count > 1
func.reset_mock()
func.side_effect = IndexError
retrier = retry.Retry(
func,
retryable_exceptions=(ValueError,),
)
with pytest.raises(IndexError):
retrier()
assert func.call_count == 1
def test_retry_respects_secondary_timeout(mock_time: MockTime):
func = mock.Mock()
func.side_effect = ValueError
t0 = mock_time.now()
def check_retry_timeout(e):
if isinstance(e, ValueError):
return datetime.timedelta(minutes=10)
retry_timedelta = datetime.timedelta(hours=7)
retrier = retry.Retry(
func,
retryable_exceptions=(ValueError,),
check_retry_fn=check_retry_timeout,
retry_timedelta=retry_timedelta,
num_retries=10000,
)
with pytest.raises(ValueError):
retrier()
# add some slop for other timeout calls, should be about 10 minutes of retries
assert 10 <= (mock_time.now() - t0).total_seconds() / 60 < 20
| MockTime |
python | tensorflow__tensorflow | tensorflow/python/keras/testing_utils.py | {
"start": 23826,
"end": 37469
} | class ____(models.Model):
"""Multi IO Keras subclass model that uses a custom build method."""
def __init__(self, branch_a_func, branch_b_func,
shared_input_branch_func=None,
shared_output_branch_func=None):
super(_MultiIOSubclassModelCustomBuild, self).__init__()
self._shared_input_branch_func = shared_input_branch_func
self._branch_a_func = branch_a_func
self._branch_b_func = branch_b_func
self._shared_output_branch_func = shared_output_branch_func
self._shared_input_branch = None
self._branch_a = None
self._branch_b = None
self._shared_output_branch = None
def build(self, input_shape):
if self._shared_input_branch_func():
self._shared_input_branch = self._shared_input_branch_func()
self._branch_a = self._branch_a_func()
self._branch_b = self._branch_b_func()
if self._shared_output_branch_func():
self._shared_output_branch = self._shared_output_branch_func()
def call(self, inputs, **kwargs):
if self._shared_input_branch:
for layer in self._shared_input_branch:
inputs = layer(inputs)
a = inputs
b = inputs
else:
a, b = inputs
for layer in self._branch_a:
a = layer(a)
for layer in self._branch_b:
b = layer(b)
outs = a, b
if self._shared_output_branch:
for layer in self._shared_output_branch:
outs = layer(outs)
return outs
def get_multi_io_model(
branch_a,
branch_b,
shared_input_branch=None,
shared_output_branch=None):
"""Builds a multi-io model that contains two branches.
The produced model will be of the type specified by `get_model_type`.
To build a two-input, two-output model:
Specify a list of layers for branch a and branch b, but do not specify any
shared input branch or shared output branch. The resulting model will apply
each branch to a different input, to produce two outputs.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
model = get_multi_io_model(branch_a, branch_b)
```
To build a two-input, one-output model:
Specify a list of layers for branch a and branch b, and specify a
shared output branch. The resulting model will apply
each branch to a different input. It will then apply the shared output
branch to a tuple containing the intermediate outputs of each branch,
to produce a single output. The first layer in the shared_output_branch
must be able to merge a tuple of two tensors.
The first value in branch_a must be the Keras 'Input' layer for branch a,
and the first value in branch_b must be the Keras 'Input' layer for
branch b.
example usage:
```
input_branch_a = [Input(shape=(2,), name='a'), Dense(), Dense()]
input_branch_b = [Input(shape=(3,), name='b'), Dense(), Dense()]
shared_output_branch = [Concatenate(), Dense(), Dense()]
model = get_multi_io_model(input_branch_a, input_branch_b,
shared_output_branch=shared_output_branch)
```
To build a one-input, two-output model:
Specify a list of layers for branch a and branch b, and specify a
shared input branch. The resulting model will take one input, and apply
the shared input branch to it. It will then respectively apply each branch
to that intermediate result in parallel, to produce two outputs.
The first value in the shared_input_branch must be the Keras 'Input' layer
for the whole model. Branch a and branch b should not contain any Input
layers.
example usage:
```
shared_input_branch = [Input(shape=(2,), name='in'), Dense(), Dense()]
output_branch_a = [Dense(), Dense()]
output_branch_b = [Dense(), Dense()]
model = get_multi_io_model(output_branch_a, output_branch_b,
shared_input_branch=shared_input_branch)
```
Args:
branch_a: A sequence of layers for branch a of the model.
branch_b: A sequence of layers for branch b of the model.
shared_input_branch: An optional sequence of layers to apply to a single
input, before applying both branches to that intermediate result. If set,
the model will take only one input instead of two. Defaults to None.
shared_output_branch: An optional sequence of layers to merge the
intermediate results produced by branch a and branch b. If set,
the model will produce only one output instead of two. Defaults to None.
Returns:
A multi-io model of the type specified by `get_model_type`, specified
by the different branches.
"""
# Extract the functional inputs from the layer lists
if shared_input_branch:
inputs = shared_input_branch[0]
shared_input_branch = shared_input_branch[1:]
else:
inputs = branch_a[0], branch_b[0]
branch_a = branch_a[1:]
branch_b = branch_b[1:]
model_type = get_model_type()
if model_type == 'subclass':
return _MultiIOSubclassModel(branch_a, branch_b, shared_input_branch,
shared_output_branch)
if model_type == 'subclass_custom_build':
return _MultiIOSubclassModelCustomBuild((lambda: branch_a),
(lambda: branch_b),
(lambda: shared_input_branch),
(lambda: shared_output_branch))
if model_type == 'sequential':
raise ValueError('Cannot use `get_multi_io_model` to construct '
'sequential models')
if model_type == 'functional':
if shared_input_branch:
a_and_b = inputs
for layer in shared_input_branch:
a_and_b = layer(a_and_b)
a = a_and_b
b = a_and_b
else:
a, b = inputs
for layer in branch_a:
a = layer(a)
for layer in branch_b:
b = layer(b)
outputs = a, b
if shared_output_branch:
for layer in shared_output_branch:
outputs = layer(outputs)
return models.Model(inputs, outputs)
raise ValueError('Unknown model type {}'.format(model_type))
_V2_OPTIMIZER_MAP = {
'adadelta': adadelta_v2.Adadelta,
'adagrad': adagrad_v2.Adagrad,
'adam': adam_v2.Adam,
'adamax': adamax_v2.Adamax,
'nadam': nadam_v2.Nadam,
'rmsprop': rmsprop_v2.RMSprop,
'sgd': gradient_descent_v2.SGD
}
def get_v2_optimizer(name, **kwargs):
"""Get the v2 optimizer requested.
This is only necessary until v2 are the default, as we are testing in Eager,
and Eager + v1 optimizers fail tests. When we are in v2, the strings alone
should be sufficient, and this mapping can theoretically be removed.
Args:
name: string name of Keras v2 optimizer.
**kwargs: any kwargs to pass to the optimizer constructor.
Returns:
Initialized Keras v2 optimizer.
Raises:
ValueError: if an unknown name was passed.
"""
try:
return _V2_OPTIMIZER_MAP[name](**kwargs)
except KeyError:
raise ValueError(
'Could not find requested v2 optimizer: {}\nValid choices: {}'.format(
name, list(_V2_OPTIMIZER_MAP.keys())))
def get_expected_metric_variable_names(var_names, name_suffix=''):
"""Returns expected metric variable names given names and prefix/suffix."""
if tf2.enabled() or context.executing_eagerly():
# In V1 eager mode and V2 variable names are not made unique.
return [n + ':0' for n in var_names]
# In V1 graph mode variable names are made unique using a suffix.
return [n + name_suffix + ':0' for n in var_names]
def enable_v2_dtype_behavior(fn):
"""Decorator for enabling the layer V2 dtype behavior on a test."""
return _set_v2_dtype_behavior(fn, True)
def disable_v2_dtype_behavior(fn):
"""Decorator for disabling the layer V2 dtype behavior on a test."""
return _set_v2_dtype_behavior(fn, False)
def _set_v2_dtype_behavior(fn, enabled):
"""Returns version of 'fn' that runs with v2 dtype behavior on or off."""
@functools.wraps(fn)
def wrapper(*args, **kwargs):
v2_dtype_behavior = base_layer_utils.V2_DTYPE_BEHAVIOR
base_layer_utils.V2_DTYPE_BEHAVIOR = enabled
try:
return fn(*args, **kwargs)
finally:
base_layer_utils.V2_DTYPE_BEHAVIOR = v2_dtype_behavior
return tf_decorator.make_decorator(fn, wrapper)
@contextlib.contextmanager
def device(should_use_gpu):
"""Uses gpu when requested and available."""
if should_use_gpu and test_util.is_gpu_available():
dev = '/device:GPU:0'
else:
dev = '/device:CPU:0'
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(should_use_gpu=True):
yield
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given classes test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith('test') and (name !=
'test_session'):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
# The description is just for documentation purposes.
def run_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute test with TensorFloat-32 disabled.
While almost every real-world deep learning model runs fine with
TensorFloat-32, many tests use assertAllClose or similar methods.
TensorFloat-32 matmuls typically will cause such methods to fail with the
default tolerances.
Args:
description: A description used for documentation purposes, describing why
the test requires TensorFloat-32 to be disabled.
Returns:
Decorator which runs a test with TensorFloat-32 disabled.
"""
def decorator(f):
@functools.wraps(f)
def decorated(self, *args, **kwargs):
allowed = config.tensor_float_32_execution_enabled()
try:
config.enable_tensor_float_32_execution(False)
f(self, *args, **kwargs)
finally:
config.enable_tensor_float_32_execution(allowed)
return decorated
return decorator
# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute all tests in a class with TensorFloat-32 disabled."""
return for_all_test_methods(run_without_tensor_float_32, description)
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
See go/tf-test-decorator-cheatsheet for the decorators to use in different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator the can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError('`run_v2_only` only supports test methods.')
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest('Test is only compatible with v2')
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
combinations = [collections.OrderedDict(result)
for result in itertools.product(*combinations)]
named_combinations = []
for combination in combinations:
assert isinstance(combination, collections.OrderedDict)
name = ''.join([
'_{}_{}'.format(''.join(filter(str.isalnum, key)),
''.join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
collections.OrderedDict(
list(combination.items()) +
[('testcase_name', '_test{}'.format(name))]))
return named_combinations
| _MultiIOSubclassModelCustomBuild |
python | spack__spack | lib/spack/spack/version/version_types.py | {
"start": 17477,
"end": 28498
} | class ____(ConcreteVersion):
"""Class to represent versions interpreted from git refs.
There are two distinct categories of git versions:
1) GitVersions instantiated with an associated reference version (e.g. ``git.foo=1.2``)
2) GitVersions requiring commit lookups
Git ref versions that are not paired with a known version are handled separately from
all other version comparisons. When Spack identifies a git ref version, it associates a
``CommitLookup`` object with the version. This object handles caching of information
from the git repo. When executing comparisons with a git ref version, Spack queries the
``CommitLookup`` for the most recent version previous to this git ref, as well as the
distance between them expressed as a number of commits. If the previous version is
``X.Y.Z`` and the distance is ``D``, the git commit version is represented by the
tuple ``(X, Y, Z, '', D)``. The component ``''`` cannot be parsed as part of any valid
version, but is a valid component. This allows a git ref version to be less than (older
than) every Version newer than its previous version, but still newer than its previous
version.
To find the previous version from a git ref version, Spack queries the git repo for its
tags. Any tag that matches a version known to Spack is associated with that version, as
is any tag that is a known version prepended with the character ``v`` (i.e., a tag
``v1.0`` is associated with the known version ``1.0``). Additionally, any tag that
represents a semver version (X.Y.Z with X, Y, Z all integers) is associated with the
version it represents, even if that version is not known to Spack. Each tag is then
queried in git to see whether it is an ancestor of the git ref in question, and if so
the distance between the two. The previous version is the version that is an ancestor
with the least distance from the git ref in question.
This procedure can be circumvented if the user supplies a known version to associate
with the GitVersion (e.g. ``[hash]=develop``). If the user prescribes the version then
there is no need to do a lookup and the standard version comparison operations are
sufficient.
"""
__slots__ = ["has_git_prefix", "commit_sha", "ref", "std_version", "_ref_lookup"]
def __init__(self, string: str):
# TODO will be required for concrete specs when commit lookup added
self.commit_sha: Optional[str] = None
self.std_version: Optional[StandardVersion] = None
# optional user supplied git ref
self.ref: Optional[str] = None
# An object that can lookup git refs to compare them to versions
self._ref_lookup: Optional[AbstractRefLookup] = None
self.has_git_prefix = string.startswith("git.")
# Drop `git.` prefix
normalized_string = string[4:] if self.has_git_prefix else string
if "=" in normalized_string:
# Store the git reference, and parse the user provided version.
self.ref, spack_version = normalized_string.split("=")
self.std_version = StandardVersion(
spack_version, *parse_string_components(spack_version)
)
else:
# The ref_version is lazily attached after parsing, since we don't know what
# package it applies to here.
self.std_version = None
self.ref = normalized_string
# Used by fetcher
self.is_commit: bool = is_git_commit_sha(self.ref)
# translations
if self.is_commit:
self.commit_sha = self.ref
@property
def ref_version(self) -> StandardVersion:
# Return cached version if we have it
if self.std_version is not None:
return self.std_version
if self.ref_lookup is None:
raise VersionLookupError(
f"git ref '{self.ref}' cannot be looked up: " "call attach_lookup first"
)
version_string, distance = self.ref_lookup.get(self.ref)
version_string = version_string or "0"
# Add a -git.<distance> suffix when we're not exactly on a tag
if distance > 0:
version_string += f"-git.{distance}"
self.std_version = StandardVersion(
version_string, *parse_string_components(version_string)
)
return self.std_version
def intersects(self, other: VersionType) -> bool:
# For concrete things intersects = satisfies = equality
if isinstance(other, GitVersion):
return self == other
if isinstance(other, StandardVersion):
return False
if isinstance(other, ClosedOpenRange):
return self.ref_version.intersects(other)
if isinstance(other, VersionList):
return any(self.intersects(rhs) for rhs in other)
raise TypeError(f"'intersects()' not supported for instances of {type(other)}")
def intersection(self, other: VersionType) -> VersionType:
if isinstance(other, ConcreteVersion):
return self if self == other else VersionList()
return other.intersection(self)
def satisfies(self, other: VersionType) -> bool:
# Concrete versions mean we have to do an equality check
if isinstance(other, GitVersion):
return self == other
if isinstance(other, StandardVersion):
return False
if isinstance(other, ClosedOpenRange):
return self.ref_version.satisfies(other)
if isinstance(other, VersionList):
return any(self.satisfies(rhs) for rhs in other)
raise TypeError(f"'satisfies()' not supported for instances of {type(other)}")
def __str__(self) -> str:
s = ""
if self.ref:
s += f"git.{self.ref}" if self.has_git_prefix else self.ref
# Note: the solver actually depends on str(...) to produce the effective version.
# So when a lookup is attached, we require the resolved version to be printed.
# But for standalone git versions that don't have a repo attached, it would still
# be nice if we could print @<hash>.
try:
s += f"={self.ref_version}"
except VersionLookupError:
pass
return s
def __repr__(self):
return f'GitVersion("{self}")'
def __bool__(self):
return True
def __eq__(self, other: object) -> bool:
# GitVersion cannot be equal to StandardVersion, otherwise == is not transitive
return (
isinstance(other, GitVersion)
and self.ref == other.ref
# TODO(psakiev) this needs to change to commits when we turn on lookups
and self.ref_version == other.ref_version
)
def __ne__(self, other: object) -> bool:
return not self == other
def __lt__(self, other: object) -> bool:
if isinstance(other, GitVersion):
return (self.ref_version, self.ref) < (other.ref_version, other.ref)
if isinstance(other, StandardVersion):
# GitVersion at equal ref version is larger than StandardVersion
return self.ref_version < other
if isinstance(other, ClosedOpenRange):
return self.ref_version < other
raise TypeError(f"'<' not supported between instances of {type(self)} and {type(other)}")
def __le__(self, other: object) -> bool:
if isinstance(other, GitVersion):
return (self.ref_version, self.ref) <= (other.ref_version, other.ref)
if isinstance(other, StandardVersion):
# Note: GitVersion hash=1.2.3 > StandardVersion 1.2.3, so use < comparison.
return self.ref_version < other
if isinstance(other, ClosedOpenRange):
# Equality is not a thing
return self.ref_version < other
raise TypeError(f"'<=' not supported between instances of {type(self)} and {type(other)}")
def __ge__(self, other: object) -> bool:
if isinstance(other, GitVersion):
return (self.ref_version, self.ref) >= (other.ref_version, other.ref)
if isinstance(other, StandardVersion):
# Note: GitVersion hash=1.2.3 > StandardVersion 1.2.3, so use >= here.
return self.ref_version >= other
if isinstance(other, ClosedOpenRange):
return self.ref_version > other
raise TypeError(f"'>=' not supported between instances of {type(self)} and {type(other)}")
def __gt__(self, other: object) -> bool:
if isinstance(other, GitVersion):
return (self.ref_version, self.ref) > (other.ref_version, other.ref)
if isinstance(other, StandardVersion):
# Note: GitVersion hash=1.2.3 > StandardVersion 1.2.3, so use >= here.
return self.ref_version >= other
if isinstance(other, ClosedOpenRange):
return self.ref_version > other
raise TypeError(f"'>' not supported between instances of {type(self)} and {type(other)}")
def __hash__(self):
# hashing should not cause version lookup
return hash(self.ref)
def __contains__(self, other: object) -> bool:
raise NotImplementedError
@property
def ref_lookup(self):
if self._ref_lookup:
# Get operation ensures dict is populated
self._ref_lookup.get(self.ref)
return self._ref_lookup
def attach_lookup(self, lookup: AbstractRefLookup):
"""
Use the git fetcher to look up a version for a commit.
Since we want to optimize the clone and lookup, we do the clone once
and store it in the user specified git repository cache. We also need
context of the package to get known versions, which could be tags if
they are linked to Git Releases. If we are unable to determine the
context of the version, we cannot continue. This implementation is
alongside the GitFetcher because eventually the git repos cache will
be one and the same with the source cache.
"""
self._ref_lookup = lookup
def __iter__(self):
return self.ref_version.__iter__()
def __len__(self):
return self.ref_version.__len__()
def __getitem__(self, idx):
return self.ref_version.__getitem__(idx)
def isdevelop(self):
return self.ref_version.isdevelop()
def is_prerelease(self) -> bool:
return self.ref_version.is_prerelease()
@property
def dotted(self) -> StandardVersion:
return self.ref_version.dotted
@property
def underscored(self) -> StandardVersion:
return self.ref_version.underscored
@property
def dashed(self) -> StandardVersion:
return self.ref_version.dashed
@property
def joined(self) -> StandardVersion:
return self.ref_version.joined
def up_to(self, index) -> StandardVersion:
return self.ref_version.up_to(index)
| GitVersion |
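A short sketch of the two flavors described in the docstring of the row above, assuming the masked class is `GitVersion` (per the target column) and that the module-level helpers it references (StandardVersion, parse_string_components, is_git_commit_sha) are available.
# Pinned form: a git ref paired with a user-supplied version, no repo lookup needed.
v = GitVersion("git.mybranch=1.2.3")
print(v.ref, str(v.ref_version))  # mybranch 1.2.3
# Bare commit form: ref_version stays unresolved until a lookup is attached.
c = GitVersion("a" * 40)
print(c.is_commit)  # True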
python | django__django | tests/file_storage/test_generate_filename.py | {
"start": 1114,
"end": 2212
} | class ____(SimpleTestCase):
"""Tests for base Storage's generate_filename method."""
storage_class = Storage
def test_valid_names(self):
storage = self.storage_class()
name = "UnTRIVíAL @fil$ena#me!"
valid_name = storage.get_valid_name(name)
candidates = [
(name, valid_name),
(f"././././././{name}", valid_name),
(f"some/path/{name}", f"some/path/{valid_name}"),
(f"some/./path/./{name}", f"some/path/{valid_name}"),
(f"././some/././path/./{name}", f"some/path/{valid_name}"),
(f".\\.\\.\\.\\.\\.\\{name}", valid_name),
(f"some\\path\\{name}", f"some/path/{valid_name}"),
(f"some\\.\\path\\.\\{name}", f"some/path/{valid_name}"),
(f".\\.\\some\\.\\.\\path\\.\\{name}", f"some/path/{valid_name}"),
]
for name, expected in candidates:
with self.subTest(name=name):
result = storage.generate_filename(name)
self.assertEqual(result, os.path.normpath(expected))
| StorageGenerateFilenameTests |
python | cherrypy__cherrypy | cherrypy/test/helper.py | {
"start": 13597,
"end": 17534
} | class ____(object):
"""CherryPy Process Spawning Helper."""
pid_file = os.path.join(thisdir, 'test.pid')
config_file = os.path.join(thisdir, 'test.conf')
config_template = """[global]
server.socket_host: '%(host)s'
server.socket_port: %(port)s
checker.on: False
log.screen: False
log.error_file: r'%(error_log)s'
log.access_file: r'%(access_log)s'
%(ssl)s
%(extra)s
"""
error_log = os.path.join(thisdir, 'test.error.log')
access_log = os.path.join(thisdir, 'test.access.log')
def __init__(
self,
wait=False,
daemonize=False,
ssl=False,
socket_host=None,
socket_port=None,
):
"""Initialize a server process runner."""
self.wait = wait
self.daemonize = daemonize
self.ssl = ssl
self.host = socket_host or cherrypy.server.socket_host
self.port = socket_port or cherrypy.server.socket_port
def write_conf(self, extra=''):
"""Write the server config to disk."""
if self.ssl:
serverpem = os.path.join(thisdir, 'test.pem')
ssl = """
server.ssl_certificate: r'%s'
server.ssl_private_key: r'%s'
""" % (serverpem, serverpem)
else:
ssl = ''
conf = self.config_template % {
'host': self.host,
'port': self.port,
'error_log': self.error_log,
'access_log': self.access_log,
'ssl': ssl,
'extra': extra,
}
with io.open(self.config_file, 'w', encoding='utf-8') as f:
f.write(str(conf))
def start(self, imports=None):
"""Start cherryd in a subprocess."""
portend.free(self.host, self.port, timeout=1)
args = [
'-m',
'cherrypy',
'-c',
self.config_file,
'-p',
self.pid_file,
]
r"""Command for running cherryd server with autoreload enabled.
Using
```
['-c',
"__requires__ = 'CherryPy'; \
import importlib.metadata, re, sys; \
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]); \
sys.exit(\
importlib.metadata.distribution('cherrypy').entry_points[0])"]
```
doesn't work as it's impossible to reconstruct the `-c`'s contents.
Ref: https://github.com/cherrypy/cherrypy/issues/1545
"""
if not isinstance(imports, (list, tuple)):
imports = [imports]
for i in imports:
if i:
args.append('-i')
args.append(i)
if self.daemonize:
args.append('-d')
env = os.environ.copy()
# Make sure we import the cherrypy package in which this module is
# defined.
grandparentdir = os.path.abspath(os.path.join(thisdir, '..', '..'))
if env.get('PYTHONPATH', ''):
env['PYTHONPATH'] = os.pathsep.join(
(grandparentdir, env['PYTHONPATH']),
)
else:
env['PYTHONPATH'] = grandparentdir
self._proc = subprocess.Popen([sys.executable] + args, env=env)
if self.wait:
self.exit_code = self._proc.wait()
else:
portend.occupied(self.host, self.port, timeout=5)
# Give the engine a wee bit more time to finish STARTING
if self.daemonize:
time.sleep(2)
else:
time.sleep(1)
def get_pid(self):
"""Get the server process ID."""
if self.daemonize:
with open(self.pid_file, 'rb') as f:
return int(f.read())
return self._proc.pid
def join(self):
"""Wait for the process to exit."""
if self.daemonize:
return self._join_daemon()
self._proc.wait()
def _join_daemon(self):
with contextlib.suppress(IOError):
os.waitpid(self.get_pid(), 0)
| CPProcess |
python | lepture__authlib | authlib/jose/rfc7519/jwt.py | {
"start": 483,
"end": 6185
} | class ____:
SENSITIVE_NAMES = ("password", "token", "secret", "secret_key")
# Thanks to sentry SensitiveDataFilter
SENSITIVE_VALUES = re.compile(
r"|".join(
[
# http://www.richardsramblings.com/regex/credit-card-numbers/
r"\b(?:3[47]\d|(?:4\d|5[1-5]|65)\d{2}|6011)\d{12}\b",
# various private keys
r"-----BEGIN[A-Z ]+PRIVATE KEY-----.+-----END[A-Z ]+PRIVATE KEY-----",
# social security numbers (US)
r"^\b(?!(000|666|9))\d{3}-(?!00)\d{2}-(?!0000)\d{4}\b",
]
),
re.DOTALL,
)
def __init__(self, algorithms, private_headers=None):
self._jws = JsonWebSignature(algorithms, private_headers=private_headers)
self._jwe = JsonWebEncryption(algorithms, private_headers=private_headers)
def check_sensitive_data(self, payload):
"""Check if payload contains sensitive information."""
for k in payload:
# check claims key name
if k in self.SENSITIVE_NAMES:
raise InsecureClaimError(k)
# check claims values
v = payload[k]
if isinstance(v, str) and self.SENSITIVE_VALUES.search(v):
raise InsecureClaimError(k)
def encode(self, header, payload, key, check=True):
"""Encode a JWT with the given header, payload and key.
:param header: A dict of JWS header
:param payload: A dict to be encoded
:param key: key used to sign the signature
:param check: check if sensitive data in payload
:return: bytes
"""
header.setdefault("typ", "JWT")
for k in ["exp", "iat", "nbf"]:
# convert datetime into timestamp
claim = payload.get(k)
if isinstance(claim, datetime.datetime):
payload[k] = calendar.timegm(claim.utctimetuple())
if check:
self.check_sensitive_data(payload)
key = find_encode_key(key, header)
text = to_bytes(json_dumps(payload))
if "enc" in header:
return self._jwe.serialize_compact(header, text, key)
else:
return self._jws.serialize_compact(header, text, key)
def decode(self, s, key, claims_cls=None, claims_options=None, claims_params=None):
"""Decode the JWT with the given key. This is similar with
:meth:`verify`, except that it will raise BadSignatureError when
signature doesn't match.
:param s: text of JWT
:param key: key used to verify the signature
:param claims_cls: class to be used for JWT claims
:param claims_options: `options` parameters for claims_cls
:param claims_params: `params` parameters for claims_cls
:return: claims_cls instance
:raise: BadSignatureError
"""
if claims_cls is None:
claims_cls = JWTClaims
if callable(key):
load_key = key
else:
load_key = create_load_key(prepare_raw_key(key))
s = to_bytes(s)
dot_count = s.count(b".")
if dot_count == 2:
data = self._jws.deserialize_compact(s, load_key, decode_payload)
elif dot_count == 4:
data = self._jwe.deserialize_compact(s, load_key, decode_payload)
else:
raise DecodeError("Invalid input segments length")
return claims_cls(
data["payload"],
data["header"],
options=claims_options,
params=claims_params,
)
def decode_payload(bytes_payload):
try:
payload = json_loads(to_unicode(bytes_payload))
except ValueError as exc:
raise DecodeError("Invalid payload value") from exc
if not isinstance(payload, dict):
raise DecodeError("Invalid payload type")
return payload
def prepare_raw_key(raw):
if isinstance(raw, KeySet):
return raw
if isinstance(raw, str) and raw.startswith("{") and raw.endswith("}"):
raw = json_loads(raw)
elif isinstance(raw, (tuple, list)):
raw = {"keys": raw}
return raw
def find_encode_key(key, header):
if isinstance(key, KeySet):
kid = header.get("kid")
if kid:
return key.find_by_kid(kid)
rv = random.choice(key.keys)
# use side effect to add kid value into header
header["kid"] = rv.kid
return rv
if isinstance(key, dict) and "keys" in key:
keys = key["keys"]
kid = header.get("kid")
for k in keys:
if k.get("kid") == kid:
return k
if not kid:
rv = random.choice(keys)
header["kid"] = rv["kid"]
return rv
raise ValueError("Invalid JSON Web Key Set")
# append kid into header
if isinstance(key, dict) and "kid" in key:
header["kid"] = key["kid"]
elif isinstance(key, Key) and key.kid:
header["kid"] = key.kid
return key
def create_load_key(key):
def load_key(header, payload):
if isinstance(key, KeySet):
return key.find_by_kid(header.get("kid"))
if isinstance(key, dict) and "keys" in key:
keys = key["keys"]
kid = header.get("kid")
if kid is not None:
# look for the requested key
for k in keys:
if k.get("kid") == kid:
return k
else:
# use the only key
if len(keys) == 1:
return keys[0]
raise ValueError("Invalid JSON Web Key Set")
return key
return load_key
| JsonWebToken |
python | bottlepy__bottle | test/test_jinja2.py | {
"start": 145,
"end": 2745
} | class ____(unittest.TestCase):
def test_string(self):
""" Templates: Jinja2 string"""
t = Jinja2Template('start {{var}} end').render(var='var')
self.assertEqual('start var end', ''.join(t))
def test_file(self):
""" Templates: Jinja2 file"""
with chdir(__file__):
t = Jinja2Template(name='./views/jinja2_simple.tpl', lookup=['.']).render(var='var')
self.assertEqual('start var end', ''.join(t))
def test_name(self):
""" Templates: Jinja2 lookup by name """
with chdir(__file__):
t = Jinja2Template(name='jinja2_simple', lookup=['./views/']).render(var='var')
self.assertEqual('start var end', ''.join(t))
def test_notfound(self):
""" Templates: Unavailable templates"""
self.assertRaises(Exception, Jinja2Template, name="abcdef", lookup=['./views/'])
def test_error(self):
""" Templates: Exceptions"""
self.assertRaises(Exception, Jinja2Template, '{% for badsyntax')
def test_inherit(self):
""" Templates: Jinja2 lookup and inherience """
with chdir(__file__):
t = Jinja2Template(name='jinja2_inherit', lookup=['./views/']).render()
self.assertEqual('begin abc end', ''.join(t))
def test_custom_filters(self):
"""Templates: jinja2 custom filters """
from bottle import jinja2_template as template
settings = dict(filters = {"star": lambda var: touni("").join((touni('*'), var, touni('*')))})
t = Jinja2Template("start {{var|star}} end", **settings)
self.assertEqual("start *var* end", t.render(var="var"))
def test_custom_tests(self):
"""Templates: jinja2 custom tests """
from bottle import jinja2_template as template
TEMPL = touni("{% if var is even %}gerade{% else %}ungerade{% endif %}")
settings = dict(tests={"even": lambda x: False if x % 2 else True})
t = Jinja2Template(TEMPL, **settings)
self.assertEqual("gerade", t.render(var=2))
self.assertEqual("ungerade", t.render(var=1))
def test_template_shortcut(self):
result = jinja2_template('start {{var}} end', var='middle')
self.assertEqual(touni('start middle end'), result)
def test_view_decorator(self):
@jinja2_view('start {{var}} end')
def test():
return dict(var='middle')
self.assertEqual(touni('start middle end'), test())
try:
import jinja2
except ImportError:
warn("No Jinja2 template support. Skipping tests.")
del TestJinja2Template
| TestJinja2Template |
python | PrefectHQ__prefect | src/prefect/_internal/_logging.py | {
"start": 276,
"end": 1124
} | class ____(logging.Logger):
"""
A logger with extensions for safe emission of logs in our concurrency tooling.
"""
def isEnabledFor(self, level: int):
# Override `logger.isEnabledFor` to avoid taking a logging lock which can cause
# deadlocks during complex concurrency handling
from prefect.settings import PREFECT_LOGGING_INTERNAL_LEVEL
internal_level = getLevelNamesMapping()[PREFECT_LOGGING_INTERNAL_LEVEL.value()]
return level >= internal_level
def getChild(self, suffix: str) -> Self:
logger = super().getChild(suffix)
logger.__class__ = self.__class__
return logger
# Use `getLogger` to retain `logger.Manager` behavior
logger = logging.getLogger("prefect._internal")
# Update the class to inject patched behavior
logger.__class__ = SafeLogger
| SafeLogger |
python | pyqtgraph__pyqtgraph | pyqtgraph/util/garbage_collector.py | {
"start": 37,
"end": 1605
} | class ____(object):
'''
Disable automatic garbage collection and instead collect manually
on a timer.
This is done to ensure that garbage collection only happens in the GUI
thread, as otherwise Qt can crash.
Credit: Erik Janssens
Source: http://pydev.blogspot.com/2014/03/should-python-garbage-collector-be.html
'''
def __init__(self, interval=1.0, debug=False):
self.debug = debug
if debug:
gc.set_debug(gc.DEBUG_LEAK)
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.check)
self.threshold = gc.get_threshold()
gc.disable()
self.timer.start(int(interval * 1000))
def check(self):
#return self.debug_cycles() # uncomment to just debug cycles
l0, l1, l2 = gc.get_count()
if self.debug:
print('gc_check called:', l0, l1, l2)
if l0 > self.threshold[0]:
num = gc.collect(0)
if self.debug:
print('collecting gen 0, found: %d unreachable' % num)
if l1 > self.threshold[1]:
num = gc.collect(1)
if self.debug:
print('collecting gen 1, found: %d unreachable' % num)
if l2 > self.threshold[2]:
num = gc.collect(2)
if self.debug:
print('collecting gen 2, found: %d unreachable' % num)
def debug_cycles(self):
gc.collect()
for obj in gc.garbage:
print(obj, repr(obj), type(obj))
| GarbageCollector |
python | langchain-ai__langchain | libs/core/tests/unit_tests/language_models/chat_models/test_base.py | {
"start": 10822,
"end": 12190
} | class ____(BaseTracer):
def __init__(self) -> None:
super().__init__()
self.traced_run_ids: list = []
def _persist_run(self, run: Run) -> None:
"""Persist a run."""
self.traced_run_ids.append(run.id)
def test_pass_run_id() -> None:
llm = FakeListChatModel(responses=["a", "b", "c"])
cb = FakeTracer()
uid1 = uuid.uuid4()
llm.invoke("Dummy message", {"callbacks": [cb], "run_id": uid1})
assert cb.traced_run_ids == [uid1]
uid2 = uuid.uuid4()
list(llm.stream("Dummy message", {"callbacks": [cb], "run_id": uid2}))
assert cb.traced_run_ids == [uid1, uid2]
uid3 = uuid.uuid4()
llm.batch([["Dummy message"]], {"callbacks": [cb], "run_id": uid3})
assert cb.traced_run_ids == [uid1, uid2, uid3]
async def test_async_pass_run_id() -> None:
llm = FakeListChatModel(responses=["a", "b", "c"])
cb = FakeTracer()
uid1 = uuid.uuid4()
await llm.ainvoke("Dummy message", {"callbacks": [cb], "run_id": uid1})
assert cb.traced_run_ids == [uid1]
uid2 = uuid.uuid4()
async for _ in llm.astream("Dummy message", {"callbacks": [cb], "run_id": uid2}):
pass
assert cb.traced_run_ids == [uid1, uid2]
uid3 = uuid.uuid4()
await llm.abatch([["Dummy message"]], {"callbacks": [cb], "run_id": uid3})
assert cb.traced_run_ids == [uid1, uid2, uid3]
| FakeTracer |
python | jina-ai__jina | tests/unit/orchestrate/flow/flow-orchestrate/test_ndarray_type.py | {
"start": 136,
"end": 224
} | class ____(Executor):
@requests
def foo(self, docs, **kwargs):
pass
| MyExec |
python | Netflix__metaflow | metaflow/plugins/aws/batch/batch_client.py | {
"start": 1978,
"end": 24477
} | class ____(object):
def __init__(self, client):
self._client = client
tree = lambda: defaultdict(tree)
self.payload = tree()
def execute(self):
if self._image is None:
raise BatchJobException(
"Unable to launch AWS Batch job. No docker image specified."
)
if self._iam_role is None:
raise BatchJobException(
"Unable to launch AWS Batch job. No IAM role specified."
)
# Multinode
if getattr(self, "num_parallel", 0) >= 1:
num_nodes = self.num_parallel
# We need this task-id set so that all the nodes are aware of the control
# task's task-id. These "MF_" variables populate the `current.parallel` namedtuple
self.environment_variable("MF_PARALLEL_CONTROL_TASK_ID", self._task_id)
main_task_override = copy.deepcopy(self.payload["containerOverrides"])
# main
commands = self.payload["containerOverrides"]["command"][-1]
# add split-index as this worker is also an ubf_task
commands = commands.replace("[multinode-args]", "--split-index 0")
main_task_override["command"][-1] = commands
# secondary tasks
secondary_task_container_override = copy.deepcopy(
self.payload["containerOverrides"]
)
secondary_commands = self.payload["containerOverrides"]["command"][-1]
# other tasks do not have control- prefix, and have the split id appended to the task-id
secondary_commands = secondary_commands.replace(
self._task_id,
self._task_id.replace("control-", "")
+ "-node-$AWS_BATCH_JOB_NODE_INDEX",
)
secondary_commands = secondary_commands.replace(
"ubf_control",
"ubf_task",
)
secondary_commands = secondary_commands.replace(
"[multinode-args]", "--split-index $AWS_BATCH_JOB_NODE_INDEX"
)
secondary_task_container_override["command"][-1] = secondary_commands
secondary_overrides = (
[
{
"targetNodes": "1:{}".format(num_nodes - 1),
"containerOverrides": secondary_task_container_override,
}
]
if num_nodes > 1
else []
)
self.payload["nodeOverrides"] = {
"nodePropertyOverrides": [
{"targetNodes": "0:0", "containerOverrides": main_task_override},
]
+ secondary_overrides,
}
del self.payload["containerOverrides"]
response = self._client.submit_job(**self.payload)
job = RunningJob(response["jobId"], self._client)
return job.update()
def _register_job_definition(
self,
image,
job_role,
job_queue,
execution_role,
shared_memory,
max_swap,
swappiness,
inferentia,
efa,
memory,
host_volumes,
efs_volumes,
use_tmpfs,
tmpfs_tempdir,
tmpfs_size,
tmpfs_path,
num_parallel,
ephemeral_storage,
log_driver,
log_options,
):
# identify platform from any compute environment associated with the
# queue
if AWS_SANDBOX_ENABLED:
# within the Metaflow sandbox, we can't execute the
# describe_job_queues directive for AWS Batch to detect compute
# environment platform, so let's just default to EC2 for now.
platform = "EC2"
else:
response = self._client.describe_job_queues(jobQueues=[job_queue])
if len(response["jobQueues"]) == 0:
raise BatchJobException("AWS Batch Job Queue %s not found." % job_queue)
compute_environment = response["jobQueues"][0]["computeEnvironmentOrder"][
0
]["computeEnvironment"]
response = self._client.describe_compute_environments(
computeEnvironments=[compute_environment]
)
platform = response["computeEnvironments"][0]["computeResources"]["type"]
# compose job definition
job_definition = {
"type": "container",
"containerProperties": {
"image": image,
"jobRoleArn": job_role,
"command": ["echo", "hello world"],
"resourceRequirements": [
{"value": "1", "type": "VCPU"},
{"value": "4096", "type": "MEMORY"},
],
},
# This propagates the AWS Batch resource tags to the underlying
# ECS tasks.
"propagateTags": True,
}
log_options_dict = {}
if log_options:
if isinstance(log_options, str):
log_options = [log_options]
for each_log_option in log_options:
k, v = each_log_option.split(":", 1)
log_options_dict[k] = v
if log_driver or log_options:
job_definition["containerProperties"]["logConfiguration"] = {}
if log_driver:
job_definition["containerProperties"]["logConfiguration"][
"logDriver"
] = log_driver
if log_options:
job_definition["containerProperties"]["logConfiguration"][
"options"
] = log_options_dict
if platform == "FARGATE" or platform == "FARGATE_SPOT":
if num_parallel > 1:
raise BatchJobException("Fargate does not support multinode jobs.")
if execution_role is None:
raise BatchJobException(
"No AWS Fargate task execution IAM role found. Please see "
"https://docs.aws.amazon.com/batch/latest/userguide/execution-IAM-role.html "
"and set the role as METAFLOW_ECS_FARGATE_EXECUTION_ROLE "
"environment variable."
)
job_definition["containerProperties"]["executionRoleArn"] = execution_role
job_definition["platformCapabilities"] = ["FARGATE"]
job_definition["containerProperties"]["networkConfiguration"] = {
"assignPublicIp": "ENABLED"
}
if ephemeral_storage:
job_definition["containerProperties"]["ephemeralStorage"] = {
"sizeInGiB": ephemeral_storage
}
if platform == "EC2" or platform == "SPOT":
if "linuxParameters" not in job_definition["containerProperties"]:
job_definition["containerProperties"]["linuxParameters"] = {}
if shared_memory is not None:
if not (
isinstance(shared_memory, (int, unicode, basestring))
and int(float(shared_memory)) > 0
):
raise BatchJobException(
"Invalid shared memory size value ({}); "
"it should be greater than 0".format(shared_memory)
)
else:
job_definition["containerProperties"]["linuxParameters"][
"sharedMemorySize"
] = int(float(shared_memory))
if swappiness is not None:
if not (
isinstance(swappiness, (int, unicode, basestring))
and int(swappiness) >= 0
and int(swappiness) < 100
):
raise BatchJobException(
"Invalid swappiness value ({}); "
"(should be 0 or greater and less than 100)".format(swappiness)
)
else:
job_definition["containerProperties"]["linuxParameters"][
"swappiness"
] = int(swappiness)
if max_swap is not None:
if not (
isinstance(max_swap, (int, unicode, basestring))
and int(max_swap) >= 0
):
raise BatchJobException(
"Invalid swappiness value ({}); "
"(should be 0 or greater)".format(max_swap)
)
else:
job_definition["containerProperties"]["linuxParameters"][
"maxSwap"
] = int(max_swap)
if ephemeral_storage:
raise BatchJobException(
"The ephemeral_storage parameter is only available for FARGATE compute environments"
)
if inferentia:
if not (isinstance(inferentia, (int, unicode, basestring))):
raise BatchJobException(
"Invalid inferentia value: ({}) (should be 0 or greater)".format(
inferentia
)
)
else:
job_definition["containerProperties"]["linuxParameters"]["devices"] = []
for i in range(int(inferentia)):
job_definition["containerProperties"]["linuxParameters"][
"devices"
].append(
{
"containerPath": "/dev/neuron{}".format(i),
"hostPath": "/dev/neuron{}".format(i),
"permissions": ["READ", "WRITE"],
}
)
if host_volumes or efs_volumes:
job_definition["containerProperties"]["volumes"] = []
job_definition["containerProperties"]["mountPoints"] = []
if host_volumes:
if isinstance(host_volumes, str):
host_volumes = [host_volumes]
for host_path in host_volumes:
container_path = host_path
if ":" in host_path:
host_path, container_path = host_path.split(":", 1)
name = host_path.replace("/", "_").replace(".", "_")
job_definition["containerProperties"]["volumes"].append(
{"name": name, "host": {"sourcePath": host_path}}
)
job_definition["containerProperties"]["mountPoints"].append(
{"sourceVolume": name, "containerPath": container_path}
)
if efs_volumes:
if isinstance(efs_volumes, str):
efs_volumes = [efs_volumes]
for efs_id in efs_volumes:
container_path = "/mnt/" + efs_id
if ":" in efs_id:
efs_id, container_path = efs_id.split(":", 1)
name = "efs_" + efs_id
job_definition["containerProperties"]["volumes"].append(
{
"name": name,
"efsVolumeConfiguration": {
"fileSystemId": efs_id,
"transitEncryption": "ENABLED",
},
}
)
job_definition["containerProperties"]["mountPoints"].append(
{"sourceVolume": name, "containerPath": container_path}
)
if use_tmpfs and (platform == "FARGATE" or platform == "FARGATE_SPOT"):
raise BatchJobException(
"tmpfs is not available for Fargate compute resources"
)
if use_tmpfs or (tmpfs_size and not use_tmpfs):
if tmpfs_size:
if not (isinstance(tmpfs_size, (int, unicode, basestring))):
raise BatchJobException(
"Invalid tmpfs value: ({}) (should be 0 or greater)".format(
tmpfs_size
)
)
else:
# default tmpfs behavior - https://man7.org/linux/man-pages/man5/tmpfs.5.html
tmpfs_size = int(float(memory)) / 2
job_definition["containerProperties"]["linuxParameters"]["tmpfs"] = [
{
"containerPath": tmpfs_path,
"size": int(tmpfs_size),
"mountOptions": [
# should map to rw, suid, dev, exec, auto, nouser, and async
"defaults"
],
}
]
if efa:
if not (isinstance(efa, (int, unicode, basestring))):
raise BatchJobException(
"Invalid efa value: ({}) (should be 0 or greater)".format(efa)
)
else:
if "linuxParameters" not in job_definition["containerProperties"]:
job_definition["containerProperties"]["linuxParameters"] = {}
if (
"devices"
not in job_definition["containerProperties"]["linuxParameters"]
):
job_definition["containerProperties"]["linuxParameters"][
"devices"
] = []
if (num_parallel or 0) > 1:
# Multi-node parallel jobs require the container path and permissions explicitly specified in Job definition
for i in range(int(efa)):
job_definition["containerProperties"]["linuxParameters"][
"devices"
].append(
{
"hostPath": "/dev/infiniband/uverbs{}".format(i),
"containerPath": "/dev/infiniband/uverbs{}".format(i),
"permissions": ["READ", "WRITE", "MKNOD"],
}
)
else:
# Single-node container jobs only require host path in job definition
job_definition["containerProperties"]["linuxParameters"][
"devices"
].append({"hostPath": "/dev/infiniband/uverbs0"})
self.num_parallel = num_parallel or 0
if self.num_parallel >= 1:
job_definition["type"] = "multinode"
job_definition["nodeProperties"] = {
"numNodes": self.num_parallel,
"mainNode": 0,
}
job_definition["nodeProperties"]["nodeRangeProperties"] = [
{
"targetNodes": "0:0", # The properties are same for main node and others,
# but as we use nodeOverrides later for main and others
# differently, also the job definition must match those patterns
"container": job_definition["containerProperties"],
},
]
if self.num_parallel > 1:
job_definition["nodeProperties"]["nodeRangeProperties"].append(
{
"targetNodes": "1:{}".format(self.num_parallel - 1),
"container": job_definition["containerProperties"],
}
)
del job_definition["containerProperties"] # not used for multi-node
# check if job definition already exists
def_name = (
"metaflow_%s"
% hashlib.sha224(str(job_definition).encode("utf-8")).hexdigest()
)
payload = {"jobDefinitionName": def_name, "status": "ACTIVE"}
response = self._client.describe_job_definitions(**payload)
if len(response["jobDefinitions"]) > 0:
return response["jobDefinitions"][0]["jobDefinitionArn"]
# else create a job definition
job_definition["jobDefinitionName"] = def_name
try:
response = self._client.register_job_definition(**job_definition)
except Exception as ex:
if type(ex).__name__ == "ParamValidationError" and (
platform == "FARGATE" or platform == "FARGATE_SPOT"
):
raise BatchJobException(
"%s \nPlease ensure you have installed boto3>=1.16.29 if "
"you intend to launch AWS Batch jobs on AWS Fargate "
"compute platform." % ex
)
else:
raise ex
return response["jobDefinitionArn"]
def job_def(
self,
image,
iam_role,
job_queue,
execution_role,
shared_memory,
max_swap,
swappiness,
inferentia,
efa,
memory,
host_volumes,
efs_volumes,
use_tmpfs,
tmpfs_tempdir,
tmpfs_size,
tmpfs_path,
num_parallel,
ephemeral_storage,
log_driver,
log_options,
):
self.payload["jobDefinition"] = self._register_job_definition(
image,
iam_role,
job_queue,
execution_role,
shared_memory,
max_swap,
swappiness,
inferentia,
efa,
memory,
host_volumes,
efs_volumes,
use_tmpfs,
tmpfs_tempdir,
tmpfs_size,
tmpfs_path,
num_parallel,
ephemeral_storage,
log_driver,
log_options,
)
return self
def job_name(self, job_name):
self.payload["jobName"] = job_name
return self
def job_queue(self, job_queue):
self.payload["jobQueue"] = job_queue
return self
def image(self, image):
self._image = image
return self
def task_id(self, task_id):
self._task_id = task_id
return self
def iam_role(self, iam_role):
self._iam_role = iam_role
return self
def execution_role(self, execution_role):
self._execution_role = execution_role
return self
def shared_memory(self, shared_memory):
self._shared_memory = shared_memory
return self
def max_swap(self, max_swap):
self._max_swap = max_swap
return self
def swappiness(self, swappiness):
self._swappiness = swappiness
return self
def inferentia(self, inferentia):
self._inferentia = inferentia
return self
def efa(self, efa):
self._efa = efa
return self
def command(self, command):
if "command" not in self.payload["containerOverrides"]:
self.payload["containerOverrides"]["command"] = []
self.payload["containerOverrides"]["command"].extend(command)
return self
def cpu(self, cpu):
if not (isinstance(cpu, (int, unicode, basestring, float)) and float(cpu) > 0):
raise BatchJobException(
"Invalid CPU value ({}); it should be greater than 0".format(cpu)
)
if "resourceRequirements" not in self.payload["containerOverrides"]:
self.payload["containerOverrides"]["resourceRequirements"] = []
# %g will format the value without .0 if it doesn't have a fractional part
#
# While AWS Batch supports fractional values for fargate, it does not
# seem to like seeing values like 2.0 for non-fargate environments.
self.payload["containerOverrides"]["resourceRequirements"].append(
{"value": "%g" % (float(cpu)), "type": "VCPU"}
)
return self
def memory(self, mem):
if not (isinstance(mem, (int, unicode, basestring, float)) and float(mem) > 0):
raise BatchJobException(
"Invalid memory value ({}); it should be greater than 0".format(mem)
)
if "resourceRequirements" not in self.payload["containerOverrides"]:
self.payload["containerOverrides"]["resourceRequirements"] = []
self.payload["containerOverrides"]["resourceRequirements"].append(
{"value": str(int(float(mem))), "type": "MEMORY"}
)
return self
def gpu(self, gpu):
if not (isinstance(gpu, (int, unicode, basestring))):
raise BatchJobException(
"invalid gpu value: ({}) (should be 0 or greater)".format(gpu)
)
if float(gpu) > 0:
if "resourceRequirements" not in self.payload["containerOverrides"]:
self.payload["containerOverrides"]["resourceRequirements"] = []
# Only integer values are supported but the value passed to us
# could be a float-converted-to-string
self.payload["containerOverrides"]["resourceRequirements"].append(
{"type": "GPU", "value": str(int(float(gpu)))}
)
return self
def environment_variable(self, name, value):
if value is None:
return self
if "environment" not in self.payload["containerOverrides"]:
self.payload["containerOverrides"]["environment"] = []
value = str(value)
if value.startswith("$$.") or value.startswith("$."):
# Context Object substitution for AWS Step Functions
# https://docs.aws.amazon.com/step-functions/latest/dg/input-output-contextobject.html
self.payload["containerOverrides"]["environment"].append(
{"name": name, "value.$": value}
)
else:
self.payload["containerOverrides"]["environment"].append(
{"name": name, "value": value}
)
return self
def timeout_in_secs(self, timeout_in_secs):
self.payload["timeout"]["attemptDurationSeconds"] = timeout_in_secs
return self
def tag(self, key, value):
self.payload["tags"][key] = str(value)
return self
def parameter(self, key, value):
self.payload["parameters"][key] = str(value)
return self
def attempts(self, attempts):
self.payload["retryStrategy"]["attempts"] = attempts
return self
| BatchJob |
python | pytorch__pytorch | test/dynamo/test_higher_order_ops.py | {
"start": 152886,
"end": 155482
} | class ____(torch.nn.Module):
def forward(self, L_x_: "f32[5]", L_v_: "f32[5]"):
l_x_ = L_x_
l_v_ = L_v_
_saved_tensors_hooks_disable = torch._C._autograd._saved_tensors_hooks_disable("torch.func.{grad, vjp, jacrev, hessian} don't yet support saved tensor hooks. Please open an issue with your use case."); _saved_tensors_hooks_disable = None
_grad_increment_nesting = torch._C._functorch._grad_increment_nesting(); _grad_increment_nesting = None
_wrap_for_grad: "f32[5]" = torch._C._functorch._wrap_for_grad(l_x_, 1); l_x_ = None
set_inplace_requires_grad_allowed = torch._C._functorch.set_inplace_requires_grad_allowed(True); set_inplace_requires_grad_allowed = None
child_2: "f32[5]" = torch._functorch.eager_transforms._set_tensor_requires_grad(_wrap_for_grad)
set_inplace_requires_grad_allowed_1 = torch._C._functorch.set_inplace_requires_grad_allowed(False); set_inplace_requires_grad_allowed_1 = None
child: "f32[5]" = _wrap_for_grad.sin()
child_1: "f32[5]" = _wrap_for_grad.cos(); _wrap_for_grad = None
_unwrap_for_grad: "f32[5]" = torch._C._functorch._unwrap_for_grad(child, 1)
_unwrap_for_grad_1: "f32[5]" = torch._C._functorch._unwrap_for_grad(child_1, 1)
_grad_decrement_nesting = torch._C._functorch._grad_decrement_nesting(); _grad_decrement_nesting = None
_saved_tensors_hooks_enable = torch._C._autograd._saved_tensors_hooks_enable(); _saved_tensors_hooks_enable = None
_autograd_grad = torch._functorch.eager_transforms._autograd_grad([child, child_1], [child_2], [l_v_, l_v_], retain_graph = True, create_graph = True); child = child_1 = child_2 = l_v_ = None
getitem: "f32[5]" = _autograd_grad[0]; _autograd_grad = None
return (_unwrap_for_grad, _unwrap_for_grad_1, getitem)
""",
)
def test_vjp_multiple_outputs_python_struct(self):
counters.clear()
def wrapper_fn(x, v):
fn = lambda x: {"first": x.sin(), "second": x.cos()} # noqa: E731
(out, vjpfunc) = torch.func.vjp(fn, x)
vjps = vjpfunc({"first": v, "second": v.sin()})
return out, vjps
x = torch.randn([5])
v = torch.randn(5)
wrapped_gm = self._compile_check(wrapper_fn, (x, v))
# Dynamic shapes produce a slightly different graph.
if check_dynamic_shape_capture():
return
actual = normalize_gm(wrapped_gm.print_readable(print_output=False))
self.assertExpectedInline(
actual,
"""\
| GraphModule |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/mariadb.py | {
"start": 819,
"end": 965
} | class ____(sqltypes.TypeEngine[str]):
"""INET4 column type for MariaDB
.. versionadded:: 2.0.37
"""
__visit_name__ = "INET4"
| INET4 |
python | huggingface__transformers | tests/models/whisper/test_feature_extraction_whisper.py | {
"start": 3484,
"end": 16665
} | class ____(SequenceFeatureExtractionTestMixin, unittest.TestCase):
feature_extraction_class = WhisperFeatureExtractor
def setUp(self):
self.feat_extract_tester = WhisperFeatureExtractionTester(self)
def test_feat_extract_from_and_save_pretrained(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(np.allclose(mel_1, mel_2))
self.assertEqual(dict_first, dict_second)
def test_feat_extract_to_json_file(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname, "feat_extract.json")
feat_extract_first.to_json_file(json_file_path)
feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(np.allclose(mel_1, mel_2))
self.assertEqual(dict_first, dict_second)
def test_feat_extract_from_pretrained_kwargs(self):
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
feat_extract_second = self.feature_extraction_class.from_pretrained(
tmpdirname, feature_size=2 * self.feat_extract_dict["feature_size"]
)
mel_1 = feat_extract_first.mel_filters
mel_2 = feat_extract_second.mel_filters
self.assertTrue(2 * mel_1.shape[1] == mel_2.shape[1])
def test_call(self):
# Tests that all calls wrap to encode_plus and batch_encode_plus
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
# Test feature size
input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
# Test not batched input
encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
# Test batched
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
np_speech_inputs = np.asarray(speech_inputs)
encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
# Test truncation required
speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def test_dither(self):
np.random.seed(42) # seed the dithering randn()
# Tests that features with and without little dithering are similar, but not the same
dict_no_dither = self.feat_extract_tester.prepare_feat_extract_dict()
dict_no_dither["dither"] = 0.0
dict_dither = self.feat_extract_tester.prepare_feat_extract_dict()
dict_dither["dither"] = 0.00003 # approx. 1/32k
feature_extractor_no_dither = self.feature_extraction_class(**dict_no_dither)
feature_extractor_dither = self.feature_extraction_class(**dict_dither)
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
# compute features
input_features_no_dither = feature_extractor_no_dither(
np_speech_inputs, padding=True, return_tensors="np", sampling_rate=dict_no_dither["sampling_rate"]
).input_features
input_features_dither = feature_extractor_dither(
np_speech_inputs, padding=True, return_tensors="np", sampling_rate=dict_dither["sampling_rate"]
).input_features
# test there is a difference between features (there's added noise to input signal)
diff = input_features_dither - input_features_no_dither
# features are not identical
self.assertTrue(np.abs(diff).mean() > 1e-6)
# features are not too different
self.assertTrue(np.abs(diff).mean() <= 1e-4)
self.assertTrue(np.abs(diff).max() <= 5e-3)
def test_feature_shape(self):
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
hop_length = feature_extractor.hop_length
test_inputs = np.random.randn(16000)
self.assertTrue(
feature_extractor(
[test_inputs[: hop_length * 5 + 1]],
return_attention_mask=True,
padding=False,
return_tensors="np",
).attention_mask.shape[-1]
== 5
)
self.assertTrue(
feature_extractor(
[test_inputs[: hop_length * 5]],
return_attention_mask=True,
padding=False,
return_tensors="np",
).attention_mask.shape[-1]
== 5
)
self.assertTrue(
feature_extractor(
[test_inputs[: hop_length * 5 - 1]],
return_attention_mask=True,
padding=False,
return_tensors="np",
).attention_mask.shape[-1]
== 4
)
@require_torch
def test_double_precision_pad(self):
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
self.assertTrue(np_processed.input_features.dtype == np.float32)
pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
self.assertTrue(pt_processed.input_features.dtype == torch.float32)
def _load_datasamples(self, num_samples):
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id")[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch_accelerator
@require_torch
def test_torch_integration(self):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
]
)
# fmt: on
input_speech = self._load_datasamples(1)
feature_extractor = WhisperFeatureExtractor()
input_features = feature_extractor(input_speech, return_tensors="pt").input_features
self.assertEqual(input_features.shape, (1, 80, 3000))
torch.testing.assert_close(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
@unittest.mock.patch("transformers.models.whisper.feature_extraction_whisper.is_torch_available", lambda: False)
def test_numpy_integration(self):
# fmt: off
EXPECTED_INPUT_FEATURES = np.array(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
]
)
# fmt: on
input_speech = self._load_datasamples(1)
feature_extractor = WhisperFeatureExtractor()
input_features = feature_extractor(input_speech, return_tensors="np").input_features
self.assertEqual(input_features.shape, (1, 80, 3000))
self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
audio = self._load_datasamples(1)[0]
audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
self.assertTrue(np.all(np.mean(audio) < 1e-3))
self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
@require_torch_accelerator
@require_torch
def test_torch_integration_batch(self):
# fmt: off
EXPECTED_INPUT_FEATURES = torch.tensor(
[
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
],
[
-0.4696, -0.0751, 0.0276, -0.0312, -0.0540, -0.0383, 0.1295, 0.0568,
-0.2071, -0.0548, 0.0389, -0.0316, -0.2346, -0.1068, -0.0322, 0.0475,
-0.1709, -0.0041, 0.0872, 0.0537, 0.0075, -0.0392, 0.0371, 0.0189,
-0.1522, -0.0270, 0.0744, 0.0738, -0.0245, -0.0667
],
[
-0.2337, -0.0060, -0.0063, -0.2353, -0.0431, 0.1102, -0.1492, -0.0292,
0.0787, -0.0608, 0.0143, 0.0582, 0.0072, 0.0101, -0.0444, -0.1701,
-0.0064, -0.0027, -0.0826, -0.0730, -0.0099, -0.0762, -0.0170, 0.0446,
-0.1153, 0.0960, -0.0361, 0.0652, 0.1207, 0.0277
]
]
)
# fmt: on
with torch.device("cuda"):
input_speech = self._load_datasamples(3)
feature_extractor = WhisperFeatureExtractor()
input_features = feature_extractor(input_speech, return_tensors="pt").input_features
self.assertEqual(input_features.shape, (3, 80, 3000))
torch.testing.assert_close(input_features[:, 0, :30], EXPECTED_INPUT_FEATURES, rtol=1e-4, atol=1e-4)
| WhisperFeatureExtractionTest |
python | huggingface__transformers | src/transformers/models/idefics2/processing_idefics2.py | {
"start": 1329,
"end": 1565
} | class ____(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"add_special_tokens": True,
"padding": False,
"is_split_into_words": False,
},
}
| Idefics2ProcessorKwargs |
python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/operators/powerbi.py | {
"start": 1865,
"end": 6409
} | class ____(BaseOperator):
"""
Refreshes a Power BI dataset.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PowerBIDatasetRefreshOperator`
:param dataset_id: The dataset id.
:param group_id: The workspace id.
:param conn_id: Airflow Connection ID that contains the connection information for the Power BI account used for authentication.
:param timeout: Time in seconds to wait for a dataset to reach a terminal status for asynchronous waits. Used only if ``wait_for_termination`` is True.
:param check_interval: Number of seconds to wait before rechecking the
refresh status.
:param request_body: Additional arguments to pass to the request body, as described in https://learn.microsoft.com/en-us/rest/api/power-bi/datasets/refresh-dataset-in-group#request-body.
"""
template_fields: Sequence[str] = (
"dataset_id",
"group_id",
)
template_fields_renderers = {"parameters": "json"}
operator_extra_links = (PowerBILink(),)
def __init__(
self,
*,
dataset_id: str,
group_id: str,
conn_id: str = PowerBIHook.default_conn_name,
timeout: float = 60 * 60 * 24 * 7,
proxies: dict | None = None,
api_version: APIVersion | str | None = None,
check_interval: int = 60,
request_body: dict[str, Any] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.hook = PowerBIHook(conn_id=conn_id, proxies=proxies, api_version=api_version, timeout=timeout)
self.dataset_id = dataset_id
self.group_id = group_id
self.wait_for_termination = True
self.conn_id = conn_id
self.timeout = timeout
self.check_interval = check_interval
self.request_body = request_body
@property
def proxies(self) -> dict | None:
return self.hook.proxies
@property
def api_version(self) -> str | None:
return self.hook.api_version
def execute(self, context: Context):
"""Refresh the Power BI Dataset."""
if self.wait_for_termination:
self.defer(
trigger=PowerBITrigger(
conn_id=self.conn_id,
group_id=self.group_id,
dataset_id=self.dataset_id,
timeout=self.timeout,
proxies=self.proxies,
api_version=self.api_version,
check_interval=self.check_interval,
wait_for_termination=self.wait_for_termination,
request_body=self.request_body,
),
method_name=self.get_refresh_status.__name__,
)
def get_refresh_status(self, context: Context, event: dict[str, str] | None = None):
"""Push the refresh Id to XCom then runs the Trigger to wait for refresh completion."""
if event:
if event["status"] == "error":
raise AirflowException(event["message"])
dataset_refresh_id = event["dataset_refresh_id"]
if dataset_refresh_id:
context["ti"].xcom_push(
key=f"{self.task_id}.powerbi_dataset_refresh_Id",
value=dataset_refresh_id,
)
self.defer(
trigger=PowerBITrigger(
conn_id=self.conn_id,
group_id=self.group_id,
dataset_id=self.dataset_id,
dataset_refresh_id=dataset_refresh_id,
timeout=self.timeout,
proxies=self.proxies,
api_version=self.api_version,
check_interval=self.check_interval,
wait_for_termination=self.wait_for_termination,
),
method_name=self.execute_complete.__name__,
)
def execute_complete(self, context: Context, event: dict[str, str]) -> Any:
"""
Return immediately - callback for when the trigger fires.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event:
context["ti"].xcom_push(
key=f"{self.task_id}.powerbi_dataset_refresh_status",
value=event["dataset_refresh_status"],
)
if event["status"] == "error":
raise AirflowException(event["message"])
| PowerBIDatasetRefreshOperator |
python | streamlit__streamlit | lib/tests/streamlit/elements/page_link_test.py | {
"start": 848,
"end": 8494
} | class ____(DeltaGeneratorTestCase):
"""Test ability to marshall page_link protos."""
def test_external_http_page(self):
"""Test that it can be called with an external http page link."""
st.page_link(page="http://example.com", label="HTTP Test")
c = self.get_delta_from_queue().new_element.page_link
assert c.label == "HTTP Test"
assert c.page == "http://example.com"
assert c.external
assert not c.disabled
assert c.icon == ""
assert c.help == ""
def test_external_https_page(self):
"""Test that it can be called with an external https page link."""
st.page_link(page="https://example.com", label="HTTPS Test")
c = self.get_delta_from_queue().new_element.page_link
assert c.label == "HTTPS Test"
assert c.page == "https://example.com"
assert c.external
assert not c.disabled
def test_external_no_label(self):
"""Test that page_link throws an StreamlitAPIException on external link, no label."""
with pytest.raises(StreamlitAPIException):
st.page_link(page="http://example.com")
def test_icon(self):
"""Test that it can be called with icon param."""
st.page_link(page="https://streamlit.io", label="the label", icon="🐶")
c = self.get_delta_from_queue().new_element.page_link
assert c.label == "the label"
assert c.page == "https://streamlit.io"
assert c.external
assert c.icon == "🐶"
def test_disabled(self):
"""Test that it can be called with disabled param."""
st.page_link(page="https://streamlit.io", label="the label", disabled=True)
c = self.get_delta_from_queue().new_element.page_link
assert c.label == "the label"
assert c.page == "https://streamlit.io"
assert c.external
assert c.disabled
def test_help(self):
"""Test that it can be called with help param."""
st.page_link(
page="https://streamlit.io", label="the label", help="Some help text"
)
c = self.get_delta_from_queue().new_element.page_link
assert c.label == "the label"
assert c.page == "https://streamlit.io"
assert c.external
assert c.help == "Some help text"
def test_query_params(self):
"""Test that it can be called with query_params param."""
st.page_link(
page="https://streamlit.io",
label="the label",
query_params={"foo": "bar", "baz": [1, 2]},
)
c = self.get_delta_from_queue().new_element.page_link
assert c.query_string == "foo=bar&baz=1&baz=2"
def test_query_params_list_of_tuples(self):
"""Test that it can be called with query_params as list of tuples."""
st.page_link(
page="https://streamlit.io",
label="the label",
query_params=[("foo", "bar"), ("baz", "1"), ("baz", "2")],
)
c = self.get_delta_from_queue().new_element.page_link
assert c.query_string == "foo=bar&baz=1&baz=2"
@patch("pathlib.Path.is_file", MagicMock(return_value=True))
def test_st_page_with_label(self):
"""Test that st.page_link accepts an st.Page, but does not uses its title"""
page = st.Page("foo.py", title="Bar Test")
st.page_link(page=page, label="Foo Test")
c = self.get_delta_from_queue().new_element.page_link
assert c.label == "Foo Test"
assert c.page_script_hash == page._script_hash
assert c.page == "foo"
assert not c.external
assert not c.disabled
assert c.icon == ""
assert c.help == ""
@patch("pathlib.Path.is_file", MagicMock(return_value=True))
def test_st_page_without_label(self):
"""Test that st.page_link accepts an st.Page, but will use its title if necessary"""
page = st.Page("foo.py", title="Bar Test")
st.page_link(page=page)
c = self.get_delta_from_queue().new_element.page_link
assert c.label == "Bar Test"
assert c.page_script_hash == page._script_hash
assert c.page == "foo"
assert not c.external
assert not c.disabled
assert c.icon == ""
assert c.help == ""
@patch("pathlib.Path.is_file", MagicMock(return_value=True))
def test_st_page_with_url_path(self):
"""Test that st.page_link accepts an st.Page, but will use the url_path if necessary"""
page = st.Page("foo.py", title="Bar Test", url_path="bar")
st.page_link(page=page)
c = self.get_delta_from_queue().new_element.page_link
assert c.label == "Bar Test"
assert c.page_script_hash == page._script_hash
assert c.page == "bar"
assert not c.external
assert not c.disabled
assert c.icon == ""
assert c.help == ""
@patch("pathlib.Path.is_file", MagicMock(return_value=True))
def test_icon_passed_to_page_link_takes_precedence(self):
"""Test that st.page_link icon param overrides page icon"""
page = st.Page("foo.py", title="Bar Test", icon="🎈")
st.page_link(page=page, icon="🌟")
c = self.get_delta_from_queue().new_element.page_link
assert c.label == "Bar Test"
assert c.page_script_hash == page._script_hash
assert c.page == "foo"
assert not c.external
assert not c.disabled
assert c.icon == "🌟" # Icon parameter of st.page_link takes precedence
assert c.help == ""
@patch("pathlib.Path.is_file", MagicMock(return_value=True))
def test_st_page_with_icon(self):
"""Test that st.page_link accepts an st.Page, will use its icon"""
page = st.Page("foo.py", title="Bar Test", icon="🎈")
st.page_link(page=page)
c = self.get_delta_from_queue().new_element.page_link
assert c.label == "Bar Test"
assert c.page_script_hash == page._script_hash
assert c.page == "foo"
assert not c.external
assert not c.disabled
assert c.icon == "🎈"
assert c.help == ""
@patch("pathlib.Path.is_file", MagicMock(return_value=True))
def test_st_page_with_none_icon(self):
"""Test that st.page_link handles None icon from StreamlitPage correctly"""
# None icon defaults to empty string in StreamlitPage
page = st.Page("foo.py", title="Bar Test", icon=None)
st.page_link(page=page)
c = self.get_delta_from_queue().new_element.page_link
assert c.label == "Bar Test"
assert c.page_script_hash == page._script_hash
assert c.page == "foo"
assert not c.external
assert not c.disabled
assert c.icon == "" # None icon should become empty string (default st st.Page)
assert c.help == ""
def test_empty_string_icon_for_external_page_should_raise_exception(self):
"""Test that st.page_link with empty string icon raises an exception for external pages."""
with pytest.raises(StreamlitAPIException) as exc_info:
st.page_link(page="https://example.com", label="Test", icon="")
assert 'The value "" is not a valid emoji' in str(exc_info.value)
def test_whitespace_only_icon_for_external_page_should_raise_exception(self):
"""Test that st.page_link with whitespace-only icon raises an exception for external pages."""
with pytest.raises(StreamlitAPIException) as exc_info:
st.page_link(page="https://example.com", label="Test", icon=" ")
assert 'The value " " is not a valid emoji' in str(exc_info.value)
| PageLinkTest |
python | spyder-ide__spyder | spyder/plugins/onlinehelp/widgets.py | {
"start": 3263,
"end": 14469
} | class ____(PluginMainWidget):
"""PyDoc browser widget."""
ENABLE_SPINNER = True
# --- Signals
# ------------------------------------------------------------------------
sig_load_finished = Signal()
"""
This signal is emitted to indicate the help page has finished loading.
"""
def __init__(self, name=None, plugin=None, parent=None):
from spyder.widgets.browser import FrameWebView
super().__init__(name, plugin, parent=parent)
self._is_running = False
self.home_url = None
self.server = None
# Widgets
self.label = QLabel(_("Package:"))
self.label.ID = PydocBrowserToolbarItems.PackageLabel
self.url_combo = UrlComboBox(
self, id_=PydocBrowserToolbarItems.UrlCombo)
# Setup web view frame
self.webview = FrameWebView(
self,
handle_links=self.get_conf('handle_links')
)
self.webview.setup()
self.webview.set_zoom_factor(self.get_conf('zoom_factor'))
self.webview.loadStarted.connect(self._start)
self.webview.loadFinished.connect(self._finish)
self.webview.titleChanged.connect(self.setWindowTitle)
self.webview.urlChanged.connect(self._change_url)
if not WEBENGINE:
self.webview.iconChanged.connect(self._handle_icon_change)
# Setup find widget
self.find_widget = FindReplace(self)
self.find_widget.set_editor(self.webview)
self.find_widget.hide()
self.url_combo.setMaxCount(self.get_conf('max_history_entries'))
tip = _('Write a package name here, e.g. pandas')
self.url_combo.lineEdit().setPlaceholderText(tip)
self.url_combo.lineEdit().setToolTip(tip)
self.url_combo.valid.connect(
lambda x: self._handle_url_combo_activation())
# Layout
layout = QVBoxLayout()
layout.addWidget(self.webview)
layout.addWidget(self.find_widget)
self.setLayout(layout)
# --- PluginMainWidget API
# ------------------------------------------------------------------------
def get_title(self):
return _('Online help')
def get_focus_widget(self):
self.url_combo.lineEdit().selectAll()
return self.url_combo
def setup(self):
from spyder.widgets.browser import WebViewActions
# Actions
home_action = self.create_action(
PydocBrowserActions.Home,
text=_("Home"),
tip=_("Home"),
icon=self.create_icon('home'),
triggered=self.go_home,
)
find_action = self.create_action(
PydocBrowserActions.Find,
text=_("Find"),
tip=_("Find text"),
icon=self.create_icon('find'),
toggled=self.toggle_find_widget,
initial=False,
)
stop_action = self.get_action(WebViewActions.Stop)
refresh_action = self.get_action(WebViewActions.Refresh)
# Toolbar
toolbar = self.get_main_toolbar()
for item in [self.get_action(WebViewActions.Back),
self.get_action(WebViewActions.Forward), refresh_action,
stop_action, home_action, self.label, self.url_combo,
self.get_action(WebViewActions.ZoomIn),
self.get_action(WebViewActions.ZoomOut), find_action,
]:
self.add_item_to_toolbar(
item,
toolbar=toolbar,
section=PydocBrowserMainToolbarSections.Main,
)
# Signals
self.find_widget.visibility_changed.connect(find_action.setChecked)
self.sig_toggle_view_changed.connect(self.initialize)
def update_actions(self):
from spyder.widgets.browser import WebViewActions
stop_action = self.get_action(WebViewActions.Stop)
refresh_action = self.get_action(WebViewActions.Refresh)
refresh_action.setVisible(not self._is_running)
stop_action.setVisible(self._is_running)
# --- Private API
# ------------------------------------------------------------------------
def _start(self):
"""Webview load started."""
self._is_running = True
self.start_spinner()
self.update_actions()
def _finish(self, code):
"""Webview load finished."""
self._is_running = False
self.stop_spinner()
self.update_actions()
self.sig_load_finished.emit()
def _continue_initialization(self):
"""Load home page."""
self.go_home()
QApplication.restoreOverrideCursor()
def _handle_url_combo_activation(self):
"""Load URL from combo box first item."""
from spyder.widgets.browser import WebViewActions
if not self._is_running:
text = str(self.url_combo.currentText())
self.go_to(self.text_to_url(text))
else:
self.get_action(WebViewActions.Stop).trigger()
self.get_focus_widget().setFocus()
def _change_url(self, url):
"""
Displayed URL has changed -> updating URL combo box.
"""
self.url_combo.add_text(self.url_to_text(url))
def _handle_icon_change(self):
"""
Handle icon changes.
"""
self.url_combo.setItemIcon(self.url_combo.currentIndex(),
self.webview.icon())
self.setWindowIcon(self.webview.icon())
# --- Qt overrides
# ------------------------------------------------------------------------
def closeEvent(self, event):
self.webview.web_widget.stop()
if self.server:
self.server.finished.connect(self.deleteLater)
self.quit_server()
super().closeEvent(event)
# --- Public API
# ------------------------------------------------------------------------
def load_history(self, history):
"""
Load history.
Parameters
----------
history: list
List of searched items.
"""
self.url_combo.addItems(history)
@Slot(bool)
def initialize(self, checked=True, force=False):
"""
Start pydoc server.
Parameters
----------
checked: bool, optional
This method is connected to the `sig_toggle_view_changed` signal,
so that the first time the widget is made visible it will start
the server. Default is True.
force: bool, optional
Force a server start even if the server is running.
Default is False.
"""
server_needed = checked and self.server is None
if force or server_needed or not self.is_server_running():
self.sig_toggle_view_changed.disconnect(self.initialize)
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
self.start_server()
def is_server_running(self):
"""Return True if pydoc server is already running."""
return self.server is not None and self.server.is_running()
def start_server(self):
"""Start pydoc server."""
if self.server is None:
self.set_home_url('http://127.0.0.1:{}/'.format(PORT))
elif self.server.is_running():
self.server.sig_server_started.disconnect(
self._continue_initialization)
self.server.quit()
self.server.wait()
self.server = PydocServer(None, port=PORT)
self.server.sig_server_started.connect(
self._continue_initialization)
self.server.start()
def quit_server(self):
"""Quit the server."""
if self.server is None:
return
if self.server.is_running():
self.server.sig_server_started.disconnect(
self._continue_initialization)
self.server.quit_server()
self.server.quit()
self.server.wait()
def get_label(self):
"""Return address label text"""
return _("Package:")
def reload(self):
"""Reload page."""
if self.server:
self.webview.reload()
def text_to_url(self, text):
"""
Convert text address into QUrl object.
Parameters
----------
text: str
Url address.
"""
if text != 'about:blank':
text += '.html'
if text.startswith('/'):
text = text[1:]
return QUrl(self.home_url.toString() + text)
def url_to_text(self, url):
"""
Convert QUrl object to displayed text in combo box.
Parameters
----------
url: QUrl
Url address.
"""
string_url = url.toString()
if 'about:blank' in string_url:
return 'about:blank'
elif 'get?key=' in string_url or 'search?key=' in string_url:
return url.toString().split('=')[-1]
return osp.splitext(str(url.path()))[0][1:]
def set_home_url(self, text):
"""
Set home URL.
Parameters
----------
text: str
Home url address.
"""
self.home_url = QUrl(text)
def set_url(self, url):
"""
Set current URL.
Parameters
----------
url: QUrl or str
Url address.
"""
self._change_url(url)
self.go_to(url)
def go_to(self, url_or_text):
"""
Go to page URL.
"""
if isinstance(url_or_text, str):
url = QUrl(url_or_text)
else:
url = url_or_text
self.webview.load(url)
@Slot()
def go_home(self):
"""
Go to home page.
"""
if self.home_url is not None:
self.set_url(self.home_url)
def get_zoom_factor(self):
"""
Get the current zoom factor.
Returns
-------
int
Zoom factor.
"""
return self.webview.get_zoom_factor()
def get_history(self):
"""
Return the list of history items in the combobox.
Returns
-------
list
List of strings.
"""
history = []
for index in range(self.url_combo.count()):
history.append(str(self.url_combo.itemText(index)))
return history
@Slot(bool)
def toggle_find_widget(self, state):
"""
Show/hide the find widget.
Parameters
----------
state: bool
True to show and False to hide the find widget.
"""
if state:
self.find_widget.show()
else:
self.find_widget.hide()
def test():
"""Run web browser."""
from spyder.utils.qthelpers import qapplication
from unittest.mock import MagicMock
plugin_mock = MagicMock()
plugin_mock.CONF_SECTION = 'onlinehelp'
app = qapplication(test_time=8)
widget = PydocBrowser(None, plugin=plugin_mock)
widget._setup()
widget.setup()
widget.show()
sys.exit(app.exec_())
if __name__ == '__main__':
test()
| PydocBrowser |
python | scrapy__scrapy | scrapy/utils/testsite.py | {
"start": 690,
"end": 1860
} | class ____(util.Redirect):
def render(self, request: server.Request) -> bytes:
content = util.Redirect.render(self, request)
return content.replace(
b'http-equiv="refresh"', b'http-no-equiv="do-not-refresh-me"'
)
def test_site():
r = resource.Resource()
r.putChild(b"text", static.Data(b"Works", "text/plain"))
r.putChild(
b"html",
static.Data(
b"<body><p class='one'>Works</p><p class='two'>World</p></body>",
"text/html",
),
)
r.putChild(
b"enc-gb18030",
static.Data(b"<p>gb18030 encoding</p>", "text/html; charset=gb18030"),
)
r.putChild(b"redirect", util.Redirect(b"/redirected"))
r.putChild(b"redirect-no-meta-refresh", NoMetaRefreshRedirect(b"/redirected"))
r.putChild(b"redirected", static.Data(b"Redirected here", "text/plain"))
return server.Site(r)
if __name__ == "__main__":
from twisted.internet import reactor # pylint: disable=ungrouped-imports
port = reactor.listenTCP(0, test_site(), interface="127.0.0.1")
print(f"http://localhost:{port.getHost().port}/")
reactor.run()
| NoMetaRefreshRedirect |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/cluster_coordinator.py | {
"start": 52373,
"end": 58860
} | class ____(object):
"""A cluster with workers.
We assume all function errors are fatal and based on this assumption our
error reporting logic is:
1) Both `schedule` and `join` can raise a non-retryable error which is the
first error seen by the coordinator from any previously scheduled functions.
2) When an error is raised, there is no guarantee on how many previously
scheduled functions have been executed; functions that have not been executed
will be thrown away and marked as cancelled.
3) After an error is raised, the internal state of error will be cleared.
I.e. functions can continue to be scheduled and subsequent calls of `schedule`
or `join` will not raise the same error again.
Attributes:
failure_handler: The failure handler used to handle worker preemption
failures.
workers: a list of `Worker` objects in the cluster.
closure_queue: the global Closure queue.
resource_cancellation_mgr: the cancellation manager used to cancel resource
closures.
"""
def __init__(self, strategy):
"""Initializes the cluster instance."""
self._num_workers = strategy._num_workers
self._num_ps = strategy._num_ps
# Ignore PS failures reported by workers due to transient connection errors.
# Transient connectivity issues between workers and PS are relayed by the
# workers to the coordinator, leading the coordinator to believe that there
# are PS failures. The difference between transient vs. permanent PS failure
# is the number of reports from the workers. When this env var is set to a
# positive integer K, the coordinator ignores up to K reports of a failed PS
# task, i.e., only when more than K closure executions fail due to errors
# from the same PS instance do we consider that PS instance to have
# encountered a failure.
# TODO(b/164279603): Remove this workaround when the underlying connectivity
# issue in gRPC server is resolved.
self._transient_ps_failures_threshold = int(
os.environ.get("TF_COORDINATOR_IGNORE_TRANSIENT_PS_FAILURES", 3))
self._potential_ps_failures_lock = threading.Lock()
self._potential_ps_failures_count = [0] * self._num_ps
# Ignore worker timeouts due to transient connection errors.
# Transient connectivity issues might cause the server side to unexpectedly
# cancel RPC handling logic, leading to closure execution timeouts. When
# the _transient_timeout_threshold is set to a positive number, the cluster
# coordinator ignores DeadlineExceeded errors from workers for the specified
# number of times before raising the error to users.
self._transient_timeouts_threshold = int(
os.environ.get("TF_COORDINATOR_IGNORE_TRANSIENT_TIMEOUTS",
self._num_workers // 10))
self._transient_timeouts_lock = threading.Lock()
self._transient_timeouts_count = 0
self.closure_queue = _CoordinatedClosureQueue()
# Set this environment variable to use an experimental
# integration with the runtime coordination service to aid in failure
# detection and handling. This will not affect the functionality of
# the strategy or cluster coordinator, but is off by default.
if os.getenv("TF_PSS_ENABLE_COORDINATION_SERVICE"):
self.failure_handler = CoordinationServicePreemptionHandler(
context.get_server_def(), self,
)
else:
self.failure_handler = WorkerPreemptionHandler(context.get_server_def(),
self)
worker_device_strings = [
"/job:worker/replica:0/task:%d" % i for i in range(self._num_workers)
]
self.workers = [
Worker(i, w, self) for i, w in enumerate(worker_device_strings)
]
# Cancellation manager for all resource closures.
self.resource_cancellation_mgr = cancellation.CancellationManager()
def stop(self):
"""Stop worker, worker preemption threads, and the closure queue."""
logging.info("Stopping cluster, starting with failure handler")
self.failure_handler.stop()
logging.info("Stopping workers")
for worker in self.workers:
worker.stop()
logging.info("Stopping queue")
self.closure_queue.stop()
logging.info("Start cancelling remote resource-building functions")
self.resource_cancellation_mgr.start_cancel()
def _record_and_ignore_transient_ps_failure(self, e):
"""Records potential PS failures and return if failure should be ignored."""
if self._transient_ps_failures_threshold <= 0 or not _is_ps_failure(e):
return False
ps_tasks = _extract_failed_ps_instances(str(e))
with self._potential_ps_failures_lock:
for t in ps_tasks:
self._potential_ps_failures_count[t] += 1
# The number of UnavailableError encountered on this PS task exceeds the
# maximum number of ignored errors
if (self._potential_ps_failures_count[t] >=
self._transient_ps_failures_threshold):
return False
return True
def _record_and_ignore_transient_timeouts(self, e):
"""Records observed timeout error and return if it should be ignored."""
if self._transient_timeouts_threshold <= 0:
return False
if not isinstance(e, errors.DeadlineExceededError):
return False
with self._transient_timeouts_lock:
self._transient_timeouts_count += 1
if self._transient_timeouts_count >= self._transient_timeouts_threshold:
return False
return True
def schedule(self, function, args, kwargs):
"""Schedules `function` to be dispatched to a worker for execution.
Args:
function: The function to be dispatched to a worker for execution
asynchronously.
args: Positional arguments for `fn`.
kwargs: Keyword arguments for `fn`.
Returns:
A `RemoteValue` object.
"""
closure = Closure(
function,
self.closure_queue._cancellation_mgr, # pylint: disable=protected-access
args=args,
kwargs=kwargs)
ret = closure.build_output_remote_value()
self.closure_queue.put(closure)
return ret
def join(self):
"""Blocks until all scheduled functions are executed."""
self.closure_queue.wait()
def done(self):
"""Returns true if all scheduled functions are executed."""
return self.closure_queue.done()
@tf_export("distribute.experimental.coordinator.ClusterCoordinator",
"distribute.coordinator.ClusterCoordinator", v1=[])
| Cluster |
python | milvus-io__pymilvus | pymilvus/orm/partition.py | {
"start": 1059,
"end": 31536
} | class ____:
def __init__(
self,
collection: Union[Collection, str],
name: str,
description: str = "",
**kwargs,
) -> Partition:
# ruff: noqa: PLC0415
from .collection import Collection
if isinstance(collection, Collection):
self._collection = collection
elif isinstance(collection, str):
self._collection = Collection(collection)
else:
msg = "Collection must be of type pymilvus.Collection or String"
raise MilvusException(message=msg)
self._name = name
self._description = description
if kwargs.get("construct_only", False):
return
if not self._collection.has_partition(self.name, **kwargs):
conn = self._get_connection()
conn.create_partition(self._collection.name, self.name, **kwargs)
def __repr__(self) -> str:
return orjson.dumps(
{
"name": self.name,
"collection_name": self._collection.name,
"description": self.description,
}
).decode(Config.EncodeProtocol)
def _get_connection(self):
return self._collection._get_connection()
@property
def description(self) -> str:
"""str: discription of the partition.
Examples:
>>> from pymilvus import connections, Collection, Partition
>>> connections.connect()
>>> collection = Collection("test_partition_description")
>>> partition = Partition(collection, "comedy", "comedy films")
>>> partition.description
'comedy films'
"""
return self._description
@property
def name(self) -> str:
"""str: name of the partition
Examples:
>>> from pymilvus import connections, Collection, Partition
>>> connections.connect()
>>> collection = Collection("test_partition_name")
>>> partition = Partition(collection, "comedy", "comedy films")
>>> partition.name
'comedy'
"""
return self._name
@property
def is_empty(self) -> bool:
"""bool: whether the partition is empty
Examples:
>>> from pymilvus import connections, Collection, Partition
>>> connections.connect()
>>> collection = Collection("test_partition_is_empty")
>>> partition = Partition(collection, "comedy", "comedy films")
>>> partition.is_empty
True
"""
return self.num_entities == 0
@property
def num_entities(self) -> int:
"""int: number of entities in the partition
Examples:
>>> from pymilvus import connections
>>> connections.connect()
>>> from pymilvus import Collection, Partition, FieldSchema, CollectionSchema, DataType
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_partition_num_entities", schema)
>>> partition = Partition(collection, "comedy", "comedy films")
>>> data = [
... [i for i in range(10)],
... [[float(i) for i in range(2)] for _ in range(10)],
... ]
>>> partition.insert(data)
>>> partition.num_entities
10
"""
conn = self._get_connection()
stats = conn.get_partition_stats(
collection_name=self._collection.name, partition_name=self.name
)
result = {stat.key: stat.value for stat in stats}
result["row_count"] = int(result["row_count"])
return result["row_count"]
def flush(self, timeout: Optional[float] = None, **kwargs):
"""Seal all segment in the collection of this partition.
Inserts after flushing will be written into new segments.
Only sealed segments can be indexed.
Args:
timeout (float, optional): an optional duration of time in seconds to allow
for the RPCs. If timeout is not set, the client keeps waiting until the server
responds or an error occurs.
"""
conn = self._get_connection()
conn.flush([self._collection.name], timeout=timeout, **kwargs)
def drop(self, timeout: Optional[float] = None, **kwargs):
"""Drop the partition, the same as Collection.drop_partition
Args:
timeout (``float``, optional): an optional duration of time in seconds to allow
for the RPCs. If timeout is not set, the client keeps waiting until the server
responds or an error occurs.
Raises:
MilvusException: If anything goes wrong.
Examples:
>>> from pymilvus import connections, Collection, Partition
>>> connections.connect()
>>> collection = Collection("test_partition_drop")
>>> partition = Partition(collection, "comedy", "comedy films")
>>> partition.drop()
"""
conn = self._get_connection()
return conn.drop_partition(self._collection.name, self.name, timeout=timeout, **kwargs)
def load(self, replica_number: Optional[int] = None, timeout: Optional[float] = None, **kwargs):
"""Load the partition data into memory.
Args:
replica_number (``int``, optional): The replica number to load, defaults to None.
timeout (``float``, optional): an optional duration of time in seconds to allow
for the RPCs. If timeout is not set, the client keeps waiting until the
server responds or an error occurs.
Raises:
MilvusException: If anything goes wrong
Examples:
>>> from pymilvus import connections
>>> connections.connect()
>>> from pymilvus import Collection, Partition, FieldSchema, CollectionSchema, DataType
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_partition_load", schema)
>>> partition = Partition(collection, "comedy", "comedy films")
>>> partition.load()
"""
conn = self._get_connection()
return conn.load_partitions(
collection_name=self._collection.name,
partition_names=[self.name],
replica_number=replica_number,
timeout=timeout,
**kwargs,
)
def release(self, timeout: Optional[float] = None, **kwargs):
"""Release the partition data from memory.
Args:
timeout (``float``, optional): an optional duration of time in seconds to allow
for the RPCs. If timeout is not set, the client keeps waiting until the
server responds or an error occurs.
Raises:
MilvusException: If anything goes wrong
Examples:
>>> from pymilvus import connections
>>> connections.connect()
>>> from pymilvus import Collection, Partition, FieldSchema, CollectionSchema, DataType
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_partition_release", schema)
>>> partition = Partition(collection, "comedy", "comedy films")
>>> partition.load()
>>> partition.release()
"""
conn = self._get_connection()
return conn.release_partitions(
collection_name=self._collection.name,
partition_names=[self.name],
timeout=timeout,
**kwargs,
)
def insert(
self,
data: Union[List, pd.DataFrame, utils.SparseMatrixInputType],
timeout: Optional[float] = None,
**kwargs,
) -> MutationResult:
"""Insert data into the partition, the same as Collection.insert(data, [partition])
Args:
data (``list/tuple/pandas.DataFrame/sparse types``): The specified data to insert
partition_name (``str``): The name of the partition the data will be inserted into;
if no partition name is passed, the data will be inserted into the default partition
timeout (``float``, optional): A duration of time in seconds to allow for the RPC
If timeout is set to None, the client keeps waiting until the server
responds or an error occurs.
Returns:
MutationResult: contains 2 properties, `insert_count` and `primary_keys`
`insert_count`: how many entities have been inserted into Milvus,
`primary_keys`: list of primary keys of the inserted entities
Raises:
MilvusException: If anything goes wrong.
Examples:
>>> from pymilvus import connections
>>> connections.connect()
>>> from pymilvus import Collection, Partition, FieldSchema, CollectionSchema, DataType
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_partition_insert", schema)
>>> partition = Partition(collection, "comedy", "comedy films")
>>> data = [
... [i for i in range(10)],
... [[float(i) for i in range(2)] for _ in range(10)],
... ]
>>> res = partition.insert(data)
>>> res.insert_count
10
"""
return self._collection.insert(data, self.name, timeout=timeout, **kwargs)
def delete(self, expr: str, timeout: Optional[float] = None, **kwargs):
"""Delete entities with an expression condition.
Args:
expr (``str``): The boolean expression used to filter the entities to delete.
partition_names (``List[str]``): Names of the partitions to delete entities from.
timeout (``float``, optional): A duration of time in seconds to allow for the RPC.
If timeout is set to None, the client keeps waiting until the server responds
or an error occurs.
Returns:
MutationResult: contains a `delete_count` property that represents
how many entities may have been deleted.
Raises:
MilvusException: If anything goes wrong.
Examples:
>>> from pymilvus import Collection, Partition, FieldSchema, CollectionSchema, DataType
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", DataType.FLOAT_VECTOR, dim=2)
... ])
>>> test_collection = Collection("test_partition_delete", schema)
>>> test_partition = Partition(test_collection, "comedy films")
>>> data = [
... [i for i in range(10)],
... [[float(i) for i in range(2)] for _ in range(10)],
... ]
>>> test_partition.insert(data)
(insert count: 10, delete count: 0, upsert count: 0, timestamp: 431044482906718212)
>>> test_partition.delete("film_id in [0, 1]")
(insert count: 0, delete count: 2, upsert count: 0, timestamp: 431044582560759811)
"""
return self._collection.delete(expr, self.name, timeout=timeout, **kwargs)
def upsert(
self,
data: Union[List, pd.DataFrame, utils.SparseMatrixInputType],
timeout: Optional[float] = None,
**kwargs,
) -> MutationResult:
"""Upsert data into the collection.
Args:
data (``list/tuple/pandas.DataFrame/sparse types``): The specified data to upsert
timeout (``float``, optional): A duration of time in seconds to allow for the RPC.
If timeout is set to None, the client keeps waiting until the server responds
or an error occurs.
**kwargs (``dict``): Optional upsert params
* *partial_update* (``bool``, optional): Whether this is a partial update operation.
If True, only the specified fields will be updated while others remain unchanged
Default is False.
Returns:
MutationResult: contains 2 properties, `upsert_count` and `primary_keys`
`upsert_count`: how many entities have been upserted into Milvus,
`primary_keys`: list of primary keys of the upserted entities
Raises:
MilvusException: If anything goes wrong.
Examples:
>>> from pymilvus import Collection, Partition, FieldSchema, CollectionSchema, DataType
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_partition_upsert", schema)
>>> partition = Partition(collection, "comedy", "comedy films")
>>> data = [
... [i for i in range(10)],
... [[float(i) for i in range(2)] for _ in range(10)],
... ]
>>> res = partition.upsert(data)
>>> res.upsert_count
10
"""
return self._collection.upsert(data, self.name, timeout=timeout, **kwargs)
def search(
self,
data: Union[List, utils.SparseMatrixInputType],
anns_field: str,
param: Dict,
limit: int,
expr: Optional[str] = None,
output_fields: Optional[List[str]] = None,
timeout: Optional[float] = None,
round_decimal: int = -1,
**kwargs,
) -> SearchResult:
"""Conducts a vector similarity search with an optional boolean expression as filter.
Args:
data (``List[List[float]]`` or sparse types): The vectors of search data.
the length of data is the number of queries (nq),
and the dim of every vector in data must be equal to that of the collection's vector field.
anns_field (``str``): The name of the collection's vector field used to search.
param (``dict[str, Any]``):
The parameters of search. The followings are valid keys of param.
* *nprobe*, *ef*, *search_k*, etc
Corresponding search params for a certain index.
* *metric_type* (``str``)
similarity metric type; the value must be of type str.
* *offset* (``int``, optional)
offset for pagination.
* *limit* (``int``, optional)
limit for the search results and pagination.
example for param::
{
"nprobe": 128,
"metric_type": "L2",
"offset": 10,
"limit": 10,
}
limit (``int``): The max number of returned records, also known as `topk`.
expr (``str``): The boolean expression used to filter attributes. Defaults to None.
example for expr::
"id_field >= 0", "id_field in [1, 2, 3, 4]"
output_fields (``List[str]``, optional):
The name of fields to return in the search result. Can only get scalar fields.
round_decimal (``int``, optional): The specified number of decimal places of
returned distance. Defaults to -1, meaning no rounding of the returned distance.
**kwargs (``dict``): Optional search params
* *_async* (``bool``, optional)
Indicate if invoke asynchronously.
Returns a SearchFuture if True, else returns results from server directly.
* *_callback* (``function``, optional)
The callback function which is invoked after server response successfully.
It functions only if _async is set to True.
* *consistency_level* (``str/int``, optional)
Which consistency level to use when searching in the collection.
Options of consistency level: Strong, Bounded, Eventually, Session, Customized.
Note: this parameter overwrites the one specified when creating the collection;
if no consistency level is specified here, the search will use the
consistency level set when the collection was created.
* *guarantee_timestamp* (``int``, optional)
Instructs Milvus to see all operations performed before this timestamp.
By default Milvus will search all operations performed to date.
Note: only valid in Customized consistency level.
* *graceful_time* (``int``, optional)
Search will use (current_timestamp - graceful_time) as the
`guarantee_timestamp`. Defaults to 5s.
Note: only valid in Bounded consistency level
Returns:
SearchResult:
Returns ``SearchResult`` if `_async` is False , otherwise ``SearchFuture``
.. _Metric type documentations:
https://milvus.io/docs/v2.2.x/metric.md
.. _Index documentations:
https://milvus.io/docs/v2.2.x/index.md
.. _How guarantee ts works:
https://github.com/milvus-io/milvus/blob/master/docs/developer_guides/how-guarantee-ts-works.md
Raises:
MilvusException: If anything goes wrong
Examples:
>>> from pymilvus import Collection, FieldSchema, CollectionSchema, DataType
>>> import random
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_search", schema)
>>> collection.create_index(
... "films",
... {"index_type": "FLAT", "metric_type": "L2", "params": {}})
>>> partition = Partition(collection, "comedy", "comedy films")
>>> # insert
>>> data = [
... [i for i in range(10)],
... [[random.random() for _ in range(2)] for _ in range(10)],
... ]
>>> partition.insert(data)
>>> partition.load()
>>> # search
>>> search_param = {
... "data": [[1.0, 1.0]],
... "anns_field": "films",
... "param": {"metric_type": "L2"},
... "limit": 2,
... "expr": "film_id > 0",
... }
>>> res = partition.search(**search_param)
>>> assert len(res) == 1
>>> hits = res[0]
>>> assert len(hits) == 2
>>> print(f"- Total hits: {len(hits)}, hits ids: {hits.ids} ")
- Total hits: 2, hits ids: [8, 5]
>>> print(f"- Top1 hit id: {hits[0].id}, score: {hits[0].score} ")
- Top1 hit id: 8, score: 0.10143111646175385
"""
return self._collection.search(
data=data,
anns_field=anns_field,
param=param,
limit=limit,
expr=expr,
partition_names=[self.name],
output_fields=output_fields,
round_decimal=round_decimal,
timeout=timeout,
**kwargs,
)
def hybrid_search(
self,
reqs: List,
rerank: BaseRanker,
limit: int,
output_fields: Optional[List[str]] = None,
timeout: Optional[float] = None,
round_decimal: int = -1,
**kwargs,
):
"""Conducts multi vector similarity search with a rerank for rearrangement.
Args:
reqs (``List[AnnSearchRequest]``): The vector search requests.
rerank (``BaseRanker``): The reranker used to rearrange the `limit` results.
limit (``int``): The max number of returned records, also known as `topk`.
output_fields (``List[str]``, optional):
The name of fields to return in the search result. Can only get scalar fields.
round_decimal (``int``, optional):
The specified number of decimal places of returned distance.
Defaults to -1, meaning no rounding of the returned distance.
timeout (``float``, optional): A duration of time in seconds to allow for the RPC.
If timeout is set to None, the client keeps waiting until the server
responds or an error occurs.
**kwargs (``dict``): Optional search params
* *_async* (``bool``, optional)
Indicate if invoke asynchronously.
Returns a SearchFuture if True, else returns results from server directly.
* *_callback* (``function``, optional)
The callback function which is invoked after server response successfully.
It functions only if _async is set to True.
* *offset* (``int``, optional)
offset for pagination.
* *consistency_level* (``str/int``, optional)
Which consistency level to use when searching in the collection.
Options of consistency level: Strong, Bounded, Eventually, Session, Customized.
Note: this parameter overwrites the one specified when creating the collection;
if no consistency level is specified here, the search will use the
consistency level set when the collection was created.
* *guarantee_timestamp* (``int``, optional)
Instructs Milvus to see all operations performed before this timestamp.
By default Milvus will search all operations performed to date.
Note: only valid in Customized consistency level.
* *graceful_time* (``int``, optional)
Search will use (current_timestamp - graceful_time) as the
`guarantee_timestamp`. Defaults to 5s.
Note: only valid in Bounded consistency level
Returns:
SearchResult:
Returns ``SearchResult`` if `_async` is False , otherwise ``SearchFuture``
.. _Metric type documentations:
https://milvus.io/docs/v2.2.x/metric.md
.. _Index documentations:
https://milvus.io/docs/v2.2.x/index.md
.. _How guarantee ts works:
https://github.com/milvus-io/milvus/blob/master/docs/developer_guides/how-guarantee-ts-works.md
Raises:
MilvusException: If anything goes wrong
Examples:
>>> from pymilvus import (Collection, FieldSchema, CollectionSchema, DataType,
>>> AnnSearchRequest, RRFRanker, WeightedRanker)
>>> import random
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2),
... FieldSchema("poster", dtype=DataType.FLOAT_VECTOR, dim=2),
... ])
>>> collection = Collection("test_collection_search", schema)
>>> collection.create_index(
... "films",
... {"index_type": "FLAT", "metric_type": "L2", "params": {}})
>>> collection.create_index(
... "poster",
... {"index_type": "FLAT", "metric_type": "L2", "params": {}})
>>> partition = Partition(collection, "comedy", "comedy films")
>>> # insert
>>> data = [
... [i for i in range(10)],
... [[random.random() for _ in range(2)] for _ in range(10)],
... [[random.random() for _ in range(2)] for _ in range(10)],
... ]
>>> partition.insert(data)
>>> partition.load()
>>> # search
>>> search_param1 = {
... "data": [[1.0, 1.0]],
... "anns_field": "films",
... "param": {"metric_type": "L2"},
... "limit": 2,
... "expr": "film_id > 0",
... }
>>> req1 = AnnSearchRequest(**search_param1)
>>> search_param2 = {
... "data": [[2.0, 2.0]],
... "anns_field": "poster",
... "param": {"metric_type": "L2", "offset": 1},
... "limit": 2,
... "expr": "film_id > 0",
... }
>>> req2 = AnnSearchRequest(**search_param2)
>>> res = partition.hybrid_search([req1, req2], WeightedRanker(0.9, 0.1), 2)
>>> assert len(res) == 1
>>> hits = res[0]
>>> assert len(hits) == 2
>>> print(f"- Total hits: {len(hits)}, hits ids: {hits.ids} ")
- Total hits: 2, hits ids: [8, 5]
>>> print(f"- Top1 hit id: {hits[0].id}, score: {hits[0].score} ")
- Top1 hit id: 8, score: 0.10143111646175385
"""
return self._collection.hybrid_search(
reqs=reqs,
rerank=rerank,
limit=limit,
partition_names=[self.name],
output_fields=output_fields,
round_decimal=round_decimal,
timeout=timeout,
**kwargs,
)
def query(
self,
expr: str,
output_fields: Optional[List[str]] = None,
timeout: Optional[float] = None,
**kwargs,
):
"""Query with expressions
Args:
expr (``str``): The query expression.
output_fields (``List[str]``): A list of field names to return. Defaults to None.
timeout (``float``, optional): A duration of time in seconds to allow for the RPC.
If timeout is set to None, the client keeps waiting until the server responds
or an error occurs.
**kwargs (``dict``, optional):
* *consistency_level* (``str/int``, optional)
Which consistency level to use when searching in the collection.
Options of consistency level: Strong, Bounded, Eventually, Session, Customized.
Note: this parameter overwrites the one specified when creating the collection;
if no consistency level is specified here, the search will use the
consistency level set when the collection was created.
* *guarantee_timestamp* (``int``, optional)
Instructs Milvus to see all operations performed before this timestamp.
By default Milvus will search all operations performed to date.
Note: only valid in Customized consistency level.
* *graceful_time* (``int``, optional)
Search will use (current_timestamp - graceful_time) as the
`guarantee_timestamp`. Defaults to 5s.
Note: only valid in Bounded consistency level
* *offset* (``int``)
Combined with limit to enable pagination
* *limit* (``int``)
Combined with offset to enable pagination
Returns:
List, contains all results
Raises:
MilvusException: If anything goes wrong
Examples:
>>> from pymilvus import Collection, FieldSchema, CollectionSchema, DataType
>>> import random
>>> schema = CollectionSchema([
... FieldSchema("film_id", DataType.INT64, is_primary=True),
... FieldSchema("film_date", DataType.INT64),
... FieldSchema("films", dtype=DataType.FLOAT_VECTOR, dim=2)
... ])
>>> collection = Collection("test_collection_query", schema)
>>> collection.create_index(
... "films",
... {"index_type": "FLAT", "metric_type": "L2", "params": {}})
>>> partition = Partition(collection, "comedy", "comedy films")
>>> # insert
>>> data = [
... [i for i in range(10)],
... [i + 2000 for i in range(10)],
... [[random.random() for _ in range(2)] for _ in range(10)],
... ]
>>> partition.insert(data)
>>> partition.load()
>>> # query
>>> expr = "film_id in [ 0, 1 ]"
>>> res = partition.query(expr, output_fields=["film_date"])
>>> assert len(res) == 2
>>> print(f"- Query results: {res}")
- Query results: [{'film_id': 0, 'film_date': 2000}, {'film_id': 1, 'film_date': 2001}]
"""
return self._collection.query(
expr=expr,
output_fields=output_fields,
partition_names=[self.name],
timeout=timeout,
**kwargs,
)
def get_replicas(self, timeout: Optional[float] = None, **kwargs) -> Replica:
"""Get the current loaded replica information
Args:
timeout (``float``, optional): An optional duration of time in seconds to allow for
the RPC. When timeout is set to None, the client waits until the server responds or an error occurs.
Returns:
Replica: All the replica information.
"""
return self._collection.get_replicas(timeout=timeout, **kwargs)
| Partition |
python | apache__airflow | providers/google/src/airflow/providers/google/marketing_platform/links/analytics_admin.py | {
"start": 1209,
"end": 1674
} | class ____(BaseOperatorLink):
"""
Base class for Google Analytics links.
:meta private:
"""
name: ClassVar[str]
key: ClassVar[str]
format_str: ClassVar[str]
def get_link(self, operator: BaseOperator, *, ti_key: TaskInstanceKey) -> str:
if conf := XCom.get_value(key=self.key, ti_key=ti_key):
res = BASE_LINK + "#/" + self.format_str.format(**conf)
return res
return ""
| GoogleAnalyticsBaseLink |
python | tensorflow__tensorflow | tensorflow/python/distribute/strategy_test_lib.py | {
"start": 2473,
"end": 6025
} | class ____(Exception):
pass
# Conditionally wrap the fn in a def_function.function (so it runs in graph
# mode).
def _maybe_run_in_function(fn, run_in_function=False):
if not run_in_function or not context.executing_eagerly():
return fn
else:
return def_function.function()(fn)
# May be the argument to either distribution.extended.call_for_each_replica() or
# get_replica_context().merge_call()
def _raise_exception_fn(_=None):
raise _TestException()
# Must be the argument to a distribution.extended.call_for_each_replica() call,
# calls a get_replica_context().merge_call() that raises an exception.
def _merge_raises_fn():
distribute_lib.get_replica_context().merge_call(_raise_exception_fn)
# Must be the argument to a get_replica_context().merge_call() call, calls
# dist.extended.call_for_each_replica() with a function that raises an
# exception.
def _call_raises_fn(dist):
dist.extended.call_for_each_replica(_raise_exception_fn)
# Must be the argument to a distribution.extended.call_for_each_replica() call,
# calls a get_replica_context().merge_call() that calls a
# call_for_each_replica() that raises an exception.
def _merge_call_raises_fn():
distribute_lib.get_replica_context().merge_call(_call_raises_fn)
# Must be the argument to a get_replica_context().merge_call() call, calls
# dist.extended.call_for_each_replica() with a function that calls a
# get_replica_context().merge_call() that raises an exception.
def _call_merge_raises_fn(dist):
dist.extended.call_for_each_replica(_merge_raises_fn)
# Must be the argument to a distribution.extended.call_for_each_replica() call,
# calls a get_replica_context().merge_call() that calls a
# call_for_each_replica() that calls a get_replica_context().merge_call() that
# raises an exception.
def _merge_call_merge_raises_fn():
distribute_lib.get_replica_context().merge_call(_call_merge_raises_fn)
def _events_from_logdir(test_case, logdir):
"""Reads summary events from log directory."""
test_case.assertTrue(gfile.Exists(logdir))
files = gfile.ListDirectory(logdir)
test_case.assertLen(files, 1)
records = list(tf_record.tf_record_iterator(os.path.join(logdir, files[0])))
result = []
for r in records:
event = event_pb2.Event()
event.ParseFromString(r)
result.append(event)
return result
def create_variable_like_keras_layer(name, shape, dtype):
"""Utility for create variables that works like variable in keras layer."""
initializer = functools.partial(
init_ops_v2.GlorotUniform(), shape, dtype=dtype)
return variables.Variable(
initial_value=initializer, name=name, trainable=True)
def is_optimizer_v2_instance(optimizer_obj):
# For an optimizer instance, the v2 implementation has var_list as a required
# argument.
arg_spec = tf_inspect.getfullargspec(optimizer_obj.minimize)
return "var_list" in arg_spec.args[:-len(arg_spec.defaults)]
def is_mirrored_strategy(strategy: distribute_lib.Strategy) -> bool:
return isinstance(
strategy,
(mirrored_lib.MirroredStrategy, mirrored_lib.MirroredStrategyV1))
def is_multi_worker_mirrored_strategy(
strategy: distribute_lib.Strategy) -> bool:
return isinstance(strategy, (mwms_lib.CollectiveAllReduceStrategy,
mwms_lib.CollectiveAllReduceStrategyV1))
def is_tpu_strategy(strategy: distribute_lib.Strategy) -> bool:
return isinstance(strategy,
(tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1,
tpu_strategy.TPUStrategyV2))
| _TestException |
python | langchain-ai__langchain | libs/core/langchain_core/stores.py | {
"start": 9061,
"end": 9181
} | class ____(LangChainException):
"""Raised when a key is invalid; e.g., uses incorrect characters."""
| InvalidKeyException |
python | getsentry__sentry | src/sentry/hybridcloud/rpc/caching/service.py | {
"start": 3452,
"end": 6198
} | class ____(Generic[_R]):
"""
Get a list of results from cache or wrapped function.
When cache read returns no data, the wrapped function will be
invoked. The result of the wrapped function is then stored in cache.
Ideal for 'get many X for organization' style methods
"""
silo_mode: SiloMode
base_key: str
cb: Callable[[int], list[_R]]
type_: type[_R]
timeout: int | None
def __init__(
self,
base_key: str,
silo_mode: SiloMode,
cb: Callable[[int], list[_R]],
t: type[_R],
timeout: int | None = None,
):
self.base_key = base_key
self.silo_mode = silo_mode
self.cb = cb
self.type_ = t
self.timeout = timeout
def __call__(self, object_id: int) -> list[_R]:
if (
SiloMode.get_current_mode() != self.silo_mode
and SiloMode.get_current_mode() != SiloMode.MONOLITH
):
return self.cb(object_id)
return self.get_results(object_id)
def key_from(self, object_id: int) -> str:
return f"{self.base_key}:{object_id}"
def resolve_from(
self, object_id: int, values: Mapping[str, int | str]
) -> Generator[None, None, list[_R]]:
from .impl import _consume_generator, _delete_cache, _set_cache
key = self.key_from(object_id)
value = values[key]
version: int
if isinstance(value, str):
try:
metrics.incr("hybridcloud.caching.list.cached", tags={"base_key": self.base_key})
return [self.type_(**item) for item in json.loads(value)]
except (pydantic.ValidationError, JSONDecodeError, TypeError) as err:
metrics.incr(
"hybridcloud.caching.list.failed_read",
tags={
"base_key": self.base_key,
"err": type(err).__name__,
},
)
version = yield from _delete_cache(key, self.silo_mode)
else:
version = value
metrics.incr("hybridcloud.caching.list.rpc", tags={"base_key": self.base_key})
result = self.cb(object_id)
if result is not None:
cache_value = json.dumps([item.dict() for item in result])
_consume_generator(_set_cache(key, cache_value, version, self.timeout))
return result
def get_results(self, object_id: int) -> list[_R]:
from .impl import _consume_generator, _get_cache
key = self.key_from(object_id)
values = _consume_generator(_get_cache([key], self.silo_mode))
return _consume_generator(self.resolve_from(object_id, values))
| SiloCacheBackedListCallable |
python | pallets__flask | src/flask/json/tag.py | {
"start": 4449,
"end": 4770
} | class ____(JSONTag):
__slots__ = ()
key = " b"
def check(self, value: t.Any) -> bool:
return isinstance(value, bytes)
def to_json(self, value: t.Any) -> t.Any:
return b64encode(value).decode("ascii")
def to_python(self, value: t.Any) -> t.Any:
return b64decode(value)
| TagBytes |
python | plotly__plotly.py | plotly/graph_objs/scattergeo/_hoverlabel.py | {
"start": 233,
"end": 11262
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scattergeo"
_path_str = "scattergeo.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scattergeo.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scattergeo.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scattergeo.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scattergeo.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattergeo.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | langchain-ai__langchain | libs/langchain/tests/mock_servers/robot/server.py | {
"start": 1365,
"end": 1454
} | class ____(str, Enum):
low = "low"
medium = "medium"
high = "high"
| Cautiousness |
python | pytorch__pytorch | test/dynamo/cpython/3_13/typinganndata/ann_module9.py | {
"start": 182,
"end": 280
} | class ____: ...
A.__module__ = 'testModule.typing'
A.__qualname__ = 'A'
ann1 = Union[List[A], int]
| A |
python | huggingface__transformers | src/transformers/models/mm_grounding_dino/modeling_mm_grounding_dino.py | {
"start": 11037,
"end": 19609
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
vision_dim = text_dim = config.d_model
embed_dim = config.encoder_ffn_dim // 2
num_heads = config.encoder_attention_heads // 2
dropout = config.fusion_dropout
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.vision_dim = vision_dim
self.text_dim = text_dim
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"`embed_dim` must be divisible by `num_heads` (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
)
self.scale = self.head_dim ** (-0.5)
self.dropout = dropout
self.vision_proj = nn.Linear(self.vision_dim, self.embed_dim)
self.text_proj = nn.Linear(self.text_dim, self.embed_dim)
self.values_vision_proj = nn.Linear(self.vision_dim, self.embed_dim)
self.values_text_proj = nn.Linear(self.text_dim, self.embed_dim)
self.out_vision_proj = nn.Linear(self.embed_dim, self.vision_dim)
self.out_text_proj = nn.Linear(self.embed_dim, self.text_dim)
def _reshape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
vision_features: torch.FloatTensor,
text_features: torch.FloatTensor,
vision_attention_mask: Optional[torch.BoolTensor] = None,
text_attention_mask: Optional[torch.BoolTensor] = None,
) -> tuple[tuple[torch.FloatTensor, torch.FloatTensor], tuple[torch.FloatTensor, torch.FloatTensor]]:
"""Image-to-text and text-to-image cross-attention
Args:
vision_features (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, hidden_dim)`):
Projected flattened image features generated by the vision backbone.
text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_dim)`):
Projected text features generated by the text encoder.
vision_attention_mask (`torch.BoolTensor`, **optional**):
Attention mask for image-to-text cross-attention. False for real tokens and True for padding tokens.
text_attention_mask (`torch.BoolTensor`, **optional**):
Attention mask for text-to-image cross-attention. False for real tokens and True for padding tokens.
Returns:
`tuple(tuple(torch.FloatTensor), tuple(torch.FloatTensor))` where each inner tuple comprises an attention
output and weights:
- **vision_attn_output** (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, hidden_dim)`)
--
Output of the image-to-text cross-attention layer.
- **vision_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, vision_sequence_length,
text_sequence_length)`) --
Attention weights of the image-to-text cross-attention layer.
- **text_attn_output** (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_dim)`) --
Output of the text-to-image cross-attention layer.
- **text_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, text_sequence_length,
vision_sequence_length)`) --
Attention weights of the text-to-image cross-attention layer.
"""
batch_size, tgt_len, _ = vision_features.size()
vision_query_states = self.vision_proj(vision_features) * self.scale
vision_query_states = self._reshape(vision_query_states, tgt_len, batch_size)
text_key_states = self.text_proj(text_features)
text_key_states = self._reshape(text_key_states, -1, batch_size)
vision_value_states = self.values_vision_proj(vision_features)
vision_value_states = self._reshape(vision_value_states, -1, batch_size)
text_value_states = self.values_text_proj(text_features)
text_value_states = self._reshape(text_value_states, -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
vision_query_states = vision_query_states.view(*proj_shape)
text_key_states = text_key_states.view(*proj_shape)
vision_value_states = vision_value_states.view(*proj_shape)
text_value_states = text_value_states.view(*proj_shape)
src_len = text_key_states.size(1)
attn_weights = torch.bmm(vision_query_states, text_key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt
if attn_weights.size() != (batch_size * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(batch_size * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
)
attn_weights = attn_weights - attn_weights.max()
# Do not increase -50000/50000, data type half has quite limited range
attn_weights = torch.clamp(attn_weights, min=-50000, max=50000)
attn_weights_transposed = attn_weights.transpose(1, 2)
text_attn_weights = attn_weights_transposed - torch.max(attn_weights_transposed, dim=-1, keepdim=True)[0]
# Do not increase -50000/50000, data type half has quite limited range
text_attn_weights = torch.clamp(text_attn_weights, min=-50000, max=50000)
# mask vision for language
if vision_attention_mask is not None:
vision_attention_mask = (
vision_attention_mask[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
)
text_attn_weights.masked_fill_(vision_attention_mask, float("-inf"))
text_attn_weights = text_attn_weights.softmax(dim=-1)
# mask language for vision
if text_attention_mask is not None:
text_attention_mask = text_attention_mask[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
attn_weights.masked_fill_(text_attention_mask, float("-inf"))
vision_attn_weights = attn_weights.softmax(dim=-1)
vision_attn_probs = F.dropout(vision_attn_weights, p=self.dropout, training=self.training)
text_attn_probs = F.dropout(text_attn_weights, p=self.dropout, training=self.training)
vision_attn_output = torch.bmm(vision_attn_probs, text_value_states)
text_attn_output = torch.bmm(text_attn_probs, vision_value_states)
if vision_attn_output.size() != (batch_size * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`vision_attn_output` should be of size {(batch_size, self.num_heads, tgt_len, self.head_dim)}, but is {vision_attn_output.size()}"
)
if text_attn_output.size() != (batch_size * self.num_heads, src_len, self.head_dim):
raise ValueError(
f"`text_attn_output` should be of size {(batch_size, self.num_heads, src_len, self.head_dim)}, but is {text_attn_output.size()}"
)
vision_attn_output = vision_attn_output.view(batch_size, self.num_heads, tgt_len, self.head_dim)
vision_attn_output = vision_attn_output.transpose(1, 2)
vision_attn_output = vision_attn_output.reshape(batch_size, tgt_len, self.embed_dim)
text_attn_output = text_attn_output.view(batch_size, self.num_heads, src_len, self.head_dim)
text_attn_output = text_attn_output.transpose(1, 2)
text_attn_output = text_attn_output.reshape(batch_size, src_len, self.embed_dim)
vision_attn_output = self.out_vision_proj(vision_attn_output)
text_attn_output = self.out_text_proj(text_attn_output)
return (vision_attn_output, vision_attn_weights), (text_attn_output, text_attn_weights)
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
| MMGroundingDinoBiMultiHeadAttention |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | experiments/Solve_BipedalWalker/A3C.py | {
"start": 825,
"end": 4926
} | class ____(object):
def __init__(self, scope, globalAC=None):
if scope == GLOBAL_NET_SCOPE: # get global network
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self._build_net()
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
else: # local net, calculate losses
with tf.variable_scope(scope):
self.s = tf.placeholder(tf.float32, [None, N_S], 'S')
self.a_his = tf.placeholder(tf.float32, [None, N_A], 'A')
self.v_target = tf.placeholder(tf.float32, [None, 1], 'Vtarget')
mu, sigma, self.v = self._build_net()
td = tf.subtract(self.v_target, self.v, name='TD_error')
with tf.name_scope('c_loss'):
self.c_loss = tf.reduce_mean(tf.square(td))
with tf.name_scope('wrap_a_out'):
self.test = sigma[0]
mu, sigma = mu * A_BOUND[1], sigma + 1e-5
normal_dist = tf.contrib.distributions.Normal(mu, sigma)
with tf.name_scope('a_loss'):
log_prob = normal_dist.log_prob(self.a_his)
exp_v = log_prob * td
entropy = normal_dist.entropy() # encourage exploration
self.exp_v = ENTROPY_BETA * entropy + exp_v
self.a_loss = tf.reduce_mean(-self.exp_v)
with tf.name_scope('choose_a'): # use local params to choose action
self.A = tf.clip_by_value(tf.squeeze(normal_dist.sample(1)), *A_BOUND)
with tf.name_scope('local_grad'):
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
self.a_grads = tf.gradients(self.a_loss, self.a_params)
self.c_grads = tf.gradients(self.c_loss, self.c_params)
with tf.name_scope('sync'):
with tf.name_scope('pull'):
self.pull_a_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]
self.pull_c_params_op = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]
with tf.name_scope('push'):
self.update_a_op = OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))
self.update_c_op = OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))
def _build_net(self):
w_init = tf.contrib.layers.xavier_initializer()
with tf.variable_scope('actor'):
l_a = tf.layers.dense(self.s, 500, tf.nn.relu6, kernel_initializer=w_init, name='la')
l_a = tf.layers.dense(l_a, 300, tf.nn.relu6, kernel_initializer=w_init, name='la2')
mu = tf.layers.dense(l_a, N_A, tf.nn.tanh, kernel_initializer=w_init, name='mu')
sigma = tf.layers.dense(l_a, N_A, tf.nn.softplus, kernel_initializer=w_init, name='sigma')
with tf.variable_scope('critic'):
l_c = tf.layers.dense(self.s, 500, tf.nn.relu6, kernel_initializer=w_init, name='lc')
l_c = tf.layers.dense(l_c, 300, tf.nn.relu6, kernel_initializer=w_init, name='lc2')
v = tf.layers.dense(l_c, 1, kernel_initializer=w_init, name='v') # state value
return mu, sigma, v
def update_global(self, feed_dict): # run by a local
_, _, t = SESS.run([self.update_a_op, self.update_c_op, self.test], feed_dict) # local grads applies to global net
return t
def pull_global(self): # run by a local
SESS.run([self.pull_a_params_op, self.pull_c_params_op])
def choose_action(self, s): # run by a local
s = s[np.newaxis, :]
return SESS.run(self.A, {self.s: s})
| ACNet |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol24.py | {
"start": 1221,
"end": 1477
} | class ____(metaclass=GMeta):
pass
# The following four lines should generate an error.
pc1: ProtoG1 = ConcreteG1
pc2: ProtoG1 = ConcreteG2
pc3: ProtoG2 = ConcreteG2
pc4: ProtoG1 = ConcreteG3
pc5: ProtoG2 = ConcreteG1
pc6: ProtoG2 = ConcreteG3
| ConcreteG3 |
python | mlflow__mlflow | tests/genai/judges/test_alignment_optimizer.py | {
"start": 263,
"end": 994
} | class ____(Judge):
"""Mock Judge implementation for testing."""
def __init__(self, name: str = "mock_judge", **kwargs):
super().__init__(name=name, **kwargs)
@property
def instructions(self) -> str:
return f"Mock judge implementation: {self.name}"
def get_input_fields(self) -> list[JudgeField]:
"""Get input fields for mock judge."""
return [
JudgeField(name="input", description="Mock input field"),
JudgeField(name="output", description="Mock output field"),
]
def __call__(self, **kwargs):
from mlflow.entities.assessment import Feedback
return Feedback(name=self.name, value=True, rationale="Mock evaluation")
| MockJudge |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/utils/__init__.py | {
"start": 2666,
"end": 3178
} | class ____(Enum):
"""
An Enum class which can be compared with regular `str` and subclasses.
This class avoids multiple inheritance such as AwesomeEnum(str, Enum)
which does not work well with templated_fields and Jinja templates.
"""
def __eq__(self, other):
if isinstance(other, str):
return self.value == other
return super().__eq__(other)
def __hash__(self):
return super().__hash__() # Need to set because we redefine __eq__
| _StringCompareEnum |
python | django__django | django/db/backends/oracle/schema.py | {
"start": 238,
"end": 10727
} | class ____(BaseDatabaseSchemaEditor):
sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s"
sql_alter_column_type = "MODIFY %(column)s %(type)s%(collation)s"
sql_alter_column_null = "MODIFY %(column)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s NOT NULL"
sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s"
sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL"
sql_alter_column_no_default_null = sql_alter_column_no_default
sql_create_column_inline_fk = (
"CONSTRAINT %(name)s REFERENCES %(to_table)s(%(to_column)s)%(on_delete_db)"
"s%(deferrable)s"
)
sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
def quote_value(self, value):
if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
return "'%s'" % value
elif isinstance(value, datetime.timedelta):
return "'%s'" % duration_iso_string(value)
elif isinstance(value, str):
return "'%s'" % value.replace("'", "''")
elif isinstance(value, (bytes, bytearray, memoryview)):
return "'%s'" % value.hex()
elif isinstance(value, bool):
return "1" if value else "0"
else:
return str(value)
def remove_field(self, model, field):
# If the column is an identity column, drop the identity before
# removing the field.
if self._is_identity_column(model._meta.db_table, field.column):
self._drop_identity(model._meta.db_table, field.column)
super().remove_field(model, field)
def delete_model(self, model):
# Run superclass action
super().delete_model(model)
# Clean up manually created sequence.
self.execute(
"""
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(1) INTO i FROM USER_SEQUENCES
WHERE SEQUENCE_NAME = '%(sq_name)s';
IF i = 1 THEN
EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"';
END IF;
END;
/"""
% {
"sq_name": self.connection.ops._get_no_autofield_sequence_name(
model._meta.db_table
)
}
)
def alter_field(self, model, old_field, new_field, strict=False):
try:
super().alter_field(model, old_field, new_field, strict)
except DatabaseError as e:
description = str(e)
# If we're changing type to an unsupported type we need a
# SQLite-ish workaround
if "ORA-22858" in description or "ORA-22859" in description:
self._alter_field_type_workaround(model, old_field, new_field)
# If an identity column is changing to a non-numeric type, drop the
# identity first.
elif "ORA-30675" in description:
self._drop_identity(model._meta.db_table, old_field.column)
self.alter_field(model, old_field, new_field, strict)
# If a primary key column is changing to an identity column, drop
# the primary key first.
elif "ORA-30673" in description and old_field.primary_key:
self._delete_primary_key(model, strict=True)
self._alter_field_type_workaround(model, old_field, new_field)
# If a collation is changing on a primary key, drop the primary key
# first.
elif "ORA-43923" in description and old_field.primary_key:
self._delete_primary_key(model, strict=True)
self.alter_field(model, old_field, new_field, strict)
# Restore a primary key, if needed.
if new_field.primary_key:
self.execute(self._create_primary_key_sql(model, new_field))
else:
raise
def _alter_field_type_workaround(self, model, old_field, new_field):
"""
Oracle refuses to change from some type to other type.
What we need to do instead is:
- Add a nullable version of the desired field with a temporary name. If
the new column is an auto field, then the temporary column can't be
nullable.
- Update the table to transfer values from old to new
- Drop old column
- Rename the new column and possibly drop the nullable property
"""
# Make a new field that's like the new one but with a temporary
# column name.
new_temp_field = copy.deepcopy(new_field)
new_temp_field.null = new_field.get_internal_type() not in (
"AutoField",
"BigAutoField",
"SmallAutoField",
)
new_temp_field.column = self._generate_temp_name(new_field.column)
# Add it
self.add_field(model, new_temp_field)
# Explicit data type conversion
# https://docs.oracle.com/en/database/oracle/oracle-database/21/sqlrf
# /Data-Type-Comparison-Rules.html#GUID-D0C5A47E-6F93-4C2D-9E49-4F2B86B359DD
new_value = self.quote_name(old_field.column)
old_type = old_field.db_type(self.connection)
if re.match("^N?CLOB", old_type):
new_value = "TO_CHAR(%s)" % new_value
old_type = "VARCHAR2"
if re.match("^N?VARCHAR2", old_type):
new_internal_type = new_field.get_internal_type()
if new_internal_type == "DateField":
new_value = "TO_DATE(%s, 'YYYY-MM-DD')" % new_value
elif new_internal_type == "DateTimeField":
new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
elif new_internal_type == "TimeField":
# TimeField are stored as TIMESTAMP with a 1900-01-01 date
# part.
new_value = "CONCAT('1900-01-01 ', %s)" % new_value
new_value = "TO_TIMESTAMP(%s, 'YYYY-MM-DD HH24:MI:SS.FF')" % new_value
# Transfer values across
self.execute(
"UPDATE %s set %s=%s"
% (
self.quote_name(model._meta.db_table),
self.quote_name(new_temp_field.column),
new_value,
)
)
# Drop the old field
self.remove_field(model, old_field)
# Rename and possibly make the new field NOT NULL
super().alter_field(model, new_temp_field, new_field)
# Recreate foreign key (if necessary) because the old field is not
# passed to the alter_field() and data types of new_temp_field and
# new_field always match.
new_type = new_field.db_type(self.connection)
if (
(old_field.primary_key and new_field.primary_key)
or (old_field.unique and new_field.unique)
) and old_type != new_type:
for _, rel in _related_non_m2m_objects(new_temp_field, new_field):
if rel.field.db_constraint:
self.execute(
self._create_fk_sql(rel.related_model, rel.field, "_fk")
)
def _alter_column_type_sql(
self, model, old_field, new_field, new_type, old_collation, new_collation
):
auto_field_types = {"AutoField", "BigAutoField", "SmallAutoField"}
# Drop the identity if migrating away from AutoField.
if (
old_field.get_internal_type() in auto_field_types
and new_field.get_internal_type() not in auto_field_types
and self._is_identity_column(model._meta.db_table, new_field.column)
):
self._drop_identity(model._meta.db_table, new_field.column)
return super()._alter_column_type_sql(
model, old_field, new_field, new_type, old_collation, new_collation
)
def normalize_name(self, name):
"""
Get the properly shortened and uppercased identifier as returned by
quote_name() but without the quotes.
"""
nn = self.quote_name(name)
if nn[0] == '"' and nn[-1] == '"':
nn = nn[1:-1]
return nn
def _generate_temp_name(self, for_name):
"""Generate temporary names for workarounds that need temp columns."""
suffix = hex(hash(for_name)).upper()[1:]
return self.normalize_name(for_name + "_" + suffix)
def prepare_default(self, value):
return self.quote_value(value)
def _field_should_be_indexed(self, model, field):
create_index = super()._field_should_be_indexed(model, field)
db_type = field.db_type(self.connection)
if (
db_type is not None
and db_type.lower() in self.connection._limited_data_types
):
return False
return create_index
def _is_identity_column(self, table_name, column_name):
if not column_name:
return False
with self.connection.cursor() as cursor:
cursor.execute(
"""
SELECT
CASE WHEN identity_column = 'YES' THEN 1 ELSE 0 END
FROM user_tab_cols
WHERE table_name = %s AND
column_name = %s
""",
[self.normalize_name(table_name), self.normalize_name(column_name)],
)
row = cursor.fetchone()
return row[0] if row else False
def _drop_identity(self, table_name, column_name):
self.execute(
"ALTER TABLE %(table)s MODIFY %(column)s DROP IDENTITY"
% {
"table": self.quote_name(table_name),
"column": self.quote_name(column_name),
}
)
def _get_default_collation(self, table_name):
with self.connection.cursor() as cursor:
cursor.execute(
"""
SELECT default_collation FROM user_tables WHERE table_name = %s
""",
[self.normalize_name(table_name)],
)
return cursor.fetchone()[0]
def _collate_sql(self, collation, old_collation=None, table_name=None):
if collation is None and old_collation is not None:
collation = self._get_default_collation(table_name)
return super()._collate_sql(collation, old_collation, table_name)
| DatabaseSchemaEditor |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_comment15.py | {
"start": 315,
"end": 973
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("comment15.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with comments."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
format1 = workbook.add_format({"bold": 1})
worksheet.write("A1", "Foo", format1)
worksheet.write_comment("B2", "Some text")
worksheet.set_comments_author("John")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | kamyu104__LeetCode-Solutions | Python/partition-to-k-equal-sum-subsets.py | {
"start": 882,
"end": 1739
} | class ____(object):
def canPartitionKSubsets(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: bool
"""
def dfs(nums, target, i, subset_sums):
if i == len(nums):
return True
for k in xrange(len(subset_sums)):
if subset_sums[k]+nums[i] > target:
continue
subset_sums[k] += nums[i]
if dfs(nums, target, i+1, subset_sums):
return True
subset_sums[k] -= nums[i]
if not subset_sums[k]: break
return False
total = sum(nums)
if total%k != 0 or max(nums) > total//k:
return False
nums.sort(reverse=True)
subset_sums = [0] * k
return dfs(nums, total//k, 0, subset_sums)
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/minimum-time-to-build-blocks.py | {
"start": 48,
"end": 423
} | class ____(object):
def minBuildTime(self, blocks, split):
"""
:type blocks: List[int]
:type split: int
:rtype: int
"""
heapq.heapify(blocks)
while len(blocks) != 1:
x, y = heapq.heappop(blocks), heapq.heappop(blocks)
heapq.heappush(blocks, y+split)
return heapq.heappop(blocks)
| Solution |
python | Textualize__textual | docs/examples/guide/styles/screen.py | {
"start": 30,
"end": 255
} | class ____(App):
def on_mount(self) -> None:
self.screen.styles.background = "darkblue"
self.screen.styles.border = ("heavy", "white")
if __name__ == "__main__":
app = ScreenApp()
app.run()
| ScreenApp |
python | celery__celery | celery/beat.py | {
"start": 20502,
"end": 22945
} | class ____:
"""Celery periodic task service."""
scheduler_cls = PersistentScheduler
def __init__(self, app, max_interval=None, schedule_filename=None,
scheduler_cls=None):
self.app = app
self.max_interval = (max_interval or
app.conf.beat_max_loop_interval)
self.scheduler_cls = scheduler_cls or self.scheduler_cls
self.schedule_filename = (
schedule_filename or app.conf.beat_schedule_filename)
self._is_shutdown = Event()
self._is_stopped = Event()
def __reduce__(self):
return self.__class__, (self.max_interval, self.schedule_filename,
self.scheduler_cls, self.app)
def start(self, embedded_process=False):
info('beat: Starting...')
debug('beat: Ticking with max interval->%s',
humanize_seconds(self.scheduler.max_interval))
signals.beat_init.send(sender=self)
if embedded_process:
signals.beat_embedded_init.send(sender=self)
platforms.set_process_title('celery beat')
try:
while not self._is_shutdown.is_set():
interval = self.scheduler.tick()
if interval and interval > 0.0:
debug('beat: Waking up %s.',
humanize_seconds(interval, prefix='in '))
time.sleep(interval)
if self.scheduler.should_sync():
self.scheduler._do_sync()
except (KeyboardInterrupt, SystemExit):
self._is_shutdown.set()
finally:
self.sync()
def sync(self):
self.scheduler.close()
self._is_stopped.set()
def stop(self, wait=False):
info('beat: Shutting down...')
self._is_shutdown.set()
wait and self._is_stopped.wait() # block until shutdown done.
def get_scheduler(self, lazy=False,
extension_namespace='celery.beat_schedulers'):
filename = self.schedule_filename
aliases = dict(load_extension_class_names(extension_namespace))
return symbol_by_name(self.scheduler_cls, aliases=aliases)(
app=self.app,
schedule_filename=filename,
max_interval=self.max_interval,
lazy=lazy,
)
@cached_property
def scheduler(self):
return self.get_scheduler()
| Service |
python | getsentry__sentry | src/sentry/backup/comparators.py | {
"start": 26734,
"end": 28075
} | class ____(RegexComparator):
"""Compare the basic format of `QuerySubscription` IDs, which is basically a UUID1 with a
numeric prefix. Ensure that the two values are NOT equivalent."""
def __init__(self, *fields: str):
super().__init__(re.compile("^\\d+/[0-9a-f]{32}$"), *fields)
def compare(self, on: InstanceID, left: Any, right: Any) -> list[ComparatorFinding]:
# First, ensure that the two sides are not equivalent.
findings = []
fields = sorted(self.fields)
for f in fields:
if left["fields"].get(f) is None and right["fields"].get(f) is None:
continue
lv = left["fields"][f]
rv = right["fields"][f]
if lv == rv:
findings.append(
ComparatorFinding(
kind=self.get_kind(),
on=on,
left_pk=left["pk"],
right_pk=right["pk"],
reason=f"""the left value ({lv}) of the subscription ID field `{f}` was
equal to the right value ({rv})""",
)
)
# Now, make sure both IDs' regex are valid.
findings.extend(super().compare(on, left, right))
return findings
| SubscriptionIDComparator |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 501433,
"end": 502024
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("ProjectEdge"), graphql_name="edges")
nodes = sgqlc.types.Field(sgqlc.types.list_of("Project"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| ProjectConnection |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 37754,
"end": 39253
} | class ____(FieldValues):
"""
Valid and invalid values for `DecimalField`.
"""
valid_inputs = {
'12.3': Decimal('12.3'),
'0.1': Decimal('0.1'),
10: Decimal('10'),
0: Decimal('0'),
12.3: Decimal('12.3'),
0.1: Decimal('0.1'),
'2E+1': Decimal('20'),
}
invalid_inputs = (
(None, ["This field may not be null."]),
('', ["A valid number is required."]),
(' ', ["A valid number is required."]),
('abc', ["A valid number is required."]),
(Decimal('Nan'), ["A valid number is required."]),
(Decimal('Snan'), ["A valid number is required."]),
(Decimal('Inf'), ["A valid number is required."]),
('12.345', ["Ensure that there are no more than 3 digits in total."]),
(200000000000.0, ["Ensure that there are no more than 3 digits in total."]),
('0.01', ["Ensure that there are no more than 1 decimal places."]),
(123, ["Ensure that there are no more than 2 digits before the decimal point."]),
('2E+2', ["Ensure that there are no more than 2 digits before the decimal point."])
)
outputs = {
'1': '1.0',
'0': '0.0',
'1.09': '1.1',
'0.04': '0.0',
1: '1.0',
0: '0.0',
Decimal('1.0'): '1.0',
Decimal('0.0'): '0.0',
Decimal('1.09'): '1.1',
Decimal('0.04'): '0.0'
}
field = serializers.DecimalField(max_digits=3, decimal_places=1)
| TestDecimalField |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_defined_name03.py | {
"start": 315,
"end": 861
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("defined_name03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with defined names."""
workbook = Workbook(self.got_filename)
worksheet1 = workbook.add_worksheet("sheet One")
workbook.define_name("Sales", "='sheet One'!G1:H10")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | python__mypy | mypy/gclogger.py | {
"start": 96,
"end": 1639
} | class ____:
"""Context manager to log GC stats and overall time."""
def __enter__(self) -> GcLogger:
self.gc_start_time: float | None = None
self.gc_time = 0.0
self.gc_calls = 0
self.gc_collected = 0
self.gc_uncollectable = 0
gc.callbacks.append(self.gc_callback)
self.start_time = time.time()
return self
def gc_callback(self, phase: str, info: Mapping[str, int]) -> None:
if phase == "start":
assert self.gc_start_time is None, "Start phase out of sequence"
self.gc_start_time = time.time()
elif phase == "stop":
assert self.gc_start_time is not None, "Stop phase out of sequence"
self.gc_calls += 1
self.gc_time += time.time() - self.gc_start_time
self.gc_start_time = None
self.gc_collected += info["collected"]
self.gc_uncollectable += info["uncollectable"]
else:
assert False, f"Unrecognized gc phase ({phase!r})"
def __exit__(self, *args: object) -> None:
while self.gc_callback in gc.callbacks:
gc.callbacks.remove(self.gc_callback)
def get_stats(self) -> Mapping[str, float]:
end_time = time.time()
result = {
"gc_time": self.gc_time,
"gc_calls": self.gc_calls,
"gc_collected": self.gc_collected,
"gc_uncollectable": self.gc_uncollectable,
"build_time": end_time - self.start_time,
}
return result
| GcLogger |
python | langchain-ai__langchain | libs/langchain/langchain_classic/callbacks/streaming_stdout_final_only.py | {
"start": 261,
"end": 3506
} | class ____(StreamingStdOutCallbackHandler):
"""Callback handler for streaming in agents.
Only works with agents using LLMs that support streaming.
Only the final output of the agent will be streamed.
"""
def append_to_last_tokens(self, token: str) -> None:
"""Append token to the last tokens."""
self.last_tokens.append(token)
self.last_tokens_stripped.append(token.strip())
if len(self.last_tokens) > len(self.answer_prefix_tokens):
self.last_tokens.pop(0)
self.last_tokens_stripped.pop(0)
def check_if_answer_reached(self) -> bool:
"""Check if the answer has been reached."""
if self.strip_tokens:
return self.last_tokens_stripped == self.answer_prefix_tokens_stripped
return self.last_tokens == self.answer_prefix_tokens
def __init__(
self,
*,
answer_prefix_tokens: list[str] | None = None,
strip_tokens: bool = True,
stream_prefix: bool = False,
) -> None:
"""Instantiate FinalStreamingStdOutCallbackHandler.
Args:
answer_prefix_tokens: Token sequence that prefixes the answer.
Default is ["Final", "Answer", ":"]
strip_tokens: Ignore white spaces and new lines when comparing
answer_prefix_tokens to last tokens? (to determine if answer has been
reached)
stream_prefix: Should answer prefix itself also be streamed?
"""
super().__init__()
if answer_prefix_tokens is None:
self.answer_prefix_tokens = DEFAULT_ANSWER_PREFIX_TOKENS
else:
self.answer_prefix_tokens = answer_prefix_tokens
if strip_tokens:
self.answer_prefix_tokens_stripped = [
token.strip() for token in self.answer_prefix_tokens
]
else:
self.answer_prefix_tokens_stripped = self.answer_prefix_tokens
self.last_tokens = [""] * len(self.answer_prefix_tokens)
self.last_tokens_stripped = [""] * len(self.answer_prefix_tokens)
self.strip_tokens = strip_tokens
self.stream_prefix = stream_prefix
self.answer_reached = False
@override
def on_llm_start(
self,
serialized: dict[str, Any],
prompts: list[str],
**kwargs: Any,
) -> None:
"""Run when LLM starts running."""
self.answer_reached = False
@override
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
# Remember the last n tokens, where n = len(answer_prefix_tokens)
self.append_to_last_tokens(token)
# Check if the last n tokens match the answer_prefix_tokens list ...
if self.check_if_answer_reached():
self.answer_reached = True
if self.stream_prefix:
for t in self.last_tokens:
sys.stdout.write(t)
sys.stdout.flush()
return
# ... if yes, then print tokens from now on
if self.answer_reached:
sys.stdout.write(token)
sys.stdout.flush()
| FinalStreamingStdOutCallbackHandler |
python | coleifer__peewee | peewee.py | {
"start": 95405,
"end": 97387
} | class ____(object):
__slots__ = ('exceptions',)
def __init__(self, exceptions):
self.exceptions = exceptions
def __enter__(self): pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
# psycopg shits out a million cute error types. Try to catch em all.
if pg_errors is not None and exc_type.__name__ not in self.exceptions \
and issubclass(exc_type, pg_errors.Error):
exc_type = exc_type.__bases__[0]
elif pg3_errors is not None and \
exc_type.__name__ not in self.exceptions \
and issubclass(exc_type, pg3_errors.Error):
exc_type = exc_type.__bases__[0]
if exc_type.__name__ in self.exceptions:
new_type = self.exceptions[exc_type.__name__]
exc_args = exc_value.args
reraise(new_type, new_type(exc_value, *exc_args), traceback)
EXCEPTIONS = {
'ConstraintError': IntegrityError,
'DatabaseError': DatabaseError,
'DataError': DataError,
'IntegrityError': IntegrityError,
'InterfaceError': InterfaceError,
'InternalError': InternalError,
'NotSupportedError': NotSupportedError,
'OperationalError': OperationalError,
'ProgrammingError': ProgrammingError,
'TransactionRollbackError': OperationalError,
'UndefinedFunction': ProgrammingError,
'UniqueViolation': IntegrityError}
__exception_wrapper__ = ExceptionWrapper(EXCEPTIONS)
# DATABASE INTERFACE AND CONNECTION MANAGEMENT.
IndexMetadata = collections.namedtuple(
'IndexMetadata',
('name', 'sql', 'columns', 'unique', 'table'))
ColumnMetadata = collections.namedtuple(
'ColumnMetadata',
('name', 'data_type', 'null', 'primary_key', 'table', 'default'))
ForeignKeyMetadata = collections.namedtuple(
'ForeignKeyMetadata',
('column', 'dest_table', 'dest_column', 'table'))
ViewMetadata = collections.namedtuple('ViewMetadata', ('name', 'sql'))
| ExceptionWrapper |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/processors.py | {
"start": 32164,
"end": 34296
} | class ____(Processor):
"""
Processor that groups multiple other `Processor` objects, but exposes an
API as if it is one `Processor`.
"""
def __init__(self, processors: list[Processor]):
self.processors = processors
def apply_transformation(self, ti: TransformationInput) -> Transformation:
source_to_display_functions = [ti.source_to_display]
display_to_source_functions = []
fragments = ti.fragments
def source_to_display(i: int) -> int:
"""Translate x position from the buffer to the x position in the
processor fragments list."""
for f in source_to_display_functions:
i = f(i)
return i
for p in self.processors:
transformation = p.apply_transformation(
TransformationInput(
ti.buffer_control,
ti.document,
ti.lineno,
source_to_display,
fragments,
ti.width,
ti.height,
ti.get_line,
)
)
fragments = transformation.fragments
display_to_source_functions.append(transformation.display_to_source)
source_to_display_functions.append(transformation.source_to_display)
def display_to_source(i: int) -> int:
for f in reversed(display_to_source_functions):
i = f(i)
return i
# In the case of a nested _MergedProcessor, each processor wants to
# receive a 'source_to_display' function (as part of the
# TransformationInput) that has everything in the chain before
# included, because it can be called as part of the
# `apply_transformation` function. However, this first
# `source_to_display` should not be part of the output that we are
# returning. (This is the most consistent with `display_to_source`.)
del source_to_display_functions[:1]
return Transformation(fragments, source_to_display, display_to_source)
| _MergedProcessor |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI045.py | {
"start": 1079,
"end": 1170
} | class ____:
def __iter__(self) -> typing.Iterator[int]:
...
| TypingIteratorTReturn |
python | walkccc__LeetCode | solutions/2860. Happy Students/2860.py | {
"start": 0,
"end": 243
} | class ____:
def countWays(self, nums: list[int]) -> int:
return sum(a < i < b
for i, (a, b) in # i := the number of the selected numbers
enumerate(itertools.pairwise([-1] + sorted(nums) + [math.inf])))
| Solution |
python | django__django | tests/async/tests.py | {
"start": 769,
"end": 1035
} | class ____(SimpleTestCase):
"""A database connection cannot be used in an async context."""
async def test_get_async_connection(self):
with self.assertRaises(SynchronousOnlyOperation):
list(SimpleModel.objects.all())
| DatabaseConnectionTest |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/builder_pattern.py | {
"start": 284,
"end": 2617
} | class ____:
def __init__(self) -> None:
self._saved: Optional[str] = None
self._not_saved: Optional[str] = None
def set_saved(self, saved: str) -> "Builder":
self._saved = saved
return self
def set_not_saved(self, not_saved: str) -> "Builder":
self._not_saved = not_saved
return self
def async_save(self) -> None:
_test_sink(self._saved)
def set_saved_through_typevar(self: T, saved: str) -> T:
self._saved = saved
return self
def set_not_saved_through_typevar(self: T, not_saved: str) -> T:
self._not_saved = not_saved
return self
def return_self(self) -> "Builder":
return self
def set_saved_no_return(self, saved: str) -> None:
self._saved = saved
def test_no_issue():
builder = Builder()
builder.set_not_saved(_test_source()).set_saved("benign").async_save()
def test_issue():
builder = Builder()
builder.set_not_saved("benign").set_saved(_test_source()).async_save()
def test_no_issue_with_type_var():
builder = Builder()
builder.set_not_saved_through_typevar(_test_source()).set_saved_through_typevar(
"benign"
).async_save()
def test_issue_with_type_var():
builder = Builder()
builder.set_not_saved_through_typevar("benign").set_saved_through_typevar(
_test_source()
).async_save()
def test_chained_class_setter():
# TODO(T161085814): False negative due to return_self() returning self not
# being understood by the analysis. We need an alias analysis.
builder = Builder()
builder.return_self().set_saved_no_return(_test_source())
_test_sink(builder)
_test_sink(builder._saved)
def test_class_setter():
builder = Builder()
builder.set_saved_no_return(_test_source())
_test_sink(builder) # Issue.
_test_sink(builder._saved) # Issue.
def test_taint_update_receiver_declaration():
# TODO(T161085814): False Negative due to not passing around
# the fact that builder and return_self() are the same for the
# chained builder pattern. return_self and set_saved both
# have valid models.
builder = Builder()
builder.return_self().set_saved(_test_source())
_test_sink(builder)
_test_sink(builder._saved)
_test_sink(builder.return_self())
| Builder |
python | walkccc__LeetCode | solutions/3400. Maximum Number of Matching Indices After Right Shifts/3400.py | {
"start": 0,
"end": 323
} | class ____:
def maximumMatchingIndices(self, nums1: list[int], nums2: list[int]) -> int:
n = len(nums1)
ans = 0
for shift in range(n):
matches = 0
for i, num2 in enumerate(nums2):
if nums1[(i + shift) % n] == num2:
matches += 1
ans = max(ans, matches)
return ans
| Solution |
python | getsentry__sentry | src/sentry/rules/conditions/event_frequency.py | {
"start": 18909,
"end": 21915
} | class ____(BaseEventFrequencyCondition):
id = "sentry.rules.conditions.event_frequency.EventUniqueUserFrequencyCondition"
label = "The issue is seen by more than {value} users in {interval}"
def query_hook(
self,
event: GroupEvent,
start: datetime,
end: datetime,
environment_id: int,
) -> int:
totals: Mapping[int, int] = self.get_snuba_query_result(
tsdb_function=self.tsdb.get_distinct_counts_totals,
keys=[event.group_id],
group_id=event.group.id,
organization_id=event.group.project.organization_id,
model=get_issue_tsdb_user_group_model(event.group.issue_category),
start=start,
end=end,
environment_id=environment_id,
referrer_suffix="alert_event_uniq_user_frequency",
group_on_time=False,
)
return totals[event.group_id]
def batch_query_hook(
self,
group_ids: set[int],
start: datetime,
end: datetime,
environment_id: int,
group_on_time: bool = False,
) -> dict[int, int | float]:
batch_totals: dict[int, int | float] = defaultdict(int)
groups = Group.objects.filter(id__in=group_ids).values(
"id", "type", "project_id", "project__organization_id"
)
error_issue_ids, generic_issue_ids = self.get_error_and_generic_group_ids(groups)
organization_id = self.get_value_from_groups(groups, "project__organization_id")
if error_issue_ids and organization_id:
error_totals = self.get_chunked_result(
tsdb_function=self.tsdb.get_distinct_counts_totals,
model=get_issue_tsdb_user_group_model(GroupCategory.ERROR),
group_ids=error_issue_ids,
organization_id=organization_id,
start=start,
end=end,
environment_id=environment_id,
referrer_suffix="batch_alert_event_uniq_user_frequency",
group_on_time=group_on_time,
)
batch_totals.update(error_totals)
if generic_issue_ids and organization_id:
generic_totals = self.get_chunked_result(
tsdb_function=self.tsdb.get_distinct_counts_totals,
# this isn't necessarily performance, just any non-error category
model=get_issue_tsdb_user_group_model(GroupCategory.PERFORMANCE),
group_ids=generic_issue_ids,
organization_id=organization_id,
start=start,
end=end,
environment_id=environment_id,
referrer_suffix="batch_alert_event_uniq_user_frequency",
group_on_time=group_on_time,
)
batch_totals.update(generic_totals)
return batch_totals
def get_preview_aggregate(self) -> tuple[str, str]:
return "uniq", "user"
| EventUniqueUserFrequencyCondition |
python | psf__black | src/black/parsing.py | {
"start": 494,
"end": 3821
} | class ____(ValueError):
"""Raised when input source code fails all parse attempts."""
def get_grammars(target_versions: set[TargetVersion]) -> list[Grammar]:
if not target_versions:
# No target_version specified, so try all grammars.
return [
# Python 3.7-3.9
pygram.python_grammar_async_keywords,
# Python 3.0-3.6
pygram.python_grammar,
# Python 3.10+
pygram.python_grammar_soft_keywords,
]
grammars = []
# If we have to parse both, try to parse async as a keyword first
if not supports_feature(
target_versions, Feature.ASYNC_IDENTIFIERS
) and not supports_feature(target_versions, Feature.PATTERN_MATCHING):
# Python 3.7-3.9
grammars.append(pygram.python_grammar_async_keywords)
if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
# Python 3.0-3.6
grammars.append(pygram.python_grammar)
if any(Feature.PATTERN_MATCHING in VERSION_TO_FEATURES[v] for v in target_versions):
# Python 3.10+
grammars.append(pygram.python_grammar_soft_keywords)
# At least one of the above branches must have been taken, because every Python
# version has exactly one of the two 'ASYNC_*' flags
return grammars
def lib2to3_parse(
src_txt: str, target_versions: Collection[TargetVersion] = ()
) -> Node:
"""Given a string with source, return the lib2to3 Node."""
if not src_txt.endswith("\n"):
src_txt += "\n"
grammars = get_grammars(set(target_versions))
if target_versions:
max_tv = max(target_versions, key=lambda tv: tv.value)
tv_str = f" for target version {max_tv.pretty()}"
else:
tv_str = ""
errors = {}
for grammar in grammars:
drv = driver.Driver(grammar)
try:
result = drv.parse_string(src_txt, False)
break
except ParseError as pe:
lineno, column = pe.context[1]
lines = src_txt.splitlines()
try:
faulty_line = lines[lineno - 1]
except IndexError:
faulty_line = "<line number missing in source>"
errors[grammar.version] = InvalidInput(
f"Cannot parse{tv_str}: {lineno}:{column}: {faulty_line}"
)
except TokenError as te:
# In edge cases these are raised; and typically don't have a "faulty_line".
lineno, column = te.args[1]
errors[grammar.version] = InvalidInput(
f"Cannot parse{tv_str}: {lineno}:{column}: {te.args[0]}"
)
else:
# Choose the latest version when raising the actual parsing error.
assert len(errors) >= 1
exc = errors[max(errors)]
raise exc from None
if isinstance(result, Leaf):
result = Node(syms.file_input, [result])
return result
def matches_grammar(src_txt: str, grammar: Grammar) -> bool:
drv = driver.Driver(grammar)
try:
drv.parse_string(src_txt, False)
except (ParseError, TokenError, IndentationError):
return False
else:
return True
def lib2to3_unparse(node: Node) -> str:
"""Given a lib2to3 node, return its string representation."""
code = str(node)
return code
| InvalidInput |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 20627,
"end": 21239
} | class ____(PrefectBaseModel):
"""A collection of filters for common objects"""
flows: FlowFilter = Field(
default_factory=FlowFilter, description="Filters that apply to flows"
)
flow_runs: FlowRunFilter = Field(
default_factory=FlowRunFilter, description="Filters that apply to flow runs"
)
task_runs: TaskRunFilter = Field(
default_factory=TaskRunFilter, description="Filters that apply to task runs"
)
deployments: DeploymentFilter = Field(
default_factory=DeploymentFilter,
description="Filters that apply to deployments",
)
| FilterSet |
python | eth-brownie__brownie | brownie/network/state.py | {
"start": 1511,
"end": 6981
} | class ____(metaclass=_Singleton):
"""List-like singleton container that contains TransactionReceipt objects.
Whenever a transaction is broadcast, the TransactionReceipt is automatically
added to this container."""
def __init__(self) -> None:
self._list: List[TransactionReceipt] = []
self.gas_profile: Final[Dict[str, Dict[str, int]]] = {}
_revert_register(self)
def __repr__(self) -> str:
if CONFIG.argv["cli"] == "console":
return str(self._list)
return super().__repr__()
def __getattribute__(self, name: str) -> Any:
# filter dropped transactions prior to attribute access
items: List[TransactionReceipt] = super().__getattribute__("_list")
items = [i for i in items if i.status != -2]
setattr(self, "_list", items)
return super().__getattribute__(name)
def __bool__(self) -> bool:
return bool(self._list)
def __contains__(self, item: Any) -> bool:
return item in self._list
def __iter__(self) -> Iterator[TransactionReceipt]:
return iter(self._list)
def __getitem__(self, key: int) -> TransactionReceipt:
return self._list[key]
def __len__(self) -> int:
return len(self._list)
def _reset(self) -> None:
self._list.clear()
def _revert(self, height: BlockNumber) -> None:
self._list = [i for i in self._list if i.block_number <= height] # type: ignore [operator]
def _add_tx(self, tx: TransactionReceipt) -> None:
if tx not in self._list:
self._list.append(tx)
def clear(self, only_confirmed: bool = False) -> None:
"""
Clear the list.
Arguments
---------
only_confirmed : bool, optional
If True, transactions which are still marked as pending will not be removed.
"""
if only_confirmed:
self._list = [i for i in self._list if i.status == -1]
else:
self._list.clear()
def copy(self) -> List[TransactionReceipt]:
"""Returns a shallow copy of the object as a list"""
return self._list.copy()
def filter(self, key: Optional[Callable] = None, **kwargs: Any) -> List[TransactionReceipt]:
"""
Return a filtered list of transactions.
Arguments
---------
key : Callable, optional
An optional function to filter with. It should expect one argument and return
True or False.
Keyword Arguments
-----------------
**kwargs : Any
Names and expected values for TransactionReceipt attributes.
Returns
-------
List
A filtered list of TransactionReceipt objects.
"""
result = (i for i in self._list if all(getattr(i, k) == v for k, v in kwargs.items()))
return list(result if key is None else filter(key, result))
def wait(self, key: Optional[Callable] = None, **kwargs: Any) -> None:
"""
Wait for pending transactions to confirm.
This method iterates over a list of transactions generated by `TxHistory.filter`,
waiting until each transaction has confirmed. If no arguments are given, all
transactions within the container are used.
Arguments
---------
key : Callable, optional
An optional function to filter with. It should expect one argument and return
True or False.
Keyword Arguments
-----------------
**kwargs : Any
Names and expected values for TransactionReceipt attributes.
"""
while True:
pending = next(iter(self.filter(key, status=-1, **kwargs)), None)
if pending is None:
return
pending._confirmed.wait()
def from_sender(self, account: str) -> List[TransactionReceipt]:
"""Returns a list of transactions where the sender is account"""
return [i for i in self._list if i.sender == account]
def to_receiver(self, account: str) -> List[TransactionReceipt]:
"""Returns a list of transactions where the receiver is account"""
return [i for i in self._list if i.receiver == account]
def of_address(self, account: str) -> List[TransactionReceipt]:
"""Returns a list of transactions where account is the sender or receiver"""
return [i for i in self._list if i.receiver == account or i.sender == account]
def _gas(self, fn_name: str, gas_used: int, is_success: bool) -> None:
gas = self.gas_profile.setdefault(fn_name, {})
if not gas:
gas.update(
avg=gas_used, high=gas_used, low=gas_used, count=1, count_success=0, avg_success=0
)
if is_success:
gas["count_success"] = 1
gas["avg_success"] = gas_used
return
gas.update(
avg=(gas["avg"] * gas["count"] + gas_used) // (gas["count"] + 1),
high=max(gas["high"], gas_used),
low=min(gas["low"], gas_used),
)
gas["count"] += 1
if is_success:
count = gas["count_success"]
gas["count_success"] += 1
if not gas["avg_success"]:
gas["avg_success"] = gas_used
else:
avg = gas["avg_success"]
gas["avg_success"] = (avg * count + gas_used) // (count + 1)
@final
| TxHistory |
python | openai__openai-python | src/openai/types/eval_create_params.py | {
"start": 6371,
"end": 6703
} | class ____(ScoreModelGraderParam, total=False):
pass_threshold: float
"""The threshold for the score."""
TestingCriterion: TypeAlias = Union[
TestingCriterionLabelModel,
StringCheckGraderParam,
TestingCriterionTextSimilarity,
TestingCriterionPython,
TestingCriterionScoreModel,
]
| TestingCriterionScoreModel |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 65379,
"end": 65626
} | class ____(BaseModel):
"""
XCom Collection serializer for responses.
"""
xcom_entries: Annotated[list[XComResponse], Field(title="Xcom Entries")]
total_entries: Annotated[int, Field(title="Total Entries")]
| XComCollectionResponse |