language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/1600-1699/1698.Number of Distinct Substrings in a String/Solution.py | {
"start": 0,
"end": 155
} | class ____:
def countDistinct(self, s: str) -> int:
n = len(s)
return len({s[i:j] for i in range(n) for j in range(i + 1, n + 1)})
| Solution |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 79618,
"end": 82886
} | class ____(Response):
"""
Response of events.get_multi_task_plots endpoint.
:param plots: Plots mapping (keyed by task name)
:type plots: dict
:param returned: Number of results returned
:type returned: int
:param total: Total number of results available for this query
:type total: float
:param scroll_id: Scroll ID for getting more results
:type scroll_id: str
"""
_service = "events"
_action = "get_multi_task_plots"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {
"plots": {
"description": "Plots mapping (keyed by task name)",
"type": ["object", "null"],
},
"returned": {
"description": "Number of results returned",
"type": ["integer", "null"],
},
"scroll_id": {
"description": "Scroll ID for getting more results",
"type": ["string", "null"],
},
"total": {
"description": "Total number of results available for this query",
"type": ["number", "null"],
},
},
"type": "object",
}
def __init__(
self,
plots: Optional[dict] = None,
returned: Optional[int] = None,
total: Optional[float] = None,
scroll_id: Optional[str] = None,
**kwargs: Any
) -> None:
super(GetMultiTaskPlotsResponse, self).__init__(**kwargs)
self.plots = plots
self.returned = returned
self.total = total
self.scroll_id = scroll_id
@schema_property("plots")
def plots(self) -> Optional[dict]:
return self._property_plots
@plots.setter
def plots(self, value: Optional[dict]) -> None:
if value is None:
self._property_plots = None
return
self.assert_isinstance(value, "plots", (dict,))
self._property_plots = value
@schema_property("returned")
def returned(self) -> Optional[int]:
return self._property_returned
@returned.setter
def returned(self, value: Optional[int]) -> None:
if value is None:
self._property_returned = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "returned", six.integer_types)
self._property_returned = value
@schema_property("total")
def total(self) -> Optional[float]:
return self._property_total
@total.setter
def total(self, value: Optional[float]) -> None:
if value is None:
self._property_total = None
return
self.assert_isinstance(value, "total", six.integer_types + (float,))
self._property_total = value
@schema_property("scroll_id")
def scroll_id(self) -> Optional[str]:
return self._property_scroll_id
@scroll_id.setter
def scroll_id(self, value: Optional[str]) -> None:
if value is None:
self._property_scroll_id = None
return
self.assert_isinstance(value, "scroll_id", six.string_types)
self._property_scroll_id = value
| GetMultiTaskPlotsResponse |
python | tensorflow__tensorflow | tensorflow/python/data/ops/multi_device_iterator_ops.py | {
"start": 9461,
"end": 15871
} | class ____:
"""An iterator over multiple devices."""
def __init__(self,
dataset,
devices,
max_buffer_size=1,
prefetch_buffer_size=1,
source_device="/cpu:0"):
"""Constructs a MultiDeviceIterator.
Args:
dataset: The input dataset to be iterated over.
devices: The list of devices to fetch data to.
max_buffer_size: Maximum size of the host side per device buffer to keep.
prefetch_buffer_size: if > 0, then we setup a buffer on each device to
prefetch into.
source_device: The host device to place the `dataset` on. In order to
prevent deadlocks, if the prefetch_buffer_size is greater than the
max_buffer_size, we set the max_buffer_size to prefetch_buffer_size.
"""
options = options_lib.Options()
options.experimental_distribute.num_devices = len(devices)
# If `prefetch_buffer_size` is 0, we turn off the `inject_prefetch`
# optimization to prevent potentially introducing asynchrony.
if prefetch_buffer_size == 0:
options.experimental_optimization.inject_prefetch = False
dataset = dataset.with_options(options)
self._dataset = dataset._apply_debug_options() # pylint: disable=protected-access
self._experimental_slack = dataset.options().experimental_slack
self._devices = devices
self._source_device = source_device
self._source_device_tensor = ops.convert_to_tensor(source_device)
self._max_buffer_size = max_buffer_size
self._prefetch_buffer_size = prefetch_buffer_size
if self._prefetch_buffer_size > self._max_buffer_size:
self._max_buffer_size = self._prefetch_buffer_size
# Create the MultiDeviceIterator.
with ops.device(self._source_device):
# TODO(b/121378567): Get rid of this shared_name hack.
shared_name = ""
if context.executing_eagerly():
shared_name = context.anonymous_name()
self._multi_device_iterator_resource = (
gen_dataset_ops.multi_device_iterator(
devices=self._devices,
shared_name=shared_name,
container="",
**self._dataset._flat_structure)) # pylint: disable=protected-access
if context.executing_eagerly():
# Delete the resource when this object is deleted
self._resource_deleter = resource_variable_ops.EagerResourceDeleter(
handle=self._multi_device_iterator_resource,
handle_device=self._source_device)
# The incarnation ID is used to ensure consistency between the per-device
# iterators and the multi-device iterator.
self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
self._dataset._variant_tensor, # pylint: disable=protected-access
self._multi_device_iterator_resource,
max_buffer_size=self._max_buffer_size)
self._prototype_device_datasets = []
for i, device in enumerate(self._devices):
with ops.device(device):
ds = _PerDeviceGenerator(
i,
self._multi_device_iterator_resource,
self._incarnation_id,
self._source_device_tensor,
self._dataset.element_spec,
iterator_is_anonymous=False)
self._prototype_device_datasets.append(ds)
# TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
# initialize the device side of the pipeline. This would allow the
# MultiDeviceIterator to choose, for example, to move some transformations
# into the device side from its input. It might be useful in rewriting.
# Create the per device iterators.
self._device_iterators = []
for i, device in enumerate(self._devices):
with ops.device(device):
ds = _create_device_dataset(self._prototype_device_datasets[i],
self._incarnation_id,
self._prefetch_buffer_size,
self._experimental_slack)
if context.executing_eagerly():
self._device_iterators.append(dataset_ops.make_one_shot_iterator(ds))
else:
self._device_iterators.append(
dataset_ops.make_initializable_iterator(ds))
if not context.executing_eagerly():
device_iterator_initializers = [
iterator.initializer for iterator in self._device_iterators
]
self._initializer = control_flow_ops.group(*device_iterator_initializers)
def get_next(self, device=None):
"""Returns the next element given a `device`, else returns all in a list."""
if device is not None:
index = self._devices.index(device)
return self._device_iterators[index].get_next()
result = []
for i, device in enumerate(self._devices):
with ops.device(device):
result.append(self._device_iterators[i].get_next())
return result
def get_next_as_optional(self):
result = []
for i, device in enumerate(self._devices):
with ops.device(device):
result.append(self._device_iterators[i].get_next_as_optional())
return result
@property
def initializer(self):
if context.executing_eagerly():
return control_flow_ops.no_op()
return self._initializer
def _eager_reset(self):
"""Resets the MultiDeviceIterator in eager mode."""
if not ops.executing_eagerly_outside_functions():
raise ValueError(
"Resetting a multi-device iterator is only supported in the eager "
"mode.")
# pylint: disable=protected-access
self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
self._dataset._variant_tensor,
self._multi_device_iterator_resource,
max_buffer_size=self._max_buffer_size)
for i, device in enumerate(self._devices):
with ops.device(device):
ds = _create_device_dataset(self._prototype_device_datasets[i],
self._incarnation_id,
self._prefetch_buffer_size,
self._experimental_slack)
# Reset the device iterator resources with the new dataset.
ds_variant = ds._variant_tensor
gen_dataset_ops.make_iterator(
ds_variant, self._device_iterators[i]._iterator_resource)
@property
def element_spec(self):
return self._dataset.element_spec
| MultiDeviceIterator |
python | sympy__sympy | sympy/assumptions/predicates/order.py | {
"start": 2499,
"end": 3668
} | class ____(Predicate):
"""
Nonzero real number predicate.
Explanation
===========
``ask(Q.nonzero(x))`` is true iff ``x`` is real and ``x`` is not zero. Note in
particular that ``Q.nonzero(x)`` is false if ``x`` is not real. Use
``~Q.zero(x)`` if you want the negation of being zero without any real
assumptions.
A few important facts about nonzero numbers:
- ``Q.nonzero`` is logically equivalent to ``Q.positive | Q.negative``.
- See the documentation of ``Q.real`` for more information about
related facts.
Examples
========
>>> from sympy import Q, ask, symbols, I, oo
>>> x = symbols('x')
>>> print(ask(Q.nonzero(x), ~Q.zero(x)))
None
>>> ask(Q.nonzero(x), Q.positive(x))
True
>>> ask(Q.nonzero(x), Q.zero(x))
False
>>> ask(Q.nonzero(0))
False
>>> ask(Q.nonzero(I))
False
>>> ask(~Q.zero(I))
True
>>> ask(Q.nonzero(oo))
False
"""
name = 'nonzero'
handler = Dispatcher(
"NonZeroHandler",
doc=("Handler for key 'nonzero'. Test that an expression is not identically"
" zero.")
)
| NonZeroPredicate |
python | doocs__leetcode | solution/0100-0199/0101.Symmetric Tree/Solution.py | {
"start": 192,
"end": 633
} | class ____:
def isSymmetric(self, root: Optional[TreeNode]) -> bool:
def dfs(root1: Optional[TreeNode], root2: Optional[TreeNode]) -> bool:
if root1 == root2:
return True
if root1 is None or root2 is None or root1.val != root2.val:
return False
return dfs(root1.left, root2.right) and dfs(root1.right, root2.left)
return dfs(root.left, root.right)
| Solution |
python | getsentry__sentry | src/sentry/api/endpoints/custom_rules.py | {
"start": 3180,
"end": 11605
} | class ____(OrganizationEndpoint):
publish_status = {
"POST": ApiPublishStatus.EXPERIMENTAL,
"GET": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.TELEMETRY_EXPERIENCE
permission_classes = (CustomRulePermission,)
def post(self, request: Request, organization: Organization) -> Response:
serializer = CustomRulesInputSerializer(data=request.data)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
query = serializer.validated_data["query"]
project_ids = serializer.validated_data.get("projects")
# project-level permission check
self.get_projects(request, organization, project_ids=set(project_ids))
try:
condition = get_rule_condition(query)
# for now delta it is fixed at 2 days (maybe in the future will base it on the query period)
delta = timedelta(days=2)
now = datetime.now(tz=timezone.utc)
start = now
end = now + delta
rule = CustomDynamicSamplingRule.update_or_create(
condition=condition,
start=start,
end=end,
project_ids=project_ids,
organization_id=organization.id,
num_samples=NUM_SAMPLES_PER_CUSTOM_RULE,
sample_rate=1.0,
query=query,
created_by_id=request.user.id,
)
# schedule update for affected project configs
_schedule_invalidate_project_configs(organization, project_ids)
return _rule_to_response(rule)
except UnsupportedSearchQuery as e:
return Response({"query": [e.error_code]}, status=400)
except InvalidSearchQuery as e:
return Response({"query": [str(e)]}, status=400)
except DatabaseError:
return Response(
{"projects": ["Could not save rule, probably wrong project ids"]}, status=400
)
except TooManyRules:
return Response(
{
"error": [
"Too many investigation rules active for this organization."
"Wait until some expire or delete some rules."
]
},
status=429,
)
except ValueError as e:
return Response({"query": ["Could not convert to rule", str(e)]}, status=400)
def get(self, request: Request, organization: Organization) -> Response:
requested_projects = request.GET.getlist("project")
query = request.GET.get("query")
try:
requested_projects_ids = [int(project_id) for project_id in requested_projects]
requested_projects_ids = _clean_project_list(requested_projects_ids)
except ValueError:
return Response({"projects": ["Invalid project id"]}, status=400)
# project-level permission check
self.get_projects(request, organization, project_ids=set(requested_projects_ids))
if requested_projects_ids:
org_rule = False
invalid_projects = []
available_projects = {
p.id for p in Project.objects.get_many_from_cache(requested_projects_ids)
}
for project_id in requested_projects_ids:
if project_id not in available_projects:
invalid_projects.append(f"invalid project id: {project_id}")
if invalid_projects:
raise serializers.ValidationError({"projects": invalid_projects})
else:
# no project specified (it is an org rule)
org_rule = True
try:
condition = get_rule_condition(query)
except UnsupportedSearchQuery as e:
return Response({"query": [e.error_code]}, status=400)
except InvalidSearchQuery as e:
return Response({"query": [str(e)]}, status=400)
except ValueError as e:
return Response({"query": ["Could not convert to rule", str(e)]}, status=400)
rule = CustomDynamicSamplingRule.get_rule_for_org(
condition, organization.id, requested_projects_ids
)
if rule is None:
return Response(status=204) # no rule found, nothing to return
# we have a rule, check to see if the projects match
if rule.is_org_level:
# a rule org covers all projects
return _rule_to_response(rule)
if not rule.is_org_level and org_rule:
# we need an org rule, and we have a simple rule return not found
return Response(status=204)
# project rule request and project rule found # see if we have all projects
available_projects = {p.id for p in rule.projects.all()}
for project_id in requested_projects_ids:
if project_id not in available_projects:
return Response(status=204)
# the rule covers all projects
return _rule_to_response(rule)
def _rule_to_response(rule: CustomDynamicSamplingRule) -> Response:
response_data = {
"ruleId": rule.external_rule_id,
"condition": orjson.loads(rule.condition),
"startDate": rule.start_date.strftime(CUSTOM_RULE_DATE_FORMAT),
"endDate": rule.end_date.strftime(CUSTOM_RULE_DATE_FORMAT),
"numSamples": rule.num_samples,
"sampleRate": rule.sample_rate,
"dateAdded": rule.date_added.strftime(CUSTOM_RULE_DATE_FORMAT),
"projects": [project.id for project in rule.projects.all()],
"orgId": rule.organization.id,
}
return Response(response_data, status=200)
def get_rule_condition(query: str | None) -> RuleCondition:
"""
Gets the rule condition given a query.
The rule returned, is in the format which is understood by Relay.
"""
try:
if not query:
raise UnsupportedSearchQuery(UnsupportedSearchQueryReason.NOT_TRANSACTION_QUERY)
try:
# First we parse the query.
tokens = parse_search_query(
query=query, removed_blacklisted=True, force_transaction_event_type=True
)
except ValueError:
raise UnsupportedSearchQuery(UnsupportedSearchQueryReason.NOT_TRANSACTION_QUERY)
# In case there are no tokens anymore, we will return a condition that always matches.
if not tokens:
return {"op": "and", "inner": []}
# Second we convert it to the Relay's internal rules format.
converter = SearchQueryConverter(tokens)
condition = converter.convert()
return condition
except UnsupportedSearchQuery as unsupported_ex:
# log unsupported queries with a different message so that
# we can differentiate them from other errors
with sentry_sdk.isolation_scope() as scope:
scope.set_extra("query", query)
scope.set_extra("error", unsupported_ex)
message = "Unsupported search query"
sentry_sdk.capture_message(message, level="warning")
raise
except Exception as ex:
with sentry_sdk.isolation_scope() as scope:
scope.set_extra("query", query)
scope.set_extra("error", ex)
message = "Could not convert query to custom dynamic sampling rule"
sentry_sdk.capture_message(message, level="warning")
raise
def _clean_project_list(project_ids: list[int]) -> list[int]:
if len(project_ids) == 1 and project_ids[0] == -1:
# special case for all projects convention ( sends a project id of -1)
return []
return project_ids
def _schedule_invalidate_project_configs(organization: Organization, project_ids: list[int]):
"""
Schedule a task to update the project configs for the given projects
"""
if not project_ids:
# an organization rule, update all projects from the org
schedule_invalidate_project_config(
trigger="dynamic_sampling:custom_rule_upsert",
organization_id=organization.id,
)
else:
# update the given projects
for project_id in project_ids:
schedule_invalidate_project_config(
trigger="dynamic_sampling:custom_rule_upsert",
project_id=project_id,
)
| CustomRulesEndpoint |
python | numpy__numpy | tools/swig/test/testTensor.py | {
"start": 12857,
"end": 13157
} | class ____(TensorTestCase):
def __init__(self, methodName="runTest"):
TensorTestCase.__init__(self, methodName)
self.typeStr = "int"
self.typeCode = "i"
self.result = int(self.result)
######################################################################
| intTestCase |
python | pytorch__pytorch | torch/testing/_internal/common_utils.py | {
"start": 116549,
"end": 117471
} | class ____(TensorLikePair):
"""Pair for :class:`torch.storage.TypedStorage` inputs."""
def __init__(self, actual, expected, *, rtol_override=0.0, atol_override=0.0, **other_parameters):
self._check_inputs_isinstance(actual, expected, cls=torch.storage.TypedStorage)
super().__init__(actual, expected, **other_parameters)
self.rtol = max(self.rtol, rtol_override)
self.atol = max(self.atol, atol_override)
def _to_tensor(self, typed_storage):
return torch.tensor(
typed_storage._untyped_storage,
dtype={
torch.quint8: torch.uint8,
torch.quint4x2: torch.uint8,
torch.quint2x4: torch.uint8,
torch.qint32: torch.int32,
torch.qint8: torch.int8
}.get(typed_storage.dtype, typed_storage.dtype),
device=typed_storage.device,
)
| TypedStoragePair |
python | huggingface__transformers | src/transformers/models/efficientloftr/modeling_efficientloftr.py | {
"start": 10370,
"end": 11010
} | class ____(nn.Module):
def __init__(self, config: EfficientLoFTRConfig):
super().__init__()
self.stages = nn.ModuleList([])
for stage_idx in range(len(config.stage_stride)):
stage = EfficientLoFTRRepVGGStage(config, stage_idx)
self.stages.append(stage)
def forward(self, hidden_states: torch.Tensor) -> list[torch.Tensor]:
outputs = []
for stage in self.stages:
hidden_states = stage(hidden_states)
outputs.append(hidden_states)
# Exclude first stage in outputs
outputs = outputs[1:]
return outputs
| EfficientLoFTRepVGG |
python | getsentry__sentry | src/sentry/models/options/project_option.py | {
"start": 2928,
"end": 7751
} | class ____(OptionManager["ProjectOption"]):
def get_value_bulk(self, instances: Sequence[Project], key: str) -> Mapping[Project, Any]:
instance_map = {i.id: i for i in instances}
queryset = self.filter(project__in=instances, key=key)
result = {i: None for i in instances}
for obj in queryset:
result[instance_map[obj.project_id]] = obj.value
return result
def get_value_bulk_id(self, ids: Sequence[int], key: str) -> Mapping[int, Any]:
queryset = self.filter(project_id__in=ids, key=key)
result = {i: None for i in ids}
for obj in queryset:
result[obj.project_id] = obj.value
return result
def get_value(
self,
project: int | Project,
key: str,
default: Any | None = None,
validate: Callable[[object], bool] | None = None,
) -> Any:
result = self.get_all_values(project)
if key in result:
if validate is None or validate(result[key]):
return result[key]
if default is None:
well_known_key = projectoptions.lookup_well_known_key(key)
if well_known_key is not None:
return well_known_key.get_default(project)
return default
def unset_value(self, project: Project, key: str) -> None:
self.filter(project=project, key=key).delete()
self.reload_cache(project.id, "projectoption.unset_value", key)
def set_value(
self, project: int | Project, key: str, value: Any, reload_cache: bool = True
) -> bool:
"""
Sets a project option for the given project.
:param reload_cache: Invalidate the project config and reload the
cache only if the value has changed and `reload_cache` is `True`.
Do not call this with `False` unless you know for sure that it's fine
to keep the cached project config.
"""
if isinstance(project, models.Model):
project_id = project.id
else:
project_id = project
is_value_changed = False
with transaction.atomic(router.db_for_write(ProjectOption)):
# select_for_update lock rows until the end of the transaction
obj, created = self.select_for_update().get_or_create(
project_id=project_id, key=key, defaults={"value": value}
)
if created:
is_value_changed = True
elif obj.value != value:
# update the value via ORM update() to avoid post save signals which
# might cause cache reload when it is not needed (e.g. post_save signal)
self.filter(id=obj.id).update(value=value)
is_value_changed = True
if reload_cache and is_value_changed:
# invalidate the cached project config only if the value has changed,
# and reload_cache is set to True
self.reload_cache(project_id, "projectoption.set_value", key)
return is_value_changed
def get_all_values(self, project: Project | int) -> Mapping[str, Any]:
if isinstance(project, models.Model):
project_id = project.id
else:
project_id = project
cache_key = self._make_key(project_id)
if cache_key not in self._option_cache:
result = cache.get(cache_key)
if result is None:
self.reload_cache(project_id, "projectoption.get_all_values")
else:
self._option_cache[cache_key] = result
return self._option_cache.get(cache_key, {})
def reload_cache(
self, project_id: int, update_reason: str, option_key: str | None = None
) -> Mapping[str, Any]:
from sentry.tasks.relay import schedule_invalidate_project_config
if update_reason != "projectoption.get_all_values":
schedule_invalidate_project_config(
project_id=project_id, trigger=update_reason, trigger_details=option_key
)
cache_key = self._make_key(project_id)
result = {i.key: i.value for i in self.filter(project=project_id)}
cache.set(cache_key, result)
self._option_cache[cache_key] = result
return result
def post_save(self, *, instance: ProjectOption, created: bool, **kwargs: object) -> None:
self.reload_cache(instance.project_id, "projectoption.post_save", option_key=instance.key)
def post_delete(self, instance: ProjectOption, **kwargs: Any) -> None:
self.reload_cache(instance.project_id, "projectoption.post_delete", option_key=instance.key)
def isset(self, project: Project, key: str) -> bool:
return self.get_value(project, key, default=Ellipsis) is not Ellipsis
@region_silo_model
| ProjectOptionManager |
python | facelessuser__pymdown-extensions | tests/test_extensions/test_b64.py | {
"start": 120,
"end": 3238
} | class ____(util.MdCase):
"""Test Base 64."""
extension = ['pymdownx.b64']
extension_configs = {
"pymdownx.b64": {
"base_path": CURRENT_DIR
}
}
def test_in_script(self):
"""Test that we do not parse image in script."""
self.check_markdown(
r'''
<script>
var str = '<img alt="picture" src="_assets/bg.png" />'
</script>
''',
r'''
<script>
var str = '<img alt="picture" src="_assets/bg.png" />'
</script>
''',
True
)
def test_comment(self):
"""Don't convert image in comment."""
self.check_markdown(
'<!--  -->',
'<!--  -->',
True
)
def test_relative_path(self):
"""Test relative path."""
self.check_markdown(
'',
'<p><img alt="picture" src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHgAAAB4CAMAAAAOusbgAAAAqFBMVEU5eZdll66Yucjd7h7d7h7d7h5ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll66YuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYucgXVONTAAABDklEQVRoge3XR3ICQRBE0b/Ce2+FAAnkLeb+N9MRNBvFR0H2vuJFT3dX5VAqV6q1eqPZane6vf5gOBpPprP5YnnD72t1u95s7+53+4fHp+eX17f3j8+v78PxdC5Qi+SWkNwykltBcqtIbg3JrSO5DSS3ieS2kNw2kttBcrtIbg/J7SO5AyR3iOSOkNwxkjtBcqdI7gzJnSO5CyR3WaD0T1xrv3Hjxo0bN27cuHHjxo0bN27c63Sx9ov1nbHOF+teYd1nrHeE9X6x+gZWv8Lqk1j9GWsuYM0jrDmINX+x5j5W3sDKOVj5CivXYeVJrByLlZ+xcnuB0n/5vxA3bty4cePGjRs3bty4cePGvUj3B2JzyvcNRmTGAAAAAElFTkSuQmCC" /></p>', # noqa: E501
True
)
def test_cache_busting(self):
"""Test we can convert cache busted images."""
self.check_markdown(
'',
'<p><img alt="picture with cache busting" src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAHgAAAB4CAMAAAAOusbgAAAAqFBMVEU5eZdll66Yucjd7h7d7h7d7h5ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll65ll66YuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYuciYucgXVONTAAABDklEQVRoge3XR3ICQRBE0b/Ce2+FAAnkLeb+N9MRNBvFR0H2vuJFT3dX5VAqV6q1eqPZane6vf5gOBpPprP5YnnD72t1u95s7+53+4fHp+eX17f3j8+v78PxdC5Qi+SWkNwykltBcqtIbg3JrSO5DSS3ieS2kNw2kttBcrtIbg/J7SO5AyR3iOSOkNwxkjtBcqdI7gzJnSO5CyR3WaD0T1xrv3Hjxo0bN27cuHHjxo0bN27c63Sx9ov1nbHOF+teYd1nrHeE9X6x+gZWv8Lqk1j9GWsuYM0jrDmINX+x5j5W3sDKOVj5CivXYeVJrByLlZ+xcnuB0n/5vxA3bty4cePGjRs3bty4cePGvUj3B2JzyvcNRmTGAAAAAElFTkSuQmCC" /></p>', # noqa: E501
True
)
def test_does_not_exist(self):
"""Test handling of file that does not exist."""
self.check_markdown(
'',
'<p><img alt="Some windows path link" src="file:///c:/does_not_exist.png" /></p>',
True
)
| TestB64 |
python | huggingface__transformers | src/transformers/cache_utils.py | {
"start": 62095,
"end": 62500
} | class ____(StaticCache):
def __init__(self, config: PreTrainedConfig, max_cache_len: int, *args, **kwargs):
logger.warning_once(
"`HybridCache` is deprecated and will be removed in version v4.59 "
"Use `StaticCache(...)` instead which will correctly infer the type of each layer."
)
super().__init__(config=config, max_cache_len=max_cache_len)
| HybridCache |
python | numba__numba | numba/tests/test_storeslice.py | {
"start": 394,
"end": 1940
} | class ____(TestCase):
def test_usecase(self):
n = 10
obs_got = np.zeros(n)
obs_expected = obs_got.copy()
cfunc = njit((types.float64[:], types.intp))(usecase)
cfunc(obs_got, n)
usecase(obs_expected, n)
self.assertPreciseEqual(obs_got, obs_expected)
def test_array_slice_setitem(self):
n = 10
argtys = (types.int64[:], types.int64, types.int64, types.int64,
types.int64)
cfunc = njit(argtys)(setitem_slice)
a = np.arange(n, dtype=np.int64)
# tuple is (start, stop, step, scalar)
tests = ((2, 6, 1, 7),
(2, 6, -1, 7),
(-2, len(a), 2, 77),
(-2, 2 * len(a), 2, 77),
(-2, -6, 3, 88),
(-2, -6, -3, 9999),
(-6, -2, 4, 88),
(-6, -2, -4, 88),
(16, 20, 2, 88),
(16, 20, -2, 88),
)
for start, stop, step, scalar in tests:
a = np.arange(n, dtype=np.int64)
b = np.arange(n, dtype=np.int64)
cfunc(a, start, stop, step, scalar)
setitem_slice(b, start, stop, step, scalar)
self.assertPreciseEqual(a, b)
# test if step = 0
a = np.arange(n, dtype=np.int64)
with self.assertRaises(ValueError) as cm:
cfunc(a, 3, 6, 0, 88)
self.assertEqual(str(cm.exception), "slice step cannot be zero")
if __name__ == '__main__':
unittest.main()
| TestStoreSlice |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 482625,
"end": 483041
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("BypassPullRequestAllowance", graphql_name="node")
"""The item at the end of the edge."""
| BypassPullRequestAllowanceEdge |
python | spyder-ide__spyder | external-deps/python-lsp-server/pylsp/python_lsp.py | {
"start": 868,
"end": 5855
} | class ____(socketserver.StreamRequestHandler):
"""A wrapper class that is used to construct a custom handler class."""
delegate = None
def setup(self) -> None:
super().setup()
self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile)
def handle(self) -> None:
try:
self.delegate.start()
except OSError as e:
if os.name == "nt":
# Catch and pass on ConnectionResetError when parent process
# dies
if isinstance(e, WindowsError) and e.winerror == 10054:
pass
self.SHUTDOWN_CALL()
def start_tcp_lang_server(bind_addr, port, check_parent_process, handler_class) -> None:
if not issubclass(handler_class, PythonLSPServer):
raise ValueError("Handler class must be an instance of PythonLSPServer")
def shutdown_server(check_parent_process, *args):
if check_parent_process:
log.debug("Shutting down server")
# Shutdown call must be done on a thread, to prevent deadlocks
stop_thread = threading.Thread(target=server.shutdown)
stop_thread.start()
# Construct a custom wrapper class around the user's handler_class
wrapper_class = type(
handler_class.__name__ + "Handler",
(_StreamHandlerWrapper,),
{
"DELEGATE_CLASS": partial(
handler_class, check_parent_process=check_parent_process
),
"SHUTDOWN_CALL": partial(shutdown_server, check_parent_process),
},
)
server = socketserver.TCPServer(
(bind_addr, port), wrapper_class, bind_and_activate=False
)
server.allow_reuse_address = True
try:
server.server_bind()
server.server_activate()
log.info("Serving %s on (%s, %s)", handler_class.__name__, bind_addr, port)
server.serve_forever()
finally:
log.info("Shutting down")
server.server_close()
def start_io_lang_server(rfile, wfile, check_parent_process, handler_class) -> None:
if not issubclass(handler_class, PythonLSPServer):
raise ValueError("Handler class must be an instance of PythonLSPServer")
log.info("Starting %s IO language server", handler_class.__name__)
server = handler_class(rfile, wfile, check_parent_process)
server.start()
def start_ws_lang_server(port, check_parent_process, handler_class) -> None:
if not issubclass(handler_class, PythonLSPServer):
raise ValueError("Handler class must be an instance of PythonLSPServer")
# imports needed only for websockets based server
try:
import asyncio
from concurrent.futures import ThreadPoolExecutor
import websockets
except ImportError as e:
raise ImportError(
"websocket modules missing. Please run: pip install 'python-lsp-server[websockets]'"
) from e
with ThreadPoolExecutor(max_workers=10) as tpool:
send_queue = None
loop = None
async def pylsp_ws(websocket):
log.debug("Creating LSP object")
# creating a partial function and suppling the websocket connection
response_handler = partial(send_message, websocket=websocket)
# Not using default stream reader and writer.
# Instead using a consumer based approach to handle processed requests
pylsp_handler = handler_class(
rx=None,
tx=None,
consumer=response_handler,
check_parent_process=check_parent_process,
)
async for message in websocket:
try:
log.debug("consuming payload and feeding it to LSP handler")
request = json.loads(message)
loop = asyncio.get_running_loop()
await loop.run_in_executor(tpool, pylsp_handler.consume, request)
except Exception as e:
log.exception("Failed to process request %s, %s", message, str(e))
def send_message(message, websocket):
"""Handler to send responses of processed requests to respective web socket clients"""
try:
payload = json.dumps(message, ensure_ascii=False)
loop.call_soon_threadsafe(send_queue.put_nowait, (payload, websocket))
except Exception as e:
log.exception("Failed to write message %s, %s", message, str(e))
async def run_server():
nonlocal send_queue, loop
send_queue = asyncio.Queue()
loop = asyncio.get_running_loop()
async with websockets.serve(pylsp_ws, port=port):
while 1:
# Wait until payload is available for sending
payload, websocket = await send_queue.get()
await websocket.send(payload)
asyncio.run(run_server())
| _StreamHandlerWrapper |
python | tensorflow__tensorflow | tensorflow/python/ops/linalg/linear_operator_permutation.py | {
"start": 1516,
"end": 10768
} | class ____(linear_operator.LinearOperator):
"""`LinearOperator` acting like a [batch] of permutation matrices.
This operator acts like a [batch] of permutations with shape
`[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
an `N x N` matrix. This matrix `A` is not materialized, but for
purposes of broadcasting this shape will be relevant.
`LinearOperatorPermutation` is initialized with a (batch) vector.
A permutation, is defined by an integer vector `v` whose values are unique
and are in the range `[0, ... n]`. Applying the permutation on an input
matrix has the folllowing meaning: the value of `v` at index `i`
says to move the `v[i]`-th row of the input matrix to the `i`-th row.
Because all values are unique, this will result in a permutation of the
rows the input matrix. Note, that the permutation vector `v` has the same
semantics as `tf.transpose`.
```python
# Create a 3 x 3 permutation matrix that swaps the last two columns.
vec = [0, 2, 1]
operator = LinearOperatorPermutation(vec)
operator.to_dense()
==> [[1., 0., 0.]
[0., 0., 1.]
[0., 1., 0.]]
operator.shape
==> [3, 3]
# This will be zero.
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [3, 4] Tensor
operator.matmul(x)
==> Shape [3, 4] Tensor
```
#### Shape compatibility
This operator acts on [batch] matrix with compatible shape.
`x` is a batch matrix with compatible shape for `matmul` and `solve` if
```
operator.shape = [B1,...,Bb] + [N, N], with b >= 0
x.shape = [C1,...,Cc] + [N, R],
and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
```
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
perm,
dtype=dtypes.float32,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name="LinearOperatorPermutation"):
r"""Initialize a `LinearOperatorPermutation`.
Args:
perm: Shape `[B1,...,Bb, N]` Integer `Tensor` with `b >= 0`
`N >= 0`. An integer vector that represents the permutation to apply.
Note that this argument is same as `tf.transpose`. However, this
permutation is applied on the rows, while the permutation in
`tf.transpose` is applied on the dimensions of the `Tensor`. `perm`
is required to have unique entries from `{0, 1, ... N-1}`.
dtype: The `dtype` of arguments to this operator. Default: `float32`.
Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
`complex128`.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose. This is autoset to true
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
This is autoset to false.
is_square: Expect that this operator acts like square [batch] matrices.
This is autoset to true.
name: A name for this `LinearOperator`.
Raises:
ValueError: `is_self_adjoint` is not `True`, `is_positive_definite` is
not `False` or `is_square` is not `True`.
"""
parameters = dict(
perm=perm,
dtype=dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name
)
with ops.name_scope(name, values=[perm]):
self._perm = linear_operator_util.convert_nonref_to_tensor(
perm, name="perm")
self._check_perm(self._perm)
# Check and auto-set hints.
if is_non_singular is False: # pylint:disable=g-bool-id-comparison
raise ValueError(f"A Permutation operator is always non-singular. "
f"Expected argument `is_non_singular` to be True. "
f"Received: {is_non_singular}.")
if is_square is False: # pylint:disable=g-bool-id-comparison
raise ValueError(f"A Permutation operator is always square. "
f"Expected argument `is_square` to be True. "
f"Received: {is_square}.")
is_square = True
super(LinearOperatorPermutation, self).__init__(
dtype=dtype,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
parameters=parameters,
name=name)
def _check_perm(self, perm):
"""Static check of perm."""
if (perm.shape.ndims is not None and perm.shape.ndims < 1):
raise ValueError(f"Argument `perm` must have at least 1 dimension. "
f"Received: {perm}.")
if not perm.dtype.is_integer:
raise TypeError(f"Argument `perm` must be integer dtype. "
f"Received: {perm}.")
# Check that the permutation satisfies the uniqueness constraint.
static_perm = tensor_util.constant_value(perm)
if static_perm is not None:
sorted_perm = np.sort(static_perm, axis=-1)
if np.any(sorted_perm != np.arange(0, static_perm.shape[-1])):
raise ValueError(
f"Argument `perm` must be a vector of unique integers from "
f"0 to {static_perm.shape[-1] - 1}.")
def _shape(self):
perm_shape = self._perm.shape
return perm_shape.concatenate(perm_shape[-1:])
def _shape_tensor(self):
perm_shape = array_ops.shape(self._perm)
k = perm_shape[-1]
return array_ops.concat((perm_shape, [k]), 0)
def _assert_non_singular(self):
return control_flow_ops.no_op("assert_non_singular")
def _domain_dimension_tensor(self, perm=None):
perm = perm if perm is not None else self.perm
return array_ops.shape(perm)[-1]
def _matmul(self, x, adjoint=False, adjoint_arg=False):
perm = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.perm)
if adjoint and not self.is_self_adjoint:
# TODO(srvasude): invert_permutation doesn't work on batches so we use
# argsort.
perm = sort_ops.argsort(perm, axis=-1)
x = linalg.adjoint(x) if adjoint_arg else x
# We need to broadcast x and the permutation since tf.gather doesn't
# broadcast.
broadcast_shape = array_ops.broadcast_dynamic_shape(
array_ops.shape(x)[:-1], array_ops.shape(perm))
k = array_ops.shape(x)[-1]
broadcast_x_shape = array_ops.concat([broadcast_shape, [k]], axis=-1)
x = array_ops.broadcast_to(x, broadcast_x_shape)
perm = array_ops.broadcast_to(perm, broadcast_shape)
m = array_ops.shape(x)[-2]
x = array_ops.reshape(x, [-1, m, k])
perm = array_ops.reshape(perm, [-1, m])
y = array_ops.gather(x, perm, axis=-2, batch_dims=1)
return array_ops.reshape(y, broadcast_x_shape)
# TODO(srvasude): Permutation parity is equivalent to the determinant.
def _log_abs_determinant(self):
# Permutation matrices have determinant +/- 1.
return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# The inverse of a permutation matrix is the transpose matrix.
# Apply a matmul and flip the adjoint bit.
return self._matmul(rhs, adjoint=(not adjoint), adjoint_arg=adjoint_arg)
def _to_dense(self):
perm = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.perm)
return math_ops.cast(math_ops.equal(
math_ops.range(0, self._domain_dimension_tensor(perm)),
perm[..., array_ops.newaxis]), self.dtype)
def _diag_part(self):
perm = tensor_conversion.convert_to_tensor_v2_with_dispatch(self.perm)
return math_ops.cast(math_ops.equal(
math_ops.range(0, self._domain_dimension_tensor(perm)),
perm), self.dtype)
def _cond(self):
# Permutation matrices are rotations which have condition number 1.
return array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)
@property
def perm(self):
return self._perm
@property
def _composite_tensor_fields(self):
return ("perm", "dtype")
@property
def _experimental_parameter_ndims_to_matrix_ndims(self):
return {"perm": 1}
| LinearOperatorPermutation |
python | getsentry__sentry | tests/sentry/workflow_engine/utils/test_workflow_metrics.py | {
"start": 397,
"end": 1415
} | class ____(WorkflowEventContextTestCase):
def setUp(self) -> None:
super().setUp()
# ensure the context is empty by default
self.ctx_token = WorkflowEventContext.set(WorkflowEventContextData(detector=None))
def test(self, mock_incr: mock.MagicMock) -> None:
metrics_incr("example.metric")
mock_incr.assert_called_once_with("workflow_engine.example.metric", 1)
def test_many(self, mock_incr: mock.MagicMock) -> None:
metrics_incr("example.metric", 2)
mock_incr.assert_called_once_with("workflow_engine.example.metric", 2)
def test_with_context(self, mock_incr: mock.MagicMock) -> None:
detector = self.create_detector()
self.ctx_token = WorkflowEventContext.set(WorkflowEventContextData(detector=detector))
metrics_incr("example.metric")
mock_incr.assert_called_with(
"workflow_engine.example.metric",
1,
tags={"detector_type": detector.type},
)
| TestWorkflowEngineMetrics |
python | numba__numba | numba/cuda/tests/cudapy/test_const_string.py | {
"start": 2230,
"end": 4279
} | class ____(CUDATestCase):
def test_assign_const_unicode_string(self):
@cuda.jit
def str_assign(arr):
i = cuda.grid(1)
if i < len(arr):
arr[i] = "XYZ"
n_strings = 8
arr = np.zeros(n_strings + 1, dtype="<U12")
str_assign[1, n_strings](arr)
# Expected result, e.g.:
# ['XYZ' 'XYZ' 'XYZ' 'XYZ' 'XYZ' 'XYZ' 'XYZ' 'XYZ' '']
expected = np.zeros_like(arr)
expected[:-1] = 'XYZ'
expected[-1] = ''
np.testing.assert_equal(arr, expected)
def test_assign_const_byte_string(self):
@cuda.jit
def bytes_assign(arr):
i = cuda.grid(1)
if i < len(arr):
arr[i] = b"XYZ"
n_strings = 8
arr = np.zeros(n_strings + 1, dtype="S12")
bytes_assign[1, n_strings](arr)
# Expected result, e.g.:
# [b'XYZ' b'XYZ' b'XYZ' b'XYZ' b'XYZ' b'XYZ' b'XYZ' b'XYZ' b'']
expected = np.zeros_like(arr)
expected[:-1] = b'XYZ'
expected[-1] = b''
np.testing.assert_equal(arr, expected)
def test_assign_const_string_in_record(self):
@cuda.jit
def f(a):
a[0]['x'] = 1
a[0]['y'] = 'ABC'
a[1]['x'] = 2
a[1]['y'] = 'XYZ'
dt = np.dtype([('x', np.int32), ('y', np.dtype('<U12'))])
a = np.zeros(2, dt)
f[1, 1](a)
reference = np.asarray([(1, 'ABC'), (2, 'XYZ')], dtype=dt)
np.testing.assert_array_equal(reference, a)
def test_assign_const_bytes_in_record(self):
@cuda.jit
def f(a):
a[0]['x'] = 1
a[0]['y'] = b'ABC'
a[1]['x'] = 2
a[1]['y'] = b'XYZ'
dt = np.dtype([('x', np.float32), ('y', np.dtype('S12'))])
a = np.zeros(2, dt)
f[1, 1](a)
reference = np.asarray([(1, b'ABC'), (2, b'XYZ')], dtype=dt)
np.testing.assert_array_equal(reference, a)
if __name__ == '__main__':
unittest.main()
| TestConstString |
python | google__pytype | pytype/tests/test_errors2.py | {
"start": 13044,
"end": 14419
} | class ____(test_base.BaseTest):
"""Test in-place operations."""
def _testOp(self, op, symbol):
errors = self.CheckWithErrors(f"""
class A:
def __{op}__(self, x: "A"):
return None
def f():
v = A()
v {symbol} 3 # unsupported-operands[e]
""")
self.assertErrorSequences(
errors, {"e": [symbol, "A", "int", f"__{op}__ on A", "A"]}
)
def test_isub(self):
self._testOp("isub", "-=")
def test_imul(self):
self._testOp("imul", "*=")
def test_idiv(self):
errors = self.CheckWithErrors("""
class A:
def __idiv__(self, x: "A"):
return None
def __itruediv__(self, x: "A"):
return None
def f():
v = A()
v /= 3 # unsupported-operands[e]
""")
self.assertErrorRegexes(
errors, {"e": r"\/\=.*A.*int.*__i(true)?div__ on A.*A"}
)
def test_imod(self):
self._testOp("imod", "%=")
def test_ipow(self):
self._testOp("ipow", "**=")
def test_ilshift(self):
self._testOp("ilshift", "<<=")
def test_irshift(self):
self._testOp("irshift", ">>=")
def test_iand(self):
self._testOp("iand", "&=")
def test_ixor(self):
self._testOp("ixor", "^=")
def test_ior(self):
self._testOp("ior", "|=")
def test_ifloordiv(self):
self._testOp("ifloordiv", "//=")
| InPlaceOperationsTest |
python | falconry__falcon | tests/test_httperror.py | {
"start": 7581,
"end": 35928
} | class ____:
def _misc_test(self, client, exception, status, needs_title=True):
client.app.add_route('/misc', MiscErrorsResource(exception, needs_title))
response = client.simulate_request(path='/misc')
assert response.status == status
def test_base_class(self, client):
headers = {
'X-Error-Title': 'Storage service down',
'X-Error-Description': (
'The configured storage service is not '
'responding to requests. Please contact '
'your service provider.'
),
'X-Error-Status': falcon.HTTP_503,
}
expected_body = {
'title': 'Storage service down',
'description': (
'The configured storage service is not '
'responding to requests. Please contact '
'your service provider.'
),
'code': 10042,
}
# Try it with Accept: */*
headers['Accept'] = '*/*'
response = client.simulate_request(path='/fail', headers=headers)
assert response.status == headers['X-Error-Status']
assert response.headers['vary'] == 'Accept'
assert expected_body == response.json
# Now try it with application/json
headers['Accept'] = 'application/json'
response = client.simulate_request(path='/fail', headers=headers)
assert response.status == headers['X-Error-Status']
assert response.json == expected_body
def test_no_description_json(self, client):
response = client.simulate_patch('/fail')
assert response.status == falcon.HTTP_400
assert response.json == {'title': '400 Bad Request'}
assert response.content_type == 'application/json'
def test_no_description_xml(self, client):
client.app.resp_options.xml_error_serialization = True
response = client.simulate_patch(
path='/fail', headers={'Accept': 'application/xml'}
)
assert response.status == falcon.HTTP_400
expected_xml = (
b'<?xml version="1.0" encoding="UTF-8"?><error>'
b'<title>400 Bad Request</title></error>'
)
assert response.content == expected_xml
assert response.content_type == 'application/xml'
@pytest.mark.parametrize('custom_xml', [True, False])
def test_xml_enable(self, client, enable_xml, custom_xml):
has_xml = enable_xml(client.app)
client.app.resp_options.default_media_type = 'app/foo'
accept = 'app/falcon+xml' if custom_xml else 'application/xml'
response = client.simulate_patch(path='/fail', headers={'Accept': accept})
assert response.status == falcon.HTTP_400
if has_xml:
expected_xml = (
b'<?xml version="1.0" encoding="UTF-8"?><error>'
b'<title>400 Bad Request</title></error>'
)
assert response.content == expected_xml
else:
assert response.content == b''
if has_xml or custom_xml:
assert response.content_type == 'application/xml'
else:
assert response.content_type == 'app/foo'
def test_to_xml_deprecated(self):
with pytest.warns(
DeprecatedWarning,
match='The internal error serialization to XML is deprecated.',
):
res = falcon.HTTPGone().to_xml()
assert res == falcon.HTTPGone()._to_xml()
def test_client_does_not_accept_json_or_xml(self, client):
headers = {
'Accept': 'application/x-yaml',
'X-Error-Title': 'Storage service down',
'X-Error-Description': (
'The configured storage service is not '
'responding to requests. Please contact '
'your service provider'
),
'X-Error-Status': falcon.HTTP_503,
}
response = client.simulate_request(path='/fail', headers=headers)
assert response.status == headers['X-Error-Status']
assert response.headers['Vary'] == 'Accept'
assert not response.content
def test_custom_error_serializer(self, client, yaml):
headers = {
'X-Error-Title': 'Storage service down',
'X-Error-Description': (
'The configured storage service is not '
'responding to requests. Please contact '
'your service provider'
),
'X-Error-Status': falcon.HTTP_503,
}
expected_doc = {
'code': 10042,
'description': (
'The configured storage service is not '
'responding to requests. Please contact '
'your service provider'
),
'title': 'Storage service down',
}
def _my_serializer(req, resp, exception):
preferred = req.client_prefers((falcon.MEDIA_YAML, falcon.MEDIA_JSON))
if preferred is not None:
if preferred == falcon.MEDIA_JSON:
resp.data = exception.to_json()
else:
resp.text = yaml.dump(exception.to_dict(), encoding=None)
resp.content_type = preferred
def _check(media_type, deserializer):
headers['Accept'] = media_type
client.app.set_error_serializer(_my_serializer)
response = client.simulate_request(path='/fail', headers=headers)
assert response.status == headers['X-Error-Status']
actual_doc = deserializer(response.content.decode('utf-8'))
assert expected_doc == actual_doc
_check(falcon.MEDIA_YAML, yaml.safe_load)
_check(falcon.MEDIA_JSON, json.loads)
@pytest.mark.parametrize(
'method,path,status',
[
('GET', '/404', 404),
('GET', '/notfound', 404),
('REPORT', '/404', 405),
('BREW', '/notfound', 400),
],
)
def test_custom_error_serializer_optional_representation(
self, client, method, path, status
):
def _simple_serializer(req, resp, exception):
representation = exception.to_dict()
representation.update(status=int(exception.status[:3]))
resp.content_type = falcon.MEDIA_JSON
resp.media = representation
client.app.add_route('/404', NotFoundResource())
client.app.add_route('/notfound', NotFoundResourceWithBody())
client.app.set_error_serializer(_simple_serializer)
def s():
return client.simulate_request(path=path, method=method)
if method not in falcon.COMBINED_METHODS:
if not client.app._ASGI:
with pytest.warns(wsgiref.validate.WSGIWarning):
resp = s()
else:
resp = s()
else:
resp = s()
assert resp.json['title']
assert resp.json['status'] == status
def test_custom_serializer_no_representation(self, client):
def _chatty_serializer(req, resp, exception):
resp.content_type = falcon.MEDIA_TEXT
resp.text = b'You might think this error should not have a body'
client.app.add_route('/416', RangeNotSatisfiableResource())
client.app.set_error_serializer(_chatty_serializer)
resp = client.simulate_get(path='/416')
assert resp.text == 'You might think this error should not have a body'
def test_client_does_not_accept_anything(self, client):
headers = {
'Accept': '45087gigo;;;;',
'X-Error-Title': 'Storage service down',
'X-Error-Description': (
'The configured storage service is not '
'responding to requests. Please contact '
'your service provider'
),
'X-Error-Status': falcon.HTTP_503,
}
response = client.simulate_request(path='/fail', headers=headers)
assert response.status == headers['X-Error-Status']
assert not response.content
@pytest.mark.parametrize(
'media_type',
[
'application/json',
'application/vnd.company.system.project.resource+json;v=1.1',
'application/json-patch+json',
],
)
def test_forbidden(self, client, media_type):
headers = {'Accept': media_type}
expected_body = {
'title': 'Request denied',
'description': 'You do not have write permissions for this queue.',
'link': {
'text': 'Documentation related to this error',
'href': 'http://example.com/api/rbac',
'rel': 'help',
},
}
response = client.simulate_post(path='/fail', headers=headers)
assert response.status == falcon.HTTP_403
assert response.json == expected_body
def test_epic_fail_json(self, client):
headers = {'Accept': 'application/json'}
expected_body = {
'title': 'Internet crashed',
'description': 'Catastrophic weather event due to climate change.',
'code': 8733224,
'link': {
'text': 'Drill baby drill!',
'href': 'http://example.com/api/climate',
'rel': 'help',
},
}
response = client.simulate_put('/fail', headers=headers)
assert response.status == falcon.HTTP_792
assert response.json == expected_body
@pytest.mark.parametrize(
'media_type',
[
'text/xml',
'application/xml',
'application/vnd.company.system.project.resource+xml;v=1.1',
'application/atom+xml',
],
)
def test_epic_fail_xml(self, client, media_type):
client.app.resp_options.xml_error_serialization = True
headers = {'Accept': media_type}
expected_body = (
'<?xml version="1.0" encoding="UTF-8"?>'
+ '<error>'
+ '<title>Internet crashed</title>'
+ '<description>'
+ 'Catastrophic weather event due to climate change.'
+ '</description>'
+ '<code>8733224</code>'
+ '<link>'
+ '<text>Drill baby drill!</text>'
+ '<href>http://example.com/api/climate</href>'
+ '<rel>help</rel>'
+ '</link>'
+ '</error>'
)
response = client.simulate_put(path='/fail', headers=headers)
assert response.status == falcon.HTTP_792
try:
et.fromstring(response.content.decode('utf-8'))
except ValueError:
pytest.fail()
assert response.text == expected_body
def test_unicode_json(self, client):
unicode_resource = UnicodeFaultyResource()
expected_body = {
'title': 'Internet \xe7rashed!',
'description': '\xc7atastrophic weather event',
'link': {
'text': 'Drill b\xe1by drill!',
'href': 'http://example.com/api/%C3%A7limate',
'rel': 'help',
},
}
client.app.add_route('/unicode', unicode_resource)
response = client.simulate_request(path='/unicode')
assert unicode_resource.called
assert response.status == falcon.HTTP_792
assert expected_body == response.json
def test_unicode_xml(self, client):
client.app.resp_options.xml_error_serialization = True
unicode_resource = UnicodeFaultyResource()
expected_body = (
'<?xml version="1.0" encoding="UTF-8"?>'
+ '<error>'
+ '<title>Internet çrashed!</title>'
+ '<description>'
+ 'Çatastrophic weather event'
+ '</description>'
+ '<link>'
+ '<text>Drill báby drill!</text>'
+ '<href>http://example.com/api/%C3%A7limate</href>'
+ '<rel>help</rel>'
+ '</link>'
+ '</error>'
)
client.app.add_route('/unicode', unicode_resource)
response = client.simulate_request(
path='/unicode', headers={'accept': 'application/xml'}
)
assert unicode_resource.called
assert response.status == falcon.HTTP_792
assert expected_body == response.text
def test_401(self, client):
client.app.add_route('/401', UnauthorizedResource())
response = client.simulate_request(path='/401')
assert response.status == falcon.HTTP_401
assert response.headers['www-authenticate'] == 'Basic realm="simple"'
response = client.simulate_post('/401')
assert response.status == falcon.HTTP_401
assert (
response.headers['www-authenticate']
== 'Newauth realm="apps", Basic realm="simple"'
)
response = client.simulate_put('/401')
assert response.status == falcon.HTTP_401
assert 'www-authenticate' not in response.headers
def test_404_without_body(self, client):
client.app.add_route('/404', NotFoundResource())
response = client.simulate_request(path='/404')
assert response.status == falcon.HTTP_404
assert response.json == falcon.HTTPNotFound().to_dict()
assert response.json == {'title': falcon.HTTP_NOT_FOUND}
def test_404_with_body(self, client):
client.app.add_route('/404', NotFoundResourceWithBody())
response = client.simulate_request(path='/404')
assert response.status == falcon.HTTP_404
assert response.content
expected_body = {'title': '404 Not Found', 'description': 'Not Found'}
assert response.json == expected_body
def test_405_without_body(self, client):
client.app.add_route('/405', MethodNotAllowedResource())
response = client.simulate_request(path='/405')
assert response.status == falcon.HTTP_405
assert response.content == falcon.HTTPMethodNotAllowed(['PUT']).to_json()
assert response.json == {'title': falcon.HTTP_METHOD_NOT_ALLOWED}
assert response.headers['allow'] == 'PUT'
def test_405_without_body_with_extra_headers(self, client):
client.app.add_route('/405', MethodNotAllowedResourceWithHeaders())
response = client.simulate_request(path='/405')
assert response.status == falcon.HTTP_405
assert response.content == falcon.HTTPMethodNotAllowed([]).to_json()
assert response.headers['allow'] == 'PUT'
assert response.headers['x-ping'] == 'pong'
def test_405_without_body_with_extra_headers_double_check(self, client):
client.app.add_route('/405', MethodNotAllowedResourceWithHeadersWithAccept())
response = client.simulate_request(path='/405')
assert response.status == falcon.HTTP_405
assert response.json == falcon.HTTPMethodNotAllowed([]).to_dict()
assert response.headers['allow'] == 'PUT'
assert response.headers['allow'] != 'GET,PUT'
assert response.headers['allow'] != 'GET'
assert response.headers['x-ping'] == 'pong'
def test_405_with_body(self, client):
client.app.add_route('/405', MethodNotAllowedResourceWithBody())
response = client.simulate_request(path='/405')
assert response.status == falcon.HTTP_405
assert response.content
expected_body = {
'title': '405 Method Not Allowed',
'description': 'Not Allowed',
}
assert response.json == expected_body
assert response.headers['allow'] == 'PUT'
def test_410_without_body(self, client):
client.app.add_route('/410', GoneResource())
response = client.simulate_request(path='/410')
assert response.status == falcon.HTTP_410
assert response.content == falcon.HTTPGone().to_json()
assert response.json == {'title': '410 Gone'}
def test_410_with_body(self, client):
client.app.add_route('/410', GoneResourceWithBody())
response = client.simulate_request(path='/410')
assert response.status == falcon.HTTP_410
assert response.content
expected_body = {'title': '410 Gone', 'description': 'Gone with the wind'}
assert response.json == expected_body
def test_411(self, client):
client.app.add_route('/411', LengthRequiredResource())
response = client.simulate_request(path='/411')
assert response.status == falcon.HTTP_411
parsed_body = response.json
assert parsed_body['title'] == 'title'
assert parsed_body['description'] == 'description'
def test_413(self, client):
client.app.add_route('/413', RequestEntityTooLongResource())
response = client.simulate_request(path='/413')
assert response.status == falcon.HTTP_413
parsed_body = response.json
assert parsed_body['title'] == 'Request Rejected'
assert parsed_body['description'] == 'Request Body Too Large'
assert 'retry-after' not in response.headers
def test_temporary_413_integer_retry_after(self, client):
client.app.add_route('/413', TemporaryRequestEntityTooLongResource('6'))
response = client.simulate_request(path='/413')
assert response.status == falcon.HTTP_413
parsed_body = response.json
assert parsed_body['title'] == 'Request Rejected'
assert parsed_body['description'] == 'Request Body Too Large'
assert response.headers['retry-after'] == '6'
def test_temporary_413_datetime_retry_after(self, client):
date = datetime.datetime.now() + datetime.timedelta(minutes=5)
client.app.add_route('/413', TemporaryRequestEntityTooLongResource(date))
response = client.simulate_request(path='/413')
assert response.status == falcon.HTTP_413
parsed_body = response.json
assert parsed_body['title'] == 'Request Rejected'
assert parsed_body['description'] == 'Request Body Too Large'
assert response.headers['retry-after'] == falcon.util.dt_to_http(date)
def test_414(self, client):
client.app.add_route('/414', UriTooLongResource())
response = client.simulate_request(path='/414')
assert response.status == falcon.HTTP_414
def test_414_with_title(self, client):
title = 'Argh! Error!'
client.app.add_route('/414', UriTooLongResource(title=title))
response = client.simulate_request(path='/414', headers={})
parsed_body = json.loads(response.content.decode())
assert parsed_body['title'] == title
def test_414_with_description(self, client):
description = 'Be short please.'
client.app.add_route('/414', UriTooLongResource(description=description))
response = client.simulate_request(path='/414', headers={})
parsed_body = json.loads(response.content.decode())
assert parsed_body['description'] == description
def test_414_with_custom_kwargs(self, client):
code = 'someid'
client.app.add_route('/414', UriTooLongResource(code=code))
response = client.simulate_request(path='/414', headers={})
parsed_body = json.loads(response.content.decode())
assert parsed_body['code'] == code
def test_416(self, client, asgi, util):
client.app = util.create_app(asgi)
client.app.resp_options.xml_error_serialization = True
client.app.add_route('/416', RangeNotSatisfiableResource())
response = client.simulate_request(path='/416', headers={'accept': 'text/xml'})
assert response.status == falcon.HTTP_416
assert response.content == falcon.HTTPRangeNotSatisfiable(123456)._to_xml()
exp = (
b'<?xml version="1.0" encoding="UTF-8"?><error>'
b'<title>416 Range Not Satisfiable</title></error>'
)
assert response.content == exp
assert response.headers['content-range'] == 'bytes */123456'
assert response.headers['content-length'] == str(len(response.content))
def test_429_no_retry_after(self, client):
client.app.add_route('/429', TooManyRequestsResource())
response = client.simulate_request(path='/429')
parsed_body = response.json
assert response.status == falcon.HTTP_429
assert parsed_body['title'] == 'Too many requests'
assert parsed_body['description'] == '1 per minute'
assert 'retry-after' not in response.headers
def test_429(self, client):
client.app.add_route('/429', TooManyRequestsResource(60))
response = client.simulate_request(path='/429')
parsed_body = response.json
assert response.status == falcon.HTTP_429
assert parsed_body['title'] == 'Too many requests'
assert parsed_body['description'] == '1 per minute'
assert response.headers['retry-after'] == '60'
def test_429_datetime(self, client):
date = datetime.datetime.now() + datetime.timedelta(minutes=1)
client.app.add_route('/429', TooManyRequestsResource(date))
response = client.simulate_request(path='/429')
parsed_body = response.json
assert response.status == falcon.HTTP_429
assert parsed_body['title'] == 'Too many requests'
assert parsed_body['description'] == '1 per minute'
assert response.headers['retry-after'] == falcon.util.dt_to_http(date)
def test_503_integer_retry_after(self, client):
client.app.add_route('/503', ServiceUnavailableResource(60))
response = client.simulate_request(path='/503')
expected_body = {
'title': 'Oops',
'description': 'Stand by...',
}
assert response.status == falcon.HTTP_503
assert response.json == expected_body
assert response.headers['retry-after'] == '60'
def test_503_datetime_retry_after(self, client):
date = datetime.datetime.now() + datetime.timedelta(minutes=5)
client.app.add_route('/503', ServiceUnavailableResource(date))
response = client.simulate_request(path='/503')
expected_body = {
'title': 'Oops',
'description': 'Stand by...',
}
assert response.status == falcon.HTTP_503
assert response.json == expected_body
assert response.headers['retry-after'] == falcon.util.dt_to_http(date)
def test_invalid_header(self, client):
client.app.add_route('/400', InvalidHeaderResource())
response = client.simulate_request(path='/400')
expected_desc = (
'The value provided for the "X-Auth-Token" '
'header is invalid. Please provide a valid token.'
)
expected_body = {
'title': 'Invalid header value',
'description': expected_desc,
'code': 'A1001',
}
assert response.status == falcon.HTTP_400
assert response.json == expected_body
def test_missing_header(self, client):
client.app.add_route('/400', MissingHeaderResource())
response = client.simulate_request(path='/400')
expected_body = {
'title': 'Missing header value',
'description': 'The "X-Auth-Token" header is required.',
}
assert response.status == falcon.HTTP_400
assert response.json == expected_body
def test_invalid_param(self, client):
client.app.add_route('/400', InvalidParamResource())
response = client.simulate_request(path='/400')
expected_desc = (
'The "id" parameter is invalid. The value must be a hex-encoded UUID.'
)
expected_body = {
'title': 'Invalid parameter',
'description': expected_desc,
'code': 'P1002',
}
assert response.status == falcon.HTTP_400
assert response.json == expected_body
def test_missing_param(self, client):
client.app.add_route('/400', MissingParamResource())
response = client.simulate_request(path='/400')
expected_body = {
'title': 'Missing parameter',
'description': 'The "id" parameter is required.',
'code': 'P1003',
}
assert response.status == falcon.HTTP_400
assert response.json == expected_body
    def test_misc(self, client):
        # Smoke-test the remaining HTTPError subclasses; each should round-trip
        # with its status code. needs_title=False presumably marks classes whose
        # default title is the bare status line — confirm in _misc_test.
        self._misc_test(client, falcon.HTTPBadRequest, falcon.HTTP_400)
        self._misc_test(
            client, falcon.HTTPNotAcceptable, falcon.HTTP_406, needs_title=False
        )
        self._misc_test(client, falcon.HTTPConflict, falcon.HTTP_409)
        self._misc_test(client, falcon.HTTPPreconditionFailed, falcon.HTTP_412)
        self._misc_test(
            client, falcon.HTTPUnsupportedMediaType, falcon.HTTP_415, needs_title=False
        )
        self._misc_test(client, falcon.HTTPUnprocessableEntity, falcon.HTTP_422)
        self._misc_test(
            client,
            falcon.HTTPUnavailableForLegalReasons,
            falcon.HTTP_451,
            needs_title=False,
        )
        self._misc_test(client, falcon.HTTPInternalServerError, falcon.HTTP_500)
        self._misc_test(client, falcon.HTTPBadGateway, falcon.HTTP_502)
    @pytest.mark.parametrize(
        'status, status_type',
        [
            (falcon.HTTP_503, 'str'),
            (falcon.HTTP_503, 'bytes'),
            (503, 'int'),
            (503, 'str'),
            (503, 'bytes'),
            (503, 'HTTPStatus'),
        ],
    )
    def test_title_default_message_if_none(self, status, status_type, client):
        """When no title is supplied, it defaults to the full status line.

        The status is smuggled to the /fail resource via headers in each
        supported representation (str/bytes/int/HTTPStatus).
        """
        headers = {
            'X-Error-Status': str(status),
            'X-Error-Status-Type': status_type,
        }
        response = client.simulate_request(path='/fail', headers=headers)
        assert response.json['title'] == falcon.HTTP_503
        assert response.status_code == 503
def test_to_json_dumps(self):
e = falcon.HTTPError(status=418, title='foo', description='bar')
assert e.to_json() == b'{"title": "foo", "description": "bar"}'
class Handler:
def serialize(self, obj, type):
assert type == falcon.MEDIA_JSON
return b'{"a": "b"}'
assert e.to_json(Handler()) == b'{"a": "b"}'
    def test_serialize_error_uses_media_handler(self, client):
        client.app.add_route('/path', NotFoundResource())
        # Monkeypatch the JSON media handler's private _dumps; the error
        # serializer must route through it (hence the uppercased payload).
        h = client.app.resp_options.media_handlers[falcon.MEDIA_JSON]
        h._dumps = lambda x: json.dumps(x).upper()
        response = client.simulate_request(path='/path')
        assert response.status == falcon.HTTP_404
        assert response.json == {'TITLE': falcon.HTTP_NOT_FOUND.upper()}
    def test_serialize_no_json_media_handler(self, client):
        client.app.add_route('/path', NotFoundResource())
        # Strip every JSON-capable media handler; error serialization must
        # still fall back to producing a JSON body.
        for h in list(client.app.resp_options.media_handlers):
            if 'json' in h.casefold():
                client.app.resp_options.media_handlers.pop(h)
        response = client.simulate_request(path='/path')
        assert response.status == falcon.HTTP_404
        assert response.json == {'title': falcon.HTTP_NOT_FOUND}
def test_MediaMalformedError(self):
err = falcon.MediaMalformedError('foo-media')
assert err.description == 'Could not parse foo-media body'
err.__cause__ = ValueError('some error')
assert err.description == 'Could not parse foo-media body - some error'
    def test_kw_only(self):
        # title/description are keyword-only on HTTPError; positional use
        # must raise TypeError.
        with pytest.raises(TypeError, match='positional argument'):
            falcon.HTTPError(falcon.HTTP_BAD_REQUEST, 'foo', 'bar')
# Parametrization triples: (Accept media type, expected response Content-Type,
# expected serialized error body). NOTE(review): presumably consumed by
# content-negotiation tests elsewhere in this file — confirm against their
# parametrize lists before renaming.
JSON_CONTENT = b'{"title": "410 Gone"}'
JSON = (MEDIA_JSON, MEDIA_JSON, JSON_CONTENT)
CUSTOM_JSON = ('custom/any+json', MEDIA_JSON, JSON_CONTENT)
XML_CONTENT = (
    b'<?xml version="1.0" encoding="UTF-8"?><error><title>410 Gone</title></error>'
)
XML = (MEDIA_XML, MEDIA_XML, XML_CONTENT)
CUSTOM_XML = ('custom/any+xml', MEDIA_XML, XML_CONTENT)
YAML = (MEDIA_YAML, MEDIA_YAML, b'title: 410 Gone!')
ASYNC_ONLY = ('application/only_async', 'application/only_async', b'this is async')
ASYNC_WITH_SYNC = (
    'application/async_with_sync',
    'application/async_with_sync',
    b'this is sync instead',
)
| TestHTTPError |
python | getsentry__sentry | src/sentry/issue_detection/detectors/experiments/n_plus_one_db_span_detector.py | {
"start": 730,
"end": 12786
} | class ____(PerformanceDetector):
    """
    Detector goals:
      - identify a database N+1 query with high accuracy
      - collect enough information to create a good fingerprint (see below)
      - only return issues with good fingerprints

    A good fingerprint is one that gives us confidence that, if two fingerprints
    match, then they correspond to the same issue location in code (and
    therefore, the same fix).

    To do this we look for a specific structure:

      [-------- transaction span -----------]
         [-------- parent span -----------]
            [source query]
                          [n0]
                              [n1]
                                  [n2]
                                      ...

    If we detect two different N+1 problems, and both have matching parents,
    source queries, and repeated (n) queries, then we can be fairly confident
    they are the same issue.
    """

    # NOTE(review): "n_hash" and "transaction" are declared here but never
    # assigned in this class, while "previous_span" is assigned in __init__ but
    # missing from __slots__ — confirm against the base class before cleanup.
    __slots__ = (
        "potential_parents",
        "source_span",
        "n_hash",
        "n_spans",
        "transaction",
    )

    type = DetectorType.EXPERIMENTAL_N_PLUS_ONE_DB_QUERIES
    settings_key = DetectorType.EXPERIMENTAL_N_PLUS_ONE_DB_QUERIES

    def __init__(self, settings: dict[DetectorType, Any], event: dict[str, Any]) -> None:
        super().__init__(settings, event)
        self.potential_parents = {}
        self.previous_span: Span | None = None
        self.n_spans: list[Span] = []
        self.source_span: Span | None = None
        # The root (trace context) span is always a candidate parent.
        root_span = get_path(self._event, "contexts", "trace")
        if root_span:
            self.potential_parents[root_span.get("span_id")] = root_span

    @classmethod
    def is_detection_allowed_for_system(cls) -> bool:
        # Defer to the issue platform for whether to create issues
        # See https://develop.sentry.dev/backend/issue-platform/#releasing-your-issue-type
        return True

    def is_creation_allowed_for_organization(self, organization: Organization | None) -> bool:
        return features.has(
            "organizations:experimental-n-plus-one-db-detector-rollout", organization
        )

    def is_creation_allowed_for_project(self, project: Project | None) -> bool:
        return self.settings["detection_enabled"]

    def visit_span(self, span: Span) -> None:
        """State-machine step: feed one span (in order) into the detector."""
        span_id = span.get("span_id", None)
        op = span.get("op", None)
        if not span_id or not op:
            return

        if not self._is_db_op(op):
            # This breaks up the N+1 we're currently tracking.
            self._maybe_store_problem()
            self._reset_detection()
            # Treat it as a potential parent as long as it isn't the root span.
            if span.get("parent_span_id", None):
                self.potential_parents[span_id] = span
            return

        if not self.source_span:
            # We aren't currently tracking an N+1. Maybe this span triggers one!
            self._maybe_use_as_source(span)
            return

        # If we got this far, we know we're a DB span and we're looking for a
        # sequence of N identical DB spans.
        if self._continues_n_plus_1(span):
            self.n_spans.append(span)
        else:
            previous_span = self.n_spans[-1] if self.n_spans else None
            self._maybe_store_problem()
            self._reset_detection()

            # Maybe this DB span starts a whole new N+1!
            if previous_span:
                self._maybe_use_as_source(previous_span)
            if self.source_span and self._continues_n_plus_1(span):
                self.n_spans.append(span)
            else:
                self.source_span = None
                self._maybe_use_as_source(span)

    def on_complete(self) -> None:
        # Flush any N+1 still being tracked when the span stream ends.
        self._maybe_store_problem()

    def _is_db_op(self, op: str) -> bool:
        # Redis and connection spans are DB-ish but never part of an N+1.
        return (
            op.startswith("db")
            and not op.startswith("db.redis")
            and not op.startswith("db.connection")
        )

    def _maybe_use_as_source(self, span: Span) -> None:
        # A span can only be a source if its parent is a known candidate.
        parent_span_id = span.get("parent_span_id", None)
        if not parent_span_id or parent_span_id not in self.potential_parents:
            return
        self.source_span = span

    def _continues_n_plus_1(self, span: Span) -> bool:
        """True if `span` extends the repeating sequence after source_span."""
        if self.source_span is None:
            return False
        # Repeats must be siblings of the source span.
        expected_parent_id = self.source_span.get("parent_span_id", None)
        parent_id = span.get("parent_span_id", None)
        if not parent_id or parent_id != expected_parent_id:
            return False
        span_hash = span.get("hash", None)
        if not span_hash:
            return False
        if span_hash == self.source_span.get("hash", None):
            # The source span and n repeating spans must have different queries.
            return False
        if not self.previous_span:
            self.previous_span = span
            return True
        return are_spans_equivalent(a=span, b=self.previous_span)

    def _maybe_store_problem(self) -> None:
        """Record a PerformanceProblem if the tracked sequence qualifies."""
        if not self.source_span or not self.n_spans:
            return

        # Do we have enough spans?
        count = self.settings.get("count")
        if len(self.n_spans) < count:
            return

        # Do the spans take enough total time?
        if not self._is_slower_than_threshold():
            return

        # We require a parent span in order to improve our fingerprint accuracy.
        parent_span_id = self.source_span.get("parent_span_id", None)
        if not parent_span_id:
            return
        parent_span = self.potential_parents[parent_span_id]
        if not parent_span:
            return

        # Track how many N+1-looking problems we found but dropped because we
        # couldn't be sure (maybe the truncated part of the query differs).
        if not contains_complete_query(
            self.source_span, is_source=True
        ) or not contains_complete_query(self.n_spans[0]):
            metrics.incr("performance.performance_issue.truncated_np1_db")
            return

        fingerprint = self._fingerprint(
            parent_op=parent_span.get("op", ""),
            parent_hash=parent_span.get("hash", ""),
            source_hash=self.source_span.get("hash", ""),
            n_hash=self.n_spans[0].get("hash", ""),
        )
        if fingerprint not in self.stored_problems:
            self._metrics_for_extra_matching_spans()

            offender_span_ids = [span["span_id"] for span in self.n_spans]
            first_span_description = get_valid_db_span_description(self.n_spans[0])
            if not first_span_description:
                metrics.incr("performance.performance_issue.invalid_description")
                return

            self.stored_problems[fingerprint] = PerformanceProblem(
                fingerprint=fingerprint,
                op="db",
                desc=first_span_description,
                type=PerformanceNPlusOneGroupType,
                parent_span_ids=[parent_span_id],
                cause_span_ids=[self.source_span["span_id"]],
                offender_span_ids=offender_span_ids,
                evidence_display=[
                    IssueEvidence(
                        name="Offending Spans",
                        value=get_notification_attachment_body("db", first_span_description),
                        # Has to be marked important to be displayed in the notifications
                        important=True,
                    )
                ],
                evidence_data={
                    "transaction_name": self._event.get("transaction", ""),
                    "op": "db",
                    "parent_span_ids": [parent_span_id],
                    "parent_span": get_span_evidence_value(parent_span),
                    "cause_span_ids": [self.source_span.get("span_id", None)],
                    "offender_span_ids": offender_span_ids,
                    "repeating_spans": f"{self.n_spans[0].get('op', 'db')} - {first_span_description}",
                    "repeating_spans_compact": first_span_description,
                    "num_repeating_spans": str(len(offender_span_ids)),
                },
            )

    def _is_slower_than_threshold(self) -> bool:
        duration_threshold = self.settings.get("duration_threshold")
        return total_span_time(self.n_spans) >= duration_threshold

    def _metrics_for_extra_matching_spans(self) -> None:
        # Checks for any extra spans that match the detected problem but are not part of affected spans.
        # Temporary check since we eventually want to capture extra perf problems on the initial pass while walking spans.
        n_count = len(self.n_spans)
        all_matching_spans = [
            span
            for span in self._event.get("spans", [])
            if self.previous_span
            and span.get("span_id", None) == self.previous_span.get("span_id", None)
        ]
        all_count = len(all_matching_spans)
        if n_count > 0 and n_count != all_count:
            metrics.incr("performance.performance_issue.np1_db.extra_spans")

    def _reset_detection(self) -> None:
        # Clear the tracked sequence; potential_parents is intentionally kept.
        self.source_span = None
        self.previous_span = None
        self.n_spans = []

    def _fingerprint(self, parent_op: str, parent_hash: str, source_hash: str, n_hash: str) -> str:
        # XXX: this has to be a hardcoded string otherwise grouping will break
        # For the experiment, we also need to modify the hardcoded string so that after re-GA, new groups send notifications.
        problem_class = "GroupType.PERFORMANCE_N_PLUS_ONE_DB_QUERIES"
        full_fingerprint = hashlib.sha1(
            (str(parent_op) + str(parent_hash) + str(source_hash) + str(n_hash)).encode("utf8"),
        ).hexdigest()
        return f"1-{problem_class}-{full_fingerprint}"
def contains_complete_query(span: Span, is_source: bool | None = False) -> bool:
    """Return True if the span's query text is usable for fingerprinting.

    Remove the truncation check from the n_plus_one db detector: source spans
    are accepted even when truncated; repeated spans must not end in "...".
    """
    query = span.get("description")
    if not query:
        return False
    if is_source:
        return True
    return not query.endswith("...")
def get_valid_db_span_description(span: Span) -> str | None:
    """
    Return a usable description for a DB span, or None when it should be skipped.

    For MongoDB spans, we use the `description` provided by Relay since it re-includes the collection name.
    See https://github.com/getsentry/relay/blob/25.3.0/relay-event-normalization/src/normalize/span/description/mod.rs#L68-L82
    Explicitly require a '{' in MongoDB spans to only trigger on queries rather than client calls.
    """
    sentry_tags = span.get("sentry_tags", {})
    description = span.get("description", "")
    # Connection spans can have `op` as `db` but we don't want to trigger on them.
    if "pg-pool.connect" in description:
        return None
    # Trigger pathway on `mongodb`, `mongoose`, etc...
    if "mongo" in sentry_tags.get("system", ""):
        relay_description = sentry_tags.get("description")
        if relay_description and "{" in relay_description:
            return relay_description
        return None
    return description
def are_spans_equivalent(a: Span, b: Span) -> bool:
    """
    Returns True if two DB spans are sufficiently similar for grouping N+1 DB Spans
    """
    base_checks = (
        a.get("hash") == b.get("hash")
        and bool(a.get("description"))
        and a.get("description") == b.get("description")
    )
    # We perform more checks for MongoDB spans
    if a.get("sentry_tags", {}).get("system") == "mongodb":
        # Relay augments MongoDB span descriptions with more collection data.
        # We can use this for more accurate grouping.
        return (
            a.get("sentry_tags", {}).get("description")
            == b.get("sentry_tags", {}).get("description")
            and base_checks
        )
    return base_checks
| NPlusOneDBSpanExperimentalDetector |
python | openai__openai-python | tests/api_resources/containers/test_files.py | {
"start": 7984,
"end": 16094
} | class ____:
    # Each test runs under loose/strict response validation and the aiohttp
    # transport (indirect parametrization of the async_client fixture).
    parametrize = pytest.mark.parametrize(
        "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
    )

    # --- create ---

    @parametrize
    async def test_method_create(self, async_client: AsyncOpenAI) -> None:
        file = await async_client.containers.files.create(
            container_id="container_id",
        )
        assert_matches_type(FileCreateResponse, file, path=["response"])

    @parametrize
    async def test_method_create_with_all_params(self, async_client: AsyncOpenAI) -> None:
        file = await async_client.containers.files.create(
            container_id="container_id",
            file=b"raw file contents",
            file_id="file_id",
        )
        assert_matches_type(FileCreateResponse, file, path=["response"])

    @parametrize
    async def test_raw_response_create(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.containers.files.with_raw_response.create(
            container_id="container_id",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        file = response.parse()
        assert_matches_type(FileCreateResponse, file, path=["response"])

    @parametrize
    async def test_streaming_response_create(self, async_client: AsyncOpenAI) -> None:
        async with async_client.containers.files.with_streaming_response.create(
            container_id="container_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            file = await response.parse()
            assert_matches_type(FileCreateResponse, file, path=["response"])

        # Leaving the context manager must close the streamed response.
        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_create(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
            await async_client.containers.files.with_raw_response.create(
                container_id="",
            )

    # --- retrieve ---

    @parametrize
    async def test_method_retrieve(self, async_client: AsyncOpenAI) -> None:
        file = await async_client.containers.files.retrieve(
            file_id="file_id",
            container_id="container_id",
        )
        assert_matches_type(FileRetrieveResponse, file, path=["response"])

    @parametrize
    async def test_raw_response_retrieve(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.containers.files.with_raw_response.retrieve(
            file_id="file_id",
            container_id="container_id",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        file = response.parse()
        assert_matches_type(FileRetrieveResponse, file, path=["response"])

    @parametrize
    async def test_streaming_response_retrieve(self, async_client: AsyncOpenAI) -> None:
        async with async_client.containers.files.with_streaming_response.retrieve(
            file_id="file_id",
            container_id="container_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            file = await response.parse()
            assert_matches_type(FileRetrieveResponse, file, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_retrieve(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
            await async_client.containers.files.with_raw_response.retrieve(
                file_id="file_id",
                container_id="",
            )

        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
            await async_client.containers.files.with_raw_response.retrieve(
                file_id="",
                container_id="container_id",
            )

    # --- list ---

    @parametrize
    async def test_method_list(self, async_client: AsyncOpenAI) -> None:
        file = await async_client.containers.files.list(
            container_id="container_id",
        )
        assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])

    @parametrize
    async def test_method_list_with_all_params(self, async_client: AsyncOpenAI) -> None:
        file = await async_client.containers.files.list(
            container_id="container_id",
            after="after",
            limit=0,
            order="asc",
        )
        assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])

    @parametrize
    async def test_raw_response_list(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.containers.files.with_raw_response.list(
            container_id="container_id",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        file = response.parse()
        assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])

    @parametrize
    async def test_streaming_response_list(self, async_client: AsyncOpenAI) -> None:
        async with async_client.containers.files.with_streaming_response.list(
            container_id="container_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            file = await response.parse()
            assert_matches_type(AsyncCursorPage[FileListResponse], file, path=["response"])

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_list(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
            await async_client.containers.files.with_raw_response.list(
                container_id="",
            )

    # --- delete ---

    @parametrize
    async def test_method_delete(self, async_client: AsyncOpenAI) -> None:
        file = await async_client.containers.files.delete(
            file_id="file_id",
            container_id="container_id",
        )
        assert file is None

    @parametrize
    async def test_raw_response_delete(self, async_client: AsyncOpenAI) -> None:
        response = await async_client.containers.files.with_raw_response.delete(
            file_id="file_id",
            container_id="container_id",
        )

        assert response.is_closed is True
        assert response.http_request.headers.get("X-Stainless-Lang") == "python"
        file = response.parse()
        assert file is None

    @parametrize
    async def test_streaming_response_delete(self, async_client: AsyncOpenAI) -> None:
        async with async_client.containers.files.with_streaming_response.delete(
            file_id="file_id",
            container_id="container_id",
        ) as response:
            assert not response.is_closed
            assert response.http_request.headers.get("X-Stainless-Lang") == "python"

            file = await response.parse()
            assert file is None

        assert cast(Any, response.is_closed) is True

    @parametrize
    async def test_path_params_delete(self, async_client: AsyncOpenAI) -> None:
        with pytest.raises(ValueError, match=r"Expected a non-empty value for `container_id` but received ''"):
            await async_client.containers.files.with_raw_response.delete(
                file_id="file_id",
                container_id="",
            )

        with pytest.raises(ValueError, match=r"Expected a non-empty value for `file_id` but received ''"):
            await async_client.containers.files.with_raw_response.delete(
                file_id="",
                container_id="container_id",
            )
| TestAsyncFiles |
python | sphinx-doc__sphinx | tests/roots/test-add_enumerable_node/enumerable_node.py | {
"start": 527,
"end": 848
} | class ____(nodes.Element):
    # Custom docutils node used to exercise Sphinx's add_enumerable_node API;
    # behavior lives in the visitor function below.
    pass
def visit_numbered_text(self, node):
    # HTML visitor: render a <div> containing the node's figure number and
    # its 'title' attribute, then raise SkipNode so children (if any) are
    # not visited — the title is emitted directly here.
    self.body.append(self.starttag(node, 'div'))
    self.add_fignumber(node)
    self.body.append(node['title'])
    self.body.append('</div>')
    raise nodes.SkipNode
def get_title(node):  # NoQA: FURB118
    """Return the ``title`` attribute of *node*."""
    return node['title']
| numbered_text |
python | django__django | tests/admin_docs/test_views.py | {
"start": 23837,
"end": 23893
} | class ____(models.Field):
    # Intentionally bare: a model field with no description/docstring, used
    # to test admindocs rendering of fields lacking documentation.
    pass
| DescriptionLackingField |
python | Pylons__pyramid | src/pyramid/httpexceptions.py | {
"start": 37068,
"end": 37487
} | class ____(HTTPServerError):
    """
    subclass of :class:`~HTTPServerError`

    This indicates that the server does not support, or refuses to
    support, the HTTP protocol version that was used in the request
    message.

    code: 505, title: HTTP Version Not Supported
    """

    # Status code / status line / default body text used when rendering
    # this exception as an HTTP response.
    code = 505
    title = 'HTTP Version Not Supported'
    explanation = 'The HTTP version is not supported.'
| HTTPVersionNotSupported |
python | ethereum__web3.py | tests/integration/go_ethereum/test_goethereum_ipc.py | {
"start": 3981,
"end": 4068
} | class ____(PersistentConnectionProviderTest):
    # Runs the shared persistent-connection provider test suite unchanged;
    # presumably the fixtures bind it to the go-ethereum IPC provider —
    # confirm against the module's fixture definitions.
    pass
| TestPersistentConnectionProviderTest |
python | has2k1__plotnine | plotnine/geoms/geom_area.py | {
"start": 182,
"end": 818
} | class ____(geom_ribbon):
    """
    Area plot

    {usage}

    An area plot is a special case of geom_ribbon,
    where the minimum of the range is fixed to 0,
    and the position adjustment defaults to 'stack'.

    Parameters
    ----------
    {common_parameters}

    See Also
    --------
    plotnine.geom_ribbon
    """

    REQUIRED_AES = {"x", "y"}
    # Inherit ribbon defaults, but stack areas and outline only the top edge.
    DEFAULT_PARAMS = {
        **geom_ribbon.DEFAULT_PARAMS,
        "position": "stack",
        "outline_type": "upper",
    }

    def setup_data(self, data: pd.DataFrame) -> pd.DataFrame:
        # An area is a ribbon anchored at 0: lower bound fixed at 0,
        # upper bound taken from the y aesthetic. Mutates `data` in place.
        data["ymin"] = 0
        data["ymax"] = data["y"]
        return data
| geom_area |
python | modin-project__modin | modin/config/envvars.py | {
"start": 21667,
"end": 22398
} | class ____(EnvironmentVariable, type=bool):
    """
    Whether automatic backend switching is allowed.

    When this flag is set, a Modin backend can attempt to automatically choose an appropriate backend
    for different operations based on features of the input data. When disabled, backends should
    avoid implicit backend switching outside of explicit operations like `to_pandas` and `to_ray`.
    """

    # Environment variable consulted for the value; off by default.
    varname = "MODIN_AUTO_SWITCH_BACKENDS"
    default = False

    @classmethod
    def enable(cls) -> None:
        """Enable automatic backend switching."""
        cls.put(True)

    @classmethod
    def disable(cls) -> None:
        """Disable automatic backend switching."""
        cls.put(False)
| AutoSwitchBackend |
python | facelessuser__pymdown-extensions | pymdownx/betterem.py | {
"start": 8444,
"end": 9378
} | class ____(util.PatternSequenceProcessor):
    """Emphasis processor for handling strong and em matches."""

    # Each entry maps a compiled underscore-emphasis regex to a handler kind
    # ('double', 'double2', 'single') and the tag(s) to emit.
    # NOTE(review): entries appear ordered from most to least specific —
    # confirm ordering matters before rearranging.
    PATTERNS = [
        util.PatSeqItem(re.compile(UNDER_STRONG_EM, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
        util.PatSeqItem(re.compile(UNDER_EM_STRONG, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
        util.PatSeqItem(re.compile(UNDER_STRONG_EM2, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
        util.PatSeqItem(re.compile(UNDER_STRONG_EM3, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
        util.PatSeqItem(re.compile(UNDER_STRONG, re.DOTALL | re.UNICODE), 'single', 'strong'),
        util.PatSeqItem(re.compile(UNDER_EM_STRONG2, re.DOTALL | re.UNICODE), 'double2', 'em,strong'),
        util.PatSeqItem(re.compile(UNDER_EM2, re.DOTALL | re.UNICODE), 'single', 'em', True),
        util.PatSeqItem(re.compile(UNDER_EM, re.DOTALL | re.UNICODE), 'single', 'em')
    ]
| UnderscoreProcessor |
python | getsentry__sentry | src/sentry/integrations/slack/sdk_client.py | {
"start": 2483,
"end": 2822
} | class ____(type):
    # Metaclass that, at class-creation time, walks the grandparent bases of
    # the new class and applies wrap_api_call to each.
    def __new__(meta, name, bases, dct):
        cls = super().__new__(meta, name, bases, dct)
        for parent in cls.__bases__:
            for base in parent.__bases__:
                # this wraps the api_call function in the slack_sdk BaseClient class
                wrap_api_call(base)
        return cls
| MetaClass |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/services/public/task_instances.py | {
"start": 5613,
"end": 13839
} | class ____(BulkService[BulkTaskInstanceBody]):
    """Service for handling bulk operations on task instances."""

    def __init__(
        self,
        session: Session,
        request: BulkBody[BulkTaskInstanceBody],
        dag_id: str,
        dag_run_id: str,
        dag_bag: DagBagDep,
        user: GetUserDep,
    ):
        super().__init__(session, request)
        self.dag_id = dag_id
        self.dag_run_id = dag_run_id
        self.dag_bag = dag_bag
        self.user = user

    def categorize_task_instances(
        self, task_keys: set[tuple[str, int]]
    ) -> tuple[dict[tuple[str, int], TI], set[tuple[str, int]], set[tuple[str, int]]]:
        """
        Categorize the given task_ids into matched_task_keys and not_found_task_keys based on existing task_ids.

        :param task_keys: set of task_keys (tuple of task_id and map_index)
        :return: tuple of (task_instances_map, matched_task_keys, not_found_task_keys)
        """
        query = select(TI).where(
            TI.dag_id == self.dag_id,
            TI.run_id == self.dag_run_id,
            TI.task_id.in_([task_id for task_id, _ in task_keys]),
        )
        task_instances = self.session.scalars(query).all()
        # Unmapped task instances are normalized to map_index == -1.
        task_instances_map = {
            (ti.task_id, ti.map_index if ti.map_index is not None else -1): ti for ti in task_instances
        }

        matched_task_keys = {
            (task_id, map_index)
            for (task_id, map_index) in task_instances_map.keys()
            if (task_id, map_index) in task_keys
        }
        not_found_task_keys = {(task_id, map_index) for task_id, map_index in task_keys} - matched_task_keys
        return task_instances_map, matched_task_keys, not_found_task_keys

    def handle_bulk_create(
        self, action: BulkCreateAction[BulkTaskInstanceBody], results: BulkActionResponse
    ) -> None:
        # Task instances are created by the scheduler, never via this API.
        results.errors.append(
            {
                "error": "Task instances bulk create is not supported",
                "status_code": status.HTTP_405_METHOD_NOT_ALLOWED,
            }
        )

    def handle_bulk_update(
        self, action: BulkUpdateAction[BulkTaskInstanceBody], results: BulkActionResponse
    ) -> None:
        """Bulk Update Task Instances."""
        to_update_task_keys = {
            (task_instance.task_id, task_instance.map_index if task_instance.map_index is not None else -1)
            for task_instance in action.entities
        }
        _, _, not_found_task_keys = self.categorize_task_instances(to_update_task_keys)
        try:
            for task_instance_body in action.entities:
                task_key = (
                    task_instance_body.task_id,
                    task_instance_body.map_index if task_instance_body.map_index is not None else -1,
                )
                if task_key in not_found_task_keys:
                    if action.action_on_non_existence == BulkActionNotOnExistence.FAIL:
                        raise HTTPException(
                            status_code=status.HTTP_404_NOT_FOUND,
                            detail=f"The Task Instance with dag_id: `{self.dag_id}`, run_id: `{self.dag_run_id}`, task_id: `{task_instance_body.task_id}` and map_index: `{task_instance_body.map_index}` was not found",
                        )
                    if action.action_on_non_existence == BulkActionNotOnExistence.SKIP:
                        continue

                # Validate the patch payload against the DAG before applying.
                dag, tis, data = _patch_ti_validate_request(
                    dag_id=self.dag_id,
                    dag_run_id=self.dag_run_id,
                    task_id=task_instance_body.task_id,
                    dag_bag=self.dag_bag,
                    body=task_instance_body,
                    session=self.session,
                    map_index=task_instance_body.map_index,
                    update_mask=None,
                )

                # Only `new_state` and `note` are updatable in bulk.
                for key, _ in data.items():
                    if key == "new_state":
                        _patch_task_instance_state(
                            task_id=task_instance_body.task_id,
                            dag_run_id=self.dag_run_id,
                            dag=dag,
                            task_instance_body=task_instance_body,
                            session=self.session,
                            data=data,
                        )
                    elif key == "note":
                        _patch_task_instance_note(
                            task_instance_body=task_instance_body, tis=tis, user=self.user
                        )

                results.success.append(task_instance_body.task_id)

        except ValidationError as e:
            results.errors.append({"error": f"{e.errors()}"})

        except HTTPException as e:
            results.errors.append({"error": f"{e.detail}", "status_code": e.status_code})

    def handle_bulk_delete(
        self, action: BulkDeleteAction[BulkTaskInstanceBody], results: BulkActionResponse
    ) -> None:
        """Bulk delete task instances."""
        # Entities are either bare task_id strings (delete every map index)
        # or bodies that may pin a specific map_index.
        delete_all_map_indexes: set[str] = set()
        delete_specific_task_keys: set[tuple[str, int]] = set()

        for entity in action.entities:
            if isinstance(entity, str):
                # String task ID - remove all task instances for this task
                delete_all_map_indexes.add(entity)
            else:
                # BulkTaskInstanceBody object
                if entity.map_index is None:
                    delete_all_map_indexes.add(entity.task_id)
                else:
                    delete_specific_task_keys.add((entity.task_id, entity.map_index))

        try:
            # Handle deletion of specific (task_id, map_index) pairs
            if delete_specific_task_keys:
                _, matched_task_keys, not_found_task_keys = self.categorize_task_instances(
                    delete_specific_task_keys
                )
                not_found_task_ids = [f"{task_id}[{map_index}]" for task_id, map_index in not_found_task_keys]

                if action.action_on_non_existence == BulkActionNotOnExistence.FAIL and not_found_task_keys:
                    raise HTTPException(
                        status_code=status.HTTP_404_NOT_FOUND,
                        detail=f"The task instances with these task_ids: {not_found_task_ids} were not found",
                    )

                for task_id, map_index in matched_task_keys:
                    result = (
                        self.session.execute(
                            select(TI).where(
                                TI.task_id == task_id,
                                TI.dag_id == self.dag_id,
                                TI.run_id == self.dag_run_id,
                                TI.map_index == map_index,
                            )
                        )
                        .scalars()
                        .one_or_none()
                    )
                    if result:
                        existing_task_instance = result
                        self.session.delete(existing_task_instance)
                        results.success.append(f"{task_id}[{map_index}]")

            # Handle deletion of all map indexes for certain task_ids
            for task_id in delete_all_map_indexes:
                all_task_instances = self.session.scalars(
                    select(TI).where(
                        TI.task_id == task_id,
                        TI.dag_id == self.dag_id,
                        TI.run_id == self.dag_run_id,
                    )
                ).all()

                if not all_task_instances and action.action_on_non_existence == BulkActionNotOnExistence.FAIL:
                    raise HTTPException(
                        status_code=status.HTTP_404_NOT_FOUND,
                        detail=f"No task instances found for task_id: {task_id}",
                    )

                for ti in all_task_instances:
                    self.session.delete(ti)
                if all_task_instances:
                    results.success.append(task_id)

        except HTTPException as e:
            results.errors.append({"error": f"{e.detail}", "status_code": e.status_code})
| BulkTaskInstanceService |
python | google__pytype | pytype/blocks/blocks.py | {
"start": 564,
"end": 1185
} | class ____:
"""Unpack the code.co_localsplus* attributes in 3.11+."""
# Cell kinds (cpython/Include/internal/pycore_code.h)
CO_FAST_LOCAL = 0x20
CO_FAST_CELL = 0x40
CO_FAST_FREE = 0x80
def __init__(self, code: pycnite.types.CodeType311):
table = list(zip(code.co_localsplusnames, code.co_localspluskinds))
filter_names = lambda k: tuple(name for name, kind in table if kind & k)
self.co_varnames = filter_names(self.CO_FAST_LOCAL)
self.co_cellvars = filter_names(self.CO_FAST_CELL)
self.co_freevars = filter_names(self.CO_FAST_FREE)
self.localsplus = code.co_localsplusnames
| _Locals311 |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 166519,
"end": 180484
} | class ____(Qwen2_5OmniPreTrainedModel, GenerationMixin):
config: Qwen2_5OmniConfig
output_modalities = ("audio", "text")
_no_split_modules = [
"Qwen2_5OmniTalkerForConditionalGeneration",
"Qwen2_5OmniToken2WavModel",
]
def __init__(self, config):
super().__init__(config)
self.thinker = Qwen2_5OmniThinkerForConditionalGeneration(config.thinker_config)
self.has_talker = config.enable_audio_output
self.speaker_map = {}
if config.enable_audio_output:
self.enable_talker()
self.post_init()
def enable_talker(self):
self.talker = Qwen2_5OmniTalkerForConditionalGeneration(self.config.talker_config)
self.token2wav = Qwen2_5OmniToken2WavModel(self.config.token2wav_config)
self.token2wav.float()
self.has_talker = True
def load_speakers(self, path):
check_torch_load_is_safe()
for key, value in torch.load(path, weights_only=True).items():
self.speaker_map[key] = value
logger.info(f"Speaker {list(self.speaker_map.keys())} loaded")
def disable_talker(self):
if hasattr(self, "talker"):
del self.talker
if hasattr(self, "token2wav"):
del self.token2wav
self.has_talker = False
@classmethod
def from_pretrained(
cls,
pretrained_model_name_or_path,
*model_args,
config=None,
cache_dir=None,
ignore_mismatched_sizes=False,
force_download=False,
local_files_only=False,
token=None,
revision="main",
use_safetensors=None,
weights_only=True,
**kwargs,
):
model = super().from_pretrained(
pretrained_model_name_or_path,
*model_args,
config=config,
cache_dir=cache_dir,
ignore_mismatched_sizes=ignore_mismatched_sizes,
force_download=force_download,
local_files_only=local_files_only,
token=token,
revision=revision,
use_safetensors=use_safetensors,
weights_only=weights_only,
**kwargs,
)
spk_path = cached_file(
pretrained_model_name_or_path,
"spk_dict.pt",
subfolder=kwargs.pop("subfolder", None),
cache_dir=kwargs.pop("cache_dir", None),
force_download=kwargs.pop("force_download", False),
proxies=kwargs.pop("proxies", None),
local_files_only=kwargs.pop("local_files_only", False),
token=token,
revision=kwargs.pop("revision", None),
)
if spk_path is None:
raise ValueError(f"""{pretrained_model_name_or_path}/{spk_path} not exists""")
model.load_speakers(spk_path)
return model
@torch.no_grad()
@deprecate_kwarg("return_audio", version="v5", new_name="generation_mode")
# TODO: raushan, defaults should be saved in generation config
def generate(
self,
input_ids: Optional[torch.Tensor] = None,
speaker: str = "Chelsie",
use_audio_in_video: bool = False,
thinker_max_new_tokens: int = 1024,
talker_max_new_tokens: int = 4096,
talker_do_sample: bool = True,
talker_top_k: int = 40,
talker_top_p: float = 0.8,
talker_temperature: float = 0.9,
talker_eos_token_id: list[int] = [8292, 8294],
talker_repetition_penalty: float = 1.05,
**kwargs,
):
r"""
Generate text response and audio from input.
Args:
input_ids (`Optional[torch.Tensor]`, *optional*):
Input ids, should obtain from processor.
speaker (`str` , defaults to "Chelsie"):
Which speaker should be used in audio response.
use_audio_in_video (`bool`, defaults to False):
Whether or not use audio track in video, should same as the parameter in `process_audio_info`.
generation_mode (`Optional[str]`, *optional*):
Whether or not return response in audio format. When `generation_mode="audio"`, this parameter is same as `config.enable_audio_output`.
kwargs (*optional*):
- Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model.
- With a *thinker_*, *talker_*, *token2wav_* prefix, they will be input for the `generate` method of the
thinker, talker and token2wav respectively. It has the priority over the keywords without a prefix.
Returns:
When `return_audio=False`:
- **Text** (`torch.Tensor`): Generated text token sequence.
When `return_audio=True`:
- **Text** (`torch.Tensor`): Generated text token sequence.
- **Audio waveform** (`torch.Tensor`): Generated audio waveform.
"""
# check `False` on purpose because the paramter can be `str/bool`. This is needed for BC
generation_mode = kwargs.pop("generation_mode", None)
return_audio = generation_mode != "text" and generation_mode is not False
if speaker not in self.speaker_map:
raise ValueError(f"{speaker} is not available, available speakers: {self.speaker_map.keys()}")
if return_audio and not self.has_talker:
raise ValueError(
"Cannot use talker when talker module not initialized. Use `enable_talker` method or set enable_talker in config to enable talker."
)
if return_audio is None:
return_audio = self.has_talker
if input_ids.shape[0] != 1 and return_audio:
raise NotImplementedError("Qwen2.5-Omni currently does not support batched inference with audio output")
shared_kwargs = {"use_audio_in_video": use_audio_in_video}
thinker_kwargs = {
"max_new_tokens": thinker_max_new_tokens,
}
talker_kwargs = {
"max_new_tokens": talker_max_new_tokens,
"do_sample": talker_do_sample,
"top_k": talker_top_k,
"top_p": talker_top_p,
"temperature": talker_temperature,
"eos_token_id": talker_eos_token_id,
"repetition_penalty": talker_repetition_penalty,
}
token2wav_kwargs = {}
for key, value in kwargs.items():
if key.startswith("thinker_"):
thinker_kwargs[key[len("thinker_") :]] = value
elif key.startswith("talker_"):
talker_kwargs[key[len("talker_") :]] = value
elif key.startswith("token2wav_"):
token2wav_kwargs[key[len("token2wav_") :]] = value
# Process special input values
elif key == "feature_attention_mask":
thinker_kwargs[key] = value
talker_kwargs["audio_feature_lengths"] = torch.sum(value, dim=1)
elif key == "input_features" or key == "attention_mask":
thinker_kwargs[key] = value
# Put other key to shared kwargs
else:
shared_kwargs[key] = value
# Merge kwargs
for key, value in shared_kwargs.items():
if key not in thinker_kwargs:
thinker_kwargs[key] = value
if key not in talker_kwargs:
talker_kwargs[key] = value
if key not in token2wav_kwargs:
token2wav_kwargs[key] = value
speaker_params = self.speaker_map[speaker]
# 1. Generate from thinker module
generate_audio = return_audio and self.has_talker
if generate_audio:
thinker_kwargs["output_hidden_states"] = True
thinker_kwargs["return_dict_in_generate"] = True
thinker_result = self.thinker.generate(input_ids=input_ids, **thinker_kwargs)
if not generate_audio:
return thinker_result
# 2. Generate speech tokens from talker module
embeds_to_talker = thinker_result.hidden_states[0][0].clone().to(input_ids.device)
if thinker_kwargs.get("input_features") is not None:
audio_ids_mask = input_ids == self.config.thinker_config.audio_token_index
audio_mask = audio_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker)
audio_mask_tensor = torch.zeros(
[audio_ids_mask.sum(), embeds_to_talker.shape[-1]],
dtype=embeds_to_talker.dtype,
device=input_ids.device,
)
embeds_to_talker.masked_scatter_(audio_mask, audio_mask_tensor)
if thinker_kwargs.get("pixel_values") is not None:
image_ids_mask = input_ids == self.config.thinker_config.image_token_index
image_mask = image_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker)
image_mask_tensor = torch.zeros(
[image_ids_mask.sum(), embeds_to_talker.shape[-1]],
dtype=embeds_to_talker.dtype,
device=input_ids.device,
)
embeds_to_talker.masked_scatter_(image_mask, image_mask_tensor)
if thinker_kwargs.get("pixel_values_videos") is not None:
video_ids_mask = input_ids == self.config.thinker_config.video_token_index
video_mask = video_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker)
video_mask_tensor = torch.zeros(
[video_ids_mask.sum(), embeds_to_talker.shape[-1]],
dtype=embeds_to_talker.dtype,
device=input_ids.device,
)
embeds_to_talker.masked_scatter_(video_mask, video_mask_tensor)
processed_thinker_hidden = (
(embeds_to_talker,) + thinker_result.hidden_states[0][1:],
) + thinker_result.hidden_states[1:]
thinker_generate_ids = thinker_result.sequences[:, input_ids.size(1) :].to(input_ids.device)
thinker_token_embeds = [
token_hidden_states[0].to(input_ids.device) for token_hidden_states in processed_thinker_hidden
]
thinker_hidden_states = [
token_hidden_states[-1].to(input_ids.device) for token_hidden_states in processed_thinker_hidden
]
talker_text_bos_token = speaker_params["bos_token"]
talker_input_text_ids = torch.cat(
[
input_ids,
torch.tensor([[talker_text_bos_token]], dtype=torch.long, device=input_ids.device),
thinker_generate_ids[:, :1],
],
dim=-1,
)
talker_input_ids = torch.cat(
[
torch.full_like(input_ids, fill_value=self.talker.codec_mask_token),
torch.tensor([[self.talker.codec_pad_token]], dtype=torch.long, device=input_ids.device),
torch.tensor([[self.talker.codec_bos_token]], dtype=torch.long, device=input_ids.device),
],
dim=1,
)
thinker_embed_tokens = self.thinker.get_input_embeddings()
thinker_reply_part = torch.cat(thinker_hidden_states[1:], dim=1) + torch.cat(thinker_token_embeds[1:], dim=1)
talker_inputs_embeds = thinker_hidden_states[0] + thinker_token_embeds[0]
talker_text_bos_token = torch.tensor([[talker_text_bos_token]], dtype=torch.long, device=input_ids.device)
talker_text_bos_embed = thinker_embed_tokens(talker_text_bos_token).to(input_ids.device)
talker_inputs_embeds = torch.cat(
[
talker_inputs_embeds,
talker_text_bos_embed,
thinker_reply_part[:, :1, :],
],
dim=1,
)
eos_token = torch.tensor([[self.talker.text_eos_token]], dtype=torch.long, device=input_ids.device)
eos_embedding = thinker_embed_tokens(eos_token).to(input_ids.device)
pad_token = torch.tensor([[self.talker.text_pad_token]], dtype=torch.long, device=input_ids.device)
pad_embedding = thinker_embed_tokens(pad_token).to(input_ids.device)
thinker_reply_part = torch.cat(
[
thinker_reply_part[:, 1:, :],
eos_embedding,
pad_embedding,
],
dim=1,
)
talker_attention_mask = None
if "attention_mask" in kwargs:
talker_attention_mask = torch.cat(
[kwargs["attention_mask"], kwargs["attention_mask"].new_ones((1, 2))], dim=1
).to(input_ids.device)
talker_result = self.talker.generate(
input_ids=talker_input_ids,
input_text_ids=talker_input_text_ids,
thinker_reply_part=thinker_reply_part,
inputs_embeds=talker_inputs_embeds,
attention_mask=talker_attention_mask,
suppress_tokens=[self.talker.codec_bos_token],
**{k: (v.to(input_ids.device) if torch.is_tensor(v) else v) for k, v in talker_kwargs.items()},
)
talker_generate_codes = talker_result[:, talker_input_ids.shape[1] : -1]
# 3. Generate wavs from code
if self.token2wav.dtype != torch.float:
self.token2wav.float()
wav = self.token2wav(
talker_generate_codes.to(input_ids.device),
conditioning=speaker_params["cond"].to(input_ids.device).float(),
reference_mel=speaker_params["ref_mel"].to(input_ids.device).float(),
**token2wav_kwargs,
)
return thinker_result.sequences, wav.float()
__all__ = [
"Qwen2_5OmniForConditionalGeneration",
"Qwen2_5OmniThinkerTextModel",
"Qwen2_5OmniThinkerForConditionalGeneration",
"Qwen2_5OmniTalkerModel",
"Qwen2_5OmniTalkerForConditionalGeneration",
"Qwen2_5OmniToken2WavDiTModel",
"Qwen2_5OmniToken2WavBigVGANModel",
"Qwen2_5OmniToken2WavModel",
"Qwen2_5OmniPreTrainedModel",
"Qwen2_5OmniPreTrainedModelForConditionalGeneration",
]
| Qwen2_5OmniForConditionalGeneration |
python | sqlalchemy__sqlalchemy | test/dialect/sqlite/test_dialect.py | {
"start": 5964,
"end": 13360
} | class ____(
fixtures.TestBase, AssertsExecutionResults, AssertsCompiledSQL
):
__only_on__ = "sqlite"
__backend__ = True
def test_3_7_16_warning(self):
with expect_warnings(
r"SQLite version \(3, 2, 8\) is older than 3.7.16, and "
"will not support right nested joins"
):
sqlite.dialect(
dbapi=mock.Mock(
version_info=(2, 6, 0), sqlite_version_info=(3, 2, 8)
)
)
@testing.only_on("sqlite+pysqlcipher")
def test_pysqlcipher_connects(self):
"""test #6586"""
str_url = str(testing.db.url)
e = create_engine(str_url)
with e.connect() as conn:
eq_(conn.scalar(text("select 1")), 1)
@testing.provide_metadata
def test_extra_reserved_words(self, connection):
"""Tests reserved words in identifiers.
'true', 'false', and 'column' are undocumented reserved words
when used as column identifiers (as of 3.5.1). Covering them
here to ensure they remain in place if the dialect's
reserved_words set is updated in the future."""
t = Table(
"reserved",
self.metadata,
Column("safe", Integer),
Column("true", Integer),
Column("false", Integer),
Column("column", Integer),
Column("exists", Integer),
)
self.metadata.create_all(connection)
connection.execute(t.insert(), dict(safe=1))
result = connection.execute(t.select())
eq_(list(result), [(1, None, None, None, None)])
@testing.provide_metadata
def test_quoted_identifiers_functional_one(self):
"""Tests autoload of tables created with quoted column names."""
metadata = self.metadata
exec_sql(
testing.db,
"""CREATE TABLE "django_content_type" (
"id" integer NOT NULL PRIMARY KEY,
"django_stuff" text NULL
)
""",
)
exec_sql(
testing.db,
"""
CREATE TABLE "django_admin_log" (
"id" integer NOT NULL PRIMARY KEY,
"action_time" datetime NOT NULL,
"content_type_id" integer NULL
REFERENCES "django_content_type" ("id"),
"object_id" text NULL,
"change_message" text NOT NULL
)
""",
)
table1 = Table("django_admin_log", metadata, autoload_with=testing.db)
table2 = Table(
"django_content_type", metadata, autoload_with=testing.db
)
j = table1.join(table2)
assert j.onclause.compare(table1.c.content_type_id == table2.c.id)
@testing.provide_metadata
def test_quoted_identifiers_functional_two(self):
"""test the edgiest of edge cases, quoted table/col names
that start and end with quotes.
SQLite claims to have fixed this in
https://www.sqlite.org/src/info/600482d161, however
it still fails if the FK points to a table name that actually
has quotes as part of its name.
"""
metadata = self.metadata
exec_sql(
testing.db,
r'''CREATE TABLE """a""" (
"""id""" integer NOT NULL PRIMARY KEY
)
''',
)
# unfortunately, still can't do this; sqlite quadruples
# up the quotes on the table name here for pragma foreign_key_list
# exec_sql(testing.db,r'''
# CREATE TABLE """b""" (
# """id""" integer NOT NULL PRIMARY KEY,
# """aid""" integer NULL
# REFERENCES """a""" ("""id""")
# )
# ''')
table1 = Table(r'"a"', metadata, autoload_with=testing.db)
assert '"id"' in table1.c
@testing.provide_metadata
def test_description_encoding(self, connection):
t = Table(
"x",
self.metadata,
Column("méil", Integer, primary_key=True),
Column("\u6e2c\u8a66", Integer),
)
self.metadata.create_all(testing.db)
result = connection.execute(t.select())
assert "méil" in result.keys()
assert "\u6e2c\u8a66" in result.keys()
def test_pool_class(self):
e = create_engine("sqlite+pysqlite://")
assert e.pool.__class__ is pool.SingletonThreadPool
e = create_engine("sqlite+pysqlite:///:memory:")
assert e.pool.__class__ is pool.SingletonThreadPool
e = create_engine(
"sqlite+pysqlite:///file:foo.db?mode=memory&uri=true"
)
assert e.pool.__class__ is pool.SingletonThreadPool
e = create_engine("sqlite+pysqlite:///foo.db")
# changed as of 2.0 #7490
assert e.pool.__class__ is pool.QueuePool
@combinations(
(
"sqlite:///foo.db", # file path is absolute
([os.path.abspath("foo.db")], {"check_same_thread": False}),
),
(
"sqlite:////abs/path/to/foo.db",
(
[os.path.abspath("/abs/path/to/foo.db")],
{"check_same_thread": False},
),
),
("sqlite://", ([":memory:"], {"check_same_thread": True})),
(
"sqlite:///?check_same_thread=true",
([":memory:"], {"check_same_thread": True}),
),
(
"sqlite:///file:path/to/database?"
"check_same_thread=true&timeout=10&mode=ro&nolock=1&uri=true",
(
["file:path/to/database?mode=ro&nolock=1"],
{"check_same_thread": True, "timeout": 10.0, "uri": True},
),
),
(
"sqlite:///file:path/to/database?mode=ro&uri=true",
(
["file:path/to/database?mode=ro"],
{"uri": True, "check_same_thread": False},
),
),
(
"sqlite:///file:path/to/database?uri=true",
(
["file:path/to/database"],
{"uri": True, "check_same_thread": False},
),
),
)
def test_connect_args(self, url, expected):
"""test create_connect_args scenarios including support for uri=True"""
d = pysqlite_dialect.dialect()
url = make_url(url)
eq_(d.create_connect_args(url), expected)
@testing.combinations(
("no_persisted", "", "ignore"),
("persisted_none", "", None),
("persisted_true", " STORED", True),
("persisted_false", " VIRTUAL", False),
id_="iaa",
)
def test_column_computed(self, text, persisted):
m = MetaData()
kwargs = {"persisted": persisted} if persisted != "ignore" else {}
t = Table(
"t",
m,
Column("x", Integer),
Column("y", Integer, Computed("x + 2", **kwargs)),
)
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE t (x INTEGER,"
" y INTEGER GENERATED ALWAYS AS (x + 2)%s)" % text,
)
@testing.combinations(
(func.localtimestamp(),),
(func.now(),),
(func.char_length("test"),),
(func.aggregate_strings("abc", ","),),
argnames="fn",
)
def test_builtin_functions_roundtrip(self, fn, connection):
connection.execute(select(fn))
| DialectTest |
python | sympy__sympy | sympy/utilities/codegen.py | {
"start": 39317,
"end": 47296
} | class ____(CodeGen):
"""Generator for Fortran 95 code
The .write() method inherited from CodeGen will output a code file and
an interface file, <prefix>.f90 and <prefix>.h respectively.
"""
code_extension = "f90"
interface_extension = "h"
def __init__(self, project='project', printer=None):
super().__init__(project)
self.printer = printer or FCodePrinter()
def _get_header(self):
"""Writes a common header for the generated files."""
code_lines = []
code_lines.append("!" + "*"*78 + '\n')
tmp = header_comment % {"version": sympy_version,
"project": self.project}
for line in tmp.splitlines():
code_lines.append("!*%s*\n" % line.center(76))
code_lines.append("!" + "*"*78 + '\n')
return code_lines
def _preprocessor_statements(self, prefix):
return []
def _get_routine_opening(self, routine):
"""Returns the opening statements of the fortran routine."""
code_list = []
if len(routine.results) > 1:
raise CodeGenError(
"Fortran only supports a single or no return value.")
elif len(routine.results) == 1:
result = routine.results[0]
code_list.append(result.get_datatype('fortran'))
code_list.append("function")
else:
code_list.append("subroutine")
args = ", ".join("%s" % self._get_symbol(arg.name)
for arg in routine.arguments)
call_sig = "{}({})\n".format(routine.name, args)
# Fortran 95 requires all lines be less than 132 characters, so wrap
# this line before appending.
call_sig = ' &\n'.join(textwrap.wrap(call_sig,
width=60,
break_long_words=False)) + '\n'
code_list.append(call_sig)
code_list = [' '.join(code_list)]
code_list.append('implicit none\n')
return code_list
def _declare_arguments(self, routine):
# argument type declarations
code_list = []
array_list = []
scalar_list = []
for arg in routine.arguments:
if isinstance(arg, InputArgument):
typeinfo = "%s, intent(in)" % arg.get_datatype('fortran')
elif isinstance(arg, InOutArgument):
typeinfo = "%s, intent(inout)" % arg.get_datatype('fortran')
elif isinstance(arg, OutputArgument):
typeinfo = "%s, intent(out)" % arg.get_datatype('fortran')
else:
raise CodeGenError("Unknown Argument type: %s" % type(arg))
fprint = self._get_symbol
if arg.dimensions:
# fortran arrays start at 1
dimstr = ", ".join(["%s:%s" % (
fprint(dim[0] + 1), fprint(dim[1] + 1))
for dim in arg.dimensions])
typeinfo += ", dimension(%s)" % dimstr
array_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name)))
else:
scalar_list.append("%s :: %s\n" % (typeinfo, fprint(arg.name)))
# scalars first, because they can be used in array declarations
code_list.extend(scalar_list)
code_list.extend(array_list)
return code_list
def _declare_globals(self, routine):
# Global variables not explicitly declared within Fortran 90 functions.
# Note: a future F77 mode may need to generate "common" blocks.
return []
def _declare_locals(self, routine):
code_list = []
for var in sorted(routine.local_vars, key=str):
typeinfo = get_default_datatype(var)
code_list.append("%s :: %s\n" % (
typeinfo.fname, self._get_symbol(var)))
return code_list
def _get_routine_ending(self, routine):
"""Returns the closing statements of the fortran routine."""
if len(routine.results) == 1:
return ["end function\n"]
else:
return ["end subroutine\n"]
def get_interface(self, routine):
"""Returns a string for the function interface.
The routine should have a single result object, which can be None.
If the routine has multiple result objects, a CodeGenError is
raised.
See: https://en.wikipedia.org/wiki/Function_prototype
"""
prototype = [ "interface\n" ]
prototype.extend(self._get_routine_opening(routine))
prototype.extend(self._declare_arguments(routine))
prototype.extend(self._get_routine_ending(routine))
prototype.append("end interface\n")
return "".join(prototype)
def _call_printer(self, routine):
declarations = []
code_lines = []
for result in routine.result_variables:
if isinstance(result, Result):
assign_to = routine.name
elif isinstance(result, (OutputArgument, InOutArgument)):
assign_to = result.result_var
constants, not_fortran, f_expr = self._printer_method_with_settings(
'doprint', {"human": False, "source_format": 'free', "standard": 95, "strict": False},
result.expr, assign_to=assign_to)
for obj, v in sorted(constants, key=str):
t = get_default_datatype(obj)
declarations.append(
"%s, parameter :: %s = %s\n" % (t.fname, obj, v))
for obj in sorted(not_fortran, key=str):
t = get_default_datatype(obj)
if isinstance(obj, Function):
name = obj.func
else:
name = obj
declarations.append("%s :: %s\n" % (t.fname, name))
code_lines.append("%s\n" % f_expr)
return declarations + code_lines
def _indent_code(self, codelines):
return self._printer_method_with_settings(
'indent_code', {"human": False, "source_format": 'free', "strict": False}, codelines)
def dump_f95(self, routines, f, prefix, header=True, empty=True):
# check that symbols are unique with ignorecase
for r in routines:
lowercase = {str(x).lower() for x in r.variables}
orig_case = {str(x) for x in r.variables}
if len(lowercase) < len(orig_case):
raise CodeGenError("Fortran ignores case. Got symbols: %s" %
(", ".join([str(var) for var in r.variables])))
self.dump_code(routines, f, prefix, header, empty)
dump_f95.extension = code_extension # type: ignore
dump_f95.__doc__ = CodeGen.dump_code.__doc__
def dump_h(self, routines, f, prefix, header=True, empty=True):
"""Writes the interface to a header file.
This file contains all the function declarations.
Parameters
==========
routines : list
A list of Routine instances.
f : file-like
Where to write the file.
prefix : string
The filename prefix.
header : bool, optional
When True, a header comment is included on top of each source
file. [default : True]
empty : bool, optional
When True, empty lines are included to structure the source
files. [default : True]
"""
if header:
print(''.join(self._get_header()), file=f)
if empty:
print(file=f)
# declaration of the function prototypes
for routine in routines:
prototype = self.get_interface(routine)
f.write(prototype)
if empty:
print(file=f)
dump_h.extension = interface_extension # type: ignore
# This list of dump functions is used by CodeGen.write to know which dump
# functions it has to call.
dump_fns = [dump_f95, dump_h]
| FCodeGen |
python | getsentry__sentry | src/sentry/integrations/slack/message_builder/notifications/issues.py | {
"start": 417,
"end": 1187
} | class ____(SlackNotificationsMessageBuilder):
def __init__(
self,
notification: ProjectNotification,
context: Mapping[str, Any],
recipient: Actor,
) -> None:
super().__init__(notification, context, recipient)
self.notification: ProjectNotification = notification
def build(self) -> SlackBlock:
return SlackIssuesMessageBuilder(
group=self.notification.group,
event=getattr(self.notification, "event", None),
tags=self.context.get("tags", None),
rules=getattr(self.notification, "rules", None),
issue_details=True,
notification=self.notification,
recipient=self.recipient,
).build()
| IssueNotificationMessageBuilder |
python | sqlalchemy__sqlalchemy | test/dialect/postgresql/test_types.py | {
"start": 203668,
"end": 203780
} | class ____(
_DateTimeTZMultiRangeTests, _MultiRangeTypeRoundTrip
):
pass
| DateTimeTZRMultiangeRoundTripTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 264157,
"end": 264844
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("CreatedRepositoryContributionEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("CreatedRepositoryContribution"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| CreatedRepositoryContributionConnection |
python | weaviate__weaviate-python-client | weaviate/gql/filter.py | {
"start": 15852,
"end": 16428
} | class ____(NearMedia):
"""NearDepth class used to filter weaviate objects."""
def __init__(
self,
content: dict,
):
"""Initialize a NearDepth class instance.
Args:
content: The content of the `nearDepth` clause.
Raises:
TypeError: If 'content' is not of type dict.
TypeError: If 'content["depth"]' is not of type str.
ValueError: If 'content' has key "certainty"/"distance" but the value is not float.
"""
super().__init__(content, MediaType.DEPTH)
| NearDepth |
python | buildout__buildout | src/zc/buildout/testing.py | {
"start": 12528,
"end": 23431
} | class ____(BaseHTTPRequestHandler):
Server.__log = False
def __init__(self, request, address, server):
self.__server = server
self.tree = server.tree
BaseHTTPRequestHandler.__init__(self, request, address, server)
def do_GET(self):
if '__stop__' in self.path:
self.__server.server_close()
raise SystemExit
def k():
self.send_response(200)
out = '<html><body>k</body></html>\n'.encode()
self.send_header('Content-Length', str(len(out)))
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(out)
if self.path == '/enable_server_logging':
self.__server.__log = True
return k()
if self.path == '/disable_server_logging':
self.__server.__log = False
return k()
path = os.path.abspath(os.path.join(self.tree, *self.path.split('/')))
if not (
((path == self.tree) or path.startswith(self.tree+os.path.sep))
and
os.path.exists(path)
):
self.send_response(404, 'Not Found')
#self.send_response(200)
out = '<html><body>Not Found</body></html>'.encode()
#out = '\n'.join(self.tree, self.path, path)
self.send_header('Content-Length', str(len(out)))
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(out)
return
self.send_response(200)
if os.path.isdir(path):
out = ['<html><body>\n']
names = sorted(os.listdir(path))
for name in names:
if os.path.isdir(os.path.join(path, name)):
name += '/'
out.append('<a href="%s">%s</a><br>\n' % (name, name))
out.append('</body></html>\n')
out = ''.join(out).encode()
self.send_header('Content-Length', str(len(out)))
self.send_header('Content-Type', 'text/html')
else:
with open(path, 'rb') as f:
out = f.read()
self.send_header('Content-Length', len(out))
if path.endswith('.egg'):
self.send_header('Content-Type', 'application/zip')
elif path.endswith('.gz'):
self.send_header('Content-Type', 'application/x-gzip')
elif path.endswith('.zip'):
self.send_header('Content-Type', 'application/x-gzip')
elif path.endswith('.whl'):
self.send_header('Content-Type', 'application/octet-stream')
else:
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(out)
def log_request(self, code):
if self.__server.__log:
print_('%s %s %s' % (self.command, code, self.path))
def _run(tree, port):
server_address = ('localhost', port)
httpd = Server(tree, server_address, Handler)
httpd.serve_forever()
httpd.server_close()
def get_port():
for i in range(10):
port = random.randrange(20000, 30000)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
try:
s.connect(('localhost', port))
except socket.error:
return port
finally:
s.close()
raise RuntimeError("Can't find port")
def _start_server(tree, name=''):
port = get_port()
thread = threading.Thread(target=_run, args=(tree, port), name=name)
thread.setDaemon(True)
thread.start()
wait(port, up=True)
return port, thread
def start_server(tree):
return _start_server(tree)[0]
def stop_server(url, thread=None):
try:
urlopen(url+'__stop__')
except Exception:
pass
if thread is not None:
thread.join() # wait for thread to stop
def wait(port, up):
addr = 'localhost', port
for i in range(120):
time.sleep(0.25)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(addr)
s.close()
if up:
break
except socket.error:
e = sys.exc_info()[1]
if e[0] not in (errno.ECONNREFUSED, errno.ECONNRESET):
raise
s.close()
if not up:
break
else:
if up:
raise
else:
raise SystemError("Couldn't stop server")
def install(project, destination):
if not isinstance(destination, str):
destination = os.path.join(destination.globs['sample_buildout'],
'eggs')
dist = pkg_resources.working_set.find(
pkg_resources.Requirement.parse(project))
if dist.location.endswith('.egg'):
destination = os.path.join(destination,
os.path.basename(dist.location),
)
if os.path.isdir(dist.location):
shutil.copytree(dist.location, destination)
else:
shutil.copyfile(dist.location, destination)
else:
# copy link
with open(os.path.join(destination, project+'.egg-link'), 'w') as f:
f.write(dist.location)
def install_develop(project, destination):
if not isinstance(destination, str):
destination = os.path.join(destination.globs['sample_buildout'],
'develop-eggs')
dist = pkg_resources.working_set.find(
pkg_resources.Requirement.parse(project))
with open(os.path.join(destination, project+'.egg-link'), 'w') as f:
f.write(dist.location)
def _normalize_path(match):
path = match.group(1)
if os.path.sep == '\\':
path = path.replace('\\\\', '/')
if path.startswith('\\'):
path = path[1:]
return '/' + path.replace(os.path.sep, '/')
normalize_path = (
re.compile(
r'''[^'" \t\n\r]+\%(sep)s_[Tt][Ee][Ss][Tt]_\%(sep)s([^"' \t\n\r]+)'''
% dict(sep=os.path.sep)),
_normalize_path,
)
normalize_endings = re.compile('\r\n'), '\n'
normalize_script = (
re.compile('(\n?)- ([a-zA-Z_.-]+)-script.py\n- \\2.exe\n'),
'\\1- \\2\n')
normalize___pycache__ = (
re.compile('(\n?)d __pycache__\n'), '\\1')
normalize_egg_py = (
re.compile(r'-py\d[.]\d+(-\S+)?\.egg'),
'-pyN.N.egg',
)
normalize_exception_type_for_python_2_and_3 = (
re.compile(r'^(\w+\.)*([A-Z][A-Za-z0-9]+Error: )'),
'\2')
normalize_open_in_generated_script = (
re.compile(r"open\(__file__, 'U'\)"), 'open(__file__)')
not_found = (re.compile(r'Not found: [^\n]+/(\w|\.|-)+/\r?\n'), '')
easyinstall_deprecated = (re.compile(r'.*EasyInstallDeprecationWarning.*\n'),'')
setuptools_deprecated = (re.compile(r'.*SetuptoolsDeprecationWarning.*\n'),'')
pkg_resources_deprecated = (re.compile(r'.*PkgResourcesDeprecationWarning.*\n'),'')
warnings_warn = (re.compile(r'.*warnings\.warn.*\n'),'')
# Setuptools now pulls in dependencies when installed.
adding_find_link = (re.compile(r"Adding find link '[^']+'"
r" from setuptools .*\r?\n"), '')
ignore_not_upgrading = (
re.compile(
'Not upgrading because not running a local buildout command.\n'
), '')
# The root logger from setuptools prints all kinds of lines.
# This might depend on which setuptools version, or something else,
# because it did not happen before. Sample lines:
# "root: Couldn't retrieve index page for 'zc.recipe.egg'"
# "root: Scanning index of all packages.
# "root: Found: /sample-buildout/recipe/dist/spam-2-pyN.N.egg"
# I keep finding new lines like that, so let's ignore all.
ignore_root_logger = (re.compile(r'root:.*'), '')
# Now replace a multiline warning about that you should switch to native namespaces.
ignore_native_namespace_warning_1 = (re.compile(r'!!'), '')
ignore_native_namespace_warning_2 = (re.compile(r'\*' * 80), '')
ignore_native_namespace_warning_3 = (re.compile(
r'Please replace its usage with implicit namespaces \(PEP 420\).'),
''
)
ignore_native_namespace_warning_4 = (re.compile(
r'See https://setuptools.pypa.io/en/latest/references/keywords.html#keyword-namespace-packages for details.'),
''
)
ignore_native_namespace_warning_5 = (re.compile(
r'ep.load\(\)\(self, ep.name, value\)'),
''
)
def run_buildout(command):
# Make sure we don't get .buildout
os.environ['HOME'] = os.path.join(os.getcwd(), 'home')
args = command.split()
buildout = pkg_resources.load_entry_point(
'zc.buildout', 'console_scripts', args[0])
buildout(args[1:])
def run_from_process(target, *args, **kw):
sys.stdout = sys.stderr = open('out', 'w')
target(*args, **kw)
def run_in_process(*args, **kwargs):
try:
ctx = multiprocessing.get_context('fork')
process = ctx.Process(target=run_from_process, args=args, kwargs=kwargs)
except AttributeError:
process = multiprocessing.Process(target=run_from_process, args=args, kwargs=kwargs)
process.daemon = True
process.start()
process.join(99)
if process.is_alive() or process.exitcode:
with open('out') as f:
print(f.read())
def run_buildout_in_process(command='buildout'):
command = command.split(' ', 1)
command.insert(
1,
" use-dependency-links=false"
# Leaving this here so we can uncomment to see what's going on.
#" log-format=%(asctime)s____%(levelname)s_%(message)s -vvv"
" index=" + __file__ + 'nonexistent' # hide index
)
command = ' '.join(command)
run_in_process(run_buildout, command)
def setup_coverage(path_to_coveragerc):
if 'RUN_COVERAGE' not in os.environ:
return
if not os.path.exists(path_to_coveragerc):
raise ValueError('coveragerc file %s does not exist.' % path_to_coveragerc)
os.environ['COVERAGE_PROCESS_START'] = path_to_coveragerc
rootdir = os.path.dirname(path_to_coveragerc)
def combine_report():
subprocess.call(
[
sys.executable, '-m', 'coverage', 'combine',
],
cwd=rootdir,
)
subprocess.call(
[
sys.executable, '-m', 'coverage', 'report',
],
cwd=rootdir,
)
if path_to_coveragerc:
try:
import coverage
print("Coverage configured with %s" % path_to_coveragerc)
if 'COVERAGE_REPORT' in os.environ:
import atexit
atexit.register(combine_report)
coverage.process_startup()
except ImportError:
print(
"You try to run coverage "
"but coverage is not installed in your environment."
)
sys.exit(1)
| Handler |
python | apache__airflow | airflow-core/src/airflow/providers_manager.py | {
"start": 6752,
"end": 6903
} | class ____(NamedTuple):
"""Notification class and provider it comes from."""
notification_class_name: str
package_name: str
| NotificationInfo |
python | weaviate__weaviate-python-client | weaviate/collections/classes/types.py | {
"start": 379,
"end": 678
} | class ____(_WeaviateInput):
"""Input for the geo-coordinate datatype."""
latitude: float = Field(default=..., le=90, ge=-90)
longitude: float = Field(default=..., le=180, ge=-180)
def _to_dict(self) -> Dict[str, float]:
return self.model_dump(exclude_none=True)
| GeoCoordinate |
python | dagster-io__dagster | python_modules/libraries/dagster-powerbi/dagster_powerbi_tests/test_asset_specs.py | {
"start": 3726,
"end": 13798
} | class ____(DagsterPowerBITranslator):
def get_asset_spec(self, data: PowerBITranslatorData) -> AssetSpec:
default_spec = super().get_asset_spec(data)
return default_spec.replace_attributes(
key=default_spec.key.with_prefix("prefix"),
).merge_attributes(metadata={"custom": "metadata"})
def test_translator_custom_metadata(workspace_data_api_mocks: None, workspace_id: str) -> None:
fake_token = uuid.uuid4().hex
resource = PowerBIWorkspace(
credentials=PowerBIToken(api_token=fake_token),
workspace_id=workspace_id,
)
all_asset_specs = load_powerbi_asset_specs(
workspace=resource,
dagster_powerbi_translator=MyCustomTranslator(),
use_workspace_scan=False,
)
asset_spec = next(spec for spec in all_asset_specs)
assert "custom" in asset_spec.metadata
assert asset_spec.metadata["custom"] == "metadata"
assert asset_spec.key.path == ["prefix", "dashboard", "Sales_Returns_Sample_v201912"]
assert "dagster/kind/powerbi" in asset_spec.tags
def test_translator_custom_metadata_legacy(
workspace_data_api_mocks: None, workspace_id: str
) -> None:
fake_token = uuid.uuid4().hex
resource = PowerBIWorkspace(
credentials=PowerBIToken(api_token=fake_token),
workspace_id=workspace_id,
)
with pytest.warns(
DeprecationWarning,
match=r"Support of `dagster_powerbi_translator` as a Type\[DagsterPowerBITranslator\]",
):
# Pass the translator type
all_asset_specs = load_powerbi_asset_specs(
workspace=resource,
dagster_powerbi_translator=MyCustomTranslator,
use_workspace_scan=False,
)
asset_spec = next(spec for spec in all_asset_specs)
assert "custom" in asset_spec.metadata
assert asset_spec.metadata["custom"] == "metadata"
assert asset_spec.key.path == ["prefix", "dashboard", "Sales_Returns_Sample_v201912"]
assert "dagster/kind/powerbi" in asset_spec.tags
@definitions
def state_derived_defs_two_workspaces() -> Definitions:
resource = PowerBIWorkspace(
credentials=PowerBIToken(api_token=EnvVar("FAKE_API_TOKEN")),
workspace_id="a2122b8f-d7e1-42e8-be2b-a5e636ca3221",
)
resource_second_workspace = PowerBIWorkspace(
credentials=PowerBIToken(api_token=EnvVar("FAKE_API_TOKEN")),
workspace_id="c5322b8a-d7e1-42e8-be2b-a5e636ca3221",
)
return Definitions(
assets=[
*load_powerbi_asset_specs(resource, use_workspace_scan=False),
*load_powerbi_asset_specs(resource_second_workspace, use_workspace_scan=False),
]
)
def test_two_workspaces(
workspace_data_api_mocks: responses.RequestsMock,
second_workspace_data_api_mocks: responses.RequestsMock,
) -> None:
with instance_for_test(), environ({"FAKE_API_TOKEN": uuid.uuid4().hex}):
assert len(workspace_data_api_mocks.calls) == 0
# first, we resolve the repository to generate our cached metadata
repository_def = state_derived_defs_two_workspaces().get_repository_def()
assert len(workspace_data_api_mocks.calls) == 9
# 3 PowerBI external assets from first workspace, 1 from second
assert len(repository_def.assets_defs_by_key) == 3 + 1
@pytest.mark.parametrize("success", [True, False])
def test_refreshable_semantic_model(
workspace_data_api_mocks: responses.RequestsMock, workspace_id: str, success: bool
) -> None:
fake_token = uuid.uuid4().hex
resource = PowerBIWorkspace(
credentials=PowerBIToken(api_token=fake_token),
workspace_id=workspace_id,
refresh_poll_interval=0,
)
all_specs = load_powerbi_asset_specs(resource, use_workspace_scan=False)
assets_with_semantic_models = [
build_semantic_model_refresh_asset_definition(resource_key="powerbi", spec=spec)
if spec.tags.get("dagster-powerbi/asset_type") == "semantic_model"
else spec
for spec in all_specs
]
# 1 dashboard, 1 report, 1 semantic model
assert len(assets_with_semantic_models) == 3
semantic_model_asset = next(
asset for asset in assets_with_semantic_models if asset.key.path[0] == "semantic_model"
)
assert semantic_model_asset.key.path == ["semantic_model", "Sales_Returns_Sample_v201912"]
assert isinstance(semantic_model_asset, AssetsDefinition) and semantic_model_asset.is_executable
# materialize the semantic model
workspace_data_api_mocks.add(
method=responses.POST,
url=f"{BASE_API_URL}/groups/{workspace_id}/datasets/{SAMPLE_SEMANTIC_MODEL['id']}/refreshes",
json={"notifyOption": "NoNotification"},
status=202,
)
workspace_data_api_mocks.add(
method=responses.GET,
url=f"{BASE_API_URL}/groups/{workspace_id}/datasets/{SAMPLE_SEMANTIC_MODEL['id']}/refreshes",
json={"value": [{"status": "Unknown"}]},
status=200,
)
workspace_data_api_mocks.add(
method=responses.GET,
url=f"{BASE_API_URL}/groups/{workspace_id}/datasets/{SAMPLE_SEMANTIC_MODEL['id']}/refreshes",
json={
"value": [{"status": "Completed" if success else "Failed", "serviceExceptionJson": {}}]
},
status=200,
)
# Missing resource
with pytest.raises(DagsterInvalidDefinitionError):
materialize([semantic_model_asset], raise_on_error=False)
result = materialize(
[semantic_model_asset], raise_on_error=False, resources={"powerbi": resource}
)
assert result.success is success
@pytest.mark.parametrize("success", [True, False])
def test_refreshable_semantic_model_legacy(
workspace_data_api_mocks: responses.RequestsMock, workspace_id: str, success: bool
) -> None:
fake_token = uuid.uuid4().hex
resource = PowerBIWorkspace(
credentials=PowerBIToken(api_token=fake_token),
workspace_id=workspace_id,
refresh_poll_interval=0,
)
defs = resource.build_defs(enable_refresh_semantic_models=True)
semantic_model_asset = next(
asset
for asset in defs.resolve_asset_graph().assets_defs
if asset.is_executable and asset.key.path[0] == "semantic_model"
)
assert semantic_model_asset.key.path == ["semantic_model", "Sales_Returns_Sample_v201912"]
assert isinstance(semantic_model_asset, AssetsDefinition) and semantic_model_asset.is_executable
# materialize the semantic model
workspace_data_api_mocks.add(
method=responses.POST,
url=f"{BASE_API_URL}/groups/{workspace_id}/datasets/{SAMPLE_SEMANTIC_MODEL['id']}/refreshes",
json={"notifyOption": "NoNotification"},
status=202,
)
workspace_data_api_mocks.add(
method=responses.GET,
url=f"{BASE_API_URL}/groups/{workspace_id}/datasets/{SAMPLE_SEMANTIC_MODEL['id']}/refreshes",
json={"value": [{"status": "Unknown"}]},
status=200,
)
workspace_data_api_mocks.add(
method=responses.GET,
url=f"{BASE_API_URL}/groups/{workspace_id}/datasets/{SAMPLE_SEMANTIC_MODEL['id']}/refreshes",
json={
"value": [{"status": "Completed" if success else "Failed", "serviceExceptionJson": {}}]
},
status=200,
)
result = materialize([semantic_model_asset], raise_on_error=False)
assert result.success is success
@definitions
def state_derived_defs() -> Definitions:
fake_token = uuid.uuid4().hex
resource = PowerBIWorkspace(
credentials=PowerBIToken(api_token=fake_token),
workspace_id="a2122b8f-d7e1-42e8-be2b-a5e636ca3221",
)
powerbi_specs = load_powerbi_asset_specs(resource, use_workspace_scan=False)
@asset
def my_materializable_asset(): ...
return Definitions(
assets=[my_materializable_asset, *powerbi_specs], jobs=[define_asset_job("all_asset_job")]
)
def test_state_derived_defs(
workspace_data_api_mocks: responses.RequestsMock,
) -> None:
with instance_for_test() as instance:
assert len(workspace_data_api_mocks.calls) == 0
repository_def = initialize_repository_def_from_pointer(
CodePointer.from_python_file(str(Path(__file__)), "state_derived_defs", None),
)
# first, we resolve the repository to generate our cached metadata
assert len(workspace_data_api_mocks.calls) == 5
# 3 PowerBI external assets, one materializable asset
assert len(repository_def.assets_defs_by_key) == 3 + 1
# Assert that all Power BI assets have upstreams, which are resolved
for asset_def in repository_def.assets_defs_by_key.values():
for key, deps in asset_def.asset_deps.items():
if key.path[-1] == "my_materializable_asset":
continue
if key.path[0] == "semantic_model":
continue
assert len(deps) > 0, f"Expected upstreams for {key}"
assert all(dep in repository_def.assets_defs_by_key for dep in deps), (
f"Asset {key} depends on {deps} which are not in the repository"
)
job_def = repository_def.get_job("all_asset_job")
repository_load_data = repository_def.repository_load_data
recon_repo = ReconstructableRepository.for_file(__file__, fn_name="state_derived_defs")
recon_job = ReconstructableJob(repository=recon_repo, job_name="all_asset_job")
execution_plan = create_execution_plan(recon_job, repository_load_data=repository_load_data)
run = instance.create_run_for_job(job_def=job_def, execution_plan=execution_plan)
events = execute_plan(
execution_plan=execution_plan,
job=recon_job,
dagster_run=run,
instance=instance,
)
assert (
len([event for event in events if event.event_type == DagsterEventType.STEP_SUCCESS])
== 1
), "Expected two successful steps"
assert len(workspace_data_api_mocks.calls) == 5
| MyCustomTranslator |
python | google__pytype | pytype/pytd/visitors.py | {
"start": 53898,
"end": 62324
} | class ____(Visitor):
"""Visitor for adjusting type parameters.
* Inserts class templates.
* Inserts signature templates.
* Adds scopes to type parameters.
"""
def __init__(self):
super().__init__()
self.class_typeparams = set()
self.function_typeparams = None
self.class_template = []
self.class_name = None
self.function_name = None
self.constant_name = None
self.all_typevariables = set()
self.generic_level = 0
def _GetTemplateItems(self, param):
"""Get a list of template items from a parameter."""
items = []
if isinstance(param, pytd.GenericType):
for p in param.parameters:
items.extend(self._GetTemplateItems(p))
elif isinstance(param, pytd.UnionType):
for p in param.type_list:
items.extend(self._GetTemplateItems(p))
elif isinstance(param, pytd.TypeParameter):
items.append(pytd.TemplateItem(param))
return items
def VisitTypeDeclUnit(self, node):
type_params_to_add = []
declared_type_params = {n.name for n in node.type_params}
# Sorting type params helps keep pickling deterministic.
for t in sorted(self.all_typevariables):
if t.name in declared_type_params:
continue
logging.debug("Adding definition for type parameter %r", t.name)
declared_type_params.add(t.name)
# We assume undeclared `Self` type parameters are imported from `typing`.
scope = "typing" if t.name == "Self" else None
type_params_to_add.append(t.Replace(scope=scope))
new_type_params = tuple(
sorted(node.type_params + tuple(type_params_to_add))
)
return node.Replace(type_params=new_type_params)
def _CheckDuplicateNames(self, params, class_name):
seen = set()
for x in params:
if x.name in seen:
raise ContainerError(
"Duplicate type parameter %s in typing.Generic base of class %s"
% (x.name, class_name)
)
seen.add(x.name)
def EnterClass(self, node):
"""Establish the template for the class."""
templates = []
generic_template = None
for base in node.bases:
if isinstance(base, pytd.GenericType):
params = sum(
(self._GetTemplateItems(param) for param in base.parameters), []
)
if base.name in ["typing.Generic", "Generic"]:
# TODO(mdemello): Do we need "Generic" in here or is it guaranteed
# to be replaced by typing.Generic by the time this visitor is called?
self._CheckDuplicateNames(params, node.name)
if generic_template:
raise ContainerError(
"Cannot inherit from Generic[...] "
f"multiple times in class {node.name}"
)
else:
generic_template = params
else:
templates.append(params)
if generic_template:
for params in templates:
for param in params:
if param not in generic_template:
raise ContainerError(
"Some type variables (%s) are not listed in Generic of class %s"
% (param.type_param.name, node.name)
)
templates = [generic_template]
try:
template = mro.MergeSequences(templates)
except ValueError as e:
raise ContainerError(
f"Illegal type parameter order in class {node.name}"
) from e
self.class_template.append(template)
for t in template:
assert isinstance(t.type_param, pytd.TypeParameter)
self.class_typeparams.add(t.name)
self.class_name = node.name
def LeaveClass(self, node):
del node
for t in self.class_template[-1]:
if t.name in self.class_typeparams:
self.class_typeparams.remove(t.name)
self.class_name = None
self.class_template.pop()
def VisitClass(self, node):
"""Builds a template for the class from its GenericType bases."""
# The template items will not have been properly scoped because they were
# stored outside of the ast and not visited while processing the class
# subtree. They now need to be scoped similar to VisitTypeParameter,
# except we happen to know they are all bound by the class.
template = [
pytd.TemplateItem(t.type_param.Replace(scope=node.name))
for t in self.class_template[-1]
]
node = node.Replace(template=tuple(template))
return node.Visit(AdjustSelf()).Visit(NamedTypeToClassType())
def EnterSignature(self, unused_node):
assert self.function_typeparams is None, self.function_typeparams
self.function_typeparams = set()
def LeaveSignature(self, unused_node):
self.function_typeparams = None
def _MaybeMutateSelf(self, sig):
# If the given signature is an __init__ method for a generic class and the
# class's type parameters all appear among the method's parameter
# annotations, then we should add a mutation to the parameter values, e.g.:
# class Foo(Generic[T]):
# def __init__(self, x: T) -> None: ...
# becomes:
# class Foo(Generic[T]):
# def __init__(self, x: T) -> None:
# self = Foo[T]
if self.function_name != "__init__" or not self.class_name:
return sig
class_template = self.class_template[-1]
if not class_template:
return sig
seen_params = {t.name: t for t in pytd_utils.GetTypeParameters(sig)}
if any(t.name not in seen_params for t in class_template):
return sig
if not sig.params or sig.params[0].mutated_type:
return sig
mutated_type = pytd.GenericType(
base_type=pytd.ClassType(self.class_name),
parameters=tuple(seen_params[t.name] for t in class_template),
)
self_param = sig.params[0].Replace(mutated_type=mutated_type)
return sig.Replace(params=(self_param,) + sig.params[1:])
def VisitSignature(self, node):
# Sorting the template in CanonicalOrderingVisitor is enough to guarantee
# pyi determinism, but we need to sort here as well for pickle determinism.
return self._MaybeMutateSelf(
node.Replace(template=tuple(sorted(self.function_typeparams)))
)
def EnterFunction(self, node):
self.function_name = node.name
def LeaveFunction(self, unused_node):
self.function_name = None
def EnterConstant(self, node):
self.constant_name = node.name
def LeaveConstant(self, unused_node):
self.constant_name = None
def EnterGenericType(self, unused_node):
self.generic_level += 1
def LeaveGenericType(self, unused_node):
self.generic_level -= 1
def EnterCallableType(self, node):
self.EnterGenericType(node)
def LeaveCallableType(self, node):
self.LeaveGenericType(node)
def EnterTupleType(self, node):
self.EnterGenericType(node)
def LeaveTupleType(self, node):
self.LeaveGenericType(node)
def EnterUnionType(self, node):
self.EnterGenericType(node)
def LeaveUnionType(self, node):
self.LeaveGenericType(node)
def _GetFullName(self, name):
return ".".join(n for n in [self.class_name, name] if n)
def _GetScope(self, name):
if name in self.class_typeparams:
return self.class_name
return self._GetFullName(self.function_name)
def _IsBoundTypeParam(self, node):
in_class = self.class_name and node.name in self.class_typeparams
return in_class or self.generic_level
def VisitTypeParameter(self, node):
"""Add scopes to type parameters, track unbound params."""
if (
self.constant_name
and node.name != "Self"
and not self._IsBoundTypeParam(node)
):
raise ContainerError(
"Unbound type parameter {} in {}".format(
node.name, self._GetFullName(self.constant_name)
)
)
scope = self._GetScope(node.name)
if scope:
node = node.Replace(scope=scope)
else:
# This is a top-level type parameter (TypeDeclUnit.type_params).
# AddNamePrefix gave it the right scope, so leave it alone.
pass
if (
self.function_typeparams is not None
and node.name not in self.class_typeparams
):
self.function_typeparams.add(pytd.TemplateItem(node))
self.all_typevariables.add(node)
return node
def VisitParamSpec(self, node):
"""Add scopes to paramspecs."""
scope = self._GetScope(node.name)
if scope:
node = node.Replace(scope=scope)
self.all_typevariables.add(node)
return node
| AdjustTypeParameters |
python | numpy__numpy | numpy/_core/tests/test_numeric.py | {
"start": 148310,
"end": 152142
} | class ____:
@pytest.mark.filterwarnings(
"ignore:.*2-dimensional vectors.*:DeprecationWarning"
)
def test_2x2(self):
u = [1, 2]
v = [3, 4]
z = -2
cp = np.cross(u, v)
assert_equal(cp, z)
cp = np.cross(v, u)
assert_equal(cp, -z)
@pytest.mark.filterwarnings(
"ignore:.*2-dimensional vectors.*:DeprecationWarning"
)
def test_2x3(self):
u = [1, 2]
v = [3, 4, 5]
z = np.array([10, -5, -2])
cp = np.cross(u, v)
assert_equal(cp, z)
cp = np.cross(v, u)
assert_equal(cp, -z)
def test_3x3(self):
u = [1, 2, 3]
v = [4, 5, 6]
z = np.array([-3, 6, -3])
cp = np.cross(u, v)
assert_equal(cp, z)
cp = np.cross(v, u)
assert_equal(cp, -z)
@pytest.mark.filterwarnings(
"ignore:.*2-dimensional vectors.*:DeprecationWarning"
)
def test_broadcasting(self):
# Ticket #2624 (Trac #2032)
u = np.tile([1, 2], (11, 1))
v = np.tile([3, 4], (11, 1))
z = -2
assert_equal(np.cross(u, v), z)
assert_equal(np.cross(v, u), -z)
assert_equal(np.cross(u, u), 0)
u = np.tile([1, 2], (11, 1)).T
v = np.tile([3, 4, 5], (11, 1))
z = np.tile([10, -5, -2], (11, 1))
assert_equal(np.cross(u, v, axisa=0), z)
assert_equal(np.cross(v, u.T), -z)
assert_equal(np.cross(v, v), 0)
u = np.tile([1, 2, 3], (11, 1)).T
v = np.tile([3, 4], (11, 1)).T
z = np.tile([-12, 9, -2], (11, 1))
assert_equal(np.cross(u, v, axisa=0, axisb=0), z)
assert_equal(np.cross(v.T, u.T), -z)
assert_equal(np.cross(u.T, u.T), 0)
u = np.tile([1, 2, 3], (5, 1))
v = np.tile([4, 5, 6], (5, 1)).T
z = np.tile([-3, 6, -3], (5, 1))
assert_equal(np.cross(u, v, axisb=0), z)
assert_equal(np.cross(v.T, u), -z)
assert_equal(np.cross(u, u), 0)
@pytest.mark.filterwarnings(
"ignore:.*2-dimensional vectors.*:DeprecationWarning"
)
def test_broadcasting_shapes(self):
u = np.ones((2, 1, 3))
v = np.ones((5, 3))
assert_equal(np.cross(u, v).shape, (2, 5, 3))
u = np.ones((10, 3, 5))
v = np.ones((2, 5))
assert_equal(np.cross(u, v, axisa=1, axisb=0).shape, (10, 5, 3))
assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=2)
assert_raises(AxisError, np.cross, u, v, axisa=3, axisb=0)
u = np.ones((10, 3, 5, 7))
v = np.ones((5, 7, 2))
assert_equal(np.cross(u, v, axisa=1, axisc=2).shape, (10, 5, 3, 7))
assert_raises(AxisError, np.cross, u, v, axisa=-5, axisb=2)
assert_raises(AxisError, np.cross, u, v, axisa=1, axisb=-4)
# gh-5885
u = np.ones((3, 4, 2))
for axisc in range(-2, 2):
assert_equal(np.cross(u, u, axisc=axisc).shape, (3, 4))
def test_uint8_int32_mixed_dtypes(self):
# regression test for gh-19138
u = np.array([[195, 8, 9]], np.uint8)
v = np.array([250, 166, 68], np.int32)
z = np.array([[950, 11010, -30370]], dtype=np.int32)
assert_equal(np.cross(v, u), z)
assert_equal(np.cross(u, v), -z)
@pytest.mark.parametrize("a, b", [(0, [1, 2]), ([1, 2], 3)])
def test_zero_dimension(self, a, b):
with pytest.raises(ValueError) as exc:
np.cross(a, b)
assert "At least one array has zero dimension" in str(exc.value)
def test_outer_out_param():
arr1 = np.ones((5,))
arr2 = np.ones((2,))
arr3 = np.linspace(-2, 2, 5)
out1 = np.ndarray(shape=(5, 5))
out2 = np.ndarray(shape=(2, 5))
res1 = np.outer(arr1, arr3, out1)
assert_equal(res1, out1)
assert_equal(np.outer(arr2, arr3, out2), out2)
| TestCross |
python | Netflix__metaflow | test/core/tests/tag_catch.py | {
"start": 101,
"end": 5826
} | class ____(MetaflowTest):
PRIORITY = 2
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag("retry(times=3)")
@steps(0, ["start"])
def step_start(self):
import os
import sys
self.test_attempt = current.retry_count
sys.stdout.write("stdout testing logs %d\n" % self.test_attempt)
sys.stderr.write("stderr testing logs %d\n" % self.test_attempt)
if self.test_attempt < 3:
self.invisible = True
raise TestRetry()
# foreach splits don't support @catch but @retry should work
@tag("retry(times=2)")
@steps(0, ["foreach-split", "parallel-split"])
def step_split(self):
import os
if current.retry_count == 2:
self.this_is_split = True
else:
raise TestRetry()
@tag("retry(times=2)")
@steps(0, ["join"])
def step_join(self, inputs):
import os
if current.retry_count == 2:
self.test_attempt = inputs[0].test_attempt
else:
raise TestRetry()
@tag('catch(var="end_ex", print_exception=False)')
@steps(0, ["end"], required=True)
def step_end(self):
from metaflow.exception import ExternalCommandFailed
# make sure we see the latest attempt version of the artifact
assert_equals(3, self.test_attempt)
# the test uses a non-trivial derived exception on purpose
# which is non-trivial to pickle correctly
self.here = True
raise ExternalCommandFailed("catch me!")
@tag('catch(var="ex", print_exception=False)')
@tag("retry(times=2)")
@steps(1, ["all"])
def step_all(self):
import signal
import os
# die an ugly death
os.kill(os.getpid(), signal.SIGKILL)
def check_results(self, flow, checker):
checker.assert_log(
"start", "stdout", "stdout testing logs 3\n", exact_match=False
)
checker.assert_log(
"start", "stderr", "stderr testing logs 3\n", exact_match=False
)
for step in flow:
if step.name == "start":
checker.assert_artifact("start", "test_attempt", 3)
try:
for task in checker.artifact_dict("start", "invisible").values():
if task:
raise Exception(
"'invisible' should not be visible " "in 'start'"
)
except KeyError:
pass
elif step.name == "end":
checker.assert_artifact("end", "test_attempt", 3)
for task in checker.artifact_dict(step.name, "end_ex").values():
assert_equals("catch me!", str(task["end_ex"].exception))
break
else:
raise Exception("No artifact 'end_ex' in step 'end'")
elif flow._graph[step.name].type == "foreach":
checker.assert_artifact(step.name, "this_is_split", True)
elif flow._graph[step.name].type == "join":
checker.assert_artifact("end", "test_attempt", 3)
else:
# Use artifact_dict_if_exists because for parallel tasks, only the
# control task will have the 'ex' artifact.
for task in checker.artifact_dict_if_exists(step.name, "ex").values():
extype = "metaflow.plugins.catch_decorator." "FailureHandledByCatch"
assert_equals(extype, str(task["ex"].type))
break
else:
raise Exception("No artifact 'ex' in step '%s'" % step.name)
run = checker.get_run()
if run:
for step in run:
if step.id == "end":
continue
if flow._graph[step.id].type in ("foreach", "join"):
# 1 normal run + 2 retries = 3 attempts
attempts = 3
else:
# 1 normal run + 2 retries + 1 fallback = 4 attempts
attempts = 4
for task in step:
data = task.data
got = sorted(m.value for m in task.metadata if m.type == "attempt")
if flow._graph[step.id].parallel_step:
if task.metadata_dict.get(
"internal_task_type", None
): # Only control tasks have internal_task_type set
assert_equals(list(map(str, range(attempts))), got)
else:
# non-control tasks have one attempt less for parallel steps
assert_equals(list(map(str, range(attempts - 1))), got)
else:
assert_equals(list(map(str, range(attempts))), got)
assert_equals(False, "invisible" in run["start"].task.data)
assert_equals(3, run["start"].task.data.test_attempt)
end = run["end"].task
assert_equals(True, end.data.here)
assert_equals(3, end.data.test_attempt)
# task.exception is None since the exception was handled
assert_equals(None, end.exception)
assert_equals("catch me!", end.data.end_ex.exception)
assert_equals(
"metaflow.exception.ExternalCommandFailed", end.data.end_ex.type
)
| TagCatchTest |
python | agronholm__apscheduler | examples/web/asgi_starlette.py | {
"start": 1045,
"end": 2195
} | class ____:
def __init__(
self,
app: ASGIApp,
scheduler: AsyncScheduler,
) -> None:
self.app = app
self.scheduler = scheduler
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if scope["type"] == "lifespan":
async with self.scheduler:
await self.scheduler.add_schedule(
tick, IntervalTrigger(seconds=1), id="tick"
)
await self.scheduler.start_in_background()
await self.app(scope, receive, send)
else:
await self.app(scope, receive, send)
async def root(request: Request) -> Response:
return PlainTextResponse("Hello, world!")
engine = create_async_engine("postgresql+asyncpg://postgres:secret@localhost/testdb")
data_store = SQLAlchemyDataStore(engine)
event_broker = AsyncpgEventBroker.from_async_sqla_engine(engine)
scheduler = AsyncScheduler(data_store, event_broker)
routes = [Route("/", root)]
middleware = [Middleware(SchedulerMiddleware, scheduler=scheduler)]
app = Starlette(routes=routes, middleware=middleware)
| SchedulerMiddleware |
python | allegroai__clearml | clearml/utilities/process/mp.py | {
"start": 15125,
"end": 15749
} | class ____(object):
__thread_pool = SingletonThreadPool()
def __init__(self) -> None:
self._event = ProcessEvent()
def is_set(self) -> bool:
return self._event.is_set()
def set(self) -> None:
if not BackgroundMonitor.is_subprocess_enabled() or BackgroundMonitor.is_subprocess_alive():
self._event.set()
# SafeEvent.__thread_pool.get().apply_async(func=self._event.set, args=())
def clear(self) -> bool:
return self._event.clear()
def wait(self, timeout: Optional[float] = None) -> bool:
return self._event.wait(timeout=timeout)
| SafeEvent |
python | django__django | tests/backends/tests.py | {
"start": 7123,
"end": 9065
} | class ____(TransactionTestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
check it is. Refs #8901.
"""
available_apps = ["backends"]
def test_sequence_name_length_limits_create(self):
"""Creation of model with long name and long pk name doesn't error."""
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
def test_sequence_name_length_limits_m2m(self):
"""
An m2m save of a model with a long name and a long m2m field name
doesn't error (#8901).
"""
obj = (
VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
)
rel_obj = Person.objects.create(first_name="Django", last_name="Reinhardt")
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
def test_sequence_name_length_limits_flush(self):
"""
Sequence resetting as part of a flush with model with long name and
long pk name doesn't error (#8901).
"""
# A full flush is expensive to the full test, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = (
VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
)
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sql_list = connection.ops.sql_flush(no_style(), tables, reset_sequences=True)
connection.ops.execute_sql_flush(sql_list)
@skipUnlessDBFeature("supports_sequence_reset")
| LongNameTest |
python | openai__openai-python | src/openai/_module_client.py | {
"start": 1508,
"end": 1631
} | class ____(LazyProxy["Files"]):
@override
def __load__(self) -> Files:
return _load_client().files
| FilesProxy |
python | ray-project__ray | doc/source/serve/doc_code/batching_guide.py | {
"start": 1183,
"end": 1896
} | class ____:
@serve.batch(max_batch_size=8, batch_wait_timeout_s=0.1)
async def __call__(self, multiple_samples: List[int]) -> List[int]:
# Use numpy's vectorized computation to efficiently process a batch.
return np.array(multiple_samples) * 2
def reconfigure(self, user_config: Dict):
self.__call__.set_max_batch_size(user_config["max_batch_size"])
self.__call__.set_batch_wait_timeout_s(user_config["batch_wait_timeout_s"])
# __batch_params_update_end__
# __single_stream_begin__
import asyncio
from typing import AsyncGenerator
from starlette.requests import Request
from starlette.responses import StreamingResponse
from ray import serve
@serve.deployment
| Model |
python | coleifer__peewee | tests/cysqlite.py | {
"start": 17456,
"end": 18035
} | class ____(CyDatabaseTestCase):
database = db_loader('sqlite')
def test_data_types_table_function(self):
self.database.register_table_function(DataTypes)
cursor = self.database.execute_sql('SELECT key, value '
'FROM data_types() ORDER BY key')
self.assertEqual(cursor.fetchall(), [
('k0', None),
('k1', 1),
('k2', 2.),
('k3', u'unicode str'),
('k4', b'byte str'),
('k5', 0),
('k6', 1),
])
| TestDataTypesTableFunction |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-sec-filings/llama_index/readers/sec_filings/prepline_sec_filings/sections.py | {
"start": 133,
"end": 6170
} | class ____(Enum):
PROSPECTUS_SUMMARY = re.compile(r"^(?:prospectus )?summary$")
ABOUT_PROSPECTUS = re.compile(r"about this prospectus")
FORWARD_LOOKING_STATEMENTS = re.compile(r"forward[ -]looking statements")
RISK_FACTORS = re.compile(r"risk factors")
USE_OF_PROCEEDS = re.compile(r"use of proceeds")
DIVIDEND_POLICY = re.compile(r"^dividend policy")
CAPITALIZATION = re.compile(r"^capitalization$")
DILUTION = re.compile(r"^dilution$")
MANAGEMENT_DISCUSSION = re.compile(r"^management(?:[\u2019']s)? discussion")
BUSINESS = re.compile(r"^business$")
MANAGEMENT = re.compile(r"^(?:(?:our )?management)|(?:executive officers)$")
COMPENSATION = re.compile(r"compensation")
RELATED_PARTY_TRANSACTIONS = re.compile(r"(?:relationships|related).*transactions")
PRINCIPAL_STOCKHOLDERS = re.compile(
r"(?:principal.*(?:stockholder|shareholder)s?)|(?:(security|stock|share) "
r"ownership .*certain)"
)
DESCRIPTION_OF_STOCK = re.compile(
r"^description of (?:capital stock|share capital|securities)"
)
DESCRIPTION_OF_DEBT = re.compile(r"^description of .*debt")
FUTURE_SALE = re.compile(r"(?:shares|stock) eligible for future sale")
US_TAX = re.compile(
r"(?:us|u\.s\.|united states|material federal).* tax"
r" (?:consideration|consequence)"
)
UNDERWRITING = re.compile(r"underwrit")
LEGAL_MATTERS = re.compile(r"legal matters")
EXPERTS = re.compile(r"^experts$")
MORE_INFORMATION = re.compile(r"(?:additional|more) information")
FINANCIAL_STATEMENTS = r"financial statements"
MARKET_RISK_DISCLOSURES = (
r"(?:quantitative|qualitative) disclosures? about market risk"
)
CONTROLS_AND_PROCEDURES = r"controls and procedures"
LEGAL_PROCEEDINGS = r"legal proceedings"
DEFAULTS = r"defaults (?:up)?on .*securities"
MINE_SAFETY = r"mine safety disclosures?"
OTHER_INFORMATION = r"other information"
UNRESOLVED_STAFF_COMMENTS = r"unresolved staff comments"
PROPERTIES = r"^properties$"
MARKET_FOR_REGISTRANT_COMMON_EQUITY = (
r"market for(?: the)? (?:registrant|company)(?:['\u2019]s)? common equity"
)
ACCOUNTING_DISAGREEMENTS = r"disagreements with accountants"
FOREIGN_JURISDICTIONS = r"diclosure .*foreign jurisdictions .*inspection"
EXECUTIVE_OFFICERS = r"executive officers"
ACCOUNTING_FEES = r"accounting fees"
EXHIBITS = r"^exhibits?(.*financial statement schedules)?$"
FORM_SUMMARY = r"^form .*summary$"
# NOTE(yuming): Additional section titles used in test_real_examples.py,
# maybe change this when custom regex string param is allowed.
CERTAIN_TRADEMARKS = r"certain trademarks"
OFFER_PRICE = r"(?:determination of )offering price"
@property
def pattern(self):
return self.value
ALL_SECTIONS = "_ALL"
section_string_to_enum = {enum.name: enum for enum in SECSection}
# NOTE(robinson) - Sections are listed in the following document from SEC
# ref: https://www.sec.gov/files/form10-k.pdf
SECTIONS_10K = (
SECSection.BUSINESS, # ITEM 1
SECSection.RISK_FACTORS, # ITEM 1A
SECSection.UNRESOLVED_STAFF_COMMENTS, # ITEM 1B
SECSection.PROPERTIES, # ITEM 2
SECSection.LEGAL_PROCEEDINGS, # ITEM 3
SECSection.MINE_SAFETY, # ITEM 4
SECSection.MARKET_FOR_REGISTRANT_COMMON_EQUITY, # ITEM 5
# NOTE(robinson) - ITEM 6 is "RESERVED"
SECSection.MANAGEMENT_DISCUSSION, # ITEM 7
SECSection.MARKET_RISK_DISCLOSURES, # ITEM 7A
SECSection.FINANCIAL_STATEMENTS, # ITEM 8
SECSection.ACCOUNTING_DISAGREEMENTS, # ITEM 9
SECSection.CONTROLS_AND_PROCEDURES, # ITEM 9A
# NOTE(robinson) - ITEM 9B is other information
SECSection.FOREIGN_JURISDICTIONS, # ITEM 9C
SECSection.MANAGEMENT, # ITEM 10
SECSection.COMPENSATION, # ITEM 11
SECSection.PRINCIPAL_STOCKHOLDERS, # ITEM 12
SECSection.RELATED_PARTY_TRANSACTIONS, # ITEM 13
SECSection.ACCOUNTING_FEES, # ITEM 14
SECSection.EXHIBITS, # ITEM 15
SECSection.FORM_SUMMARY, # ITEM 16
)
# NOTE(robinson) - Sections are listed in the following document from SEC
# ref: https://www.sec.gov/files/form10-q.pdf
SECTIONS_10Q = (
# Part I - Financial information
SECSection.FINANCIAL_STATEMENTS, # ITEM 1
SECSection.MANAGEMENT_DISCUSSION, # ITEM 2
SECSection.MARKET_RISK_DISCLOSURES, # ITEM 3
SECSection.CONTROLS_AND_PROCEDURES, # ITEM 4
# Part II - Other information
SECSection.LEGAL_PROCEEDINGS, # ITEM 1
SECSection.RISK_FACTORS, # ITEM 1A
SECSection.USE_OF_PROCEEDS, # ITEM 2
SECSection.DEFAULTS, # ITEM 3
SECSection.MINE_SAFETY, # ITEM 4
SECSection.OTHER_INFORMATION, # ITEM 5
)
SECTIONS_S1 = (
SECSection.PROSPECTUS_SUMMARY,
SECSection.ABOUT_PROSPECTUS,
SECSection.FORWARD_LOOKING_STATEMENTS,
SECSection.RISK_FACTORS,
SECSection.USE_OF_PROCEEDS,
SECSection.DIVIDEND_POLICY,
SECSection.CAPITALIZATION,
SECSection.DILUTION,
SECSection.MANAGEMENT_DISCUSSION,
SECSection.BUSINESS,
SECSection.MANAGEMENT,
SECSection.COMPENSATION,
SECSection.RELATED_PARTY_TRANSACTIONS,
SECSection.PRINCIPAL_STOCKHOLDERS,
SECSection.DESCRIPTION_OF_STOCK,
SECSection.DESCRIPTION_OF_DEBT,
SECSection.FUTURE_SALE,
SECSection.US_TAX,
SECSection.UNDERWRITING,
SECSection.LEGAL_MATTERS,
SECSection.EXPERTS,
SECSection.MORE_INFORMATION,
)
def validate_section_names(section_names: List[str]):
"""Return section names that don't correspond to a defined enum."""
if len(section_names) == 1 and section_names[0] == ALL_SECTIONS:
return
elif len(section_names) > 1 and ALL_SECTIONS in section_names:
raise ValueError(f"{ALL_SECTIONS} may not be specified with other sections")
invalid_names = [
name for name in section_names if name not in section_string_to_enum
]
if invalid_names:
raise ValueError(f"The following section names are not valid: {invalid_names}")
return
| SECSection |
python | EpistasisLab__tpot | tpot/builtin_modules/nn.py | {
"start": 3542,
"end": 6852
} | class ____(ClassifierMixin, PytorchEstimator):
@abstractmethod
def _init_model(self, X, y): # pragma: no cover
pass
def fit(self, X, y):
"""Generalizable method for fitting a PyTorch estimator to a training
set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
Returns
-------
self
Fitted estimator.
"""
self._init_model(X, y)
assert _pytorch_model_is_fully_initialized(self)
for epoch in range(self.num_epochs):
for i, (samples, labels) in enumerate(self.data_loader):
samples = samples.to(self.device)
labels = labels.to(self.device)
self.optimizer.zero_grad()
outputs = self.network(samples)
loss = self.loss_function(outputs, labels)
loss.backward()
self.optimizer.step()
if self.verbose and ((i + 1) % 100 == 0):
print(
"Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f"
% (
epoch + 1,
self.num_epochs,
i + 1,
self.train_dset_len // self.batch_size,
loss.item(),
)
)
# pylint: disable=attribute-defined-outside-init
self.is_fitted_ = True
return self
def validate_inputs(self, X, y):
# Things we don't want to allow until we've tested them:
# - Sparse inputs
# - Multiclass outputs (e.g., more than 2 classes in `y`)
# - Non-finite inputs
# - Complex inputs
X, y = check_X_y(X, y, accept_sparse=False, allow_nd=False)
# Throw a ValueError if X or y contains NaN or infinity.
assert_all_finite(X)
assert_all_finite(y)
if type_of_target(y) != 'binary':
raise ValueError("Non-binary targets not supported")
if np.any(np.iscomplex(X)) or np.any(np.iscomplex(y)):
raise ValueError("Complex data not supported")
if np.issubdtype(X.dtype, np.object_) or np.issubdtype(y.dtype, np.object_):
try:
X = X.astype(float)
y = y.astype(int)
except (TypeError, ValueError):
raise ValueError("argument must be a string.* number")
return (X, y)
def predict(self, X):
X = check_array(X, accept_sparse=True)
check_is_fitted(self, 'is_fitted_')
X = torch.tensor(X, dtype=torch.float32).to(self.device)
predictions = np.empty(len(X), dtype=int)
for i, rows in enumerate(X):
rows = Variable(rows.view(-1, self.input_size))
outputs = self.network(rows)
_, predicted = torch.max(outputs.data, 1)
predictions[i] = int(predicted)
return predictions.reshape(-1, 1)
def transform(self, X):
return self.predict(X)
| PytorchClassifier |
python | py-pdf__pypdf | pypdf/papersizes.py | {
"start": 129,
"end": 1413
} | class ____:
"""(width, height) of the paper in portrait mode in pixels at 72 ppi."""
# Notes of how to calculate it:
# 1. Get the size of the paper in millimeters
# 2. Convert it to inches (25.4 millimeters is equal to 1 inch)
# 3. Convert it to pixels at 72dpi (1 inch is equal to 72 pixels)
# All Din-A paper sizes follow this pattern:
# 2 x A(n - 1) = A(n)
# So the height of the next bigger one is the width of the smaller one
# The ratio is always approximately 1:2**0.5
# Additionally, A0 is defined to have an area of 1 m**2
# https://en.wikipedia.org/wiki/ISO_216
# Be aware of rounding issues!
A0 = Dimensions(2384, 3370) # 841mm x 1189mm
A1 = Dimensions(1684, 2384)
A2 = Dimensions(1191, 1684)
A3 = Dimensions(842, 1191)
A4 = Dimensions(
595, 842
) # Printer paper, documents - this is by far the most common
A5 = Dimensions(420, 595) # Paperback books
A6 = Dimensions(298, 420) # Postcards
A7 = Dimensions(210, 298)
A8 = Dimensions(147, 210)
# Envelopes
C4 = Dimensions(649, 918)
_din_a = (
PaperSize.A0,
PaperSize.A1,
PaperSize.A2,
PaperSize.A3,
PaperSize.A4,
PaperSize.A5,
PaperSize.A6,
PaperSize.A7,
PaperSize.A8,
)
| PaperSize |
python | numba__llvmlite | llvmlite/ir/types.py | {
"start": 11347,
"end": 11657
} | class ____(object):
def __init__(self, value, size):
self.value = value
self.size = size
def __len__(self):
return self.size
def __getitem__(self, item):
if 0 <= item < self.size:
return self.value
else:
raise IndexError(item)
| _Repeat |
python | google__pytype | pytype/rewrite/abstract/containers_test.py | {
"start": 2110,
"end": 2362
} | class ____(BaseTest):
def test_constant_type(self):
a = self.const_var("a")
b = self.const_var("b")
c = containers.Tuple(self.ctx, (a, b))
assert_type(c.constant, tuple[_Var, ...])
if __name__ == "__main__":
unittest.main()
| TupleTest |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyflakes/F401_12.py | {
"start": 172,
"end": 227
} | class ____:
datetime: Optional[datetime.datetime]
| Class |
python | kamyu104__LeetCode-Solutions | Python/find-common-elements-between-two-arrays.py | {
"start": 50,
"end": 372
} | class ____(object):
def findIntersectionValues(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
lookup1, lookup2 = set(nums1), set(nums2)
return [sum(x in lookup2 for x in nums1), sum(x in lookup1 for x in nums2)]
| Solution |
python | numpy__numpy | numpy/_core/tests/test_defchararray.py | {
"start": 3977,
"end": 4430
} | class ____:
def test1(self):
A = np.array([['abc ', '123 '],
['789 ', 'xyz ']]).view(np.char.chararray)
B = np.array([['abc', '123'],
['789', 'xyz']]).view(np.char.chararray)
assert_(np.all(A == B))
assert_(np.all(A >= B))
assert_(np.all(A <= B))
assert_(not np.any(A > B))
assert_(not np.any(A < B))
assert_(not np.any(A != B))
| TestWhitespace |
python | numba__numba | numba/core/caching.py | {
"start": 737,
"end": 1617
} | class ____(metaclass=ABCMeta):
@property
@abstractmethod
def cache_path(self):
"""
The base filesystem path of this cache (for example its root folder).
"""
@abstractmethod
def load_overload(self, sig, target_context):
"""
Load an overload for the given signature using the target context.
The saved object must be returned if successful, None if not found
in the cache.
"""
@abstractmethod
def save_overload(self, sig, data):
"""
Save the overload for the given signature.
"""
@abstractmethod
def enable(self):
"""
Enable the cache.
"""
@abstractmethod
def disable(self):
"""
Disable the cache.
"""
@abstractmethod
def flush(self):
"""
Flush the cache.
"""
| _Cache |
python | bokeh__bokeh | release/logger.py | {
"start": 590,
"end": 1217
} | class ____:
""""""
def __init__(self, text: str, *, name: str, replacement: str = _DEFAULT_REPLACEMENT) -> None:
self._text = text
self._name = name
self._replacement = replacement
def __repr__(self) -> str:
if self._replacement == _DEFAULT_REPLACEMENT:
return f"Scrubber(..., name={self._name!r})"
return f"Scrubber(..., name={self._name!r}, replacement={self._replacement!r})"
def __len__(self) -> int:
return len(self._text)
def clean(self, text: str) -> str:
""""""
return text.replace(self._text, self._replacement)
| Scrubber |
python | django__django | django/contrib/postgres/indexes.py | {
"start": 5001,
"end": 6027
} | class ____(PostgresIndex):
suffix = "gin"
def __init__(
self, *expressions, fastupdate=None, gin_pending_list_limit=None, **kwargs
):
self.fastupdate = fastupdate
self.gin_pending_list_limit = gin_pending_list_limit
super().__init__(*expressions, **kwargs)
def deconstruct(self):
path, args, kwargs = super().deconstruct()
if self.fastupdate is not None:
kwargs["fastupdate"] = self.fastupdate
if self.gin_pending_list_limit is not None:
kwargs["gin_pending_list_limit"] = self.gin_pending_list_limit
return path, args, kwargs
def get_with_params(self):
with_params = []
if self.gin_pending_list_limit is not None:
with_params.append(
"gin_pending_list_limit = %d" % self.gin_pending_list_limit
)
if self.fastupdate is not None:
with_params.append("fastupdate = %s" % ("on" if self.fastupdate else "off"))
return with_params
| GinIndex |
python | tensorflow__tensorflow | tensorflow/python/util/tf_decorator_test.py | {
"start": 9539,
"end": 10648
} | class ____(test.TestCase):
def testRewrapMutatesAffectedFunction(self):
@test_injectable_decorator_square
@test_injectable_decorator_increment
def test_rewrappable_decorated(x):
return x * 2
def new_target(x):
return x * 3
self.assertEqual((1 * 2 + 1)**2, test_rewrappable_decorated(1))
prev_target, _ = tf_decorator.unwrap(test_rewrappable_decorated)
tf_decorator.rewrap(test_rewrappable_decorated, prev_target, new_target)
self.assertEqual((1 * 3 + 1)**2, test_rewrappable_decorated(1))
def testRewrapOfDecoratorFunction(self):
@test_injectable_decorator_square
@test_injectable_decorator_increment
def test_rewrappable_decorated(x):
return x * 2
def new_target(x):
return x * 3
prev_target = test_rewrappable_decorated._tf_decorator._decorated_target
# In this case, only the outer decorator (test_injectable_decorator_square)
# should be preserved.
tf_decorator.rewrap(test_rewrappable_decorated, prev_target, new_target)
self.assertEqual((1 * 3)**2, test_rewrappable_decorated(1))
| TfDecoratorRewrapTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-vectara/destination_vectara/writer.py | {
"start": 517,
"end": 5708
} | class ____:
write_buffer: List[Mapping[str, Any]] = []
flush_interval = 1000
def __init__(
self,
client: VectaraClient,
text_fields: Optional[List[str]],
title_field: Optional[str],
metadata_fields: Optional[List[str]],
catalog: ConfiguredAirbyteCatalog,
):
self.client = client
self.text_fields = text_fields
self.title_field = title_field
self.metadata_fields = metadata_fields
self.streams = {f"{stream.stream.namespace}_{stream.stream.name}": stream for stream in catalog.streams}
self.ids_to_delete: List[str] = []
def delete_streams_to_overwrite(self, catalog: ConfiguredAirbyteCatalog) -> None:
streams_to_overwrite = [
f"{stream.stream.namespace}_{stream.stream.name}"
for stream in catalog.streams
if stream.destination_sync_mode == DestinationSyncMode.overwrite
]
if len(streams_to_overwrite):
self.client.delete_doc_by_metadata(metadata_field_name=METADATA_STREAM_FIELD, metadata_field_values=streams_to_overwrite)
def _delete_documents_to_dedupe(self):
if len(self.ids_to_delete) > 0:
self.client.delete_docs_by_id(document_ids=self.ids_to_delete)
def queue_write_operation(self, record: AirbyteRecordMessage) -> None:
"""Adds messages to the write queue and flushes if the buffer is full"""
stream_identifier = self._get_stream_id(record=record)
document_section = self._get_document_section(record=record)
document_metadata = self._get_document_metadata(record=record)
document_title = self._get_document_title(record=record)
primary_key = self._get_record_primary_key(record=record)
if primary_key:
document_id = f"Stream_{stream_identifier}_Key_{primary_key}"
if self.streams[stream_identifier].destination_sync_mode == DestinationSyncMode.append_dedup:
self.ids_to_delete.append(document_id)
else:
document_id = str(uuid.uuid4().int)
self.write_buffer.append((document_section, document_metadata, document_title, document_id))
if len(self.write_buffer) == self.flush_interval:
self.flush()
def flush(self) -> None:
"""Flush all documents in Queue to Vectara"""
self._delete_documents_to_dedupe()
self.client.index_documents(self.write_buffer)
self.write_buffer.clear()
self.ids_to_delete.clear()
def _get_document_section(self, record: AirbyteRecordMessage):
relevant_fields = self._extract_relevant_fields(record, self.text_fields)
if len(relevant_fields) == 0:
text_fields = ", ".join(self.text_fields) if self.text_fields else "all fields"
raise AirbyteTracedException(
internal_message="No text fields found in record",
message=f"Record {str(record.data)[:250]}... does not contain any of the configured text fields: {text_fields}. Please check your processing configuration, there has to be at least one text field set in each record.",
failure_type=FailureType.config_error,
)
document_section = relevant_fields
return document_section
def _extract_relevant_fields(self, record: AirbyteRecordMessage, fields: Optional[List[str]]) -> Dict[str, Any]:
relevant_fields = {}
if fields and len(fields) > 0:
for field in fields:
values = dpath.util.values(record.data, field, separator=".")
if values and len(values) > 0:
relevant_fields[field] = values if len(values) > 1 else values[0]
else:
relevant_fields = record.data
return relevant_fields
def _get_document_metadata(self, record: AirbyteRecordMessage) -> Dict[str, Any]:
document_metadata = self._extract_relevant_fields(record, self.metadata_fields)
document_metadata[METADATA_STREAM_FIELD] = self._get_stream_id(record)
return document_metadata
def _get_document_title(self, record: AirbyteRecordMessage) -> str:
title = "Untitled"
if self.title_field:
found_title = dpath.util.values(record.data, self.title_field, separator=".")
if found_title:
title = found_title[0]
return title
def _get_stream_id(self, record: AirbyteRecordMessage) -> str:
return f"{record.namespace}_{record.stream}"
def _get_record_primary_key(self, record: AirbyteRecordMessage) -> Optional[str]:
stream_identifier = self._get_stream_id(record)
current_stream: ConfiguredAirbyteStream = self.streams[stream_identifier]
if not current_stream.primary_key:
return None
primary_key = []
for key in current_stream.primary_key:
try:
primary_key.append(str(dpath.util.get(record.data, key)))
except KeyError:
primary_key.append("__not_found__")
stringified_primary_key = "_".join(primary_key)
return f"{stream_identifier}_{stringified_primary_key}"
| VectaraWriter |
python | keras-team__keras | keras/src/legacy/saving/serialization.py | {
"start": 4397,
"end": 5754
} | class ____(dict):
"""A configuration container that keeps track of references.
`SharedObjectConfig` will automatically attach a shared object ID to any
configs which are referenced more than once, allowing for proper shared
object reconstruction at load time.
In most cases, it would be more proper to subclass something like
`collections.UserDict` or `collections.Mapping` rather than `dict` directly.
Unfortunately, python's json encoder does not support `Mapping`s. This is
important functionality to retain, since we are dealing with serialization.
We should be safe to subclass `dict` here, since we aren't actually
overriding any core methods, only augmenting with a new one for reference
counting.
"""
def __init__(self, base_config, object_id, **kwargs):
self.ref_count = 1
self.object_id = object_id
super().__init__(base_config, **kwargs)
def increment_ref_count(self):
# As soon as we've seen the object more than once, we want to attach the
# shared object ID. This allows us to only attach the shared object ID
# when it's strictly necessary, making backwards compatibility breakage
# less likely.
if self.ref_count == 1:
self[SHARED_OBJECT_KEY] = self.object_id
self.ref_count += 1
| SharedObjectConfig |
python | numba__numba | numba/tests/test_errorhandling.py | {
"start": 6366,
"end": 13682
} | class ____(unittest.TestCase):
def test_specific_error(self):
given_reason = "specific_reason"
def foo():
pass
@overload(foo)
def ol_foo():
raise errors.NumbaValueError(given_reason)
@njit
def call_foo():
foo()
with self.assertRaises(errors.TypingError) as raises:
call_foo()
excstr = str(raises.exception)
self.assertIn(error_reasons['specific_error'].splitlines()[0], excstr)
self.assertIn(given_reason, excstr)
def test_no_match_error(self):
def foo():
pass
@overload(foo)
def ol_foo():
return None # emulate no impl available for type
@njit
def call_foo():
foo()
with self.assertRaises(errors.TypingError) as raises:
call_foo()
excstr = str(raises.exception)
self.assertIn("No match", excstr)
@skip_unless_scipy
def test_error_function_source_is_correct(self):
""" Checks that the reported source location for an overload is the
overload implementation source, not the actual function source from the
target library."""
@njit
def foo():
np.linalg.svd("chars")
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn(error_reasons['specific_error'].splitlines()[0], excstr)
expected_file = os.path.join("numba", "np", "linalg.py")
expected = f"Overload in function 'svd_impl': File: {expected_file}:"
self.assertIn(expected.format(expected_file), excstr)
def test_concrete_template_source(self):
# hits ConcreteTemplate
@njit
def foo():
return 'a' + 1
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("Overload of function 'add'", excstr)
# there'll be numerous matched templates that don't work but as they
# are mostly "overload"s they'll just appear as "No match".
self.assertIn("No match.", excstr)
def test_abstract_template_source(self):
# hits AbstractTemplate
@njit
def foo():
return len(1)
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("Overload of function 'len'", excstr)
def test_callable_template_source(self):
# hits CallableTemplate
@njit
def foo():
return np.angle(None)
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("No implementation of function Function(<function angle",
excstr)
def test_overloadfunction_template_source(self):
# hits _OverloadFunctionTemplate
def bar(x):
pass
@overload(bar)
def ol_bar(x):
pass
@njit
def foo():
return bar(1)
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
# there will not be "numerous" matched templates, there's just one,
# the one above, so assert it is reported
self.assertNotIn("<numerous>", excstr)
expected_file = os.path.join("numba", "tests",
"test_errorhandling.py")
expected_ol = f"Overload of function 'bar': File: {expected_file}:"
self.assertIn(expected_ol.format(expected_file), excstr)
self.assertIn("No match.", excstr)
def test_intrinsic_template_source(self):
# hits _IntrinsicTemplate
given_reason1 = "x must be literal"
given_reason2 = "array.ndim must be 1"
@intrinsic
def myintrin(typingctx, x, arr):
if not isinstance(x, types.IntegerLiteral):
raise errors.RequireLiteralValue(given_reason1)
if arr.ndim != 1:
raise errors.NumbaValueError(given_reason2)
sig = types.intp(x, arr)
def codegen(context, builder, signature, args):
pass
return sig, codegen
@njit
def call_intrin():
arr = np.zeros((2, 2))
myintrin(1, arr)
with self.assertRaises(errors.TypingError) as raises:
call_intrin()
excstr = str(raises.exception)
self.assertIn(error_reasons['specific_error'].splitlines()[0], excstr)
self.assertIn(given_reason1, excstr)
self.assertIn(given_reason2, excstr)
self.assertIn("Intrinsic in function", excstr)
def test_overloadmethod_template_source(self):
# doesn't hit _OverloadMethodTemplate for source as it's a nested
# exception
@overload_method(types.UnicodeType, 'isnonsense')
def ol_unicode_isnonsense(self):
pass
@njit
def foo():
"abc".isnonsense()
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("Overload of function 'ol_unicode_isnonsense'", excstr)
def test_overloadattribute_template_source(self):
# doesn't hit _OverloadMethodTemplate for source as it's a nested
# exception
@overload_attribute(types.UnicodeType, 'isnonsense')
def ol_unicode_isnonsense(self):
pass
@njit
def foo():
"abc".isnonsense
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("Overload of function 'ol_unicode_isnonsense'", excstr)
def test_external_function_pointer_template_source(self):
from numba.tests.ctypes_usecases import c_cos
@njit
def foo():
c_cos('a')
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("Type Restricted Function in function 'unknown'", excstr)
@skip_unless_cffi
def test_cffi_function_pointer_template_source(self):
from numba.tests import cffi_usecases as mod
mod.init()
func = mod.cffi_cos
@njit
def foo():
func('a')
with self.assertRaises(errors.TypingError) as raises:
foo()
excstr = str(raises.exception)
self.assertIn("Type Restricted Function in function 'unknown'", excstr)
def test_missing_source(self):
@structref.register
class ParticleType(types.StructRef):
pass
class Particle(structref.StructRefProxy):
def __new__(cls, pos, mass):
return structref.StructRefProxy.__new__(cls, pos)
# didn't provide the required mass argument ----^
structref.define_proxy(Particle, ParticleType, ["pos", "mass"])
with self.assertRaises(errors.TypingError) as raises:
Particle(pos=1, mass=2)
excstr = str(raises.exception)
self.assertIn("missing a required argument: 'mass'", excstr)
| TestErrorMessages |
python | apache__airflow | providers/edge3/src/airflow/providers/edge3/worker_api/datamodels.py | {
"start": 6140,
"end": 6696
} | class ____(BaseModel):
"""The return class for the worker set state."""
state: Annotated[EdgeWorkerState, Field(description="State of the worker from the view of the server.")]
queues: Annotated[
list[str] | None,
Field(
description="List of queues the worker is pulling jobs from. If not provided, worker pulls from all queues."
),
]
maintenance_comments: Annotated[
str | None,
Field(description="Comments about the maintenance state of the worker."),
] = None
| WorkerSetStateReturn |
python | langchain-ai__langchain | libs/partners/prompty/langchain_prompty/renderers.py | {
"start": 124,
"end": 572
} | class ____(Invoker):
"""Render a mustache template."""
def __init__(self, prompty: Prompty) -> None:
self.prompty = prompty
def invoke(self, data: BaseModel) -> BaseModel:
if not isinstance(data, SimpleModel):
raise ValueError("Expected data to be an instance of SimpleModel")
generated = mustache.render(self.prompty.content, data.item)
return SimpleModel[str](item=generated)
| MustacheRenderer |
python | google__jax | tests/checkify_test.py | {
"start": 1191,
"end": 30870
} | class ____(jtu.JaxTestCase):
@jtu.sample_product(jit=[False, True])
@jtu.skip_on_devices("tpu")
def test_jit_nan(self, jit):
def f(x1, x2):
y1 = jnp.sin(x1)
y2 = jnp.sin(x2)
return y1 + y2
f = jax.jit(f) if jit else f
checked_f = checkify.checkify(f, errors=checkify.float_checks)
err, _ = checked_f(3., 4.)
self.assertIsNone(err.get())
err, _ = checked_f(3., jnp.inf)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "nan generated by primitive: sin")
@jtu.sample_product(jit=[False, True])
def test_jit_oob(self, jit):
def f(x, i):
y = jnp.sin(x)
z = y[i]
w = jnp.cos(z)
return w
f = jax.jit(f) if jit else f
checked_f = checkify.checkify(f, errors=checkify.index_checks)
err, _ = checked_f(jnp.arange(3), 2)
self.assertIsNone(err.get())
err, _ = checked_f(jnp.arange(3), 5)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "out-of-bounds indexing")
@parameterized.named_parameters(
("get", lambda x: x.get()),
("set", lambda x: x.set(1)),
("add", lambda x: x.add(1)),
("mul", lambda x: x.multiply(1)),
("div", lambda x: x.divide(1)),
("pow", lambda x: x.power(1)),
("min", lambda x: x.min(1)),
("max", lambda x: x.max(1)),
)
def test_jit_oob_update(self, update_fn):
f = jax.jit(lambda x, i: update_fn(x.at[i]))
checked_f = checkify.checkify(f, errors=checkify.index_checks)
err, _ = checked_f(jnp.arange(3), 2)
self.assertIsNone(err.get())
err, _ = checked_f(jnp.arange(3), 3)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "out-of-bounds indexing")
@jtu.sample_product(jit=[False, True])
@jax.numpy_dtype_promotion('standard')
def test_jit_div_errors(self, jit):
def f(x, y):
return x / y
f = jax.jit(f) if jit else f
checked_f = checkify.checkify(f, errors=checkify.float_checks)
err, _ = checked_f(jnp.ones((3,)), jnp.ones((3,)))
self.assertIsNone(err.get())
err, _ = checked_f(jnp.ones((3,)), jnp.array([1., 0., 1.]))
self.assertIsNotNone(err.get())
err, _ = checked_f(jnp.array([1, jnp.inf, 1]), jnp.array([1, jnp.inf, 1]))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "nan generated by primitive: div")
@jtu.sample_product(jit=[False, True])
@jtu.skip_on_devices("tpu")
def test_jit_multi(self, jit):
def f(x, i):
y = x[i]
z = jnp.cos(y)
return z
f = jax.jit(f) if jit else f
checked_f = checkify.checkify(f, errors=checkify.automatic_checks)
# no error
err, _ = checked_f(jnp.array([0., jnp.inf, 2.]), 2)
self.assertIsNone(err.get())
# oob error
err, _ = checked_f(jnp.array([0., 1., 2.]), 5)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "out-of-bounds indexing")
# nan error
err, _ = checked_f(jnp.array([0., 1., jnp.inf]), 2)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "nan generated by primitive: cos")
@parameterized.named_parameters(
("gather", lambda x: x.get()),
("scatter_update", lambda x: x.set(1.)),
("scatter_add", lambda x: x.add(1.)),
("scatter_mul", lambda x: x.multiply(1.)),
("scatter_div", lambda x: x.divide(1.)),
("scatter_pow", lambda x: x.power(1.)),
("scatter_min", lambda x: x.min(1.)),
("scatter_max", lambda x: x.max(1.)),
)
def test_numpy_indexing_oobs(self, update_op):
def raises_oob(fn, idx, *expected_strs):
err, _ = checkify.checkify(jax.jit(fn), errors=checkify.index_checks)(x, idx)
error_txt = err.get()
self.assertIsNotNone(error_txt)
self.assertStartsWith(error_txt, "out-of-bounds indexing")
for s in expected_strs:
self.assertIn(s, error_txt)
x = jnp.ones((2, 3, 7))
axis0_msg = "axis 0 with size 2"
axis1_msg = "axis 1 with size 3"
axis2_msg = "axis 2 with size 7"
single_idx = lambda x, i: update_op(x.at[i])
raises_oob(single_idx, 5, "index 5", axis0_msg)
raises_oob(single_idx, -5, "index -3", axis0_msg)
raises_oob(single_idx, (0, 100), "index 100", axis1_msg)
raises_oob(single_idx, (0, 5, 100), "index 5", axis1_msg)
raises_oob(single_idx, (0, 0, 100), "index 100", axis2_msg)
raises_oob(single_idx, ((1, 20), (1, 4)), "index 20", axis0_msg)
raises_oob(single_idx, ((1, 20), (3, 4)), "index 3", axis1_msg)
raises_oob(single_idx, (((1, 1), (1, 20)), 3), "index 3", axis1_msg)
raises_oob(single_idx, (((1, 1), (1, 20)), 0), "index 20", axis0_msg)
multi_idx = lambda x, i: update_op(x.at[i[0], :, i[1]])
raises_oob(multi_idx, (0, 9), "index 9", axis2_msg)
# TODO(lenamartens): numpy reports index -5 here, need to normalize?
raises_oob(multi_idx, (-5, 9), "index -3", axis0_msg)
raises_oob(multi_idx, (5, -9), "index 5", axis0_msg)
raises_oob(multi_idx, ((0, 9), 0), "index 9", axis0_msg)
def test_dynamic_slice_oobs(self):
def raises_oob(fn, x, idx, *expected_strs):
err, _ = checkify.checkify(jax.jit(fn), errors=checkify.index_checks)(x, idx)
error_txt = err.get()
self.assertIsNotNone(error_txt)
self.assertStartsWith(error_txt, "out-of-bounds indexing")
for s in expected_strs:
self.assertIn(s, error_txt)
x = jnp.ones((2, 3, 7))
raises_oob(partial(lax.dynamic_slice, slice_sizes=(1, 1, 1)), x, (2, 0, 0), 'index 2')
raises_oob(partial(lax.dynamic_slice, slice_sizes=(1, 1, 1)), x, (-3, 0, 0), 'index -1')
raises_oob(partial(lax.dynamic_slice, slice_sizes=(1, 1, 1)), x, (0, 3, 0), 'index 3')
raises_oob(partial(lax.dynamic_slice, slice_sizes=(1, 1, 1)), x, (0, -5, 0), 'index -2')
raises_oob(partial(lax.dynamic_slice, slice_sizes=(1, 1, 1)), x, (0, 1, 8), 'index 8')
raises_oob(partial(lax.dynamic_slice, slice_sizes=(1, 1, 1)), x, (0, 1, -10), 'index -3')
def test_dynamic_update_slice_oobs(self):
def raises_oob(fn, x, y, idx, *expected_strs):
err, _ = checkify.checkify(jax.jit(fn), errors=checkify.index_checks)(x, y, idx)
error_txt = err.get()
self.assertIsNotNone(error_txt)
self.assertStartsWith(error_txt, "out-of-bounds indexing")
for s in expected_strs:
self.assertIn(s, error_txt)
x = jnp.ones((2, 3, 7))
y = jnp.zeros((1, 1, 1))
raises_oob(lax.dynamic_update_slice, x, y, (2, 0, 0), 'index 2')
raises_oob(lax.dynamic_update_slice, x, y, (-3, 0, 0), 'index -1')
raises_oob(lax.dynamic_update_slice, x, y, (0, 3, 0), 'index 3')
raises_oob(lax.dynamic_update_slice, x, y, (0, -5, 0), 'index -2')
raises_oob(lax.dynamic_update_slice, x, y, (0, 1, 8), 'index 8')
raises_oob(lax.dynamic_update_slice, x, y, (0, 1, -10), 'index -3')
@jtu.sample_product(jit=[False, True])
def test_jit_ordering(self, jit):
def f(x, i):
y = x[i]
z = jnp.sin(x)
return y * z
f = jax.jit(f) if jit else f
checked_f = checkify.checkify(f, errors=checkify.automatic_checks)
# both oob and nan error, but oob happens first
err, _ = checked_f(jnp.array([0., 1., jnp.inf]), 5)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "out-of-bounds indexing")
def test_pmap_basic(self):
if len(jax.devices()) < 2:
raise unittest.SkipTest("requires at least 2 devices")
@jax.pmap
def f(x):
y1 = jnp.sin(1./x)
y2 = jnp.sin(x)
return y1 + y2
checked_f = checkify.checkify(f, errors=checkify.nan_checks)
xs = jnp.array([1., 2.])
err, _ = checked_f(xs)
self.assertIsNone(err.get())
xs = jnp.array([3., 0.])
err, _ = checked_f(xs)
self.assertIsNotNone(err.get())
self.assertIn("nan generated by primitive: sin", err.get())
def test_pmap_collectives(self):
if len(jax.devices()) < 4:
raise unittest.SkipTest("requires at least 2 devices")
@partial(jax.pmap, axis_name="i")
def f(x1):
return jax.lax.all_gather(x1, axis_name="i")
checked_f = checkify.checkify(f, errors=checkify.float_checks)
xs = jnp.array([0., 2., 3., 6.])
err, _ = checked_f(xs)
self.assertIsNone(err.get())
@jtu.skip_on_devices("tpu")
def test_cond_basic(self):
@jax.jit
def f(x):
def true_fun(x):
return jnp.sin(x)
def false_fun(x):
checkify.check(x > -1, "oh no")
return x / 0.
return lax.cond(x > 0, true_fun, false_fun, x)
checked_f = checkify.checkify(f, errors=checkify.all_checks)
err, _ = checked_f(3.)
self.assertIsNone(err.get())
err, _ = checked_f(jnp.inf)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "nan generated by primitive: sin")
err, _ = checked_f(-jnp.inf)
self.assertStartsWith(err.get(), "oh no")
err, _ = checked_f(0.)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "division by zero")
def test_cond_different_payloads(self):
@jax.jit
def f(x):
def true_fun(x):
checkify.check(~x, "{one}", one=x)
def false_fun(x):
checkify.check(x, "{one} and {two}", one=x, two=x)
return lax.cond(x, true_fun, false_fun, x)
checked_f = checkify.checkify(f)
err, _ = checked_f(True)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "True")
err, _ = checked_f(False)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "False and False")
def test_cond_nd_payloads(self):
@jax.jit
def f(x):
def true_fun(x):
checkify.check(jnp.all(x > 0), "{one}", one=x)
def false_fun(x):
checkify.check(jnp.all(x < 0), "{one} and {two}", one=x, two=x)
return lax.cond(jnp.all(x < 0), true_fun, false_fun, x)
checked_f = checkify.checkify(f)
err, _ = checked_f(jnp.arange(0, 4))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "[0 1 2 3] and [0 1 2 3]")
err, _ = checked_f(jnp.arange(-4, -1))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "[-4 -3 -2]")
@jtu.skip_on_devices("tpu")
def test_scan_map(self):
def scan_body(_, x):
return None, jnp.sin(x)
@jax.jit
def f(xs):
return lax.scan(scan_body, None, xs)
checked_f = checkify.checkify(f, errors=checkify.float_checks)
xs = jnp.array([0., 2.])
err, (_, ch_outs) = checked_f(xs)
_, outs = f(xs)
self.assertIsNone(err.get())
self.assertArraysEqual(ch_outs, outs)
xs = jnp.array([3., jnp.inf])
err, (_, ch_outs) = checked_f(xs)
_, outs = f(xs)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "nan generated by primitive: sin")
self.assertArraysEqual(ch_outs, outs)
@jtu.skip_on_devices("tpu")
def test_scan_carry(self):
def scan_body(carry, x):
carry = carry-1.
possible_nan = jnp.sin(1./carry)
return carry, x+possible_nan
@jax.jit
def f(carry, xs):
return lax.scan(scan_body, carry, xs)
checked_f = checkify.checkify(f, errors=checkify.float_checks)
carry, xs = 3., jnp.ones((2,))
err, (ch_out_carry, ch_outs) = checked_f(carry, xs)
out_carry, outs = f(carry, xs)
self.assertIsNone(err.get())
self.assertArraysEqual(ch_outs, outs)
self.assertArraysEqual(ch_out_carry, out_carry)
# error happens on first iteration
carry, xs = 1., jnp.ones((2,))
err, (ch_out_carry, ch_outs) = checked_f(carry, xs)
out_carry, outs = f(carry, xs)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "division by zero")
self.assertArraysEqual(ch_outs, outs)
self.assertArraysEqual(ch_out_carry, out_carry)
# error happens on second iteration
carry, xs = 2., jnp.ones((4,))
err, (ch_out_carry, ch_outs) = checked_f(carry, xs)
out_carry, outs = f(carry, xs)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "division by zero")
self.assertArraysEqual(ch_outs, outs)
self.assertArraysEqual(ch_out_carry, out_carry)
@jtu.skip_on_devices("tpu")
def test_while_loop_body_error(self):
def while_cond(val):
i, _ = val
return i < 2
def while_body(val):
i, x = val
possible_nan = jnp.sin(1./i)
return i+1., x+possible_nan
@jax.jit
def f(init_val):
return lax.while_loop(while_cond, while_body, (init_val, 0.))
checked_f = checkify.checkify(f, errors=checkify.float_checks)
init_val = 1.
err, ch_out = checked_f(init_val)
out = f(init_val)
self.assertIsNone(err.get())
self.assertArraysEqual(ch_out, out)
init_val = 0.
err, ch_out = checked_f(init_val)
out = f(init_val)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "division by zero")
self.assertArraysEqual(ch_out, out)
@jtu.skip_on_devices("tpu")
def test_while_loop_cond_error(self):
def while_cond(val):
_ = jnp.sin(1./val)
return 0. * _ + val < 2.
def while_body(val):
return val+1.
@jax.jit
def f(init_val):
return lax.while_loop(while_cond, while_body, init_val)
checked_f = checkify.checkify(f, errors=checkify.float_checks)
init_val = 1.
err, ch_out = checked_f(init_val)
out = f(init_val)
self.assertIsNone(err.get())
self.assertArraysEqual(ch_out, out)
init_val = 0.
err, ch_out = checked_f(init_val)
out = f(init_val)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "division by zero")
self.assertArraysEqual(ch_out, out)
@jtu.skip_on_devices("tpu")
def test_while_loop_cond_error_and_false(self):
# Tests if an error is generated when cond returns False.
def while_cond(val):
possible_nan = jnp.sin(1./val)
return jnp.logical_not(jnp.isnan(possible_nan))
@jax.jit
def f(init_val):
return lax.while_loop(while_cond, lambda val: val-1, init_val)
checked_f = checkify.checkify(f, errors=checkify.float_checks)
# error on first cond
init_val = 0.
err, _ = checked_f(init_val)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "division by zero")
# error on second cond
init_val = 1.
err, _ = checked_f(init_val)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "division by zero")
def test_checify_donation_no_forwarding(self):
mesh = jtu.create_mesh((2,), ('x',))
@checkify.checkify
@jax.jit(donate_argnums=(0,))
def f(x: jax.Array) -> jax.Array:
checkify.check(jnp.all(x > 0), "a")
return x
x = jax.device_put(jnp.zeros(64, dtype="int32"), NamedSharding(mesh, P()))
err, y = f(x)
err, z = f(y) # doesn't crash
@jtu.skip_on_devices("tpu")
def test_while_loop_body_and_cond_error(self):
def while_cond(val):
i, cond_val, _ = val
j = jnp.sin(cond_val)
return i + (0. * j) < 2 # don't let the sin value be dead code
def while_body(val):
i, cond_val, body_val = val
possible_nan = jnp.cos(body_val)
return i+1., cond_val, possible_nan
@jax.jit
def f(cond_val, body_val):
return lax.while_loop(while_cond, while_body, (0., cond_val, body_val))
checked_f = checkify.checkify(f, errors=checkify.float_checks)
cond_val = jnp.inf
body_val = 1.
err, _ = checked_f(cond_val, body_val)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "nan generated by primitive: sin")
cond_val = 1.
body_val = jnp.inf
err, _ = checked_f(cond_val, body_val)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "nan generated by primitive: cos")
cond_val = jnp.inf
body_val = jnp.inf
err, _ = checked_f(cond_val, body_val)
self.assertIsNotNone(err.get())
# first error which occurs is in cond
self.assertStartsWith(err.get(), "nan generated by primitive: sin")
def test_checkify_jit(self):
def f(x):
# unary func
return x / x
def g(x, y):
# binary func
return x / y
devices = jax.local_devices()[:8] # Taking up to 8 devices
mesh = jax.sharding.Mesh(np.array(devices), ["dev"])
ps = NamedSharding(mesh, jax.sharding.PartitionSpec("dev"))
inp = np.arange(8)
x = array.make_array_from_callback(inp.shape, ps, lambda idx: inp[idx])
f = jax.jit(f, in_shardings=ps, out_shardings=ps)
f = checkify.checkify(f, errors=checkify.float_checks)
g = jax.jit(g, in_shardings=ps, out_shardings=ps)
g = checkify.checkify(g, errors=checkify.float_checks)
with jax.set_mesh(mesh):
u_err, _ = f(x)
b_err, _ = g(x, x)
self.assertIsNotNone(u_err.get())
self.assertStartsWith(u_err.get(), "division by zero")
self.assertIsNotNone(b_err.get())
self.assertStartsWith(b_err.get(), "division by zero")
@parameterized.parameters(True, False)
def test_shard_map(self, check_vma):
def f(x):
# unary func
return jax.lax.axis_index("dev") * x / x
def g(x, y):
# binary func
return jax.lax.axis_index("dev") * x / y
devices = jax.local_devices()[:8] # Taking up to 8 devices
mesh = jax.sharding.Mesh(np.array(devices), ["dev"])
pspec = jax.sharding.PartitionSpec("dev")
ps = NamedSharding(mesh, pspec)
inp = np.tile(np.arange(4, dtype=np.int32), 2)
x = array.make_array_from_callback(inp.shape, ps, lambda idx: inp[idx])
f = shard_map.shard_map(
f, mesh=mesh, in_specs=pspec, out_specs=pspec, check_vma=check_vma
)
f = jax.jit(f, in_shardings=ps, out_shardings=ps)
f = checkify.checkify(f, errors=checkify.float_checks)
g = shard_map.shard_map(
g, mesh=mesh, in_specs=(pspec, pspec), out_specs=pspec, check_vma=check_vma
)
g = jax.jit(g, in_shardings=(ps, ps), out_shardings=ps)
g = checkify.checkify(g, errors=checkify.float_checks)
u_err, _ = f(x)
b_err, _ = g(x, x)
divbyzero = "division by zero"
expected_err = f"at mapped index 0: {divbyzero}"
if (next_device_with_zero := len(devices) // 2) != 0:
expected_err += f"\nat mapped index {next_device_with_zero}: {divbyzero}"
self.assertIsNotNone(u_err.get())
self.assertEqual(u_err.get(), expected_err)
self.assertIsNotNone(b_err.get())
self.assertEqual(b_err.get(), expected_err)
def test_empty_enabled_errors(self):
def multi_errors(x):
x = x/0 # DIV
x = jnp.sin(x) # NAN
x = x[500] # OOB
checkify.check(x < 0, "must be negative!") # ASSERT
return x
x = jnp.ones((2,))
err, _ = checkify.checkify(multi_errors, errors=set())(x)
self.assertIsNone(err.get())
@parameterized.named_parameters(
("assert", checkify.user_checks, "must be negative!"),
("div", checkify.div_checks, "division by zero"),
("nan", checkify.nan_checks, "nan generated"),
("oob", checkify.index_checks, "out-of-bounds indexing"),
("automatic_checks", checkify.automatic_checks, "division by zero"),
)
@jtu.skip_on_devices("tpu")
def test_enabled_errors(self, error_set, expected_error):
def multi_errors(x):
checkify.check(jnp.all(x < 0), "must be negative!") # ASSERT
x = x/0 # DIV
x = jnp.sin(x) # NAN
x = x[500] # OOB
return x
x = jnp.ones((2,))
err, _ = checkify.checkify(multi_errors, errors=error_set)(x)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), expected_error)
@jtu.skip_on_devices("tpu")
def test_post_process_call(self):
@partial(checkify.checkify, errors=checkify.float_checks)
def g(x):
@jax.jit
def f(y):
return jnp.sin(x * y)
return f(jnp.inf)
err, _ = g(2.)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "nan generated by primitive: sin")
@jtu.skip_on_devices("tpu")
def test_post_process_map(self):
@partial(checkify.checkify, errors=checkify.float_checks)
def g(x):
@jax.pmap
def f(y):
return jnp.sin(x * y), jnp.cos(x * y)
return f(jnp.array([jnp.inf]))[0]
err, _ = g(2.)
self.assertIsNotNone(err.get())
self.assertIn("nan generated by primitive: sin", err.get())
@jtu.skip_on_devices("tpu")
def test_custom_jvp(self):
@jax.custom_jvp
def sin(x):
return jnp.sin(x)
@sin.defjvp
def sin_jvp(primals, tangents):
(x,), (xdot,) = primals, tangents
return sin(x), jnp.cos(x) * xdot
f = checkify.checkify(sin, errors=checkify.float_checks)
err, y = f(3.)
self.assertIsNone(err.get())
err, y = f(jnp.inf)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), 'nan generated by primitive: sin')
# When we hit the custom jvp rule with jvp-of-checkify, no checks are added.
(err, y), (errdot, ydot) = jax.jvp(f, (3.,), (1.,)) # doesn't crash
self.assertIsNone(err.get()) # no error
self.assertEmpty(err._metadata) # and no checks were added!
self.assertEmpty(errdot._metadata)
y_expected, ydot_expected = jax.jvp(jnp.sin, (3.,), (1.,))
self.assertAllClose(y, y_expected)
self.assertAllClose(ydot, ydot_expected)
# Grad-of-checkify doesn't crash either.
x_bar = jax.grad(lambda x: f(x)[1])(3.)
self.assertAllClose(x_bar, jnp.cos(3.))
# Checkify-of-jvp adds checks (unlike jvp-of-checkify above).
g = checkify.checkify(lambda x, xdot: jax.jvp(sin, (x,), (xdot,)),
errors=checkify.float_checks)
err, (y, ydot) = g(3., 1.) # doesn't crash
self.assertIsNone(err.get()) # no error
self.assertNotEmpty(err._metadata) # but checks were added!
self.assertAllClose(y, jnp.sin(3.))
self.assertAllClose(ydot, jnp.cos(3.))
err, _ = g(jnp.inf, 1.)
self.assertIsNotNone(err.get()) # yes error
self.assertStartsWith(err.get(), 'nan generated by primitive: sin')
@jtu.skip_on_devices("tpu")
def test_custom_vjp(self):
@jax.custom_vjp
def sin(x):
return jnp.sin(x)
def sin_fwd(x):
return jnp.sin(x), 2. * x
def sin_bwd(x2, g):
return jnp.cos(x2 / 2.) * g,
sin.defvjp(sin_fwd, sin_bwd)
f = checkify.checkify(sin, errors=checkify.float_checks)
# no differentiation, no error
err, y = f(3.)
self.assertIsNone(err.get())
# no differentiation, yes error
err, y = f(jnp.inf)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), 'nan generated by primitive: sin')
# When we hit the custom vjp rule with vjp-of-checkify, no checks are added.
(err, y), f_vjp = jax.vjp(f, 3.)
self.assertIsNone(err.get()) # no error
self.assertEmpty(err._metadata) # and no checks were added!
# Checkify-of-vjp adds checks (unlike vjp-of-checkify above).
err, y = checkify.checkify(jax.grad(sin), errors=checkify.float_checks)(3.)
self.assertIsNone(err.get()) # no error
self.assertNotEmpty(err._metadata) # but checks were added!
err, y = checkify.checkify(jax.grad(sin),
errors=checkify.float_checks)(jnp.inf)
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "nan generated by primitive: sin")
def test_scan_consts(self):
def f(xs):
def scan_body(carry, _):
# closes oves xs
return carry+1, xs[carry]
return lax.scan(scan_body, 1, xs)
checked_f = checkify.checkify(f, errors=checkify.index_checks)
err, _ = checked_f(jnp.ones((7,)))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "out-of-bounds indexing")
def test_scan_consts2(self):
def f(xs):
def scan_body(carry, _):
# add more consts!
_ = xs[carry], xs[carry], jnp.sin(np.arange(11.))
return carry+1, xs[carry]
return lax.scan(scan_body, 1, xs)[1]
checked_f = checkify.checkify(f, errors=checkify.index_checks)
err, _ = checked_f(jnp.ones((7, 3)))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "out-of-bounds indexing")
def test_while_consts(self):
def f(xs):
def while_cond(carry):
i, _ = carry
_ = xs[i], jnp.sin(np.arange(11.))
return i > -1
def while_body(carry):
i, _ = carry
x = xs[i]
return i - 1, x/i
return lax.while_loop(while_cond, while_body, (0, jnp.zeros_like(xs[0])))
checked_f = checkify.checkify(f, errors=checkify.float_checks)
err, _ = checked_f(jnp.ones((7, 3)))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "division by zero")
def test_multiple_payloads(self):
def f(x):
a = x[5]
b = x[6]
return a + b
err, _ = checkify.checkify(f, errors=checkify.index_checks)(jnp.ones((2,)))
self.assertIsNotNone(err.get())
self.assertIn("index 5", err.get())
def test_nd_payloads(self):
cf = checkify.checkify(lambda x, i: x[i], errors=checkify.index_checks)
errs, _ = jax.vmap(cf)(jnp.ones((3, 2)), jnp.array([5, 0, 100]))
self.assertIsNotNone(errs.get())
self.assertIn("index 5", errs.get())
self.assertIn("index 100", errs.get())
def test_mapped_error_one_payload(self):
def f(x, i):
x = x[i]
return x/0
cf = checkify.checkify(f, errors=checkify.automatic_checks)
errs, _ = jax.vmap(cf)(jnp.ones((2, 1)), jnp.array([0, 100]))
self.assertIsNotNone(errs.get())
self.assertIn("division by zero", errs.get())
self.assertIn("index 100", errs.get())
@jax.legacy_prng_key('allow')
def test_checking_key_split_with_nan_check(self):
cf = checkify.checkify(
lambda k: jax.random.permutation(k, jnp.array([0, 1, 2])),
errors=checkify.float_checks)
cf(jax.random.PRNGKey(123)) # does not crash.
def test_pmap_one_device(self):
@jax.pmap
def f(x, y):
return x/y
cf = checkify.checkify(f, errors=checkify.automatic_checks)
errs, _ = cf(jnp.ones((1,)), jnp.zeros((1,)))
self.assertIsNotNone(errs.get())
self.assertIn("division by zero", errs.get())
def test_psum_nan_check(self):
@partial(jax.vmap, axis_name="i")
def f(x, y):
return lax.psum((x, y), axis_name="i")
cf = checkify.checkify(f, errors=checkify.nan_checks)
err, _ = cf(jnp.array([-jnp.inf, 0, jnp.inf]), jnp.ones((3, 2)))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "nan generated by primitive")
def test_different_payload_effects(self):
def f(x, y):
x = x[y]
checkify.check(jnp.all(x > 0), "{x}", x=x)
return x
f = checkify.checkify(f, errors=checkify.all_checks)
err, _ = jax.vmap(f)(jnp.ones((2, 3))*-1, jnp.array([0, 5]))
self.assertIsNotNone(err.get())
def test_effects_total_ordering(self):
sds0 = jax.ShapeDtypeStruct((2,), jnp.float32)
sds1 = jax.ShapeDtypeStruct((2,), jnp.int32)
sds2 = jax.ShapeDtypeStruct((3,), jnp.int32)
self.assertTotallyOrdered(
[ErrorEffect(FailedCheckError, (sds0,))],
[ErrorEffect(FailedCheckError, (sds0, sds0))],
[ErrorEffect(FailedCheckError, (sds1,))],
[ErrorEffect(FailedCheckError, (sds1, sds0))],
[ErrorEffect(FailedCheckError, (sds2,))],
[ErrorEffect(OOBError, (sds0,))],
[ErrorEffect(OOBError, (sds0, sds0))],
)
def test_scan_xs_mapped_correctly(self):
def f(_, x):
return None, jnp.reshape(x, (2, 2))
@jax.jit
def g(x):
return jax.lax.scan(f, None, x)
checked_f = checkify.checkify(g)
checked_f = jax.jit(checked_f)
err, _ = checked_f(jnp.ones((2, 4)))
self.assertIsNone(err.get())
def test_retracing(self):
f = checkify.checkify(jax.jit(lambda x: jnp.sin(x) ** 2))
_ = f(3.)
with jtu.count_jit_and_pmap_lowerings() as count:
_ = f(3.)
self.assertEqual(count(), 0)
def test_goodfellow_custom_jvp(self):
def h(fext):
checkify.check(True, "")
return jax.nn.relu(fext)
h = checkify.checkify(h)
def h_out(fext):
_, out = h(fext)
return out
h_grad = jax.grad(h_out)
h_grad(0.) # doesn't crash
def test_goodfellow_custom_vjp(self):
@jax.custom_vjp
def sin(x):
return jnp.sin(x)
def sin_fwd(x):
return jnp.sin(x), 2. * x
def sin_bwd(x2, g):
return jnp.cos(x2 / 2.) * g,
sin.defvjp(sin_fwd, sin_bwd)
def h(fext):
checkify.check(True, "")
return sin(fext)
h = checkify.checkify(h)
def h_out(fext):
_, out = h(fext)
return out
h_grad = jax.grad(h_out)
h_grad(0.) # doesn't crash
def test_closed_call(self):
# lots of golfing went into this test
y = jnp.array([3.14])
summify = lambda f: lambda x: f(x).sum()
f = checkify.checkify(jax.grad(summify(jax.remat(
partial(partial, jax.lax.map)(lambda x: jnp.sin(x * y))))))
f(jnp.array([3.])) # don't crash
def test_while_loop_leaks(self):
def f(x):
n = jnp.minimum(1, 2)
return jax.lax.while_loop(lambda i: i < n, lambda i: i + 1, x)
jax.jit(checkify.checkify(f))(0) # Does not crash bc of leaked tracer.
@parameterized.parameters(True, False)
def test_remat(self, jit):
# basic test from https://github.com/jax-ml/jax/issues/23867
def fn(x: jax.Array):
checkify.check(jnp.all(x > 0), "x must be positive")
return x + 1
fn = jax.remat(fn)
if jit:
fn = jax.jit(fn)
fn = checkify.checkify(fn)
err, y = fn(jnp.array([1, 2, 3]))
self.assertIsNone(err.get())
self.assertAllClose(y, jnp.array([2, 3, 4]), check_dtypes=False)
err, _ = fn(jnp.array([0, 2, 3]))
self.assertIsNotNone(err.get())
self.assertStartsWith(err.get(), "x must be positive")
@jtu.with_config(jax_check_tracer_leaks=True)
| CheckifyTransformTests |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/metadata_service/models/generated/ConnectorIPCOptions.py | {
"start": 446,
"end": 561
} | class ____(BaseModel):
class Config:
extra = Extra.forbid
dataChannel: DataChannel
| ConnectorIPCOptions |
python | dateutil__dateutil | src/dateutil/rrule.py | {
"start": 2610,
"end": 9109
} | class ____(object):
def __init__(self, cache=False):
if cache:
self._cache = []
self._cache_lock = _thread.allocate_lock()
self._invalidate_cache()
else:
self._cache = None
self._cache_complete = False
self._len = None
def __iter__(self):
if self._cache_complete:
return iter(self._cache)
elif self._cache is None:
return self._iter()
else:
return self._iter_cached()
def _invalidate_cache(self):
if self._cache is not None:
self._cache = []
self._cache_complete = False
self._cache_gen = self._iter()
if self._cache_lock.locked():
self._cache_lock.release()
self._len = None
def _iter_cached(self):
i = 0
gen = self._cache_gen
cache = self._cache
acquire = self._cache_lock.acquire
release = self._cache_lock.release
while gen:
if i == len(cache):
acquire()
if self._cache_complete:
break
try:
for j in range(10):
cache.append(advance_iterator(gen))
except StopIteration:
self._cache_gen = gen = None
self._cache_complete = True
break
release()
yield cache[i]
i += 1
while i < self._len:
yield cache[i]
i += 1
def __getitem__(self, item):
if self._cache_complete:
return self._cache[item]
elif isinstance(item, slice):
if item.step and item.step < 0:
return list(iter(self))[item]
else:
return list(itertools.islice(self,
item.start or 0,
item.stop or sys.maxsize,
item.step or 1))
elif item >= 0:
gen = iter(self)
try:
for i in range(item+1):
res = advance_iterator(gen)
except StopIteration:
raise IndexError
return res
else:
return list(iter(self))[item]
def __contains__(self, item):
if self._cache_complete:
return item in self._cache
else:
for i in self:
if i == item:
return True
elif i > item:
return False
return False
# __len__() introduces a large performance penalty.
def count(self):
""" Returns the number of recurrences in this set. It will have go
through the whole recurrence, if this hasn't been done before. """
if self._len is None:
for x in self:
pass
return self._len
def before(self, dt, inc=False):
""" Returns the last recurrence before the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
last = None
if inc:
for i in gen:
if i > dt:
break
last = i
else:
for i in gen:
if i >= dt:
break
last = i
return last
def after(self, dt, inc=False):
""" Returns the first recurrence after the given datetime instance. The
inc keyword defines what happens if dt is an occurrence. With
inc=True, if dt itself is an occurrence, it will be returned. """
if self._cache_complete:
gen = self._cache
else:
gen = self
if inc:
for i in gen:
if i >= dt:
return i
else:
for i in gen:
if i > dt:
return i
return None
def xafter(self, dt, count=None, inc=False):
"""
Generator which yields up to `count` recurrences after the given
datetime instance, equivalent to `after`.
:param dt:
The datetime at which to start generating recurrences.
:param count:
The maximum number of recurrences to generate. If `None` (default),
dates are generated until the recurrence rule is exhausted.
:param inc:
If `dt` is an instance of the rule and `inc` is `True`, it is
included in the output.
:yields: Yields a sequence of `datetime` objects.
"""
if self._cache_complete:
gen = self._cache
else:
gen = self
# Select the comparison function
if inc:
comp = lambda dc, dtc: dc >= dtc
else:
comp = lambda dc, dtc: dc > dtc
# Generate dates
n = 0
for d in gen:
if comp(d, dt):
if count is not None:
n += 1
if n > count:
break
yield d
def between(self, after, before, inc=False, count=1):
""" Returns all the occurrences of the rrule between after and before.
The inc keyword defines what happens if after and/or before are
themselves occurrences. With inc=True, they will be included in the
list, if they are found in the recurrence set. """
if self._cache_complete:
gen = self._cache
else:
gen = self
started = False
l = []
if inc:
for i in gen:
if i > before:
break
elif not started:
if i >= after:
started = True
l.append(i)
else:
l.append(i)
else:
for i in gen:
if i >= before:
break
elif not started:
if i > after:
started = True
l.append(i)
else:
l.append(i)
return l
| rrulebase |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/metrics_test.py | {
"start": 165469,
"end": 167465
} | class ____(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
@test_util.run_deprecated_v1
def testVars(self):
metrics.false_negatives(
labels=(0, 1, 0, 1),
predictions=(0, 0, 1, 1))
_assert_metric_variables(self, ('false_negatives/count:0',))
@test_util.run_deprecated_v1
def testUnweighted(self):
labels = constant_op.constant(((0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1)))
predictions = constant_op.constant(((0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1)))
tn, tn_update_op = metrics.false_negatives(
labels=labels, predictions=predictions)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn)
self.assertAllClose(3., tn_update_op)
self.assertAllClose(3., tn)
@test_util.run_deprecated_v1
def testWeighted(self):
labels = constant_op.constant(((0, 1, 0, 1, 0),
(0, 0, 1, 1, 1),
(1, 1, 1, 1, 0),
(0, 0, 0, 0, 1)))
predictions = constant_op.constant(((0, 0, 1, 1, 0),
(1, 1, 1, 1, 1),
(0, 1, 0, 1, 0),
(1, 1, 1, 1, 1)))
weights = constant_op.constant((1., 1.5, 2., 2.5))
tn, tn_update_op = metrics.false_negatives(
labels=labels, predictions=predictions, weights=weights)
with self.cached_session():
self.evaluate(variables.local_variables_initializer())
self.assertAllClose(0., tn)
self.assertAllClose(5., tn_update_op)
self.assertAllClose(5., tn)
| FalseNegativesTest |
python | tensorflow__tensorflow | tensorflow/python/keras/combinations.py | {
"start": 2753,
"end": 3797
} | class ____(test_combinations.TestCombination):
"""Combination for Keras model types when doing model test.
It by default includes 'functional', 'subclass', 'sequential'.
Various methods in `testing_utils` to get models will auto-generate a model
of the currently active Keras model type. This allows unittests to confirm
the equivalence between different Keras models.
"""
def context_managers(self, kwargs):
model_type = kwargs.pop('model_type', None)
if model_type in KERAS_MODEL_TYPES:
return [testing_utils.model_type_scope(model_type)]
else:
return []
def parameter_modifiers(self):
return [test_combinations.OptionalParameter('model_type')]
_defaults = combinations.generate.keywords['test_combinations']
generate = functools.partial(
combinations.generate,
test_combinations=_defaults +
(KerasModeCombination(), KerasModelTypeCombination()))
combine = test_combinations.combine
times = test_combinations.times
NamedObject = test_combinations.NamedObject
| KerasModelTypeCombination |
python | urllib3__urllib3 | test/test_poolmanager.py | {
"start": 516,
"end": 18846
} | class ____:
@resolvesLocalhostFQDN()
def test_same_url(self) -> None:
# Convince ourselves that normally we don't get the same object
conn1 = connection_from_url("http://localhost:8081/foo")
conn2 = connection_from_url("http://localhost:8081/bar")
assert conn1 != conn2
# Now try again using the PoolManager
p = PoolManager(1)
conn1 = p.connection_from_url("http://localhost:8081/foo")
conn2 = p.connection_from_url("http://localhost:8081/bar")
assert conn1 == conn2
# Ensure that FQDNs are handled separately from relative domains
p = PoolManager(2)
conn1 = p.connection_from_url("http://localhost.:8081/foo")
conn2 = p.connection_from_url("http://localhost:8081/bar")
assert conn1 != conn2
def test_many_urls(self) -> None:
urls = [
"http://localhost:8081/foo",
"http://www.google.com/mail",
"http://localhost:8081/bar",
"https://www.google.com/",
"https://www.google.com/mail",
"http://yahoo.com",
"http://bing.com",
"http://yahoo.com/",
]
connections = set()
p = PoolManager(10)
for url in urls:
conn = p.connection_from_url(url)
connections.add(conn)
assert len(connections) == 5
def test_manager_clear(self) -> None:
p = PoolManager(5)
p.connection_from_url("http://google.com")
assert len(p.pools) == 1
p.clear()
assert len(p.pools) == 0
@pytest.mark.parametrize("url", ["http://@", None])
def test_nohost(self, url: str | None) -> None:
p = PoolManager(5)
with pytest.raises(LocationValueError):
p.connection_from_url(url=url) # type: ignore[arg-type]
def test_contextmanager(self) -> None:
with PoolManager(1) as p:
p.connection_from_url("http://google.com")
assert len(p.pools) == 1
assert len(p.pools) == 0
def test_http_pool_key_fields(self) -> None:
"""Assert the HTTPPoolKey fields are honored when selecting a pool."""
connection_pool_kw = {
"timeout": timeout.Timeout(3.14),
"retries": retry.Retry(total=6, connect=2),
"block": True,
"source_address": "127.0.0.1",
"blocksize": _DEFAULT_BLOCKSIZE + 1,
}
p = PoolManager()
conn_pools = [
p.connection_from_url("http://example.com/"),
p.connection_from_url("http://example.com:8000/"),
p.connection_from_url("http://other.example.com/"),
]
for key, value in connection_pool_kw.items():
p.connection_pool_kw[key] = value
conn_pools.append(p.connection_from_url("http://example.com/"))
assert all(
x is not y
for i, x in enumerate(conn_pools)
for j, y in enumerate(conn_pools)
if i != j
)
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_https_pool_key_fields(self) -> None:
"""Assert the HTTPSPoolKey fields are honored when selecting a pool."""
connection_pool_kw = {
"timeout": timeout.Timeout(3.14),
"retries": retry.Retry(total=6, connect=2),
"block": True,
"source_address": "127.0.0.1",
"key_file": "/root/totally_legit.key",
"cert_file": "/root/totally_legit.crt",
"cert_reqs": "CERT_REQUIRED",
"ca_certs": "/root/path_to_pem",
"ssl_version": "SSLv23_METHOD",
"blocksize": _DEFAULT_BLOCKSIZE + 1,
}
p = PoolManager()
conn_pools = [
p.connection_from_url("https://example.com/"),
p.connection_from_url("https://example.com:4333/"),
p.connection_from_url("https://other.example.com/"),
]
# Asking for a connection pool with the same key should give us an
# existing pool.
dup_pools = []
for key, value in connection_pool_kw.items():
p.connection_pool_kw[key] = value
conn_pools.append(p.connection_from_url("https://example.com/"))
dup_pools.append(p.connection_from_url("https://example.com/"))
assert all(
x is not y
for i, x in enumerate(conn_pools)
for j, y in enumerate(conn_pools)
if i != j
)
assert all(pool in conn_pools for pool in dup_pools)
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_default_pool_key_funcs_copy(self) -> None:
"""Assert each PoolManager gets a copy of ``pool_keys_by_scheme``."""
p = PoolManager()
assert p.key_fn_by_scheme == p.key_fn_by_scheme
assert p.key_fn_by_scheme is not key_fn_by_scheme
def test_pools_keyed_with_from_host(self) -> None:
"""Assert pools are still keyed correctly with connection_from_host."""
ssl_kw = {
"key_file": "/root/totally_legit.key",
"cert_file": "/root/totally_legit.crt",
"cert_reqs": "CERT_REQUIRED",
"ca_certs": "/root/path_to_pem",
"ssl_version": "SSLv23_METHOD",
}
p = PoolManager(5, **ssl_kw) # type: ignore[arg-type]
conns = [p.connection_from_host("example.com", 443, scheme="https")]
for k in ssl_kw:
p.connection_pool_kw[k] = "newval"
conns.append(p.connection_from_host("example.com", 443, scheme="https"))
assert all(
x is not y
for i, x in enumerate(conns)
for j, y in enumerate(conns)
if i != j
)
def test_https_connection_from_url_case_insensitive(self) -> None:
"""Assert scheme case is ignored when pooling HTTPS connections."""
p = PoolManager()
pool = p.connection_from_url("https://example.com/")
other_pool = p.connection_from_url("HTTPS://EXAMPLE.COM/")
assert 1 == len(p.pools)
assert pool is other_pool
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_https_connection_from_host_case_insensitive(self) -> None:
"""Assert scheme case is ignored when getting the https key class."""
p = PoolManager()
pool = p.connection_from_host("example.com", scheme="https")
other_pool = p.connection_from_host("EXAMPLE.COM", scheme="HTTPS")
assert 1 == len(p.pools)
assert pool is other_pool
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_https_connection_from_context_case_insensitive(self) -> None:
"""Assert scheme case is ignored when getting the https key class."""
p = PoolManager()
context = {"scheme": "https", "host": "example.com", "port": "443"}
other_context = {"scheme": "HTTPS", "host": "EXAMPLE.COM", "port": "443"}
pool = p.connection_from_context(context)
other_pool = p.connection_from_context(other_context)
assert 1 == len(p.pools)
assert pool is other_pool
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_http_connection_from_url_case_insensitive(self) -> None:
"""Assert scheme case is ignored when pooling HTTP connections."""
p = PoolManager()
pool = p.connection_from_url("http://example.com/")
other_pool = p.connection_from_url("HTTP://EXAMPLE.COM/")
assert 1 == len(p.pools)
assert pool is other_pool
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_http_connection_from_host_case_insensitive(self) -> None:
"""Assert scheme case is ignored when getting the https key class."""
p = PoolManager()
pool = p.connection_from_host("example.com", scheme="http")
other_pool = p.connection_from_host("EXAMPLE.COM", scheme="HTTP")
assert 1 == len(p.pools)
assert pool is other_pool
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
def test_assert_hostname_and_fingerprint_flag(self) -> None:
"""Assert that pool manager can accept hostname and fingerprint flags."""
fingerprint = "92:81:FE:85:F7:0C:26:60:EC:D6:B3:BF:93:CF:F9:71:CC:07:7D:0A"
p = PoolManager(assert_hostname=True, assert_fingerprint=fingerprint)
pool = p.connection_from_url("https://example.com/")
assert 1 == len(p.pools)
assert isinstance(pool, HTTPSConnectionPool)
assert pool.assert_hostname
assert fingerprint == pool.assert_fingerprint
def test_http_connection_from_context_case_insensitive(self) -> None:
"""Assert scheme case is ignored when getting the https key class."""
p = PoolManager()
context = {"scheme": "http", "host": "example.com", "port": "8080"}
other_context = {"scheme": "HTTP", "host": "EXAMPLE.COM", "port": "8080"}
pool = p.connection_from_context(context)
other_pool = p.connection_from_context(other_context)
assert 1 == len(p.pools)
assert pool is other_pool
assert all(isinstance(key, PoolKey) for key in p.pools.keys())
@patch("urllib3.poolmanager.PoolManager.connection_from_host")
def test_deprecated_no_scheme(self, connection_from_host: mock.MagicMock) -> None:
# Don't actually make a network connection, just verify the DeprecationWarning
connection_from_host.side_effect = ConnectionError("Not attempting connection")
p = PoolManager()
with pytest.warns(DeprecationWarning) as records:
with pytest.raises(ConnectionError):
p.request(method="GET", url="evil.com://good.com")
msg = (
"URLs without a scheme (ie 'https://') are deprecated and will raise an error "
"in a future version of urllib3. To avoid this DeprecationWarning ensure all URLs "
"start with 'https://' or 'http://'. Read more in this issue: "
"https://github.com/urllib3/urllib3/issues/2920"
)
assert len(records) == 1
assert isinstance(records[0].message, DeprecationWarning)
assert records[0].message.args[0] == msg
@patch("urllib3.poolmanager.PoolManager.connection_from_pool_key")
def test_connection_from_context_strict_param(
self, connection_from_pool_key: mock.MagicMock
) -> None:
p = PoolManager()
context = {
"scheme": "http",
"host": "example.com",
"port": 8080,
"strict": True,
}
with pytest.warns(DeprecationWarning) as records:
p.connection_from_context(context)
msg = (
"The 'strict' parameter is no longer needed on Python 3+. "
"This will raise an error in urllib3 v2.1.0."
)
record = records[0]
assert isinstance(record.message, Warning)
assert record.message.args[0] == msg
_, kwargs = connection_from_pool_key.call_args
assert kwargs["request_context"] == {
"scheme": "http",
"host": "example.com",
"port": 8080,
}
def test_custom_pool_key(self) -> None:
"""Assert it is possible to define a custom key function."""
p = PoolManager(10)
p.key_fn_by_scheme["http"] = lambda x: tuple(x["key"]) # type: ignore[assignment]
pool1 = p.connection_from_url(
"http://example.com", pool_kwargs={"key": "value"}
)
pool2 = p.connection_from_url(
"http://example.com", pool_kwargs={"key": "other"}
)
pool3 = p.connection_from_url(
"http://example.com", pool_kwargs={"key": "value", "x": "y"}
)
assert 2 == len(p.pools)
assert pool1 is pool3
assert pool1 is not pool2
def test_override_pool_kwargs_url(self) -> None:
"""Assert overriding pool kwargs works with connection_from_url."""
p = PoolManager()
pool_kwargs = {"retries": 100, "block": True}
default_pool = p.connection_from_url("http://example.com/")
override_pool = p.connection_from_url(
"http://example.com/", pool_kwargs=pool_kwargs
)
assert retry.Retry.DEFAULT == default_pool.retries
assert not default_pool.block
assert 100 == override_pool.retries
assert override_pool.block
def test_override_pool_kwargs_host(self) -> None:
"""Assert overriding pool kwargs works with connection_from_host"""
p = PoolManager()
pool_kwargs = {"retries": 100, "block": True}
default_pool = p.connection_from_host("example.com", scheme="http")
override_pool = p.connection_from_host(
"example.com", scheme="http", pool_kwargs=pool_kwargs
)
assert retry.Retry.DEFAULT == default_pool.retries
assert not default_pool.block
assert 100 == override_pool.retries
assert override_pool.block
def test_pool_kwargs_socket_options(self) -> None:
"""Assert passing socket options works with connection_from_host"""
p = PoolManager(socket_options=[])
override_opts = [
(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1),
(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1),
]
pool_kwargs = {"socket_options": override_opts}
default_pool = p.connection_from_host("example.com", scheme="http")
override_pool = p.connection_from_host(
"example.com", scheme="http", pool_kwargs=pool_kwargs
)
assert default_pool.conn_kw["socket_options"] == []
assert override_pool.conn_kw["socket_options"] == override_opts
def test_merge_pool_kwargs(self) -> None:
"""Assert _merge_pool_kwargs works in the happy case"""
retries = retry.Retry(total=100)
p = PoolManager(retries=retries)
merged = p._merge_pool_kwargs({"new_key": "value"})
assert {"retries": retries, "new_key": "value"} == merged
def test_merge_pool_kwargs_none(self) -> None:
"""Assert false-y values to _merge_pool_kwargs result in defaults"""
p = PoolManager(retries=100)
merged = p._merge_pool_kwargs({})
assert p.connection_pool_kw == merged
merged = p._merge_pool_kwargs(None)
assert p.connection_pool_kw == merged
def test_merge_pool_kwargs_remove_key(self) -> None:
"""Assert keys can be removed with _merge_pool_kwargs"""
p = PoolManager(retries=100)
merged = p._merge_pool_kwargs({"retries": None})
assert "retries" not in merged
def test_merge_pool_kwargs_invalid_key(self) -> None:
"""Assert removing invalid keys with _merge_pool_kwargs doesn't break"""
p = PoolManager(retries=100)
merged = p._merge_pool_kwargs({"invalid_key": None})
assert p.connection_pool_kw == merged
def test_pool_manager_no_url_absolute_form(self) -> None:
"""Valides we won't send a request with absolute form without a proxy"""
p = PoolManager()
assert p._proxy_requires_url_absolute_form(Url("http://example.com")) is False
assert p._proxy_requires_url_absolute_form(Url("https://example.com")) is False
@pytest.mark.parametrize(
"input_blocksize,expected_blocksize",
[
(_DEFAULT_BLOCKSIZE, _DEFAULT_BLOCKSIZE),
(None, _DEFAULT_BLOCKSIZE),
(8192, 8192),
],
)
def test_poolmanager_blocksize(
self, input_blocksize: int, expected_blocksize: int
) -> None:
"""Assert PoolManager sets blocksize properly"""
p = PoolManager()
pool_blocksize = p.connection_from_url(
"http://example.com", {"blocksize": input_blocksize}
)
assert pool_blocksize.conn_kw["blocksize"] == expected_blocksize
assert pool_blocksize._get_conn().blocksize == expected_blocksize
@pytest.mark.parametrize(
"url",
[
"[a::b%zone]",
"[a::b%25zone]",
"http://[a::b%zone]",
"http://[a::b%25zone]",
],
)
@patch("urllib3.util.connection.create_connection")
def test_e2e_connect_to_ipv6_scoped(
self, create_connection: MagicMock, url: str
) -> None:
"""Checks that IPv6 scoped addresses are properly handled end-to-end.
This is not strictly speaking a pool manager unit test - this test
lives here in absence of a better code location for e2e/integration
tests.
"""
p = PoolManager()
conn_pool = p.connection_from_url(url)
conn = conn_pool._get_conn()
conn.connect()
assert create_connection.call_args[0][0] == ("a::b%zone", 80)
@patch("urllib3.connection.ssl_wrap_socket")
@patch("urllib3.util.connection.create_connection")
def test_e2e_connect_to_ipv6_scoped_tls(
self, create_connection: MagicMock, ssl_wrap_socket: MagicMock
) -> None:
p = PoolManager()
conn_pool = p.connection_from_url(
"https://[a::b%zone]", pool_kwargs={"assert_hostname": False}
)
conn = conn_pool._get_conn()
conn.connect()
assert ssl_wrap_socket.call_args[1]["server_hostname"] == "a::b"
def test_thread_safty(self) -> None:
pool_manager = PoolManager(num_pools=2)
# thread 1 gets a pool for host x
pool_1 = pool_manager.connection_from_url("http://host_x:80/")
# thread 2 gets a pool for host y
pool_2 = pool_manager.connection_from_url("http://host_y:80/")
# thread 3 gets a pool for host z
pool_3 = pool_manager.connection_from_url("http://host_z:80")
# None of the pools should be closed, since all of them are referenced.
assert pool_1.pool is not None
assert pool_2.pool is not None
assert pool_3.pool is not None
conn_queue = pool_1.pool
assert conn_queue.qsize() > 0
# thread 1 stops.
del pool_1
gc.collect()
# Connection should be closed, because reference to pool_1 is gone.
assert conn_queue.qsize() == 0
| TestPoolManager |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/models.py | {
"start": 4311,
"end": 4482
} | class ____(models.Model):
company = models.OneToOneField(Company, primary_key=True, on_delete=models.CASCADE)
self_modifying = SelfModifyingField()
| CompanyExtension |
python | aio-libs__aiohttp | aiohttp/web_request.py | {
"start": 2769,
"end": 27685
} | class ____(MutableMapping[str | RequestKey[Any], Any], HeadersMixin):
POST_METHODS = {
hdrs.METH_PATCH,
hdrs.METH_POST,
hdrs.METH_PUT,
hdrs.METH_TRACE,
hdrs.METH_DELETE,
}
_post: MultiDictProxy[str | bytes | FileField] | None = None
_read_bytes: bytes | None = None
_seen_str_keys: set[str] = set()
def __init__(
self,
message: RawRequestMessage,
payload: StreamReader,
protocol: "RequestHandler[Self]",
payload_writer: AbstractStreamWriter,
task: "asyncio.Task[None]",
loop: asyncio.AbstractEventLoop,
*,
client_max_size: int = 1024**2,
state: dict[RequestKey[Any] | str, Any] | None = None,
scheme: str | None = None,
host: str | None = None,
remote: str | None = None,
) -> None:
self._message = message
self._protocol = protocol
self._payload_writer = payload_writer
self._payload = payload
self._headers: CIMultiDictProxy[str] = message.headers
self._method = message.method
self._version = message.version
self._cache: dict[str, Any] = {}
url = message.url
if url.absolute:
if scheme is not None:
url = url.with_scheme(scheme)
if host is not None:
url = url.with_host(host)
# absolute URL is given,
# override auto-calculating url, host, and scheme
# all other properties should be good
self._cache["url"] = url
self._cache["host"] = url.host
self._cache["scheme"] = url.scheme
self._rel_url = url.relative()
else:
self._rel_url = url
if scheme is not None:
self._cache["scheme"] = scheme
if host is not None:
self._cache["host"] = host
self._state = {} if state is None else state
self._task = task
self._client_max_size = client_max_size
self._loop = loop
self._transport_sslcontext = protocol.ssl_context
self._transport_peername = protocol.peername
if remote is not None:
self._cache["remote"] = remote
def clone(
self,
*,
method: str | _SENTINEL = sentinel,
rel_url: StrOrURL | _SENTINEL = sentinel,
headers: LooseHeaders | _SENTINEL = sentinel,
scheme: str | _SENTINEL = sentinel,
host: str | _SENTINEL = sentinel,
remote: str | _SENTINEL = sentinel,
client_max_size: int | _SENTINEL = sentinel,
) -> "BaseRequest":
"""Clone itself with replacement some attributes.
Creates and returns a new instance of Request object. If no parameters
are given, an exact copy is returned. If a parameter is not passed, it
will reuse the one from the current request object.
"""
if self._read_bytes:
raise RuntimeError("Cannot clone request after reading its content")
dct: dict[str, Any] = {}
if method is not sentinel:
dct["method"] = method
if rel_url is not sentinel:
new_url: URL = URL(rel_url)
dct["url"] = new_url
dct["path"] = str(new_url)
if headers is not sentinel:
# a copy semantic
new_headers = CIMultiDictProxy(CIMultiDict(headers))
dct["headers"] = new_headers
dct["raw_headers"] = tuple(
(k.encode("utf-8"), v.encode("utf-8")) for k, v in new_headers.items()
)
message = self._message._replace(**dct)
kwargs: dict[str, str] = {}
if scheme is not sentinel:
kwargs["scheme"] = scheme
if host is not sentinel:
kwargs["host"] = host
if remote is not sentinel:
kwargs["remote"] = remote
if client_max_size is sentinel:
client_max_size = self._client_max_size
return self.__class__(
message,
self._payload,
self._protocol, # type: ignore[arg-type]
self._payload_writer,
self._task,
self._loop,
client_max_size=client_max_size,
state=self._state.copy(),
**kwargs,
)
@property
def task(self) -> "asyncio.Task[None]":
return self._task
@property
def protocol(self) -> "RequestHandler[Self]":
return self._protocol
@property
def transport(self) -> asyncio.Transport | None:
return self._protocol.transport
@property
def writer(self) -> AbstractStreamWriter:
return self._payload_writer
@property
def client_max_size(self) -> int:
return self._client_max_size
@reify
def rel_url(self) -> URL:
return self._rel_url
# MutableMapping API
@overload # type: ignore[override]
def __getitem__(self, key: RequestKey[_T]) -> _T: ...
@overload
def __getitem__(self, key: str) -> Any: ...
def __getitem__(self, key: str | RequestKey[_T]) -> Any:
return self._state[key]
@overload # type: ignore[override]
def __setitem__(self, key: RequestKey[_T], value: _T) -> None: ...
@overload
def __setitem__(self, key: str, value: Any) -> None: ...
def __setitem__(self, key: str | RequestKey[_T], value: Any) -> None:
if not isinstance(key, RequestKey) and key not in BaseRequest._seen_str_keys:
BaseRequest._seen_str_keys.add(key)
warnings.warn(
"It is recommended to use web.RequestKey instances for keys.\n"
+ "https://docs.aiohttp.org/en/stable/web_advanced.html"
+ "#request-s-storage",
category=NotAppKeyWarning,
stacklevel=2,
)
self._state[key] = value
def __delitem__(self, key: str | RequestKey[_T]) -> None:
del self._state[key]
def __len__(self) -> int:
return len(self._state)
def __iter__(self) -> Iterator[str | RequestKey[Any]]:
return iter(self._state)
########
@reify
def secure(self) -> bool:
"""A bool indicating if the request is handled with SSL."""
return self.scheme == "https"
@reify
def forwarded(self) -> tuple[Mapping[str, str], ...]:
"""A tuple containing all parsed Forwarded header(s).
Makes an effort to parse Forwarded headers as specified by RFC 7239:
- It adds one (immutable) dictionary per Forwarded 'field-value', ie
per proxy. The element corresponds to the data in the Forwarded
field-value added by the first proxy encountered by the client. Each
subsequent item corresponds to those added by later proxies.
- It checks that every value has valid syntax in general as specified
in section 4: either a 'token' or a 'quoted-string'.
- It un-escapes found escape sequences.
- It does NOT validate 'by' and 'for' contents as specified in section
6.
- It does NOT validate 'host' contents (Host ABNF).
- It does NOT validate 'proto' contents for valid URI scheme names.
Returns a tuple containing one or more immutable dicts
"""
elems = []
for field_value in self._message.headers.getall(hdrs.FORWARDED, ()):
length = len(field_value)
pos = 0
need_separator = False
elem: dict[str, str] = {}
elems.append(types.MappingProxyType(elem))
while 0 <= pos < length:
match = _FORWARDED_PAIR_RE.match(field_value, pos)
if match is not None: # got a valid forwarded-pair
if need_separator:
# bad syntax here, skip to next comma
pos = field_value.find(",", pos)
else:
name, value, port = match.groups()
if value[0] == '"':
# quoted string: remove quotes and unescape
value = _QUOTED_PAIR_REPLACE_RE.sub(r"\1", value[1:-1])
if port:
value += port
elem[name.lower()] = value
pos += len(match.group(0))
need_separator = True
elif field_value[pos] == ",": # next forwarded-element
need_separator = False
elem = {}
elems.append(types.MappingProxyType(elem))
pos += 1
elif field_value[pos] == ";": # next forwarded-pair
need_separator = False
pos += 1
elif field_value[pos] in " \t":
# Allow whitespace even between forwarded-pairs, though
# RFC 7239 doesn't. This simplifies code and is in line
# with Postel's law.
pos += 1
else:
# bad syntax here, skip to next comma
pos = field_value.find(",", pos)
return tuple(elems)
@reify
def scheme(self) -> str:
"""A string representing the scheme of the request.
Hostname is resolved in this order:
- overridden value by .clone(scheme=new_scheme) call.
- type of connection to peer: HTTPS if socket is SSL, HTTP otherwise.
'http' or 'https'.
"""
if self._transport_sslcontext:
return "https"
else:
return "http"
@reify
def method(self) -> str:
"""Read only property for getting HTTP method.
The value is upper-cased str like 'GET', 'POST', 'PUT' etc.
"""
return self._method
@reify
def version(self) -> HttpVersion:
"""Read only property for getting HTTP version of request.
Returns aiohttp.protocol.HttpVersion instance.
"""
return self._version
@reify
def host(self) -> str:
"""Hostname of the request.
Hostname is resolved in this order:
- overridden value by .clone(host=new_host) call.
- HOST HTTP header
- socket.getfqdn() value
For example, 'example.com' or 'localhost:8080'.
For historical reasons, the port number may be included.
"""
host = self._message.headers.get(hdrs.HOST)
if host is not None:
return host
return socket.getfqdn()
@reify
def remote(self) -> str | None:
"""Remote IP of client initiated HTTP request.
The IP is resolved in this order:
- overridden value by .clone(remote=new_remote) call.
- peername of opened socket
"""
if self._transport_peername is None:
return None
if isinstance(self._transport_peername, (list, tuple)):
return str(self._transport_peername[0])
return str(self._transport_peername)
@reify
def url(self) -> URL:
"""The full URL of the request."""
# authority is used here because it may include the port number
# and we want yarl to parse it correctly
return URL.build(scheme=self.scheme, authority=self.host).join(self._rel_url)
@reify
def path(self) -> str:
"""The URL including *PATH INFO* without the host or scheme.
E.g., ``/app/blog``
"""
return self._rel_url.path
@reify
def path_qs(self) -> str:
"""The URL including PATH_INFO and the query string.
E.g, /app/blog?id=10
"""
return str(self._rel_url)
@reify
def raw_path(self) -> str:
"""The URL including raw *PATH INFO* without the host or scheme.
Warning, the path is unquoted and may contains non valid URL characters
E.g., ``/my%2Fpath%7Cwith%21some%25strange%24characters``
"""
return self._message.path
@reify
def query(self) -> MultiDictProxy[str]:
"""A multidict with all the variables in the query string."""
return self._rel_url.query
@reify
def query_string(self) -> str:
"""The query string in the URL.
E.g., id=10
"""
return self._rel_url.query_string
@reify
def headers(self) -> CIMultiDictProxy[str]:
"""A case-insensitive multidict proxy with all headers."""
return self._headers
@reify
def raw_headers(self) -> RawHeaders:
"""A sequence of pairs for all headers."""
return self._message.raw_headers
@reify
def if_modified_since(self) -> datetime.datetime | None:
"""The value of If-Modified-Since HTTP header, or None.
This header is represented as a `datetime` object.
"""
return parse_http_date(self.headers.get(hdrs.IF_MODIFIED_SINCE))
@reify
def if_unmodified_since(self) -> datetime.datetime | None:
"""The value of If-Unmodified-Since HTTP header, or None.
This header is represented as a `datetime` object.
"""
return parse_http_date(self.headers.get(hdrs.IF_UNMODIFIED_SINCE))
@staticmethod
def _etag_values(etag_header: str) -> Iterator[ETag]:
"""Extract `ETag` objects from raw header."""
if etag_header == ETAG_ANY:
yield ETag(
is_weak=False,
value=ETAG_ANY,
)
else:
for match in LIST_QUOTED_ETAG_RE.finditer(etag_header):
is_weak, value, garbage = match.group(2, 3, 4)
# Any symbol captured by 4th group means
# that the following sequence is invalid.
if garbage:
break
yield ETag(
is_weak=bool(is_weak),
value=value,
)
@classmethod
def _if_match_or_none_impl(
cls, header_value: str | None
) -> tuple[ETag, ...] | None:
if not header_value:
return None
return tuple(cls._etag_values(header_value))
@reify
def if_match(self) -> tuple[ETag, ...] | None:
"""The value of If-Match HTTP header, or None.
This header is represented as a `tuple` of `ETag` objects.
"""
return self._if_match_or_none_impl(self.headers.get(hdrs.IF_MATCH))
@reify
def if_none_match(self) -> tuple[ETag, ...] | None:
"""The value of If-None-Match HTTP header, or None.
This header is represented as a `tuple` of `ETag` objects.
"""
return self._if_match_or_none_impl(self.headers.get(hdrs.IF_NONE_MATCH))
@reify
def if_range(self) -> datetime.datetime | None:
"""The value of If-Range HTTP header, or None.
This header is represented as a `datetime` object.
"""
return parse_http_date(self.headers.get(hdrs.IF_RANGE))
@reify
def keep_alive(self) -> bool:
"""Is keepalive enabled by client?"""
return not self._message.should_close
@reify
def cookies(self) -> Mapping[str, str]:
"""Return request cookies.
A read-only dictionary-like object.
"""
# Use parse_cookie_header for RFC 6265 compliant Cookie header parsing
# that accepts special characters in cookie names (fixes #2683)
parsed = parse_cookie_header(self.headers.get(hdrs.COOKIE, ""))
# Extract values from Morsel objects
return MappingProxyType({name: morsel.value for name, morsel in parsed})
@reify
def http_range(self) -> "slice[int, int, int]":
"""The content of Range HTTP header.
Return a slice instance.
"""
rng = self._headers.get(hdrs.RANGE)
start, end = None, None
if rng is not None:
try:
pattern = r"^bytes=(\d*)-(\d*)$"
start, end = re.findall(pattern, rng)[0]
except IndexError: # pattern was not found in header
raise ValueError("range not in acceptable format")
end = int(end) if end else None
start = int(start) if start else None
if start is None and end is not None:
# end with no start is to return tail of content
start = -end
end = None
if start is not None and end is not None:
# end is inclusive in range header, exclusive for slice
end += 1
if start >= end:
raise ValueError("start cannot be after end")
if start is end is None: # No valid range supplied
raise ValueError("No start or end of range specified")
return slice(start, end, 1)
@reify
def content(self) -> StreamReader:
"""Return raw payload stream."""
return self._payload
@property
def can_read_body(self) -> bool:
"""Return True if request's HTTP BODY can be read, False otherwise."""
return not self._payload.at_eof()
@reify
def body_exists(self) -> bool:
"""Return True if request has HTTP BODY, False otherwise."""
return type(self._payload) is not EmptyStreamReader
async def release(self) -> None:
"""Release request.
Eat unread part of HTTP BODY if present.
"""
while not self._payload.at_eof():
await self._payload.readany()
async def read(self) -> bytes:
"""Read request body if present.
Returns bytes object with full request content.
"""
if self._read_bytes is None:
body = bytearray()
while True:
chunk = await self._payload.readany()
body.extend(chunk)
if self._client_max_size:
body_size = len(body)
if body_size > self._client_max_size:
raise HTTPRequestEntityTooLarge(
max_size=self._client_max_size, actual_size=body_size
)
if not chunk:
break
self._read_bytes = bytes(body)
return self._read_bytes
async def text(self) -> str:
"""Return BODY as text using encoding from .charset."""
bytes_body = await self.read()
encoding = self.charset or "utf-8"
try:
return bytes_body.decode(encoding)
except LookupError:
raise HTTPUnsupportedMediaType()
async def json(
self,
*,
loads: JSONDecoder = DEFAULT_JSON_DECODER,
content_type: str | None = "application/json",
) -> Any:
"""Return BODY as JSON."""
body = await self.text()
if content_type:
if not is_expected_content_type(self.content_type, content_type):
raise HTTPBadRequest(
text=(
"Attempt to decode JSON with "
"unexpected mimetype: %s" % self.content_type
)
)
return loads(body)
async def multipart(self) -> MultipartReader:
"""Return async iterator to process BODY as multipart."""
return MultipartReader(self._headers, self._payload)
async def post(self) -> "MultiDictProxy[str | bytes | FileField]":
"""Return POST parameters."""
if self._post is not None:
return self._post
if self._method not in self.POST_METHODS:
self._post = MultiDictProxy(MultiDict())
return self._post
content_type = self.content_type
if content_type not in (
"",
"application/x-www-form-urlencoded",
"multipart/form-data",
):
self._post = MultiDictProxy(MultiDict())
return self._post
out: MultiDict[str | bytes | FileField] = MultiDict()
if content_type == "multipart/form-data":
multipart = await self.multipart()
max_size = self._client_max_size
field = await multipart.next()
while field is not None:
size = 0
field_ct = field.headers.get(hdrs.CONTENT_TYPE)
if isinstance(field, BodyPartReader):
assert field.name is not None
# Note that according to RFC 7578, the Content-Type header
# is optional, even for files, so we can't assume it's
# present.
# https://tools.ietf.org/html/rfc7578#section-4.4
if field.filename:
# store file in temp file
tmp = await self._loop.run_in_executor(
None, tempfile.TemporaryFile
)
chunk = await field.read_chunk(size=2**16)
while chunk:
chunk = field.decode(chunk)
await self._loop.run_in_executor(None, tmp.write, chunk)
size += len(chunk)
if 0 < max_size < size:
await self._loop.run_in_executor(None, tmp.close)
raise HTTPRequestEntityTooLarge(
max_size=max_size, actual_size=size
)
chunk = await field.read_chunk(size=2**16)
await self._loop.run_in_executor(None, tmp.seek, 0)
if field_ct is None:
field_ct = "application/octet-stream"
ff = FileField(
field.name,
field.filename,
cast(io.BufferedReader, tmp),
field_ct,
field.headers,
)
out.add(field.name, ff)
else:
# deal with ordinary data
value = await field.read(decode=True)
if field_ct is None or field_ct.startswith("text/"):
charset = field.get_charset(default="utf-8")
out.add(field.name, value.decode(charset))
else:
out.add(field.name, value)
size += len(value)
if 0 < max_size < size:
raise HTTPRequestEntityTooLarge(
max_size=max_size, actual_size=size
)
else:
raise ValueError(
"To decode nested multipart you need to use custom reader",
)
field = await multipart.next()
else:
data = await self.read()
if data:
charset = self.charset or "utf-8"
bytes_query = data.rstrip()
try:
query = bytes_query.decode(charset)
except LookupError:
raise HTTPUnsupportedMediaType()
out.extend(
parse_qsl(qs=query, keep_blank_values=True, encoding=charset)
)
self._post = MultiDictProxy(out)
return self._post
def get_extra_info(self, name: str, default: Any = None) -> Any:
"""Extra info from protocol transport"""
transport = self._protocol.transport
if transport is None:
return default
return transport.get_extra_info(name, default)
def __repr__(self) -> str:
ascii_encodable_path = self.path.encode("ascii", "backslashreplace").decode(
"ascii"
)
return f"<{self.__class__.__name__} {self._method} {ascii_encodable_path} >"
def __eq__(self, other: object) -> bool:
return id(self) == id(other)
def __bool__(self) -> bool:
return True
async def _prepare_hook(self, response: StreamResponse) -> None:
return
def _cancel(self, exc: BaseException) -> None:
set_exception(self._payload, exc)
def _finish(self) -> None:
if self._post is None or self.content_type != "multipart/form-data":
return
# NOTE: Release file descriptors for the
# NOTE: `tempfile.Temporaryfile`-created `_io.BufferedRandom`
# NOTE: instances of files sent within multipart request body
# NOTE: via HTTP POST request.
for file_name, file_field_object in self._post.items():
if isinstance(file_field_object, FileField):
file_field_object.file.close()
| BaseRequest |
python | pypa__pip | src/pip/_internal/network/session.py | {
"start": 10112,
"end": 10397
} | class ____(HTTPAdapter):
def cert_verify(
self,
conn: ConnectionPool,
url: str,
verify: bool | str,
cert: str | tuple[str, str] | None,
) -> None:
super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
| InsecureHTTPAdapter |
python | scipy__scipy | scipy/_lib/_ccallback.py | {
"start": 116,
"end": 348
} | class ____:
pass
def _import_cffi():
global ffi, CData
if ffi is not None:
return
try:
import cffi
ffi = cffi.FFI()
CData = ffi.CData
except ImportError:
ffi = False
| CData |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numeric.py | {
"start": 103267,
"end": 104590
} | class ____(TestCase):
def test_simple(self):
[x, y] = np.indices((4, 3))
assert_array_equal(x, np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2], [3, 3, 3]]))
assert_array_equal(y, np.array([[0, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]]))
def test_single_input(self):
[x] = np.indices((4,))
assert_array_equal(x, np.array([0, 1, 2, 3]))
[x] = np.indices((4,), sparse=True)
assert_array_equal(x, np.array([0, 1, 2, 3]))
def test_scalar_input(self):
assert_array_equal([], np.indices(()))
assert_array_equal([], np.indices((), sparse=True))
assert_array_equal([[]], np.indices((0,)))
assert_array_equal([[]], np.indices((0,), sparse=True))
def test_sparse(self):
[x, y] = np.indices((4, 3), sparse=True)
assert_array_equal(x, np.array([[0], [1], [2], [3]]))
assert_array_equal(y, np.array([[0, 1, 2]]))
@parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@parametrize("dims", [(), (0,), (4, 3)])
def test_return_type(self, dtype, dims):
inds = np.indices(dims, dtype=dtype)
assert_(inds.dtype == dtype)
for arr in np.indices(dims, dtype=dtype, sparse=True):
assert_(arr.dtype == dtype)
@xpassIfTorchDynamo_np # (reason="TODO")
| TestIndices |
python | cython__cython | Cython/Debugger/libpython.py | {
"start": 82701,
"end": 83273
} | class ____:
"""
Make this a mixin so CyStep can also inherit from this and use a
CythonCodeStepper at the same time.
"""
def python_step(self, stepinto):
"""
Set a watchpoint on the Python bytecode instruction pointer and try
to finish the frame
"""
output = gdb.execute('watch f->f_lasti', to_string=True)
watchpoint = int(re.search(r'[Ww]atchpoint (\d+):', output).group(1))
self.step(stepinto=stepinto, stepover_command='finish')
gdb.execute('delete %s' % watchpoint)
| PythonStepperMixin |
python | great-expectations__great_expectations | great_expectations/execution_engine/sqlalchemy_execution_engine.py | {
"start": 7253,
"end": 71524
} | class ____(ExecutionEngine[SQLAColumnClause]):
"""SparkDFExecutionEngine instantiates the ExecutionEngine API to support computations using Spark platform.
Constructor builds a SqlAlchemyExecutionEngine, using a provided connection string/url/engine/credentials to \
access the desired database.
Also initializes the dialect to be used.
Args:
name (str): The name of the SqlAlchemyExecutionEngine
credentials: If the Execution Engine is not provided, the credentials can be used to build the \
ExecutionEngine object. If the Engine is provided, it will be used instead.
data_context (DataContext): An object representing a Great Expectations project that can be used to \
access ExpectationSuite objects and the Project Data itself.
engine (Engine): A SqlAlchemy Engine used to set the SqlAlchemyExecutionEngine being configured, \
useful if an Engine has already been configured and should be reused. Will override Credentials if \
provided. If you are passing an engine that requires a single connection e.g. if temporary tables are \
not persisted if the connection is closed (e.g. sqlite, mssql) then you should create the engine with \
a StaticPool e.g. engine = sa.create_engine(connection_url, poolclass=sa.pool.StaticPool)
connection_string (string): If neither the engines nor the credentials have been provided, a \
connection string can be used to access the data. This will be overridden by both the engine and \
credentials if those are provided.
url (string): If neither the engines, the credentials, nor the connection_string have been provided, a \
URL can be used to access the data. This will be overridden by all other configuration options if \
any are provided.
kwargs (dict): These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine
For example:
```python
execution_engine: ExecutionEngine = SqlAlchemyExecutionEngine(connection_string="dbmstype://user:password@host:5432/database_name")
```
""" # noqa: E501 # FIXME CoP
    # noinspection PyUnusedLocal
    def __init__(  # noqa: C901, PLR0912, PLR0913, PLR0915 # FIXME CoP
        self,
        name: Optional[str] = None,
        credentials: Optional[dict] = None,
        data_context: Optional[Any] = None,
        engine: Optional[SaEngine] = None,
        connection_string: Optional[str] = None,
        url: Optional[str] = None,
        batch_data_dict: Optional[dict] = None,
        create_temp_table: bool = True,
        # kwargs will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine # noqa: E501 # FIXME CoP
        **kwargs,
    ) -> None:
        """Build (or adopt) the SQLAlchemy engine, resolve the dialect, and snapshot config.

        See the class docstring for the semantics of each argument. Precedence for
        engine construction is: explicit ``engine`` > ``credentials`` >
        ``connection_string`` > ``url``.
        """
        super().__init__(name=name, batch_data_dict=batch_data_dict)
        self._name = name
        self._credentials = credentials
        self._connection_string = connection_string
        self._url = url
        self._create_temp_table = create_temp_table
        # Identifies GX to Snowflake's partner telemetry for any Snowflake connection.
        os.environ["SF_PARTNER"] = "great_expectations_oss" # noqa: TID251 # FIXME CoP
        # sqlite/mssql temp tables only persist within a connection, so we need to keep the connection alive by # noqa: E501 # FIXME CoP
        # keeping a reference to it.
        # Even though we use a single connection pool for dialects that need a single persisted connection # noqa: E501 # FIXME CoP
        # (e.g. for accessing temporary tables), if we don't keep a reference
        # then we get errors like sqlite3.ProgrammingError: Cannot operate on a closed database.
        self._connection: sqlalchemy.Connection | None = None
        # Use a single instance of SQLAlchemy engine to avoid creating multiple engine instances
        # for the same SQLAlchemy engine. This allows us to take advantage of SQLAlchemy's
        # built-in caching.
        self._inspector = None
        if engine is not None:
            # An explicit engine wins over every other connection option.
            if credentials is not None:
                logger.warning(
                    "Both credentials and engine were provided during initialization of SqlAlchemyExecutionEngine. " # noqa: E501 # FIXME CoP
                    "Ignoring credentials."
                )
            self.engine = engine
        else:
            self._setup_engine(
                kwargs=kwargs,
                connection_string=connection_string,
                credentials=credentials,
                url=url,
            )
        # Backends where temp-table creation is not supported: force the flag to False regardless of the argument. # noqa: E501 # FIXME CoP
        if (
            self.dialect_name
            in [
                GXSqlDialect.TRINO,
                GXSqlDialect.AWSATHENA, # WKS 202201 - AWS Athena currently doesn't support temp_tables. # noqa: E501 # FIXME CoP
                GXSqlDialect.CLICKHOUSE,
            ]
        ):
            self._create_temp_table = False
        # Get the dialect **for purposes of identifying types**
        if self.dialect_name in [
            GXSqlDialect.POSTGRESQL,
            GXSqlDialect.MYSQL,
            GXSqlDialect.SQLITE,
            GXSqlDialect.ORACLE,
            GXSqlDialect.MSSQL,
        ]:
            # These are the officially included and supported dialects by sqlalchemy
            self.dialect_module = import_library_module(
                module_name=f"sqlalchemy.dialects.{self.engine.dialect.name}"
            )
        elif self.dialect_name == GXSqlDialect.SNOWFLAKE:
            self.dialect_module = import_library_module(
                module_name="snowflake.sqlalchemy.snowdialect"
            )
        elif self.dialect_name == GXSqlDialect.DREMIO:
            # WARNING: Dremio Support is experimental, functionality is not fully under test
            self.dialect_module = import_library_module(module_name="sqlalchemy_dremio.pyodbc")
        elif self.dialect_name == GXSqlDialect.REDSHIFT:
            self.dialect_module = import_library_module(module_name="sqlalchemy_redshift.dialect")
        elif self.dialect_name == GXSqlDialect.BIGQUERY:
            self.dialect_module = import_library_module(module_name=_BIGQUERY_MODULE_NAME)
        elif self.dialect_name == GXSqlDialect.TERADATASQL:
            # WARNING: Teradata Support is experimental, functionality is not fully under test
            self.dialect_module = import_library_module(module_name="teradatasqlalchemy.dialect")
        elif self.dialect_name == GXSqlDialect.TRINO:
            # WARNING: Trino Support is experimental, functionality is not fully under test
            self.dialect_module = import_library_module(module_name="trino.sqlalchemy.dialect")
        elif self.dialect_name == GXSqlDialect.CLICKHOUSE:
            # WARNING: ClickHouse Support is experimental, functionality is not fully under test
            self.dialect_module = import_library_module(
                module_name="clickhouse_sqlalchemy.drivers.base"
            )
        elif self.dialect_name == GXSqlDialect.DATABRICKS:
            self.dialect_module = import_library_module("databricks.sqlalchemy")
        else:
            # Unknown dialect: type identification falls back to generic behavior.
            self.dialect_module = None
        # <WILL> 20210726 - engine_backup is used by the snowflake connector, which requires connection and engine # noqa: E501 # FIXME CoP
        # to be closed and disposed separately. Currently self.engine can refer to either a Connection or Engine, # noqa: E501 # FIXME CoP
        # depending on the backend. This will need to be cleaned up in an upcoming refactor, so that Engine and # noqa: E501 # FIXME CoP
        # Connection can be handled separately.
        self._engine_backup = None
        if self.engine and self.dialect_name in [
            GXSqlDialect.SQLITE,
            GXSqlDialect.MSSQL,
            GXSqlDialect.SNOWFLAKE,
            GXSqlDialect.MYSQL,
        ]:
            if self.engine.dialect.name.lower() == GXSqlDialect.SQLITE:
                # sqlite lacks sqrt/md5 natively; register Python implementations
                # so metric SQL that uses them still works.
                def _add_sqlite_functions(connection):
                    logger.info(f"Adding custom sqlite functions to connection {connection}")
                    connection.create_function("sqrt", 1, lambda x: math.sqrt(x))
                    connection.create_function(
                        "md5",
                        2,
                        lambda x, d: hashlib.md5(str(x).encode("utf-8")).hexdigest()[-1 * d :],
                    )
                # Add sqlite functions to any future connections.
                def _on_connect(dbapi_con, connection_record):
                    logger.info(
                        f"A new sqlite connection was created: {dbapi_con}, {connection_record}"
                    )
                    _add_sqlite_functions(dbapi_con)
                sa.event.listen(self.engine, "connect", _on_connect)
                # Also immediately add the sqlite functions in case there already exists an underlying # noqa: E501 # FIXME CoP
                # sqlite3.Connection (distinct from a sqlalchemy Connection).
                _raw_dbapi_con = self.engine.raw_connection()
                try:
                    _add_sqlite_functions(_raw_dbapi_con)
                finally:
                    # Ensure the temporary raw DB-API connection is closed to avoid ResourceWarning.
                    try:
                        _raw_dbapi_con.close()
                    except Exception:
                        pass
            self._engine_backup = self.engine
        # Gather the call arguments of the present function (and add the "class_name"), filter out the Falsy values, # noqa: E501 # FIXME CoP
        # and set the instance "_config" variable equal to the resulting dictionary.
        self._config = {
            "name": name,
            "credentials": credentials,
            "data_context": data_context,
            "engine": engine, # type: ignore[dict-item] # FIXME CoP
            "connection_string": connection_string,
            "url": url,
            "batch_data_dict": batch_data_dict,
            "module_name": self.__class__.__module__,
            "class_name": self.__class__.__name__,
        }
        self._config.update(kwargs)
        filter_properties_dict(properties=self._config, clean_falsy=True, inplace=True)
        self._data_partitioner = SqlAlchemyDataPartitioner(dialect=self.dialect_name)
        self._data_sampler = SqlAlchemyDataSampler()
def _setup_engine(
self,
kwargs: MutableMapping[str, Any],
connection_string: str | None = None,
credentials: dict | None = None,
url: str | None = None,
):
"""Create an engine and set the engine instance variable on the execution engine.
Args:
kwargs: These will be passed as optional parameters to the SQLAlchemy engine, **not** the ExecutionEngine
connection_string: Used to connect to the database.
credentials: Used to connect to the database.
url: Used to connect to the database.
Returns:
Nothing, the engine instance variable is set.
""" # noqa: E501 # FIXME CoP
if credentials is not None:
self.engine = self._build_engine(credentials=credentials, **kwargs)
elif connection_string is not None:
if _dialect_requires_persisted_connection(
connection_string=connection_string, credentials=credentials, url=url
):
self.engine = sa.create_engine(
connection_string, **kwargs, poolclass=sqlalchemy.StaticPool
)
else:
self.engine = sa.create_engine(connection_string, **kwargs)
elif url is not None:
parsed_url = make_url(url)
self.drivername = parsed_url.drivername
if _dialect_requires_persisted_connection(
connection_string=connection_string, credentials=credentials, url=url
):
self.engine = sa.create_engine(url, **kwargs, poolclass=sqlalchemy.StaticPool)
else:
self.engine = sa.create_engine(url, **kwargs)
else:
raise InvalidConfigError( # noqa: TRY003 # FIXME CoP
"Credentials or an engine are required for a SqlAlchemyExecutionEngine."
)
    @property
    def credentials(self) -> Optional[dict]:
        """Credentials dict supplied at construction time, or None."""
        return self._credentials
    @property
    def connection_string(self) -> Optional[str]:
        """Connection string supplied at construction time, or None."""
        return self._connection_string
    @property
    def url(self) -> Optional[str]:
        """Database URL supplied at construction time, or None."""
        return self._url
    @property
    @override
    def dialect(self) -> sqlalchemy.Dialect:
        """The SQLAlchemy Dialect object of the underlying engine."""
        return self.engine.dialect
    @property
    def dialect_name(self) -> str:
        """Retrieve the string name of the engine dialect in lowercase e.g. "postgresql".
        Returns:
            Lowercase string representation of the sql dialect name.
        """
        return self.engine.dialect.name.lower()
def _build_engine(self, credentials: dict, **kwargs) -> sa.engine.Engine:
"""
Using a set of given credentials, constructs an Execution Engine , connecting to a database using a URL or a
private key path.
""" # noqa: E501 # FIXME CoP
# Update credentials with anything passed during connection time
drivername = credentials.pop("drivername")
schema_name = credentials.pop("schema_name", None)
if schema_name is not None:
logger.warning(
"schema_name specified creating a URL with schema is not supported. Set a default "
"schema on the user connecting to your database."
)
create_engine_kwargs = kwargs
connect_args = credentials.pop("connect_args", None)
if connect_args:
create_engine_kwargs["connect_args"] = connect_args
if "private_key_path" in credentials:
options, create_engine_kwargs = self._get_sqlalchemy_key_pair_auth_url(
drivername, credentials
)
else:
options = get_sqlalchemy_url(drivername, **credentials)
self.drivername = drivername
if _dialect_requires_persisted_connection(credentials=credentials):
engine = sa.create_engine(
options, **create_engine_kwargs, poolclass=sqlalchemy.StaticPool
)
else:
engine = sa.create_engine(options, **create_engine_kwargs)
return engine
    @staticmethod
    def _get_sqlalchemy_key_pair_auth_url(
        drivername: str,
        credentials: dict,
    ) -> Tuple[sa.engine.url.URL, dict]:
        """
        Utilizing a private key path and a passphrase in a given credentials dictionary, attempts to encode the provided
        values into a private key. If passphrase is incorrect, this will fail and an exception is raised.
        Args:
            drivername(str) - The name of the driver class
            credentials(dict) - A dictionary of database credentials used to access the database;
                "private_key_path", "private_key_passphrase" and (optionally) "drivername" are
                popped from it, mutating the caller's dict.
        Returns:
            a tuple consisting of a url with the serialized key-pair authentication, and a dictionary of engine kwargs.
        """ # noqa: E501 # FIXME CoP
        # Imported lazily so the cryptography dependency is only required for key-pair auth.
        from cryptography.hazmat.backends import default_backend
        from cryptography.hazmat.primitives import serialization
        private_key_path = credentials.pop("private_key_path")
        private_key_passphrase = credentials.pop("private_key_passphrase")
        with Path(private_key_path).expanduser().resolve().open(mode="rb") as key:
            try:
                # password=None means the PEM key is expected to be unencrypted.
                p_key = serialization.load_pem_private_key(
                    key.read(),
                    password=(private_key_passphrase.encode() if private_key_passphrase else None),
                    backend=default_backend(),
                )
            except ValueError as e:
                # cryptography signals a wrong passphrase via ValueError; surface a clearer error.
                if "incorrect password" in str(e).lower():
                    raise DatasourceKeyPairAuthBadPassphraseError(
                        datasource_name="SqlAlchemyDatasource",
                        message="Decryption of key failed, was the passphrase incorrect?",
                    ) from e
                else:
                    raise e # noqa: TRY201 # FIXME CoP
        # Re-serialize to DER/PKCS8 without encryption, the format the DB driver expects.
        pkb = p_key.private_bytes(
            encoding=serialization.Encoding.DER,
            format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption(),
        )
        credentials_driver_name = credentials.pop("drivername", None)
        create_engine_kwargs = {"connect_args": {"private_key": pkb}}
        return (
            get_sqlalchemy_url(drivername or credentials_driver_name, **credentials),
            create_engine_kwargs,
        )
    @override
    def get_domain_records(  # noqa: C901, PLR0912, PLR0915 # FIXME CoP
        self,
        domain_kwargs: dict,
    ) -> sqlalchemy.Selectable:
        """Uses the given Domain kwargs (which include row_condition, condition_parser, and ignore_row_if directives) to obtain and/or query a Batch of data.
        Args:
            domain_kwargs (dict) - A dictionary consisting of the Domain kwargs specifying which data to obtain
        Returns:
            An SqlAlchemy table/column(s) (the selectable object for obtaining data on which to compute returned in the format of an SqlAlchemy table/column(s) object)
        Raises:
            GreatExpectationsError: if no batch can be resolved, or an unsupported
                condition_parser / PassThroughCondition is supplied.
            ValueError: if a "query" domain kwarg or an unrecognized ignore_row_if value is given.
        """ # noqa: E501 # FIXME CoP
        # --- Resolve which batch's data to operate on ---
        data_object: SqlAlchemyBatchData
        batch_id: Optional[str] = domain_kwargs.get("batch_id")
        if batch_id is None:
            # We allow no batch id specified if there is only one batch
            if self.batch_manager.active_batch_data:
                data_object = cast("SqlAlchemyBatchData", self.batch_manager.active_batch_data)
            else:
                raise GreatExpectationsError(  # noqa: TRY003 # FIXME CoP
                    "No batch is specified, but could not identify a loaded batch."
                )
        else:  # noqa: PLR5501 # FIXME CoP
            if batch_id in self.batch_manager.batch_data_cache:
                data_object = cast(
                    "SqlAlchemyBatchData", self.batch_manager.batch_data_cache[batch_id]
                )
            else:
                raise GreatExpectationsError(f"Unable to find batch with batch_id {batch_id}")  # noqa: TRY003 # FIXME CoP
        # --- Resolve the base selectable (table override, or the batch's own selectable) ---
        selectable: sqlalchemy.Selectable
        if "table" in domain_kwargs and domain_kwargs["table"] is not None:
            # TODO: Add logic to handle record_set_name once implemented
            # (i.e. multiple record sets (tables) in one batch
            if domain_kwargs["table"] != data_object.selectable.name:
                # noinspection PyProtectedMember
                selectable = sa.Table(
                    domain_kwargs["table"],
                    sa.MetaData(),
                    schema=data_object._schema_name,
                )
            else:
                selectable = data_object.selectable
        elif "query" in domain_kwargs:
            raise ValueError("query is not currently supported by SqlAlchemyExecutionEngine")  # noqa: TRY003 # FIXME CoP
        else:
            selectable = data_object.selectable
        """
        If a custom query is passed, selectable will be TextClause and not formatted
        as a subquery wrapped in "(subquery) alias". TextClause must first be converted
        to TextualSelect using sa.columns() before it can be converted to type Subquery
        """
        if sqlalchemy.TextClause and isinstance(selectable, sqlalchemy.TextClause):  # type: ignore[truthy-function] # FIXME CoP
            selectable = selectable.columns().subquery()
        # Filtering by row condition.
        if "row_condition" in domain_kwargs and domain_kwargs["row_condition"] is not None:
            row_condition = domain_kwargs["row_condition"]
            condition_parser = domain_kwargs.get("condition_parser", None)
            if isinstance(row_condition, dict):
                # Serialized conditions arrive as dicts; rebuild the Condition object.
                row_condition = deserialize_row_condition(row_condition)
            # PassThroughCondition is not supported for SQLAlchemy
            if isinstance(row_condition, PassThroughCondition):
                raise GreatExpectationsError(  # noqa: TRY003 # FIXME
                    "PassThroughCondition (pandas/spark syntax) is not supported for "
                    "SqlAlchemyExecutionEngine. Please use the latest documented "
                    "row_condition syntax, which does not require condition_parser."
                )
            if isinstance(row_condition, Condition):
                parsed_condition = self.condition_to_filter_clause(row_condition)
            elif condition_parser in [
                CONDITION_PARSER_GREAT_EXPECTATIONS,
                CONDITION_PARSER_GREAT_EXPECTATIONS_DEPRECATED,
            ]:
                parsed_condition = parse_condition_to_sqlalchemy(row_condition)
            else:
                raise GreatExpectationsError(  # noqa: TRY003 # FIXME CoP
                    "SqlAlchemyExecutionEngine only supports the great_expectations condition_parser."  # noqa: E501 # FIXME CoP
                )
            selectable = sa.select(sa.text("*")).select_from(selectable).where(parsed_condition) # type: ignore[arg-type] # FIXME CoP
        # Filtering by filter_conditions
        filter_conditions: List[RowCondition] = domain_kwargs.get("filter_conditions", [])
        # For SqlAlchemyExecutionEngine only one filter condition is allowed
        if len(filter_conditions) == 1:
            filter_condition = filter_conditions[0]
            assert filter_condition.condition_type == RowConditionParserType.GE, (
                "filter_condition must be of type GX for SqlAlchemyExecutionEngine"
            )
            # SQLAlchemy 2.0 deprecated select_from() from a non-Table asset without a subquery.
            # Implicit coercion of SELECT and textual SELECT constructs into FROM clauses is deprecated. # noqa: E501 # FIXME CoP
            if not isinstance(selectable, (sa.Table, Subquery)):
                selectable = selectable.subquery()  # type: ignore[attr-defined] # FIXME CoP
            selectable = (
                sa.select(sa.text("*"))
                .select_from(selectable)  # type: ignore[arg-type] # FIXME CoP
                .where(parse_condition_to_sqlalchemy(filter_condition.condition))
            )
        elif len(filter_conditions) > 1:
            raise GreatExpectationsError(  # noqa: TRY003 # FIXME CoP
                "SqlAlchemyExecutionEngine currently only supports a single filter condition."
            )
        # Single-column domains: ignore_row_if does not apply; return what we have.
        if "column" in domain_kwargs:
            return selectable
        # Filtering by ignore_row_if directive
        if (
            "column_A" in domain_kwargs
            and "column_B" in domain_kwargs
            and "ignore_row_if" in domain_kwargs
        ):
            if cast("SqlAlchemyBatchData", self.batch_manager.active_batch_data).use_quoted_name:
                # Checking if case-sensitive and using appropriate name
                # noinspection PyPep8Naming
                column_A_name = sqlalchemy.quoted_name(domain_kwargs["column_A"], quote=True)
                # noinspection PyPep8Naming
                column_B_name = sqlalchemy.quoted_name(domain_kwargs["column_B"], quote=True)
            else:
                # noinspection PyPep8Naming
                column_A_name = domain_kwargs["column_A"]
                # noinspection PyPep8Naming
                column_B_name = domain_kwargs["column_B"]
            ignore_row_if = domain_kwargs["ignore_row_if"]
            if ignore_row_if == "both_values_are_missing":
                # Keep rows unless BOTH columns are NULL.
                selectable = get_sqlalchemy_selectable(
                    sa.select(sa.text("*"))
                    .select_from(get_sqlalchemy_selectable(selectable))  # type: ignore[arg-type] # FIXME CoP
                    .where(
                        sa.not_(
                            sa.and_(
                                sa.column(column_A_name) == None,  # noqa: E711 # FIXME CoP
                                sa.column(column_B_name) == None,  # noqa: E711 # FIXME CoP
                            )
                        )
                    )
                )
            elif ignore_row_if == "either_value_is_missing":
                # Keep rows only when NEITHER column is NULL.
                selectable = get_sqlalchemy_selectable(
                    sa.select(sa.text("*"))
                    .select_from(get_sqlalchemy_selectable(selectable))  # type: ignore[arg-type] # FIXME CoP
                    .where(
                        sa.not_(
                            sa.or_(
                                sa.column(column_A_name) == None,  # noqa: E711 # FIXME CoP
                                sa.column(column_B_name) == None,  # noqa: E711 # FIXME CoP
                            )
                        )
                    )
                )
            else:  # noqa: PLR5501 # FIXME CoP
                if ignore_row_if != "neither":
                    raise ValueError(f'Unrecognized value of ignore_row_if ("{ignore_row_if}").')  # noqa: TRY003 # FIXME CoP
            return selectable
        if "column_list" in domain_kwargs and "ignore_row_if" in domain_kwargs:
            if cast("SqlAlchemyBatchData", self.batch_manager.active_batch_data).use_quoted_name:
                # Checking if case-sensitive and using appropriate name
                column_list = [
                    sqlalchemy.quoted_name(domain_kwargs[column_name], quote=True)
                    for column_name in domain_kwargs["column_list"]
                ]
            else:
                column_list = domain_kwargs["column_list"]
            ignore_row_if = domain_kwargs["ignore_row_if"]
            if ignore_row_if == "all_values_are_missing":
                # Keep rows unless EVERY listed column is NULL.
                selectable = get_sqlalchemy_selectable(
                    sa.select(sa.text("*"))
                    .select_from(get_sqlalchemy_selectable(selectable))  # type: ignore[arg-type] # FIXME CoP
                    .where(
                        sa.not_(
                            sa.and_(
                                *(
                                    sa.column(column_name) == None  # noqa: E711 # FIXME CoP
                                    for column_name in column_list
                                )
                            )
                        )
                    )
                )
            elif ignore_row_if == "any_value_is_missing":
                # Keep rows only when NO listed column is NULL.
                selectable = get_sqlalchemy_selectable(
                    sa.select(sa.text("*"))
                    .select_from(get_sqlalchemy_selectable(selectable))  # type: ignore[arg-type] # FIXME CoP
                    .where(
                        sa.not_(
                            sa.or_(
                                *(
                                    sa.column(column_name) == None  # noqa: E711 # FIXME CoP
                                    for column_name in column_list
                                )
                            )
                        )
                    )
                )
            else:  # noqa: PLR5501 # FIXME CoP
                if ignore_row_if != "never":
                    raise ValueError(f'Unrecognized value of ignore_row_if ("{ignore_row_if}").')  # noqa: TRY003 # FIXME CoP
            return selectable
        return selectable
@override
def get_compute_domain(
self,
domain_kwargs: dict,
domain_type: Union[str, MetricDomainTypes],
accessor_keys: Optional[Iterable[str]] = None,
) -> Tuple[sqlalchemy.Selectable, dict, dict]:
"""Uses a given batch dictionary and Domain kwargs to obtain a SqlAlchemy column object.
Args:
domain_kwargs (dict): a dictionary consisting of the Domain kwargs specifying which data to obtain
domain_type (str or MetricDomainTypes): an Enum value indicating which metric Domain the user would like \
to be using, or a corresponding string value representing it. String types include "identity", "column", \
"column_pair", "table" and "other". Enum types include capitalized versions of these from the class \
MetricDomainTypes.
accessor_keys (str iterable): keys that are part of the compute Domain but should be ignored when \
describing the Domain and simply transferred with their associated values into accessor_domain_kwargs.
Returns:
SqlAlchemy column
""" # noqa: E501 # FIXME CoP
partitioned_domain_kwargs: PartitionDomainKwargs = self._partition_domain_kwargs(
domain_kwargs, domain_type, accessor_keys
)
selectable: sqlalchemy.Selectable = self.get_domain_records(domain_kwargs=domain_kwargs)
return (
selectable,
partitioned_domain_kwargs.compute,
partitioned_domain_kwargs.accessor,
)
@override
def _partition_column_metric_domain_kwargs( # type: ignore[override] # ExecutionEngine method is static
self,
domain_kwargs: dict,
domain_type: MetricDomainTypes,
) -> PartitionDomainKwargs:
"""Partition domain_kwargs for column Domain types into compute and accessor Domain kwargs.
Args:
domain_kwargs: A dictionary consisting of the Domain kwargs specifying which data to obtain
domain_type: an Enum value indicating which metric Domain the user would
like to be using.
Returns:
compute_domain_kwargs, accessor_domain_kwargs partition from domain_kwargs
The union of compute_domain_kwargs, accessor_domain_kwargs is the input domain_kwargs
""" # noqa: E501 # FIXME CoP
assert domain_type == MetricDomainTypes.COLUMN, (
"This method only supports MetricDomainTypes.COLUMN"
)
compute_domain_kwargs: dict = copy.deepcopy(domain_kwargs)
accessor_domain_kwargs: dict = {}
if "column" not in compute_domain_kwargs:
raise gx_exceptions.GreatExpectationsError( # noqa: TRY003 # FIXME CoP
"Column not provided in compute_domain_kwargs"
)
# Checking if case-sensitive and using appropriate name
if cast("SqlAlchemyBatchData", self.batch_manager.active_batch_data).use_quoted_name:
accessor_domain_kwargs["column"] = sqlalchemy.quoted_name(
compute_domain_kwargs.pop("column"), quote=True
)
else:
accessor_domain_kwargs["column"] = compute_domain_kwargs.pop("column")
return PartitionDomainKwargs(compute_domain_kwargs, accessor_domain_kwargs)
@override
def _partition_column_pair_metric_domain_kwargs( # type: ignore[override] # ExecutionEngine method is static
self,
domain_kwargs: dict,
domain_type: MetricDomainTypes,
) -> PartitionDomainKwargs:
"""Partition domain_kwargs for column pair Domain types into compute and accessor Domain kwargs.
Args:
domain_kwargs: A dictionary consisting of the Domain kwargs specifying which data to obtain
domain_type: an Enum value indicating which metric Domain the user would
like to be using.
Returns:
compute_domain_kwargs, accessor_domain_kwargs partition from domain_kwargs
The union of compute_domain_kwargs, accessor_domain_kwargs is the input domain_kwargs
""" # noqa: E501 # FIXME CoP
assert domain_type == MetricDomainTypes.COLUMN_PAIR, (
"This method only supports MetricDomainTypes.COLUMN_PAIR"
)
compute_domain_kwargs: dict = copy.deepcopy(domain_kwargs)
accessor_domain_kwargs: dict = {}
if not ("column_A" in compute_domain_kwargs and "column_B" in compute_domain_kwargs):
raise gx_exceptions.GreatExpectationsError( # noqa: TRY003 # FIXME CoP
"column_A or column_B not found within compute_domain_kwargs"
)
# Checking if case-sensitive and using appropriate name
if cast("SqlAlchemyBatchData", self.batch_manager.active_batch_data).use_quoted_name:
accessor_domain_kwargs["column_A"] = sqlalchemy.quoted_name(
compute_domain_kwargs.pop("column_A"), quote=True
)
accessor_domain_kwargs["column_B"] = sqlalchemy.quoted_name(
compute_domain_kwargs.pop("column_B"), quote=True
)
else:
accessor_domain_kwargs["column_A"] = compute_domain_kwargs.pop("column_A")
accessor_domain_kwargs["column_B"] = compute_domain_kwargs.pop("column_B")
return PartitionDomainKwargs(compute_domain_kwargs, accessor_domain_kwargs)
@override
def _partition_multi_column_metric_domain_kwargs( # type: ignore[override] # ExecutionEngine method is static
self,
domain_kwargs: dict,
domain_type: MetricDomainTypes,
) -> PartitionDomainKwargs:
"""Partition domain_kwargs for multicolumn Domain types into compute and accessor Domain kwargs.
Args:
domain_kwargs: A dictionary consisting of the Domain kwargs specifying which data to obtain
domain_type: an Enum value indicating which metric Domain the user would
like to be using.
Returns:
compute_domain_kwargs, accessor_domain_kwargs partition from domain_kwargs
The union of compute_domain_kwargs, accessor_domain_kwargs is the input domain_kwargs
""" # noqa: E501 # FIXME CoP
assert domain_type == MetricDomainTypes.MULTICOLUMN, (
"This method only supports MetricDomainTypes.MULTICOLUMN"
)
compute_domain_kwargs: dict = copy.deepcopy(domain_kwargs)
accessor_domain_kwargs: dict = {}
if "column_list" not in domain_kwargs:
raise GreatExpectationsError("column_list not found within domain_kwargs") # noqa: TRY003 # FIXME CoP
column_list = compute_domain_kwargs.pop("column_list")
if len(column_list) < 2: # noqa: PLR2004 # FIXME CoP
raise GreatExpectationsError("column_list must contain at least 2 columns") # noqa: TRY003 # FIXME CoP
# Checking if case-sensitive and using appropriate name
if cast("SqlAlchemyBatchData", self.batch_manager.active_batch_data).use_quoted_name:
accessor_domain_kwargs["column_list"] = [
sqlalchemy.quoted_name(column_name, quote=True) for column_name in column_list
]
else:
accessor_domain_kwargs["column_list"] = column_list
return PartitionDomainKwargs(compute_domain_kwargs, accessor_domain_kwargs)
    @override
    def resolve_metric_bundle(
        self,
        metric_fn_bundle: Iterable[MetricComputationConfiguration],
    ) -> dict[MetricConfigurationID, MetricValue]:
        """For every metric in a set of Metrics to resolve, obtains necessary metric keyword arguments and builds
        bundles of the metrics into one large query dictionary so that they are all executed simultaneously. Will fail
        if bundling the metrics together is not possible.
        Args:
            metric_fn_bundle (Iterable[MetricComputationConfiguration]): \
                "MetricComputationConfiguration" contains MetricProvider's MetricConfiguration (its unique identifier),
                its metric provider function (the function that actually executes the metric), and arguments to pass
                to metric provider function (dictionary of metrics defined in registry and corresponding arguments).
        Returns:
            A dictionary of "MetricConfiguration" IDs and their corresponding now-queried (fully resolved) values.
        Raises:
            ExecutionEngineError: if the bundled SQL query fails to execute.
        """ # noqa: E501 # FIXME CoP
        resolved_metrics: dict[MetricConfigurationID, MetricValue] = {}
        res: List[sqlalchemy.Row]
        # Group metrics per domain; Databricks limits parameters per query, so cap there.
        queries: list[dict] = self._organize_metrics_by_domain(
            metric_fn_bundle,
            limit=DATABRICKS_MAX_PARAMS_PER_QUERY
            if self.engine.dialect.name.lower() == GXSqlDialect.DATABRICKS
            else None,
        )
        for query in queries:
            domain_kwargs: dict = query["domain_kwargs"]
            selectable: sqlalchemy.Selectable = self.get_domain_records(domain_kwargs=domain_kwargs)
            # One SELECT expression per metric id, in matching order.
            assert len(query["select"]) == len(query["metric_ids"])
            try:
                """
                If a custom query is passed, selectable will be TextClause and not formatted
                as a subquery wrapped in "(subquery) alias". TextClause must first be converted
                to TextualSelect using sa.columns() before it can be converted to type Subquery
                """
                if sqlalchemy.TextClause and isinstance(selectable, sqlalchemy.TextClause):  # type: ignore[truthy-function] # FIXME CoP
                    sa_query_object = sa.select(*query["select"]).select_from(
                        selectable.columns().subquery()
                    )
                elif (sqlalchemy.Select and isinstance(selectable, sqlalchemy.Select)) or (  # type: ignore[truthy-function] # FIXME CoP
                    sqlalchemy.TextualSelect and isinstance(selectable, sqlalchemy.TextualSelect)  # type: ignore[truthy-function] # FIXME CoP
                ):
                    # SQLAlchemy 2.0 requires wrapping SELECTs in a subquery before select_from().
                    sa_query_object = sa.select(*query["select"]).select_from(selectable.subquery())
                else:
                    sa_query_object = sa.select(*query["select"]).select_from(selectable)  # type: ignore[arg-type] # FIXME CoP
                logger.debug(f"Attempting query {sa_query_object!s}")
                res = self.execute_query(sa_query_object).fetchall()  # type: ignore[assignment] # FIXME CoP
                logger.debug(
                    f"""SqlAlchemyExecutionEngine computed {len(res[0])} metrics on domain_id \
{IDDict(domain_kwargs).to_id()}"""
                )
            except sqlalchemy.OperationalError as oe:
                exception_message: str = "An SQL execution Exception occurred. "
                exception_traceback: str = traceback.format_exc()
                exception_message += (
                    f'{type(oe).__name__}: "{oe!s}". Traceback: "{exception_traceback}".'
                )
                logger.error(exception_message)  # noqa: TRY400 # FIXME CoP
                raise ExecutionEngineError(message=exception_message)
            # The bundled query aggregates, so exactly one row comes back.
            assert len(res) == 1, "all bundle-computed metrics must be single-value statistics"
            assert len(query["metric_ids"]) == len(res[0]), "unexpected number of metrics returned"
            idx: int
            metric_id: MetricConfigurationID
            for idx, metric_id in enumerate(query["metric_ids"]):
                # Converting SQL query execution results into JSON-serializable format produces simple data types, # noqa: E501 # FIXME CoP
                # amenable for subsequent post-processing by higher-level "Metric" and "Expectation" layers. # noqa: E501 # FIXME CoP
                resolved_metrics[metric_id] = convert_to_json_serializable(data=res[0][idx])
        return resolved_metrics
def close(self) -> None:
"""
Note: Will 20210729
This is a helper function that will close and dispose Sqlalchemy objects that are used to connect to a database.
Databases like Snowflake require the connection and engine to be instantiated and closed separately, and not
doing so has caused problems with hanging connections.
Currently the ExecutionEngine does not support handling connections and engine separately, and will actually
override the engine with a connection in some cases, obfuscating what object is used to actually used by the
ExecutionEngine to connect to the external database. This will be handled in an upcoming refactor, which will
allow this function to eventually become:
self.connection.close()
self.engine.dispose()
More background can be found here: https://github.com/great-expectations/great_expectations/pull/3104/
""" # noqa: E501 # FIXME CoP
if self._engine_backup:
if self._connection:
self._connection.close()
self._engine_backup.dispose()
else:
self.engine.dispose()
def _finalize_domain_query(
self,
domain_id: IDDictID,
domain_batches: dict,
batch_counters: dict,
domain_kwargs_map: dict,
) -> tuple[IDDictID | None, dict | None, int | None]:
"""Finalize the current accumulated metrics for a domain into a query.
This method calculates what new query entry should be added and what
batch state should be reset for the next parameter batch.
Returns:
Tuple of (final_domain_id, new_query_entry, new_batch_counter)
Returns (None, None, None) if no finalization is needed
"""
new_query_entry = None
new_batch_counter = None
final_domain_id = None
if domain_id in domain_batches and domain_batches[domain_id]["select"]:
batch_idx = batch_counters.get(domain_id, 0)
domain_kwargs = domain_kwargs_map[domain_id]
if batch_idx == 0:
final_domain_id = domain_id
else:
final_domain_id = IDDict({**domain_kwargs, "_batch_idx": batch_idx}).to_id()
new_query_entry = {
"select": domain_batches[domain_id]["select"],
"metric_ids": domain_batches[domain_id]["metric_ids"],
"domain_kwargs": domain_kwargs,
}
new_batch_counter = batch_idx + 1
return final_domain_id, new_query_entry, new_batch_counter
return final_domain_id, new_query_entry, new_batch_counter
def _organize_metrics_by_domain(  # noqa: C901 # FIXME
    self, metric_fn_bundle: Iterable[MetricComputationConfiguration], limit: int | None = None
) -> list[dict]:
    """Organize metrics from a bundle into domain-grouped queries.

    Metrics that share a compute domain are accumulated into one query. When
    ``limit`` is given, a domain's accumulated query is flushed (via
    ``_finalize_domain_query``) whenever adding the next metric would push the
    compiled query past ``limit`` bind parameters, and a fresh batch is
    started for that domain.

    Args:
        metric_fn_bundle: The metric bundle containing configurations to organize.
        limit: Maximum number of bind parameters per query; None disables splitting.

    Returns:
        List of query configurations, each holding "select" expressions,
        "metric_ids", and the originating "domain_kwargs".
    """
    queries: list[dict] = []
    # Per-domain accumulation state:
    #   domain_batches: pending select expressions / metric ids per domain
    #   batch_counters: number of parameter batches already flushed per domain
    #   domain_kwargs_map: the compute-domain kwargs keyed by domain id
    domain_batches: dict[IDDictID, dict] = {}
    batch_counters: dict[IDDictID, int] = {}
    domain_kwargs_map: dict[IDDictID, dict] = {}
    for bundled_metric_configuration in metric_fn_bundle:
        metric_to_resolve: MetricConfiguration = (
            bundled_metric_configuration.metric_configuration
        )
        metric_fn: Any = bundled_metric_configuration.metric_fn
        domain_kwargs: dict = bundled_metric_configuration.compute_domain_kwargs or {}
        # Normalize to IDDict so the kwargs can be hashed into a stable domain id.
        if not isinstance(domain_kwargs, IDDict):
            domain_kwargs = IDDict(domain_kwargs)
        domain_id = domain_kwargs.to_id()
        selectable: sqlalchemy.Selectable = self.get_domain_records(domain_kwargs=domain_kwargs)
        if domain_id not in domain_batches:
            # First metric seen for this domain: initialize its state.
            domain_batches[domain_id] = {"select": [], "metric_ids": []}
            batch_counters[domain_id] = 0
            domain_kwargs_map[domain_id] = domain_kwargs
        if limit:
            # Probe whether adding this metric would exceed the parameter limit.
            test_selects = domain_batches[domain_id]["select"] + [
                metric_fn.label(metric_to_resolve.metric_name)
            ]
            test_param_count = self._count_query_parameters(selectable, test_selects)
            if test_param_count > limit and domain_batches[domain_id]["select"]:
                # Flush the accumulated batch for this domain and start a new one.
                final_domain_id, new_query_entry, new_batch_counter = (
                    self._finalize_domain_query(
                        domain_id, domain_batches, batch_counters, domain_kwargs_map
                    )
                )
                if final_domain_id is not None:
                    assert new_query_entry is not None
                    assert new_batch_counter is not None
                    queries.append(new_query_entry)
                    domain_batches[domain_id] = {"select": [], "metric_ids": []}
                    batch_counters[domain_id] = new_batch_counter
        if self.engine.dialect.name.lower() == GXSqlDialect.CLICKHOUSE:
            # ClickHouse gets a randomized label, presumably to keep column
            # labels unique within one query.
            # NOTE(review): `metric_name.join(...)` uses the metric name as the
            # *separator* between four random letters; a unique suffix
            # (metric_name + random chars) may have been intended -- confirm.
            domain_batches[domain_id]["select"].append(
                metric_fn.label(
                    metric_to_resolve.metric_name.join(
                        random.choices(string.ascii_lowercase, k=4)
                    )
                )
            )
            domain_batches[domain_id]["metric_ids"].append(metric_to_resolve.id)
        else:
            domain_batches[domain_id]["select"].append(
                metric_fn.label(metric_to_resolve.metric_name)
            )
            domain_batches[domain_id]["metric_ids"].append(metric_to_resolve.id)
    # Flush whatever remains accumulated for every domain.
    for domain_id in list(domain_batches.keys()):
        final_domain_id, new_query_entry, new_batch_counter = self._finalize_domain_query(
            domain_id, domain_batches, batch_counters, domain_kwargs_map
        )
        if final_domain_id is not None:
            assert new_query_entry is not None
            assert new_batch_counter is not None
            queries.append(new_query_entry)
            domain_batches[domain_id] = {"select": [], "metric_ids": []}
            batch_counters[domain_id] = new_batch_counter
    return queries
def _count_query_parameters(self, selectable: sqlalchemy.Selectable, select_list: list) -> int:
    """Count the bind parameters a query with these SELECT expressions would compile to.

    Args:
        selectable: the base selectable object.
        select_list: SELECT expressions to include in the probe query.

    Returns:
        Number of parameters the compiled query would carry, or a
        conservative estimate when the query cannot be built or compiled.
    """
    # Conservative upper bound: assume two parameters per SELECT expression.
    fallback_estimate = len(select_list) * 2

    if isinstance(selectable, sqlalchemy.TextClause):
        probe = sa.select(*select_list).select_from(selectable.columns().subquery())
    elif isinstance(selectable, (sqlalchemy.Select, sqlalchemy.TextualSelect)):
        probe = sa.select(*select_list).select_from(selectable.subquery())
    elif isinstance(selectable, sa.sql.FromClause):
        probe = sa.select(*select_list).select_from(selectable)
    else:
        # Unknown selectable kind: cannot build a probe query.
        return fallback_estimate

    try:
        return len(probe.compile(dialect=self.engine.dialect).params)
    except Exception:
        # Compilation can fail for dialect-specific constructs; fall back.
        return fallback_estimate
def _get_partitioner_method(self, partitioner_method_name: str) -> Callable:
"""Get the appropriate partitioner method from the method name.
Args:
partitioner_method_name: name of the partitioner to retrieve.
Returns:
partitioner method.
"""
return self._data_partitioner.get_partitioner_method(partitioner_method_name)
def execute_partitioned_query(
    self, partitioned_query: sqlalchemy.Selectable
) -> List[sqlalchemy.Row]:
    """Run the partitioned query on this execution engine and fetch every row.

    Args:
        partitioned_query: Query to be executed as a sqlalchemy Selectable.

    Returns:
        List of row results.
    """
    if self.dialect_name == "awsathena":
        # Athena only accepts casts to varchar, but sqlalchemy renders
        # `CAST(colname AS STRING)` here as it does for other dialects, so
        # render the query to literal SQL text and patch the casts.
        rendered = str(  # type: ignore[assignment] # FIXME CoP
            partitioned_query.compile(self.engine, compile_kwargs={"literal_binds": True})
        )
        cast_pattern = re.compile(r"(CAST\(EXTRACT\(.*?\))( AS STRING\))", re.IGNORECASE)
        partitioned_query = cast_pattern.sub(r"\1 AS VARCHAR)", rendered)  # type: ignore[call-overload] # FIXME CoP
    return self.execute_query(partitioned_query).fetchall()  # type: ignore[return-value] # FIXME CoP
def get_data_for_batch_identifiers(
    self,
    selectable: sqlalchemy.Selectable,
    partitioner_method_name: str,
    partitioner_kwargs: dict,
) -> List[dict]:
    """Build batch-identifier data for the input table using the partitioner config.

    SQL partitioner configurations yield the unique values that comprise each
    batch by introspecting the data itself.

    Args:
        selectable: Selectable to partition.
        partitioner_method_name: Desired partitioner method to use.
        partitioner_kwargs: Keyword directives consumed by the partitioner method.

    Returns:
        List of dicts of the form [{column_name: {"key": value}}]
    """
    partitioner = self._data_partitioner
    return partitioner.get_data_for_batch_identifiers(
        execution_engine=self,
        selectable=selectable,
        partitioner_method_name=partitioner_method_name,
        partitioner_kwargs=partitioner_kwargs,
    )
def _build_selectable_from_batch_spec(self, batch_spec: BatchSpec) -> sqlalchemy.Selectable:
if batch_spec.get("query") is not None and batch_spec.get("sampling_method") is not None:
raise ValueError( # noqa: TRY003 # FIXME CoP
"Sampling is not supported on query data. "
"It is currently only supported on table data."
)
if "partitioner_method" in batch_spec:
partitioner_fn: Callable = self._get_partitioner_method(
partitioner_method_name=batch_spec["partitioner_method"]
)
partition_clause = partitioner_fn(
batch_identifiers=batch_spec["batch_identifiers"],
**batch_spec["partitioner_kwargs"],
)
else: # noqa: PLR5501 # FIXME CoP
if self.dialect_name == GXSqlDialect.SQLITE:
partition_clause = sa.text("1 = 1")
else:
partition_clause = sa.true()
selectable: sqlalchemy.Selectable = self._subselectable(batch_spec)
sampling_method: Optional[str] = batch_spec.get("sampling_method")
if sampling_method is not None:
if sampling_method in [
"_sample_using_limit",
"sample_using_limit",
"_sample_using_random",
"sample_using_random",
]:
sampler_fn = self._data_sampler.get_sampler_method(sampling_method)
return sampler_fn(
execution_engine=self,
batch_spec=batch_spec,
where_clause=partition_clause,
)
else:
sampler_fn = self._data_sampler.get_sampler_method(sampling_method)
return (
sa.select("*")
.select_from(selectable) # type: ignore[arg-type] # FIXME CoP
.where(
sa.and_(
partition_clause,
sampler_fn(batch_spec),
)
)
)
return sa.select("*").select_from(selectable).where(partition_clause) # type: ignore[arg-type] # FIXME CoP
def _subselectable(self, batch_spec: BatchSpec) -> sqlalchemy.Selectable:
table_name = batch_spec.get("table_name")
query = batch_spec.get("query")
selectable: sqlalchemy.Selectable
if table_name:
selectable = sa.table(table_name, schema=batch_spec.get("schema_name", None))
else:
if not isinstance(query, str):
raise ValueError(f"SQL query should be a str but got {query}") # noqa: TRY003 # FIXME CoP
# Query is a valid SELECT query that begins with r"\w+select\w"
selectable = sa.select(
sa.text(query.lstrip()[6:].strip().rstrip(";").rstrip())
).subquery()
return selectable
@override
def get_batch_data_and_markers(
    self, batch_spec: BatchSpec
) -> Tuple[SqlAlchemyBatchData, BatchMarkers]:
    """Build batch data and load-time markers for a SQL batch spec.

    Args:
        batch_spec: either a SqlAlchemyDatasourceBatchSpec (table-based) or a
            RuntimeQueryBatchSpec (query-based); exactly one of "query" or
            "table_name" must be provided.

    Returns:
        Tuple of (SqlAlchemyBatchData, BatchMarkers).

    Raises:
        InvalidBatchSpecError: on an unsupported batch_spec type, or when not
            exactly one of "query"/"table_name" is given.
    """
    if not isinstance(batch_spec, (SqlAlchemyDatasourceBatchSpec, RuntimeQueryBatchSpec)):
        raise InvalidBatchSpecError(  # noqa: TRY003 # FIXME CoP
            f"""SqlAlchemyExecutionEngine accepts batch_spec only of type SqlAlchemyDatasourceBatchSpec or
                RuntimeQueryBatchSpec (illegal type "{type(batch_spec)!s}" was received).
            """  # noqa: E501 # FIXME CoP
        )
    # Exactly one of "query" / "table_name" must be truthy.
    if sum(1 if x else 0 for x in [batch_spec.get("query"), batch_spec.get("table_name")]) != 1:
        raise InvalidBatchSpecError(  # noqa: TRY003 # FIXME CoP
            "SqlAlchemyExecutionEngine only accepts a batch_spec where exactly 1 of "
            "'query' or 'table_name' is specified. "
            f"table_name={batch_spec.get('table_name')}, query={batch_spec.get('query')}"
        )
    batch_data: Optional[SqlAlchemyBatchData] = None
    # Record when this batch was loaded (UTC timestamp marker).
    batch_markers = BatchMarkers(
        {
            "ge_load_time": datetime.datetime.now(datetime.timezone.utc).strftime(
                "%Y%m%dT%H%M%S.%fZ"
            )
        }
    )
    temp_table_schema_name: Optional[str] = batch_spec.get("temp_table_schema_name")
    source_schema_name: Optional[str] = batch_spec.get("schema_name", None)
    source_table_name: Optional[str] = batch_spec.get("table_name", None)
    create_temp_table: bool = batch_spec.get("create_temp_table", self._create_temp_table)
    # this is where partitioner components are added to the selectable
    selectable: sqlalchemy.Selectable = self._build_selectable_from_batch_spec(
        batch_spec=batch_spec
    )
    # NOTE: what's being checked here is the presence of a `query` attribute, we could check this directly  # noqa: E501 # FIXME CoP
    # instead of doing an instance check
    if isinstance(batch_spec, RuntimeQueryBatchSpec):
        # query != None is already checked when RuntimeQueryBatchSpec is instantiated
        # re-compile the query to include any new parameters
        compiled_query = selectable.compile(
            dialect=self.engine.dialect,
            compile_kwargs={"literal_binds": True},
        )
        query_str = str(compiled_query)
        batch_data = SqlAlchemyBatchData(
            execution_engine=self,
            query=query_str,
            temp_table_schema_name=temp_table_schema_name,
            create_temp_table=create_temp_table,
        )
    elif isinstance(batch_spec, SqlAlchemyDatasourceBatchSpec):
        batch_data = SqlAlchemyBatchData(
            execution_engine=self,
            selectable=selectable,
            create_temp_table=create_temp_table,
            source_table_name=source_table_name,
            source_schema_name=source_schema_name,
        )
    # batch_data is always set here: the isinstance guard at the top already
    # rejected every other batch_spec type.
    return batch_data, batch_markers
def get_inspector(self) -> sqlalchemy.engine.reflection.Inspector:
    """Return the cached sqlalchemy Inspector for this engine, creating it on first use."""
    if self._inspector is not None:
        return self._inspector  # type: ignore[return-value] # FIXME CoP
    # Inspector.from_engine is deprecated since sqlalchemy 1.4; sa.inspect()
    # is the modern replacement.
    if version.parse(sa.__version__) < version.parse("1.4"):
        self._inspector = sqlalchemy.reflection.Inspector.from_engine(self.engine)  # type: ignore[assignment] # FIXME CoP
    else:
        self._inspector = sa.inspect(self.engine)  # type: ignore[assignment] # FIXME CoP
    return self._inspector  # type: ignore[return-value] # FIXME CoP
@contextmanager
def get_connection(self) -> Generator[sqlalchemy.Connection, None, None]:
    """Get a connection for executing queries.

    Some databases sqlite/mssql temp tables only persist within a connection,
    so we need to keep the connection alive by keeping a reference to it.
    Even though we use a single connection pool for dialects that need a single persisted connection
    (e.g. for accessing temporary tables), if we don't keep a reference
    then we get errors like sqlite3.ProgrammingError: Cannot operate on a closed database.

    Returns:
        Sqlalchemy connection
    """  # noqa: E501 # FIXME CoP
    if self.dialect_name in _PERSISTED_CONNECTION_DIALECTS:
        try:
            # Lazily open one long-lived connection and hand it out each time.
            if not self._connection:
                self._connection = self.engine.connect()
            yield self._connection
        finally:
            # Temp tables only persist within a connection for some dialects,
            # so we need to keep the connection alive.
            pass
    else:
        # Other dialects get a fresh connection that is closed on exit.
        with self.engine.connect() as connection:
            yield connection
@staticmethod
def _execute_query_with_recovery(
connection: sqlalchemy.Connection,
query: sqlalchemy.Selectable | sqlalchemy.TextClause,
) -> sqlalchemy.CursorResult | sqlalchemy.LegacyCursorResult:
"""Execute a query with automatic recovery from invalid transaction state.
This handles PendingRollbackError which was introduced in SQLAlchemy 2.0.
For SQLAlchemy 1.x, this error doesn't exist and won't be raised.
Args:
connection: SQLAlchemy connection to use
query: Sqlalchemy selectable query.
Returns:
CursorResult for sqlalchemy 2.0+ or LegacyCursorResult for earlier versions.
"""
try:
return connection.execute(query) # type: ignore[arg-type] # Selectable union type too broad
except PendingRollbackError:
# Connection has an invalid transaction from a previous failed operation
# Roll back and retry with the same connection
connection.rollback()
return connection.execute(query) # type: ignore[arg-type] # Selectable union type too broad
@new_method_or_class(version="0.16.14")
def execute_query(
    self, query: sqlalchemy.Selectable | sqlalchemy.TextClause
) -> sqlalchemy.CursorResult | sqlalchemy.LegacyCursorResult:
    """Execute a query using the underlying database engine.

    Args:
        query: Sqlalchemy selectable query.

    Returns:
        CursorResult for sqlalchemy 2.0+ or LegacyCursorResult for earlier versions.
    """
    with self.get_connection() as connection:
        return self._execute_query_with_recovery(connection, query)
@staticmethod
def _connection_has_transaction(connection: sqlalchemy.Connection) -> bool:
    """Report whether a connection currently has an active transaction.

    This exists for SQLAlchemy 2.0+ "autobegin" behavior, where a connection
    may have no active transaction if the database is in autocommit mode.

    Args:
        connection: SQLAlchemy connection to check.

    Returns:
        True if there's an active transaction, False otherwise.
    """
    # Before 2.0 we always use explicit connection.begin(), so a transaction
    # is guaranteed; in_transaction() exists since 1.4 but autobegin is 2.0+.
    if not is_version_greater_or_equal(sqlalchemy.sqlalchemy.__version__, "2.0.0"):
        return True
    return connection.in_transaction()
@new_method_or_class(version="0.16.14")
def execute_query_in_transaction(
    self, query: sqlalchemy.Selectable
) -> sqlalchemy.CursorResult | sqlalchemy.LegacyCursorResult:
    """Execute a query using the underlying database engine within a transaction
    that will auto commit.

    Begin once: https://docs.sqlalchemy.org/en/20/core/connections.html#begin-once

    Args:
        query: Sqlalchemy selectable query.

    Returns:
        CursorResult for sqlalchemy 2.0+ or LegacyCursorResult for earlier versions.
    """
    with self.get_connection() as connection:
        if (
            is_version_greater_or_equal(sqlalchemy.sqlalchemy.__version__, "2.0.0")
            and not connection.closed
        ):
            # SQLAlchemy 2.0+ "autobegin": execute first, then commit explicitly.
            result = self._execute_query_with_recovery(connection, query)
            # Some databases auto-commit and don't support explicit transaction management
            # Try to commit, but ignore errors from databases that auto-commit
            if self._connection_has_transaction(connection):
                try:
                    connection.commit()
                except DatabaseError as e:
                    # Databricks and other auto-commit databases may not have
                    # an active transaction even though in_transaction() returns True
                    if "no active transaction" not in str(e).lower():
                        raise
        else:
            # SQLAlchemy 1.x (or a closed connection): use an explicit
            # begin-once block that commits on successful exit.
            with connection.begin():
                result = self._execute_query_with_recovery(connection, query)
    return result
@override
def condition_to_filter_clause(self, condition: Condition) -> sa.ColumnElement:
    """Convert a condition to a sqlalchemy filter clause, narrowing the type.

    This override exists purely to help the type system: the base class cannot
    be made generic over sqlalchemy types, since sqlalchemy is not installed
    in all environments.

    Raises:
        InvalidFilterClause: if the base class produced a non-ColumnElement.
    """
    clause = super().condition_to_filter_clause(condition)
    if isinstance(clause, sa.ColumnElement):
        return clause
    raise InvalidFilterClause(clause)
@override
def _comparison_condition_to_filter_clause(
    self, condition: ComparisonCondition
) -> sa.ColumnElement:
    """Translate a comparison condition into a sqlalchemy column expression.

    Raises:
        InvalidOperatorError: if the condition's operator is not supported.
    """
    col: sa.ColumnClause = sa.column(condition.column.name)
    val = sa.literal(condition.parameter)
    op = condition.operator
    # Membership operators take the raw parameter (an iterable), not a literal.
    if op == Operator.IN:
        return col.in_(condition.parameter)
    if op == Operator.NOT_IN:
        return ~col.in_(condition.parameter)
    comparisons = {
        Operator.LESS_THAN: lambda: col < val,
        Operator.LESS_THAN_OR_EQUAL: lambda: col <= val,
        Operator.EQUAL: lambda: col == val,
        Operator.NOT_EQUAL: lambda: col != val,
        Operator.GREATER_THAN: lambda: col > val,
        Operator.GREATER_THAN_OR_EQUAL: lambda: col >= val,
    }
    if op in comparisons:
        return comparisons[op]()
    raise InvalidOperatorError(op)
@override
def _nullity_condition_to_filter_clause(self, condition: NullityCondition) -> sa.ColumnElement:
    """Translate a nullity condition into an IS NULL / IS NOT NULL clause."""
    col = sa.column(condition.column.name)
    if condition.is_null:
        return col.is_(None)
    return col.isnot(None)
@override
def _and_condition_to_filter_clause(self, condition: AndCondition) -> sa.ColumnElement:
    """AND together the filter clauses of every sub-condition."""
    clauses = (self.condition_to_filter_clause(sub) for sub in condition.conditions)
    return sa.and_(*clauses)
@override
def _or_condition_to_filter_clause(self, condition: OrCondition) -> sa.ColumnElement:
    """OR together the filter clauses of every sub-condition."""
    clauses = [self.condition_to_filter_clause(sub) for sub in condition.conditions]
    return sa.or_(*clauses)
| SqlAlchemyExecutionEngine |
python | python-attrs__attrs | tests/test_dunders.py | {
"start": 12891,
"end": 13074
} | class ____:
def __init__(self):
self.hash_value = 100
def __hash__(self):
rv = self.hash_value
self.hash_value += 1
return rv
| IncrementingHasher |
python | pypa__warehouse | warehouse/admin/forms.py | {
"start": 1789,
"end": 3233
} | class ____(wtforms.Form):
"""
Form for validating total size limit input in admin interface.
Used by both project and organization admin views to ensure
consistent validation of total size limits.
"""
total_size_limit = wtforms.StringField(
validators=[
wtforms.validators.Optional(),
],
filters=[lambda x: None if (x == "" or not x) else x],
)
def validate_total_size_limit(self, field):
"""
Validate total size limit value.
- Empty string means remove the limit (use system default)
- Must be a valid integer if provided
- Must be at least the system default
"""
if field.data is None:
# Already None from filter
return
try:
limit_value = int(field.data)
except (ValueError, TypeError):
raise wtforms.ValidationError(
"Total size limit must be a valid integer or empty"
)
# Check minimum (must be at least the system default)
min_limit = MAX_PROJECT_SIZE // ONE_GIB
if limit_value < min_limit:
raise wtforms.ValidationError(
f"Total organization size can not be less than {min_limit:0.1f}GiB"
)
# No maximum cap for total size (can be very large)
# Convert to bytes for storage
field.data = limit_value * ONE_GIB
| SetTotalSizeLimitForm |
python | huggingface__transformers | tests/models/blip_2/test_modeling_blip_2.py | {
"start": 43106,
"end": 45959
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (Blip2TextModelWithProjection,) if is_torch_available() else ()
test_resize_embeddings = True
test_attention_outputs = False
def setUp(self):
self.model_tester = Blip2TextModelWithProjectionTester(self)
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Training is not yet supported")
def test_training(self):
pass
@unittest.skip(reason="Training is not yet supported")
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(reason="Hidden_states is tested in individual model tests")
def test_hidden_states_output(self):
pass
@unittest.skip(reason="Blip2TextModelWithProjection does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Blip2TextModelWithProjection does not support input and output embeddings")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="Retain_grad is tested in individual model tests")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(reason="Blip2TextModelWithProjection does not have input/output embeddings")
def test_model_common_attributes(self):
pass
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_ids", "attention_mask", "position_ids"]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@slow
@require_torch_accelerator
def test_model_from_pretrained(self):
model_name = "Salesforce/blip2-itm-vit-g"
model = Blip2TextModelWithProjection.from_pretrained(model_name)
self.assertIsNotNone(model)
self.assertTrue(hasattr(model, "text_projection"))
_, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(input_ids=input_ids, attention_mask=attention_mask)
self.assertEqual(
outputs.text_embeds.shape,
(
self.model_tester.qformer_model_tester.batch_size,
input_ids.shape[1],
model.config.image_text_hidden_size,
),
)
| Blip2TextModelWithProjectionTest |
python | tensorflow__tensorflow | tensorflow/python/debug/lib/debug_data.py | {
"start": 1683,
"end": 8498
} | class ____:
"""Represents a TensorProto that cannot be converted to np.ndarray."""
def __init__(self, tensor_proto, initialized=True):
"""Constructor.
Args:
tensor_proto: the `TensorProto` object that cannot be represented as a
`np.ndarray` object.
initialized: (`bool`) whether the Tensor is initialized.
"""
self._tensor_proto = tensor_proto
self._initialized = initialized
def __str__(self):
output = "" if self._initialized else "Uninitialized tensor:\n"
output += str(self._tensor_proto)
return output
@property
def initialized(self):
return self._initialized
def load_tensor_from_event_file(event_file_path):
"""Load a tensor from an event file.
Assumes that the event file contains a `Event` protobuf and the `Event`
protobuf contains a `Tensor` value.
Args:
event_file_path: (`str`) path to the event file.
Returns:
The tensor value loaded from the event file, as a `numpy.ndarray`. For
uninitialized Tensors, returns `None`. For Tensors of data types that
cannot be converted to `numpy.ndarray` (e.g., `tf.resource`), return
`None`.
"""
event = event_pb2.Event()
with gfile.Open(event_file_path, "rb") as f:
event.ParseFromString(f.read())
return load_tensor_from_event(event)
def load_tensor_from_event(event):
"""Load a tensor from an Event proto.
Args:
event: The Event proto, assumed to hold a tensor value in its
summary.value[0] field.
Returns:
The tensor value loaded from the event file, as a `numpy.ndarray`, if
representation of the tensor value by a `numpy.ndarray` is possible.
For uninitialized Tensors, returns `None`. For Tensors of data types that
cannot be represented as `numpy.ndarray` (e.g., `tf.resource`), return
the `TensorProto` protobuf object without converting it to a
`numpy.ndarray`.
"""
tensor_proto = event.summary.value[0].tensor
shape = tensor_util.TensorShapeProtoToList(tensor_proto.tensor_shape)
num_elements = 1
for shape_dim in shape:
num_elements *= shape_dim
if tensor_proto.tensor_content or tensor_proto.string_val or not num_elements:
# Initialized tensor or empty tensor.
if tensor_proto.dtype == types_pb2.DT_RESOURCE:
tensor_value = InconvertibleTensorProto(tensor_proto)
else:
try:
tensor_value = tensor_util.MakeNdarray(tensor_proto)
except KeyError:
tensor_value = InconvertibleTensorProto(tensor_proto)
else:
# Uninitialized tensor or tensor of unconvertible data type.
tensor_value = InconvertibleTensorProto(tensor_proto, False)
return tensor_value
def _load_graph_def_from_event_file(event_file_path):
event = event_pb2.Event()
with gfile.Open(event_file_path, "rb") as f:
event.ParseFromString(f.read())
return graph_pb2.GraphDef.FromString(event.graph_def)
def _load_log_message_from_event_file(event_file_path):
event = event_pb2.Event()
with gfile.Open(event_file_path, "rb") as f:
event.ParseFromString(f.read())
return event.log_message.message
def _is_graph_file(file_name):
return file_name.startswith(METADATA_FILE_PREFIX + GRAPH_FILE_TAG)
def _is_run_fetches_info_file(file_name):
return file_name == METADATA_FILE_PREFIX + FETCHES_INFO_FILE_TAG
def _is_run_feed_keys_info_file(file_name):
return file_name == METADATA_FILE_PREFIX + FEED_KEYS_INFO_FILE_TAG
def _get_tensor_name(node_name, output_slot):
"""Get tensor name given node name and output slot index.
Args:
node_name: Name of the node that outputs the tensor, as a string.
output_slot: Output slot index of the tensor, as an integer.
Returns:
Name of the tensor, as a string.
"""
return "%s:%d" % (node_name, output_slot)
def _get_tensor_watch_key(node_name, output_slot, debug_op):
"""Get the string representation of a debug watch on a tensor.
Args:
node_name: Name of the node by which the watched tensor is produced, as a
string.
output_slot: Output slot index of the tensor, as an integer.
debug_op: Name of the debug op that is used to watch the tensor, as a
string.
Returns:
A string representing the debug watch on the tensor (i.e., the "watch
key").
"""
return "%s:%s" % (_get_tensor_name(node_name, output_slot), debug_op)
def has_inf_or_nan(datum, tensor):
"""A predicate for whether a tensor consists of any bad numerical values.
This predicate is common enough to merit definition in this module.
Bad numerical values include `nan`s and `inf`s.
The signature of this function follows the requirement of the method
`DebugDumpDir.find()`.
Args:
datum: (`DebugTensorDatum`) Datum metadata.
tensor: (`numpy.ndarray` or None) Value of the tensor. None represents
an uninitialized tensor.
Returns:
(`bool`) True if and only if tensor consists of any nan or inf values.
"""
_ = datum # Datum metadata is unused in this predicate.
if isinstance(tensor, InconvertibleTensorProto):
# Uninitialized tensor doesn't have bad numerical values.
# Also return False for data types that cannot be represented as numpy
# arrays.
return False
elif (np.issubdtype(tensor.dtype, np.floating) or
np.issubdtype(tensor.dtype, np.complexfloating) or
np.issubdtype(tensor.dtype, np.integer)):
return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
else:
return False
_CoreMetadata = collections.namedtuple("CoreMetadata", [
"global_step", "session_run_index", "executor_step_index", "input_names",
"output_names", "target_nodes"
])
def extract_core_metadata_from_event_proto(event):
json_metadata = json.loads(event.log_message.message)
return _CoreMetadata(json_metadata["global_step"],
json_metadata["session_run_index"],
json_metadata["executor_step_index"],
json_metadata["input_names"],
json_metadata["output_names"],
json_metadata["target_nodes"])
def device_name_to_device_path(device_name):
"""Convert device name to device path."""
device_name_items = compat.as_text(device_name).split("/")
device_name_items = [item.replace(":", "_") for item in device_name_items]
return METADATA_FILE_PREFIX + DEVICE_TAG + ",".join(device_name_items)
def device_path_to_device_name(device_dir):
"""Parse device name from device path.
Args:
device_dir: (str) a directory name for the device.
Returns:
(str) parsed device name.
"""
path_items = os.path.basename(device_dir)[
len(METADATA_FILE_PREFIX) + len(DEVICE_TAG):].split(",")
return "/".join([
path_item.replace("device_", "device:").replace("_", ":", 1)
for path_item in path_items])
| InconvertibleTensorProto |
python | pytorch__pytorch | torch/utils/data/datapipes/iter/combining.py | {
"start": 755,
"end": 2244
} | class ____(IterDataPipe):
r"""
Concatenates multiple Iterable DataPipes (functional name: ``concat``).
The resulting DataPipe will yield all the elements from the first input DataPipe, before yielding from the subsequent ones.
Args:
datapipes: Iterable DataPipes being concatenated
Example:
>>> # xdoctest: +REQUIRES(module:torchdata)
>>> import random
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp1 = IterableWrapper(range(3))
>>> dp2 = IterableWrapper(range(5))
>>> list(dp1.concat(dp2))
[0, 1, 2, 0, 1, 2, 3, 4]
"""
datapipes: tuple[IterDataPipe]
def __init__(self, *datapipes: IterDataPipe) -> None:
if len(datapipes) == 0:
raise ValueError("Expected at least one DataPipe, but got nothing")
if not all(isinstance(dp, IterDataPipe) for dp in datapipes):
raise TypeError("Expected all inputs to be `IterDataPipe`")
self.datapipes = datapipes # type: ignore[assignment]
def __iter__(self) -> Iterator:
for dp in self.datapipes:
yield from dp
def __len__(self) -> int:
if all(isinstance(dp, Sized) for dp in self.datapipes):
# pyrefly: ignore [bad-argument-type]
return sum(len(dp) for dp in self.datapipes)
else:
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
@functional_datapipe("fork")
| ConcaterIterDataPipe |
python | xlwings__xlwings | xlwings/_xlmac.py | {
"start": 3452,
"end": 4537
} | class ____(base_classes.Apps):
def _iter_excel_instances(self):
asn = subprocess.check_output(
["lsappinfo", "visibleprocesslist", "-includehidden"]
).decode("utf-8")
for asn in asn.split(" "):
if "Microsoft_Excel" in asn:
pid_info = subprocess.check_output(
["lsappinfo", "info", "-only", "pid", asn]
).decode("utf-8")
if pid_info != '"pid"=[ NULL ] \n':
yield int(pid_info.split("=")[1])
def keys(self):
return list(self._iter_excel_instances())
def add(self, spec=None, add_book=None, visible=None):
return App(spec=spec, add_book=add_book, visible=visible)
def __iter__(self):
for pid in self._iter_excel_instances():
yield App(xl=pid)
def __len__(self):
return len(list(self._iter_excel_instances()))
def __getitem__(self, pid):
if pid not in self.keys():
raise KeyError("Could not find an Excel instance with this PID.")
return App(xl=pid)
| Apps |
python | astropy__astropy | astropy/io/ascii/fixedwidth.py | {
"start": 15483,
"end": 15649
} | class ____(FixedWidthData):
"""Data reader for fixed with tables with two header lines."""
splitter_class = FixedWidthTwoLineDataSplitter
| FixedWidthTwoLineData |
python | apache__airflow | task-sdk/tests/task_sdk/bases/test_operator.py | {
"start": 2961,
"end": 3162
} | class ____(FakeOperator):
def __init__(self, test_sub_param, test_param, **kwargs):
super().__init__(test_param=test_param, **kwargs)
self.test_sub_param = test_sub_param
| FakeSubClass |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0028_remove_comments_and_update_old_migration.py | {
"start": 150,
"end": 9323
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0027_remove_json_with_html_feature"),
]
operations = [
migrations.RemoveField(
model_name="project",
name="allow_comments",
),
migrations.RemoveField(
model_name="project",
name="comment_moderation",
),
migrations.AlterField(
model_name="domain",
name="https",
field=models.BooleanField(
default=False,
help_text="Always use HTTPS for this domain",
verbose_name="Use HTTPS",
),
),
migrations.AlterField(
model_name="project",
name="documentation_type",
field=models.CharField(
choices=[
("auto", "Automatically Choose"),
("sphinx", "Sphinx Html"),
("mkdocs", "Mkdocs (Markdown)"),
("sphinx_htmldir", "Sphinx HtmlDir"),
("sphinx_singlehtml", "Sphinx Single Page HTML"),
],
default="sphinx",
help_text='Type of documentation you are building. <a href="http://www.sphinx-doc.org/en/stable/builders.html#sphinx.builders.html.DirectoryHTMLBuilder">More info</a>.',
max_length=20,
verbose_name="Documentation type",
),
),
migrations.AlterField(
model_name="project",
name="language",
field=models.CharField(
choices=[
("aa", "Afar"),
("ab", "Abkhaz"),
("af", "Afrikaans"),
("am", "Amharic"),
("ar", "Arabic"),
("as", "Assamese"),
("ay", "Aymara"),
("az", "Azerbaijani"),
("ba", "Bashkir"),
("be", "Belarusian"),
("bg", "Bulgarian"),
("bh", "Bihari"),
("bi", "Bislama"),
("bn", "Bengali"),
("bo", "Tibetan"),
("br", "Breton"),
("ca", "Catalan"),
("co", "Corsican"),
("cs", "Czech"),
("cy", "Welsh"),
("da", "Danish"),
("de", "German"),
("dz", "Dzongkha"),
("el", "Greek"),
("en", "English"),
("eo", "Esperanto"),
("es", "Spanish"),
("et", "Estonian"),
("eu", "Basque"),
("fa", "Iranian"),
("fi", "Finnish"),
("fj", "Fijian"),
("fo", "Faroese"),
("fr", "French"),
("fy", "Western Frisian"),
("ga", "Irish"),
("gd", "Scottish Gaelic"),
("gl", "Galician"),
("gn", "Guarani"),
("gu", "Gujarati"),
("ha", "Hausa"),
("hi", "Hindi"),
("he", "Hebrew"),
("hr", "Croatian"),
("hu", "Hungarian"),
("hy", "Armenian"),
("ia", "Interlingua"),
("id", "Indonesian"),
("ie", "Interlingue"),
("ik", "Inupiaq"),
("is", "Icelandic"),
("it", "Italian"),
("iu", "Inuktitut"),
("ja", "Japanese"),
("jv", "Javanese"),
("ka", "Georgian"),
("kk", "Kazakh"),
("kl", "Kalaallisut"),
("km", "Khmer"),
("kn", "Kannada"),
("ko", "Korean"),
("ks", "Kashmiri"),
("ku", "Kurdish"),
("ky", "Kyrgyz"),
("la", "Latin"),
("ln", "Lingala"),
("lo", "Lao"),
("lt", "Lithuanian"),
("lv", "Latvian"),
("mg", "Malagasy"),
("mi", "Maori"),
("mk", "Macedonian"),
("ml", "Malayalam"),
("mn", "Mongolian"),
("mr", "Marathi"),
("ms", "Malay"),
("mt", "Maltese"),
("my", "Burmese"),
("na", "Nauru"),
("ne", "Nepali"),
("nl", "Dutch"),
("no", "Norwegian"),
("oc", "Occitan"),
("om", "Oromo"),
("or", "Oriya"),
("pa", "Panjabi"),
("pl", "Polish"),
("ps", "Pashto"),
("pt", "Portuguese"),
("qu", "Quechua"),
("rm", "Romansh"),
("rn", "Kirundi"),
("ro", "Romanian"),
("ru", "Russian"),
("rw", "Kinyarwanda"),
("sa", "Sanskrit"),
("sd", "Sindhi"),
("sg", "Sango"),
("si", "Sinhala"),
("sk", "Slovak"),
("sl", "Slovenian"),
("sm", "Samoan"),
("sn", "Shona"),
("so", "Somali"),
("sq", "Albanian"),
("sr", "Serbian"),
("ss", "Swati"),
("st", "Southern Sotho"),
("su", "Sudanese"),
("sv", "Swedish"),
("sw", "Swahili"),
("ta", "Tamil"),
("te", "Telugu"),
("tg", "Tajik"),
("th", "Thai"),
("ti", "Tigrinya"),
("tk", "Turkmen"),
("tl", "Tagalog"),
("tn", "Tswana"),
("to", "Tonga"),
("tr", "Turkish"),
("ts", "Tsonga"),
("tt", "Tatar"),
("tw", "Twi"),
("ug", "Uyghur"),
("uk", "Ukrainian"),
("ur", "Urdu"),
("uz", "Uzbek"),
("vi", "Vietnamese"),
("vo", "Volapuk"),
("wo", "Wolof"),
("xh", "Xhosa"),
("yi", "Yiddish"),
("yo", "Yoruba"),
("za", "Zhuang"),
("zh", "Chinese"),
("zu", "Zulu"),
("nb_NO", "Norwegian Bokmal"),
("pt_BR", "Brazilian Portuguese"),
("es_MX", "Mexican Spanish"),
("uk_UA", "Ukrainian"),
("zh_CN", "Simplified Chinese"),
("zh_TW", "Traditional Chinese"),
],
default="en",
help_text="The language the project documentation is rendered in. Note: this affects your project's URL.",
max_length=20,
verbose_name="Language",
),
),
migrations.AlterField(
model_name="project",
name="privacy_level",
field=models.CharField(
choices=[
("public", "Public"),
("protected", "Protected"),
("private", "Private"),
],
default="public",
help_text="Level of privacy that you want on the repository. Protected means public but not in listings.",
max_length=20,
verbose_name="Privacy Level",
),
),
migrations.AlterField(
model_name="project",
name="python_interpreter",
field=models.CharField(
choices=[("python", "CPython 2.x"), ("python3", "CPython 3.x")],
default="python",
help_text="The Python interpreter used to create the virtual environment.",
max_length=20,
verbose_name="Python Interpreter",
),
),
migrations.AlterField(
model_name="project",
name="version_privacy_level",
field=models.CharField(
choices=[
("public", "Public"),
("protected", "Protected"),
("private", "Private"),
],
default="public",
help_text="Default level of privacy you want on built versions of documentation.",
max_length=20,
verbose_name="Version Privacy Level",
),
),
]
| Migration |
python | paramiko__paramiko | tests/test_ssh_gss.py | {
"start": 1170,
"end": 2187
} | class ____(paramiko.ServerInterface):
def get_allowed_auths(self, username):
return "gssapi-with-mic,publickey"
def check_auth_gssapi_with_mic(
self, username, gss_authenticated=paramiko.AUTH_FAILED, cc_file=None
):
if gss_authenticated == paramiko.AUTH_SUCCESSFUL:
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def enable_auth_gssapi(self):
return True
def check_auth_publickey(self, username, key):
try:
expected = FINGERPRINTS[key.get_name()]
except KeyError:
return paramiko.AUTH_FAILED
else:
if key.get_fingerprint() == expected:
return paramiko.AUTH_SUCCESSFUL
return paramiko.AUTH_FAILED
def check_channel_request(self, kind, chanid):
return paramiko.OPEN_SUCCEEDED
def check_channel_exec_request(self, channel, command):
if command != b"yes":
return False
return True
@needs_gssapi
| NullServer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructorCallable2.py | {
"start": 1491,
"end": 1840
} | class ____:
def __new__(cls) -> Class6Proxy:
# This should generate an error because "cls" isn't compatible.
return Class6Proxy.__new__(cls)
def __init__(self, x: int) -> None:
pass
r6 = accepts_callable(Class6)
reveal_type(r6, expected_text="() -> Class6Proxy")
reveal_type(r6(), expected_text="Class6Proxy")
| Class6 |
python | google__jax | tests/linalg_test.py | {
"start": 84686,
"end": 94600
} | class ____(jtu.JaxTestCase):
"""Tests for lax.linalg primitives."""
@jtu.sample_product(
n=[0, 4, 5, 50],
dtype=float_types + complex_types,
lower=[True, False],
sort_eigenvalues=[True, False],
)
def testEigh(self, n, dtype, lower, sort_eigenvalues):
implementations = [
None,
lax.linalg.EighImplementation.QR,
lax.linalg.EighImplementation.JACOBI,
lax.linalg.EighImplementation.QDWH,
]
for implementation in implementations:
if (
implementation == lax.linalg.EighImplementation.QR
and jtu.test_device_matches(["tpu"])
):
continue
if (
implementation == lax.linalg.EighImplementation.JACOBI
and jtu.test_device_matches(["cpu"])
):
continue
if (
implementation == lax.linalg.EighImplementation.QDWH
and jtu.test_device_matches(["cpu", "gpu"])
):
continue
rng = jtu.rand_default(self.rng())
tol = 1e-3
args_maker = lambda: [rng((n, n), dtype)]
a, = args_maker()
a = (a + np.conj(a.T)) / 2
v, w = lax.linalg.eigh(np.tril(a) if lower else np.triu(a),
lower=lower, symmetrize_input=False,
sort_eigenvalues=sort_eigenvalues,
implementation=implementation)
w = np.asarray(w)
v = np.asarray(v)
self.assertLessEqual(
np.linalg.norm(np.eye(n) - np.matmul(np.conj(T(v)), v)), 1e-3)
self.assertLessEqual(np.linalg.norm(np.matmul(a, v) - w * v),
tol * np.linalg.norm(a))
w_expected, v_expected = np.linalg.eigh(np.asarray(a))
self.assertAllClose(w_expected, w if sort_eigenvalues else np.sort(w),
rtol=1e-4, atol=1e-4)
def run_eigh_tridiagonal_test(self, alpha, beta):
n = alpha.shape[-1]
# scipy.linalg.eigh_tridiagonal doesn't support complex inputs, so for
# this we call the slower numpy.linalg.eigh.
if np.issubdtype(alpha.dtype, np.complexfloating):
tridiagonal = np.diag(alpha) + np.diag(beta, 1) + np.diag(
np.conj(beta), -1)
eigvals_expected, _ = np.linalg.eigh(tridiagonal)
else:
eigvals_expected = scipy.linalg.eigh_tridiagonal(
alpha, beta, eigvals_only=True)
eigvals = jax.scipy.linalg.eigh_tridiagonal(
alpha, beta, eigvals_only=True)
finfo = np.finfo(alpha.dtype)
atol = 4 * np.sqrt(n) * finfo.eps * np.amax(np.abs(eigvals_expected))
self.assertAllClose(eigvals_expected, eigvals, atol=atol, rtol=1e-4)
@jtu.sample_product(
n=[1, 2, 3, 7, 8, 100],
dtype=float_types + complex_types,
)
def testToeplitz(self, n, dtype):
for a, b in [[2, -1], [1, 0], [0, 1], [-1e10, 1e10], [-1e-10, 1e-10]]:
alpha = a * np.ones([n], dtype=dtype)
beta = b * np.ones([n - 1], dtype=dtype)
self.run_eigh_tridiagonal_test(alpha, beta)
@jtu.sample_product(
n=[1, 2, 3, 7, 8, 100],
dtype=float_types + complex_types,
)
def testRandomUniform(self, n, dtype):
alpha = jtu.rand_uniform(self.rng())((n,), dtype)
beta = jtu.rand_uniform(self.rng())((n - 1,), dtype)
self.run_eigh_tridiagonal_test(alpha, beta)
@jtu.sample_product(dtype=float_types + complex_types)
def testSelect(self, dtype):
n = 5
alpha = jtu.rand_uniform(self.rng())((n,), dtype)
beta = jtu.rand_uniform(self.rng())((n - 1,), dtype)
eigvals_all = jax.scipy.linalg.eigh_tridiagonal(alpha, beta, select="a",
eigvals_only=True)
eps = np.finfo(alpha.dtype).eps
atol = 2 * n * eps
for first in range(n - 1):
for last in range(first + 1, n - 1):
# Check that we get the expected eigenvalues by selecting by
# index range.
eigvals_index = jax.scipy.linalg.eigh_tridiagonal(
alpha, beta, select="i", select_range=(first, last),
eigvals_only=True)
self.assertAllClose(
eigvals_all[first:(last + 1)], eigvals_index, atol=atol)
@jtu.sample_product(shape=[(3,), (3, 4), (3, 4, 5)],
dtype=float_types + complex_types)
def test_tridiagonal_solve(self, shape, dtype):
if dtype not in float_types and jtu.test_device_matches(["gpu"]):
self.skipTest("Data type not supported on GPU")
rng = self.rng()
d = 1.0 + jtu.rand_positive(rng)(shape, dtype)
dl = jtu.rand_default(rng)(shape, dtype)
du = jtu.rand_default(rng)(shape, dtype)
b = jtu.rand_default(rng)(shape + (1,), dtype)
x = lax.linalg.tridiagonal_solve(dl, d, du, b)
def build_tri(dl, d, du):
return jnp.diag(d) + jnp.diag(dl[1:], -1) + jnp.diag(du[:-1], 1)
for _ in shape[:-1]:
build_tri = jax.vmap(build_tri)
a = build_tri(dl, d, du)
with jax.default_matmul_precision("float32"):
self.assertAllClose(a @ x, b, atol={
np.float32: 1e-3, np.float64: 1e-10, np.complex64: 1e-3,
np.complex128: 1e-10})
def test_tridiagonal_solve_endpoints(self):
# tridagonal_solve shouldn't depend on the endpoints being explicitly zero.
dtype = np.float32
size = 10
dl = np.linspace(-1.0, 1.0, size, dtype=dtype)
dlz = np.copy(dl)
dlz[0] = 0.0
d = np.linspace(1.0, 2.0, size, dtype=dtype)
du = np.linspace(1.0, -1.0, size, dtype=dtype)
duz = np.copy(du)
duz[-1] = 0.0
b = np.linspace(0.1, -0.1, size, dtype=dtype)[:, None]
self.assertAllClose(
lax.linalg.tridiagonal_solve(dl, d, du, b),
lax.linalg.tridiagonal_solve(dlz, d, duz, b),
)
@jtu.sample_product(shape=[(3,), (3, 4)], dtype=float_types + complex_types)
def test_tridiagonal_solve_grad(self, shape, dtype):
if dtype not in float_types and jtu.test_device_matches(["gpu"]):
self.skipTest("Data type not supported on GPU")
rng = self.rng()
d = 1.0 + jtu.rand_positive(rng)(shape, dtype)
dl = jtu.rand_default(rng)(shape, dtype)
du = jtu.rand_default(rng)(shape, dtype)
b = jtu.rand_default(rng)(shape + (1,), dtype)
args = (dl, d, du, b)
jtu.check_grads(lax.linalg.tridiagonal_solve, args, order=2, atol=1e-1,
rtol=1e-1)
@jtu.sample_product(
shape=[(4, 4), (15, 15), (50, 50), (100, 100)],
dtype=float_types + complex_types,
)
@jtu.run_on_devices("cpu")
def testSchur(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args = rng(shape, dtype)
Ts, Ss = lax.linalg.schur(args)
eps = np.finfo(dtype).eps
self.assertAllClose(args, Ss @ Ts @ jnp.conj(Ss.T), atol=600 * eps)
self.assertAllClose(
np.eye(*shape, dtype=dtype), Ss @ jnp.conj(Ss.T), atol=100 * eps
)
@jtu.sample_product(
shape=[(2, 2), (4, 4), (15, 15), (50, 50), (100, 100)],
dtype=float_types + complex_types,
)
@jtu.run_on_devices("cpu")
def testSchurBatching(self, shape, dtype):
rng = jtu.rand_default(self.rng())
batch_size = 10
shape = (batch_size,) + shape
args = rng(shape, dtype)
reconstruct = vmap(lambda S, T: S @ T @ jnp.conj(S.T))
Ts, Ss = vmap(lax.linalg.schur)(args)
self.assertAllClose(reconstruct(Ss, Ts), args, atol=1e-4)
@jtu.sample_product(
shape=[(2, 3), (2, 3, 4), (2, 3, 4, 5)],
dtype=float_types + complex_types,
)
def testMatrixTranspose(self, shape, dtype):
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng(shape, dtype)]
jnp_fun = jnp.linalg.matrix_transpose
np_fun = np.linalg.matrix_transpose
self._CheckAgainstNumpy(np_fun, jnp_fun, args_maker)
self._CompileAndCheck(jnp_fun, args_maker)
@jtu.sample_product(
n=[0, 1, 5, 10, 20],
)
def testHilbert(self, n):
args_maker = lambda: []
osp_fun = partial(osp.linalg.hilbert, n=n)
jsp_fun = partial(jsp.linalg.hilbert, n=n)
self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker)
self._CompileAndCheck(jsp_fun, args_maker)
@jtu.sample_product(
shape=[(5, 1), (10, 4), (128, 12)],
dtype=float_types,
symmetrize_output=[True, False],
)
@jtu.skip_on_devices("tpu")
def testSymmetricProduct(self, shape, dtype, symmetrize_output):
rng = jtu.rand_default(self.rng())
batch_size = 10
atol = 1e-6 if dtype == jnp.float64 else 1e-3
a_matrix = rng((batch_size,) + shape, dtype)
c_shape = a_matrix.shape[:-1] + (a_matrix.shape[-2],)
c_matrix = jnp.zeros(c_shape, dtype)
old_product = jnp.einsum("...ij,...kj->...ik", a_matrix, a_matrix,
precision=lax.Precision.HIGHEST)
new_product = lax_linalg.symmetric_product(
a_matrix, c_matrix, symmetrize_output=symmetrize_output)
new_product_with_batching = jax.vmap(
lambda a, c: lax_linalg.symmetric_product(
a, c, symmetrize_output=symmetrize_output),
in_axes=(0, 0))(a_matrix, c_matrix)
if not symmetrize_output:
old_product = jnp.tril(old_product)
new_product = jnp.tril(new_product)
new_product_with_batching = jnp.tril(new_product_with_batching)
self.assertAllClose(new_product, old_product, atol=atol)
self.assertAllClose(
new_product_with_batching, old_product, atol=atol)
@jtu.sample_product(
n=[0, 1, 5, 10, 20],
kind=["symmetric", "lower", "upper"],
)
@jax.default_matmul_precision("float32")
def testPascal(self, n, kind):
args_maker = lambda: []
osp_fun = partial(osp.linalg.pascal, n=n, kind=kind, exact=False)
jsp_fun = partial(jsp.linalg.pascal, n=n, kind=kind)
self._CheckAgainstNumpy(osp_fun,
jsp_fun, args_maker,
atol=1e-3,
rtol=1e-2 if jtu.test_device_matches(['tpu']) else 1e-3,
check_dtypes=False)
self._CompileAndCheck(jsp_fun, args_maker)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| LaxLinalgTest |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 226617,
"end": 226963
} | class ____(VegaLiteSchema):
"""ConditionalPredicateValueDefFontWeightnullExprRef schema wrapper."""
_schema = {
"$ref": "#/definitions/ConditionalPredicate<(ValueDef<(FontWeight|null)>|ExprRef)>"
}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ConditionalPredicateValueDefFontWeightnullExprRef |
python | keon__algorithms | tests/test_strings.py | {
"start": 14146,
"end": 14348
} | class ____(unittest.TestCase):
def test_strong_password(self):
self.assertEqual(3, strong_password(3, "Ab1"))
self.assertEqual(1, strong_password(11, "#Algorithms"))
| TestStrongPassword |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/variables/variable_scope_test.py | {
"start": 60782,
"end": 66301
} | class ____(test.TestCase):
# Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testResultNameMatchesRequested(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v.name, "scope0/name0")
v_concat = v.as_tensor()
self.assertEqual(v_concat.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testBreaksIfPartitioningChanges(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into3_partitioner, reuse=True):
with self.assertRaisesRegex(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions "
".* and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into1_partitioner, reuse=True):
with self.assertRaisesRegex(
ValueError,
"Trying to reuse partitioned variable .* but specified partitions "
".* and found partitions .*"):
variable_scope.get_variable("name0", shape=(3, 1, 1))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testReturnsExistingConcatenatedValueIfReuse(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
variable_scope.get_variable_scope().reuse_variables()
v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertEqual(v_concat, v_concat_2)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testAllowsReuseWithoutPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=(3, 1, 1))
with variable_scope.variable_scope("scope0", reuse=True):
v_reused = variable_scope.get_variable("name0")
self.assertIs(v, v_reused)
def testNoReuseInEagerByDefault(self):
with context.eager_mode():
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v1 = variable_scope.get_variable("name0", shape=(3, 1, 1))
v2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
self.assertIsNot(v1, v2)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPropagatePartitionerOnReopening(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner) as vs:
self.assertEqual(axis0_into2_partitioner, vs.partitioner)
with variable_scope.variable_scope(vs) as vs1:
self.assertEqual(axis0_into2_partitioner, vs1.partitioner)
# Not converted to use wrap_function because of
# obtaining different results in the eager case compared to the graph one
@test_util.run_deprecated_v1
def testScalarIgnoresPartitioner(self):
with variable_scope.variable_scope(
"scope0", partitioner=axis0_into2_partitioner):
v = variable_scope.get_variable("name0", shape=())
self.assertEqual(v.name, "scope0/name0:0")
variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
self.assertIn("scope0/name0:0", [x.name for x in variables])
def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource):
def _part_axis_0(**unused_kwargs):
return (2, 1, 1)
def _part_axis_1(**unused_kwargs):
return (1, 2, 1)
with variable_scope.variable_scope("root", use_resource=use_resource):
v0 = variable_scope.get_variable(
"n0", shape=(2, 2, 2), partitioner=_part_axis_0)
v1 = variable_scope.get_variable(
"n1", shape=(2, 2, 2), partitioner=_part_axis_1)
self.assertEqual(v0.get_shape(), (2, 2, 2))
self.assertEqual(v1.get_shape(), (2, 2, 2))
n0_0 = list(v0)[0]
n0_1 = list(v0)[1]
self.assertEqual(n0_0.get_shape(), (1, 2, 2))
self.assertEqual(n0_1.get_shape(), (1, 2, 2))
n1_0 = list(v1)[0]
n1_1 = list(v1)[1]
self.assertEqual(n1_0.get_shape(), (2, 1, 2))
self.assertEqual(n1_1.get_shape(), (2, 1, 2))
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPartitionConcatenatesAlongCorrectAxis(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False)
@test_util.run_in_graph_and_eager_modes
@run_inside_wrap_function_in_eager_mode
def testPartitionConcatenatesAlongCorrectAxisResource(self):
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
def testPartitionConcatenatesAlongCorrectAxisResourceInEager(self):
with context.eager_mode():
self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
| VariableScopeWithPartitioningTest |
python | walkccc__LeetCode | solutions/1116. Print Zero Even Odd/1116.py | {
"start": 34,
"end": 870
} | class ____:
def __init__(self, n):
self.n = n
self.zeroSemaphore = Semaphore(1)
self.evenSemaphore = Semaphore(0)
self.oddSemaphore = Semaphore(0)
# printNumber(x) outputs "x", where x is an integer.
def zero(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(self.n):
self.zeroSemaphore.acquire()
printNumber(0)
(self.oddSemaphore if i & 2 == 0 else self.evenSemaphore).release()
def even(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(2, self.n + 1, 2):
self.evenSemaphore.acquire()
printNumber(i)
self.zeroSemaphore.release()
def odd(self, printNumber: 'Callable[[int], None]') -> None:
for i in range(1, self.n + 1, 2):
self.oddSemaphore.acquire()
printNumber(i)
self.zeroSemaphore.release()
| ZeroEvenOdd |
python | apache__airflow | task-sdk/src/airflow/sdk/io/path.py | {
"start": 2623,
"end": 14775
} | class ____(CloudPath):
"""A path-like object for object storage."""
__version__: ClassVar[int] = 1
_protocol_dispatch = False
sep: ClassVar[str] = "/"
root_marker: ClassVar[str] = "/"
__slots__ = ("_hash_cached",)
@classmethod
def _transform_init_args(
cls,
args: tuple[str | os.PathLike, ...],
protocol: str,
storage_options: dict[str, Any],
) -> tuple[tuple[str | os.PathLike, ...], str, dict[str, Any]]:
"""Extract conn_id from the URL and set it as a storage option."""
if args:
arg0 = args[0]
parsed_url = urlsplit(stringify_path(arg0))
userinfo, have_info, hostinfo = parsed_url.netloc.rpartition("@")
if have_info:
storage_options.setdefault("conn_id", userinfo or None)
parsed_url = parsed_url._replace(netloc=hostinfo)
args = (parsed_url.geturl(),) + args[1:]
protocol = protocol or parsed_url.scheme
return args, protocol, storage_options
@classmethod
def _fs_factory(
cls, urlpath: str, protocol: str, storage_options: Mapping[str, Any]
) -> AbstractFileSystem:
return attach(protocol or "file", storage_options.get("conn_id")).fs
def __hash__(self) -> int:
self._hash_cached: int
try:
return self._hash_cached
except AttributeError:
self._hash_cached = hash(str(self))
return self._hash_cached
def __eq__(self, other: Any) -> bool:
return self.samestore(other) and str(self) == str(other)
def samestore(self, other: Any) -> bool:
return (
isinstance(other, ObjectStoragePath)
and self.protocol == other.protocol
and self.storage_options.get("conn_id") == other.storage_options.get("conn_id")
)
@property
def container(self) -> str:
return self.bucket
@property
def bucket(self) -> str:
if self._url:
return self._url.netloc
return ""
@property
def key(self) -> str:
if self._url:
# per convention, we strip the leading slashes to ensure a relative key is returned
# we keep the trailing slash to allow for directory-like semantics
return self._url.path.lstrip(self.sep)
return ""
@property
def namespace(self) -> str:
return f"{self.protocol}://{self.bucket}" if self.bucket else self.protocol
def open(self, mode="r", **kwargs):
"""Open the file pointed to by this path."""
kwargs.setdefault("block_size", kwargs.pop("buffering", None))
return _TrackingFileWrapper(self, self.fs.open(self.path, mode=mode, **kwargs))
def stat(self) -> stat_result: # type: ignore[override]
"""Call ``stat`` and return the result."""
return stat_result(
self.fs.stat(self.path),
protocol=self.protocol,
conn_id=self.storage_options.get("conn_id"),
)
def samefile(self, other_path: Any) -> bool:
"""Return whether other_path is the same or not as this file."""
if not isinstance(other_path, ObjectStoragePath):
return False
st = self.stat()
other_st = other_path.stat()
return (
st["protocol"] == other_st["protocol"]
and st["conn_id"] == other_st["conn_id"]
and st["ino"] == other_st["ino"]
)
def _scandir(self):
# Emulate os.scandir(), which returns an object that can be used as a
# context manager.
return contextlib.nullcontext(self.iterdir())
def replace(self, target) -> ObjectStoragePath:
"""
Rename this path to the target path, overwriting if that path exists.
The target path may be absolute or relative. Relative paths are
interpreted relative to the current working directory, *not* the
directory of the Path object.
Returns the new Path instance pointing to the target path.
"""
return self.rename(target)
@classmethod
def cwd(cls):
if cls is ObjectStoragePath:
return get_upath_class("").cwd()
raise NotImplementedError
@classmethod
def home(cls):
if cls is ObjectStoragePath:
return get_upath_class("").home()
raise NotImplementedError
# EXTENDED OPERATIONS
def ukey(self) -> str:
"""Hash of file properties, to tell if it has changed."""
return self.fs.ukey(self.path)
def checksum(self) -> int:
"""Return the checksum of the file at this path."""
# we directly access the fs here to avoid changing the abstract interface
return self.fs.checksum(self.path)
def read_block(self, offset: int, length: int, delimiter=None):
r"""
Read a block of bytes.
Starting at ``offset`` of the file, read ``length`` bytes. If
``delimiter`` is set then we ensure that the read starts and stops at
delimiter boundaries that follow the locations ``offset`` and ``offset
+ length``. If ``offset`` is zero then we start at zero. The
bytestring returned WILL include the end delimiter string.
If offset+length is beyond the eof, reads to eof.
:param offset: int
Byte offset to start read
:param length: int
Number of bytes to read. If None, read to the end.
:param delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
Examples
--------
.. code-block:: pycon
# Read the first 13 bytes (no delimiter)
>>> read_block(0, 13)
b'Alice, 100\nBo'
# Read first 13 bytes, but force newline boundaries
>>> read_block(0, 13, delimiter=b"\n")
b'Alice, 100\nBob, 200\n'
# Read until EOF, but only stop at newline
>>> read_block(0, None, delimiter=b"\n")
b'Alice, 100\nBob, 200\nCharlie, 300'
See Also
--------
:func:`fsspec.utils.read_block`
"""
return self.fs.read_block(self.path, offset=offset, length=length, delimiter=delimiter)
def sign(self, expiration: int = 100, **kwargs):
"""
Create a signed URL representing the given path.
Some implementations allow temporary URLs to be generated, as a
way of delegating credentials.
:param path: str
The path on the filesystem
:param expiration: int
Number of seconds to enable the URL for (if supported)
:returns URL: str
The signed URL
:raises NotImplementedError: if the method is not implemented for a store
"""
return self.fs.sign(self.path, expiration=expiration, **kwargs)
def size(self) -> int:
"""Size in bytes of the file at this path."""
return self.fs.size(self.path)
def _cp_file(self, dst: ObjectStoragePath, **kwargs):
"""Copy a single file from this path to another location by streaming the data."""
# create the directory or bucket if required
if dst.key.endswith(self.sep) or not dst.key:
dst.mkdir(exist_ok=True, parents=True)
dst = dst / self.key
elif dst.is_dir():
dst = dst / self.key
# streaming copy
with self.open("rb") as f1, dst.open("wb") as f2:
# make use of system dependent buffer size
shutil.copyfileobj(f1, f2, **kwargs)
def copy(self, dst: str | ObjectStoragePath, recursive: bool = False, **kwargs) -> None:
"""
Copy file(s) from this path to another location.
For remote to remote copies, the key used for the destination will be the same as the source.
So that s3://src_bucket/foo/bar will be copied to gcs://dst_bucket/foo/bar and not
gcs://dst_bucket/bar.
:param dst: Destination path
:param recursive: If True, copy directories recursively.
kwargs: Additional keyword arguments to be passed to the underlying implementation.
"""
from airflow.lineage.hook import get_hook_lineage_collector
if isinstance(dst, str):
dst = ObjectStoragePath(dst)
if self.samestore(dst) or self.protocol == "file" or dst.protocol == "file":
# only emit this in "optimized" variants - else lineage will be captured by file writes/reads
get_hook_lineage_collector().add_input_asset(context=self, uri=str(self))
get_hook_lineage_collector().add_output_asset(context=dst, uri=str(dst))
# same -> same
if self.samestore(dst):
self.fs.copy(self.path, dst.path, recursive=recursive, **kwargs)
return
# use optimized path for local -> remote or remote -> local
if self.protocol == "file":
dst.fs.put(self.path, dst.path, recursive=recursive, **kwargs)
return
if dst.protocol == "file":
self.fs.get(self.path, dst.path, recursive=recursive, **kwargs)
return
if not self.exists():
raise FileNotFoundError(f"{self} does not exist")
# remote dir -> remote dir
if self.is_dir():
if dst.is_file():
raise ValueError("Cannot copy directory to a file.")
dst.mkdir(exist_ok=True, parents=True)
out = self.fs.expand_path(self.path, recursive=True, **kwargs)
for path in out:
# this check prevents one extra call to is_dir() as
# glob returns self as well
if path == self.path:
continue
src_obj = ObjectStoragePath(
path,
protocol=self.protocol,
conn_id=self.storage_options.get("conn_id"),
)
# skip directories, empty directories will not be created
if src_obj.is_dir():
continue
src_obj._cp_file(dst)
return
# remote file -> remote dir
self._cp_file(dst, **kwargs)
def move(self, path: str | ObjectStoragePath, recursive: bool = False, **kwargs) -> None:
"""
Move file(s) from this path to another location.
:param path: Destination path
:param recursive: bool
If True, move directories recursively.
kwargs: Additional keyword arguments to be passed to the underlying implementation.
"""
from airflow.lineage.hook import get_hook_lineage_collector
if isinstance(path, str):
path = ObjectStoragePath(path)
if self.samestore(path):
get_hook_lineage_collector().add_input_asset(context=self, uri=str(self))
get_hook_lineage_collector().add_output_asset(context=path, uri=str(path))
return self.fs.move(self.path, path.path, recursive=recursive, **kwargs)
# non-local copy
self.copy(path, recursive=recursive, **kwargs)
self.unlink()
def serialize(self) -> dict[str, Any]:
_kwargs = {**self.storage_options}
conn_id = _kwargs.pop("conn_id", None)
return {
"path": str(self),
"conn_id": conn_id,
"kwargs": _kwargs,
}
@classmethod
def deserialize(cls, data: dict, version: int) -> ObjectStoragePath:
if version > cls.__version__:
raise ValueError(f"Cannot deserialize version {version} with version {cls.__version__}.")
_kwargs = data.pop("kwargs")
path = data.pop("path")
conn_id = data.pop("conn_id", None)
return ObjectStoragePath(path, conn_id=conn_id, **_kwargs)
def __str__(self):
conn_id = self.storage_options.get("conn_id")
if self._protocol and conn_id:
return f"{self._protocol}://{conn_id}@{self.path}"
return super().__str__()
| ObjectStoragePath |
python | tensorflow__tensorflow | tensorflow/python/distribute/parameter_server_strategy_v2_test.py | {
"start": 26847,
"end": 29678
} | class ____(test.TestCase):
def testArbitraryJobName(self):
    """A cluster spec containing an unknown job name must be rejected."""
    spec = multi_worker_test_base.create_cluster_spec(
        num_workers=1, num_ps=1, has_chief=True)
    port = multi_worker_test_base.pick_unused_port()
    spec["some_arbitrary_name"] = [f"localhost:{port}"]
    resolver = cluster_resolver_lib.SimpleClusterResolver(
        server_lib.ClusterSpec(spec), rpc_layer="grpc")
    with self.assertRaisesRegex(ValueError, "Disallowed task type found in"):
        parameter_server_strategy_v2.ParameterServerStrategyV2(resolver)
def testArbitraryCurrentTaskType(self):
    """A resolver whose own task_type is unrecognized must be rejected."""
    spec = multi_worker_test_base.create_cluster_spec(
        num_workers=1, num_ps=1, has_chief=True)
    resolver = cluster_resolver_lib.SimpleClusterResolver(
        server_lib.ClusterSpec(spec),
        rpc_layer="grpc",
        task_type="foobar",
    )
    with self.assertRaisesRegex(ValueError, "Unrecognized task_type: foobar"):
        parameter_server_strategy_v2.ParameterServerStrategyV2(resolver)
def testMoreThanOneChief(self):
    """More than one 'chief' job in the cluster spec must be rejected."""
    spec = multi_worker_test_base.create_cluster_spec(num_workers=1, num_ps=1)
    spec["chief"] = [
        f"localhost:{multi_worker_test_base.pick_unused_port()}"
        for _ in range(3)
    ]
    resolver = cluster_resolver_lib.SimpleClusterResolver(
        server_lib.ClusterSpec(spec),
        rpc_layer="grpc",
        task_type="chief",
        task_id=1)
    with self.assertRaisesRegex(ValueError,
                                "There must be at most one 'chief' job."):
        parameter_server_strategy_v2.ParameterServerStrategyV2(resolver)
def testLessThanOneWorker(self):
    """A cluster spec with zero workers must be rejected."""
    spec = multi_worker_test_base.create_cluster_spec(
        num_workers=0, num_ps=1, has_chief=True)
    resolver = cluster_resolver_lib.SimpleClusterResolver(
        server_lib.ClusterSpec(spec),
        rpc_layer="grpc",
        task_type="ps",
        task_id=0,
    )
    with self.assertRaisesRegex(ValueError,
                                "There must be at least one worker."):
        parameter_server_strategy_v2.ParameterServerStrategyV2(resolver)
def testLessThanOnePs(self):
    """A cluster spec with zero parameter servers must be rejected."""
    spec = multi_worker_test_base.create_cluster_spec(
        num_workers=1, num_ps=0, has_chief=True)
    resolver = cluster_resolver_lib.SimpleClusterResolver(
        server_lib.ClusterSpec(spec),
        rpc_layer="grpc",
        task_type="worker",
        task_id=0)
    with self.assertRaisesRegex(ValueError, "There must be at least one ps."):
        parameter_server_strategy_v2.ParameterServerStrategyV2(resolver)
if __name__ == "__main__":
    # Enable TF2 behavior before launching the multi-process test runner,
    # so every spawned subprocess inherits the v2 execution mode.
    v2_compat.enable_v2_behavior()
    multi_process_runner.test_main()
| ClusterTypeNameTest |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.