language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | scrapy__scrapy | tests/test_downloader_handlers_http_base.py | {
"start": 27066,
"end": 27161
} | class ____(TestSimpleHttpsBase):
cipher_string = "CAMELLIA256-SHA"
| TestHttpsCustomCiphersBase |
python | spyder-ide__spyder | spyder/plugins/toolbar/container.py | {
"start": 1184,
"end": 1276
} | class ____:
Main = "main_section"
Secondary = "secondary_section"
| ToolbarsMenuSections |
python | scrapy__scrapy | tests/test_spidermiddleware_output_chain.py | {
"start": 1694,
"end": 2033
} | class ____(RecoverySpider):
name = "RecoveryAsyncGenSpider"
async def parse(self, response):
for r in super().parse(response):
yield r
# ================================================================================
# (1) exceptions from a spider middleware's process_spider_input method
| RecoveryAsyncGenSpider |
python | getsentry__sentry | src/sentry/grouping/variants.py | {
"start": 2990,
"end": 5764
} | class ____(BaseVariant):
"""A variant that produces a hash from the `BaseGroupingComponent` it encloses."""
type = "component"
def __init__(
self,
# The root of the component tree
root_component: RootGroupingComponent,
# The highest non-root contributing component in the tree, representing the overall grouping
# method (exception, threads, message, etc.). For non-contributing variants, this will be
# None.
contributing_component: ContributingComponent | None,
strategy_config: StrategyConfiguration,
):
self.root_component = root_component
self.config = strategy_config
self.contributing_component = contributing_component
self.variant_name = self.root_component.id # "app", "system", or "default"
@property
def key(self) -> str:
"""
Create a key for this variant in the grouping info dictionary.
"""
key = self.root_component.key
if self.variant_name in ["app", "system"]:
key = f"{self.variant_name}_{key}"
return key
@property
def description(self) -> str:
return self.root_component.description
@property
def contributes(self) -> bool:
return self.root_component.contributes
@property
def hint(self) -> str | None:
return self.root_component.hint
def get_hash(self) -> str | None:
return self.root_component.get_hash()
def _get_metadata_as_dict(self) -> Mapping[str, Any]:
return {"component": self.root_component.as_dict()}
def __repr__(self) -> str:
return super().__repr__() + f" contributes={self.contributes} ({self.description})"
def expose_fingerprint_dict(
fingerprint: list[str], fingerprint_info: FingerprintInfo
) -> FingerprintVariantMetadata:
rv: FingerprintVariantMetadata = {
"values": fingerprint,
}
client_fingerprint = fingerprint_info.get("client_fingerprint")
if client_fingerprint and (
len(client_fingerprint) != 1 or not is_default_fingerprint_var(client_fingerprint[0])
):
rv["client_values"] = client_fingerprint
matched_rule = fingerprint_info.get("matched_rule")
if matched_rule:
# TODO: Before late October 2024, we didn't store the rule text along with the matched rule,
# meaning there are still events out there whose `_fingerprint_info` entry doesn't have it.
# Once those events have aged out (in February or so), we can remove the default value here
# and the `test_old_event_with_no_fingerprint_rule_text` test in `test_variants.py`.
rv["matched_rule"] = matched_rule.get("text", FingerprintRule.from_json(matched_rule).text)
return rv
| ComponentVariant |
python | mwaskom__seaborn | seaborn/_marks/base.py | {
"start": 559,
"end": 2843
} | class ____:
def __init__(
self,
val: Any = None,
depend: str | None = None,
rc: str | None = None,
auto: bool = False,
grouping: bool = True,
):
"""
Property that can be mapped from data or set directly, with flexible defaults.
Parameters
----------
val : Any
Use this value as the default.
depend : str
Use the value of this feature as the default.
rc : str
Use the value of this rcParam as the default.
auto : bool
The default value will depend on other parameters at compile time.
grouping : bool
If True, use the mapped variable to define groups.
"""
if depend is not None:
assert depend in PROPERTIES
if rc is not None:
assert rc in mpl.rcParams
self._val = val
self._rc = rc
self._depend = depend
self._auto = auto
self._grouping = grouping
def __repr__(self):
"""Nice formatting for when object appears in Mark init signature."""
if self._val is not None:
s = f"<{repr(self._val)}>"
elif self._depend is not None:
s = f"<depend:{self._depend}>"
elif self._rc is not None:
s = f"<rc:{self._rc}>"
elif self._auto:
s = "<auto>"
else:
s = "<undefined>"
return s
@property
def depend(self) -> Any:
"""Return the name of the feature to source a default value from."""
return self._depend
@property
def grouping(self) -> bool:
return self._grouping
@property
def default(self) -> Any:
"""Get the default value for this feature, or access the relevant rcParam."""
if self._val is not None:
return self._val
elif self._rc is not None:
return mpl.rcParams.get(self._rc)
# TODO where is the right place to put this kind of type aliasing?
MappableBool = Union[bool, Mappable]
MappableString = Union[str, Mappable]
MappableFloat = Union[float, Mappable]
MappableColor = Union[str, tuple, Mappable]
MappableStyle = Union[str, DashPattern, DashPatternWithOffset, Mappable]
@dataclass
| Mappable |
python | readthedocs__readthedocs.org | readthedocs/organizations/tests/test_views.py | {
"start": 9916,
"end": 16745
} | class ____(TestCase):
def setUp(self):
self.owner = get(User, username="owner")
self.member = get(User, username="member")
self.project = get(Project, slug="project")
self.project_b = get(Project, slug="project-b")
self.organization = get(
Organization,
owners=[self.owner],
projects=[self.project, self.project_b],
)
self.team = get(
Team,
organization=self.organization,
members=[self.member],
)
self.another_owner = get(User, username="another-owner")
self.another_member = get(User, username="another-member")
self.another_project = get(Project, slug="another-project")
self.another_organization = get(
Organization,
owners=[self.another_owner],
projects=[self.another_project],
)
self.another_team = get(
Team,
organization=self.another_organization,
members=[self.another_member],
)
self.client.force_login(self.owner)
actions = [
AuditLog.AUTHN,
AuditLog.AUTHN_FAILURE,
AuditLog.LOGOUT,
AuditLog.PAGEVIEW,
AuditLog.DOWNLOAD,
]
ips = [
"10.10.10.1",
"10.10.10.2",
]
users = [self.owner, self.member, self.another_owner, self.another_member]
projects = [self.project, self.project_b, self.another_project]
AuditLog.objects.all().delete()
for action, ip, user in itertools.product(actions, ips, users):
get(
AuditLog,
user=user,
action=action,
ip=ip,
)
for project in projects:
get(
AuditLog,
user=user,
action=action,
project=project,
ip=ip,
)
self.url = reverse("organization_security_log", args=[self.organization.slug])
self.queryset = AuditLog.objects.filter(
log_organization_id=self.organization.pk
)
def test_list_security_logs(self):
# Show logs for self.organization only.
resp = self.client.get(self.url)
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(auditlogs, self.queryset)
# Show logs filtered by project.
resp = self.client.get(self.url + "?project=project")
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(
auditlogs, self.queryset.filter(log_project_slug="project")
)
resp = self.client.get(self.url + "?project=another-project")
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertEqual(auditlogs.count(), 0)
# Show logs filtered by IP.
ip = "10.10.10.2"
resp = self.client.get(self.url + f"?ip={ip}")
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(auditlogs, self.queryset.filter(ip=ip))
# Show logs filtered by action.
for action in [
AuditLog.AUTHN,
AuditLog.AUTHN_FAILURE,
AuditLog.PAGEVIEW,
AuditLog.DOWNLOAD,
]:
resp = self.client.get(self.url + f"?action={action}")
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(auditlogs, self.queryset.filter(action=action))
# Show logs filtered by user.
resp = self.client.get(self.url + "?user=member")
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(
auditlogs, self.queryset.filter(log_user_username="member")
)
@mock.patch("django.utils.timezone.now")
def test_filter_by_date(self, now_mock):
date = timezone.datetime(year=2021, month=1, day=15)
now_mock.return_value = date
self.organization.pub_date = date
self.organization.save()
date = timezone.datetime(year=2021, month=3, day=10)
AuditLog.objects.all().update(created=date)
date = timezone.datetime(year=2021, month=2, day=13)
AuditLog.objects.filter(action=AuditLog.AUTHN).update(created=date)
date = timezone.datetime(year=2021, month=4, day=24)
AuditLog.objects.filter(action=AuditLog.AUTHN_FAILURE).update(created=date)
resp = self.client.get(self.url + "?date_before=2020-10-10")
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertEqual(auditlogs.count(), 0)
resp = self.client.get(self.url + "?date_after=2023-10-10")
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertEqual(auditlogs.count(), 0)
resp = self.client.get(self.url + "?date_before=2021-03-9")
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(auditlogs, self.queryset.filter(action=AuditLog.AUTHN))
resp = self.client.get(self.url + "?date_after=2021-03-11")
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(
auditlogs, self.queryset.filter(action=AuditLog.AUTHN_FAILURE)
)
resp = self.client.get(
self.url + "?date_after=2021-01-01&date_before=2021-03-10"
)
self.assertEqual(resp.status_code, 200)
auditlogs = resp.context_data["object_list"]
self.assertQuerySetEqual(
auditlogs, self.queryset.exclude(action=AuditLog.AUTHN_FAILURE)
)
def test_download_csv(self):
self.assertEqual(AuditLog.objects.count(), 160)
resp = self.client.get(self.url, {"download": "true"})
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp["Content-Type"], "text/csv")
# convert streaming data to csv format
content = [
line.decode() for line in b"".join(resp.streaming_content).splitlines()
]
csv_data = list(csv.reader(content))
# All records + the header.
self.assertEqual(
len(csv_data),
AuditLog.objects.filter(log_organization_id=self.organization.pk).count()
+ 1,
)
@override_settings(RTD_ALLOW_ORGANIZATIONS=True)
| OrganizationSecurityLogTests |
python | Textualize__textual | src/textual/widgets/_content_switcher.py | {
"start": 388,
"end": 4326
} | class ____(Container):
"""A widget for switching between different children.
Note:
All child widgets that are to be switched between need a unique ID.
Children that have no ID will be hidden and ignored.
"""
DEFAULT_CSS = """
ContentSwitcher {
height: auto;
}
"""
current: reactive[str | None] = reactive[Optional[str]](None, init=False)
"""The ID of the currently-displayed widget.
If set to `None` then no widget is visible.
Note:
If set to an unknown ID, this will result in
[`NoMatches`][textual.css.query.NoMatches] being raised.
"""
def __init__(
self,
*children: Widget,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
initial: str | None = None,
) -> None:
"""Initialise the content switching widget.
Args:
*children: The widgets to switch between.
name: The name of the content switcher.
id: The ID of the content switcher in the DOM.
classes: The CSS classes of the content switcher.
disabled: Whether the content switcher is disabled or not.
initial: The ID of the initial widget to show, ``None`` or empty string for the first tab.
Note:
If `initial` is not supplied no children will be shown to start with.
"""
super().__init__(
*children,
name=name,
id=id,
classes=classes,
disabled=disabled,
)
self._initial = initial
def _on_mount(self, _: Mount) -> None:
"""Perform the initial setup of the widget once the DOM is ready."""
initial = self._initial
with self.app.batch_update():
for child in self.children:
child.display = bool(initial) and child.id == initial
self._reactive_current = initial
@property
def visible_content(self) -> Widget | None:
"""A reference to the currently-visible widget.
`None` if nothing is visible.
"""
return self.get_child_by_id(self.current) if self.current is not None else None
def watch_current(self, old: str | None, new: str | None) -> None:
"""React to the current visible child choice being changed.
Args:
old: The old widget ID (or `None` if there was no widget).
new: The new widget ID (or `None` if nothing should be shown).
"""
with self.app.batch_update():
if old:
try:
self.get_child_by_id(old).display = False
except NoMatches:
pass
if new:
self.get_child_by_id(new).display = True
def add_content(
self, widget: Widget, *, id: str | None = None, set_current: bool = False
) -> AwaitComplete:
"""Add new content to the `ContentSwitcher`.
Args:
widget: A Widget to add.
id: ID for the widget, or `None` if the widget already has an ID.
set_current: Set the new widget as current (which will cause it to display).
Returns:
An awaitable to wait for the new content to be mounted.
"""
if id is not None and widget.id != id:
widget.id = id
if not widget.id:
raise ValueError(
"Widget must have an ID (or set id parameter when calling add_content)"
)
async def _add_content() -> None:
"""Add new widget and potentially change the current widget."""
widget.display = False
with self.app.batch_update():
await self.mount(widget)
if set_current:
self.current = widget.id
return AwaitComplete(_add_content())
| ContentSwitcher |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/variant_on_dependency_condition_b/package.py | {
"start": 216,
"end": 613
} | class ____(Package):
"""Test that dependencies that are conditional on the state of
other dependencies are added correctly, for instance:
depends_on('A')
depends_on('B', when='^A+x')
"""
homepage = "https://www.example.org"
url = "https://example.org/files/v3.4/cmake-3.4.3.tar.gz"
version("1.0", md5="4cb3ff35b2472aae70f542116d616e63")
| VariantOnDependencyConditionB |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/np_interop_test.py | {
"start": 1130,
"end": 2473
} | class ____(tf.test.TestCase):
def testBroadcastAdd(self):
x_np = np.ones([2, 1]) + np.ones([1, 2])
x_onp = onp.ones([2, 1]) + onp.ones([1, 2])
self.assertAllClose(x_onp, x_np)
def testTypePromotion(self):
x_np = np.ones([1, 2], dtype=np.int16) + np.ones([2, 1], dtype=np.uint8)
x_onp = np.ones([1, 2], dtype=np.int16) + np.ones([2, 1], dtype=np.uint8)
self.assertEqual(x_onp.dtype, x_np.dtype)
self.assertAllClose(x_onp, x_np)
def testTFInterop(self):
x_np = np.sum(np.ones([1, 2]) + tf.ones([2, 1]))
x_onp = onp.sum(onp.ones([1, 2]) + onp.ones([2, 1]))
self.assertAllClose(x_onp, x_np)
def testOnpInterop(self):
x_np = onp.sum(np.ones([1, 2]) + onp.ones([2, 1]))
x_onp = onp.sum(onp.ones([1, 2]) + onp.ones([2, 1]))
self.assertAllClose(x_onp, x_np)
def testDevice(self):
if tf.test.is_gpu_available():
with tf.device('GPU:0'):
x = np.ones([1, 2])
self.assertIn('GPU', tf.convert_to_tensor(x).device)
with tf.device('CPU:0'):
x = np.ones([1, 2])
self.assertIn('CPU', tf.convert_to_tensor(x).device)
def testFunction(self):
@tf.function
def f(x, y):
return np.sum(x + y)
x_np = f(np.ones([1, 2]), tf.ones([2, 1]))
x_onp = onp.sum(onp.ones([1, 2]) + onp.ones([2, 1]))
self.assertAllClose(x_onp, x_np)
| ReadmeTest |
python | huggingface__transformers | src/transformers/models/blt/modular_blt.py | {
"start": 38522,
"end": 41133
} | class ____(MllamaForCausalLM):
config: BltConfig
_can_compile_fullgraph = False
base_model_prefix = "model"
_tied_weights_keys = {"model.local_encoder.embed_tokens.weight": "lm_head.weight"}
def __init__(self, config: BltConfig):
super().__init__(config)
self.vocab_size = config.vocab_size
self.model = BltModel(config)
self.lm_head = nn.Linear(config.decoder_config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
cross_attention_states: Optional[torch.LongTensor] = None, # Keep for compatibility
cross_attention_mask: Optional[torch.LongTensor] = None,
full_text_row_masked_out_mask: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, CausalLMOutputWithPast]:
# Call parent forward but exclude cross_attention_states from model call
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
cross_attention_mask=cross_attention_mask,
full_text_row_masked_out_mask=full_text_row_masked_out_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :]).float()
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"BltPreTrainedModel",
"BltModel",
"BltPatcher",
"BltForCausalLM",
]
| BltForCausalLM |
python | google__jax | tests/pmap_test.py | {
"start": 129465,
"end": 129606
} | class ____(EagerPmapMixin, ArrayPmapTest):
pass
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| ArrayPmapEagerTest |
python | django__django | tests/admin_views/models.py | {
"start": 27926,
"end": 28041
} | class ____(User):
"""Proxy a model with a different app_label."""
class Meta:
proxy = True
| UserProxy |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/abstractClass2.py | {
"start": 126,
"end": 234
} | class ____(abc.ABC):
@abc.abstractmethod
def a(self) -> None:
print("InterfaceA.a")
| InterfaceA |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation.py | {
"start": 12914,
"end": 12991
} | class ____:
def __init__(self, a, *args):
self.args = args
| SuperTwo |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/feature_store.py | {
"start": 1743,
"end": 5127
} | class ____(GoogleCloudBaseOperator):
"""
Initiate a synchronization operation for a Feature View in Vertex AI Feature Store.
This operator triggers a sync operation that updates the online serving data for a feature view
based on the latest data in the underlying batch source. The sync operation ensures that
the online feature values are up-to-date for real-time serving.
:param project_id: Required. The ID of the Google Cloud project that contains the feature store.
This is used to identify which project's resources to interact with.
:param location: Required. The location of the feature store (e.g., 'us-central1', 'us-east1').
This specifies the Google Cloud region where the feature store resources are located.
:param feature_online_store_id: Required. The ID of the online feature store that contains
the feature view to be synchronized. This store serves as the online serving layer.
:param feature_view_id: Required. The ID of the feature view to synchronize. This identifies
the specific view that needs to have its online values updated from the batch source.
:param gcp_conn_id: The connection ID to use for connecting to Google Cloud Platform.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials. Can be either a single account or a chain of accounts required to
get the access_token of the last account in the list, which will be impersonated
in the request. If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role. If set as a sequence, the identities
from the list must grant Service Account Token Creator IAM role to the directly
preceding identity, with first account from the list granting this role to the
originating account.
"""
template_fields: Sequence[str] = (
"project_id",
"location",
"feature_online_store_id",
"feature_view_id",
)
def __init__(
self,
*,
project_id: str,
location: str,
feature_online_store_id: str,
feature_view_id: str,
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.location = location
self.feature_online_store_id = feature_online_store_id
self.feature_view_id = feature_view_id
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> str:
"""Execute the feature view sync operation."""
self.hook = FeatureStoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Submitting Feature View sync job now...")
response = self.hook.sync_feature_view(
project_id=self.project_id,
location=self.location,
feature_online_store_id=self.feature_online_store_id,
feature_view_id=self.feature_view_id,
)
self.log.info("Retrieved Feature View sync: %s", response)
return response
| SyncFeatureViewOperator |
python | openai__openai-python | src/openai/types/responses/response_reasoning_item_param.py | {
"start": 744,
"end": 1539
} | class ____(TypedDict, total=False):
id: Required[str]
"""The unique identifier of the reasoning content."""
summary: Required[Iterable[Summary]]
"""Reasoning summary content."""
type: Required[Literal["reasoning"]]
"""The type of the object. Always `reasoning`."""
content: Iterable[Content]
"""Reasoning text content."""
encrypted_content: Optional[str]
"""
The encrypted content of the reasoning item - populated when a response is
generated with `reasoning.encrypted_content` in the `include` parameter.
"""
status: Literal["in_progress", "completed", "incomplete"]
"""The status of the item.
One of `in_progress`, `completed`, or `incomplete`. Populated when items are
returned via API.
"""
| ResponseReasoningItemParam |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 9609,
"end": 10374
} | class ____(nodes.inline, _desc_classes_injector):
"""Common parent class of nodes for inline text of a signature."""
classes: list[str] = []
def __init__(
self, rawsource: str = '', text: str = '', *children: Element, **attributes: Any
) -> None:
super().__init__(rawsource, text, *children, **attributes)
self['classes'].extend(self.classes)
def __init_subclass__(cls, *, _sig_element: bool = False, **kwargs: Any) -> None:
super().__init_subclass__(**kwargs)
if _sig_element:
# add the class to the SIG_ELEMENTS set if asked
SIG_ELEMENTS.add(cls)
# to not reinvent the wheel, the classes in the following desc_sig classes
# are based on those used in Pygments
| desc_sig_element |
python | kubernetes-client__python | kubernetes/client/models/v1_volume.py | {
"start": 383,
"end": 25783
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'aws_elastic_block_store': 'V1AWSElasticBlockStoreVolumeSource',
'azure_disk': 'V1AzureDiskVolumeSource',
'azure_file': 'V1AzureFileVolumeSource',
'cephfs': 'V1CephFSVolumeSource',
'cinder': 'V1CinderVolumeSource',
'config_map': 'V1ConfigMapVolumeSource',
'csi': 'V1CSIVolumeSource',
'downward_api': 'V1DownwardAPIVolumeSource',
'empty_dir': 'V1EmptyDirVolumeSource',
'ephemeral': 'V1EphemeralVolumeSource',
'fc': 'V1FCVolumeSource',
'flex_volume': 'V1FlexVolumeSource',
'flocker': 'V1FlockerVolumeSource',
'gce_persistent_disk': 'V1GCEPersistentDiskVolumeSource',
'git_repo': 'V1GitRepoVolumeSource',
'glusterfs': 'V1GlusterfsVolumeSource',
'host_path': 'V1HostPathVolumeSource',
'image': 'V1ImageVolumeSource',
'iscsi': 'V1ISCSIVolumeSource',
'name': 'str',
'nfs': 'V1NFSVolumeSource',
'persistent_volume_claim': 'V1PersistentVolumeClaimVolumeSource',
'photon_persistent_disk': 'V1PhotonPersistentDiskVolumeSource',
'portworx_volume': 'V1PortworxVolumeSource',
'projected': 'V1ProjectedVolumeSource',
'quobyte': 'V1QuobyteVolumeSource',
'rbd': 'V1RBDVolumeSource',
'scale_io': 'V1ScaleIOVolumeSource',
'secret': 'V1SecretVolumeSource',
'storageos': 'V1StorageOSVolumeSource',
'vsphere_volume': 'V1VsphereVirtualDiskVolumeSource'
}
attribute_map = {
'aws_elastic_block_store': 'awsElasticBlockStore',
'azure_disk': 'azureDisk',
'azure_file': 'azureFile',
'cephfs': 'cephfs',
'cinder': 'cinder',
'config_map': 'configMap',
'csi': 'csi',
'downward_api': 'downwardAPI',
'empty_dir': 'emptyDir',
'ephemeral': 'ephemeral',
'fc': 'fc',
'flex_volume': 'flexVolume',
'flocker': 'flocker',
'gce_persistent_disk': 'gcePersistentDisk',
'git_repo': 'gitRepo',
'glusterfs': 'glusterfs',
'host_path': 'hostPath',
'image': 'image',
'iscsi': 'iscsi',
'name': 'name',
'nfs': 'nfs',
'persistent_volume_claim': 'persistentVolumeClaim',
'photon_persistent_disk': 'photonPersistentDisk',
'portworx_volume': 'portworxVolume',
'projected': 'projected',
'quobyte': 'quobyte',
'rbd': 'rbd',
'scale_io': 'scaleIO',
'secret': 'secret',
'storageos': 'storageos',
'vsphere_volume': 'vsphereVolume'
}
def __init__(self, aws_elastic_block_store=None, azure_disk=None, azure_file=None, cephfs=None, cinder=None, config_map=None, csi=None, downward_api=None, empty_dir=None, ephemeral=None, fc=None, flex_volume=None, flocker=None, gce_persistent_disk=None, git_repo=None, glusterfs=None, host_path=None, image=None, iscsi=None, name=None, nfs=None, persistent_volume_claim=None, photon_persistent_disk=None, portworx_volume=None, projected=None, quobyte=None, rbd=None, scale_io=None, secret=None, storageos=None, vsphere_volume=None, local_vars_configuration=None): # noqa: E501
"""V1Volume - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._aws_elastic_block_store = None
self._azure_disk = None
self._azure_file = None
self._cephfs = None
self._cinder = None
self._config_map = None
self._csi = None
self._downward_api = None
self._empty_dir = None
self._ephemeral = None
self._fc = None
self._flex_volume = None
self._flocker = None
self._gce_persistent_disk = None
self._git_repo = None
self._glusterfs = None
self._host_path = None
self._image = None
self._iscsi = None
self._name = None
self._nfs = None
self._persistent_volume_claim = None
self._photon_persistent_disk = None
self._portworx_volume = None
self._projected = None
self._quobyte = None
self._rbd = None
self._scale_io = None
self._secret = None
self._storageos = None
self._vsphere_volume = None
self.discriminator = None
if aws_elastic_block_store is not None:
self.aws_elastic_block_store = aws_elastic_block_store
if azure_disk is not None:
self.azure_disk = azure_disk
if azure_file is not None:
self.azure_file = azure_file
if cephfs is not None:
self.cephfs = cephfs
if cinder is not None:
self.cinder = cinder
if config_map is not None:
self.config_map = config_map
if csi is not None:
self.csi = csi
if downward_api is not None:
self.downward_api = downward_api
if empty_dir is not None:
self.empty_dir = empty_dir
if ephemeral is not None:
self.ephemeral = ephemeral
if fc is not None:
self.fc = fc
if flex_volume is not None:
self.flex_volume = flex_volume
if flocker is not None:
self.flocker = flocker
if gce_persistent_disk is not None:
self.gce_persistent_disk = gce_persistent_disk
if git_repo is not None:
self.git_repo = git_repo
if glusterfs is not None:
self.glusterfs = glusterfs
if host_path is not None:
self.host_path = host_path
if image is not None:
self.image = image
if iscsi is not None:
self.iscsi = iscsi
self.name = name
if nfs is not None:
self.nfs = nfs
if persistent_volume_claim is not None:
self.persistent_volume_claim = persistent_volume_claim
if photon_persistent_disk is not None:
self.photon_persistent_disk = photon_persistent_disk
if portworx_volume is not None:
self.portworx_volume = portworx_volume
if projected is not None:
self.projected = projected
if quobyte is not None:
self.quobyte = quobyte
if rbd is not None:
self.rbd = rbd
if scale_io is not None:
self.scale_io = scale_io
if secret is not None:
self.secret = secret
if storageos is not None:
self.storageos = storageos
if vsphere_volume is not None:
self.vsphere_volume = vsphere_volume
@property
def aws_elastic_block_store(self):
"""Gets the aws_elastic_block_store of this V1Volume. # noqa: E501
:return: The aws_elastic_block_store of this V1Volume. # noqa: E501
:rtype: V1AWSElasticBlockStoreVolumeSource
"""
return self._aws_elastic_block_store
@aws_elastic_block_store.setter
def aws_elastic_block_store(self, aws_elastic_block_store):
"""Sets the aws_elastic_block_store of this V1Volume.
:param aws_elastic_block_store: The aws_elastic_block_store of this V1Volume. # noqa: E501
:type: V1AWSElasticBlockStoreVolumeSource
"""
self._aws_elastic_block_store = aws_elastic_block_store
@property
def azure_disk(self):
"""Gets the azure_disk of this V1Volume. # noqa: E501
:return: The azure_disk of this V1Volume. # noqa: E501
:rtype: V1AzureDiskVolumeSource
"""
return self._azure_disk
@azure_disk.setter
def azure_disk(self, azure_disk):
"""Sets the azure_disk of this V1Volume.
:param azure_disk: The azure_disk of this V1Volume. # noqa: E501
:type: V1AzureDiskVolumeSource
"""
self._azure_disk = azure_disk
@property
def azure_file(self):
"""Gets the azure_file of this V1Volume. # noqa: E501
:return: The azure_file of this V1Volume. # noqa: E501
:rtype: V1AzureFileVolumeSource
"""
return self._azure_file
@azure_file.setter
def azure_file(self, azure_file):
"""Sets the azure_file of this V1Volume.
:param azure_file: The azure_file of this V1Volume. # noqa: E501
:type: V1AzureFileVolumeSource
"""
self._azure_file = azure_file
@property
def cephfs(self):
"""Gets the cephfs of this V1Volume. # noqa: E501
:return: The cephfs of this V1Volume. # noqa: E501
:rtype: V1CephFSVolumeSource
"""
return self._cephfs
@cephfs.setter
def cephfs(self, cephfs):
"""Sets the cephfs of this V1Volume.
:param cephfs: The cephfs of this V1Volume. # noqa: E501
:type: V1CephFSVolumeSource
"""
self._cephfs = cephfs
@property
def cinder(self):
"""Gets the cinder of this V1Volume. # noqa: E501
:return: The cinder of this V1Volume. # noqa: E501
:rtype: V1CinderVolumeSource
"""
return self._cinder
@cinder.setter
def cinder(self, cinder):
"""Sets the cinder of this V1Volume.
:param cinder: The cinder of this V1Volume. # noqa: E501
:type: V1CinderVolumeSource
"""
self._cinder = cinder
@property
def config_map(self):
"""Gets the config_map of this V1Volume. # noqa: E501
:return: The config_map of this V1Volume. # noqa: E501
:rtype: V1ConfigMapVolumeSource
"""
return self._config_map
@config_map.setter
def config_map(self, config_map):
"""Sets the config_map of this V1Volume.
:param config_map: The config_map of this V1Volume. # noqa: E501
:type: V1ConfigMapVolumeSource
"""
self._config_map = config_map
@property
def csi(self):
"""Gets the csi of this V1Volume. # noqa: E501
:return: The csi of this V1Volume. # noqa: E501
:rtype: V1CSIVolumeSource
"""
return self._csi
@csi.setter
def csi(self, csi):
"""Sets the csi of this V1Volume.
:param csi: The csi of this V1Volume. # noqa: E501
:type: V1CSIVolumeSource
"""
self._csi = csi
@property
def downward_api(self):
"""Gets the downward_api of this V1Volume. # noqa: E501
:return: The downward_api of this V1Volume. # noqa: E501
:rtype: V1DownwardAPIVolumeSource
"""
return self._downward_api
@downward_api.setter
def downward_api(self, downward_api):
"""Sets the downward_api of this V1Volume.
:param downward_api: The downward_api of this V1Volume. # noqa: E501
:type: V1DownwardAPIVolumeSource
"""
self._downward_api = downward_api
@property
def empty_dir(self):
"""Gets the empty_dir of this V1Volume. # noqa: E501
:return: The empty_dir of this V1Volume. # noqa: E501
:rtype: V1EmptyDirVolumeSource
"""
return self._empty_dir
@empty_dir.setter
def empty_dir(self, empty_dir):
"""Sets the empty_dir of this V1Volume.
:param empty_dir: The empty_dir of this V1Volume. # noqa: E501
:type: V1EmptyDirVolumeSource
"""
self._empty_dir = empty_dir
@property
def ephemeral(self):
"""Gets the ephemeral of this V1Volume. # noqa: E501
:return: The ephemeral of this V1Volume. # noqa: E501
:rtype: V1EphemeralVolumeSource
"""
return self._ephemeral
@ephemeral.setter
def ephemeral(self, ephemeral):
"""Sets the ephemeral of this V1Volume.
:param ephemeral: The ephemeral of this V1Volume. # noqa: E501
:type: V1EphemeralVolumeSource
"""
self._ephemeral = ephemeral
@property
def fc(self):
"""Gets the fc of this V1Volume. # noqa: E501
:return: The fc of this V1Volume. # noqa: E501
:rtype: V1FCVolumeSource
"""
return self._fc
@fc.setter
def fc(self, fc):
"""Sets the fc of this V1Volume.
:param fc: The fc of this V1Volume. # noqa: E501
:type: V1FCVolumeSource
"""
self._fc = fc
@property
def flex_volume(self):
"""Gets the flex_volume of this V1Volume. # noqa: E501
:return: The flex_volume of this V1Volume. # noqa: E501
:rtype: V1FlexVolumeSource
"""
return self._flex_volume
@flex_volume.setter
def flex_volume(self, flex_volume):
"""Sets the flex_volume of this V1Volume.
:param flex_volume: The flex_volume of this V1Volume. # noqa: E501
:type: V1FlexVolumeSource
"""
self._flex_volume = flex_volume
@property
def flocker(self):
"""Gets the flocker of this V1Volume. # noqa: E501
:return: The flocker of this V1Volume. # noqa: E501
:rtype: V1FlockerVolumeSource
"""
return self._flocker
@flocker.setter
def flocker(self, flocker):
"""Sets the flocker of this V1Volume.
:param flocker: The flocker of this V1Volume. # noqa: E501
:type: V1FlockerVolumeSource
"""
self._flocker = flocker
@property
def gce_persistent_disk(self):
"""Gets the gce_persistent_disk of this V1Volume. # noqa: E501
:return: The gce_persistent_disk of this V1Volume. # noqa: E501
:rtype: V1GCEPersistentDiskVolumeSource
"""
return self._gce_persistent_disk
@gce_persistent_disk.setter
def gce_persistent_disk(self, gce_persistent_disk):
"""Sets the gce_persistent_disk of this V1Volume.
:param gce_persistent_disk: The gce_persistent_disk of this V1Volume. # noqa: E501
:type: V1GCEPersistentDiskVolumeSource
"""
self._gce_persistent_disk = gce_persistent_disk
@property
def git_repo(self):
"""Gets the git_repo of this V1Volume. # noqa: E501
:return: The git_repo of this V1Volume. # noqa: E501
:rtype: V1GitRepoVolumeSource
"""
return self._git_repo
@git_repo.setter
def git_repo(self, git_repo):
"""Sets the git_repo of this V1Volume.
:param git_repo: The git_repo of this V1Volume. # noqa: E501
:type: V1GitRepoVolumeSource
"""
self._git_repo = git_repo
@property
def glusterfs(self):
"""Gets the glusterfs of this V1Volume. # noqa: E501
:return: The glusterfs of this V1Volume. # noqa: E501
:rtype: V1GlusterfsVolumeSource
"""
return self._glusterfs
@glusterfs.setter
def glusterfs(self, glusterfs):
"""Sets the glusterfs of this V1Volume.
:param glusterfs: The glusterfs of this V1Volume. # noqa: E501
:type: V1GlusterfsVolumeSource
"""
self._glusterfs = glusterfs
@property
def host_path(self):
"""Gets the host_path of this V1Volume. # noqa: E501
:return: The host_path of this V1Volume. # noqa: E501
:rtype: V1HostPathVolumeSource
"""
return self._host_path
@host_path.setter
def host_path(self, host_path):
"""Sets the host_path of this V1Volume.
:param host_path: The host_path of this V1Volume. # noqa: E501
:type: V1HostPathVolumeSource
"""
self._host_path = host_path
@property
def image(self):
"""Gets the image of this V1Volume. # noqa: E501
:return: The image of this V1Volume. # noqa: E501
:rtype: V1ImageVolumeSource
"""
return self._image
@image.setter
def image(self, image):
"""Sets the image of this V1Volume.
:param image: The image of this V1Volume. # noqa: E501
:type: V1ImageVolumeSource
"""
self._image = image
@property
def iscsi(self):
"""Gets the iscsi of this V1Volume. # noqa: E501
:return: The iscsi of this V1Volume. # noqa: E501
:rtype: V1ISCSIVolumeSource
"""
return self._iscsi
@iscsi.setter
def iscsi(self, iscsi):
"""Sets the iscsi of this V1Volume.
:param iscsi: The iscsi of this V1Volume. # noqa: E501
:type: V1ISCSIVolumeSource
"""
self._iscsi = iscsi
@property
def name(self):
"""Gets the name of this V1Volume. # noqa: E501
name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:return: The name of this V1Volume. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1Volume.
name of the volume. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names # noqa: E501
:param name: The name of this V1Volume. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def nfs(self):
"""Gets the nfs of this V1Volume. # noqa: E501
:return: The nfs of this V1Volume. # noqa: E501
:rtype: V1NFSVolumeSource
"""
return self._nfs
@nfs.setter
def nfs(self, nfs):
"""Sets the nfs of this V1Volume.
:param nfs: The nfs of this V1Volume. # noqa: E501
:type: V1NFSVolumeSource
"""
self._nfs = nfs
@property
def persistent_volume_claim(self):
"""Gets the persistent_volume_claim of this V1Volume. # noqa: E501
:return: The persistent_volume_claim of this V1Volume. # noqa: E501
:rtype: V1PersistentVolumeClaimVolumeSource
"""
return self._persistent_volume_claim
@persistent_volume_claim.setter
def persistent_volume_claim(self, persistent_volume_claim):
"""Sets the persistent_volume_claim of this V1Volume.
:param persistent_volume_claim: The persistent_volume_claim of this V1Volume. # noqa: E501
:type: V1PersistentVolumeClaimVolumeSource
"""
self._persistent_volume_claim = persistent_volume_claim
@property
def photon_persistent_disk(self):
"""Gets the photon_persistent_disk of this V1Volume. # noqa: E501
:return: The photon_persistent_disk of this V1Volume. # noqa: E501
:rtype: V1PhotonPersistentDiskVolumeSource
"""
return self._photon_persistent_disk
@photon_persistent_disk.setter
def photon_persistent_disk(self, photon_persistent_disk):
"""Sets the photon_persistent_disk of this V1Volume.
:param photon_persistent_disk: The photon_persistent_disk of this V1Volume. # noqa: E501
:type: V1PhotonPersistentDiskVolumeSource
"""
self._photon_persistent_disk = photon_persistent_disk
@property
def portworx_volume(self):
"""Gets the portworx_volume of this V1Volume. # noqa: E501
:return: The portworx_volume of this V1Volume. # noqa: E501
:rtype: V1PortworxVolumeSource
"""
return self._portworx_volume
@portworx_volume.setter
def portworx_volume(self, portworx_volume):
"""Sets the portworx_volume of this V1Volume.
:param portworx_volume: The portworx_volume of this V1Volume. # noqa: E501
:type: V1PortworxVolumeSource
"""
self._portworx_volume = portworx_volume
@property
def projected(self):
"""Gets the projected of this V1Volume. # noqa: E501
:return: The projected of this V1Volume. # noqa: E501
:rtype: V1ProjectedVolumeSource
"""
return self._projected
@projected.setter
def projected(self, projected):
"""Sets the projected of this V1Volume.
:param projected: The projected of this V1Volume. # noqa: E501
:type: V1ProjectedVolumeSource
"""
self._projected = projected
@property
def quobyte(self):
"""Gets the quobyte of this V1Volume. # noqa: E501
:return: The quobyte of this V1Volume. # noqa: E501
:rtype: V1QuobyteVolumeSource
"""
return self._quobyte
@quobyte.setter
def quobyte(self, quobyte):
"""Sets the quobyte of this V1Volume.
:param quobyte: The quobyte of this V1Volume. # noqa: E501
:type: V1QuobyteVolumeSource
"""
self._quobyte = quobyte
@property
def rbd(self):
"""Gets the rbd of this V1Volume. # noqa: E501
:return: The rbd of this V1Volume. # noqa: E501
:rtype: V1RBDVolumeSource
"""
return self._rbd
@rbd.setter
def rbd(self, rbd):
"""Sets the rbd of this V1Volume.
:param rbd: The rbd of this V1Volume. # noqa: E501
:type: V1RBDVolumeSource
"""
self._rbd = rbd
@property
def scale_io(self):
"""Gets the scale_io of this V1Volume. # noqa: E501
:return: The scale_io of this V1Volume. # noqa: E501
:rtype: V1ScaleIOVolumeSource
"""
return self._scale_io
@scale_io.setter
def scale_io(self, scale_io):
"""Sets the scale_io of this V1Volume.
:param scale_io: The scale_io of this V1Volume. # noqa: E501
:type: V1ScaleIOVolumeSource
"""
self._scale_io = scale_io
@property
def secret(self):
"""Gets the secret of this V1Volume. # noqa: E501
:return: The secret of this V1Volume. # noqa: E501
:rtype: V1SecretVolumeSource
"""
return self._secret
@secret.setter
def secret(self, secret):
"""Sets the secret of this V1Volume.
:param secret: The secret of this V1Volume. # noqa: E501
:type: V1SecretVolumeSource
"""
self._secret = secret
@property
def storageos(self):
"""Gets the storageos of this V1Volume. # noqa: E501
:return: The storageos of this V1Volume. # noqa: E501
:rtype: V1StorageOSVolumeSource
"""
return self._storageos
@storageos.setter
def storageos(self, storageos):
"""Sets the storageos of this V1Volume.
:param storageos: The storageos of this V1Volume. # noqa: E501
:type: V1StorageOSVolumeSource
"""
self._storageos = storageos
@property
def vsphere_volume(self):
"""Gets the vsphere_volume of this V1Volume. # noqa: E501
:return: The vsphere_volume of this V1Volume. # noqa: E501
:rtype: V1VsphereVirtualDiskVolumeSource
"""
return self._vsphere_volume
@vsphere_volume.setter
def vsphere_volume(self, vsphere_volume):
"""Sets the vsphere_volume of this V1Volume.
:param vsphere_volume: The vsphere_volume of this V1Volume. # noqa: E501
:type: V1VsphereVirtualDiskVolumeSource
"""
self._vsphere_volume = vsphere_volume
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Volume):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Volume):
return True
return self.to_dict() != other.to_dict()
| V1Volume |
python | ipython__ipython | IPython/core/history.py | {
"start": 1680,
"end": 4908
} | class ____:
"""Dummy DB that will act as a black hole for history.
Only used in the absence of sqlite"""
def execute(*args: typing.Any, **kwargs: typing.Any) -> list:
return []
def commit(self, *args, **kwargs): # type: ignore [no-untyped-def]
pass
def __enter__(self, *args, **kwargs): # type: ignore [no-untyped-def]
pass
def __exit__(self, *args, **kwargs): # type: ignore [no-untyped-def]
pass
@decorator
def only_when_enabled(f, self, *a, **kw): # type: ignore [no-untyped-def]
"""Decorator: return an empty list in the absence of sqlite."""
if not self.enabled:
return []
else:
return f(self, *a, **kw)
# use 16kB as threshold for whether a corrupt history db should be saved
# that should be at least 100 entries or so
_SAVE_DB_SIZE = 16384
@decorator
def catch_corrupt_db(f, self, *a, **kw): # type: ignore [no-untyped-def]
"""A decorator which wraps HistoryAccessor method calls to catch errors from
a corrupt SQLite database, move the old database out of the way, and create
a new one.
We avoid clobbering larger databases because this may be triggered due to filesystem issues,
not just a corrupt file.
"""
try:
return f(self, *a, **kw)
except (DatabaseError, OperationalError) as e:
self._corrupt_db_counter += 1
self.log.error("Failed to open SQLite history %s (%s).", self.hist_file, e)
if self.hist_file != ":memory:":
if self._corrupt_db_counter > self._corrupt_db_limit:
self.hist_file = ":memory:"
self.log.error(
"Failed to load history too many times, history will not be saved."
)
elif self.hist_file.is_file():
# move the file out of the way
base = str(self.hist_file.parent / self.hist_file.stem)
ext = self.hist_file.suffix
size = self.hist_file.stat().st_size
if size >= _SAVE_DB_SIZE:
# if there's significant content, avoid clobbering
now = (
datetime.datetime.now(datetime.timezone.utc)
.isoformat()
.replace(":", ".")
)
newpath = base + "-corrupt-" + now + ext
# don't clobber previous corrupt backups
for i in range(100):
if not Path(newpath).exists():
break
else:
newpath = base + "-corrupt-" + now + ("-%i" % i) + ext
else:
# not much content, possibly empty; don't worry about clobbering
# maybe we should just delete it?
newpath = base + "-corrupt" + ext
self.hist_file.rename(newpath)
self.log.error(
"History file was moved to %s and a new file created.", newpath
)
self.init_db()
return []
else:
# Failed with :memory:, something serious is wrong
raise
| DummyDB |
python | rapidsai__cudf | python/cudf/cudf/core/accessors/categorical.py | {
"start": 447,
"end": 11102
} | class ____(BaseAccessor):
"""
Accessor object for categorical properties of the Series values.
Be aware that assigning to `categories` is a inplace operation,
while all methods return new categorical data per default.
Parameters
----------
column : Column
parent : Series or CategoricalIndex
Examples
--------
>>> s = cudf.Series([1,2,3], dtype='category')
>>> s
0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]
>>> s.cat.categories
Index([1, 2, 3], dtype='int64')
>>> s.cat.reorder_categories([3,2,1])
0 1
1 2
2 3
dtype: category
Categories (3, int64): [3, 2, 1]
>>> s.cat.remove_categories([1])
0 <NA>
1 2
2 3
dtype: category
Categories (2, int64): [2, 3]
>>> s.cat.set_categories(list('abcde'))
0 <NA>
1 <NA>
2 <NA>
dtype: category
Categories (5, object): ['a', 'b', 'c', 'd', 'e']
>>> s.cat.as_ordered()
0 1
1 2
2 3
dtype: category
Categories (3, int64): [1 < 2 < 3]
>>> s.cat.as_unordered()
0 1
1 2
2 3
dtype: category
Categories (3, int64): [1, 2, 3]
"""
_column: CategoricalColumn
def __init__(self, parent: Series | Index):
if not isinstance(parent.dtype, CategoricalDtype):
raise AttributeError(
"Can only use .cat accessor with a 'category' dtype"
)
super().__init__(parent=parent)
@property
def categories(self) -> Index:
"""
The categories of this categorical.
"""
return self._column.dtype.categories
@property
def codes(self) -> Series:
"""
Return Series of codes as well as the index.
"""
from cudf.core.series import Series
index = (
self._parent.index if isinstance(self._parent, Series) else None
)
return Series._from_column(self._column.codes, index=index)
@property
def ordered(self) -> bool | None:
"""
Whether the categories have an ordered relationship.
"""
return self._column.ordered
def as_ordered(self) -> Series | Index | None:
"""
Set the Categorical to be ordered.
Returns
-------
Categorical
Ordered Categorical.
Examples
--------
>>> import cudf
>>> s = cudf.Series([10, 1, 1, 2, 10, 2, 10], dtype="category")
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
>>> s.cat.as_ordered()
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1 < 2 < 10]
"""
return self._return_or_inplace(self._column.as_ordered(ordered=True))
def as_unordered(self) -> Series | Index | None:
"""
Set the Categorical to be unordered.
Returns
-------
Categorical
Unordered Categorical or None if inplace.
Examples
--------
>>> import cudf
>>> s = cudf.Series([10, 1, 1, 2, 10, 2, 10], dtype="category")
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
>>> s = s.cat.as_ordered()
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1 < 2 < 10]
>>> s.cat.as_unordered()
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
"""
return self._return_or_inplace(self._column.as_ordered(ordered=False))
def add_categories(self, new_categories: Any) -> Series | Index | None:
"""
Add new categories.
`new_categories` will be included at the last/highest
place in the categories and will be unused directly
after this call.
Parameters
----------
new_categories : category or list-like of category
The new categories to be included.
Returns
-------
cat
Categorical with new categories added.
Examples
--------
>>> import cudf
>>> s = cudf.Series([1, 2], dtype="category")
>>> s
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
>>> s.cat.add_categories([0, 3, 4])
0 1
1 2
dtype: category
Categories (5, int64): [1, 2, 0, 3, 4]
>>> s
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
"""
return self._return_or_inplace(
self._column.add_categories(new_categories=new_categories)
)
def remove_categories(
self,
removals: Any,
) -> Series | Index | None:
"""
Remove the specified categories.
`removals` must be included in the
old categories. Values which were in the
removed categories will be set to null.
Parameters
----------
removals : category or list-like of category
The categories which should be removed.
Returns
-------
cat
Categorical with removed categories
Examples
--------
>>> import cudf
>>> s = cudf.Series([10, 1, 1, 2, 10, 2, 10], dtype="category")
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
>>> s.cat.remove_categories([1])
0 10
1 <NA>
2 <NA>
3 2
4 10
5 2
6 10
dtype: category
Categories (2, int64): [2, 10]
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
"""
return self._return_or_inplace(
self._column.remove_categories(removals=removals)
)
def set_categories(
self,
new_categories: Any,
ordered: bool = False,
rename: bool = False,
) -> Series | Index | None:
"""
Set the categories to the specified new_categories.
`new_categories` can include new categories (which
will result in unused categories) or remove old categories
(which results in values set to null). If `rename==True`,
the categories will simple be renamed (less or more items
than in old categories will result in values set to null or
in unused categories respectively).
This method can be used to perform more than one action
of adding, removing, and reordering simultaneously and
is therefore faster than performing the individual steps
via the more specialised methods.
On the other hand this methods does not do checks
(e.g., whether the old categories are included in the
new categories on a reorder), which can result in
surprising changes.
Parameters
----------
new_categories : list-like
The categories in new order.
ordered : bool, default None
Whether or not the categorical is treated as
a ordered categorical. If not given, do
not change the ordered information.
rename : bool, default False
Whether or not the `new_categories` should be
considered as a rename of the old categories
or as reordered categories.
Returns
-------
cat
Categorical with reordered categories
Examples
--------
>>> import cudf
>>> s = cudf.Series([1, 1, 2, 10, 2, 10], dtype='category')
>>> s
0 1
1 1
2 2
3 10
4 2
5 10
dtype: category
Categories (3, int64): [1, 2, 10]
>>> s.cat.set_categories([1, 10])
0 1
1 1
2 <NA>
3 10
4 <NA>
5 10
dtype: category
Categories (2, int64): [1, 10]
"""
return self._return_or_inplace(
self._column.set_categories(
new_categories=new_categories, ordered=ordered, rename=rename
)
)
def reorder_categories(
self,
new_categories: Any,
ordered: bool = False,
) -> Series | Index | None:
"""
Reorder categories as specified in new_categories.
`new_categories` need to include all old categories
and no new category items.
Parameters
----------
new_categories : Index-like
The categories in new order.
ordered : bool, optional
Whether or not the categorical is treated
as a ordered categorical. If not given, do
not change the ordered information.
Returns
-------
cat
Categorical with reordered categories
Raises
------
ValueError
If the new categories do not contain all old
category items or any new ones.
Examples
--------
>>> import cudf
>>> s = cudf.Series([10, 1, 1, 2, 10, 2, 10], dtype="category")
>>> s
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [1, 2, 10]
>>> s.cat.reorder_categories([10, 1, 2])
0 10
1 1
2 1
3 2
4 10
5 2
6 10
dtype: category
Categories (3, int64): [10, 1, 2]
>>> s.cat.reorder_categories([10, 1])
ValueError: items in new_categories are not the same as in
old categories
"""
return self._return_or_inplace(
self._column.reorder_categories(new_categories, ordered=ordered),
)
| CategoricalAccessor |
python | numpy__numpy | numpy/distutils/_shell_utils.py | {
"start": 223,
"end": 812
} | class ____:
"""
An object that knows how to split and join command-line arguments.
It must be true that ``argv == split(join(argv))`` for all ``argv``.
The reverse neednt be true - `join(split(cmd))` may result in the addition
or removal of unnecessary escaping.
"""
@staticmethod
def join(argv):
""" Join a list of arguments into a command line string """
raise NotImplementedError
@staticmethod
def split(cmd):
""" Split a command line string into a list of arguments """
raise NotImplementedError
| CommandLineParser |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/scalarint.py | {
"start": 230,
"end": 2876
} | class ____(int):
def __new__(cls, *args, **kw):
# type: (Any, Any, Any) -> Any
width = kw.pop('width', None)
underscore = kw.pop('underscore', None)
anchor = kw.pop('anchor', None)
v = int.__new__(cls, *args, **kw)
v._width = width
v._underscore = underscore
if anchor is not None:
v.yaml_set_anchor(anchor, always_dump=True)
return v
def __iadd__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self + a)
x._width = self._width # type: ignore
x._underscore = ( # type: ignore
self._underscore[:] if self._underscore is not None else None # type: ignore
) # NOQA
return x
def __ifloordiv__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self // a)
x._width = self._width # type: ignore
x._underscore = ( # type: ignore
self._underscore[:] if self._underscore is not None else None # type: ignore
) # NOQA
return x
def __imul__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self * a)
x._width = self._width # type: ignore
x._underscore = ( # type: ignore
self._underscore[:] if self._underscore is not None else None # type: ignore
) # NOQA
return x
def __ipow__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self ** a)
x._width = self._width # type: ignore
x._underscore = ( # type: ignore
self._underscore[:] if self._underscore is not None else None # type: ignore
) # NOQA
return x
def __isub__(self, a): # type: ignore
# type: (Any) -> Any
x = type(self)(self - a)
x._width = self._width # type: ignore
x._underscore = ( # type: ignore
self._underscore[:] if self._underscore is not None else None # type: ignore
) # NOQA
return x
@property
def anchor(self):
# type: () -> Any
if not hasattr(self, Anchor.attrib):
setattr(self, Anchor.attrib, Anchor())
return getattr(self, Anchor.attrib)
def yaml_anchor(self, any=False):
# type: (bool) -> Any
if not hasattr(self, Anchor.attrib):
return None
if any or self.anchor.always_dump:
return self.anchor
return None
def yaml_set_anchor(self, value, always_dump=False):
# type: (Any, bool) -> None
self.anchor.value = value
self.anchor.always_dump = always_dump
| ScalarInt |
python | kamyu104__LeetCode-Solutions | Python/bitwise-or-of-adjacent-elements.py | {
"start": 37,
"end": 233
} | class ____(object):
def orArray(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
return [nums[i]|nums[i+1] for i in range(len(nums)-1)]
| Solution |
python | kamyu104__LeetCode-Solutions | Python/find-original-array-from-doubled-array.py | {
"start": 74,
"end": 511
} | class ____(object):
def findOriginalArray(self, changed):
"""
:type changed: List[int]
:rtype: List[int]
"""
if len(changed)%2:
return []
cnts = collections.Counter(changed)
for x in sorted(cnts.iterkeys()):
if cnts[x] > cnts[2*x]:
return []
cnts[2*x] -= cnts[x] if x else cnts[x]//2
return list(cnts.elements())
| Solution |
python | coleifer__peewee | tests/base.py | {
"start": 7008,
"end": 7712
} | class ____(DatabaseTestCase):
database = db
requires = None
def setUp(self):
super(ModelDatabaseTestCase, self).setUp()
self._db_mapping = {}
# Override the model's database object with test db.
if self.requires:
for model in self.requires:
self._db_mapping[model] = model._meta.database
model._meta.set_database(self.database)
def tearDown(self):
# Restore the model's previous database object.
if self.requires:
for model in self.requires:
model._meta.set_database(self._db_mapping[model])
super(ModelDatabaseTestCase, self).tearDown()
| ModelDatabaseTestCase |
python | wandb__wandb | wandb/sdk/wandb_summary.py | {
"start": 1856,
"end": 3931
} | class ____(SummaryDict):
"""Track single values for each metric for each run.
By default, a metric's summary is the last value of its History.
For example, `wandb.log({'accuracy': 0.9})` will add a new step to History and
update Summary to the latest value. In some cases, it's more useful to have
the maximum or minimum of a metric instead of the final value. You can set
history manually `(wandb.summary['accuracy'] = best_acc)`.
In the UI, summary metrics appear in the table to compare across runs.
Summary metrics are also used in visualizations like the scatter plot and
parallel coordinates chart.
After training has completed, you may want to save evaluation metrics to a
run. Summary can handle numpy arrays and PyTorch/TensorFlow tensors. When
you save one of these types to Summary, we persist the entire tensor in a
binary file and store high level metrics in the summary object, such as min,
mean, variance, and 95th percentile.
Examples:
```python
wandb.init(config=args)
best_accuracy = 0
for epoch in range(1, args.epochs + 1):
test_loss, test_accuracy = test()
if test_accuracy > best_accuracy:
wandb.run.summary["best_accuracy"] = test_accuracy
best_accuracy = test_accuracy
```
"""
_update_callback: t.Callable
_get_current_summary_callback: t.Callable
def __init__(self, get_current_summary_callback: t.Callable):
super().__init__()
object.__setattr__(self, "_update_callback", None)
object.__setattr__(
self, "_get_current_summary_callback", get_current_summary_callback
)
def _set_update_callback(self, update_callback: t.Callable):
object.__setattr__(self, "_update_callback", update_callback)
def _as_dict(self):
return self._get_current_summary_callback()
def _update(self, record: SummaryRecord):
if self._update_callback: # type: ignore
self._update_callback(record)
| Summary |
python | getsentry__sentry | tests/acceptance/test_organization_integration_detail_view.py | {
"start": 379,
"end": 2789
} | class ____(AcceptanceTestCase):
"""
As a developer, I can create an integration, install it, and uninstall it
"""
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
def load_page(self, slug: str, configuration_tab: bool = False) -> None:
url = f"/settings/{self.organization.slug}/integrations/{slug}/"
if configuration_tab:
url += "?tab=configurations"
self.browser.get(url)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
@pytest.mark.skip(reason="Flaky on region runs when creating integration")
def test_example_installation(self) -> None:
self.provider = mock.Mock()
self.provider.key = "alert_rule_integration"
self.provider.name = "Example Installation"
self.load_page("alert_rule_integration")
detail_view_page = OrganizationIntegrationDetailViewPage(browser=self.browser)
detail_view_page.click_install_button()
detail_view_page.click_through_integration_setup(
ExampleIntegrationSetupWindowElement, {"name": self.provider.name}
)
self.wait_for_loading()
integration = Integration.objects.filter(
provider=self.provider.key, external_id=self.provider.name
).first()
assert integration
assert (
f"/settings/{self.organization.slug}/integrations/{self.provider.key}/{integration.id}/"
in self.browser.driver.current_url
)
def test_uninstallation(self) -> None:
model = self.create_provider_integration(
provider="slack",
external_id="some_slack",
name="Test Slack",
metadata={
"domain_name": "slack-test.slack.com",
"installation_type": "born_as_bot",
"access_token": "xoxp-xxxxxxxxx-xxxxxxxxxx-xxxxxxxxxxxx",
},
)
model.add_organization(self.organization, self.user)
self.load_page("slack", configuration_tab=True)
detail_view_page = OrganizationIntegrationDetailViewPage(browser=self.browser)
assert self.browser.element_exists('[aria-label="Configure"]')
detail_view_page.uninstall()
assert (
self.browser.element('[data-test-id="integration-status"]').text == "Pending Deletion"
)
| OrganizationIntegrationDetailView |
python | py-pdf__pypdf | pypdf/_codecs/_codecs.py | {
"start": 787,
"end": 10555
} | class ____(Codec):
"""Lempel-Ziv-Welch (LZW) adaptive compression codec."""
CLEAR_TABLE_MARKER = 256 # Special code to indicate table reset
EOD_MARKER = 257 # End-of-data marker
INITIAL_BITS_PER_CODE = 9 # Initial code bit width
MAX_BITS_PER_CODE = 12 # Maximum code bit width
def __init__(self, max_output_length: int = 75_000_000) -> None:
self.max_output_length = max_output_length
def _initialize_encoding_table(self) -> None:
"""Initialize the encoding table and state to initial conditions."""
self.encoding_table: dict[bytes, int] = {bytes([i]): i for i in range(256)}
self.next_code = self.EOD_MARKER + 1
self.bits_per_code = self.INITIAL_BITS_PER_CODE
self.max_code_value = (1 << self.bits_per_code) - 1
def _increase_next_code(self) -> None:
"""Update bits_per_code and max_code_value if necessary."""
self.next_code += 1
if (
self.next_code > self.max_code_value
and self.bits_per_code < self.MAX_BITS_PER_CODE
):
self.bits_per_code += 1
self.max_code_value = (1 << self.bits_per_code) - 1
def encode(self, data: bytes) -> bytes:
"""
Encode data using the LZW compression algorithm.
Taken from PDF 1.7 specs, "7.4.4.2 Details of LZW Encoding".
"""
result_codes: list[int] = []
# The encoder shall begin by issuing a clear-table code
result_codes.append(self.CLEAR_TABLE_MARKER)
self._initialize_encoding_table()
current_sequence = b""
for byte in data:
next_sequence = current_sequence + bytes([byte])
if next_sequence in self.encoding_table:
# Extend current sequence if already in the table
current_sequence = next_sequence
else:
# Output code for the current sequence
result_codes.append(self.encoding_table[current_sequence])
# Add the new sequence to the table if there's room
if self.next_code <= (1 << self.MAX_BITS_PER_CODE) - 1:
self.encoding_table[next_sequence] = self.next_code
self._increase_next_code()
else:
# If the table is full, emit a clear-table command
result_codes.append(self.CLEAR_TABLE_MARKER)
self._initialize_encoding_table()
# Start new sequence
current_sequence = bytes([byte])
# Ensure everything actually is encoded
if current_sequence:
result_codes.append(self.encoding_table[current_sequence])
result_codes.append(self.EOD_MARKER)
return self._pack_codes_into_bytes(result_codes)
def _pack_codes_into_bytes(self, codes: list[int]) -> bytes:
    """
    Convert the list of result codes into a continuous byte stream, with codes packed as per the code bit-width.
    The bit-width starts at 9 bits and expands as needed.
    """
    # Replay the table state machine in lockstep with the code stream so
    # that self.bits_per_code here matches the width in effect when each
    # code was emitted by encode().
    self._initialize_encoding_table()
    buffer = 0  # Bit accumulator; most-significant bits are the oldest.
    bits_in_buffer = 0  # Number of valid bits currently held in `buffer`.
    output = bytearray()
    for code in codes:
        buffer = (buffer << self.bits_per_code) | code
        bits_in_buffer += self.bits_per_code
        # Codes shall be packed into a continuous bit stream, high-order bit
        # first. This stream shall then be divided into bytes, high-order bit
        # first.
        while bits_in_buffer >= 8:
            bits_in_buffer -= 8
            output.append((buffer >> bits_in_buffer) & 0xFF)
        # Advance the table state exactly as the encoder did, so the code
        # width grows (or resets on a clear-table code) at the same point
        # in the stream.  EOD changes nothing.
        if code == self.CLEAR_TABLE_MARKER:
            self._initialize_encoding_table()
        elif code == self.EOD_MARKER:
            continue
        else:
            self._increase_next_code()
    # Flush any remaining bits in the buffer
    if bits_in_buffer > 0:
        output.append((buffer << (8 - bits_in_buffer)) & 0xFF)
    return bytes(output)
def _initialize_decoding_table(self) -> None:
self.max_code_value = (1 << self.MAX_BITS_PER_CODE) - 1
self.decoding_table = [bytes([i]) for i in range(self.CLEAR_TABLE_MARKER)] + [
b""
] * (self.max_code_value - self.CLEAR_TABLE_MARKER + 1)
self._table_index = self.EOD_MARKER + 1
self._bits_to_get = 9
def _next_code_decode(self, data: bytes) -> int:
self._next_data: int
try:
while self._next_bits < self._bits_to_get:
self._next_data = (self._next_data << 8) | (
data[self._byte_pointer]
)
self._byte_pointer += 1
self._next_bits += 8
code = (
self._next_data >> (self._next_bits - self._bits_to_get)
) & self._and_table[self._bits_to_get - 9]
self._next_bits -= self._bits_to_get
# Reduce data to get rid of the overhead,
# which increases performance on large streams significantly.
self._next_data = self._next_data & 0xFFFFF
return code
except IndexError:
return self.EOD_MARKER
# The following method has been converted to Python from PDFsharp:
# https://github.com/empira/PDFsharp/blob/5fbf6ed14740bc4e16786816882d32e43af3ff5d/src/foundation/src/PDFsharp/src/PdfSharp/Pdf.Filters/LzwDecode.cs
#
# Original license:
#
# -------------------------------------------------------------------------
# Copyright (c) 2001-2024 empira Software GmbH, Troisdorf (Cologne Area),
# Germany
#
# http://docs.pdfsharp.net
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# --------------------------------------------------------------------------
def decode(self, data: bytes) -> bytes:
    """
    Decompress an LZW code stream back into the original bytes.

    The following code was converted to Python from the following code:
    https://github.com/empira/PDFsharp/blob/master/src/foundation/src/PDFsharp/src/PdfSharp/Pdf.Filters/LzwDecode.cs

    Raises:
        LimitReachedError: if the decoded output grows beyond
            ``self.max_output_length``.
    """
    # Masks for extracting 9- to 12-bit codes, indexed by (bits_to_get - 9).
    self._and_table = [511, 1023, 2047, 4095]
    self._table_index = 0
    self._bits_to_get = 9
    self._byte_pointer = 0
    self._next_data = 0
    self._next_bits = 0
    output_stream = io.BytesIO()
    output_length = 0
    self._initialize_decoding_table()
    self._byte_pointer = 0
    self._next_data = 0
    self._next_bits = 0
    # Previously decoded code; starts as the clear marker so the first real
    # code is written straight through without creating a table entry.
    old_code = self.CLEAR_TABLE_MARKER
    while True:
        code = self._next_code_decode(data)
        if code == self.EOD_MARKER:
            break
        if code == self.CLEAR_TABLE_MARKER:
            # Reset the table, then emit the code that follows verbatim.
            self._initialize_decoding_table()
            code = self._next_code_decode(data)
            if code == self.EOD_MARKER:
                break
            output_stream.write(decoded := self.decoding_table[code])
            old_code = code
        elif code < self._table_index:
            # Code already in the table: output its string and record
            # (old string + first byte of this string) as a new entry.
            decoded = self.decoding_table[code]
            output_stream.write(decoded)
            if old_code != self.CLEAR_TABLE_MARKER:
                self._add_entry_decode(self.decoding_table[old_code], decoded[0])
            old_code = code
        else:
            # The code is not in the table and not one of the special codes:
            # its string must be (old string + old string's first byte).
            decoded = (
                self.decoding_table[old_code] + self.decoding_table[old_code][:1]
            )
            output_stream.write(decoded)
            self._add_entry_decode(self.decoding_table[old_code], decoded[0])
            old_code = code
        output_length += len(decoded)
        # Guard against decompression bombs.
        if output_length > self.max_output_length:
            raise LimitReachedError(
                f"Limit reached while decompressing: {output_length} > {self.max_output_length}"
            )
    return output_stream.getvalue()
def _add_entry_decode(self, old_string: bytes, new_char: int) -> None:
new_string = old_string + bytes([new_char])
if self._table_index > self.max_code_value:
logger_warning("Ignoring too large LZW table index.", __name__)
return
self.decoding_table[self._table_index] = new_string
self._table_index += 1
# Update the number of bits to get based on the table index
if self._table_index == 511:
self._bits_to_get = 10
elif self._table_index == 1023:
self._bits_to_get = 11
elif self._table_index == 2047:
self._bits_to_get = 12
| LzwCodec |
python | huggingface__transformers | tests/models/perception_lm/test_image_processing_perception_lm.py | {
"start": 1158,
"end": 3596
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
tile_size=16,
do_normalize=True,
image_mean=IMAGENET_STANDARD_MEAN,
image_std=IMAGENET_STANDARD_STD,
do_convert_rgb=True,
max_num_tiles=4,
vision_input_type="thumb+tile",
resample=Image.Resampling.BICUBIC, # dummy value
size={"shortest_edge": 20}, # dummy value
):
super().__init__()
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.tile_size = tile_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
self.max_num_tiles = max_num_tiles
self.vision_input_type = vision_input_type
self.resample = resample
self.size = size
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"tile_size": self.tile_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
"max_num_tiles": self.max_num_tiles,
"vision_input_type": self.vision_input_type,
"resample": self.resample,
"size": self.size,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.crop_size["height"], self.crop_size["width"]
# Copied from tests.models.clip.test_image_processing_clip.CLIPImageProcessingTester.prepare_image_inputs
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| PerceptionLMImageProcessingTester |
python | keras-team__keras | guides/training_with_built_in_methods.py | {
"start": 8458,
"end": 10058
} | class ____(keras.losses.Loss):
def __init__(self, regularization_factor=0.1, name="custom_mse"):
super().__init__(name=name)
self.regularization_factor = regularization_factor
def call(self, y_true, y_pred):
mse = ops.mean(ops.square(y_true - y_pred), axis=-1)
reg = ops.mean(ops.square(0.5 - y_pred), axis=-1)
return mse + reg * self.regularization_factor
model = get_uncompiled_model()
model.compile(optimizer=keras.optimizers.Adam(), loss=CustomMSE())
y_train_one_hot = ops.one_hot(y_train, num_classes=10)
model.fit(x_train, y_train_one_hot, batch_size=64, epochs=1)
"""
### Custom metrics
If you need a metric that isn't part of the API, you can easily create custom metrics
by subclassing the `keras.metrics.Metric` class. You will need to implement 4
methods:
- `__init__(self)`, in which you will create state variables for your metric.
- `update_state(self, y_true, y_pred, sample_weight=None)`, which uses the targets
y_true and the model predictions y_pred to update the state variables.
- `result(self)`, which uses the state variables to compute the final results.
- `reset_state(self)`, which reinitializes the state of the metric.
State update and results computation are kept separate (in `update_state()` and
`result()`, respectively) because in some cases, the results computation might be very
expensive and would only be done periodically.
Here's a simple example showing how to implement a `CategoricalTruePositives` metric
that counts how many samples were correctly classified as belonging to a given class:
"""
| CustomMSE |
python | getsentry__sentry | tests/sentry/api/test_authentication.py | {
"start": 18804,
"end": 21442
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.auth = RpcSignatureAuthentication()
self.org = self.create_organization(owner=self.user)
@override_settings(RPC_SHARED_SECRET=["a-long-secret-key"])
def test_authenticate_success(self) -> None:
data = b'{"meta":{},"args":{"id":1}'
request = drf_request_from_request(
RequestFactory().post("/", data=data, content_type="application/json")
)
signature = generate_request_signature(request.path_info, request.body)
request.META["HTTP_AUTHORIZATION"] = f"rpcsignature {signature}"
user, token = self.auth.authenticate(request)
assert user.is_anonymous
assert token == signature
def test_authenticate_old_key_validates(self) -> None:
request = drf_request_from_request(
RequestFactory().post("/", data="", content_type="application/json")
)
with override_settings(RPC_SHARED_SECRET=["an-old-key"]):
signature = generate_request_signature(request.path_info, request.body)
request.META["HTTP_AUTHORIZATION"] = f"rpcsignature {signature}"
# Update settings so that we have a new key
with override_settings(RPC_SHARED_SECRET=["a-long-secret-key", "an-old-key"]):
user, token = self.auth.authenticate(request)
assert user.is_anonymous
assert token == signature
def test_authenticate_without_signature(self) -> None:
request = drf_request_from_request(
RequestFactory().post("/", data="", content_type="application/json")
)
request.META["HTTP_AUTHORIZATION"] = "Bearer abcdef"
assert self.auth.authenticate(request) is None
@override_settings(RPC_SHARED_SECRET=["a-long-secret-key"])
def test_authenticate_invalid_signature(self) -> None:
request = drf_request_from_request(
RequestFactory().post("/", data="", content_type="application/json")
)
request.META["HTTP_AUTHORIZATION"] = "rpcsignature abcdef"
with pytest.raises(AuthenticationFailed):
self.auth.authenticate(request)
def test_authenticate_no_shared_secret(self) -> None:
request = drf_request_from_request(
RequestFactory().post("/", data="", content_type="application/json")
)
request.META["HTTP_AUTHORIZATION"] = "rpcsignature abcdef"
with override_settings(RPC_SHARED_SECRET=None):
with pytest.raises(RpcAuthenticationSetupException):
self.auth.authenticate(request)
| TestRpcSignatureAuthentication |
python | numpy__numpy | numpy/ma/core.py | {
"start": 34910,
"end": 72969
} | class ____(_MaskedUFunc):
"""
Define binary operations that have a domain, like divide.
They have no reduce, outer or accumulate.
Parameters
----------
mbfunc : function
The function for which to define a masked version. Made available
as ``_DomainedBinaryOperation.f``.
domain : class instance
Default domain for the function. Should be one of the ``_Domain*``
classes.
fillx : scalar, optional
Filling value for the first argument, default is 0.
filly : scalar, optional
Filling value for the second argument, default is 0.
"""
def __init__(self, dbfunc, domain, fillx=0, filly=0):
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
super().__init__(dbfunc)
self.domain = domain
self.fillx = fillx
self.filly = filly
ufunc_domain[dbfunc] = domain
ufunc_fills[dbfunc] = (fillx, filly)
def __call__(self, a, b, *args, **kwargs):
"Execute the call behavior."
# Get the data
(da, db) = (getdata(a), getdata(b))
# Get the result
with np.errstate(divide='ignore', invalid='ignore'):
result = self.f(da, db, *args, **kwargs)
# Get the mask as a combination of the source masks and invalid
m = ~umath.isfinite(result)
m |= getmask(a)
m |= getmask(b)
# Apply the domain
domain = ufunc_domain.get(self.f, None)
if domain is not None:
m |= domain(da, db)
# Take care of the scalar case first
if not m.ndim:
if m:
return masked
else:
return result
# When the mask is True, put back da if possible
# any errors, just abort; impossible to guarantee masked values
try:
np.copyto(result, 0, casting='unsafe', where=m)
# avoid using "*" since this may be overlaid
masked_da = umath.multiply(m, da)
# only add back if it can be cast safely
if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
result += masked_da
except Exception:
pass
# Transforms to a (subclass of) MaskedArray
masked_result = result.view(get_masked_subclass(a, b))
masked_result._mask = m
if isinstance(a, MaskedArray):
masked_result._update_from(a)
elif isinstance(b, MaskedArray):
masked_result._update_from(b)
return masked_result
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
cosh = _MaskedUnaryOperation(umath.cosh)
tanh = _MaskedUnaryOperation(umath.tanh)
abs = absolute = _MaskedUnaryOperation(umath.absolute)
angle = _MaskedUnaryOperation(angle)
fabs = _MaskedUnaryOperation(umath.fabs)
negative = _MaskedUnaryOperation(umath.negative)
floor = _MaskedUnaryOperation(umath.floor)
ceil = _MaskedUnaryOperation(umath.ceil)
around = _MaskedUnaryOperation(np.around)
logical_not = _MaskedUnaryOperation(umath.logical_not)
# Domained unary ufuncs
sqrt = _MaskedUnaryOperation(umath.sqrt, 0.0,
_DomainGreaterEqual(0.0))
log = _MaskedUnaryOperation(umath.log, 1.0,
_DomainGreater(0.0))
log2 = _MaskedUnaryOperation(umath.log2, 1.0,
_DomainGreater(0.0))
log10 = _MaskedUnaryOperation(umath.log10, 1.0,
_DomainGreater(0.0))
tan = _MaskedUnaryOperation(umath.tan, 0.0,
_DomainTan(1e-35))
arcsin = _MaskedUnaryOperation(umath.arcsin, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccos = _MaskedUnaryOperation(umath.arccos, 0.0,
_DomainCheckInterval(-1.0, 1.0))
arccosh = _MaskedUnaryOperation(umath.arccosh, 1.0,
_DomainGreaterEqual(1.0))
arctanh = _MaskedUnaryOperation(umath.arctanh, 0.0,
_DomainCheckInterval(-1.0 + 1e-15, 1.0 - 1e-15))
# Binary ufuncs
add = _MaskedBinaryOperation(umath.add)
subtract = _MaskedBinaryOperation(umath.subtract)
multiply = _MaskedBinaryOperation(umath.multiply, 1, 1)
arctan2 = _MaskedBinaryOperation(umath.arctan2, 0.0, 1.0)
equal = _MaskedBinaryOperation(umath.equal)
equal.reduce = None
not_equal = _MaskedBinaryOperation(umath.not_equal)
not_equal.reduce = None
less_equal = _MaskedBinaryOperation(umath.less_equal)
less_equal.reduce = None
greater_equal = _MaskedBinaryOperation(umath.greater_equal)
greater_equal.reduce = None
less = _MaskedBinaryOperation(umath.less)
less.reduce = None
greater = _MaskedBinaryOperation(umath.greater)
greater.reduce = None
logical_and = _MaskedBinaryOperation(umath.logical_and)
alltrue = _MaskedBinaryOperation(umath.logical_and, 1, 1).reduce
logical_or = _MaskedBinaryOperation(umath.logical_or)
sometrue = logical_or.reduce
logical_xor = _MaskedBinaryOperation(umath.logical_xor)
bitwise_and = _MaskedBinaryOperation(umath.bitwise_and)
bitwise_or = _MaskedBinaryOperation(umath.bitwise_or)
bitwise_xor = _MaskedBinaryOperation(umath.bitwise_xor)
hypot = _MaskedBinaryOperation(umath.hypot)
# Domained binary ufuncs
divide = _DomainedBinaryOperation(umath.divide, _DomainSafeDivide(), 0, 1)
true_divide = divide # Just an alias for divide.
floor_divide = _DomainedBinaryOperation(umath.floor_divide,
_DomainSafeDivide(), 0, 1)
remainder = _DomainedBinaryOperation(umath.remainder,
_DomainSafeDivide(), 0, 1)
fmod = _DomainedBinaryOperation(umath.fmod, _DomainSafeDivide(), 0, 1)
mod = remainder
###############################################################################
# Mask creation functions #
###############################################################################
def _replace_dtype_fields_recursive(dtype, primitive_dtype):
"Private function allowing recursion in _replace_dtype_fields."
_recurse = _replace_dtype_fields_recursive
# Do we have some name fields ?
if dtype.names is not None:
descr = []
for name in dtype.names:
field = dtype.fields[name]
if len(field) == 3:
# Prepend the title to the name
name = (field[-1], name)
descr.append((name, _recurse(field[0], primitive_dtype)))
new_dtype = np.dtype(descr)
# Is this some kind of composite a la (float,2)
elif dtype.subdtype:
descr = list(dtype.subdtype)
descr[0] = _recurse(dtype.subdtype[0], primitive_dtype)
new_dtype = np.dtype(tuple(descr))
# this is a primitive type, so do a direct replacement
else:
new_dtype = primitive_dtype
# preserve identity of dtypes
if new_dtype == dtype:
new_dtype = dtype
return new_dtype
def _replace_dtype_fields(dtype, primitive_dtype):
"""
Construct a dtype description list from a given dtype.
Returns a new dtype object, with all fields and subtypes in the given type
recursively replaced with `primitive_dtype`.
Arguments are coerced to dtypes first.
"""
dtype = np.dtype(dtype)
primitive_dtype = np.dtype(primitive_dtype)
return _replace_dtype_fields_recursive(dtype, primitive_dtype)
def make_mask_descr(ndtype):
"""
Construct a dtype description list from a given dtype.
Returns a new dtype object, with the type of all fields in `ndtype` to a
boolean type. Field names are not altered.
Parameters
----------
ndtype : dtype
The dtype to convert.
Returns
-------
result : dtype
A dtype that looks like `ndtype`, the type of all fields is boolean.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> dtype = np.dtype({'names':['foo', 'bar'],
... 'formats':[np.float32, np.int64]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i8')])
>>> ma.make_mask_descr(dtype)
dtype([('foo', '|b1'), ('bar', '|b1')])
>>> ma.make_mask_descr(np.float32)
dtype('bool')
"""
return _replace_dtype_fields(ndtype, MaskType)
def getmask(a):
"""
Return the mask of a masked array, or nomask.
Return the mask of `a` as an ndarray if `a` is a `MaskedArray` and the
mask is not `nomask`, else return `nomask`. To guarantee a full array
of booleans of the same shape as a, use `getmaskarray`.
Parameters
----------
a : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getdata : Return the data of a masked array as an ndarray.
getmaskarray : Return the mask of a masked array, or full array of False.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=2)
>>> ma.getmask(a)
array([[False, True],
[False, False]])
Equivalently use the `MaskedArray` `mask` attribute.
>>> a.mask
array([[False, True],
[False, False]])
Result when mask == `nomask`
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(
data=[[1, 2],
[3, 4]],
mask=False,
fill_value=999999)
>>> ma.nomask
False
>>> ma.getmask(b) == ma.nomask
True
>>> b.mask == ma.nomask
True
"""
return getattr(a, '_mask', nomask)
get_mask = getmask
def getmaskarray(arr):
"""
Return the mask of a masked array, or full boolean array of False.
Return the mask of `arr` as an ndarray if `arr` is a `MaskedArray` and
the mask is not `nomask`, else return a full boolean array of False of
the same shape as `arr`.
Parameters
----------
arr : array_like
Input `MaskedArray` for which the mask is required.
See Also
--------
getmask : Return the mask of a masked array, or nomask.
getdata : Return the data of a masked array as an ndarray.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
masked_array(
data=[[1, --],
[3, 4]],
mask=[[False, True],
[False, False]],
fill_value=2)
>>> ma.getmaskarray(a)
array([[False, True],
[False, False]])
Result when mask == ``nomask``
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
masked_array(
data=[[1, 2],
[3, 4]],
mask=False,
fill_value=999999)
>>> ma.getmaskarray(b)
array([[False, False],
[False, False]])
"""
mask = getmask(arr)
if mask is nomask:
mask = make_mask_none(np.shape(arr), getattr(arr, 'dtype', None))
return mask
def is_mask(m):
"""
Return True if m is a valid, standard mask.
This function does not check the contents of the input, only that the
type is MaskType. In particular, this function returns False if the
mask has a flexible dtype.
Parameters
----------
m : array_like
Array to test.
Returns
-------
result : bool
True if `m.dtype.type` is MaskType, False otherwise.
See Also
--------
ma.isMaskedArray : Test whether input is an instance of MaskedArray.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> m
masked_array(data=[--, 1, --, 2, 3],
mask=[ True, False, True, False, False],
fill_value=0)
>>> ma.is_mask(m)
False
>>> ma.is_mask(m.mask)
True
Input must be an ndarray (or have similar attributes)
for it to be considered a valid mask.
>>> m = [False, True, False]
>>> ma.is_mask(m)
False
>>> m = np.array([False, True, False])
>>> m
array([False, True, False])
>>> ma.is_mask(m)
True
Arrays with complex dtypes don't return True.
>>> dtype = np.dtype({'names':['monty', 'pithon'],
... 'formats':[bool, bool]})
>>> dtype
dtype([('monty', '|b1'), ('pithon', '|b1')])
>>> m = np.array([(True, False), (False, True), (True, False)],
... dtype=dtype)
>>> m
array([( True, False), (False, True), ( True, False)],
dtype=[('monty', '?'), ('pithon', '?')])
>>> ma.is_mask(m)
False
"""
try:
return m.dtype.type is MaskType
except AttributeError:
return False
def _shrink_mask(m):
"""
Shrink a mask to nomask if possible
"""
if m.dtype.names is None and not m.any():
return nomask
else:
return m
def make_mask(m, copy=False, shrink=True, dtype=MaskType):
"""
Create a boolean mask from an array.
Return `m` as a boolean mask, creating a copy if necessary or requested.
The function can accept any sequence that is convertible to integers,
or ``nomask``. Does not require that contents must be 0s and 1s, values
of 0 are interpreted as False, everything else as True.
Parameters
----------
m : array_like
Potential mask.
copy : bool, optional
Whether to return a copy of `m` (True) or `m` itself (False).
shrink : bool, optional
Whether to shrink `m` to ``nomask`` if all its values are False.
dtype : dtype, optional
Data-type of the output mask. By default, the output mask has a
dtype of MaskType (bool). If the dtype is flexible, each field has
a boolean dtype. This is ignored when `m` is ``nomask``, in which
case ``nomask`` is always returned.
Returns
-------
result : ndarray
A boolean mask derived from `m`.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> m = [True, False, True, True]
>>> ma.make_mask(m)
array([ True, False, True, True])
>>> m = [1, 0, 1, 1]
>>> ma.make_mask(m)
array([ True, False, True, True])
>>> m = [1, 0, 2, -3]
>>> ma.make_mask(m)
array([ True, False, True, True])
Effect of the `shrink` parameter.
>>> m = np.zeros(4)
>>> m
array([0., 0., 0., 0.])
>>> ma.make_mask(m)
False
>>> ma.make_mask(m, shrink=False)
array([False, False, False, False])
Using a flexible `dtype`.
>>> m = [1, 0, 1, 1]
>>> n = [0, 1, 0, 0]
>>> arr = []
>>> for man, mouse in zip(m, n):
... arr.append((man, mouse))
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
... 'formats':[np.int64, np.int64]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
dtype=[('man', '<i8'), ('mouse', '<i8')])
>>> ma.make_mask(arr, dtype=dtype)
array([(True, False), (False, True), (True, False), (True, False)],
dtype=[('man', '|b1'), ('mouse', '|b1')])
"""
if m is nomask:
return nomask
# Make sure the input dtype is valid.
dtype = make_mask_descr(dtype)
# legacy boolean special case: "existence of fields implies true"
if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool:
return np.ones(m.shape, dtype=dtype)
# Fill the mask in case there are missing data; turn it into an ndarray.
copy = None if not copy else True
result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
# Bas les masques !
if shrink:
result = _shrink_mask(result)
return result
def make_mask_none(newshape, dtype=None):
"""
Return a boolean mask of the given shape, filled with False.
This function returns a boolean ndarray with all entries False, that can
be used in common mask manipulations. If a complex dtype is specified, the
type of each field is converted to a boolean type.
Parameters
----------
newshape : tuple
A tuple indicating the shape of the mask.
dtype : {None, dtype}, optional
If None, use a MaskType instance. Otherwise, use a new datatype with
the same fields as `dtype`, converted to boolean types.
Returns
-------
result : ndarray
An ndarray of appropriate shape and dtype, filled with False.
See Also
--------
make_mask : Create a boolean mask from an array.
make_mask_descr : Construct a dtype description list from a given dtype.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> ma.make_mask_none((3,))
array([False, False, False])
Defining a more complex dtype.
>>> dtype = np.dtype({'names':['foo', 'bar'],
... 'formats':[np.float32, np.int64]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i8')])
>>> ma.make_mask_none((3,), dtype=dtype)
array([(False, False), (False, False), (False, False)],
dtype=[('foo', '|b1'), ('bar', '|b1')])
"""
if dtype is None:
result = np.zeros(newshape, dtype=MaskType)
else:
result = np.zeros(newshape, dtype=make_mask_descr(dtype))
return result
def _recursive_mask_or(m1, m2, newmask):
names = m1.dtype.names
for name in names:
current1 = m1[name]
if current1.dtype.names is not None:
_recursive_mask_or(current1, m2[name], newmask[name])
else:
umath.logical_or(current1, m2[name], newmask[name])
def mask_or(m1, m2, copy=False, shrink=True):
"""
Combine two masks with the ``logical_or`` operator.
The result may be a view on `m1` or `m2` if the other is `nomask`
(i.e. False).
Parameters
----------
m1, m2 : array_like
Input masks.
copy : bool, optional
If copy is False and one of the inputs is `nomask`, return a view
of the other input mask. Defaults to False.
shrink : bool, optional
Whether to shrink the output to `nomask` if all its values are
False. Defaults to True.
Returns
-------
mask : output mask
The result masks values that are masked in either `m1` or `m2`.
Raises
------
ValueError
If `m1` and `m2` have different flexible dtypes.
Examples
--------
>>> import numpy as np
>>> m1 = np.ma.make_mask([0, 1, 1, 0])
>>> m2 = np.ma.make_mask([1, 0, 0, 0])
>>> np.ma.mask_or(m1, m2)
array([ True, True, True, False])
"""
if (m1 is nomask) or (m1 is False):
dtype = getattr(m2, 'dtype', MaskType)
return make_mask(m2, copy=copy, shrink=shrink, dtype=dtype)
if (m2 is nomask) or (m2 is False):
dtype = getattr(m1, 'dtype', MaskType)
return make_mask(m1, copy=copy, shrink=shrink, dtype=dtype)
if m1 is m2 and is_mask(m1):
return _shrink_mask(m1) if shrink else m1
(dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
if dtype1 != dtype2:
raise ValueError(f"Incompatible dtypes '{dtype1}'<>'{dtype2}'")
if dtype1.names is not None:
# Allocate an output mask array with the properly broadcast shape.
newmask = np.empty(np.broadcast(m1, m2).shape, dtype1)
_recursive_mask_or(m1, m2, newmask)
return newmask
return make_mask(umath.logical_or(m1, m2), copy=copy, shrink=shrink)
def flatten_mask(mask):
"""
Returns a completely flattened version of the mask, where nested fields
are collapsed.
Parameters
----------
mask : array_like
Input array, which will be interpreted as booleans.
Returns
-------
flattened_mask : ndarray of bools
The flattened input.
Examples
--------
>>> import numpy as np
>>> mask = np.array([0, 0, 1])
>>> np.ma.flatten_mask(mask)
array([False, False, True])
>>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
>>> np.ma.flatten_mask(mask)
array([False, False, False, True])
>>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
>>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
>>> np.ma.flatten_mask(mask)
array([False, False, False, False, False, True])
"""
def _flatmask(mask):
"Flatten the mask and returns a (maybe nested) sequence of booleans."
mnames = mask.dtype.names
if mnames is not None:
return [flatten_mask(mask[name]) for name in mnames]
else:
return mask
def _flatsequence(sequence):
"Generates a flattened version of the sequence."
try:
for element in sequence:
if hasattr(element, '__iter__'):
yield from _flatsequence(element)
else:
yield element
except TypeError:
yield sequence
mask = np.asarray(mask)
flattened = _flatsequence(_flatmask(mask))
return np.array(list(flattened), dtype=bool)
def _check_mask_axis(mask, axis, keepdims=np._NoValue):
"Check whether there are masked values along the given axis"
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
if mask is not nomask:
return mask.all(axis=axis, **kwargs)
return nomask
###############################################################################
# Masking functions #
###############################################################################
def masked_where(condition, a, copy=True):
"""
Mask an array where a condition is met.
Return `a` as an array masked where `condition` is True.
Any masked values of `a` or `condition` are also masked in the output.
Parameters
----------
condition : array_like
Masking condition. When `condition` tests floating point values for
equality, consider using ``masked_values`` instead.
a : array_like
Array to mask.
copy : bool
If True (default) make a copy of `a` in the result. If False modify
`a` in place and return a view.
Returns
-------
result : MaskedArray
The result of masking `a` where `condition` is True.
See Also
--------
masked_values : Mask using floating point equality.
masked_equal : Mask where equal to a given value.
masked_not_equal : Mask where *not* equal to a given value.
masked_less_equal : Mask where less than or equal to a given value.
masked_greater_equal : Mask where greater than or equal to a given value.
masked_less : Mask where less than a given value.
masked_greater : Mask where greater than a given value.
masked_inside : Mask inside a given interval.
masked_outside : Mask outside a given interval.
masked_invalid : Mask invalid values (NaNs or infs).
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_where(a <= 2, a)
masked_array(data=[--, --, --, 3],
mask=[ True, True, True, False],
fill_value=999999)
Mask array `b` conditional on `a`.
>>> b = ['a', 'b', 'c', 'd']
>>> ma.masked_where(a == 2, b)
masked_array(data=['a', 'b', --, 'd'],
mask=[False, False, True, False],
fill_value='N/A',
dtype='<U1')
Effect of the `copy` argument.
>>> c = ma.masked_where(a <= 2, a)
>>> c
masked_array(data=[--, --, --, 3],
mask=[ True, True, True, False],
fill_value=999999)
>>> c[0] = 99
>>> c
masked_array(data=[99, --, --, 3],
mask=[False, True, True, False],
fill_value=999999)
>>> a
array([0, 1, 2, 3])
>>> c = ma.masked_where(a <= 2, a, copy=False)
>>> c[0] = 99
>>> c
masked_array(data=[99, --, --, 3],
mask=[False, True, True, False],
fill_value=999999)
>>> a
array([99, 1, 2, 3])
When `condition` or `a` contain masked values.
>>> a = np.arange(4)
>>> a = ma.masked_where(a == 2, a)
>>> a
masked_array(data=[0, 1, --, 3],
mask=[False, False, True, False],
fill_value=999999)
>>> b = np.arange(4)
>>> b = ma.masked_where(b == 0, b)
>>> b
masked_array(data=[--, 1, 2, 3],
mask=[ True, False, False, False],
fill_value=999999)
>>> ma.masked_where(a == 3, b)
masked_array(data=[--, 1, --, --],
mask=[ True, False, True, True],
fill_value=999999)
"""
# Make sure that condition is a valid standard-type mask.
cond = make_mask(condition, shrink=False)
a = np.array(a, copy=copy, subok=True)
(cshape, ashape) = (cond.shape, a.shape)
if cshape and cshape != ashape:
raise IndexError("Inconsistent shape between the condition and the input"
" (got %s and %s)" % (cshape, ashape))
if hasattr(a, '_mask'):
cond = mask_or(cond, a._mask)
cls = type(a)
else:
cls = MaskedArray
result = a.view(cls)
# Assign to *.mask so that structured masks are handled correctly.
result.mask = _shrink_mask(cond)
# There is no view of a boolean so when 'a' is a MaskedArray with nomask
# the update to the result's mask has no effect.
if not copy and hasattr(a, '_mask') and getmask(a) is nomask:
a._mask = result._mask.view()
return result
def masked_greater(x, value, copy=True):
    """
    Mask an array where greater than a given value.

    Shortcut to ``masked_where`` using the condition (x > value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> a
    array([0, 1, 2, 3])
    >>> ma.masked_greater(a, 2)
    masked_array(data=[0, 1, 2, --],
                 mask=[False, False, False, True],
           fill_value=999999)

    """
    # Build the elementwise condition with the module's comparison op,
    # then delegate the actual masking to masked_where.
    condition = greater(x, value)
    return masked_where(condition, x, copy=copy)
def masked_greater_equal(x, value, copy=True):
    """
    Mask an array where greater than or equal to a given value.

    Shortcut to ``masked_where`` using the condition (x >= value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> a
    array([0, 1, 2, 3])
    >>> ma.masked_greater_equal(a, 2)
    masked_array(data=[0, 1, --, --],
                 mask=[False, False, True, True],
           fill_value=999999)

    """
    # Build the elementwise condition, then let masked_where do the masking.
    condition = greater_equal(x, value)
    return masked_where(condition, x, copy=copy)
def masked_less(x, value, copy=True):
    """
    Mask an array where less than a given value.

    Shortcut to ``masked_where`` using the condition (x < value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> a
    array([0, 1, 2, 3])
    >>> ma.masked_less(a, 2)
    masked_array(data=[--, --, 2, 3],
                 mask=[ True, True, False, False],
           fill_value=999999)

    """
    # Build the elementwise condition, then let masked_where do the masking.
    condition = less(x, value)
    return masked_where(condition, x, copy=copy)
def masked_less_equal(x, value, copy=True):
    """
    Mask an array where less than or equal to a given value.

    Shortcut to ``masked_where`` using the condition (x <= value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> a
    array([0, 1, 2, 3])
    >>> ma.masked_less_equal(a, 2)
    masked_array(data=[--, --, --, 3],
                 mask=[ True, True, True, False],
           fill_value=999999)

    """
    # Build the elementwise condition, then let masked_where do the masking.
    condition = less_equal(x, value)
    return masked_where(condition, x, copy=copy)
def masked_not_equal(x, value, copy=True):
    """
    Mask an array where *not* equal to a given value.

    Shortcut to ``masked_where`` using the condition (x != value).

    See Also
    --------
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> a
    array([0, 1, 2, 3])
    >>> ma.masked_not_equal(a, 2)
    masked_array(data=[--, --, 2, --],
                 mask=[ True, True, False, True],
           fill_value=999999)

    """
    # Build the elementwise condition, then let masked_where do the masking.
    condition = not_equal(x, value)
    return masked_where(condition, x, copy=copy)
def masked_equal(x, value, copy=True):
    """
    Mask an array where equal to a given value.

    Return a MaskedArray, masked where the data in array `x` are
    equal to `value`. The fill_value of the returned MaskedArray
    is set to `value`.

    For floating point arrays, consider using ``masked_values(x, value)``.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_values : Mask using floating point equality.

    Examples
    --------
    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> a = np.arange(4)
    >>> a
    array([0, 1, 2, 3])
    >>> ma.masked_equal(a, 2)
    masked_array(data=[0, 1, --, 3],
                 mask=[False, False, True, False],
           fill_value=2)

    """
    masked = masked_where(equal(x, value), x, copy=copy)
    # Unlike the other comparison shortcuts, the fill value is pinned to
    # the masked-out value itself.
    masked.fill_value = value
    return masked
def masked_inside(x, v1, v2, copy=True):
    """
    Mask an array inside a given interval.

    Shortcut to ``masked_where``, where `condition` is True for `x` inside
    the interval [v1,v2] (v1 <= x <= v2). The boundaries `v1` and `v2`
    can be given in either order.

    See Also
    --------
    masked_where : Mask where a condition is met.

    Notes
    -----
    The array `x` is prefilled with its filling value.

    Examples
    --------
    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
    >>> ma.masked_inside(x, -0.3, 0.3)
    masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1],
                 mask=[False, False, True, True, False, False],
           fill_value=1e+20)

    """
    # Normalize the interval so lower <= upper, whatever order was given.
    lower, upper = (v2, v1) if v2 < v1 else (v1, v2)
    # Compare against filled data so pre-existing masked entries don't
    # poison the comparison.
    xf = filled(x)
    inside = (xf >= lower) & (xf <= upper)
    return masked_where(inside, x, copy=copy)
def masked_outside(x, v1, v2, copy=True):
    """
    Mask an array outside a given interval.

    Shortcut to ``masked_where``, where `condition` is True for `x` outside
    the interval [v1,v2] (x < v1)|(x > v2).
    The boundaries `v1` and `v2` can be given in either order.

    See Also
    --------
    masked_where : Mask where a condition is met.

    Notes
    -----
    The array `x` is prefilled with its filling value.

    Examples
    --------
    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
    >>> ma.masked_outside(x, -0.3, 0.3)
    masked_array(data=[--, --, 0.01, 0.2, --, --],
                 mask=[ True, True, False, False, True, True],
           fill_value=1e+20)

    """
    # Normalize the interval so lower <= upper, whatever order was given.
    lower, upper = (v2, v1) if v2 < v1 else (v1, v2)
    # Compare against filled data so pre-existing masked entries don't
    # poison the comparison.
    xf = filled(x)
    outside = (xf < lower) | (xf > upper)
    return masked_where(outside, x, copy=copy)
def masked_object(x, value, copy=True, shrink=True):
    """
    Mask the array `x` where the data are exactly equal to value.

    This function is similar to `masked_values`, but only suitable
    for object arrays: for floating point, use `masked_values` instead.

    Parameters
    ----------
    x : array_like
        Array to mask
    value : object
        Comparison value
    copy : {True, False}, optional
        Whether to return a copy of `x`.
    shrink : {True, False}, optional
        Whether to collapse a mask full of False to nomask

    Returns
    -------
    result : MaskedArray
        The result of masking `x` where equal to `value`.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_equal : Mask where equal to a given value (integers).
    masked_values : Mask using floating point equality.

    Examples
    --------
    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> food = np.array(['green_eggs', 'ham'], dtype=object)
    >>> # don't eat spoiled food
    >>> eat = ma.masked_object(food, 'green_eggs')
    >>> eat
    masked_array(data=[--, 'ham'],
                 mask=[ True, False],
           fill_value='green_eggs',
                dtype=object)

    """
    # Pull the raw data and any pre-existing mask out of masked input;
    # plain array_likes have no mask to preserve.
    if isMaskedArray(x):
        data, existing_mask = x._data, x._mask
    else:
        data, existing_mask = np.asarray(x), nomask
    condition = umath.equal(data, value)
    # Combine the equality mask with whatever was already masked.
    combined = mask_or(existing_mask, make_mask(condition, shrink=shrink))
    return masked_array(x, mask=combined, copy=copy, fill_value=value)
def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
    """
    Mask using floating point equality.

    Return a MaskedArray, masked where the data in array `x` are approximately
    equal to `value`, determined using `isclose`. The default tolerances for
    `masked_values` are the same as those for `isclose`.

    For integer types, exact equality is used, in the same way as
    `masked_equal`.

    The fill_value is set to `value` and the mask is set to ``nomask`` if
    possible.

    Parameters
    ----------
    x : array_like
        Array to mask.
    value : float
        Masking value.
    rtol, atol : float, optional
        Tolerance parameters passed on to `isclose`
    copy : bool, optional
        Whether to return a copy of `x`.
    shrink : bool, optional
        Whether to collapse a mask full of False to ``nomask``.

    Returns
    -------
    result : MaskedArray
        The result of masking `x` where approximately equal to `value`.

    See Also
    --------
    masked_where : Mask where a condition is met.
    masked_equal : Mask where equal to a given value (integers).

    Examples
    --------
    >>> import numpy as np
    >>> import numpy.ma as ma
    >>> x = np.array([1, 1.1, 2, 1.1, 3])
    >>> ma.masked_values(x, 1.1)
    masked_array(data=[1.0, --, 2.0, --, 3.0],
                 mask=[False, True, False, True, False],
           fill_value=1.1)

    """
    data = filled(x, value)
    # Floating dtypes get tolerance-based matching; everything else is
    # matched exactly.
    if np.issubdtype(data.dtype, np.floating):
        matches = np.isclose(data, value, atol=atol, rtol=rtol)
    else:
        matches = umath.equal(data, value)
    result = masked_array(data, mask=matches, copy=copy, fill_value=value)
    if shrink:
        # Collapse an all-False mask down to nomask when requested.
        result.shrink_mask()
    return result
def masked_invalid(a, copy=True):
"""
Mask an array where invalid values occur (NaNs or infs).
This function is a shortcut to ``masked_where``, with
`condition` = ~(np.isfinite(a)). Any pre-existing mask is conserved.
Only applies to arrays with a dtype where NaNs or infs make sense
(i.e. floating point types), but accepts any array_like object.
See Also
--------
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = np.arange(5, dtype=float)
>>> a[2] = np.nan
>>> a[3] = np.inf
>>> a
array([ 0., 1., nan, inf, 4.])
>>> ma.masked_invalid(a)
masked_array(data=[0.0, 1.0, --, --, 4.0],
mask=[False, False, True, True, False],
fill_value=1e+20)
"""
a = np.array(a, copy=None, subok=True)
res = masked_where(~(np.isfinite(a)), a, copy=copy)
# masked_invalid previously never returned nomask as a mask and doing so
# threw off matplotlib (gh-22842). So use shrink=False:
if res._mask is nomask:
res._mask = make_mask_none(res.shape, res.dtype)
return res
###############################################################################
# Printing options #
###############################################################################
| _DomainedBinaryOperation |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_condition_evaluations.py | {
"start": 1051,
"end": 1263
} | class ____(enum.Enum):
TRUE = "TRUE"
FALSE = "FALSE"
SKIPPED = "SKIPPED"
GrapheneAssetConditionEvaluationStatus = graphene.Enum.from_enum(AssetConditionEvaluationStatus)
| AssetConditionEvaluationStatus |
python | getsentry__sentry | src/sentry/search/events/builder/sessions.py | {
"start": 432,
"end": 2379
} | class ____(BaseQueryBuilder):
filter_allowlist_fields = {"project", "project_id", "environment", "release"}
requires_organization_condition = True
organization_column: str = "org_id"
config_class = SessionsDatasetConfig
def __init__(
self,
*args: Any,
granularity: int | None = None,
extra_filter_allowlist_fields: Sequence[str] | None = None,
**kwargs: Any,
):
self._extra_filter_allowlist_fields = extra_filter_allowlist_fields or []
self.granularity = Granularity(granularity) if granularity is not None else None
super().__init__(*args, **kwargs)
def resolve_groupby(self, groupby_columns: list[str] | None = None) -> list[SelectType]:
"""
The default QueryBuilder `resolve_groupby` function needs to be overridden here because, it only adds the
columns in the groupBy clause to the query if the query has `aggregates` present in it. For this specific case
of the `sessions` dataset, the session fields are aggregates but these aggregate definitions are hidden away in
snuba so if we rely on the default QueryBuilder `resolve_groupby` method, then it won't add the requested
groupBy columns as it does not consider these fields as aggregates, and so we end up with clickhouse error that
the column is not under an aggregate function or in the `groupBy` basically.
"""
if groupby_columns is None:
return []
return list({self.resolve_column(column) for column in groupby_columns})
def default_filter_converter(self, search_filter: SearchFilter) -> WhereType | None:
name = search_filter.key.name
if name in self.filter_allowlist_fields or name in self._extra_filter_allowlist_fields:
return super().default_filter_converter(search_filter)
raise InvalidSearchQuery(f"Invalid search filter: {name}")
| SessionsV2QueryBuilder |
python | dagster-io__dagster | python_modules/libraries/dagster-databricks/dagster_databricks/components/databricks_asset_bundle/scaffolder.py | {
"start": 311,
"end": 817
} | class ____(BaseModel):
"""Parameters for scaffolding DatabricksAssetBundleComponent from Databricks asset bundle."""
databricks_config_path: str = Field(
description="Path to the databricks.yml config file",
)
databricks_workspace_host: str = Field(
description="The host of your Databricks workspace.",
)
databricks_workspace_token: str = Field(
description="The token to access your Databricks workspace.",
)
@preview
| DatabricksAssetBundleScaffoldParams |
python | PrefectHQ__prefect | src/prefect/cli/_types.py | {
"start": 1963,
"end": 7496
} | class ____(typer.Typer):
"""
Wraps commands created by `Typer` to support async functions and handle errors.
"""
console: Console
def __init__(
self,
*args: Any,
deprecated: bool = False,
deprecated_start_date: Optional[datetime] = None,
deprecated_help: str = "",
deprecated_name: str = "",
**kwargs: Any,
):
super().__init__(*args, **kwargs)
self.deprecated = deprecated
if self.deprecated:
if not deprecated_name:
raise ValueError("Provide the name of the deprecated command group.")
self.deprecated_message: str = generate_deprecation_message(
name=f"The {deprecated_name!r} command group",
start_date=deprecated_start_date,
help=deprecated_help,
)
self.console = Console(
highlight=False,
theme=Theme({"prompt.choices": "bold blue"}),
color_system="auto" if get_current_settings().cli.colors else None,
)
def add_typer(
self,
typer_instance: "PrefectTyper",
*args: Any,
no_args_is_help: bool = True,
aliases: Optional[list[str]] = None,
**kwargs: Any,
) -> None:
"""
This will cause help to be default command for all sub apps unless specifically stated otherwise, opposite of before.
"""
if aliases:
for alias in aliases:
super().add_typer(
typer_instance,
*args,
name=alias,
no_args_is_help=no_args_is_help,
hidden=True,
**kwargs,
)
return super().add_typer(
typer_instance, *args, no_args_is_help=no_args_is_help, **kwargs
)
def command(
self,
name: Optional[str] = None,
*args: Any,
aliases: Optional[List[str]] = None,
deprecated: bool = False,
deprecated_start_date: Optional[datetime] = None,
deprecated_help: str = "",
deprecated_name: str = "",
**kwargs: Any,
) -> Callable[[Callable[..., Any]], Callable[..., Any]]:
"""
Create a new command. If aliases are provided, the same command function
will be registered with multiple names.
Provide `deprecated=True` to mark the command as deprecated. If `deprecated=True`,
`deprecated_name` and `deprecated_start_date` must be provided.
"""
def wrapper(original_fn: Callable[..., Any]) -> Callable[..., Any]:
# click doesn't support async functions, so we wrap them in
# asyncio.run(). This has the advantage of keeping the function in
# the main thread, which means signal handling works for e.g. the
# server and workers. However, it means that async CLI commands can
# not directly call other async CLI commands (because asyncio.run()
# can not be called nested). In that (rare) circumstance, refactor
# the CLI command so its business logic can be invoked separately
# from its entrypoint.
if is_async_fn(original_fn):
async_fn = original_fn
@functools.wraps(original_fn)
def sync_fn(*args: Any, **kwargs: Any) -> Any:
return asyncio.run(async_fn(*args, **kwargs))
setattr(sync_fn, "aio", async_fn)
wrapped_fn = sync_fn
else:
wrapped_fn = original_fn
wrapped_fn = with_cli_exception_handling(wrapped_fn)
if deprecated:
if not deprecated_name or not deprecated_start_date:
raise ValueError(
"Provide the name of the deprecated command and a deprecation start date."
)
command_deprecated_message = generate_deprecation_message(
name=f"The {deprecated_name!r} command",
start_date=deprecated_start_date,
help=deprecated_help,
)
wrapped_fn = with_deprecated_message(command_deprecated_message)(
wrapped_fn
)
elif self.deprecated:
wrapped_fn = with_deprecated_message(self.deprecated_message)(
wrapped_fn
)
# register fn with its original name
command_decorator = super(PrefectTyper, self).command(
name=name, *args, **kwargs
)
original_command = command_decorator(wrapped_fn)
# register fn for each alias, e.g. @marvin_app.command(aliases=["r"])
if aliases:
for alias in aliases:
super(PrefectTyper, self).command(
name=alias,
*args,
**{k: v for k, v in kwargs.items() if k != "aliases"},
)(wrapped_fn)
return original_command
return wrapper
def setup_console(self, soft_wrap: bool, prompt: bool) -> None:
self.console = Console(
highlight=False,
color_system="auto" if get_current_settings().cli.colors else None,
theme=Theme({"prompt.choices": "bold blue"}),
soft_wrap=not soft_wrap,
force_interactive=prompt,
)
| PrefectTyper |
python | pdm-project__pdm | src/pdm/cli/commands/venv/backends.py | {
"start": 6023,
"end": 6626
} | class ____(VirtualenvBackend):
def pip_args(self, with_pip: bool) -> Iterable[str]:
if with_pip:
return ("--seed",)
return ()
def perform_create(self, location: Path, args: tuple[str, ...], prompt: str | None = None) -> None:
prompt_option = (f"--prompt={prompt}",) if prompt else ()
cmd = [
*self.project.core.uv_cmd,
"venv",
"-p",
str(self._resolved_interpreter.executable),
*prompt_option,
*args,
str(location),
]
self.subprocess_call(cmd)
| UvBackend |
python | explosion__spaCy | spacy/lang/mr/__init__.py | {
"start": 151,
"end": 247
} | class ____(Language):
lang = "mr"
Defaults = MarathiDefaults
__all__ = ["Marathi"]
| Marathi |
python | apache__airflow | providers/apache/pinot/src/airflow/providers/apache/pinot/hooks/pinot.py | {
"start": 10168,
"end": 13092
} | class ____(DbApiHook):
"""
Interact with Pinot Broker Query API.
This hook uses standard-SQL endpoint since PQL endpoint is soon to be deprecated.
https://docs.pinot.apache.org/users/api/querying-pinot-using-standard-sql
"""
conn_name_attr = "pinot_broker_conn_id"
default_conn_name = "pinot_broker_default"
conn_type = "pinot"
hook_name = "Pinot Broker"
supports_autocommit = False
def get_conn(self) -> Any:
"""Establish a connection to pinot broker through pinot dbapi."""
conn = self.get_connection(self.get_conn_id())
pinot_broker_conn = connect(
host=conn.host,
port=conn.port,
username=conn.login,
password=conn.password,
path=conn.extra_dejson.get("endpoint", "/query/sql"),
scheme=conn.extra_dejson.get("schema", "http"),
)
self.log.info("Get the connection to pinot broker on %s", conn.host)
return pinot_broker_conn
def get_uri(self) -> str:
"""
Get the connection uri for pinot broker.
e.g: http://localhost:9000/query/sql
"""
conn = self.get_connection(self.get_conn_id())
host = conn.host or ""
if conn.login and conn.password:
host = f"{quote_plus(conn.login)}:{quote_plus(conn.password)}@{host}"
if conn.port:
host += f":{conn.port}"
conn_type = conn.conn_type or "http"
endpoint = conn.extra_dejson.get("endpoint", "query/sql")
return f"{conn_type}://{host}/{endpoint}"
def get_records(
self, sql: str | list[str], parameters: Iterable | Mapping[str, Any] | None = None, **kwargs
) -> Any:
"""
Execute the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param parameters: The parameters to render the SQL query with.
"""
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql: str | list[str], parameters: Iterable | Mapping[str, Any] | None = None) -> Any:
"""
Execute the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param parameters: The parameters to render the SQL query with.
"""
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchone()
def set_autocommit(self, conn: Connection, autocommit: Any) -> Any:
raise NotImplementedError()
def insert_rows(
self,
table: str,
rows: str,
target_fields: str | None = None,
commit_every: int = 1000,
replace: bool = False,
**kwargs: Any,
) -> Any:
raise NotImplementedError()
| PinotDbApiHook |
python | tensorflow__tensorflow | tensorflow/python/distribute/input_lib_test.py | {
"start": 11080,
"end": 47219
} | class ____(DistributedIteratorTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu
]))
def testMultiDeviceIterInitialize(self, distribution):
if tf2.enabled():
self.skipTest("Only V1 is supported.")
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.DatasetV1.range(10)
input_workers = input_lib.InputWorkers(worker_device_pairs)
dist_dataset = input_util.get_distributed_dataset(
dataset_fn(distribute_lib.InputContext()), input_workers, distribution)
iterator = dataset_ops.make_one_shot_iterator(dist_dataset)
@def_function.function
def init_func_for_iter():
self.evaluate(iterator.initializer)
init_func_for_iter()
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
],
enable_get_next_as_optional=[True, False]))
def testOneDeviceCPU(self, input_type, api_type, iteration_type, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.Dataset.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[i] for i in range(10)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
distribution=[
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_one_gpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
)
)
def testAutoShardExplicit(self, input_type, distribution):
worker_device_pairs = [(
"/device:CPU:0",
distribution.extended.worker_devices,
)]
dataset_fn = lambda _: dataset_ops.Dataset.range(10).batch(1)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn
)
input_workers = input_lib.InputWorkers(worker_device_pairs)
distribution.extended.experimental_enable_get_next_as_optional = True
dataset = self._wrap_dataset(
input_type,
dataset_or_input_fn,
input_workers,
num_replicas_in_sync=None,
strategy=distribution)
dataset1 = input_ops.auto_shard_dataset(dataset, 2, 0)
iterator = iter(dataset1)
if len(distribution.extended.worker_devices) == 2:
expected_values = [[0, 2], [4, 6], [8]]
else:
expected_values = [[0], [2], [4], [6], [8]]
for element, expected in zip(iterator, expected_values):
local = distribution.experimental_local_results(element)
local_list = array_ops.concat(local, axis=0).numpy().tolist()
self.assertAllEqual(local_list, expected)
if len(distribution.extended.worker_devices) == 2:
expected_values = [[1, 3], [5, 7], [9]]
else:
expected_values = [[1], [3], [5], [7], [9]]
dataset2 = input_ops.auto_shard_dataset(dataset, 2, 1)
iterator = iter(dataset2)
for element, expected in zip(iterator, expected_values):
local = distribution.experimental_local_results(element)
local_list = array_ops.concat(local, axis=0).numpy().tolist()
self.assertAllEqual(local_list, expected)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[strategy_combinations.multi_worker_mirrored_2x1_cpu],
enable_get_next_as_optional=[True, False]))
def testOneDeviceCPUMultiWorker(self, input_type, api_type, iteration_type,
distribution, enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.DatasetV1.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[i] for i in range(10)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu
],
enable_get_next_as_optional=[True, False]))
def testTwoDevicesOneGPUOneCPU(self, input_type, api_type, iteration_type,
distribution, enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.Dataset.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[i, i + 1] for i in range(0, 10, 2)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[strategy_combinations.tpu_strategy],
enable_get_next_as_optional=[True, False]))
def testTPU(self, input_type, api_type, iteration_type, distribution,
enable_get_next_as_optional):
worker_device_pairs = collections.OrderedDict()
for tpu_device in distribution.extended.worker_devices:
host_device = device_util.get_host_for_device(tpu_device)
worker_device_pairs.setdefault(host_device, [])
worker_device_pairs[host_device].append(tpu_device)
worker_device_pairs = worker_device_pairs.items()
dataset_fn = lambda _: dataset_ops.Dataset.range(10)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [[i, i + 1] for i in range(0, 10, 2)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu,
],
enable_get_next_as_optional=[True, False]))
def testTupleDataset(self, input_type, api_type, iteration_type, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
def dataset_fn(ctx):
del ctx
dataset1 = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.range(10).map(lambda x: x**2)
return dataset_ops.Dataset.zip((dataset1, dataset2))
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [
[(i, i**2), (i + 1, (i + 1)**2)] for i in range(0, 10, 2)
]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call
],
enable_get_next_as_optional=[True, False]))
def testTupleDatasetMultiworker(self, input_type, api_type, iteration_type,
distribution, enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:GPU:1"])]
def dataset_fn(ctx):
del ctx
dataset1 = dataset_ops.Dataset.range(10)
dataset2 = dataset_ops.Dataset.range(10).map(lambda x: x**2)
return dataset_ops.Dataset.zip((dataset1, dataset2))
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
expected_values = [
[(i, i**2), (i + 1, (i + 1)**2)] for i in range(0, 10, 2)
]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
# Input_context is not passed in and thus no sharding.
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testIterableIterator(self, distribution):
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
input_workers = input_lib.InputWorkers(worker_device_pairs)
dataset = dataset_ops.Dataset.range(10)
dist_dataset = input_util.get_distributed_dataset(dataset, input_workers,
distribution)
iterator = iter(dist_dataset)
for i, element in enumerate(iterator):
self.assertAllEqual(distribution.experimental_local_results(element), [i])
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_two_cpus,
],
use_iterator=[False, True]))
def testIteratorAndDatasetEnumerateError(self, distribution, use_iterator):
# enumerate is not supported within tf.function for these types.
dataset = dataset_ops.Dataset.range(10).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
if use_iterator:
iterable = iter(dist_dataset)
else:
iterable = dist_dataset
@def_function.function
def enumerate_fn(iterable):
for _, batch in enumerate(iterable):
distribution.experimental_local_results(batch)
with self.assertRaises(NotImplementedError):
enumerate_fn(iterable)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_two_cpus,
]))
def testIterableIteratorError(self, distribution):
dataset = dataset_ops.Dataset.range(10).batch(2)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
iterator = iter(dist_dataset)
# Raises error when next(iterator) is called without strategy scope
with self.assertRaises(ValueError):
def replica_fn1(iterator):
return next(iterator)
distribution.run(replica_fn1, args=(iterator,))
if distribution.num_replicas_in_sync == 1:
expected_result = [[[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]], [[8, 9]]]
elif distribution.num_replicas_in_sync == 2:
expected_result = [[[0], [1]], [[2], [3]], [[4], [5]], [[6], [7]],
[[8], [9]]]
with distribution.scope():
def replica_fn2(iterator):
return iterator
result = distribution.run(replica_fn2, args=(next(iterator),))
self.assertAllEqual(
distribution.experimental_local_results(result), expected_result[0])
# Confirm default ReplicaContext also works
iterator = iter(dist_dataset)
for i, element in enumerate(iterator):
self.assertAllEqual(
distribution.experimental_local_results(element), expected_result[i])
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
drop_remainder=[True, False],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu
]))
def testUnevenDatasetBatches(self, input_type, api_type, iteration_type,
drop_remainder, distribution):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
dataset_fn = lambda _: dataset_ops.Dataset.range(9).batch( # pylint: disable=g-long-lambda
2, drop_remainder=drop_remainder)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
# The last global batch only contains data for one replica.
if drop_remainder:
expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
else:
expected_values = [[[0, 1], [2, 3]], [[4, 5], [6, 7]], [[8], []]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
drop_remainder=[True, False],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
]))
def testUnevenDatasetBatchesMultiWorker(self, input_type, api_type,
iteration_type, drop_remainder,
distribution):
# Actual devices don't matter in this test as long as the number of global
# repices is 2.
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
worker_count = multi_worker_util.worker_count(cr.cluster_spec(),
cr.task_type)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(9)
if input_type == "input_fn":
# When input_fn is used, there is no automatic rebatching and sharding,
# so we add them here.
return dataset.shard(worker_count, id_in_cluster).batch(1)
else:
return dataset.batch(2, drop_remainder=drop_remainder)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
if drop_remainder and input_type == "dataset":
if id_in_cluster == 0:
expected_values = [[[0]], [[2]], [[4]], [[6]]]
else:
expected_values = [[[1]], [[3]], [[5]], [[7]]]
else:
# The last global batch only contains data for one replica.
if id_in_cluster == 0:
expected_values = [[[0]], [[2]], [[4]], [[6]], [[8]]]
else:
expected_values = [[[1]], [[3]], [[5]], [[7]], [[]]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["input_fn", "dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
drop_remainder=[True, False],
distribution=[
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call
]))
def testUnevenDatasetBatchesMultiWorkerFourReplicas(self, input_type,
api_type, iteration_type,
drop_remainder,
distribution):
# Actual devices don't matter in this test as long as the number of global
# repices is 2.
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:GPU:1"])]
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
worker_count = multi_worker_util.worker_count(cr.cluster_spec(),
cr.task_type)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(15)
if input_type == "input_fn":
# When input_fn is used, there is no automatic rebatching and sharding,
# so we add them here.
return dataset.shard(worker_count, id_in_cluster).batch(1)
else:
return dataset.batch(4, drop_remainder=drop_remainder)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
# The last global batch only contains data for one replica.
if drop_remainder and input_type == "dataset":
if id_in_cluster == 0:
expected_values = [[[0], [2]], [[4], [6]], [[8], [10]]]
else:
expected_values = [[[1], [3]], [[5], [7]], [[9], [11]]]
else:
if id_in_cluster == 0:
expected_values = [[[0], [2]], [[4], [6]], [[8], [10]], [[12], [14]]]
else:
expected_values = [[[1], [3]], [[5], [7]], [[9], [11]], [[13], []]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
num_replicas_in_sync=distribution.num_replicas_in_sync,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["graph", "eager"],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
num_replicas_in_sync=[None, 2],
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.central_storage_strategy_with_gpu_and_cpu
],
enable_get_next_as_optional=[True, False]))
def testBatchSplitting(self, input_type, api_type, iteration_type,
num_replicas_in_sync, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:CPU:0"])]
batch_size = 10
dataset_fn = lambda _: dataset_ops.Dataset.range(100).batch(batch_size)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
updated_batch_size = (
batch_size //
num_replicas_in_sync if num_replicas_in_sync else batch_size)
expected_values = [[
range(i, i + updated_batch_size),
range(i + updated_batch_size, i + 2 * updated_batch_size)
] for i in range(0, 100, updated_batch_size * 2)]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
sess=None,
num_replicas_in_sync=num_replicas_in_sync)
@combinations.generate(
combinations.combine(
mode=["eager"],
input_type=["dataset"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
num_replicas_in_sync=[None, 2],
distribution=[
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call
],
enable_get_next_as_optional=[True, False]))
def testBatchSplittingMultiWorker(self, input_type, api_type, iteration_type,
num_replicas_in_sync, distribution,
enable_get_next_as_optional):
worker_device_pairs = [("/device:CPU:0", ["/device:GPU:0",
"/device:GPU:1"])]
batch_size = 10
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
def dataset_fn(_):
dataset = dataset_ops.Dataset.range(100).batch(batch_size)
return dataset
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
updated_batch_size = (
batch_size //
num_replicas_in_sync if num_replicas_in_sync else batch_size)
expected_values = [
[ # pylint: disable=g-complex-comprehension
range(i, i + updated_batch_size),
range(i + updated_batch_size, i + 2 * updated_batch_size)
] for i in range(0, 100, updated_batch_size * 2)
]
distribution.extended.experimental_enable_get_next_as_optional = (
enable_get_next_as_optional)
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
sess=None,
num_replicas_in_sync=num_replicas_in_sync)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
))
def testCacheAcrossIteration(self, distribution):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
dataset = dataset_ops.Dataset.range(16).shuffle(16).cache().batch(4)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
first_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
second_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
self.assertAllEqual(first_epoch, second_epoch)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
reshuffle=[True, False]))
def testShuffleAcrossIterations(self, distribution, reshuffle):
if not tf2.enabled():
self.skipTest("Only V2 is supported.")
dataset = dataset_ops.Dataset.range(12).shuffle(
12, reshuffle_each_iteration=reshuffle).batch(4)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
first_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
second_epoch = list(
distribution.experimental_local_results(x) for x in dist_dataset)
if reshuffle:
self.assertNotAllEqual(first_epoch, second_epoch)
else:
self.assertAllEqual(first_epoch, second_epoch)
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testGetNextOptionalShapeFinite(self, distribution):
batch_size = 8
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"feature": array_ops.ones([batch_size, 10]),
"label": array_ops.ones([batch_size]),
})
dataset = dataset.batch(batch_size, drop_remainder=True)
dist_dataset = distribution.experimental_distribute_dataset(dataset)
@def_function.function
def train_fn():
for data in dist_dataset:
data = nest.map_structure(distribution.experimental_local_results, data)
feature = data["feature"]
label = data["label"]
# Assert the shapes are still static from all replicas.
for replica_id in range(len(distribution.extended.worker_devices)):
self.assertEqual([None, 10],
feature[replica_id].shape.as_list())
self.assertEqual([None], label[replica_id].shape.as_list())
train_fn()
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testGetNextOptionalShapeInfinite(self, distribution):
batch_size = 8
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"feature": array_ops.ones([batch_size, 10]),
"label": array_ops.ones([batch_size]),
})
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.repeat()
dist_dataset = distribution.experimental_distribute_dataset(dataset)
per_replica_batch_size = batch_size // distribution.num_replicas_in_sync
@def_function.function
def train_fn():
data = iter(dist_dataset).get_next_as_optional().get_value()
data = nest.map_structure(distribution.experimental_local_results, data)
feature = data["feature"]
label = data["label"]
# Assert the shapes are still static from all replicas.
for replica_id in range(len(distribution.extended.worker_devices)):
self.assertEqual([per_replica_batch_size, 10],
feature[replica_id].shape.as_list())
self.assertEqual([per_replica_batch_size],
label[replica_id].shape.as_list())
train_fn()
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.one_device_strategy,
strategy_combinations.mirrored_strategy_with_one_cpu,
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.central_storage_strategy_with_two_gpus,
strategy_combinations.multi_worker_mirrored_2x2_gpu,
strategy_combinations.multi_worker_mirrored_2x2_gpu_no_merge_call,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
]))
def testGetNextOptionalShapeEmpty(self, distribution):
batch_size = 8
dataset = dataset_ops.DatasetV2.from_tensor_slices({
"feature": array_ops.ones([batch_size, 10]),
"label": array_ops.ones([batch_size]),
})
dataset = dataset.batch(batch_size, drop_remainder=True)
dataset = dataset.repeat()
dist_dataset = distribution.experimental_distribute_dataset(dataset)
per_replica_batch_size = batch_size // distribution.num_replicas_in_sync
@def_function.function
def train_fn():
data = iter(dist_dataset).get_next_as_optional()
feature_specs = data.element_spec["feature"]._component_specs
value_specs = data.element_spec["label"]._component_specs
if not isinstance(feature_specs, tuple):
feature_specs = (feature_specs,)
value_specs = (value_specs,)
# Assert the shapes are still static from all replicas.
for replica_id in range(len(distribution.extended.worker_devices)):
self.assertEqual([per_replica_batch_size, 10],
feature_specs[replica_id].shape.as_list())
self.assertEqual([per_replica_batch_size],
value_specs[replica_id].shape.as_list())
train_fn()
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
input_type=["dataset"],
api_type=["wrap_into_iterator", "wrap_into_dataset"],
iteration_type=["get_next", "for_loop"],
auto_shard_policy=[AutoShardPolicy.AUTO, AutoShardPolicy.OFF]))
def testAutoshardingOption(self, distribution, input_type, api_type,
iteration_type, auto_shard_policy):
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
ds_option = options_lib.Options()
ds_option.experimental_distribute.auto_shard_policy = auto_shard_policy
dataset_fn = (
lambda _: dataset_ops.Dataset.range(4).with_options(ds_option))
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
if auto_shard_policy == AutoShardPolicy.AUTO:
if id_in_cluster == 0:
expected_values = [[0], [2]]
else:
expected_values = [[1], [3]]
else:
expected_values = [[0], [1], [2], [3]]
self._test_input_iteration(
input_type,
api_type,
iteration_type,
dataset_or_input_fn,
worker_device_pairs,
expected_values,
distribution,
input_context=distribution.extended._make_input_context())
@combinations.generate(
combinations.combine(
mode=["eager"],
distribution=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
],
input_type=["input_fn"],
api_type=["wrap_into_dataset"],
iteration_type=["get_next", "for_loop"]))
def testDifferentDatasetsMultiWorker(self, distribution, input_type, api_type,
iteration_type):
cr = distribution.cluster_resolver
self.assertIsNotNone(cr)
id_in_cluster = multi_worker_util.id_in_cluster(cr.cluster_spec(),
cr.task_type, cr.task_id)
def dataset_fn(ctx):
if ctx.input_pipeline_id == 0:
return dataset_ops.Dataset.range(8).batch(2)
else:
return dataset_ops.Dataset.range(9).batch(2)
dataset_or_input_fn = self._create_dataset_or_input_fn(
input_type, dataset_fn)
worker_device_pairs = [("/device:CPU:0", ["/device:CPU:0"])]
if id_in_cluster == 0:
expected_values = [[[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]], [[]]]
else:
expected_values = [[[0, 1]], [[2, 3]], [[4, 5]], [[6, 7]], [[8]]]
distribution.extended.experimental_enable_get_next_as_optional = True
self._test_input_iteration(input_type, api_type, iteration_type,
dataset_or_input_fn, worker_device_pairs,
expected_values, distribution)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=["eager"]))
def testLoopOverDatasetInTFFunction(self, strategy):
dataset = dataset_ops.Dataset.range(10).map(lambda x: { # pylint: disable=g-long-lambda
"y": math_ops.cast(x, dtypes.float32) ** 2,
}).batch(4)
dist_dataset = strategy.experimental_distribute_dataset(dataset)
with strategy.scope():
v = variables.Variable(0.0, aggregation=variables.VariableAggregation.SUM)
@def_function.function
def iterator_fn(dist_dataset):
def assign_add_fn(data):
v.assign_add(math_ops.reduce_sum(data["y"]))
for data in dist_dataset:
strategy.run(assign_add_fn, args=(data,))
iterator_fn(dist_dataset)
self.assertEqual(v.numpy(), 285.0)
| DistributedIteratorTest |
python | getsentry__sentry | src/sentry/api/serializers/models/userrollback.py | {
"start": 427,
"end": 1067
} | class ____(Serializer):
def serialize(self, obj, attrs, user, **kwargs) -> RollbackSerializerResponse:
rollback_org = kwargs.get("rollback_org")
return {
"organization": {
"id": obj.organization.id,
"name": obj.organization.name,
"slug": obj.organization.slug,
},
"user": {
"id": obj.user_id,
"name": user.name,
},
"data": {
"user": obj.data,
"organization": rollback_org.data if rollback_org else None,
},
}
| UserRollbackSerializer |
python | ray-project__ray | python/ray/data/dataset.py | {
"start": 280014,
"end": 280875
} | class ____(Dataset, Generic[T]):
"""A Dataset materialized in Ray memory, e.g., via `.materialize()`.
The blocks of a MaterializedDataset object are materialized into Ray object store
memory, which means that this class can be shared or iterated over by multiple Ray
tasks without re-executing the underlying computations for producing the stream.
"""
def num_blocks(self) -> int:
"""Return the number of blocks of this :class:`MaterializedDataset`.
Examples:
>>> import ray
>>> ds = ray.data.range(100).repartition(10).materialize()
>>> ds.num_blocks()
10
Time complexity: O(1)
Returns:
The number of blocks of this :class:`Dataset`.
"""
return self._plan.initial_num_blocks()
@PublicAPI(stability="beta")
| MaterializedDataset |
python | pypa__pip | src/pip/_vendor/packaging/version.py | {
"start": 4507,
"end": 12379
} | class ____(_BaseVersion):
"""This class abstracts handling of a project's versions.
A :class:`Version` instance is comparison aware and can be compared and
sorted using the standard Python interfaces.
>>> v1 = Version("1.0a5")
>>> v2 = Version("1.0")
>>> v1
<Version('1.0a5')>
>>> v2
<Version('1.0')>
>>> v1 < v2
True
>>> v1 == v2
False
>>> v1 > v2
False
>>> v1 >= v2
False
>>> v1 <= v2
True
"""
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
_key: CmpKey
def __init__(self, version: str) -> None:
"""Initialize a Version object.
:param version:
The string representation of a version which will be parsed and normalized
before use.
:raises InvalidVersion:
If the ``version`` does not conform to PEP 440 in any way then this
exception will be raised.
"""
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion(f"Invalid version: {version!r}")
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self) -> str:
"""A representation of the Version that shows all internal state.
>>> Version('1.0.0')
<Version('1.0.0')>
"""
return f"<Version('{self}')>"
def __str__(self) -> str:
"""A string representation of the version that can be round-tripped.
>>> str(Version("1.0a5"))
'1.0a5'
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(f".post{self.post}")
# Development release
if self.dev is not None:
parts.append(f".dev{self.dev}")
# Local version segment
if self.local is not None:
parts.append(f"+{self.local}")
return "".join(parts)
@property
def epoch(self) -> int:
"""The epoch of the version.
>>> Version("2.0.0").epoch
0
>>> Version("1!2.0.0").epoch
1
"""
return self._version.epoch
@property
def release(self) -> tuple[int, ...]:
"""The components of the "release" segment of the version.
>>> Version("1.2.3").release
(1, 2, 3)
>>> Version("2.0.0").release
(2, 0, 0)
>>> Version("1!2.0.0.post0").release
(2, 0, 0)
Includes trailing zeroes but not the epoch or any pre-release / development /
post-release suffixes.
"""
return self._version.release
@property
def pre(self) -> tuple[str, int] | None:
"""The pre-release segment of the version.
>>> print(Version("1.2.3").pre)
None
>>> Version("1.2.3a1").pre
('a', 1)
>>> Version("1.2.3b1").pre
('b', 1)
>>> Version("1.2.3rc1").pre
('rc', 1)
"""
return self._version.pre
@property
def post(self) -> int | None:
"""The post-release number of the version.
>>> print(Version("1.2.3").post)
None
>>> Version("1.2.3.post1").post
1
"""
return self._version.post[1] if self._version.post else None
@property
def dev(self) -> int | None:
"""The development number of the version.
>>> print(Version("1.2.3").dev)
None
>>> Version("1.2.3.dev1").dev
1
"""
return self._version.dev[1] if self._version.dev else None
@property
def local(self) -> str | None:
"""The local version segment of the version.
>>> print(Version("1.2.3").local)
None
>>> Version("1.2.3+abc").local
'abc'
"""
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
@property
def public(self) -> str:
"""The public portion of the version.
>>> Version("1.2.3").public
'1.2.3'
>>> Version("1.2.3+abc").public
'1.2.3'
>>> Version("1!1.2.3dev1+abc").public
'1!1.2.3.dev1'
"""
return str(self).split("+", 1)[0]
@property
def base_version(self) -> str:
"""The "base version" of the version.
>>> Version("1.2.3").base_version
'1.2.3'
>>> Version("1.2.3+abc").base_version
'1.2.3'
>>> Version("1!1.2.3dev1+abc").base_version
'1!1.2.3'
The "base version" is the public version of the project without any pre or post
release markers.
"""
parts = []
# Epoch
if self.epoch != 0:
parts.append(f"{self.epoch}!")
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
@property
def is_prerelease(self) -> bool:
"""Whether this version is a pre-release.
>>> Version("1.2.3").is_prerelease
False
>>> Version("1.2.3a1").is_prerelease
True
>>> Version("1.2.3b1").is_prerelease
True
>>> Version("1.2.3rc1").is_prerelease
True
>>> Version("1.2.3dev1").is_prerelease
True
"""
return self.dev is not None or self.pre is not None
@property
def is_postrelease(self) -> bool:
"""Whether this version is a post-release.
>>> Version("1.2.3").is_postrelease
False
>>> Version("1.2.3.post1").is_postrelease
True
"""
return self.post is not None
@property
def is_devrelease(self) -> bool:
"""Whether this version is a development release.
>>> Version("1.2.3").is_devrelease
False
>>> Version("1.2.3.dev1").is_devrelease
True
"""
return self.dev is not None
@property
def major(self) -> int:
"""The first item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").major
1
"""
return self.release[0] if len(self.release) >= 1 else 0
@property
def minor(self) -> int:
"""The second item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").minor
2
>>> Version("1").minor
0
"""
return self.release[1] if len(self.release) >= 2 else 0
@property
def micro(self) -> int:
"""The third item of :attr:`release` or ``0`` if unavailable.
>>> Version("1.2.3").micro
3
>>> Version("1").micro
0
"""
return self.release[2] if len(self.release) >= 3 else 0
| Version |
python | ApeWorX__ape | src/ape_ethereum/query.py | {
"start": 323,
"end": 5370
} | class ____(QueryAPI):
"""
Implements more advanced queries specific to Ethereum clients.
"""
def __init__(self):
self.supports_contract_creation = None # will be set after we try for the first time
@singledispatchmethod
def estimate_query(self, query: QueryType) -> Optional[int]: # type: ignore[override]
return None
@singledispatchmethod
def perform_query(self, query: QueryType) -> Iterator: # type: ignore[override]
raise QueryEngineError(f"Cannot handle '{type(query)}'.")
@estimate_query.register
def estimate_contract_creation_query(self, query: ContractCreationQuery) -> Optional[int]:
# NOTE: Extremely expensive query, involves binary search of all blocks in a chain
# Very loose estimate of 5s per transaction for this query.
if self.supports_contract_creation is False:
return None
return 5000
@perform_query.register
def perform_contract_creation_query(
self, query: ContractCreationQuery
) -> Iterator[ContractCreation]:
"""
Find when a contract was deployed using binary search and block tracing.
"""
# skip the search if there is still no code at address at head
if not self.chain_manager.get_code(query.contract):
return None
def find_creation_block(lo, hi):
# perform a binary search to find the block when the contract was deployed.
# takes log2(height), doesn't work with contracts that have been reinit.
while hi - lo > 1:
mid = (lo + hi) // 2
code = self.chain_manager.get_code(query.contract, block_id=mid)
if not code:
lo = mid
else:
hi = mid
if self.chain_manager.get_code(query.contract, block_id=hi):
return hi
return None
try:
block = find_creation_block(0, self.chain_manager.blocks.height)
except ProviderError:
self.supports_contract_creation = False
return None
# iterate over block transaction traces to find the deployment call
# this method also supports contracts created by factories
try:
if "geth" in self.provider.client_version.lower():
yield from self._find_creation_in_block_via_geth(block, query.contract)
else:
yield from self._find_creation_in_block_via_parity(block, query.contract)
except (ProviderError, APINotImplementedError):
self.supports_contract_creation = False
return None
self.supports_contract_creation = True
def _find_creation_in_block_via_parity(self, block, contract_address):
# NOTE requires `trace_` namespace
traces = self.provider.make_request("trace_replayBlockTransactions", [block, ["trace"]])
for tx in traces:
for trace in tx["trace"]:
if (
"error" not in trace
and trace["type"] == "create"
and trace["result"]["address"] == contract_address.lower()
):
receipt = self.chain_manager.get_receipt(tx["transactionHash"])
creator = self.conversion_manager.convert(trace["action"]["from"], AddressType)
yield ContractCreation(
txn_hash=tx["transactionHash"],
block=block,
deployer=receipt.sender,
factory=creator if creator != receipt.sender else None,
)
def _find_creation_in_block_via_geth(self, block, contract_address):
# NOTE requires `debug_` namespace
traces = self.provider.make_request(
"debug_traceBlockByNumber", [hex(block), {"tracer": "callTracer"}]
)
def flatten(call):
if call["type"] in ["CREATE", "CREATE2"]:
yield call["from"], call["to"]
if "error" in call or "calls" not in call:
return
for sub in call["calls"]:
if sub["type"] in ["CREATE", "CREATE2"]:
yield sub["from"], sub["to"]
else:
yield from flatten(sub)
for tx in traces:
call = tx["result"]
sender = call["from"]
for factory, contract in flatten(call):
if contract == contract_address.lower():
yield ContractCreation(
txn_hash=tx["txHash"],
block=block,
deployer=self.conversion_manager.convert(sender, AddressType),
factory=(
self.conversion_manager.convert(factory, AddressType)
if factory != sender
else None
),
)
| EthereumQueryProvider |
python | pandas-dev__pandas | pandas/core/groupby/groupby.py | {
"start": 73,
"end": 12472
} | class ____ the base-class of operations.
The SeriesGroupBy and DataFrameGroupBy sub-class
(defined in pandas.core.groupby.generic)
expose these user-facing objects to provide specific functionality.
"""
from __future__ import annotations
from collections.abc import (
Callable,
Hashable,
Iterable,
Iterator,
Mapping,
Sequence,
)
import datetime
from functools import (
partial,
wraps,
)
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Concatenate,
Literal,
Self,
TypeAlias,
TypeVar,
Union,
cast,
final,
overload,
)
import warnings
import numpy as np
from pandas._libs import (
Timestamp,
lib,
)
from pandas._libs.algos import rank_1d
import pandas._libs.groupby as libgroupby
from pandas._libs.missing import NA
from pandas._typing import (
AnyArrayLike,
ArrayLike,
DtypeObj,
IndexLabel,
IntervalClosedType,
NDFrameT,
PositionalIndexer,
RandomState,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.errors import (
AbstractMethodError,
DataError,
Pandas4Warning,
)
from pandas.util._decorators import (
Appender,
Substitution,
cache_readonly,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.cast import (
coerce_indexer_dtype,
ensure_dtype_can_hold_na,
)
from pandas.core.dtypes.common import (
is_bool,
is_bool_dtype,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_string_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.missing import (
isna,
na_value_for_dtype,
notna,
)
from pandas.core import (
algorithms,
sample,
)
from pandas.core._numba import executor
from pandas.core.arrays import (
ArrowExtensionArray,
BaseMaskedArray,
ExtensionArray,
FloatingArray,
IntegerArray,
SparseArray,
)
from pandas.core.arrays.string_ import StringDtype
from pandas.core.arrays.string_arrow import ArrowStringArray
from pandas.core.base import (
PandasObject,
SelectionMixin,
)
import pandas.core.common as com
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import (
base,
numba_,
ops,
)
from pandas.core.groupby.grouper import get_grouper
from pandas.core.groupby.indexing import (
GroupByIndexingMixin,
GroupByNthSelector,
)
from pandas.core.indexes.api import (
Index,
MultiIndex,
default_index,
)
from pandas.core.internals.blocks import ensure_block_shape
from pandas.core.series import Series
from pandas.core.sorting import get_group_index_sorter
from pandas.core.util.numba_ import (
get_jit_arguments,
maybe_use_numba,
prepare_function_arguments,
)
if TYPE_CHECKING:
from pandas._libs.tslibs import BaseOffset
from pandas._libs.tslibs.timedeltas import Timedelta
from pandas._typing import (
Any,
P,
T,
)
from pandas.core.indexers.objects import BaseIndexer
from pandas.core.resample import Resampler
from pandas.core.window import (
ExpandingGroupby,
ExponentialMovingWindowGroupby,
RollingGroupby,
)
_common_see_also = """
See Also
--------
Series.%(name)s : Apply a function %(name)s to a Series.
DataFrame.%(name)s : Apply a function %(name)s
to each row or column of a DataFrame.
"""
_groupby_agg_method_engine_template = """
Compute {fname} of group values.
Parameters
----------
numeric_only : bool, default {no}
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None``.
min_count : int, default {mc}
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
engine : str, default None {e}
* ``'cython'`` : Runs rolling apply through C-extensions from cython.
* ``'numba'`` : Runs rolling apply through JIT compiled code from numba.
Only available when ``raw`` is set to ``True``.
* ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
engine_kwargs : dict, default None {ek}
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
applied to both the ``func`` and the ``apply`` groupby aggregation.
Returns
-------
Series or DataFrame
Computed {fname} of values within each group.
See Also
--------
SeriesGroupBy.min : Return the min of the group values.
DataFrameGroupBy.min : Return the min of the group values.
SeriesGroupBy.max : Return the max of the group values.
DataFrameGroupBy.max : Return the max of the group values.
SeriesGroupBy.sum : Return the sum of the group values.
DataFrameGroupBy.sum : Return the sum of the group values.
Examples
--------
{example}
"""
_groupby_agg_method_skipna_engine_template = """
Compute {fname} of group values.
Parameters
----------
numeric_only : bool, default {no}
Include only float, int, boolean columns.
.. versionchanged:: 2.0.0
numeric_only no longer accepts ``None``.
min_count : int, default {mc}
The required number of valid values to perform the operation. If fewer
than ``min_count`` non-NA values are present the result will be NA.
skipna : bool, default {s}
Exclude NA/null values. If the entire group is NA and ``skipna`` is
``True``, the result will be NA.
.. versionchanged:: 3.0.0
engine : str, default None {e}
* ``'cython'`` : Runs rolling apply through C-extensions from cython.
* ``'numba'`` : Runs rolling apply through JIT compiled code from numba.
Only available when ``raw`` is set to ``True``.
* ``None`` : Defaults to ``'cython'`` or globally setting ``compute.use_numba``
engine_kwargs : dict, default None {ek}
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{{'nopython': True, 'nogil': False, 'parallel': False}}`` and will be
applied to both the ``func`` and the ``apply`` groupby aggregation.
Returns
-------
Series or DataFrame
Computed {fname} of values within each group.
See Also
--------
SeriesGroupBy.min : Return the min of the group values.
DataFrameGroupBy.min : Return the min of the group values.
SeriesGroupBy.max : Return the max of the group values.
DataFrameGroupBy.max : Return the max of the group values.
SeriesGroupBy.sum : Return the sum of the group values.
DataFrameGroupBy.sum : Return the sum of the group values.
Examples
--------
{example}
"""
_pipe_template = """
Apply a ``func`` with arguments to this %(klass)s object and return its result.
Use `.pipe` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h = lambda x, arg2, arg3: x + 1 - arg2 * arg3
>>> g = lambda x, arg1: x * 5 / arg1
>>> f = lambda x: x ** 4
>>> df = pd.DataFrame([["a", 4], ["b", 5]], columns=["group", "value"])
>>> h(g(f(df.groupby('group')), arg1=1), arg2=2, arg3=3) # doctest: +SKIP
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=1)
... .pipe(h, arg2=2, arg3=3)) # doctest: +SKIP
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, str)
Function to apply to this %(klass)s object or, alternatively,
a `(callable, data_keyword)` tuple where `data_keyword` is a
string indicating the keyword of `callable` that expects the
%(klass)s object.
*args : iterable, optional
Positional arguments passed into `func`.
**kwargs : dict, optional
A dictionary of keyword arguments passed into `func`.
Returns
-------
%(klass)s
The original object with the function `func` applied.
See Also
--------
Series.pipe : Apply a function with arguments to a series.
DataFrame.pipe: Apply a function with arguments to a dataframe.
apply : Apply function to each group instead of to the
full %(klass)s object.
Notes
-----
See more `here
<https://pandas.pydata.org/pandas-docs/stable/user_guide/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
"""
_transform_template = """
Call function producing a same-indexed %(klass)s on each group.
Returns a %(klass)s having the same indexes as the original object
filled with the transformed values.
Parameters
----------
func : function, str
Function to apply to each group. See the Notes section below for requirements.
Accepted inputs are:
- String
- Python function
- Numba JIT function with ``engine='numba'`` specified.
Only passing a single function is supported with this engine.
If the ``'numba'`` engine is chosen, the function must be
a user defined function with ``values`` and ``index`` as the
first and second arguments respectively in the function signature.
Each group's index will be passed to the user defined function
and optionally available for use.
If a string is chosen, then it needs to be the name
of the groupby method you want to use.
*args
Positional arguments to pass to func.
engine : str, default None
* ``'cython'`` : Runs the function through C-extensions from cython.
* ``'numba'`` : Runs the function through JIT compiled code from numba.
* ``None`` : Defaults to ``'cython'`` or the global setting ``compute.use_numba``
engine_kwargs : dict, default None
* For ``'cython'`` engine, there are no accepted ``engine_kwargs``
* For ``'numba'`` engine, the engine can accept ``nopython``, ``nogil``
and ``parallel`` dictionary keys. The values must either be ``True`` or
``False``. The default ``engine_kwargs`` for the ``'numba'`` engine is
``{'nopython': True, 'nogil': False, 'parallel': False}`` and will be
applied to the function
**kwargs
Keyword arguments to be passed into func.
Returns
-------
%(klass)s
%(klass)s with the same indexes as the original object filled
with transformed values.
See Also
--------
%(klass)s.groupby.apply : Apply function ``func`` group-wise and combine
the results together.
%(klass)s.groupby.aggregate : Aggregate using one or more operations.
%(klass)s.transform : Call ``func`` on self producing a %(klass)s with the
same axis shape as self.
Notes
-----
Each group is endowed the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
For example, if `f` returns a scalar it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results. See :ref:`gotchas.udf-mutation` for more details.
When using ``engine='numba'``, there will be no "fall back" behavior internally.
The group data and group index will be passed as numpy arrays to the JITed
user defined function, and no alternative execution attempts will be tried.
The resulting dtype will reflect the return value of the passed ``func``,
see the examples below.
.. versionchanged:: 2.0.0
When using ``.transform`` on a grouped DataFrame and the transformation function
returns a DataFrame, pandas now aligns the result's index
with the input's index. You can call ``.to_numpy()`` on the
result of the transformation function to avoid alignment.
Examples
--------
%(example)s"""
@final
| providing |
python | google__python-fire | fire/fire_test.py | {
"start": 738,
"end": 29218
} | class ____(testutils.BaseTestCase):
def testFire(self):
with mock.patch.object(sys, 'argv', ['progname']):
fire.Fire(tc.Empty)
fire.Fire(tc.OldStyleEmpty)
fire.Fire(tc.WithInit)
# Test both passing command as a sequence and as a string.
self.assertEqual(fire.Fire(tc.NoDefaults, command='triple 4'), 12)
self.assertEqual(fire.Fire(tc.WithDefaults, command=('double', '2')), 4)
self.assertEqual(fire.Fire(tc.WithDefaults, command=['triple', '4']), 12)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults,
command=['double', '2']), 4)
self.assertEqual(fire.Fire(tc.OldStyleWithDefaults,
command=['triple', '4']), 12)
def testFirePositionalCommand(self):
# Test passing command as a positional argument.
self.assertEqual(fire.Fire(tc.NoDefaults, 'double 2'), 4)
self.assertEqual(fire.Fire(tc.NoDefaults, ['double', '2']), 4)
def testFireInvalidCommandArg(self):
with self.assertRaises(ValueError):
# This is not a valid command.
fire.Fire(tc.WithDefaults, command=10)
def testFireDefaultName(self):
with mock.patch.object(sys, 'argv',
[os.path.join('python-fire', 'fire',
'base_filename.py')]):
with self.assertOutputMatches(stdout='SYNOPSIS.*base_filename.py',
stderr=None):
fire.Fire(tc.Empty)
def testFireNoArgs(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, command=['ten']), 10)
def testFireExceptions(self):
# Exceptions of Fire are printed to stderr and a FireExit is raised.
with self.assertRaisesFireExit(2):
fire.Fire(tc.Empty, command=['nomethod']) # Member doesn't exist.
with self.assertRaisesFireExit(2):
fire.Fire(tc.NoDefaults, command=['double']) # Missing argument.
with self.assertRaisesFireExit(2):
fire.Fire(tc.TypedProperties, command=['delta', 'x']) # Missing key.
# Exceptions of the target components are still raised.
with self.assertRaises(ZeroDivisionError):
fire.Fire(tc.NumberDefaults, command=['reciprocal', '0.0'])
def testFireNamedArgs(self):
self.assertEqual(fire.Fire(tc.WithDefaults,
command=['double', '--count', '5']), 10)
self.assertEqual(fire.Fire(tc.WithDefaults,
command=['triple', '--count', '5']), 15)
self.assertEqual(
fire.Fire(tc.OldStyleWithDefaults, command=['double', '--count', '5']),
10)
self.assertEqual(
fire.Fire(tc.OldStyleWithDefaults, command=['triple', '--count', '5']),
15)
def testFireNamedArgsSingleHyphen(self):
self.assertEqual(fire.Fire(tc.WithDefaults,
command=['double', '-count', '5']), 10)
self.assertEqual(fire.Fire(tc.WithDefaults,
command=['triple', '-count', '5']), 15)
self.assertEqual(
fire.Fire(tc.OldStyleWithDefaults, command=['double', '-count', '5']),
10)
self.assertEqual(
fire.Fire(tc.OldStyleWithDefaults, command=['triple', '-count', '5']),
15)
def testFireNamedArgsWithEquals(self):
self.assertEqual(fire.Fire(tc.WithDefaults,
command=['double', '--count=5']), 10)
self.assertEqual(fire.Fire(tc.WithDefaults,
command=['triple', '--count=5']), 15)
def testFireNamedArgsWithEqualsSingleHyphen(self):
self.assertEqual(fire.Fire(tc.WithDefaults,
command=['double', '-count=5']), 10)
self.assertEqual(fire.Fire(tc.WithDefaults,
command=['triple', '-count=5']), 15)
def testFireAllNamedArgs(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, command=['sum', '1', '2']), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults,
command=['sum', '--alpha', '1', '2']), 5)
self.assertEqual(fire.Fire(tc.MixedDefaults,
command=['sum', '--beta', '1', '2']), 4)
self.assertEqual(fire.Fire(tc.MixedDefaults,
command=['sum', '1', '--alpha', '2']), 4)
self.assertEqual(fire.Fire(tc.MixedDefaults,
command=['sum', '1', '--beta', '2']), 5)
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['sum', '--alpha', '1', '--beta', '2']), 5)
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['sum', '--beta', '1', '--alpha', '2']), 4)
def testFireAllNamedArgsOneMissing(self):
self.assertEqual(fire.Fire(tc.MixedDefaults, command=['sum']), 0)
self.assertEqual(fire.Fire(tc.MixedDefaults, command=['sum', '1']), 1)
self.assertEqual(fire.Fire(tc.MixedDefaults,
command=['sum', '--alpha', '1']), 1)
self.assertEqual(fire.Fire(tc.MixedDefaults,
command=['sum', '--beta', '2']), 4)
def testFirePartialNamedArgs(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults, command=['identity', '1', '2']), (1, 2))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '--alpha', '1', '2']), (1, 2))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '--beta', '1', '2']), (2, 1))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '1', '--alpha', '2']), (2, 1))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '1', '--beta', '2']), (1, 2))
self.assertEqual(
fire.Fire(
tc.MixedDefaults,
command=['identity', '--alpha', '1', '--beta', '2']), (1, 2))
self.assertEqual(
fire.Fire(
tc.MixedDefaults,
command=['identity', '--beta', '1', '--alpha', '2']), (2, 1))
def testFirePartialNamedArgsOneMissing(self):
# Errors are written to standard out and a FireExit is raised.
with self.assertRaisesFireExit(2):
fire.Fire(tc.MixedDefaults,
command=['identity']) # Identity needs an arg.
with self.assertRaisesFireExit(2):
# Identity needs a value for alpha.
fire.Fire(tc.MixedDefaults, command=['identity', '--beta', '2'])
self.assertEqual(
fire.Fire(tc.MixedDefaults, command=['identity', '1']), (1, '0'))
self.assertEqual(
fire.Fire(tc.MixedDefaults, command=['identity', '--alpha', '1']),
(1, '0'))
def testFireAnnotatedArgs(self):
self.assertEqual(fire.Fire(tc.Annotations, command=['double', '5']), 10)
self.assertEqual(fire.Fire(tc.Annotations, command=['triple', '5']), 15)
def testFireKeywordOnlyArgs(self):
with self.assertRaisesFireExit(2):
# Keyword arguments must be passed with flag syntax.
fire.Fire(tc.py3.KeywordOnly, command=['double', '5'])
self.assertEqual(
fire.Fire(tc.py3.KeywordOnly, command=['double', '--count', '5']), 10)
self.assertEqual(
fire.Fire(tc.py3.KeywordOnly, command=['triple', '--count', '5']), 15)
def testFireProperties(self):
self.assertEqual(fire.Fire(tc.TypedProperties, command=['alpha']), True)
self.assertEqual(fire.Fire(tc.TypedProperties, command=['beta']), (1, 2, 3))
def testFireRecursion(self):
self.assertEqual(
fire.Fire(tc.TypedProperties,
command=['charlie', 'double', 'hello']), 'hellohello')
self.assertEqual(fire.Fire(tc.TypedProperties,
command=['charlie', 'triple', 'w']), 'www')
def testFireVarArgs(self):
self.assertEqual(
fire.Fire(tc.VarArgs,
command=['cumsums', 'a', 'b', 'c', 'd']),
['a', 'ab', 'abc', 'abcd'])
self.assertEqual(
fire.Fire(tc.VarArgs, command=['cumsums', '1', '2', '3', '4']),
[1, 3, 6, 10])
def testFireVarArgsWithNamedArgs(self):
self.assertEqual(
fire.Fire(tc.VarArgs, command=['varchars', '1', '2', 'c', 'd']),
(1, 2, 'cd'))
self.assertEqual(
fire.Fire(tc.VarArgs, command=['varchars', '3', '4', 'c', 'd', 'e']),
(3, 4, 'cde'))
def testFireKeywordArgs(self):
self.assertEqual(
fire.Fire(
tc.Kwargs,
command=['props', '--name', 'David', '--age', '24']),
{'name': 'David', 'age': 24})
# Run this test both with a list command and a string command.
self.assertEqual(
fire.Fire(
tc.Kwargs,
command=['props', '--message',
'"This is a message it has -- in it"']), # Quotes stripped
{'message': 'This is a message it has -- in it'})
self.assertEqual(
fire.Fire(
tc.Kwargs,
command=['props', '--message',
'This is a message it has -- in it']),
{'message': 'This is a message it has -- in it'})
self.assertEqual(
fire.Fire(
tc.Kwargs,
command='props --message "This is a message it has -- in it"'),
{'message': 'This is a message it has -- in it'})
self.assertEqual(
fire.Fire(tc.Kwargs,
command=['upper', '--alpha', 'A', '--beta', 'B']),
'ALPHA BETA')
self.assertEqual(
fire.Fire(
tc.Kwargs,
command=['upper', '--alpha', 'A', '--beta', 'B', '-', 'lower']),
'alpha beta')
def testFireKeywordArgsWithMissingPositionalArgs(self):
self.assertEqual(
fire.Fire(tc.Kwargs, command=['run', 'Hello', 'World', '--cell', 'is']),
('Hello', 'World', {'cell': 'is'}))
self.assertEqual(
fire.Fire(tc.Kwargs, command=['run', 'Hello', '--cell', 'ok']),
('Hello', None, {'cell': 'ok'}))
def testFireObject(self):
self.assertEqual(
fire.Fire(tc.WithDefaults(), command=['double', '--count', '5']), 10)
self.assertEqual(
fire.Fire(tc.WithDefaults(), command=['triple', '--count', '5']), 15)
def testFireDict(self):
component = {
'double': lambda x=0: 2 * x,
'cheese': 'swiss',
}
self.assertEqual(fire.Fire(component, command=['double', '5']), 10)
self.assertEqual(fire.Fire(component, command=['cheese']), 'swiss')
def testFireObjectWithDict(self):
self.assertEqual(
fire.Fire(tc.TypedProperties, command=['delta', 'echo']), 'E')
self.assertEqual(
fire.Fire(tc.TypedProperties, command=['delta', 'echo', 'lower']), 'e')
self.assertIsInstance(
fire.Fire(tc.TypedProperties, command=['delta', 'nest']), dict)
self.assertEqual(
fire.Fire(tc.TypedProperties, command=['delta', 'nest', '0']), 'a')
def testFireSet(self):
component = tc.simple_set()
result = fire.Fire(component, command=[])
self.assertEqual(len(result), 3)
def testFireFrozenset(self):
component = tc.simple_frozenset()
result = fire.Fire(component, command=[])
self.assertEqual(len(result), 3)
def testFireList(self):
component = ['zero', 'one', 'two', 'three']
self.assertEqual(fire.Fire(component, command=['2']), 'two')
self.assertEqual(fire.Fire(component, command=['3']), 'three')
self.assertEqual(fire.Fire(component, command=['-1']), 'three')
def testFireObjectWithList(self):
self.assertEqual(fire.Fire(tc.TypedProperties, command=['echo', '0']),
'alex')
self.assertEqual(fire.Fire(tc.TypedProperties, command=['echo', '1']),
'bethany')
def testFireObjectWithTuple(self):
self.assertEqual(fire.Fire(tc.TypedProperties, command=['fox', '0']),
'carry')
self.assertEqual(fire.Fire(tc.TypedProperties, command=['fox', '1']),
'divide')
def testFireObjectWithListAsObject(self):
self.assertEqual(
fire.Fire(tc.TypedProperties, command=['echo', 'count', 'bethany']),
1)
def testFireObjectWithTupleAsObject(self):
self.assertEqual(
fire.Fire(tc.TypedProperties, command=['fox', 'count', 'divide']),
1)
def testFireNoComponent(self):
self.assertEqual(fire.Fire(command=['tc', 'WithDefaults', 'double', '10']),
20)
last_char = lambda text: text[-1] # pylint: disable=unused-variable
self.assertEqual(fire.Fire(command=['last_char', '"Hello"']), 'o')
self.assertEqual(fire.Fire(command=['last-char', '"World"']), 'd')
rset = lambda count=0: set(range(count)) # pylint: disable=unused-variable
self.assertEqual(fire.Fire(command=['rset', '5']), {0, 1, 2, 3, 4})
def testFireUnderscores(self):
self.assertEqual(
fire.Fire(tc.Underscores,
command=['underscore-example']), 'fish fingers')
self.assertEqual(
fire.Fire(tc.Underscores,
command=['underscore_example']), 'fish fingers')
def testFireUnderscoresInArg(self):
self.assertEqual(
fire.Fire(tc.Underscores,
command=['underscore-function', 'example']), 'example')
self.assertEqual(
fire.Fire(tc.Underscores,
command=['underscore_function', '--underscore-arg=score']),
'score')
self.assertEqual(
fire.Fire(tc.Underscores,
command=['underscore_function', '--underscore_arg=score']),
'score')
def testBoolParsing(self):
self.assertEqual(fire.Fire(tc.BoolConverter, command=['as-bool', 'True']),
True)
self.assertEqual(
fire.Fire(tc.BoolConverter, command=['as-bool', 'False']), False)
self.assertEqual(
fire.Fire(tc.BoolConverter, command=['as-bool', '--arg=True']), True)
self.assertEqual(
fire.Fire(tc.BoolConverter, command=['as-bool', '--arg=False']), False)
self.assertEqual(fire.Fire(tc.BoolConverter, command=['as-bool', '--arg']),
True)
self.assertEqual(
fire.Fire(tc.BoolConverter, command=['as-bool', '--noarg']), False)
def testBoolParsingContinued(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', 'True', 'False']), (True, False))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '--alpha=False', '10']), (False, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '--alpha', '--beta', '10']), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '--alpha', '--beta=10']), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '--noalpha', '--beta']), (False, True))
self.assertEqual(
fire.Fire(tc.MixedDefaults, command=['identity', '10', '--beta']),
(10, True))
def testBoolParsingSingleHyphen(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-alpha=False', '10']), (False, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-alpha', '-beta', '10']), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-alpha', '-beta=10']), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-noalpha', '-beta']), (False, True))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-alpha', '-10', '-beta']), (-10, True))
def testBoolParsingLessExpectedCases(self):
# Note: Does not return (True, 10).
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '--alpha', '10']), (10, '0'))
# To get (True, 10), use one of the following:
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '--alpha', '--beta=10']),
(True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', 'True', '10']), (True, 10))
# Note: Does not return (True, '--test') or ('--test', 0).
with self.assertRaisesFireExit(2):
fire.Fire(tc.MixedDefaults, command=['identity', '--alpha', '--test'])
self.assertEqual(
fire.Fire(
tc.MixedDefaults,
command=['identity', '--alpha', 'True', '"--test"']),
(True, '--test'))
# To get ('--test', '0'), use one of the following:
self.assertEqual(fire.Fire(tc.MixedDefaults,
command=['identity', '--alpha=--test']),
('--test', '0'))
self.assertEqual(
fire.Fire(tc.MixedDefaults, command=r'identity --alpha \"--test\"'),
('--test', '0'))
def testSingleCharFlagParsing(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-a']), (True, '0'))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-a', '--beta=10']), (True, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-a', '-b']), (True, True))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-a', '42', '-b']), (42, True))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-a', '42', '-b', '10']), (42, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '--alpha', 'True', '-b', '10']),
(True, 10))
with self.assertRaisesFireExit(2):
# This test attempts to use an ambiguous shortcut flag on a function with
# a naming conflict for the shortcut, triggering a FireError.
fire.Fire(tc.SimilarArgNames, command=['identity', '-b'])
def testSingleCharFlagParsingEqualSign(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-a=True']), (True, '0'))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-a=3', '--beta=10']), (3, 10))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-a=False', '-b=15']), (False, 15))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-a', '42', '-b=12']), (42, 12))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-a=42', '-b', '10']), (42, 10))
def testSingleCharFlagParsingExactMatch(self):
self.assertEqual(
fire.Fire(tc.SimilarArgNames,
command=['identity2', '-a']), (True, None))
self.assertEqual(
fire.Fire(tc.SimilarArgNames,
command=['identity2', '-a=10']), (10, None))
self.assertEqual(
fire.Fire(tc.SimilarArgNames,
command=['identity2', '--a']), (True, None))
self.assertEqual(
fire.Fire(tc.SimilarArgNames,
command=['identity2', '-alpha']), (None, True))
self.assertEqual(
fire.Fire(tc.SimilarArgNames,
command=['identity2', '-a', '-alpha']), (True, True))
def testSingleCharFlagParsingCapitalLetter(self):
self.assertEqual(
fire.Fire(tc.CapitalizedArgNames,
command=['sum', '-D', '5', '-G', '10']), 15)
def testBoolParsingWithNo(self):
# In these examples --nothing always refers to the nothing argument:
def fn1(thing, nothing):
return thing, nothing
self.assertEqual(fire.Fire(fn1, command=['--thing', '--nothing']),
(True, True))
self.assertEqual(fire.Fire(fn1, command=['--thing', '--nonothing']),
(True, False))
with self.assertRaisesFireExit(2):
# In this case nothing=False (since rightmost setting of a flag gets
# precedence), but it errors because thing has no value.
fire.Fire(fn1, command=['--nothing', '--nonothing'])
# In these examples, --nothing sets thing=False:
def fn2(thing, **kwargs):
return thing, kwargs
self.assertEqual(fire.Fire(fn2, command=['--thing']), (True, {}))
self.assertEqual(fire.Fire(fn2, command=['--nothing']), (False, {}))
with self.assertRaisesFireExit(2):
# In this case, nothing=True, but it errors because thing has no value.
fire.Fire(fn2, command=['--nothing=True'])
self.assertEqual(fire.Fire(fn2, command=['--nothing', '--nothing=True']),
(False, {'nothing': True}))
def fn3(arg, **kwargs):
return arg, kwargs
self.assertEqual(fire.Fire(fn3, command=['--arg=value', '--thing']),
('value', {'thing': True}))
self.assertEqual(fire.Fire(fn3, command=['--arg=value', '--nothing']),
('value', {'thing': False}))
self.assertEqual(fire.Fire(fn3, command=['--arg=value', '--nonothing']),
('value', {'nothing': False}))
def testTraceFlag(self):
with self.assertRaisesFireExit(0, 'Fire trace:\n'):
fire.Fire(tc.BoolConverter, command=['as-bool', 'True', '--', '--trace'])
with self.assertRaisesFireExit(0, 'Fire trace:\n'):
fire.Fire(tc.BoolConverter, command=['as-bool', 'True', '--', '-t'])
with self.assertRaisesFireExit(0, 'Fire trace:\n'):
fire.Fire(tc.BoolConverter, command=['--', '--trace'])
def testHelpFlag(self):
with self.assertRaisesFireExit(0):
fire.Fire(tc.BoolConverter, command=['as-bool', 'True', '--', '--help'])
with self.assertRaisesFireExit(0):
fire.Fire(tc.BoolConverter, command=['as-bool', 'True', '--', '-h'])
with self.assertRaisesFireExit(0):
fire.Fire(tc.BoolConverter, command=['--', '--help'])
def testHelpFlagAndTraceFlag(self):
with self.assertRaisesFireExit(0, 'Fire trace:\n.*SYNOPSIS'):
fire.Fire(tc.BoolConverter,
command=['as-bool', 'True', '--', '--help', '--trace'])
with self.assertRaisesFireExit(0, 'Fire trace:\n.*SYNOPSIS'):
fire.Fire(tc.BoolConverter, command=['as-bool', 'True', '--', '-h', '-t'])
with self.assertRaisesFireExit(0, 'Fire trace:\n.*SYNOPSIS'):
fire.Fire(tc.BoolConverter, command=['--', '-h', '--trace'])
def testTabCompletionNoName(self):
completion_script = fire.Fire(tc.NoDefaults, command=['--', '--completion'])
self.assertIn('double', completion_script)
self.assertIn('triple', completion_script)
def testTabCompletion(self):
completion_script = fire.Fire(
tc.NoDefaults, command=['--', '--completion'], name='c')
self.assertIn('double', completion_script)
self.assertIn('triple', completion_script)
def testTabCompletionWithDict(self):
actions = {'multiply': lambda a, b: a * b}
completion_script = fire.Fire(
actions, command=['--', '--completion'], name='actCLI')
self.assertIn('actCLI', completion_script)
self.assertIn('multiply', completion_script)
def testBasicSeparator(self):
# '-' is the default separator.
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '+', '_']), ('+', '_'))
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '_', '+', '-']), ('_', '+'))
# If we change the separator we can use '-' as an argument.
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['identity', '-', '_', '--', '--separator', '&']),
('-', '_'))
# The separator triggers a function call, but there aren't enough arguments.
with self.assertRaisesFireExit(2):
fire.Fire(tc.MixedDefaults, command=['identity', '-', '_', '+'])
def testNonComparable(self):
"""Fire should work with classes that disallow comparisons."""
# Make sure this test passes both with a string command or a list command.
self.assertIsInstance(
fire.Fire(tc.NonComparable, command=''), tc.NonComparable)
self.assertIsInstance(
fire.Fire(tc.NonComparable, command=[]), tc.NonComparable)
# The first separator instantiates the NonComparable object.
# The second separator causes Fire to check if the separator was necessary.
self.assertIsInstance(
fire.Fire(tc.NonComparable, command=['-', '-']), tc.NonComparable)
def testExtraSeparators(self):
self.assertEqual(
fire.Fire(
tc.ReturnsObj,
command=['get-obj', 'arg1', 'arg2', '-', '-', 'as-bool', 'True']),
True)
self.assertEqual(
fire.Fire(
tc.ReturnsObj,
command=['get-obj', 'arg1', 'arg2', '-', '-', '-', 'as-bool',
'True']),
True)
def testSeparatorForChaining(self):
# Without a separator all args are consumed by get_obj.
self.assertIsInstance(
fire.Fire(tc.ReturnsObj,
command=['get-obj', 'arg1', 'arg2', 'as-bool', 'True']),
tc.BoolConverter)
# With a separator only the preceding args are consumed by get_obj.
self.assertEqual(
fire.Fire(
tc.ReturnsObj,
command=['get-obj', 'arg1', 'arg2', '-', 'as-bool', 'True']), True)
self.assertEqual(
fire.Fire(tc.ReturnsObj,
command=['get-obj', 'arg1', 'arg2', '&', 'as-bool', 'True',
'--', '--separator', '&']),
True)
self.assertEqual(
fire.Fire(tc.ReturnsObj,
command=['get-obj', 'arg1', '$$', 'as-bool', 'True', '--',
'--separator', '$$']),
True)
def testNegativeNumbers(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['sum', '--alpha', '-3', '--beta', '-4']), -11)
def testFloatForExpectedInt(self):
self.assertEqual(
fire.Fire(tc.MixedDefaults,
command=['sum', '--alpha', '2.2', '--beta', '3.0']), 8.2)
self.assertEqual(
fire.Fire(
tc.NumberDefaults,
command=['integer_reciprocal', '--divisor', '5.0']), 0.2)
self.assertEqual(
fire.Fire(tc.NumberDefaults, command=['integer_reciprocal', '4.0']),
0.25)
def testClassInstantiation(self):
self.assertIsInstance(fire.Fire(tc.InstanceVars,
command=['--arg1=a1', '--arg2=a2']),
tc.InstanceVars)
with self.assertRaisesFireExit(2):
# Cannot instantiate a class with positional args.
fire.Fire(tc.InstanceVars, command=['a1', 'a2'])
def testTraceErrors(self):
# Class needs additional value but runs out of args.
with self.assertRaisesFireExit(2):
fire.Fire(tc.InstanceVars, command=['a1'])
with self.assertRaisesFireExit(2):
fire.Fire(tc.InstanceVars, command=['--arg1=a1'])
# Routine needs additional value but runs out of args.
with self.assertRaisesFireExit(2):
fire.Fire(tc.InstanceVars, command=['a1', 'a2', '-', 'run', 'b1'])
with self.assertRaisesFireExit(2):
fire.Fire(tc.InstanceVars,
command=['--arg1=a1', '--arg2=a2', '-', 'run b1'])
# Extra args cannot be consumed.
with self.assertRaisesFireExit(2):
fire.Fire(tc.InstanceVars,
command=['a1', 'a2', '-', 'run', 'b1', 'b2', 'b3'])
with self.assertRaisesFireExit(2):
fire.Fire(
tc.InstanceVars,
command=['--arg1=a1', '--arg2=a2', '-', 'run', 'b1', 'b2', 'b3'])
# Cannot find member to access.
with self.assertRaisesFireExit(2):
fire.Fire(tc.InstanceVars, command=['a1', 'a2', '-', 'jog'])
with self.assertRaisesFireExit(2):
fire.Fire(tc.InstanceVars, command=['--arg1=a1', '--arg2=a2', '-', 'jog'])
def testClassWithDefaultMethod(self):
self.assertEqual(
fire.Fire(tc.DefaultMethod, command=['double', '10']), 20
)
def testClassWithInvalidProperty(self):
self.assertEqual(
fire.Fire(tc.InvalidProperty, command=['double', '10']), 20
)
def testHelpKwargsDecorator(self):
# Issue #190, follow the wrapped method instead of crashing.
with self.assertRaisesFireExit(0):
fire.Fire(tc.decorated_method, command=['-h'])
with self.assertRaisesFireExit(0):
fire.Fire(tc.decorated_method, command=['--help'])
def testFireAsyncio(self):
self.assertEqual(fire.Fire(tc.py3.WithAsyncio,
command=['double', '--count', '10']), 20)
if __name__ == '__main__':
testutils.main()
| FireTest |
python | PrefectHQ__prefect | src/prefect/server/events/filters.py | {
"start": 22681,
"end": 23314
} | class ____(EventDataFilter):
id: Optional[list[UUID]] = Field(
default=None, description="Only include events with one of these IDs"
)
def includes(self, event: Event) -> bool:
if self.id:
if not any(event.id == id for id in self.id):
return False
return True
@db_injector
def build_where_clauses(
self, db: PrefectDBInterface
) -> Sequence["ColumnExpressionArgument[bool]"]:
filters: list["ColumnExpressionArgument[bool]"] = []
if self.id:
filters.append(db.Event.id.in_(self.id))
return filters
| EventIDFilter |
python | PyCQA__pylint | pylint/checkers/imports.py | {
"start": 10844,
"end": 49657
} | class ____(DeprecatedMixin, BaseChecker):
"""BaseChecker for import statements.
Checks for
* external modules dependencies
* relative / wildcard imports
* cyclic imports
* uses of deprecated modules
* uses of modules instead of preferred modules
"""
name = "imports"
msgs = {**DeprecatedMixin.DEPRECATED_MODULE_MESSAGE, **MSGS}
default_deprecated_modules = ()
options = (
(
"deprecated-modules",
{
"default": default_deprecated_modules,
"type": "csv",
"metavar": "<modules>",
"help": "Deprecated modules which should not be used,"
" separated by a comma.",
},
),
(
"preferred-modules",
{
"default": DEFAULT_PREFERRED_MODULES,
"type": "csv",
"metavar": "<module:preferred-module>",
"help": "Couples of modules and preferred modules,"
" separated by a comma.",
},
),
(
"import-graph",
{
"default": "",
"type": "path",
"metavar": "<file.gv>",
"help": "Output a graph (.gv or any supported image format) of"
" all (i.e. internal and external) dependencies to the given file"
" (report RP0402 must not be disabled).",
},
),
(
"ext-import-graph",
{
"default": "",
"type": "path",
"metavar": "<file.gv>",
"help": "Output a graph (.gv or any supported image format)"
" of external dependencies to the given file"
" (report RP0402 must not be disabled).",
},
),
(
"int-import-graph",
{
"default": "",
"type": "path",
"metavar": "<file.gv>",
"help": "Output a graph (.gv or any supported image format)"
" of internal dependencies to the given file"
" (report RP0402 must not be disabled).",
},
),
(
"known-standard-library",
{
"default": DEFAULT_STANDARD_LIBRARY,
"type": "csv",
"metavar": "<modules>",
"help": "Force import order to recognize a module as part of "
"the standard compatibility libraries.",
},
),
(
"known-third-party",
{
"default": DEFAULT_KNOWN_THIRD_PARTY,
"type": "csv",
"metavar": "<modules>",
"help": "Force import order to recognize a module as part of "
"a third party library.",
},
),
(
"allow-any-import-level",
{
"default": (),
"type": "csv",
"metavar": "<modules>",
"help": (
"List of modules that can be imported at any level, not just "
"the top level one."
),
},
),
(
"allow-wildcard-with-all",
{
"default": False,
"type": "yn",
"metavar": "<y or n>",
"help": "Allow wildcard imports from modules that define __all__.",
},
),
(
"allow-reexport-from-package",
{
"default": False,
"type": "yn",
"metavar": "<y or n>",
"help": "Allow explicit reexports by alias from a package __init__.",
},
),
)
def __init__(self, linter: PyLinter) -> None:
BaseChecker.__init__(self, linter)
self.import_graph: defaultdict[str, set[str]] = defaultdict(set)
self._imports_stack: list[tuple[ImportNode, str]] = []
self._first_non_import_node = None
self._module_pkg: dict[Any, Any] = (
{}
) # mapping of modules to the pkg they belong in
self._allow_any_import_level: set[Any] = set()
self.reports = (
("RP0401", "External dependencies", self._report_external_dependencies),
("RP0402", "Modules dependencies graph", self._report_dependencies_graph),
)
self._excluded_edges: defaultdict[str, set[str]] = defaultdict(set)
self._isort_config = isort.Config(
# There is no typo here. EXTRA_standard_library is
# what most users want. The option has been named
# KNOWN_standard_library for ages in pylint, and we
# don't want to break compatibility.
extra_standard_library=linter.config.known_standard_library,
known_third_party=linter.config.known_third_party,
)
def open(self) -> None:
"""Called before visiting project (i.e set of modules)."""
self.linter.stats.dependencies = {}
self.linter.stats = self.linter.stats
self.import_graph = defaultdict(set)
self._module_pkg = {} # mapping of modules to the pkg they belong in
self._current_module_package = False
self._ignored_modules: Sequence[str] = self.linter.config.ignored_modules
# Build a mapping {'module': 'preferred-module'}
self.preferred_modules = dict(
module.split(":")
for module in self.linter.config.preferred_modules
if ":" in module
)
self._allow_any_import_level = set(self.linter.config.allow_any_import_level)
self._allow_reexport_package = self.linter.config.allow_reexport_from_package
def _import_graph_without_ignored_edges(self) -> defaultdict[str, set[str]]:
filtered_graph = copy.deepcopy(self.import_graph)
for node in filtered_graph:
filtered_graph[node].difference_update(self._excluded_edges[node])
return filtered_graph
def close(self) -> None:
"""Called before visiting project (i.e set of modules)."""
if self.linter.is_message_enabled("cyclic-import"):
graph = self._import_graph_without_ignored_edges()
vertices = list(graph)
for cycle in get_cycles(graph, vertices=vertices):
self.add_message("cyclic-import", args=" -> ".join(cycle))
def get_map_data(
self,
) -> tuple[defaultdict[str, set[str]], defaultdict[str, set[str]]]:
if self.linter.is_message_enabled("cyclic-import"):
return (self.import_graph, self._excluded_edges)
return (defaultdict(set), defaultdict(set))
def reduce_map_data(
self,
linter: PyLinter,
data: list[tuple[defaultdict[str, set[str]], defaultdict[str, set[str]]]],
) -> None:
if self.linter.is_message_enabled("cyclic-import"):
self.import_graph = defaultdict(set)
self._excluded_edges = defaultdict(set)
for to_update in data:
graph, excluded_edges = to_update
self.import_graph.update(graph)
self._excluded_edges.update(excluded_edges)
self.close()
def deprecated_modules(self) -> set[str]:
"""Callback returning the deprecated modules."""
# First get the modules the user indicated
all_deprecated_modules = set(self.linter.config.deprecated_modules)
# Now get the hard-coded ones from the stdlib
for since_vers, mod_set in DEPRECATED_MODULES.items():
if since_vers <= sys.version_info:
all_deprecated_modules = all_deprecated_modules.union(mod_set)
return all_deprecated_modules
def visit_module(self, node: nodes.Module) -> None:
"""Store if current module is a package, i.e. an __init__ file."""
self._current_module_package = node.package
def visit_import(self, node: nodes.Import) -> None:
"""Triggered when an import statement is seen."""
self._check_reimport(node)
self._check_import_as_rename(node)
self._check_toplevel(node)
names = [name for name, _ in node.names]
if len(names) >= 2:
self.add_message("multiple-imports", args=", ".join(names), node=node)
for name in names:
self.check_deprecated_module(node, name)
self._check_preferred_module(node, name)
imported_module = self._get_imported_module(node, name)
if isinstance(node.parent, nodes.Module):
# Allow imports nested
self._check_position(node)
if isinstance(node.scope(), nodes.Module):
self._record_import(node, imported_module)
if imported_module is None:
continue
self._add_imported_module(node, imported_module.name)
def visit_importfrom(self, node: nodes.ImportFrom) -> None:
"""Triggered when a from statement is seen."""
basename = node.modname
imported_module = self._get_imported_module(node, basename)
absolute_name = get_import_name(node, basename)
self._check_import_as_rename(node)
self._check_misplaced_future(node)
self.check_deprecated_module(node, absolute_name)
self._check_preferred_module(node, basename)
self._check_wildcard_imports(node, imported_module)
self._check_same_line_imports(node)
self._check_reimport(node, basename=basename, level=node.level)
self._check_toplevel(node)
if isinstance(node.parent, nodes.Module):
# Allow imports nested
self._check_position(node)
if isinstance(node.scope(), nodes.Module):
self._record_import(node, imported_module)
if imported_module is None:
return
for name, _ in node.names:
if name != "*":
self._add_imported_module(node, f"{imported_module.name}.{name}")
else:
self._add_imported_module(node, imported_module.name)
def leave_module(self, node: nodes.Module) -> None:
# Check imports are grouped by category (standard, 3rd party, local)
std_imports, ext_imports, loc_imports = self._check_imports_order(node)
# Check that imports are grouped by package within a given category
met_import: set[str] = set() # set for 'import x' style
met_from: set[str] = set() # set for 'from x import y' style
current_package = None
for import_node, import_name in std_imports + ext_imports + loc_imports:
met = met_from if isinstance(import_node, nodes.ImportFrom) else met_import
package, _, _ = import_name.partition(".")
if (
current_package
and current_package != package
and package in met
and not in_type_checking_block(import_node)
and not (
isinstance(import_node.parent, nodes.If)
and is_sys_guard(import_node.parent)
)
):
self.add_message("ungrouped-imports", node=import_node, args=package)
current_package = package
if not self.linter.is_message_enabled(
"ungrouped-imports", import_node.fromlineno
):
continue
met.add(package)
self._imports_stack = []
self._first_non_import_node = None
def compute_first_non_import_node(
self,
node: (
nodes.If
| nodes.Expr
| nodes.Comprehension
| nodes.IfExp
| nodes.Assign
| nodes.AssignAttr
| nodes.Try
),
) -> None:
# if the node does not contain an import instruction, and if it is the
# first node of the module, keep a track of it (all the import positions
# of the module will be compared to the position of this first
# instruction)
if self._first_non_import_node:
return
if not isinstance(node.parent, nodes.Module):
return
if isinstance(node, nodes.Try) and any(
node.nodes_of_class((nodes.Import, nodes.ImportFrom))
):
return
if isinstance(node, nodes.Assign):
# Add compatibility for module level dunder names
# https://www.python.org/dev/peps/pep-0008/#module-level-dunder-names
valid_targets = [
isinstance(target, nodes.AssignName)
and target.name.startswith("__")
and target.name.endswith("__")
for target in node.targets
]
if all(valid_targets):
return
self._first_non_import_node = node
visit_try = visit_assignattr = visit_assign = visit_ifexp = visit_comprehension = (
visit_expr
) = visit_if = compute_first_non_import_node
def visit_functiondef(
self, node: nodes.FunctionDef | nodes.While | nodes.For | nodes.ClassDef
) -> None:
# If it is the first non import instruction of the module, record it.
if self._first_non_import_node:
return
# Check if the node belongs to an `If` or a `Try` block. If they
# contain imports, skip recording this node.
if not isinstance(node.parent.scope(), nodes.Module):
return
root = node
while not isinstance(root.parent, nodes.Module):
root = root.parent
if isinstance(root, (nodes.If, nodes.Try)):
if any(root.nodes_of_class((nodes.Import, nodes.ImportFrom))):
return
self._first_non_import_node = node
visit_classdef = visit_for = visit_while = visit_functiondef
def _check_misplaced_future(self, node: nodes.ImportFrom) -> None:
basename = node.modname
if basename == "__future__":
# check if this is the first non-docstring statement in the module
prev = node.previous_sibling()
if prev:
# consecutive future statements are possible
if not (
isinstance(prev, nodes.ImportFrom) and prev.modname == "__future__"
):
self.add_message("misplaced-future", node=node)
def _check_same_line_imports(self, node: nodes.ImportFrom) -> None:
# Detect duplicate imports on the same line.
names = (name for name, _ in node.names)
counter = collections.Counter(names)
for name, count in counter.items():
if count > 1:
self.add_message("reimported", node=node, args=(name, node.fromlineno))
def _check_position(self, node: ImportNode) -> None:
"""Check `node` import or importfrom node position is correct.
Send a message if `node` comes before another instruction
"""
# if a first non-import instruction has already been encountered,
# it means the import comes after it and therefore is not well placed
if self._first_non_import_node:
if self.linter.is_message_enabled(
"wrong-import-position", self._first_non_import_node.fromlineno
):
self.add_message(
"wrong-import-position", node=node, args=node.as_string()
)
else:
self.linter.add_ignored_message(
"wrong-import-position", node.fromlineno, node
)
def _record_import(
self,
node: ImportNode,
importedmodnode: nodes.Module | None,
) -> None:
"""Record the package `node` imports from."""
if isinstance(node, nodes.ImportFrom):
importedname = node.modname
else:
importedname = importedmodnode.name if importedmodnode else None
if not importedname:
importedname = node.names[0][0].split(".")[0]
if isinstance(node, nodes.ImportFrom) and (node.level or 0) >= 1:
# We need the importedname with first point to detect local package
# Example of node:
# 'from .my_package1 import MyClass1'
# the output should be '.my_package1' instead of 'my_package1'
# Example of node:
# 'from . import my_package2'
# the output should be '.my_package2' instead of '{pyfile}'
importedname = "." + importedname
self._imports_stack.append((node, importedname))
@staticmethod
def _is_fallback_import(
node: ImportNode, imports: list[tuple[ImportNode, str]]
) -> bool:
imports = [import_node for (import_node, _) in imports]
return any(astroid.are_exclusive(import_node, node) for import_node in imports)
def _check_imports_order(self, _module_node: nodes.Module) -> tuple[
list[tuple[ImportNode, str]],
list[tuple[ImportNode, str]],
list[tuple[ImportNode, str]],
]:
"""Checks imports of module `node` are grouped by category.
Imports must follow this order: standard, 3rd party, local
"""
std_imports: list[tuple[ImportNode, str]] = []
third_party_imports: list[tuple[ImportNode, str]] = []
first_party_imports: list[tuple[ImportNode, str]] = []
# need of a list that holds third or first party ordered import
external_imports: list[tuple[ImportNode, str]] = []
local_imports: list[tuple[ImportNode, str]] = []
third_party_not_ignored: list[tuple[ImportNode, str]] = []
first_party_not_ignored: list[tuple[ImportNode, str]] = []
local_not_ignored: list[tuple[ImportNode, str]] = []
for node, modname in self._imports_stack:
if modname.startswith("."):
package = "." + modname.split(".")[1]
else:
package = modname.split(".")[0]
nested = not isinstance(node.parent, nodes.Module)
ignore_for_import_order = not self.linter.is_message_enabled(
"wrong-import-order", node.fromlineno
)
import_category = isort.place_module(package, config=self._isort_config)
node_and_package_import = (node, package)
match import_category:
case "FUTURE" | "STDLIB":
std_imports.append(node_and_package_import)
wrong_import = (
third_party_not_ignored
or first_party_not_ignored
or local_not_ignored
)
if self._is_fallback_import(node, wrong_import):
continue
if wrong_import and not nested:
self.add_message(
"wrong-import-order",
node=node,
args=( ## TODO - this isn't right for multiple on the same line...
f'standard import "{self._get_full_import_name((node, package))}"',
self._get_out_of_order_string(
third_party_not_ignored,
first_party_not_ignored,
local_not_ignored,
),
),
)
case "THIRDPARTY":
third_party_imports.append(node_and_package_import)
external_imports.append(node_and_package_import)
if not nested:
if not ignore_for_import_order:
third_party_not_ignored.append(node_and_package_import)
else:
self.linter.add_ignored_message(
"wrong-import-order", node.fromlineno, node
)
wrong_import = first_party_not_ignored or local_not_ignored
if wrong_import and not nested:
self.add_message(
"wrong-import-order",
node=node,
args=(
f'third party import "{self._get_full_import_name((node, package))}"',
self._get_out_of_order_string(
None, first_party_not_ignored, local_not_ignored
),
),
)
case "FIRSTPARTY":
first_party_imports.append(node_and_package_import)
external_imports.append(node_and_package_import)
if not nested:
if not ignore_for_import_order:
first_party_not_ignored.append(node_and_package_import)
else:
self.linter.add_ignored_message(
"wrong-import-order", node.fromlineno, node
)
wrong_import = local_not_ignored
if wrong_import and not nested:
self.add_message(
"wrong-import-order",
node=node,
args=(
f'first party import "{self._get_full_import_name((node, package))}"',
self._get_out_of_order_string(
None, None, local_not_ignored
),
),
)
case "LOCALFOLDER":
local_imports.append((node, package))
if not nested:
if not ignore_for_import_order:
local_not_ignored.append((node, package))
else:
self.linter.add_ignored_message(
"wrong-import-order", node.fromlineno, node
)
return std_imports, external_imports, local_imports
def _get_out_of_order_string(
self,
third_party_imports: list[tuple[ImportNode, str]] | None,
first_party_imports: list[tuple[ImportNode, str]] | None,
local_imports: list[tuple[ImportNode, str]] | None,
) -> str:
# construct the string listing out of order imports used in the message
# for wrong-import-order
if third_party_imports:
plural = "s" if len(third_party_imports) > 1 else ""
if len(third_party_imports) > MAX_NUMBER_OF_IMPORT_SHOWN:
imports_list = (
", ".join(
[
f'"{self._get_full_import_name(tpi)}"'
for tpi in third_party_imports[
: int(MAX_NUMBER_OF_IMPORT_SHOWN // 2)
]
]
)
+ " (...) "
+ ", ".join(
[
f'"{self._get_full_import_name(tpi)}"'
for tpi in third_party_imports[
int(-MAX_NUMBER_OF_IMPORT_SHOWN // 2) :
]
]
)
)
else:
imports_list = ", ".join(
[
f'"{self._get_full_import_name(tpi)}"'
for tpi in third_party_imports
]
)
third_party = f"third party import{plural} {imports_list}"
else:
third_party = ""
if first_party_imports:
plural = "s" if len(first_party_imports) > 1 else ""
if len(first_party_imports) > MAX_NUMBER_OF_IMPORT_SHOWN:
imports_list = (
", ".join(
[
f'"{self._get_full_import_name(tpi)}"'
for tpi in first_party_imports[
: int(MAX_NUMBER_OF_IMPORT_SHOWN // 2)
]
]
)
+ " (...) "
+ ", ".join(
[
f'"{self._get_full_import_name(tpi)}"'
for tpi in first_party_imports[
int(-MAX_NUMBER_OF_IMPORT_SHOWN // 2) :
]
]
)
)
else:
imports_list = ", ".join(
[
f'"{self._get_full_import_name(fpi)}"'
for fpi in first_party_imports
]
)
first_party = f"first party import{plural} {imports_list}"
else:
first_party = ""
if local_imports:
plural = "s" if len(local_imports) > 1 else ""
if len(local_imports) > MAX_NUMBER_OF_IMPORT_SHOWN:
imports_list = (
", ".join(
[
f'"{self._get_full_import_name(tpi)}"'
for tpi in local_imports[
: int(MAX_NUMBER_OF_IMPORT_SHOWN // 2)
]
]
)
+ " (...) "
+ ", ".join(
[
f'"{self._get_full_import_name(tpi)}"'
for tpi in local_imports[
int(-MAX_NUMBER_OF_IMPORT_SHOWN // 2) :
]
]
)
)
else:
imports_list = ", ".join(
[f'"{self._get_full_import_name(li)}"' for li in local_imports]
)
local = f"local import{plural} {imports_list}"
else:
local = ""
delimiter_third_party = (
(
", "
if (first_party and local)
else (" and " if (first_party or local) else "")
)
if third_party
else ""
)
delimiter_first_party1 = (
(", " if (third_party and local) else " ") if first_party else ""
)
delimiter_first_party2 = ("and " if local else "") if first_party else ""
delimiter_first_party = f"{delimiter_first_party1}{delimiter_first_party2}"
msg = (
f"{third_party}{delimiter_third_party}"
f"{first_party}{delimiter_first_party}"
f'{local if local else ""}'
)
return msg
def _get_full_import_name(self, importNode: ImportNode) -> str:
# construct a more descriptive name of the import
# for: import X, this returns X
# for: import X.Y this returns X.Y
# for: from X import Y, this returns X.Y
try:
# this will only succeed for ImportFrom nodes, which in themselves
# contain the information needed to reconstruct the package
return f"{importNode[0].modname}.{importNode[0].names[0][0]}"
except AttributeError:
# in all other cases, the import will either be X or X.Y
node: str = importNode[0].names[0][0]
package: str = importNode[1]
if node.split(".")[0] == package:
# this is sufficient with one import per line, since package = X
# and node = X.Y or X
return node
# when there is a node that contains multiple imports, the "current"
# import being analyzed is specified by package (node is the first
# import on the line and therefore != package in this case)
return package
def _get_imported_module(
self, importnode: ImportNode, modname: str
) -> nodes.Module | None:
try:
return importnode.do_import_module(modname)
except astroid.TooManyLevelsError:
if _ignore_import_failure(importnode, modname, self._ignored_modules):
return None
self.add_message("relative-beyond-top-level", node=importnode)
except astroid.AstroidSyntaxError as exc:
message = f"Cannot import {modname!r} due to '{exc.error}'"
self.add_message(
"syntax-error", line=importnode.lineno, args=message, confidence=HIGH
)
except astroid.AstroidBuildingError:
if not self.linter.is_message_enabled("import-error"):
return None
if _ignore_import_failure(importnode, modname, self._ignored_modules):
return None
if (
not self.linter.config.analyse_fallback_blocks
and is_from_fallback_block(importnode)
):
return None
dotted_modname = get_import_name(importnode, modname)
self.add_message("import-error", args=repr(dotted_modname), node=importnode)
except Exception as e: # pragma: no cover
raise astroid.AstroidError from e
return None
def _add_imported_module(self, node: ImportNode, importedmodname: str) -> None:
"""Notify an imported module, used to analyze dependencies."""
module_file = node.root().file
context_name = node.root().name
base = os.path.splitext(os.path.basename(module_file))[0]
try:
if isinstance(node, nodes.ImportFrom) and node.level:
importedmodname = astroid.modutils.get_module_part(
importedmodname, module_file
)
else:
importedmodname = astroid.modutils.get_module_part(importedmodname)
except ImportError:
pass
if context_name == importedmodname:
self.add_message("import-self", node=node)
elif not astroid.modutils.is_stdlib_module(importedmodname):
# if this is not a package __init__ module
if base != "__init__" and context_name not in self._module_pkg:
# record the module's parent, or the module itself if this is
# a top level module, as the package it belongs to
self._module_pkg[context_name] = context_name.rsplit(".", 1)[0]
# handle dependencies
dependencies_stat: dict[str, set[str]] = self.linter.stats.dependencies
importedmodnames = dependencies_stat.setdefault(importedmodname, set())
if context_name not in importedmodnames:
importedmodnames.add(context_name)
# update import graph
self.import_graph[context_name].add(importedmodname)
if not self.linter.is_message_enabled(
"cyclic-import", line=node.lineno
) or in_type_checking_block(node):
self._excluded_edges[context_name].add(importedmodname)
def _check_preferred_module(self, node: ImportNode, mod_path: str) -> None:
"""Check if the module has a preferred replacement."""
mod_compare = [mod_path]
# build a comparison list of possible names using importfrom
if isinstance(node, nodes.ImportFrom):
mod_compare = [f"{node.modname}.{name[0]}" for name in node.names]
# find whether there are matches with the import vs preferred_modules keys
matches = [
k
for k in self.preferred_modules
for mod in mod_compare
# exact match
if k == mod
# checks for base module matches
or k in mod.split(".")[0]
]
# if we have matches, add message
if matches:
self.add_message(
"preferred-module",
node=node,
args=(self.preferred_modules[matches[0]], matches[0]),
)
def _check_import_as_rename(self, node: ImportNode) -> None:
names = node.names
for name in names:
if not all(name):
return
splitted_packages = name[0].rsplit(".", maxsplit=1)
import_name = splitted_packages[-1]
aliased_name = name[1]
if import_name != aliased_name:
continue
if len(splitted_packages) == 1 and (
self._allow_reexport_package is False
or self._current_module_package is False
):
self.add_message("useless-import-alias", node=node, confidence=HIGH)
elif len(splitted_packages) == 2:
self.add_message(
"consider-using-from-import",
node=node,
args=(splitted_packages[0], import_name),
)
def _check_reimport(
self,
node: ImportNode,
basename: str | None = None,
level: int | None = None,
) -> None:
"""Check if a module with the same name is already imported or aliased."""
if not self.linter.is_message_enabled(
"reimported"
) and not self.linter.is_message_enabled("shadowed-import"):
return
frame = node.frame()
root = node.root()
contexts = [(frame, level)]
if root is not frame:
contexts.append((root, None))
for known_context, known_level in contexts:
for name, alias in node.names:
first, msg = _get_first_import(
node, known_context, name, basename, known_level, alias
)
if first is not None and msg is not None:
name = name if msg == "reimported" else alias
self.add_message(
msg, node=node, args=(name, first.fromlineno), confidence=HIGH
)
def _report_external_dependencies(
self, sect: Section, _: LinterStats, _dummy: LinterStats | None
) -> None:
"""Return a verbatim layout for displaying dependencies."""
dep_info = _make_tree_defs(self._external_dependencies_info.items())
if not dep_info:
raise EmptyReportError()
tree_str = _repr_tree_defs(dep_info)
sect.append(VerbatimText(tree_str))
def _report_dependencies_graph(
self, sect: Section, _: LinterStats, _dummy: LinterStats | None
) -> None:
"""Write dependencies as a dot (graphviz) file."""
dep_info = self.linter.stats.dependencies
if not (
dep_info
and (
self.linter.config.import_graph
or self.linter.config.ext_import_graph
or self.linter.config.int_import_graph
)
):
raise EmptyReportError()
filename = self.linter.config.import_graph
if filename:
_make_graph(filename, dep_info, sect, "")
filename = self.linter.config.ext_import_graph
if filename:
_make_graph(filename, self._external_dependencies_info, sect, "external ")
filename = self.linter.config.int_import_graph
if filename:
_make_graph(filename, self._internal_dependencies_info, sect, "internal ")
def _filter_dependencies_graph(self, internal: bool) -> defaultdict[str, set[str]]:
"""Build the internal or the external dependency graph."""
graph: defaultdict[str, set[str]] = defaultdict(set)
for importee, importers in self.linter.stats.dependencies.items():
for importer in importers:
package = self._module_pkg.get(importer, importer)
is_inside = importee.startswith(package)
if (is_inside and internal) or (not is_inside and not internal):
graph[importee].add(importer)
return graph
@cached_property
def _external_dependencies_info(self) -> defaultdict[str, set[str]]:
"""Return cached external dependencies information or build and
cache them.
"""
return self._filter_dependencies_graph(internal=False)
@cached_property
def _internal_dependencies_info(self) -> defaultdict[str, set[str]]:
"""Return cached internal dependencies information or build and
cache them.
"""
return self._filter_dependencies_graph(internal=True)
def _check_wildcard_imports(
self, node: nodes.ImportFrom, imported_module: nodes.Module | None
) -> None:
if node.root().package:
# Skip the check if in __init__.py issue #2026
return
wildcard_import_is_allowed = self._wildcard_import_is_allowed(imported_module)
for name, _ in node.names:
if name == "*" and not wildcard_import_is_allowed:
self.add_message("wildcard-import", args=node.modname, node=node)
def _wildcard_import_is_allowed(self, imported_module: nodes.Module | None) -> bool:
return (
self.linter.config.allow_wildcard_with_all
and imported_module is not None
and "__all__" in imported_module.locals
)
def _check_toplevel(self, node: ImportNode) -> None:
"""Check whether the import is made outside the module toplevel."""
# If the scope of the import is a module, then obviously it is
# not outside the module toplevel.
if isinstance(node.scope(), nodes.Module):
return
module_names = [
(
f"{node.modname}.{name[0]}"
if isinstance(node, nodes.ImportFrom)
else name[0]
)
for name in node.names
]
# Get the full names of all the imports that are only allowed at the module level
scoped_imports = [
name for name in module_names if name not in self._allow_any_import_level
]
if scoped_imports:
self.add_message(
"import-outside-toplevel", args=", ".join(scoped_imports), node=node
)
def register(linter: PyLinter) -> None:
linter.register_checker(ImportsChecker(linter))
| ImportsChecker |
python | facebook__pyre-check | tools/generate_taint_models/tests/model_test.py | {
"start": 558,
"end": 22669
} | class ____(unittest.TestCase):
def test_callable_model(self) -> None:
name = f"{__name__}.test_function"
self.assertEqual(
str(
model.CallableModel(
callable_object=test_function,
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[tainted]"
),
)
),
f"def {name}(argument: TaintSource[tainted], *variable, **keyword): ...",
)
self.assertEqual(
str(
model.CallableModel(
callable_object=test_function,
parameter_annotation=AllParametersAnnotation(
vararg="TaintSource[tainted]"
),
)
),
f"def {name}(argument, *variable: TaintSource[tainted], **keyword): ...",
)
self.assertEqual(
str(
model.CallableModel(
callable_object=test_function,
parameter_annotation=AllParametersAnnotation(
kwarg="TaintSource[tainted]"
),
)
),
f"def {name}(argument, *variable, **keyword: TaintSource[tainted]): ...",
)
self.assertEqual(
str(
model.CallableModel(
callable_object=test_function, returns="TaintSink[returned]"
)
),
f"def {name}(argument, *variable, **keyword) -> TaintSink[returned]: ...",
)
# We handle the combined AnnotationSpecification
annotations = AnnotationSpecification(
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[tainted]",
vararg="TaintSource[tainted]",
kwarg="TaintSource[tainted]",
),
returns="TaintSink[returned]",
)
self.assertEqual(
str(
model.CallableModel(
callable_object=test_function, annotations=annotations
)
),
f"def {name}(argument: TaintSource[tainted],"
+ " *variable: TaintSource[tainted],"
+ " **keyword: TaintSource[tainted]) -> TaintSink[returned]: ...",
)
# We don't generate models for local functions.
def local_function(x: int, *args: str) -> None:
...
with self.assertRaises(ValueError):
model.CallableModel(
callable_object=local_function, returns="TaintSink[returned]"
)
# Ensure that we don't choke on malformed types of functions.
class CallMe:
def __call__(self) -> None:
pass
with self.assertRaises(ValueError):
model.CallableModel(callable_object=CallMe)
def assert_modeled(
self,
source: str,
expected: str,
**kwargs: Union[str, ParameterAnnotation, WhitelistSpecification],
) -> None:
parsed_function = ast.parse(textwrap.dedent(source)).body[0]
# pyre-fixme[35]: Target cannot be annotated.
parsed_function: model.FunctionDefinition
self.assertEqual(
# pyre-ignore[6]: Expected `typing.Optional[typing.Set[str]]` for
# 2nd positional only parameter to call
# `model.FunctionDefinitionModel.__init__` but got `str`.
str(model.FunctionDefinitionModel(definition=parsed_function, **kwargs)),
expected,
)
# We handle the combined AnnotationSpecification
annotations = AnnotationSpecification(
# pyre-ignore[6]: Too dynamic.
parameter_annotation=kwargs.get("parameter_annotation"),
# pyre-fixme[6]: Expected `Optional[str]` for 2nd param but got
# `Union[None, ParameterAnnotation, str]`.
returns=kwargs.get("returns"),
)
self.assertEqual(
str(
model.FunctionDefinitionModel(
definition=parsed_function,
annotations=annotations,
# pyre-ignore[6]: Too dynamic.
**kwargs,
)
),
expected,
)
def test_function_definition_model(self) -> None:
all_args_source = """
def test_fn(arg1, arg2, *v, **kw):
pass
"""
# Check that annotations work one at a time
self.assert_modeled(
all_args_source,
"def test_fn(arg1: Arg, arg2: Arg, *v, **kw): ...",
parameter_annotation=AllParametersAnnotation(arg="Arg"),
)
self.assert_modeled(
all_args_source,
"def test_fn(arg1, arg2, *v: Vararg, **kw): ...",
parameter_annotation=AllParametersAnnotation(vararg="Vararg"),
)
self.assert_modeled(
all_args_source,
"def test_fn(arg1, arg2, *v, **kw: Kwarg): ...",
parameter_annotation=AllParametersAnnotation(kwarg="Kwarg"),
)
self.assert_modeled(
all_args_source,
"def test_fn(arg1, arg2, *v, **kw) -> Return: ...",
returns="Return",
)
self.assert_modeled(
all_args_source,
"def qualifier.test_fn(arg1, arg2, *v, **kw): ...",
qualifier="qualifier",
)
# Check that all the bells and whistles work
self.assert_modeled(
all_args_source,
"def qualifier.test_fn(arg1: Arg, arg2: Arg, *v: Vararg, "
"**kw: Kwarg) -> Return: ...",
parameter_annotation=AllParametersAnnotation(
arg="Arg", vararg="Vararg", kwarg="Kwarg"
),
returns="Return",
qualifier="qualifier",
)
# Check that we handle functions without all arg types
self.assert_modeled(
"""
def test_fn(arg1: str, arg2):
pass
""",
"def test_fn(arg1: Arg, arg2: Arg): ...",
parameter_annotation=AllParametersAnnotation(arg="Arg"),
)
self.assert_modeled(
"""
def test_fn(*v):
pass
""",
"def test_fn(*v: Vararg): ...",
parameter_annotation=AllParametersAnnotation(vararg="Vararg"),
)
self.assert_modeled(
"""
def test_fn(**kw):
pass
""",
"def test_fn(**kw: Kwarg): ...",
parameter_annotation=AllParametersAnnotation(kwarg="Kwarg"),
)
# Check that we handle async functions
self.assert_modeled(
"""
async def test_fn(arg1, arg2, *v, **kw):
pass
""",
"def qualifier.test_fn(arg1: Arg, arg2: Arg, *v: Vararg, "
"**kw: Kwarg) -> Return: ...",
parameter_annotation=AllParametersAnnotation(
arg="Arg", vararg="Vararg", kwarg="Kwarg"
),
returns="Return",
qualifier="qualifier",
)
# Check that we gracefully handle unused annotation parameters
self.assert_modeled(
"""
def test_fn():
pass
""",
"def qualifier.test_fn() -> Return: ...",
parameter_annotation=AllParametersAnnotation(
arg="Arg", vararg="Vararg", kwarg="Kwarg"
),
returns="Return",
qualifier="qualifier",
)
self.assert_modeled(
"""
def test_fn(x, *, keyword_only):
pass
""",
"def qualifier.test_fn(x: Arg, *, keyword_only: Arg) -> Return: ...",
parameter_annotation=AllParametersAnnotation(
arg="Arg", vararg="Vararg", kwarg="Kwarg"
),
returns="Return",
qualifier="qualifier",
)
self.assert_modeled(
"""def test_fn(arg1: Optional[TestClass], arg2: str):
pass
""",
"def test_fn(arg1, arg2: TaintSource[UC]): ...",
parameter_annotation=AllParametersAnnotation(arg="TaintSource[UC]"),
whitelist=WhitelistSpecification(parameter_type={"Optional[TestClass]"}),
)
self.assert_modeled(
"""def test_fn(arg1: Tuple[int, int], arg2: str):
pass
""",
"def test_fn(arg1, arg2: TaintSource[UC]): ...",
parameter_annotation=AllParametersAnnotation(arg="TaintSource[UC]"),
whitelist=WhitelistSpecification(parameter_type={"Tuple[int, int]"}),
)
self.assert_modeled(
"""def test_fn(arg1: Annotated[TestClass, ExampleAnnotation(accesses=(Access.REVIEWED,))], arg2: str):
pass
""",
"def test_fn(arg1, arg2: TaintSource[UC]): ...",
parameter_annotation=AllParametersAnnotation(arg="TaintSource[UC]"),
whitelist=WhitelistSpecification(parameter_type={"TestClass"}),
)
self.assert_modeled(
"""def test_fn(arg1: Annotated[Optional[TestClass], ExampleAnnotation(accesses=(Access.REVIEWED,))], arg2: str):
pass
""",
"def test_fn(arg1, arg2: TaintSource[UC]): ...",
parameter_annotation=AllParametersAnnotation(arg="TaintSource[UC]"),
whitelist=WhitelistSpecification(parameter_type={"Optional[TestClass]"}),
)
self.assert_modeled(
"""def test_fn(arg1: Annotated[Tuple[int, int], ExampleAnnotation(accesses=(Access.REVIEWED,))], arg2: str):
pass
""",
"def test_fn(arg1, arg2: TaintSource[UC]): ...",
parameter_annotation=AllParametersAnnotation(arg="TaintSource[UC]"),
whitelist=WhitelistSpecification(parameter_type={"Tuple[int, int]"}),
)
def test_assignment_model(self) -> None:
model_1 = model.AssignmentModel(
annotation="TaintSink[A]", target="fully.qualified.name"
)
model_2 = model.AssignmentModel(
annotation="TaintSink[B]", target="fully.qualified.name"
)
self.assertEqual(str(model_1), "fully.qualified.name: TaintSink[A] = ...")
self.assertEqual(model_1, model_2)
test_set = set()
test_set.add(model_1)
# Checking for 'model_2' despite putting in 'model_1' is deliberate; we
# are testing the effectiveness of the hash equivalence
self.assertIn(model_2, test_set)
with self.assertRaises(ValueError):
model.AssignmentModel(
annotation="TaintSink[Test]", target="do-not-generate"
)
# pyre-fixme[56]: Argument `set()` to decorator factory
# `unittest.mock.patch.object` could not be resolved in a global scope.
@patch.object(model.RawCallableModel, "__abstractmethods__", set())
def test_raw_callable_model(self) -> None:
with patch.object(
model.RawCallableModel,
"_get_fully_qualified_callable_name",
return_value="qualified.C.name",
):
with patch.object(
model.RawCallableModel,
"_generate_parameters",
return_value=[
model.Parameter("self", None, model.Parameter.Kind.ARG),
model.Parameter("a", None, model.Parameter.Kind.ARG),
],
):
self.assertEqual(
str(
# pyre-ignore[45]: Cannot instantiate abstract class
model.RawCallableModel(
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[UserControlled]"
)
)
),
"def qualified.C.name(self: TaintSource[UserControlled], "
"a: TaintSource[UserControlled]): ...",
)
with patch.object(
model.RawCallableModel,
"_generate_parameters",
return_value=[
model.Parameter("self", None, model.Parameter.Kind.ARG),
model.Parameter("*args", None, model.Parameter.Kind.VARARG),
],
):
self.assertEqual(
str(
# pyre-ignore[45]: Cannot instantiate abstract class
model.RawCallableModel(
parameter_annotation=AllParametersAnnotation(
vararg="TaintSource[Var]"
)
)
),
"def qualified.C.name(self, *args: TaintSource[Var]): ...",
)
with patch.object(
model.RawCallableModel,
"_generate_parameters",
return_value=[
model.Parameter("self", None, model.Parameter.Kind.ARG),
model.Parameter("**kwargs", None, model.Parameter.Kind.KWARG),
],
):
self.assertEqual(
str(
# pyre-ignore[45]: Cannot instantiate abstract class
model.RawCallableModel(
parameter_annotation=AllParametersAnnotation(
kwarg="TaintSource[UC]"
)
)
),
"def qualified.C.name(self, **kwargs: TaintSource[UC]): ...",
)
with patch.object(
model.RawCallableModel,
"_generate_parameters",
return_value=[
model.Parameter("a", "int", model.Parameter.Kind.ARG),
model.Parameter("b", None, model.Parameter.Kind.ARG),
],
):
self.assertEqual(
str(
# pyre-ignore[45]: Cannot instantiate abstract class
model.RawCallableModel(
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[UC]"
),
parameter_type_whitelist=["int"],
)
),
"def qualified.C.name(a, b: TaintSource[UC]): ...",
)
with patch.object(
model.RawCallableModel,
"_generate_parameters",
return_value=[
model.Parameter("a", None, model.Parameter.Kind.ARG),
model.Parameter("b", None, model.Parameter.Kind.ARG),
],
):
self.assertEqual(
str(
# pyre-ignore[45]: Cannot instantiate abstract class
model.RawCallableModel(
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[UC]"
),
parameter_name_whitelist={"b"},
)
),
"def qualified.C.name(a: TaintSource[UC], b): ...",
)
with patch.object(
model.RawCallableModel,
"_generate_parameters",
return_value=[
model.Parameter("a", "int", model.Parameter.Kind.ARG),
model.Parameter("b", None, model.Parameter.Kind.ARG),
],
):
self.assertEqual(
str(
# pyre-ignore[45]: Cannot instantiate abstract class
model.RawCallableModel(
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[UC]"
),
whitelist=WhitelistSpecification(parameter_type={"int"}),
)
),
"def qualified.C.name(a, b: TaintSource[UC]): ...",
)
with patch.object(
model.RawCallableModel,
"_generate_parameters",
return_value=[
model.Parameter("a", None, model.Parameter.Kind.ARG),
model.Parameter("b", None, model.Parameter.Kind.ARG),
],
):
self.assertEqual(
str(
# pyre-ignore[45]: Cannot instantiate abstract class
model.RawCallableModel(
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[UC]"
),
whitelist=WhitelistSpecification(parameter_name={"b"}),
)
),
"def qualified.C.name(a: TaintSource[UC], b): ...",
)
with patch.object(
model.RawCallableModel,
"_generate_parameters",
return_value=[
model.Parameter("a", None, model.Parameter.Kind.ARG),
model.Parameter("b", None, model.Parameter.Kind.ARG),
],
):
# pyre-ignore[45]: Cannot instantiate abstract class
model_1 = model.RawCallableModel(
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[A]",
vararg="TaintSource[A]",
kwarg="TaintSource[A]",
),
returns="TaintSource[A]",
)
# pyre-ignore[45]: Cannot instantiate abstract class
model_2 = model.RawCallableModel(
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[B]",
vararg="TaintSource[B]",
kwarg="TaintSource[B]",
),
returns="TaintSource[B]",
)
self.assertEqual(model_1, model_2)
test_set = set()
test_set.add(model_1)
# Checking for 'model_2' despite putting in 'model_1' is deliberate; we
# are testing the effectiveness of the hash equivalence
self.assertIn(model_2, test_set)
with patch.object(
model.RawCallableModel,
"_generate_parameters",
return_value=[
model.Parameter("a", None, model.Parameter.Kind.ARG),
model.Parameter("*", None, model.Parameter.Kind.ARG),
model.Parameter("keyword_only", None, model.Parameter.Kind.ARG),
],
):
self.assertEqual(
str(
# pyre-ignore[45]: Cannot instantiate abstract class
model.RawCallableModel(
parameter_annotation=AllParametersAnnotation(
arg="TaintSource[A]"
),
returns="TaintSource[A]",
)
),
"def qualified.C.name(a: TaintSource[A], *, keyword_only: TaintSource[A]) -> TaintSource[A]: ...",
)
with self.assertRaises(ValueError), patch.object(
model.RawCallableModel,
"_get_fully_qualified_callable_name",
return_value="my-qualifier.C.name",
):
# pyre-ignore[45]: Cannot instantiate abstract class
model.RawCallableModel()
def test_class_model(self) -> None:
model_1 = model.ClassModel(
class_name="qualified.C.name", annotation="TaintSource[A]"
)
model_2 = model.ClassModel(
class_name="qualified.C.name", annotation="TaintSource[B]"
)
self.assertEqual(str(model_1), "class qualified.C.name(TaintSource[A]): ...")
self.assertEqual(model_1, model_2)
test_set = set()
test_set.add(model_1)
# Checking for 'model_2' despite putting in 'model_1' is deliberate; we
# are testing the effectiveness of the hash equivalence
self.assertIn(model_2, test_set)
def test_property_model(self) -> None:
self.assertEqual(
str(
model.PropertyModel(
class_name="a.C", attribute_name="attr", annotation="TaintSource[A]"
)
),
"@property\ndef a.C.attr(self) -> TaintSource[A]: ...",
)
self.assertEqual(
model.PropertyModel(
class_name="a.C", attribute_name="attr", annotation="TaintSource[A]"
),
model.PropertyModel(
class_name="a.C", attribute_name="attr", annotation="TaintSource[B]"
),
)
self.assertNotEqual(
model.PropertyModel(
class_name="a.C", attribute_name="attr", annotation="TaintSource[A]"
),
model.PropertyModel(
class_name="a.D", attribute_name="attr", annotation="TaintSource[A]"
),
)
self.assertNotEqual(
model.PropertyModel(
class_name="a.C", attribute_name="attr1", annotation="TaintSource[A]"
),
model.PropertyModel(
class_name="a.C", attribute_name="attr2", annotation="TaintSource[A]"
),
)
| ModelTest |
python | pandas-dev__pandas | pandas/tests/arrays/categorical/test_dtypes.py | {
"start": 282,
"end": 6210
} | class ____:
def test_categories_match_up_to_permutation(self):
# test dtype comparisons between cats
c1 = Categorical(list("aabca"), categories=list("abc"), ordered=False)
c2 = Categorical(list("aabca"), categories=list("cab"), ordered=False)
c3 = Categorical(list("aabca"), categories=list("cab"), ordered=True)
assert c1._categories_match_up_to_permutation(c1)
assert c2._categories_match_up_to_permutation(c2)
assert c3._categories_match_up_to_permutation(c3)
assert c1._categories_match_up_to_permutation(c2)
assert not c1._categories_match_up_to_permutation(c3)
assert not c1._categories_match_up_to_permutation(Index(list("aabca")))
assert not c1._categories_match_up_to_permutation(c1.astype(object))
assert c1._categories_match_up_to_permutation(CategoricalIndex(c1))
assert c1._categories_match_up_to_permutation(
CategoricalIndex(c1, categories=list("cab"))
)
assert not c1._categories_match_up_to_permutation(
CategoricalIndex(c1, ordered=True)
)
# GH 16659
s1 = Series(c1)
s2 = Series(c2)
s3 = Series(c3)
assert c1._categories_match_up_to_permutation(s1)
assert c2._categories_match_up_to_permutation(s2)
assert c3._categories_match_up_to_permutation(s3)
assert c1._categories_match_up_to_permutation(s2)
assert not c1._categories_match_up_to_permutation(s3)
assert not c1._categories_match_up_to_permutation(s1.astype(object))
def test_set_dtype_same(self):
c = Categorical(["a", "b", "c"])
result = c._set_dtype(CategoricalDtype(["a", "b", "c"]), copy=True)
tm.assert_categorical_equal(result, c)
def test_set_dtype_new_categories(self):
c = Categorical(["a", "b", "c"])
result = c._set_dtype(CategoricalDtype(list("abcd")), copy=True)
tm.assert_numpy_array_equal(result.codes, c.codes)
tm.assert_index_equal(result.dtype.categories, Index(list("abcd")))
@pytest.mark.parametrize(
"values, categories, new_categories, warn",
[
# No NaNs, same cats, same order
(["a", "b", "a"], ["a", "b"], ["a", "b"], None),
# No NaNs, same cats, different order
(["a", "b", "a"], ["a", "b"], ["b", "a"], None),
# Same, unsorted
(["b", "a", "a"], ["a", "b"], ["a", "b"], None),
# No NaNs, same cats, different order
(["b", "a", "a"], ["a", "b"], ["b", "a"], None),
# NaNs
(["a", "b", "c"], ["a", "b"], ["a", "b"], None),
(["a", "b", "c"], ["a", "b"], ["b", "a"], None),
(["b", "a", "c"], ["a", "b"], ["a", "b"], None),
(["b", "a", "c"], ["a", "b"], ["b", "a"], None),
# Introduce NaNs
(["a", "b", "c"], ["a", "b"], ["a"], Pandas4Warning),
(["a", "b", "c"], ["a", "b"], ["b"], Pandas4Warning),
(["b", "a", "c"], ["a", "b"], ["a"], Pandas4Warning),
(["b", "a", "c"], ["a", "b"], ["b"], Pandas4Warning),
# No overlap
(["a", "b", "c"], ["a", "b"], ["d", "e"], Pandas4Warning),
],
)
def test_set_dtype_many(self, values, categories, new_categories, warn, ordered):
msg = "Constructing a Categorical with a dtype and values containing"
warn1 = Pandas4Warning if set(values).difference(categories) else None
with tm.assert_produces_warning(warn1, match=msg):
c = Categorical(values, categories)
warn2 = Pandas4Warning if set(values).difference(new_categories) else None
with tm.assert_produces_warning(warn2, match=msg):
expected = Categorical(values, new_categories, ordered)
result = c._set_dtype(expected.dtype, copy=True)
tm.assert_categorical_equal(result, expected)
def test_set_dtype_no_overlap(self):
msg = "Constructing a Categorical with a dtype and values containing"
with tm.assert_produces_warning(Pandas4Warning, match=msg):
c = Categorical(["a", "b", "c"], ["d", "e"])
result = c._set_dtype(CategoricalDtype(["a", "b"]), copy=True)
expected = Categorical([None, None, None], categories=["a", "b"])
tm.assert_categorical_equal(result, expected)
def test_codes_dtypes(self):
# GH 8453
result = Categorical(["foo", "bar", "baz"])
assert result.codes.dtype == "int8"
result = Categorical([f"foo{i:05d}" for i in range(400)])
assert result.codes.dtype == "int16"
result = Categorical([f"foo{i:05d}" for i in range(40000)])
assert result.codes.dtype == "int32"
# adding cats
result = Categorical(["foo", "bar", "baz"])
assert result.codes.dtype == "int8"
result = result.add_categories([f"foo{i:05d}" for i in range(400)])
assert result.codes.dtype == "int16"
# removing cats
result = result.remove_categories([f"foo{i:05d}" for i in range(300)])
assert result.codes.dtype == "int8"
def test_iter_python_types(self):
# GH-19909
cat = Categorical([1, 2])
assert isinstance(next(iter(cat)), int)
assert isinstance(cat.tolist()[0], int)
def test_iter_python_types_datetime(self):
cat = Categorical([Timestamp("2017-01-01"), Timestamp("2017-01-02")])
assert isinstance(next(iter(cat)), Timestamp)
assert isinstance(cat.tolist()[0], Timestamp)
def test_interval_index_category(self):
# GH 38316
index = IntervalIndex.from_breaks(np.arange(3, dtype="uint64"))
result = CategoricalIndex(index).dtype.categories
expected = IntervalIndex.from_arrays(
[0, 1], [1, 2], dtype="interval[uint64, right]"
)
tm.assert_index_equal(result, expected)
| TestCategoricalDtypes |
python | great-expectations__great_expectations | great_expectations/data_context/types/resource_identifiers.py | {
"start": 16534,
"end": 17673
} | class ____(DataContextKey):
def __init__(self, configuration_key: str) -> None:
super().__init__()
if not isinstance(configuration_key, str):
raise gx_exceptions.InvalidDataContextKeyError( # noqa: TRY003 # FIXME CoP
f"configuration_key must be a string, not {type(configuration_key).__name__}"
)
self._configuration_key = configuration_key
@property
def configuration_key(self) -> str:
return self._configuration_key
def to_tuple(self): # type: ignore[explicit-override] # FIXME
return tuple(self.configuration_key.split("."))
def to_fixed_length_tuple(self): # type: ignore[explicit-override] # FIXME
return (self.configuration_key,)
@classmethod
@override
def from_tuple(cls, tuple_):
return cls(configuration_key=tuple_[0])
@classmethod
@override
def from_fixed_length_tuple(cls, tuple_):
return cls.from_tuple(tuple_)
def __repr__(self): # type: ignore[explicit-override] # FIXME
return f"{self.__class__.__name__}::{self._configuration_key}"
| ConfigurationIdentifier |
python | cython__cython | Demos/benchmarks/bm_raytrace.py | {
"start": 4701,
"end": 5735
} | class ____(object):
def __init__(self, width, height):
self.bytes = array.array('B', [0] * (width * height * 3))
for i in range(width * height):
self.bytes[i * 3 + 2] = 255
self.width = width
self.height = height
def plot(self, x, y, r, g, b):
i = ((self.height - y - 1) * self.width + x) * 3
self.bytes[i] = max(0, min(255, int(r * 255)))
self.bytes[i + 1] = max(0, min(255, int(g * 255)))
self.bytes[i + 2] = max(0, min(255, int(b * 255)))
def write_ppm(self, filename):
header = 'P6 %d %d 255\n' % (self.width, self.height)
with open(filename, "wb") as fp:
fp.write(header.encode('ascii'))
fp.write(self.bytes.tobytes())
def firstIntersection(intersections):
result = None
for i in intersections:
candidateT = i[1]
if candidateT is not None and candidateT > -EPSILON:
if result is None or candidateT < result[1]:
result = i
return result
| Canvas |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/selector.py | {
"start": 6127,
"end": 6395
} | class ____:
location_name: str
def to_repository_selector(self) -> RepositorySelector:
return RepositorySelector(
location_name=self.location_name,
repository_name=SINGLETON_REPOSITORY_NAME,
)
@record
| CodeLocationSelector |
python | getsentry__sentry | tests/sentry/workflow_engine/migration_helpers/test_issue_alert_dual_write.py | {
"start": 1626,
"end": 4043
} | class ____(TestCase):
def setUp(self) -> None:
conditions = [
{"id": ReappearedEventCondition.id},
{"id": RegressionEventCondition.id},
{
"id": AgeComparisonFilter.id,
"comparison_type": AgeComparisonType.OLDER,
"value": "10",
"time": "hour",
},
]
integration = install_slack(self.organization)
self.action_data = [
{
"channel": "#my-channel",
"id": "sentry.integrations.slack.notify_action.SlackNotifyServiceAction",
"workspace": str(integration.id),
"uuid": "test-uuid",
"channel_id": "C01234567890",
},
]
self.issue_alert = self.create_project_rule(
name="test",
condition_data=conditions,
action_match="any",
filter_match="any",
action_data=self.action_data,
)
self.issue_alert.data["frequency"] = 5
self.issue_alert.save()
self.filters = [
{
"id": TaggedEventFilter.id,
"match": MatchType.EQUAL,
"key": "LOGGER",
"value": "sentry.example",
},
{
"id": TaggedEventFilter.id,
"match": MatchType.IS_SET,
"key": "environment",
},
{
"id": EventAttributeFilter.id,
"match": MatchType.EQUAL,
"value": "hi",
"attribute": "message",
},
]
self.conditions = [
{
"interval": "1h",
"id": EventUniqueUserFrequencyConditionWithConditions.id,
"value": 50,
"comparisonType": ComparisonType.COUNT,
}
] + self.filters
self.expected_filters = [
{
"match": MatchType.EQUAL,
"key": self.filters[0]["key"],
"value": self.filters[0]["value"],
},
{"match": MatchType.IS_SET, "key": self.filters[1]["key"]},
{
"match": MatchType.EQUAL,
"attribute": self.filters[2]["attribute"],
"value": self.filters[2]["value"],
},
]
| RuleMigrationHelpersTestBase |
python | pytorch__pytorch | torch/distributed/elastic/rendezvous/api.py | {
"start": 1081,
"end": 1191
} | class ____(RendezvousError):
"""Raised when a rendezvous did not complete on time."""
| RendezvousTimeoutError |
python | catalyst-team__catalyst | examples/detection/models/ssd.py | {
"start": 358,
"end": 2003
} | class ____(nn.Module):
def __init__(self, backbone="resnet50", backbone_path=None):
"""
Args:
backbone (str): resnet backbone to use.
Expected one of ["resnet18", "resnet34", "resnet50", "resnet101", "resnet152"]
Default is "resnet50".
backbone_path (str): path to pretrained backbone model.
If ``None`` then will be used torchvision pretrained model.
Default is None.
"""
super().__init__()
self.out_channels = _channels_map[backbone]
if backbone == "resnet18":
backbone = resnet.resnet18(pretrained=not backbone_path)
elif backbone == "resnet34":
backbone = resnet.resnet34(pretrained=not backbone_path)
elif backbone == "resnet50":
backbone = resnet.resnet50(pretrained=not backbone_path)
elif backbone == "resnet101":
backbone = resnet.resnet101(pretrained=not backbone_path)
elif backbone == "resnet152":
backbone = resnet.resnet152(pretrained=not backbone_path)
else:
raise ValueError(f"Unknown ResNet backbone - '{backbone}'!")
if backbone_path:
backbone.load_state_dict(torch.load(backbone_path))
self.feature_extractor = nn.Sequential(*list(backbone.children())[:7])
conv4_block1 = self.feature_extractor[-1][0]
conv4_block1.conv1.stride = (1, 1)
conv4_block1.conv2.stride = (1, 1)
conv4_block1.downsample[0].stride = (1, 1)
def forward(self, x):
x = self.feature_extractor(x)
return x
| ResnetBackbone |
python | scipy__scipy | scipy/sparse/_coo.py | {
"start": 66794,
"end": 70441
} | class ____(_coo_base, sparray):
"""
A sparse array in COOrdinate format.
Also known as the 'ijv' or 'triplet' format.
This can be instantiated in several ways:
coo_array(D)
where D is an ndarray
coo_array(S)
with another sparse array or matrix S (equivalent to S.tocoo())
coo_array(shape, [dtype])
to construct an empty sparse array with shape `shape`
dtype is optional, defaulting to dtype='d'.
coo_array((data, coords), [shape])
to construct from existing data and index arrays:
1. data[:] the entries of the sparse array, in any order
2. coords[i][:] the axis-i coordinates of the data entries
Where ``A[coords] = data``, and coords is a tuple of index arrays.
When shape is not specified, it is inferred from the index arrays.
Attributes
----------
dtype : dtype
Data type of the sparse array
shape : tuple of integers
Shape of the sparse array
ndim : int
Number of dimensions of the sparse array
nnz
size
data
COO format data array of the sparse array
coords
COO format tuple of index arrays
has_canonical_format : bool
Whether the matrix has sorted coordinates and no duplicates
format
T
Notes
-----
Sparse arrays can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the COO format
- facilitates fast conversion among sparse formats
- permits duplicate entries (see example)
- very fast conversion to and from CSR/CSC formats
Disadvantages of the COO format
- does not directly support:
+ arithmetic operations
+ slicing
Intended Usage
- COO is a fast format for constructing sparse arrays
- Once a COO array has been constructed, convert to CSR or
CSC format for fast arithmetic and matrix vector operations
- By default when converting to CSR or CSC format, duplicate (i,j)
entries will be summed together. This facilitates efficient
construction of finite element matrices and the like. (see example)
Canonical format
- Entries and coordinates sorted by row, then column.
- There are no duplicate entries (i.e. duplicate (i,j) locations)
- Data arrays MAY have explicit zeros.
Examples
--------
>>> # Constructing an empty sparse array
>>> import numpy as np
>>> from scipy.sparse import coo_array
>>> coo_array((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> # Constructing a sparse array using ijv format
>>> row = np.array([0, 3, 1, 0])
>>> col = np.array([0, 3, 1, 2])
>>> data = np.array([4, 5, 7, 9])
>>> coo_array((data, (row, col)), shape=(4, 4)).toarray()
array([[4, 0, 9, 0],
[0, 7, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 5]])
>>> # Constructing a sparse array with duplicate coordinates
>>> row = np.array([0, 0, 1, 3, 1, 0, 0])
>>> col = np.array([0, 2, 1, 3, 1, 0, 0])
>>> data = np.array([1, 1, 1, 1, 1, 1, 1])
>>> coo = coo_array((data, (row, col)), shape=(4, 4))
>>> # Duplicate coordinates are maintained until implicitly or explicitly summed
>>> np.max(coo.data)
1
>>> coo.toarray()
array([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
| coo_array |
python | cython__cython | Cython/Compiler/Nodes.py | {
"start": 384036,
"end": 384254
} | class ____(GILExitNode):
"""
Ensure the GIL in nogil functions for cleanup before returning.
"""
def generate_execution_code(self, code):
code.put_ensure_gil(declare_gilstate=False)
| EnsureGILNode |
python | numpy__numpy | benchmarks/benchmarks/bench_function_base.py | {
"start": 458,
"end": 795
} | class ____(Benchmark):
def setup(self):
self.d = np.linspace(0, 100, 100000)
def time_full_coverage(self):
np.histogram(self.d, 200, (0, 100))
def time_small_coverage(self):
np.histogram(self.d, 200, (50, 51))
def time_fine_binning(self):
np.histogram(self.d, 10000, (0, 100))
| Histogram1D |
python | cython__cython | Cython/Compiler/Errors.py | {
"start": 2506,
"end": 2757
} | class ____(Exception):
# If this is ever raised, there is a bug in the compiler.
def __init__(self, message):
self.message_only = message
Exception.__init__(self, "Internal compiler error: %s"
% message)
| InternalError |
python | walkccc__LeetCode | solutions/3408. Design Task Manager/3408.py | {
"start": 111,
"end": 319
} | class ____:
userId: int
taskId: int
priority: int
def __lt__(self, other):
if self.priority == other.priority:
return self.taskId > other.taskId
return self.priority > other.priority
| Task |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 170917,
"end": 171493
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("enterprise_id", "setting_value", "client_mutation_id")
enterprise_id = sgqlc.types.Field(
sgqlc.types.non_null(ID), graphql_name="enterpriseId"
)
setting_value = sgqlc.types.Field(
sgqlc.types.non_null(EnterpriseEnabledSettingValue), graphql_name="settingValue"
)
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
| UpdateEnterpriseTwoFactorAuthenticationRequiredSettingInput |
python | getsentry__sentry | src/sentry/tasks/on_demand_metrics.py | {
"start": 2879,
"end": 19232
} | class ____(Exception):
pass
@instrumented_task(
name="sentry.tasks.on_demand_metrics.schedule_on_demand_check",
namespace=performance_tasks,
expires=180,
processing_deadline_duration=120,
)
def schedule_on_demand_check() -> None:
"""
# Summary
This task schedules work to be done to check cardinality in group-by columns in dashboard widgets,
offloading it from `build_project_config` in the relay task (specifically in :func:`sentry.relay.config.metric_extraction.get_metric_extraction_config`).
Spawns a series of child tasks :func:`process_widget_specs`, and limits them using
a stateful (cached) count + modulo to spread out the work over `total_batches` number of scheduled task runs.
It's safe, but not ideal if a particular child tasks fails to run in cases where the current state is reset since memcache is ephemeral.
### Other
- The amount of work being done can be reduced once extraction is stateful in our db, which would allow us to
only iterate over widgets with currently enabled extraction.
# Ops
Killswitch option: `on_demand_metrics.check_widgets.enable`
- It is safe to turn off for a period of `_WIDGET_QUERY_CARDINALITY_TTL`, customer data should not be lost. After this time
high cardinality metrics may inadverently be stored for on-demand-extraction.
"""
if not options.get("on_demand_metrics.check_widgets.enable"):
return
rollout = options.get("on_demand_metrics.check_widgets.rollout")
total_batches = options.get("on_demand_metrics.check_widgets.query.total_batches")
widgets_per_batch = options.get("on_demand_metrics.check_widgets.query.batch_size")
currently_processing_batch = _get_current_processing_batch(total_batches)
widget_query_ids = []
dashboard_widget_pre_rollout_count = 0
dashboard_widget_count = 0
for (widget_query_id,) in RangeQuerySetWrapper(
DashboardWidgetQuery.objects.filter(
widget__widget_type__in=[
DashboardWidgetTypes.DISCOVER,
DashboardWidgetTypes.TRANSACTION_LIKE,
]
).values_list("id"),
result_value_getter=lambda item: item[0],
):
dashboard_widget_pre_rollout_count += 1
if ((widget_query_id % 1_000) / 1_000) > rollout:
# % rollout based on widget_id accurate to 0.1%
continue
batch_for_widget_query = _get_batch_for_widget_query(widget_query_id, total_batches)
if batch_for_widget_query != currently_processing_batch:
continue
widget_query_ids.append(widget_query_id)
dashboard_widget_count += 1
if len(widget_query_ids) >= widgets_per_batch:
process_widget_specs.delay(
widget_query_ids,
)
widget_query_ids = []
# Clean up any remaining widgets
if widget_query_ids:
process_widget_specs.delay(
widget_query_ids,
)
_set_currently_processing_batch(currently_processing_batch)
metrics.incr(
"task.on_demand_metrics.widgets.currently_processing_batch",
amount=currently_processing_batch, # Helps correlate which batch is currently being processed with metrics
sample_rate=1.0,
)
metrics.incr(
"task.on_demand_metrics.widgets.pre_rollout.total",
amount=dashboard_widget_pre_rollout_count,
sample_rate=1.0,
)
metrics.incr(
"task.on_demand_metrics.widgets.total",
amount=dashboard_widget_count,
sample_rate=1.0,
)
@instrumented_task(
name="sentry.tasks.on_demand_metrics.process_widget_specs",
namespace=performance_tasks,
expires=180,
processing_deadline_duration=120,
)
def process_widget_specs(widget_query_ids: list[int], **kwargs: Any) -> None:
"""
Child task spawned from :func:`schedule_on_demand_check`.
"""
if not options.get("on_demand_metrics.check_widgets.enable"):
return
widget_query_count = 0
widget_query_high_cardinality_count = 0
widget_query_no_spec_count = 0
for query in DashboardWidgetQuery.objects.filter(id__in=widget_query_ids).select_related(
"widget__dashboard__organization"
):
organization = query.widget.dashboard.organization
enabled_features = on_demand_metrics_feature_flags(organization)
widget_query_count += 1
widget_specs = _get_widget_on_demand_specs(query, organization)
if not widget_specs:
# It's possible this query doesn't qualify for on-demand.
widget_query_no_spec_count += 1
is_low_cardinality = None
# This only exists to make sure we're 1:1 with flagr since we're not fully rolled out.
# TODO: Remove feature flag check once we've checked metrics have gone to 0.
if "organizations:on-demand-metrics-extraction-widgets" in enabled_features:
if widget_specs:
is_low_cardinality = _get_widget_query_low_cardinality(query, organization)
if is_low_cardinality is False:
widget_query_high_cardinality_count += 1
else:
metrics.incr(
"task.on_demand_metrics.widget_queries.per_run.flag_disabled",
sample_rate=1.0,
)
_set_widget_on_demand_state(
widget_query=query,
specs=widget_specs,
is_low_cardinality=is_low_cardinality,
enabled_features=enabled_features,
)
metrics.incr(
"tasks.on_demand_metrics.widget_queries.per_run.no_spec",
amount=widget_query_no_spec_count,
sample_rate=1.0,
)
metrics.incr(
"task.on_demand_metrics.widget_queries.per_run.high_cardinality",
amount=widget_query_high_cardinality_count,
sample_rate=1.0,
)
metrics.incr(
"task.on_demand_metrics.widget_queries.per_run.total",
amount=widget_query_count,
sample_rate=1.0,
)
def _get_widget_on_demand_specs(
widget_query: DashboardWidgetQuery,
organization: Organization,
) -> Sequence[HashedMetricSpec]:
"""
Saves on-demand state for a widget query.
"""
project_for_query = cache.get(_get_project_for_query_cache_key(organization), None)
if not cache.has_key(_get_project_for_query_cache_key(organization)):
# This can just be the first project we find, since spec hashes should not be project
# dependent. If spec hashes become project dependent then this may need to change.
project_for_query = Project.objects.filter(organization=organization).first()
cache.set(
_get_project_for_query_cache_key(organization),
project_for_query,
timeout=_COLUMN_CARDINALITY_TTL,
)
if not project_for_query:
return []
widget_specs = convert_widget_query_to_metric(project_for_query, widget_query, True)
specs_per_version: dict[int, dict[str, HashedMetricSpec]] = {}
for hash, spec, spec_version in widget_specs:
specs_per_version.setdefault(spec_version.version, {})
specs_per_version[spec_version.version][hash] = (hash, spec, spec_version)
specs: list[HashedMetricSpec] = []
for _, _specs_for_version in specs_per_version.items():
specs += _specs_for_version.values()
return specs
def _set_widget_on_demand_state(
widget_query: DashboardWidgetQuery,
specs: Sequence[HashedMetricSpec],
is_low_cardinality: bool | None,
enabled_features: set[str],
) -> None:
specs_per_version: dict[int, list[HashedMetricSpec]] = {}
for hash, spec, spec_version in specs:
specs_per_version.setdefault(spec_version.version, [])
specs_per_version[spec_version.version].append((hash, spec, spec_version))
for spec_version in OnDemandMetricSpecVersioning.get_spec_versions():
version = spec_version.version
specs_for_version = specs_per_version.get(version, [])
extraction_state = _determine_extraction_state(specs, is_low_cardinality, enabled_features)
spec_hashes = [hashed_spec[0] for hashed_spec in specs_for_version]
(on_demand, _) = DashboardWidgetQueryOnDemand.objects.get_or_create(
dashboard_widget_query=widget_query,
spec_version=version,
defaults={
"spec_hashes": spec_hashes,
"extraction_state": extraction_state,
},
)
if on_demand.can_extraction_be_auto_overridden():
on_demand.extraction_state = extraction_state
if options.get("on_demand.update_on_demand_modified"):
# Only temporarily required to check we've updated data on rows the task has passed
# Or updated to pass the check against widget query date_modified.
on_demand.date_modified = timezone.now()
on_demand.spec_hashes = spec_hashes
on_demand.save()
def set_or_create_on_demand_state(
widget_query: DashboardWidgetQuery,
organization: Organization,
is_low_cardinality: bool,
feature_enabled: bool,
current_widget_specs: set[str],
) -> None:
specs = _get_widget_on_demand_specs(widget_query, organization)
specs_per_version: dict[int, list[HashedMetricSpec]] = {}
for hash, spec, spec_version in specs:
specs_per_version.setdefault(spec_version.version, [])
specs_per_version[spec_version.version].append((hash, spec, spec_version))
for spec_version in OnDemandMetricSpecVersioning.get_spec_versions():
version = spec_version.version
specs_for_version = specs_per_version.get(version, [])
if not specs:
extraction_state = OnDemandExtractionState.DISABLED_NOT_APPLICABLE
elif widget_exceeds_max_specs(specs, current_widget_specs, organization):
extraction_state = OnDemandExtractionState.DISABLED_SPEC_LIMIT
elif not is_low_cardinality:
extraction_state = OnDemandExtractionState.DISABLED_HIGH_CARDINALITY
elif not feature_enabled:
extraction_state = OnDemandExtractionState.DISABLED_PREROLLOUT
else:
extraction_state = OnDemandExtractionState.ENABLED_CREATION
spec_hashes = [hashed_spec[0] for hashed_spec in specs_for_version]
on_demand, created = DashboardWidgetQueryOnDemand.objects.get_or_create(
dashboard_widget_query=widget_query,
spec_version=version,
defaults={
"spec_hashes": spec_hashes,
"extraction_state": extraction_state,
},
)
if not created:
on_demand.spec_hashes = spec_hashes
on_demand.extraction_state = extraction_state
on_demand.save()
def _determine_extraction_state(
specs: Sequence[HashedMetricSpec], is_low_cardinality: bool | None, enabled_features: set[str]
) -> OnDemandExtractionState:
if not specs:
return OnDemandExtractionState.DISABLED_NOT_APPLICABLE
if "organizations:on-demand-metrics-extraction-widgets" not in enabled_features:
return OnDemandExtractionState.DISABLED_PREROLLOUT
if is_low_cardinality is False:
return OnDemandExtractionState.DISABLED_HIGH_CARDINALITY
return OnDemandExtractionState.ENABLED_ENROLLED
def _get_widget_query_low_cardinality(
widget_query: DashboardWidgetQuery, organization: Organization
) -> bool | None:
"""
Checks cardinality of existing widget queries before allowing the metric spec, so that
group-by clauses with high cardinality tags are not added to the on_demand metric.
New queries will be checked upon creation and not allowed at that time.
"""
query_columns = widget_query.columns
max_cardinality_allowed = options.get("on_demand.max_widget_cardinality.count")
field_cardinality = check_field_cardinality(
query_columns,
organization,
max_cardinality_allowed,
is_task=True,
widget_query=widget_query,
)
return all(field_cardinality.values())
@sentry_sdk.tracing.trace
def check_field_cardinality(
query_columns: list[str] | None,
organization: Organization,
max_cardinality: int,
is_task: bool = False,
widget_query: DashboardWidgetQuery | None = None,
) -> dict[str, str]:
if not features.has("organizations:on-demand-metrics-extraction-widgets", organization):
return {}
if not query_columns:
return {}
if is_task:
cache_identifier = TASK_CACHE_KEY
cache_ttl = _WIDGET_QUERY_CARDINALITY_TTL
period = TASK_QUERY_PERIOD
assert widget_query is not None, "widget_query is a required param"
else:
cache_identifier = DASHBOARD_CACHE_KEY
cache_ttl = _COLUMN_CARDINALITY_TTL
period = DASHBOARD_QUERY_PERIOD
# We cache each key individually to query less
cache_keys: dict[str, str] = {}
for column in query_columns:
column_cache_key = get_field_cardinality_cache_key(column, organization, cache_identifier)
cache_keys[column] = column_cache_key
cardinality_map = cache.get_many(cache_keys.values())
if len(cardinality_map) == len(query_columns):
return cardinality_map
query_columns = [col for col, key in cache_keys.items() if key not in cardinality_map]
with sentry_sdk.isolation_scope() as scope:
if widget_query:
scope.set_tag("widget_query.widget_id", widget_query.id)
scope.set_tag("widget_query.org_slug", organization.slug)
scope.set_tag("widget_query.conditions", widget_query.conditions)
else:
scope.set_tag("cardinality_check.org_slug", organization.slug)
try:
processed_results, columns_to_check = _query_cardinality(
query_columns, organization, period
)
for column in query_columns:
count = processed_results["data"][0][f"count_unique({column})"]
column_low_cardinality = count <= max_cardinality
cardinality_map[cache_keys[column]] = column_low_cardinality
if not column_low_cardinality:
scope.set_tag("widget_query.column_name", column)
if widget_query:
sentry_sdk.capture_message(
"On Demand Metrics: Cardinality exceeded for dashboard_widget_query",
level="warning",
tags={
"widget_query.id": widget_query.id,
"widget_query.column_name": column,
"widget_query.count": count,
},
)
except Exception as error:
sentry_sdk.capture_exception(error)
cache.set_many(cardinality_map, timeout=cache_ttl)
# assume that columns are low cardinality if we fail to retrieve it for some reason
return {key: cardinality_map.get(value, True) for key, value in cache_keys.items()}
@sentry_sdk.tracing.trace
def _query_cardinality(
query_columns: list[str], organization: Organization, period: str = "30m"
) -> tuple[EventsResponse, list[str]]:
# Restrict period down to an allowlist so we're not slamming snuba with giant queries
if period not in [TASK_QUERY_PERIOD, DASHBOARD_QUERY_PERIOD]:
raise Exception("Cardinality can only be queried with 1h or 30m")
params = SnubaParams(
stats_period=period,
organization=organization,
projects=list(Project.objects.filter(organization=organization)),
)
columns_to_check = [column for column in query_columns if not fields.is_function(column)]
unique_columns = [f"count_unique({column})" for column in columns_to_check]
query_builder = DiscoverQueryBuilder(
dataset=Dataset.Discover,
params={},
snuba_params=params,
selected_columns=unique_columns,
config=QueryBuilderConfig(
transform_alias_to_input_format=True,
),
)
results = query_builder.run_query(Referrer.METRIC_EXTRACTION_CARDINALITY_CHECK.value)
processed_results = query_builder.process_results(results)
return processed_results, columns_to_check
| HighCardinalityWidgetException |
python | getsentry__sentry | tests/sentry/api/serializers/test_project.py | {
"start": 31707,
"end": 32348
} | class ____(TestCase):
def test_simple(self) -> None:
user = self.create_user(username="foo")
organization = self.create_organization(owner=user)
team = self.create_team(organization=organization)
project = self.create_project(teams=[team], organization=organization, name="foo")
result = serialize(project, user, ProjectWithOrganizationSerializer())
assert result["slug"] == project.slug
assert result["name"] == project.name
assert result["id"] == str(project.id)
assert result["organization"] == serialize(organization, user)
| ProjectWithOrganizationSerializerTest |
python | buildout__buildout | src/zc/buildout/tests/recipes/src/environ.py | {
"start": 23,
"end": 454
} | class ____:
def __init__(self, buildout, name, options):
self.buildout = buildout
self.options = options
def install(self):
_ = self.options['name']
sys.stdout.write('HOME %s\\n' % os.environ['HOME'])
sys.stdout.write('USERPROFILE %s\\n' % os.environ['USERPROFILE'])
sys.stdout.write('expanduser %s\\n' % os.path.expanduser('~'))
return ()
update = install
| Environ |
python | FactoryBoy__factory_boy | tests/test_docs_internals.py | {
"start": 2071,
"end": 2385
} | class ____(factory.Factory):
class Meta:
model = UserLog
user = factory.SubFactory('test_docs_internals.UserFactory')
timestamp = factory.fuzzy.FuzzyDateTime(
datetime.datetime(2000, 1, 1, tzinfo=datetime.timezone.utc),
)
action = factory.Iterator(UserLog.ACTIONS)
| UserLogFactory |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_asb.py | {
"start": 24348,
"end": 26983
} | class ____:
def test_init(self):
"""
Test init by creating AzureServiceBusTopicDeleteOperator with task id, topic name and asserting
with values
"""
asb_delete_topic_operator = AzureServiceBusTopicDeleteOperator(
task_id="asb_delete_topic",
topic_name=TOPIC_NAME,
)
assert asb_delete_topic_operator.task_id == "asb_delete_topic"
assert asb_delete_topic_operator.topic_name == TOPIC_NAME
@mock.patch("airflow.providers.microsoft.azure.hooks.asb.AdminClientHook.get_conn")
@mock.patch("azure.servicebus.management.TopicProperties")
def test_delete_topic(self, mock_topic_properties, mock_get_conn):
"""
Test AzureServiceBusTopicDeleteOperator by mocking topic name, connection
"""
asb_delete_topic = AzureServiceBusTopicDeleteOperator(
task_id="asb_delete_topic",
topic_name=TOPIC_NAME,
)
mock_topic_properties.name = TOPIC_NAME
mock_get_conn.return_value.__enter__.return_value.get_topic.return_value = mock_topic_properties
with mock.patch.object(asb_delete_topic.log, "info") as mock_log_info:
asb_delete_topic.execute(None)
mock_log_info.assert_called_with("Topic %s deleted.", TOPIC_NAME)
@mock.patch("airflow.providers.microsoft.azure.hooks.asb.AdminClientHook.get_conn")
def test_delete_topic_not_exists(self, mock_get_conn):
"""
Test AzureServiceBusTopicDeleteOperator by mocking topic name, connection
"""
asb_delete_topic_not_exists = AzureServiceBusTopicDeleteOperator(
task_id="asb_delete_topic_not_exists",
topic_name=TOPIC_NAME,
)
mock_get_conn.return_value.__enter__.return_value.get_topic.return_value = None
with mock.patch.object(asb_delete_topic_not_exists.log, "info") as mock_log_info:
asb_delete_topic_not_exists.execute(None)
mock_log_info.assert_called_with("Topic %s does not exist.", TOPIC_NAME)
@mock.patch("airflow.providers.microsoft.azure.hooks.asb.AdminClientHook")
def test_delete_topic_exception(self, mock_sb_admin_client):
"""
Test `delete_topic` functionality to raise AirflowException,
by passing topic name as None and pytest raise Airflow Exception
"""
asb_delete_topic_exception = AzureServiceBusTopicDeleteOperator(
task_id="delete_service_bus_subscription",
topic_name=None,
)
with pytest.raises(TypeError):
asb_delete_topic_exception.execute(None)
| TestASBTopicDeleteOperator |
python | getsentry__sentry | src/sentry/search/events/builder/profiles.py | {
"start": 321,
"end": 538
} | class ____(Protocol):
@property
def config(self) -> ProfilesDatasetConfig: ...
@property
def params(self) -> SnubaParams: ...
def column(self, name: str) -> Column: ...
| ProfilesQueryBuilderProtocol |
python | dagster-io__dagster | python_modules/dagster/dagster/components/resolved/model.py | {
"start": 1191,
"end": 2523
} | class ____:
callable: Callable[["ResolutionContext", Any], Any]
_default_fn = AttrWithContextFn(lambda context, field_value: context.resolve_value(field_value))
_passthrough_fn = AttrWithContextFn(lambda context, val: val)
def resolve_union(resolvers: Sequence["Resolver"], context: "ResolutionContext", field_value: Any):
"""Resolve a union typed field by trying each resolver in order until one succeeds.
This attempts to mirror the behavior of the Union type in Pydantic using the left-to-right
strategy. If all resolvers fail, a ResolutionException is raised.
"""
accumulated_errors = []
custom_resolvers = [r for r in resolvers if not r.is_default]
default_resolvers = [r for r in resolvers if r.is_default]
# the default resolver will pass through any value, so run those last
for r in chain(custom_resolvers, default_resolvers):
try:
result = r.fn.callable(context, field_value)
if result is not None:
return result
except Exception:
accumulated_errors.append(traceback.format_exc())
raise ResolutionException(
"No resolver matched the field value"
+ "\n"
+ textwrap.indent(
"\n".join(accumulated_errors),
prefix=" ",
),
)
@public
| AttrWithContextFn |
python | django__django | tests/m2m_through/models.py | {
"start": 99,
"end": 219
} | class ____(models.Model):
name = models.CharField(max_length=128)
class Meta:
ordering = ("name",)
| Person |
python | getsentry__sentry | src/sentry/snuba/metrics/query.py | {
"start": 4276,
"end": 4859
} | class ____:
def __post_init__(self) -> None:
"""Run validation methods if declared.
The validation method can be a simple check
that raises ValueError or a transformation to
the field value.
The validation is performed by calling a function named:
`validate_<field_name>(self) -> None`
"""
for name, _ in self.__dataclass_fields__.items(): # type: ignore[attr-defined]
if method := getattr(self, f"validate_{name}", None):
method()
@dataclass(frozen=True)
| MetricsQueryValidationRunner |
python | sphinx-doc__sphinx | tests/test_transforms/test_transforms_post_transforms.py | {
"start": 2780,
"end": 12141
} | class ____:
"""Integration test for :class:`sphinx.transforms.post_transforms.SigElementFallbackTransform`."""
# safe copy of the "built-in" desc_sig_* nodes (during the test, instances of such nodes
# will be created sequentially, so we fix a possible order at the beginning using a tuple)
_builtin_sig_elements: tuple[type[addnodes.desc_sig_element], ...] = tuple(
SIG_ELEMENTS
)
@pytest.fixture(autouse=True)
def builtin_sig_elements(self) -> tuple[type[addnodes.desc_sig_element], ...]:
"""Fixture returning an ordered view on the original value of :data:`!sphinx.addnodes.SIG_ELEMENTS`."""
return self._builtin_sig_elements
@pytest.fixture
def document(
self,
app: SphinxTestApp,
builtin_sig_elements: tuple[type[addnodes.desc_sig_element], ...],
) -> nodes.document:
"""Fixture returning a new document with built-in ``desc_sig_*`` nodes and a final ``desc_inline`` node."""
doc = new_document('')
doc.settings.env = app.env
# Nodes that should be supported by a default custom translator class.
# It is important that builtin_sig_elements has a fixed order so that
# the nodes can be deterministically checked.
doc += [node_type('', '') for node_type in builtin_sig_elements]
doc += addnodes.desc_inline('py')
return doc
@pytest.fixture
def with_desc_sig_elements(self, value: Any) -> bool:
"""Dynamic fixture acting as the identity on booleans."""
assert isinstance(value, bool)
return value
@pytest.fixture
def add_visitor_method_for(self, value: Any) -> list[str]:
"""Dynamic fixture acting as the identity on a list of strings."""
assert isinstance(value, list)
assert all(isinstance(item, str) for item in value)
return value
@pytest.fixture(autouse=True)
def translator_class(self, request: SubRequest) -> type[nodes.NodeVisitor]:
"""Minimal interface fixture similar to SphinxTranslator but orthogonal thereof."""
logger = logging.getLogger(__name__)
class BaseCustomTranslatorClass(nodes.NodeVisitor):
"""Base class for a custom translator class, orthogonal to ``SphinxTranslator``."""
def __init__(self, document, *_a):
super().__init__(document)
# ignore other arguments
def dispatch_visit(self, node):
for node_class in node.__class__.__mro__:
if method := getattr(self, f'visit_{node_class.__name__}', None):
method(node)
break
else:
logger.info('generic visit: %r', node.__class__.__name__)
super().dispatch_visit(node)
def unknown_visit(self, node):
logger.warning('unknown visit: %r', node.__class__.__name__)
raise nodes.SkipDeparture # ignore unknown departure
def visit_document(self, node):
raise nodes.SkipDeparture # ignore departure
def mark_node(self, node: nodes.Node) -> NoReturn:
logger.info('mark: %r', node.__class__.__name__)
raise nodes.SkipDeparture # ignore departure
with_desc_sig_elements = request.getfixturevalue('with_desc_sig_elements')
if with_desc_sig_elements:
desc_sig_elements_list = request.getfixturevalue('builtin_sig_elements')
else:
desc_sig_elements_list = []
add_visitor_method_for = request.getfixturevalue('add_visitor_method_for')
visitor_methods = {f'visit_{tp.__name__}' for tp in desc_sig_elements_list}
visitor_methods.update(f'visit_{name}' for name in add_visitor_method_for)
class_dict = dict.fromkeys(visitor_methods, BaseCustomTranslatorClass.mark_node)
return type('CustomTranslatorClass', (BaseCustomTranslatorClass,), class_dict)
@pytest.mark.parametrize(
'add_visitor_method_for',
[[], ['desc_inline']],
ids=[
'no_explicit_visitor',
'explicit_desc_inline_visitor',
],
)
@pytest.mark.parametrize(
'with_desc_sig_elements',
[True, False],
ids=[
'with_default_visitors_for_desc_sig_elements',
'without_default_visitors_for_desc_sig_elements',
],
)
@pytest.mark.sphinx('dummy', testroot='root')
def test_support_desc_inline(
self,
document: nodes.document,
with_desc_sig_elements: bool,
add_visitor_method_for: list[str],
request: SubRequest,
) -> None:
document, _, _ = self._exec(request)
# count the number of desc_inline nodes with the extra _sig_node_type field
desc_inline_typename = addnodes.desc_inline.__name__
visit_desc_inline = desc_inline_typename in add_visitor_method_for
if visit_desc_inline:
assert_node(document[-1], addnodes.desc_inline)
else:
assert_node(document[-1], nodes.inline, _sig_node_type=desc_inline_typename)
@pytest.mark.parametrize(
'add_visitor_method_for',
[
[], # no support
['desc_sig_space'], # enable desc_sig_space visitor
['desc_sig_element'], # enable generic visitor
[
# enable desc_sig_space and generic visitors
'desc_sig_space',
'desc_sig_element',
],
],
ids=[
'no_explicit_visitor',
'explicit_desc_sig_space_visitor',
'explicit_desc_sig_element_visitor',
'explicit_desc_sig_space_and_desc_sig_element_visitors',
],
)
@pytest.mark.parametrize(
'with_desc_sig_elements',
[True, False],
ids=[
'with_default_visitors_for_desc_sig_elements',
'without_default_visitors_for_desc_sig_elements',
],
)
@pytest.mark.sphinx('dummy', testroot='root')
def test_custom_implementation(
self,
document: nodes.document,
with_desc_sig_elements: bool,
add_visitor_method_for: list[str],
request: SubRequest,
) -> None:
document, stdout, stderr = self._exec(request)
assert (
len(self._builtin_sig_elements)
== len(document.children[:-1])
== len(stdout[:-1])
)
visit_desc_sig_element = (
addnodes.desc_sig_element.__name__ in add_visitor_method_for
)
ignore_sig_element_fallback_transform = (
visit_desc_sig_element or with_desc_sig_elements
)
if ignore_sig_element_fallback_transform:
# desc_sig_element is implemented or desc_sig_* nodes are properly handled (and left untouched)
for node_type, node, mess in zip(
self._builtin_sig_elements,
document.children[:-1],
stdout[:-1],
strict=True,
):
assert_node(node, node_type)
assert not hasattr(node, '_sig_node_type')
assert mess == f'mark: {node_type.__name__!r}'
else:
# desc_sig_* nodes are converted into inline nodes
for node_type, node, mess in zip(
self._builtin_sig_elements,
document.children[:-1],
stdout[:-1],
strict=True,
):
assert_node(node, nodes.inline, _sig_node_type=node_type.__name__)
assert mess == f'generic visit: {nodes.inline.__name__!r}'
# desc_inline node is never handled and always transformed
assert addnodes.desc_inline.__name__ not in add_visitor_method_for
assert_node(
document[-1], nodes.inline, _sig_node_type=addnodes.desc_inline.__name__
)
assert stdout[-1] == f'generic visit: {nodes.inline.__name__!r}'
# nodes.inline are never handled
assert (
len(stderr) == 1
if ignore_sig_element_fallback_transform
else len(document.children)
)
assert set(stderr) == {f'unknown visit: {nodes.inline.__name__!r}'}
@staticmethod
def _exec(request: SubRequest) -> tuple[nodes.document, list[str], list[str]]:
caplog = request.getfixturevalue('caplog')
caplog.set_level(logging.INFO, logger=__name__)
app = request.getfixturevalue('app')
translator_class = request.getfixturevalue('translator_class')
app.set_translator('dummy', translator_class)
# run the post-transform directly [building phase]
# document contains SIG_ELEMENTS nodes followed by a desc_inline node
document = request.getfixturevalue('document')
SigElementFallbackTransform(document).run()
# run the translator [writing phase]
translator = translator_class(document, app.builder)
document.walkabout(translator)
# extract messages
messages = caplog.record_tuples
stdout = [message for _, lvl, message in messages if lvl == logging.INFO]
stderr = [message for _, lvl, message in messages if lvl == logging.WARNING]
return document, stdout, stderr
| TestSigElementFallbackTransform |
python | miyuchina__mistletoe | mistletoe/contrib/scheme.py | {
"start": 905,
"end": 1151
} | class ____(span_token.SpanToken):
pattern = re.compile(r"(\d+)")
parse_inner = False
def __init__(self, match):
self.number = eval(match.group(0))
def __repr__(self):
return '<Number {}>'.format(self.number)
| Number |
python | openai__openai-python | src/openai/types/responses/response_computer_tool_call.py | {
"start": 4012,
"end": 4671
} | class ____(BaseModel):
id: str
"""The unique ID of the computer call."""
action: Action
"""A click action."""
call_id: str
"""An identifier used when responding to the tool call with output."""
pending_safety_checks: List[PendingSafetyCheck]
"""The pending safety checks for the computer call."""
status: Literal["in_progress", "completed", "incomplete"]
"""The status of the item.
One of `in_progress`, `completed`, or `incomplete`. Populated when items are
returned via API.
"""
type: Literal["computer_call"]
"""The type of the computer call. Always `computer_call`."""
| ResponseComputerToolCall |
python | pydantic__pydantic | pydantic/v1/error_wrappers.py | {
"start": 667,
"end": 1295
} | class ____(Representation):
__slots__ = 'exc', '_loc'
def __init__(self, exc: Exception, loc: Union[str, 'Loc']) -> None:
self.exc = exc
self._loc = loc
def loc_tuple(self) -> 'Loc':
if isinstance(self._loc, tuple):
return self._loc
else:
return (self._loc,)
def __repr_args__(self) -> 'ReprArgs':
return [('exc', self.exc), ('loc', self.loc_tuple())]
# ErrorList is something like Union[List[Union[List[ErrorWrapper], ErrorWrapper]], ErrorWrapper]
# but recursive, therefore just use:
ErrorList = Union[Sequence[Any], ErrorWrapper]
| ErrorWrapper |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 120802,
"end": 120997
} | class ____(SmallInteger):
"""The SQL SMALLINT type.
.. seealso::
:class:`_types.SmallInteger` - documentation for the base type.
"""
__visit_name__ = "SMALLINT"
| SMALLINT |
python | kamyu104__LeetCode-Solutions | Python/decrypt-string-from-alphabet-to-integer-mapping.py | {
"start": 582,
"end": 1119
} | class ____(object):
def freqAlphabets(self, s):
"""
:type s: str
:rtype: str
"""
def alpha(num):
return chr(ord('a') + int(num)-1)
i = len(s)-1
result = []
while i >= 0:
if s[i] == '#':
result.append(alpha(s[i-2:i]))
i -= 3
else:
result.append(alpha(s[i]))
i -= 1
return "".join(reversed(result))
# Time: O(n)
# Space: O(1)
import re
# regex solution
| Solution2 |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 38946,
"end": 39327
} | class ____(themeable):
"""
Vertical minor grid lines
Parameters
----------
theme_element : element_line
"""
def apply_ax(self, ax: Axes):
super().apply_ax(ax)
ax.xaxis.grid(which="minor", **self.properties)
def blank_ax(self, ax: Axes):
super().blank_ax(ax)
ax.grid(False, which="minor", axis="x")
| panel_grid_minor_x |
python | huggingface__transformers | src/transformers/models/speech_to_text/modeling_speech_to_text.py | {
"start": 28844,
"end": 38710
} | class ____(Speech2TextPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`Speech2TextDecoderLayer`]
Args:
config: Speech2TextConfig
embed_tokens (nn.Embedding): output embedding
"""
def __init__(self, config: Speech2TextConfig):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_target_positions
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = Speech2TextSinusoidalPositionalEmbedding(
self.max_target_positions,
config.d_model,
self.padding_idx,
)
self.layers = nn.ModuleList(
[Speech2TextDecoderLayer(config, layer_idx=i) for i in range(config.decoder_layers)]
)
self.layer_norm = nn.LayerNorm(config.d_model)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
cache_position=None,
):
r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using [`Speech2TextTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache = True` is incompatible with gradient checkpointing. Setting `use_cache = False`..."
)
use_cache = False
if use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
if cache_position is None:
cache_position = torch.arange(
past_key_values_length, past_key_values_length + input_shape[1], device=inputs_embeds.device
)
attention_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
)
encoder_attention_mask = create_bidirectional_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
)
# embed positions
positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.training:
dropout_probability = torch.rand([])
if dropout_probability < self.layerdrop:
continue
layer_outputs = decoder_layer(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
hidden_states = self.layer_norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
@auto_docstring
| Speech2TextDecoder |
python | Textualize__textual | tests/snapshot_tests/snapshot_apps/text_area.py | {
"start": 153,
"end": 396
} | class ____(App):
def compose(self) -> ComposeResult:
text_area = TextArea.code_editor()
text_area.cursor_blink = False
yield text_area
app = TextAreaSnapshot()
if __name__ == "__main__":
app.run()
| TextAreaSnapshot |
python | pallets__jinja | src/jinja2/exceptions.py | {
"start": 2394,
"end": 4126
} | class ____(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(
self,
message: str,
lineno: int,
name: str | None = None,
filename: str | None = None,
) -> None:
super().__init__(message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source: str | None = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self) -> str:
# for translated errors we only return the message
if self.translated:
return t.cast(str, self.message)
# otherwise attach some stuff
location = f"line {self.lineno}"
name = self.filename or self.name
if name:
location = f'File "{name}", {location}'
lines = [t.cast(str, self.message), " " + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
pass
else:
lines.append(" " + line.strip())
return "\n".join(lines)
def __reduce__(self): # type: ignore
# https://bugs.python.org/issue1692335 Exceptions that take
# multiple required arguments have problems with pickling.
# Without this, raises TypeError: __init__() missing 1 required
# positional argument: 'lineno'
return self.__class__, (self.message, self.lineno, self.name, self.filename)
| TemplateSyntaxError |
python | huggingface__transformers | src/transformers/models/instructblip/modeling_instructblip.py | {
"start": 13665,
"end": 14790
} | class ____(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`InstructBlipEncoderLayer`].
Args:
config (`InstructBlipConfig`):
The corresponding vision configuration for the `InstructBlipEncoder`.
"""
def __init__(self, config: InstructBlipConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([InstructBlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@auto_docstring
def forward(
self,
inputs_embeds,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, BaseModelOutput]:
hidden_states = inputs_embeds
for encoder_layer in self.layers:
hidden_states = encoder_layer(
hidden_states,
**kwargs,
)
return BaseModelOutput(last_hidden_state=hidden_states)
# Copied from transformers.models.blip.modeling_blip.BlipVisionModel with Blip->InstructBlip, BLIP->INSTRUCTBLIP
| InstructBlipEncoder |
python | pytest-dev__pytest | src/_pytest/_io/saferepr.py | {
"start": 886,
"end": 4082
} | class ____(reprlib.Repr):
"""
repr.Repr that limits the resulting size of repr() and includes
information on exceptions raised during the call.
"""
def __init__(self, maxsize: int | None, use_ascii: bool = False) -> None:
"""
:param maxsize:
If not None, will truncate the resulting repr to that specific size, using ellipsis
somewhere in the middle to hide the extra text.
If None, will not impose any size limits on the returning repr.
"""
super().__init__()
# ``maxstring`` is used by the superclass, and needs to be an int; using a
# very large number in case maxsize is None, meaning we want to disable
# truncation.
self.maxstring = maxsize if maxsize is not None else 1_000_000_000
self.maxsize = maxsize
self.use_ascii = use_ascii
def repr(self, x: object) -> str:
try:
if self.use_ascii:
s = ascii(x)
else:
s = super().repr(x)
except (KeyboardInterrupt, SystemExit):
raise
except BaseException as exc:
s = _format_repr_exception(exc, x)
if self.maxsize is not None:
s = _ellipsize(s, self.maxsize)
return s
def repr_instance(self, x: object, level: int) -> str:
try:
s = repr(x)
except (KeyboardInterrupt, SystemExit):
raise
except BaseException as exc:
s = _format_repr_exception(exc, x)
if self.maxsize is not None:
s = _ellipsize(s, self.maxsize)
return s
def safeformat(obj: object) -> str:
"""Return a pretty printed string for the given object.
Failing __repr__ functions of user instances will be represented
with a short exception info.
"""
try:
return pprint.pformat(obj)
except Exception as exc:
return _format_repr_exception(exc, obj)
# Maximum size of overall repr of objects to display during assertion errors.
DEFAULT_REPR_MAX_SIZE = 240
def saferepr(
obj: object, maxsize: int | None = DEFAULT_REPR_MAX_SIZE, use_ascii: bool = False
) -> str:
"""Return a size-limited safe repr-string for the given object.
Failing __repr__ functions of user instances will be represented
with a short exception info and 'saferepr' generally takes
care to never raise exceptions itself.
This function is a wrapper around the Repr/reprlib functionality of the
stdlib.
"""
return SafeRepr(maxsize, use_ascii).repr(obj)
def saferepr_unlimited(obj: object, use_ascii: bool = True) -> str:
"""Return an unlimited-size safe repr-string for the given object.
As with saferepr, failing __repr__ functions of user instances
will be represented with a short exception info.
This function is a wrapper around simple repr.
Note: a cleaner solution would be to alter ``saferepr``this way
when maxsize=None, but that might affect some other code.
"""
try:
if use_ascii:
return ascii(obj)
return repr(obj)
except Exception as exc:
return _format_repr_exception(exc, obj)
| SafeRepr |
python | getsentry__sentry | tests/sentry/api/test_authentication.py | {
"start": 25331,
"end": 29026
} | class ____(TestCase):
def test_valid_signature(self) -> None:
url = "/test/endpoint"
body = b'{"test": "data"}'
shared_secrets = ["secret-key"]
service_name = "TestService"
# Generate valid signature
signature = generate_service_request_signature(url, body, shared_secrets, service_name)
result = compare_service_signature(url, body, signature, shared_secrets, service_name)
assert result is True
def test_valid_signature_with_multiple_keys(self) -> None:
url = "/test/endpoint"
body = b'{"test": "data"}'
shared_secrets = ["new-key", "old-key"]
service_name = "TestService"
# Sign with first key
signature = generate_service_request_signature(url, body, ["new-key"], service_name)
result = compare_service_signature(url, body, signature, shared_secrets, service_name)
assert result is True
# Sign with second key
signature = generate_service_request_signature(url, body, ["old-key"], service_name)
result = compare_service_signature(url, body, signature, shared_secrets, service_name)
assert result is True
def test_invalid_signature(self) -> None:
url = "/test/endpoint"
body = b'{"test": "data"}'
shared_secrets = ["secret-key"]
service_name = "TestService"
result = compare_service_signature(
url, body, "rpc0:invalid_signature", shared_secrets, service_name
)
assert result is False
def test_no_shared_secrets(self) -> None:
url = "/test/endpoint"
body = b'{"test": "data"}'
service_name = "TestService"
with pytest.raises(RpcAuthenticationSetupException):
compare_service_signature(url, body, "rpc0:signature", [], service_name)
def test_empty_shared_secrets(self) -> None:
url = "/test/endpoint"
body = b'{"test": "data"}'
service_name = "TestService"
# Test list with empty string
with pytest.raises(RpcAuthenticationSetupException):
compare_service_signature(url, body, "rpc0:signature", [""], service_name)
# Test list with whitespace-only string
with pytest.raises(RpcAuthenticationSetupException):
compare_service_signature(url, body, "rpc0:signature", [" "], service_name)
# Test list with empty string mixed with valid secret
with pytest.raises(RpcAuthenticationSetupException):
compare_service_signature(
url, body, "rpc0:signature", ["valid-secret", ""], service_name
)
def test_invalid_signature_prefix(self) -> None:
url = "/test/endpoint"
body = b'{"test": "data"}'
shared_secrets = ["secret-key"]
service_name = "TestService"
result = compare_service_signature(
url, body, "invalid:signature", shared_secrets, service_name
)
assert result is False
def test_empty_body(self) -> None:
url = "/test/endpoint"
body = b""
shared_secrets = ["secret-key"]
service_name = "TestService"
result = compare_service_signature(
url, body, "rpc0:signature", shared_secrets, service_name
)
assert result is False
def test_malformed_signature(self) -> None:
url = "/test/endpoint"
body = b'{"test": "data"}'
shared_secrets = ["secret-key"]
service_name = "TestService"
# Test signature without colon
result = compare_service_signature(url, body, "rpc0signature", shared_secrets, service_name)
assert result is False
| TestCompareServiceSignature |
python | MorvanZhou__Reinforcement-learning-with-tensorflow | experiments/Robot_arm/A3C.py | {
"start": 5340,
"end": 8619
} | class ____(object):
def __init__(self, name, globalAC):
self.env = ArmEnv(mode=MODE[n_model])
self.name = name
self.AC = ACNet(name, globalAC)
def work(self):
global GLOBAL_RUNNING_R, GLOBAL_EP
total_step = 1
buffer_s, buffer_a, buffer_r = [], [], []
while not COORD.should_stop() and GLOBAL_EP < MAX_GLOBAL_EP:
s = self.env.reset()
ep_r = 0
for ep_t in range(MAX_EP_STEP):
if self.name == 'W_0':
self.env.render()
a = self.AC.choose_action(s)
s_, r, done = self.env.step(a)
if ep_t == MAX_EP_STEP - 1: done = True
ep_r += r
buffer_s.append(s)
buffer_a.append(a)
buffer_r.append(r)
if total_step % UPDATE_GLOBAL_ITER == 0 or done: # update global and assign to local net
if done:
v_s_ = 0 # terminal
else:
v_s_ = SESS.run(self.AC.v, {self.AC.s: s_[np.newaxis, :]})[0, 0]
buffer_v_target = []
for r in buffer_r[::-1]: # reverse buffer r
v_s_ = r + GAMMA * v_s_
buffer_v_target.append(v_s_)
buffer_v_target.reverse()
buffer_s, buffer_a, buffer_v_target = np.vstack(buffer_s), np.vstack(buffer_a), np.vstack(buffer_v_target)
feed_dict = {
self.AC.s: buffer_s,
self.AC.a_his: buffer_a,
self.AC.v_target: buffer_v_target,
}
test = self.AC.update_global(feed_dict)
buffer_s, buffer_a, buffer_r = [], [], []
self.AC.pull_global()
s = s_
total_step += 1
if done:
if len(GLOBAL_RUNNING_R) == 0: # record running episode reward
GLOBAL_RUNNING_R.append(ep_r)
else:
GLOBAL_RUNNING_R.append(0.9 * GLOBAL_RUNNING_R[-1] + 0.1 * ep_r)
print(
self.name,
"Ep:", GLOBAL_EP,
"| Ep_r: %i" % GLOBAL_RUNNING_R[-1],
'| Var:', test,
)
GLOBAL_EP += 1
break
if __name__ == "__main__":
SESS = tf.Session()
with tf.device("/cpu:0"):
OPT_A = tf.train.RMSPropOptimizer(LR_A, name='RMSPropA')
OPT_C = tf.train.RMSPropOptimizer(LR_C, name='RMSPropC')
GLOBAL_AC = ACNet(GLOBAL_NET_SCOPE) # we only need its params
workers = []
# Create worker
for i in range(N_WORKERS):
i_name = 'W_%i' % i # worker name
workers.append(Worker(i_name, GLOBAL_AC))
COORD = tf.train.Coordinator()
SESS.run(tf.global_variables_initializer())
worker_threads = []
for worker in workers:
job = lambda: worker.work()
t = threading.Thread(target=job)
t.start()
worker_threads.append(t)
COORD.join(worker_threads)
| Worker |
python | pikepdf__pikepdf | tests/test_image_access.py | {
"start": 12627,
"end": 32553
} | class ____(NamedTuple):
bpc: int
width: int
height: int
hival: int
colorspace: pikepdf.Name
palette: bytes
imbytes: bytes
def pdf_from_palette_image_spec(spec: PaletteImageSpec):
pdf = pikepdf.new()
pdfw, pdfh = 36 * spec.width, 36 * spec.height
pdf.add_blank_page(page_size=(pdfw, pdfh))
imobj = Stream(
pdf,
spec.imbytes,
BitsPerComponent=spec.bpc,
ColorSpace=Array([Name.Indexed, spec.colorspace, spec.hival, spec.palette]),
Width=spec.width,
Height=spec.height,
Type=Name.XObject,
Subtype=Name.Image,
)
pdf.pages[0].Contents = Stream(pdf, b'%f 0 0 %f 0 0 cm /Im0 Do' % (pdfw, pdfh))
pdf.pages[0].Resources = Dictionary(XObject=Dictionary(Im0=imobj))
pdf.pages[0].MediaBox = Array([0, 0, pdfw, pdfh])
return pdf
@st.composite
def valid_random_palette_image_spec(
draw,
bpcs=st.sampled_from([1, 2, 4, 8]),
widths=st.integers(min_value=1, max_value=16),
heights=st.integers(min_value=1, max_value=16),
colorspaces=st.sampled_from([Name.DeviceGray, Name.DeviceRGB, Name.DeviceCMYK]),
palette=None,
):
bpc = draw(bpcs)
width = draw(widths)
height = draw(heights)
colorspace = draw(colorspaces)
hival = draw(st.integers(min_value=0, max_value=(2**bpc) - 1))
imbytes = draw(imagelike_data(width, height, bpc, (0, hival)))
channels = (
1
if colorspace == Name.DeviceGray
else (
3
if colorspace == Name.DeviceRGB
else 4
if colorspace == Name.DeviceCMYK
else 0
)
)
if not palette:
palette = draw(
st.binary(min_size=channels * (hival + 1), max_size=channels * (hival + 1))
)
return PaletteImageSpec(bpc, width, height, hival, colorspace, palette, imbytes)
@pytest.mark.parametrize(
'filename,bpc,rgb',
[
('pal.pdf', 8, (0, 0, 255)),
('pal-1bit-trivial.pdf', 1, (255, 255, 255)),
('pal-1bit-rgb.pdf', 1, (255, 128, 0)),
],
)
def test_image_palette(resources, filename, bpc, rgb):
pdf = Pdf.open(resources / filename)
pim = PdfImage(next(iter(pdf.pages[0].images.values())))
assert pim.palette[0] == 'RGB'
assert pim.colorspace == '/DeviceRGB'
assert pim.mode == 'P'
assert pim.bits_per_component == bpc
outstream = BytesIO()
pim.extract_to(stream=outstream)
im_pal = pim.as_pil_image()
im = im_pal.convert('RGB')
assert im.getpixel((1, 1)) == rgb
@contextmanager
def first_image_from_pdfimages(pdf, tmpdir):
if not has_pdfimages():
pytest.skip("Need pdfimages for this test")
pdf.save(tmpdir / 'in.pdf')
run(
['pdfimages', '-q', '-png', fspath(tmpdir / 'in.pdf'), fspath('pdfimage')],
cwd=fspath(tmpdir),
check=True,
)
outpng = tmpdir / 'pdfimage-000.png'
assert outpng.exists()
with Image.open(outpng) as im:
yield im
@given(spec=valid_random_palette_image_spec())
@settings(deadline=60000)
def test_image_palette2(spec, tmp_path_factory):
pdf = pdf_from_palette_image_spec(spec)
pim = PdfImage(pdf.pages[0].Resources.XObject['/Im0'])
im1 = pim.as_pil_image()
with first_image_from_pdfimages(
pdf, tmp_path_factory.mktemp('test_image_palette2')
) as im2:
if pim.palette.base_colorspace == 'CMYK' and im1.size == im2.size:
return # Good enough - CMYK is hard...
if im1.mode == im2.mode:
diff = ImageChops.difference(im1, im2)
else:
diff = ImageChops.difference(im1.convert('RGB'), im2.convert('RGB'))
if diff.getbbox():
if pim.palette.base_colorspace in ('L', 'RGB', 'CMYK') and im2.mode == '1':
note("pdfimages bug - 1bit image stripped of palette")
return
assert (
not diff.getbbox()
), f"{diff.getpixel((0, 0))}, {im1.getpixel((0,0))}, {im2.getpixel((0,0))}"
def test_bool_in_inline_image():
piim = PdfInlineImage(image_data=b'', image_object=(Name.IM, True))
assert piim.image_mask
@pytest.mark.skipif(
not PIL_features.check_codec('jpg_2000'), reason='no JPEG2000 codec'
)
def test_jp2(first_image_in):
xobj, _pdf = first_image_in('pike-jp2.pdf')
pim = PdfImage(xobj)
assert isinstance(pim, PdfJpxImage)
assert '/JPXDecode' in pim.filters
assert pim.colorspace == '/DeviceRGB'
assert not pim.indexed
assert pim.mode == 'RGB'
assert pim.bits_per_component == 8
assert pim.__eq__(42) is NotImplemented
assert pim == PdfImage(xobj)
outstream = BytesIO()
pim.extract_to(stream=outstream)
del pim
del xobj.ColorSpace
# If there is no explicit ColorSpace metadata we should get it from the
# compressed data stream
pim = PdfImage(xobj)
assert pim.colorspace == '/DeviceRGB'
assert pim.bits_per_component == 8
def test_extract_filepath(congress, outdir):
xobj, _pdf = congress
pim = PdfImage(xobj)
result = pim.extract_to(fileprefix=(outdir / 'image'))
assert Path(result).exists()
assert (outdir / 'image.jpg').exists()
def test_extract_direct_fails_nondefault_colortransform(congress):
xobj, _pdf = congress
xobj.DecodeParms = Dictionary(
ColorTransform=42 # Non standard (or allowed in the spec)
)
pim = PdfImage(xobj)
bio = BytesIO()
assert pim._extract_direct(stream=bio) is None
with pytest.raises(UnsupportedImageTypeError):
pim.extract_to(stream=bio)
xobj.ColorSpace = Name.DeviceCMYK
pim = PdfImage(xobj)
assert pim._extract_direct(stream=bio) is None
with pytest.raises(UnsupportedImageTypeError):
pim.extract_to(stream=bio)
def test_icc_use(first_image_in):
xobj, _pdf = first_image_in('1biticc.pdf')
pim = PdfImage(xobj)
assert pim.mode == 'L' # It may be 1 bit per pixel but it's more complex than that
assert pim.colorspace == '/ICCBased'
assert pim.bits_per_component == 1
assert pim.icc.profile.xcolor_space == 'GRAY'
def test_icc_extract(first_image_in):
xobj, _pdf = first_image_in('aquamarine-cie.pdf')
pim = PdfImage(xobj)
assert pim.as_pil_image().info['icc_profile'] == pim.icc.tobytes()
def test_icc_palette(first_image_in):
xobj, _pdf = first_image_in('pink-palette-icc.pdf')
pim = PdfImage(xobj)
assert pim.icc.profile.xcolor_space == 'RGB ' # with trailing space
b = BytesIO()
pim.extract_to(stream=b)
b.seek(0)
im = Image.open(b)
assert im.size == (xobj.Width, xobj.Height)
assert im.mode == 'P'
pil_icc = im.info.get('icc_profile')
pil_icc_stream = BytesIO(pil_icc)
pil_prf = ImageCms.ImageCmsProfile(pil_icc_stream)
assert pil_prf.tobytes() == pim.icc.tobytes()
def test_stacked_compression(first_image_in):
xobj, _pdf = first_image_in('pike-flate-jp2.pdf')
pim = PdfImage(xobj)
assert pim.mode == 'RGB'
assert pim.colorspace == '/DeviceRGB'
assert pim.bits_per_component == 8
assert pim.filters == ['/FlateDecode', '/JPXDecode']
@pytest.mark.parametrize(
'blackis1,decode,expected',
[
(None, None, 255),
(False, None, 255),
(True, None, 0),
(None, [0, 1], 255),
(None, [1, 0], 0),
(False, [0, 1], 255),
(False, [1, 0], 0),
(True, [0, 1], 0),
(True, [1, 0], 255),
],
)
def test_ccitt_photometry(sandwich, blackis1, decode, expected):
xobj, _pdf = sandwich
if blackis1 is not None:
xobj.DecodeParms.BlackIs1 = blackis1
if decode is not None:
xobj.Decode = decode
pim = PdfImage(xobj)
im = pim.as_pil_image()
im = im.convert('L')
assert im.getpixel((0, 0)) == expected, f"Expected background pixel = {expected}"
def test_ccitt_encodedbytealign(sandwich):
xobj, _pdf = sandwich
# Pretend this is image is "EncodedByteAlign". We don't have a FOSS
# example of such an image.
xobj.DecodeParms.EncodedByteAlign = True
pim = PdfImage(xobj)
with pytest.raises(UnsupportedImageTypeError):
pim.as_pil_image()
def test_imagemagick_uses_rle_compression(first_image_in):
xobj, _rle = first_image_in('rle.pdf')
pim = PdfImage(xobj)
im = pim.as_pil_image()
assert im.getpixel((5, 5)) == (255, 128, 0)
def test_ccitt_icc(first_image_in, resources):
xobj, pdf = first_image_in('sandwich.pdf')
pim = PdfImage(xobj)
assert pim.icc is None
bio = BytesIO()
output_type = pim.extract_to(stream=bio)
assert output_type == '.tif'
bio.seek(0)
assert b'GRAYXYZ' not in bio.read(1000)
bio.seek(0)
assert Image.open(bio)
icc_data = (resources / 'Gray.icc').read_bytes()
icc_stream = pdf.make_stream(icc_data)
icc_stream.N = 1
xobj.ColorSpace = pikepdf.Array([Name.ICCBased, icc_stream])
pim = PdfImage(xobj)
assert pim.icc.profile.xcolor_space == 'GRAY'
bio = BytesIO()
output_type = pim.extract_to(stream=bio)
assert output_type == '.tif'
bio.seek(0)
assert b'GRAYXYZ' in bio.read(1000)
bio.seek(0)
assert Image.open(bio)
def test_invalid_icc(first_image_in):
xobj, _pdf = first_image_in('pink-palette-icc.pdf')
cs = xobj.ColorSpace[1][1] # [/Indexed [/ICCBased <stream>]]
cs.write(b'foobar') # corrupt the ICC profile
with pytest.raises(
UnsupportedImageTypeError, match="ICC profile corrupt or not readable"
):
pim = PdfImage(xobj)
assert pim.icc is not None
def test_decodeparms_filter_alternates():
pdf = pikepdf.new()
imobj = Stream(
pdf,
b'dummy',
BitsPerComponent=1,
ColorSpace=Name.DeviceGray,
DecodeParms=Array(
[
Dictionary(
BlackIs1=False,
Columns=16,
K=-1,
)
]
),
Filter=Array([Name.CCITTFaxDecode]),
Height=16,
Width=16,
Type=Name.XObject,
Subtype=Name.Image,
)
pim = pikepdf.PdfImage(imobj)
assert pim.decode_parms[0].K == -1 # Check that array of dict is unpacked properly
CMYK_RED = b'\x00\xc0\xc0\x15'
CMYK_GREEN = b'\x90\x00\xc0\x15'
CMYK_BLUE = b'\xc0\xa0\x00\x15'
CMYK_PINK = b'\x04\xc0\x00\x15'
CMYK_PALETTE = CMYK_RED + CMYK_GREEN + CMYK_BLUE + CMYK_PINK
GRAY_RGB_PALETTE = b''.join(bytes([gray, gray, gray]) for gray in range(256))
@pytest.mark.parametrize(
'base, hival, bits, palette, expect_type, expect_mode',
[
(Name.DeviceGray, 4, 8, b'\x00\x40\x80\xff', 'L', 'P'),
(Name.DeviceCMYK, 4, 8, CMYK_PALETTE, 'CMYK', 'P'),
(Name.DeviceGray, 4, 4, b'\x04\x08\x02\x0f', 'L', 'P'),
(
Array([Name.CalRGB, Dictionary(WhitePoint=Array([1.0, 1.0, 1.0]))]),
255,
8,
GRAY_RGB_PALETTE,
'RGB',
'P',
),
],
)
def test_palette_nonrgb(base, hival, bits, palette, expect_type, expect_mode):
pdf = pikepdf.new()
imobj = Stream(
pdf,
b'\x00\x01\x02\x03' * 16,
BitsPerComponent=bits,
ColorSpace=Array([Name.Indexed, base, hival, palette]),
Width=16,
Height=4,
Type=Name.XObject,
Subtype=Name.Image,
)
pim = pikepdf.PdfImage(imobj)
assert pim.palette == (expect_type, palette)
pim.extract_to(stream=BytesIO())
# To view images:
# pim.extract_to(fileprefix=f'palette_nonrgb_{expect_type}_{bits}')
assert pim.mode == expect_mode
def test_extract_to_mutex_params(sandwich):
pdfimage = PdfImage(sandwich[0])
with pytest.raises(ValueError, match="Cannot set both"):
pdfimage.extract_to(stream=BytesIO(), fileprefix='anything')
def test_separation():
# Manually construct a 2"x1" document with a Separation
# colorspace that devices a single "spot" color channel named
# "LogoGreen". Define a conversion to standard CMYK that assigns
# CMYK equivalents. Copied example from PDF RM.
# LogoGreen is a teal-ish green. First panel is white to full green,
# second is green to full white. RGB ~= (31, 202, 113)
pdf = pikepdf.new()
pdf.add_blank_page(page_size=(144, 72))
# pikepdf does not interpret this - it is for the PDF viewer
# Explanation:
# X is implicitly loaded to stack
# dup: X X
# 0.84 mul: X 0.84X
# exch: 0.84X X
# 0.00: 0.84X X 0.00
# exch: 0.84X 0.00 X
# dup: 0.84X 0.00 X X
# 0.44 mul: 0.84X 0.00 X 0.44X
# exch: 0.84X 0.00 0.44X X
# 0.21mul: 0.84X 0.00 0.44X 0.21X
# X -> {0.84X, 0, 0.44X, 0.21X}
tint_transform_logogreen_to_cmyk = b'''
{
dup 0.84 mul
exch 0.00 exch dup 0.44 mul
exch 0.21 mul
}
'''
cs = Array(
[
Name.Separation,
Name.LogoGreen,
Name.DeviceCMYK,
Stream(
pdf,
tint_transform_logogreen_to_cmyk,
FunctionType=4,
Domain=[0.0, 1.0],
Range=[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0],
),
]
)
def check_pim(imobj, idx):
pim = pikepdf.PdfImage(imobj)
assert pim.mode == 'Separation'
assert pim.is_separation
assert not pim.is_device_n
assert pim.indexed == idx
assert repr(pim)
with pytest.raises(pikepdf.models.image.HifiPrintImageNotTranscodableError):
pim.extract_to(stream=BytesIO())
imobj0 = Stream(
pdf,
bytes(range(0, 256)),
BitsPerComponent=8,
ColorSpace=cs,
Width=16,
Height=16,
Type=Name.XObject,
Subtype=Name.Image,
)
check_pim(imobj0, idx=False)
imobj1 = Stream(
pdf,
bytes(range(0, 256)),
BitsPerComponent=8,
ColorSpace=Array([Name.Indexed, cs, 255, bytes(range(255, -1, -1))]),
Width=16,
Height=16,
Type=Name.XObject,
Subtype=Name.Image,
)
check_pim(imobj1, idx=True)
pdf.pages[0].Contents = Stream(
pdf, b'72 0 0 72 0 0 cm /Im0 Do 1 0 0 1 1 0 cm /Im1 Do'
)
pdf.pages[0].Resources = Dictionary(XObject=Dictionary(Im0=imobj0, Im1=imobj1))
# pdf.save("separation.pdf")
def test_devicen():
# Manually construct a 2"x1" document with a DeviceN
# colorspace that devices a single "spot" color channel named
# "Black". Define a conversion to standard CMYK that assigns
# C=0 M=0 Y=0 and lets black through. The result should appear as a
# gradient from white (top left) to black (bottom right) in the
# left cell, and black to white in the right cell.
pdf = pikepdf.new()
pdf.add_blank_page(page_size=(144, 72))
# Postscript function to map X -> CMYK={0, 0, 0, X}
# Explanation:
# X is implicitly on the stack
# 0 0 0 <- load three zeros on to stack
# stack contains: X 0 0 0
# 4 -1 roll <- roll stack 4 elements -1 times, meaning the order is reversed
# stack contains: 0 0 0 X
# pikepdf currently does not interpret tint transformation functions. This
# is done so that the output test file can be checked in a PDF viewer.
tint_transform_k_to_cmyk = b'{0 0 0 4 -1 roll}'
cs = Array(
[
Name.DeviceN,
Array([Name.Black]),
Name.DeviceCMYK,
Stream(
pdf,
tint_transform_k_to_cmyk,
FunctionType=4,
Domain=[0.0, 1.0],
Range=[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0],
),
]
)
def check_pim(imobj, idx):
pim = pikepdf.PdfImage(imobj)
assert pim.mode == 'DeviceN'
assert pim.is_device_n
assert not pim.is_separation
assert pim.indexed == idx
assert repr(pim)
with pytest.raises(pikepdf.models.image.HifiPrintImageNotTranscodableError):
pim.extract_to(stream=BytesIO())
imobj0 = Stream(
pdf,
bytes(range(0, 256)),
BitsPerComponent=8,
ColorSpace=cs,
Width=16,
Height=16,
Type=Name.XObject,
Subtype=Name.Image,
)
check_pim(imobj0, idx=False)
imobj1 = Stream(
pdf,
bytes(range(0, 256)),
BitsPerComponent=8,
ColorSpace=Array([Name.Indexed, cs, 255, bytes(range(255, -1, -1))]),
Width=16,
Height=16,
Type=Name.XObject,
Subtype=Name.Image,
)
check_pim(imobj1, idx=True)
pdf.pages[0].Contents = Stream(
pdf, b'72 0 0 72 0 0 cm /Im0 Do 1 0 0 1 1 0 cm /Im1 Do'
)
pdf.pages[0].Resources = Dictionary(XObject=Dictionary(Im0=imobj0, Im1=imobj1))
# pdf.save('devicen.pdf')
@given(
spec=valid_random_image_spec(
bpcs=st.sampled_from([2, 4]),
colorspaces=st.just(Name.DeviceGray),
widths=st.integers(1, 7),
heights=st.integers(1, 7),
)
)
@settings(deadline=None)
def test_grayscale_stride(spec):
pdf = pdf_from_image_spec(spec)
pim = PdfImage(pdf.pages[0].Resources.XObject.Im0)
assert pim.mode == 'L'
imdata = pim.read_bytes()
w = pim.width
imdata_unpacked_view, stride = unpack_subbyte_pixels(
imdata, pim.size, pim.bits_per_component
)
imdata_unpacked = bytes(imdata_unpacked_view)
bio = BytesIO()
pim.extract_to(stream=bio)
im = Image.open(bio)
assert im.mode == 'L' and im.size == pim.size
for n, pixel in enumerate(im.getdata()):
idx = stride * (n // w) + (n % w)
assert imdata_unpacked[idx] == pixel
@requires_pdfimages
@given(spec=valid_random_image_spec())
def test_random_image(spec, tmp_path_factory):
pdf = pdf_from_image_spec(spec)
pim = PdfImage(pdf.pages[0].Resources.XObject.Im0)
bio = BytesIO()
colorspace = pim.colorspace
width = pim.width
height = pim.height
bpc = pim.bits_per_component
imbytes = pim.read_bytes()
try:
result_extension = pim.extract_to(stream=bio)
assert result_extension in ('.png', '.tiff')
except ValueError as e:
if 'not enough image data' in str(e):
return
elif 'buffer is not large enough' in str(e):
ncomps = (
4
if colorspace == Name.DeviceCMYK
else 3
if colorspace == Name.DeviceRGB
else 1
)
assert ceil(bpc / 8) * width * height * ncomps > len(imbytes)
return
raise
except PIL.UnidentifiedImageError:
if len(imbytes) == 0:
return
raise
except UnsupportedImageTypeError:
if colorspace in (Name.DeviceRGB, Name.DeviceCMYK) and bpc < 8:
return
if bpc == 16:
return
raise
bio.seek(0)
im = Image.open(bio)
assert im.mode == pim.mode
assert im.size == pim.size
outprefix = f'{width}x{height}x{im.mode}-'
tmpdir = tmp_path_factory.mktemp(outprefix)
pdf.save(tmpdir / 'pdf.pdf')
# We don't have convenient CMYK checking tools
if im.mode == 'CMYK':
return
im.save(tmpdir / 'pikepdf.png')
Path(tmpdir / 'imbytes.bin').write_bytes(imbytes)
run(
[
'pdfimages',
'-png',
fspath('pdf.pdf'),
fspath('pdfimage'), # omit suffix
],
cwd=fspath(tmpdir),
check=True,
)
outpng = tmpdir / 'pdfimage-000.png'
assert outpng.exists()
im_roundtrip = Image.open(outpng)
assert im.size == im_roundtrip.size
diff = ImageChops.difference(im, im_roundtrip)
assert not diff.getbbox()
# if diff.getbbox():
# im.save('im1.png')
# im_roundtrip.save('im2.png')
# diff.save('imdiff.png')
# breakpoint()
# assert False
| PaletteImageSpec |
python | walkccc__LeetCode | solutions/566. Reshape the Matrix/566.py | {
"start": 0,
"end": 366
} | class ____:
def matrixReshape(self, nums: list[list[int]],
r: int, c: int) -> list[list[int]]:
if nums == [] or r * c != len(nums) * len(nums[0]):
return nums
ans = [[0 for j in range(c)] for i in range(r)]
k = 0
for row in nums:
for num in row:
ans[k // c][k % c] = num
k += 1
return ans
| Solution |
python | dagster-io__dagster | python_modules/libraries/dagster-k8s/dagster_k8s/container_context.py | {
"start": 1343,
"end": 22642
} | class ____(
NamedTuple(
"_K8sContainerContext",
[
("server_k8s_config", UserDefinedDagsterK8sConfig),
("run_k8s_config", UserDefinedDagsterK8sConfig),
("namespace", Optional[str]),
],
)
):
"""Encapsulates configuration that can be applied to a K8s job running Dagster code.
Can be persisted on a DagsterRun at run submission time based on metadata from the
code location and then included in the job's configuration at run launch time or step
launch time.
"""
def __new__(
cls,
image_pull_policy: Optional[str] = None,
image_pull_secrets: Optional[Sequence[Mapping[str, str]]] = None,
service_account_name: Optional[str] = None,
env_config_maps: Optional[Sequence[str]] = None,
env_secrets: Optional[Sequence[str]] = None,
env_vars: Optional[Sequence[str]] = None,
volume_mounts: Optional[Sequence[Mapping[str, Any]]] = None,
volumes: Optional[Sequence[Mapping[str, Any]]] = None,
labels: Optional[Mapping[str, str]] = None,
namespace: Optional[str] = None,
resources: Optional[Mapping[str, Any]] = None,
scheduler_name: Optional[str] = None,
security_context: Optional[Mapping[str, Any]] = None,
server_k8s_config: Optional[UserDefinedDagsterK8sConfig] = None,
run_k8s_config: Optional[UserDefinedDagsterK8sConfig] = None,
env: Optional[Sequence[Mapping[str, Any]]] = None,
):
top_level_k8s_config = K8sContainerContext._get_base_user_defined_k8s_config(
image_pull_policy=check.opt_str_param(image_pull_policy, "image_pull_policy"),
image_pull_secrets=check.opt_sequence_param(image_pull_secrets, "image_pull_secrets"),
service_account_name=check.opt_str_param(service_account_name, "service_account_name"),
env_config_maps=check.opt_sequence_param(env_config_maps, "env_config_maps"),
env_secrets=check.opt_sequence_param(env_secrets, "env_secrets"),
env_vars=check.opt_sequence_param(env_vars, "env_vars"),
volume_mounts=[
k8s_snake_case_dict(kubernetes.client.V1VolumeMount, mount)
for mount in check.opt_sequence_param(volume_mounts, "volume_mounts")
],
volumes=[
k8s_snake_case_dict(kubernetes.client.V1Volume, volume)
for volume in check.opt_sequence_param(volumes, "volumes")
],
labels=check.opt_mapping_param(labels, "labels"),
resources=check.opt_mapping_param(resources, "resources"),
scheduler_name=check.opt_str_param(scheduler_name, "scheduler_name"),
security_context=check.opt_mapping_param(security_context, "security_context"),
env=[
k8s_snake_case_dict(kubernetes.client.V1EnvVar, e)
for e in check.opt_sequence_param(env, "env")
],
)
run_k8s_config = K8sContainerContext._merge_k8s_config(
top_level_k8s_config._replace( # remove k8s service/deployment fields
deployment_metadata={},
service_metadata={},
),
run_k8s_config or UserDefinedDagsterK8sConfig.from_dict({}),
)
server_k8s_config = K8sContainerContext._merge_k8s_config(
top_level_k8s_config._replace( # remove k8s job fields
job_config={},
job_metadata={},
job_spec_config={},
),
server_k8s_config or UserDefinedDagsterK8sConfig.from_dict({}),
)
return super().__new__(
cls,
run_k8s_config=run_k8s_config,
server_k8s_config=server_k8s_config,
namespace=namespace,
)
@staticmethod
def _get_base_user_defined_k8s_config(
    image_pull_policy: Optional[str],
    image_pull_secrets: Optional[Sequence[Mapping[str, str]]],
    service_account_name: Optional[str],
    env_config_maps: Sequence[str],
    env_secrets: Sequence[str],
    env_vars: Sequence[str],
    volume_mounts: Sequence[Mapping[str, Any]],
    volumes: Sequence[Mapping[str, Any]],
    labels: Mapping[str, str],
    resources: Mapping[str, Any],
    scheduler_name: Optional[str],
    security_context: Mapping[str, Any],
    env: Sequence[Mapping[str, Any]],
) -> UserDefinedDagsterK8sConfig:
    """Fold the flat top-level k8s settings into a structured UserDefinedDagsterK8sConfig.

    Each argument is routed to the config section it belongs to (container
    spec, pod spec, or one of the metadata sections). Falsy/empty settings
    are omitted entirely rather than written out as empty values, so merges
    against these dicts only see keys that were actually configured.
    """
    container_config = {}
    pod_spec_config = {}
    pod_template_spec_metadata = {}
    job_metadata = {}
    deployment_metadata = {}
    service_metadata = {}
    if volume_mounts:
        container_config["volume_mounts"] = volume_mounts
    if resources:
        container_config["resources"] = resources
    if image_pull_secrets:
        pod_spec_config["image_pull_secrets"] = image_pull_secrets
    if volumes:
        pod_spec_config["volumes"] = volumes
    if labels:
        # Labels are applied uniformly to the pod template and to every
        # top-level k8s object (job, deployment, service).
        pod_template_spec_metadata["labels"] = labels
        job_metadata["labels"] = labels
        deployment_metadata["labels"] = labels
        service_metadata["labels"] = labels
    if image_pull_policy:
        container_config["image_pull_policy"] = image_pull_policy
    if service_account_name:
        pod_spec_config["service_account_name"] = service_account_name
    # env_from entries reference whole config maps / secrets whose contents
    # become environment variables in the container.
    env_from = [{"config_map_ref": {"name": config_map}} for config_map in env_config_maps]
    env_from.extend([{"secret_ref": {"name": secret}} for secret in env_secrets])
    if env_from:
        container_config["env_from"] = env_from
    # parse_env_var returns (name, value) pairs from each env_vars entry
    # (presumably "KEY" or "KEY=VALUE" strings — see parse_env_var for the
    # exact contract). The already-structured `env` entries are shallow-copied
    # and appended after the parsed ones.
    parsed_env_vars = [parse_env_var(key) for key in env_vars]
    container_config_env = [
        {"name": parsed_env_var[0], "value": parsed_env_var[1]}
        for parsed_env_var in parsed_env_vars
    ]
    container_config_env.extend([{**v} for v in env])
    if container_config_env:
        container_config["env"] = container_config_env
    if scheduler_name:
        pod_spec_config["scheduler_name"] = scheduler_name
    if security_context:
        container_config["security_context"] = security_context
    return UserDefinedDagsterK8sConfig(
        container_config=container_config,
        pod_spec_config=pod_spec_config,
        pod_template_spec_metadata=pod_template_spec_metadata,
        job_metadata=job_metadata,
        service_metadata=service_metadata,
        deployment_metadata=deployment_metadata,
    )
@staticmethod
def _merge_k8s_config(
    onto_config: UserDefinedDagsterK8sConfig,
    from_config: UserDefinedDagsterK8sConfig,
) -> UserDefinedDagsterK8sConfig:
    """Combine two k8s configs, honoring the merge behavior declared on ``from_config``.

    DEEP behavior recursively merges nested dicts and concatenates lists;
    otherwise each top-level section is shallow-merged (``from_config`` keys
    win one level deep, and non-dict values are replaced outright).
    """
    base = onto_config.to_dict()
    overlay = from_config.to_dict()
    # Both dicts come from to_dict(), so they always share the same key set.
    assert set(base) == set(overlay)
    if from_config.merge_behavior == K8sConfigMergeBehavior.DEEP:
        # Deep-copy first so the recursive merge never mutates onto_config's data.
        merged = K8sContainerContext._deep_merge_k8s_config(
            onto_dict=copy.deepcopy(base), from_dict=overlay
        )
    else:
        merged = {}
        for key, base_value in base.items():
            if isinstance(base_value, dict):
                merged[key] = {**base_value, **overlay[key]}
            else:
                merged[key] = overlay[key]
    return UserDefinedDagsterK8sConfig.from_dict(merged)
@staticmethod
def _deep_merge_k8s_config(onto_dict: dict[str, Any], from_dict: Mapping[str, Any]):
    """Recursively merge ``from_dict`` into ``onto_dict``, mutating and returning it.

    Lists (other than those in ALWAYS_SHALLOW_MERGE_LIST_FIELDS) are
    concatenated with duplicates removed; nested dicts are merged
    recursively; any other value from ``from_dict`` replaces the existing one.
    """
    for key, incoming in from_dict.items():
        if key not in onto_dict:
            onto_dict[key] = incoming
            continue
        existing = onto_dict[key]
        if isinstance(incoming, list) and key not in ALWAYS_SHALLOW_MERGE_LIST_FIELDS:
            check.invariant(isinstance(existing, list))
            # Concatenate, letting _dedupe_list drop repeated entries.
            onto_dict[key] = _dedupe_list([*existing, *incoming])
        elif isinstance(incoming, dict):
            check.invariant(isinstance(existing, dict))
            onto_dict[key] = K8sContainerContext._deep_merge_k8s_config(existing, incoming)
        else:
            # Scalar (or shallow-merge list field): the incoming value wins.
            onto_dict[key] = incoming
    return onto_dict
def merge(
    self,
    other: "K8sContainerContext",
) -> "K8sContainerContext":
    """Return a new context combining ``self`` with ``other``.

    List-valued settings are combined; scalar settings from ``other`` take
    precedence over those on ``self`` (see _merge_k8s_config for details).
    """
    merged_server = self._merge_k8s_config(self.server_k8s_config, other.server_k8s_config)
    merged_run = self._merge_k8s_config(self.run_k8s_config, other.run_k8s_config)
    return K8sContainerContext(
        server_k8s_config=merged_server,
        run_k8s_config=merged_run,
        namespace=other.namespace or self.namespace,
    )
def _snake_case_allowed_fields(
    self, only_allow_user_defined_k8s_config_fields: Mapping[str, Any]
) -> Mapping[str, Any]:
    """Normalize each allowlist section's field names to snake_case.

    Field names are converted using the kubernetes model class that backs
    each config section. The "namespace" entry is a plain scalar setting and
    is passed through untouched. An unrecognized section name is an error.
    """
    # Maps each allowlist section to the kubernetes model whose attribute
    # names define the canonical snake_case spelling of its fields.
    model_class_by_key = {
        "container_config": kubernetes.client.V1Container,
        "job_metadata": kubernetes.client.V1ObjectMeta,
        "pod_template_spec_metadata": kubernetes.client.V1ObjectMeta,
        "deployment_metadata": kubernetes.client.V1ObjectMeta,
        "service_metadata": kubernetes.client.V1ObjectMeta,
        "pod_spec_config": kubernetes.client.V1PodSpec,
        "job_spec_config": kubernetes.client.V1JobSpec,
    }
    result = {}
    for key, allowed_fields in only_allow_user_defined_k8s_config_fields.items():
        if key == "namespace":
            result[key] = allowed_fields
            continue
        model_class = model_class_by_key.get(key)
        if model_class is None:
            raise Exception(f"Unexpected key in allowlist {key}")
        result[key] = k8s_snake_case_keys(model_class, allowed_fields)
    return result
def validate_user_k8s_config_for_run(
    self,
    only_allow_user_defined_k8s_config_fields: Optional[Mapping[str, Any]],
    only_allow_user_defined_env_vars: Optional[Sequence[str]],
):
    """Validate this context's run-launch k8s config against instance allowlists.

    Delegates to _validate_user_k8s_config with run_k8s_config; raises on
    disallowed fields and returns a context with disallowed env vars filtered.
    """
    return self._validate_user_k8s_config(
        self.run_k8s_config,
        only_allow_user_defined_k8s_config_fields,
        only_allow_user_defined_env_vars,
    )
def validate_user_k8s_config_for_code_server(
    self,
    only_allow_user_defined_k8s_config_fields: Optional[Mapping[str, Any]],
    only_allow_user_defined_env_vars: Optional[Sequence[str]],
):
    """Validate this context's code-server k8s config against instance allowlists.

    Delegates to _validate_user_k8s_config with server_k8s_config; raises on
    disallowed fields and returns a context with disallowed env vars filtered.
    """
    return self._validate_user_k8s_config(
        self.server_k8s_config,
        only_allow_user_defined_k8s_config_fields,
        only_allow_user_defined_env_vars,
    )
def _validate_user_k8s_config(
    self,
    user_defined_k8s_config: UserDefinedDagsterK8sConfig,
    only_allow_user_defined_k8s_config_fields: Optional[Mapping[str, Any]],
    only_allow_user_defined_env_vars: Optional[Sequence[str]],
) -> "K8sContainerContext":
    """Enforce instance-level allowlists on user-supplied k8s config.

    Raises if ``user_defined_k8s_config`` sets any field not permitted by
    ``only_allow_user_defined_k8s_config_fields`` (a ``None`` allowlist
    permits everything). Separately, if ``only_allow_user_defined_env_vars``
    is not ``None``, returns a copy of this context with disallowed env vars
    filtered out; otherwise returns ``self`` unchanged.
    """
    used_fields = self._get_used_k8s_config_fields(user_defined_k8s_config)
    if only_allow_user_defined_k8s_config_fields is not None:
        # Normalize the allowlist's field names to snake_case so they can be
        # compared against our internal field names.
        snake_case_allowlist = self._snake_case_allowed_fields(
            only_allow_user_defined_k8s_config_fields
        )
        disallowed_fields = []
        for key, used_fields_with_key in used_fields.items():
            if isinstance(used_fields_with_key, set):
                # Nested config section: check each individual used field.
                for used_field in used_fields_with_key:
                    if not snake_case_allowlist.get(key, {}).get(used_field):
                        disallowed_fields.append(f"{key}.{used_field}")
            else:
                # Scalar setting (currently only "namespace"): tracked as a bool.
                check.invariant(isinstance(used_fields_with_key, bool))
                if used_fields_with_key and not only_allow_user_defined_k8s_config_fields.get(
                    key
                ):
                    disallowed_fields.append(key)
        if disallowed_fields:
            raise Exception(
                f"Attempted to create a pod with fields that violated the allowed list: {', '.join(disallowed_fields)}"
            )
    validated_container_context = self
    if only_allow_user_defined_env_vars is not None:
        validated_container_context = self._filter_user_defined_env_vars(
            set(only_allow_user_defined_env_vars)
        )
    return validated_container_context
def _filter_user_defined_k8s_config_env_vars(
    self,
    user_defined_k8s_config: UserDefinedDagsterK8sConfig,
    only_allow_user_defined_env_vars: set[str],
    discarded_env_var_names: set[str],
) -> UserDefinedDagsterK8sConfig:
    """Drop env vars whose names are not in the allowlist.

    Returns a new config with only permitted env vars in its
    container_config; the names of any dropped vars are added to the
    caller-supplied ``discarded_env_var_names`` set. Configs with no "env"
    entry are returned unchanged.
    """
    container_config = user_defined_k8s_config.container_config
    if "env" not in container_config:
        return user_defined_k8s_config
    kept_env = []
    for env_entry in container_config["env"]:
        name = env_entry["name"]
        if name in only_allow_user_defined_env_vars:
            kept_env.append(env_entry)
        else:
            discarded_env_var_names.add(name)
    return UserDefinedDagsterK8sConfig.from_dict(
        {
            **user_defined_k8s_config.to_dict(),
            "container_config": {
                **container_config,
                "env": kept_env,
            },
        }
    )
def _filter_user_defined_env_vars(
    self,
    only_allow_user_defined_env_vars: set[str],
) -> "K8sContainerContext":
    """Return a copy of this context with disallowed env vars removed.

    Filters both run and server k8s configs, logging a single warning that
    names every env var that was excluded.
    """
    discarded: set[str] = set()
    filtered_run_config = self._filter_user_defined_k8s_config_env_vars(
        self.run_k8s_config,
        only_allow_user_defined_env_vars,
        discarded,
    )
    filtered_server_config = self._filter_user_defined_k8s_config_env_vars(
        self.server_k8s_config,
        only_allow_user_defined_env_vars,
        discarded,
    )
    if discarded:
        logging.warning(
            f"Excluding the following environment variables because they are not in the allowlist for user-defined environment variables: {', '.join(discarded)}"
        )
    return self._replace(
        run_k8s_config=filtered_run_config,
        server_k8s_config=filtered_server_config,
    )
def _get_used_k8s_config_fields(
    self, user_defined_k8s_config: UserDefinedDagsterK8sConfig
) -> "Mapping[str, set[str] | bool]":
    """Summarize which k8s config sections and fields this context sets.

    Returns a mapping from config section name (e.g. "container_config") to
    the set of field names used within that section. The special
    "namespace" key maps to a bool, since namespace is a scalar setting on
    the context rather than a nested config section — callers such as
    _validate_user_k8s_config branch on set-vs-bool accordingly.

    Note: the previous return annotation (Mapping[str, Mapping[str, set[str]]])
    did not match the actual values; it is corrected here.
    """
    used_fields: dict[str, Any] = {}
    for key, fields in user_defined_k8s_config.to_dict().items():
        if key == "merge_behavior":
            # merge_behavior controls how configs combine; it is not a k8s field.
            continue
        # to_dict() keys are unique, but union defensively with any prior
        # entry so repeated keys could never lose field names.
        used_fields[key] = used_fields.get(key, set()).union(fields)
    if self.namespace:
        used_fields["namespace"] = True
    return used_fields
@staticmethod
def create_for_run(
    dagster_run: DagsterRun,
    run_launcher: Optional["K8sRunLauncher"],
    include_run_tags: bool,
) -> "K8sContainerContext":
    """Build the effective container context for launching a run.

    Layers configuration in increasing precedence: the run launcher's
    instance-level settings, then the code location's container_context,
    then (if ``include_run_tags``) per-run k8s config taken from the run's
    tags. The user-supplied layers are validated against the launcher's
    allowlists (raising on violations) before the final merge.
    """
    context = K8sContainerContext()
    if run_launcher:
        # Base layer: instance-level defaults configured on the run launcher.
        context = context.merge(
            K8sContainerContext(
                image_pull_policy=run_launcher.image_pull_policy,
                image_pull_secrets=run_launcher.image_pull_secrets,
                service_account_name=run_launcher.service_account_name,
                env_config_maps=run_launcher.env_config_maps,
                env_secrets=run_launcher.env_secrets,
                env_vars=run_launcher.env_vars,
                volume_mounts=run_launcher.volume_mounts,
                volumes=run_launcher.volumes,
                labels=run_launcher.labels,
                namespace=run_launcher.job_namespace,
                resources=run_launcher.resources,
                scheduler_name=run_launcher.scheduler_name,
                security_context=run_launcher.security_context,
                run_k8s_config=UserDefinedDagsterK8sConfig.from_dict(
                    run_launcher.run_k8s_config or {}
                ),
            )
        )
    user_defined_container_context = K8sContainerContext()
    if dagster_run.job_code_origin:
        # Next layer: container_context declared on the run's code location.
        run_container_context = dagster_run.job_code_origin.repository_origin.container_context
        if run_container_context:
            user_defined_container_context = user_defined_container_context.merge(
                K8sContainerContext.create_from_config(run_container_context)
            )
    if include_run_tags:
        # Highest-precedence layer: per-run k8s config from the run's tags.
        user_defined_k8s_config = get_user_defined_k8s_config(dagster_run.tags)
        user_defined_container_context = user_defined_container_context.merge(
            K8sContainerContext(run_k8s_config=user_defined_k8s_config)
        )
    # If there's an allowlist, make sure user_defined_container_context doesn't violate it
    if run_launcher:
        user_defined_container_context = (
            user_defined_container_context.validate_user_k8s_config_for_run(
                run_launcher.only_allow_user_defined_k8s_config_fields,
                run_launcher.only_allow_user_defined_env_vars,
            )
        )
    return context.merge(user_defined_container_context)
@staticmethod
def create_from_config(run_container_context) -> "K8sContainerContext":
    """Construct a K8sContainerContext from a raw container_context mapping.

    Reads the cross-runtime shared fields (currently env_vars) first, then
    the k8s-specific "k8s" section, validating the latter against the
    container-context config schema.

    Raises:
        DagsterInvalidConfigError: if the "k8s" section fails schema validation.
    """
    processed_shared_container_context = process_shared_container_context_config(
        run_container_context or {}
    )
    shared_container_context = K8sContainerContext(
        env_vars=processed_shared_container_context.get("env_vars", [])
    )
    run_k8s_container_context = (
        run_container_context.get("k8s", {}) if run_container_context else {}
    )
    if not run_k8s_container_context:
        # No k8s-specific section: only the shared fields apply.
        return shared_container_context
    processed_container_context = process_config(
        DagsterK8sJobConfig.config_type_container_context(), run_k8s_container_context
    )
    if not processed_container_context.success:
        raise DagsterInvalidConfigError(
            "Errors while parsing k8s container context",
            processed_container_context.errors,
            run_k8s_container_context,
        )
    processed_context_value = cast("dict", processed_container_context.value)
    return shared_container_context.merge(
        K8sContainerContext(
            image_pull_policy=processed_context_value.get("image_pull_policy"),
            image_pull_secrets=processed_context_value.get("image_pull_secrets"),
            service_account_name=processed_context_value.get("service_account_name"),
            env_config_maps=processed_context_value.get("env_config_maps"),
            env_secrets=processed_context_value.get("env_secrets"),
            env_vars=processed_context_value.get("env_vars"),
            volume_mounts=processed_context_value.get("volume_mounts"),
            volumes=processed_context_value.get("volumes"),
            labels=processed_context_value.get("labels"),
            namespace=processed_context_value.get("namespace"),
            resources=processed_context_value.get("resources"),
            scheduler_name=processed_context_value.get("scheduler_name"),
            security_context=processed_context_value.get("security_context"),
            server_k8s_config=UserDefinedDagsterK8sConfig.from_dict(
                processed_context_value.get("server_k8s_config", {})
            ),
            run_k8s_config=UserDefinedDagsterK8sConfig.from_dict(
                processed_context_value.get("run_k8s_config", {})
            ),
            env=processed_context_value.get("env"),
        ),
    )
def get_k8s_job_config(self, job_image, run_launcher) -> DagsterK8sJobConfig:
    """Build a DagsterK8sJobConfig, preferring an explicit job_image over the launcher's."""
    resolved_image = job_image or run_launcher.job_image
    return DagsterK8sJobConfig(
        job_image=resolved_image,
        dagster_home=run_launcher.dagster_home,
        instance_config_map=run_launcher.instance_config_map,
        postgres_password_secret=run_launcher.postgres_password_secret,
    )
| K8sContainerContext |
python | openai__openai-python | tests/test_transform.py | {
"start": 10469,
"end": 10841
} | class ____(BaseModel):
nested: MyModel
@parametrize
@pytest.mark.asyncio
async def test_pydantic_nested_objects(use_async: bool) -> None:
model = ModelNestedObjects.construct(nested={"foo": "stainless"})
assert isinstance(model.nested, MyModel)
assert cast(Any, await transform(model, Any, use_async)) == {"nested": {"foo": "stainless"}}
| ModelNestedObjects |
python | django__django | tests/generic_views/views.py | {
"start": 4924,
"end": 5099
} | class ____(generic.DeleteView):
model = Author
form_class = ConfirmDeleteForm
def get_success_url(self):
return reverse("authors_list")
| AuthorDeleteFormView |
python | scrapy__scrapy | tests/spiders.py | {
"start": 4823,
"end": 5398
} | class ____(SimpleSpider):
name = "asyncdef_asyncio_reqs_return"
async def parse(self, response):
await asyncio.sleep(0.2)
req_id = response.meta.get("req_id", 0)
status = await get_from_asyncio_queue(response.status)
self.logger.info(f"Got response {status}, req_id {req_id}")
if req_id > 0:
return None
reqs = []
for i in range(1, 3):
req = Request(self.start_urls[0], dont_filter=True, meta={"req_id": i})
reqs.append(req)
return reqs
| AsyncDefAsyncioReqsReturnSpider |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_hparams.py | {
"start": 31942,
"end": 33326
} | class ____(BoringModel):
mandatory: int
optional: str = "optional"
ignore_me: bool = False
def __post_init__(self):
super().__init__()
self.save_hyperparameters(ignore=("ignore_me",))
def test_dataclass_lightning_module(tmp_path):
"""Test that save_hyperparameters() works with a LightningModule as a dataclass."""
model = DataClassModel(33, optional="cocofruit")
assert model.hparams == {"mandatory": 33, "optional": "cocofruit"}
def test_dataclass_with_init_false_fields():
"""Test that save_hyperparameters() filters out fields with init=False and issues a warning."""
@dataclass
class DataClassWithInitFalseFieldsModel(BoringModel):
mandatory: int
optional: str = "optional"
non_init_field: int = field(default=999, init=False)
another_non_init: str = field(default="not_in_init", init=False)
def __post_init__(self):
super().__init__()
self.save_hyperparameters()
model = DataClassWithInitFalseFieldsModel(33, optional="cocofruit")
expected_hparams = {"mandatory": 33, "optional": "cocofruit"}
assert model.hparams == expected_hparams
assert model.non_init_field == 999
assert model.another_non_init == "not_in_init"
assert "non_init_field" not in model.hparams
assert "another_non_init" not in model.hparams
| DataClassModel |
python | jazzband__pip-tools | piptools/cache.py | {
"start": 958,
"end": 1790
} | class ____(PipToolsError):
def __init__(self, path: str):
self.path = path
def __str__(self) -> str:
lines = [
"The dependency cache seems to have been corrupted.",
"Inspect, or delete, the following file:",
f" {self.path}",
]
return os.linesep.join(lines)
def read_cache_file(cache_file_path: str) -> CacheDict:
with open(cache_file_path, encoding="utf-8") as cache_file:
try:
doc = json.load(cache_file)
except (json.JSONDecodeError, UnicodeDecodeError):
raise CorruptCacheError(cache_file_path)
# Check version and load the contents
if doc["__format__"] != 1:
raise ValueError("Unknown cache file format")
return _t.cast(CacheDict, doc["dependencies"])
| CorruptCacheError |
python | streamlit__streamlit | e2e_playwright/conftest.py | {
"start": 10153,
"end": 10614
} | class ____:
# id attribute added to the iframe html tag
element_id: str | None = None
# query params to be appended to the iframe src URL
src_query_params: dict[str, str] | None = None
# additional HTML body
additional_html_head: str | None = None
# html content to load. Following placeholders are replaced during the test:
# - $APP_URL: the URL of the Streamlit app
html_content: str | None = None
@dataclass
| IframedPageAttrs |
python | pytorch__pytorch | torch/ao/quantization/observer.py | {
"start": 5680,
"end": 16552
} | class ____(ObserverBase):
r"""Common base for all observers using uniform quantization to calculate
scale and zero_point.
Args:
dtype: dtype argument to the `quantize` node needed to implement the
reference model spec.
qscheme: Quantization scheme to be used.
reduce_range: Reduces the range of the quantized data type by 1 bit.
This is sometimes required to avoid instruction overflow.
quant_min: Minimum quantization value. If unspecified, it will follow the 8-bit setup.
quant_max: Maximum quantization value. If unspecified, it will follow the 8-bit setup.
eps: Epsilon value for float32, Defaults to `torch.finfo(torch.float32).eps`.
.. warning::
:attr:`dtype` can only take ``torch.qint8`` or ``torch.quint8``.
or `torch.int8` or `torch.uint8`
.. warning::
:attr:`qscheme` can only take one of the following options:
- ``torch.per_tensor_affine``
- ``torch.per_tensor_symmetric``
- ``torch.per_channel_affine``
- ``torch.per_channel_symmetric``
"""
# Note: the version is shared by all observer types
#
# Version 1/None
# self
#
# Version 2 (base class only, does not include child class buffers)
# self
# |--- eps : Tensor
#
# Version 3
# for HistogramObserver only, changed the shape of uninitialized
# min_val and max_val buffers from torch.Size([0]) to torch.Size([])
# for PerChannelObservers, changed the name of the buffers from min_vals
# to min_val and from max_vals to max_val.
_version = 3
eps: torch.Tensor
def __init__(
self,
dtype=torch.quint8,
qscheme=torch.per_tensor_affine,
reduce_range=False,
quant_min=None,
quant_max=None,
factory_kwargs=None,
eps=torch.finfo(torch.float32).eps,
is_dynamic=False,
**kwargs,
) -> None:
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
super().__init__(dtype=dtype, is_dynamic=is_dynamic, **kwargs)
self.qscheme = qscheme
if reduce_range:
warnings.warn(
"Please use quant_min and quant_max to specify the range for observers. \
reduce_range will be deprecated in a future release of PyTorch.",
stacklevel=2,
)
self.reduce_range = reduce_range
self.register_buffer("eps", torch.tensor([eps], **factory_kwargs))
if self.qscheme not in (
torch.per_tensor_affine,
torch.per_tensor_symmetric,
torch.per_channel_affine,
torch.per_channel_symmetric,
torch.per_channel_affine_float_qparams,
):
raise AssertionError(
"Default Observer only works for per_tensor_affine, per_tensor_symmetric, "
"per_channel_affine, per_channel_symmetric and per_channel_float_qparams quantization scheme"
)
_ALLOWED_DTYPES = (
torch.qint8,
torch.quint8,
torch.quint4x2,
torch.qint32,
torch.int8,
torch.uint8,
torch.int16,
torch.int32,
torch.float8_e5m2,
torch.float8_e4m3fn,
torch.uint16,
)
if self.dtype not in _ALLOWED_DTYPES:
raise AssertionError(
f"Default Observer only works for {_ALLOWED_DTYPES} data type"
)
self.has_customized_qrange = (quant_min is not None) and (quant_max is not None)
if self.has_customized_qrange:
# pyrefly: ignore [bad-argument-type]
validate_qmin_qmax(quant_min, quant_max)
self.quant_min, self.quant_max = calculate_qmin_qmax(
# pyrefly: ignore [bad-argument-type]
quant_min,
# pyrefly: ignore [bad-argument-type]
quant_max,
self.has_customized_qrange,
self.dtype,
self.reduce_range,
)
def _load_from_state_dict(
self,
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
):
version = local_metadata.get("version", None)
if version is None or version == 1:
# eps was moved to a buffer in version 2
eps = torch.tensor([torch.finfo(torch.float32).eps])
state_dict[prefix + "eps"] = eps
super()._load_from_state_dict(
state_dict,
prefix,
local_metadata,
strict,
missing_keys,
unexpected_keys,
error_msgs,
)
@torch.jit.export
def _validate_qmin_qmax(self, quant_min: int, quant_max: int) -> None:
r"""Validates that the user-specified quantization range is properly initialized
and within the given bound supported by the observer dtype.
To accommodate lower-bit quantization with respect to the existing torch.qint8 and
torch.quint8 datatypes, the user can choose to use dynamic quantization range by passing
in a tuple of initial qmin and qmax values. One use case is these customized qmin and qmax
values are used to calculate static estimates of the scale and zero point for aggressive lower-bit
fake quantization. These estimates are compared against parameters learned through backpropagation.
The related literatures for scale and zero point via backpropagation are as follows:
Learned Step Size Quantization: https://openreview.net/pdf?id=rkgO66VKDS
Trained Quantization Thresholds: https://arxiv.org/pdf/1903.08066.pdf
"""
# The variable names are prefixed with "initial" because their values (qmin and qmax) might be adjusted
# based on whether quantization range is reduced and the datatype (signed/unsigned) used by the observer.
if not quant_min <= 0 <= quant_max:
raise AssertionError("Used-specified quantization range must include 0.")
if quant_min >= quant_max:
raise AssertionError(
"qmin must be strictly less than qmax for user-specified quantization range."
)
@torch.jit.export
def _calculate_qparams(
self, min_val: torch.Tensor, max_val: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
r"""Calculates the quantization parameters, given min and max
value tensors. Works for both per tensor and per channel cases
Args:
min_val: Minimum values per channel
max_val: Maximum values per channel
Returns:
scales: Scales tensor of shape (#channels,)
zero_points: Zero points tensor of shape (#channels,)
"""
# Functionally equivalent to 'determine_qparams' in utils.py. Observers must be torchscriptable however and qscheme
# as far as I can tell is not allowed to passed as a parameter in torchscript functions. This makes refactoring observer
# to use this utility a massive pain and very gross. For now Im opting just to duplicate as this code
# seems unlikely to change (last update over 1 year ago) and when torchscript is fully deprecated we can refactor.
# TODO(jakeszwe, jerryzh168)
if not check_min_max_valid(min_val, max_val):
return torch.tensor([1.0], device=min_val.device.type), torch.tensor(
[0], device=min_val.device.type
)
quant_min, quant_max = self.quant_min, self.quant_max
min_val_neg = torch.min(min_val, torch.zeros_like(min_val))
max_val_pos = torch.max(max_val, torch.zeros_like(max_val))
device = min_val_neg.device
scale = torch.ones(min_val_neg.size(), dtype=torch.float32, device=device)
zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)
if (
self.qscheme == torch.per_tensor_symmetric
or self.qscheme == torch.per_channel_symmetric
):
max_val_pos = torch.max(-min_val_neg, max_val_pos)
scale = max_val_pos / (float(quant_max - quant_min) / 2)
scale = torch.max(scale, self.eps)
if self.dtype in [torch.quint8, torch.uint8]:
if self.has_customized_qrange:
# When customized quantization range is used, down-rounded midpoint of the range is chosen.
zero_point = zero_point.new_full(
zero_point.size(), (quant_min + quant_max) // 2
)
else:
zero_point = zero_point.new_full(zero_point.size(), 128)
elif self.dtype == torch.uint16:
zero_point = zero_point.new_full(zero_point.size(), 2**15)
elif self.qscheme == torch.per_channel_affine_float_qparams:
scale = (max_val - min_val) / float(quant_max - quant_min)
scale = torch.where(scale > self.eps, scale, torch.ones_like(scale))
# We use the quantize function
# xq = Round(Xf * inv_scale + zero_point),
# setting zero_point to (-1 * min *inv_scale) we get
# Xq = Round((Xf - min) * inv_scale)
zero_point = -1 * min_val / scale
else:
scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min)
scale = torch.max(scale, self.eps)
zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int)
zero_point = torch.clamp(zero_point, quant_min, quant_max)
# For scalar values, cast them to Tensors of size 1 to keep the shape
# consistent with default values in FakeQuantize.
if len(scale.shape) == 0:
# TODO: switch to scale.item() after adding JIT support
scale = torch.tensor([float(scale)], dtype=scale.dtype, device=device)
if len(zero_point.shape) == 0:
# TODO: switch to zero_point.item() after adding JIT support
zero_point = torch.tensor(
[int(zero_point)], dtype=zero_point.dtype, device=device
)
if self.qscheme == torch.per_channel_affine_float_qparams:
zero_point = torch.tensor(
[float(zero_point)], dtype=zero_point.dtype, device=device
)
return scale, zero_point
@torch.jit.export
def reset_min_max_vals(self):
raise NotImplementedError("Cannot reset min/max values in the given observer.")
# Originally, this class was called `_ObserverBase`. Keeping the old name around
# for backwards compatibility.
# TODO(after v1.13): delete this
_ObserverBase = UniformQuantizationObserverBase
| UniformQuantizationObserverBase |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/rich/console.py | {
"start": 2306,
"end": 2881
} | class ____:
pass
NO_CHANGE = NoChange()
try:
_STDIN_FILENO = sys.__stdin__.fileno()
except Exception:
_STDIN_FILENO = 0
try:
_STDOUT_FILENO = sys.__stdout__.fileno()
except Exception:
_STDOUT_FILENO = 1
try:
_STDERR_FILENO = sys.__stderr__.fileno()
except Exception:
_STDERR_FILENO = 2
_STD_STREAMS = (_STDIN_FILENO, _STDOUT_FILENO, _STDERR_FILENO)
_STD_STREAMS_OUTPUT = (_STDOUT_FILENO, _STDERR_FILENO)
_TERM_COLORS = {
"kitty": ColorSystem.EIGHT_BIT,
"256color": ColorSystem.EIGHT_BIT,
"16color": ColorSystem.STANDARD,
}
| NoChange |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 24162,
"end": 24449
} | class ____(_LOBDataType, oracle.NCLOB):
def get_dbapi_type(self, dbapi):
# previously, this was dbapi.NCLOB.
# DB_TYPE_NVARCHAR will instead be passed to setinputsizes()
# when this datatype is used.
return dbapi.DB_TYPE_NVARCHAR
| _OracleUnicodeTextNCLOB |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 61117,
"end": 61432
} | class ____(sgqlc.types.Enum):
"""The type of a project field.
Enumeration Choices:
* `DATE`: Date
* `NUMBER`: Number
* `SINGLE_SELECT`: Single Select
* `TEXT`: Text
"""
__schema__ = github_schema
__choices__ = ("DATE", "NUMBER", "SINGLE_SELECT", "TEXT")
| ProjectV2CustomFieldType |
python | django-extensions__django-extensions | tests/testapp/models.py | {
"start": 11400,
"end": 11597
} | class ____(models.Model):
random_char_field = RandomCharField(length=8, uppercase=True, include_digits=False)
class Meta:
app_label = "django_extensions"
| RandomCharTestModelUppercase |
python | wandb__wandb | wandb/sdk/mailbox/mailbox.py | {
"start": 469,
"end": 4806
} | class ____:
"""Matches service responses to requests.
The mailbox can set an address on a server request and create a handle for
waiting for a response to that record. Responses are delivered by calling
`deliver()`. The `close()` method abandons all handles in case the
service process becomes unreachable.
"""
def __init__(self, asyncer: asyncio_manager.AsyncioManager) -> None:
self._asyncer = asyncer
self._handles: dict[str, MailboxResponseHandle] = {}
self._handles_lock = threading.Lock()
self._closed = False
def require_response(
self,
request: spb.ServerRequest | pb.Record,
) -> MailboxHandle[spb.ServerResponse]:
"""Set a response address on a request.
Args:
request: The request on which to set a request ID or mailbox slot.
This is mutated. An address must not already be set.
Returns:
A handle for waiting for the response to the request.
Raises:
MailboxClosedError: If the mailbox has been closed, in which case
no new responses are expected to be delivered and new handles
cannot be created.
"""
if isinstance(request, spb.ServerRequest):
if (address := request.request_id) or (
address := request.record_publish.control.mailbox_slot
):
raise ValueError(f"Request already has an address ({address})")
address = self._new_address()
request.request_id = address
if request.HasField("record_publish"):
request.record_publish.control.mailbox_slot = address
if request.HasField("record_communicate"):
request.record_communicate.control.mailbox_slot = address
else:
if address := request.control.mailbox_slot:
raise ValueError(f"Request already has an address ({address})")
address = self._new_address()
request.control.mailbox_slot = address
with self._handles_lock:
if self._closed:
raise MailboxClosedError()
handle = MailboxResponseHandle(address, asyncer=self._asyncer)
self._handles[address] = handle
return handle
def _new_address(self) -> str:
"""Returns an unused address for a request.
Assumes `_handles_lock` is held.
"""
def generate():
return "".join(
secrets.choice(string.ascii_lowercase + string.digits)
for _ in range(12)
)
address = generate()
# Being extra cautious. This loop will almost never be entered.
while address in self._handles:
address = generate()
return address
async def deliver(self, response: spb.ServerResponse) -> None:
"""Deliver a response from the service.
If the response address is invalid, this does nothing.
It is a no-op if the mailbox has been closed.
"""
address = response.request_id
if not address:
kind: str | None = response.WhichOneof("server_response_type")
if kind == "result_communicate":
result_type = response.result_communicate.WhichOneof("result_type")
kind = f"result_communicate.{result_type}"
_logger.error(f"Received response with no mailbox slot: {kind}")
return
with self._handles_lock:
# NOTE: If the mailbox is closed, this returns None because
# we clear the dict.
handle = self._handles.pop(address, None)
# It is not an error if there is no handle for the address:
# handles can be abandoned if the result is no longer needed.
if handle:
await handle.deliver(response)
def close(self) -> None:
"""Indicate no further responses will be delivered.
Abandons all handles.
"""
with self._handles_lock:
self._closed = True
_logger.info(
f"Closing mailbox, abandoning {len(self._handles)} handles.",
)
for handle in self._handles.values():
handle.abandon()
self._handles.clear()
| Mailbox |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.