language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/convbert/modeling_convbert.py | {
"start": 15983,
"end": 18576
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = ConvBertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise TypeError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = ConvBertAttention(config)
self.intermediate = ConvBertIntermediate(config)
self.output = ConvBertOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, Optional[torch.FloatTensor]]:
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
output_attentions=output_attentions,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise AttributeError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
" by setting `config.add_cross_attention=True`"
)
cross_attention_outputs = self.crossattention(
attention_output,
encoder_attention_mask,
encoder_hidden_states,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
| ConvBertLayer |
python | sphinx-doc__sphinx | sphinx/ext/doctest.py | {
"start": 8486,
"end": 9898
} | class ____(doctest.DocTestRunner):
def summarize( # type: ignore[override]
self, out: Callable[[str], None], verbose: bool | None = None
) -> tuple[int, int]:
string_io = StringIO()
old_stdout = sys.stdout
sys.stdout = string_io
try:
res = super().summarize(verbose)
finally:
sys.stdout = old_stdout
out(string_io.getvalue())
return res
def _DocTestRunner__patched_linecache_getlines(
self, filename: str, module_globals: Any = None
) -> Any:
# this is overridden from DocTestRunner adding the try-except below
m = self._DocTestRunner__LINECACHE_FILENAME_RE.match(filename) # type: ignore[attr-defined]
if m and m.group('name') == self.test.name:
try:
example = self.test.examples[int(m.group('examplenum'))]
# because we compile multiple doctest blocks with the same name
# (viz. the group name) this might, for outer stack frames in a
# traceback, get the wrong test which might not have enough examples
except IndexError:
pass
else:
return example.source.splitlines(True)
return self.save_linecache_getlines(filename, module_globals) # type: ignore[attr-defined]
# the new builder -- use sphinx-build.py -b doctest to run
| SphinxDocTestRunner |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vector_index.py | {
"start": 1155,
"end": 1342
} | class ____(_MultiVectorConfigCreateBase):
enabled: bool = Field(default=True)
@staticmethod
@abstractmethod
def encoding_name() -> str: ...
| _MultiVectorEncodingConfigCreate |
python | kamyu104__LeetCode-Solutions | Python/merge-nodes-in-between-zeros.py | {
"start": 29,
"end": 124
} | class ____(object):
def __init__(self, val=0, next=None):
pass
# linked list
| ListNode |
python | pytorch__pytorch | torch/distributions/constraints.py | {
"start": 17953,
"end": 18250
} | class ____(_Symmetric):
"""
Constrain to positive-semidefinite matrices.
"""
def check(self, value):
sym_check = super().check(value)
if not sym_check.all():
return sym_check
return torch.linalg.eigvalsh(value).ge(0).all(-1)
| _PositiveSemidefinite |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/core/graphql_client.py | {
"start": 10298,
"end": 13938
} | class ____(HTTPAdapter):
def __init__(self, *args, **kwargs):
self.socket_options = kwargs.pop("socket_options", None)
super().__init__(*args, **kwargs)
def init_poolmanager(self, *args, **kwargs):
if self.socket_options is not None:
kwargs["socket_options"] = self.socket_options
super().init_poolmanager(*args, **kwargs)
@contextmanager
def create_graphql_requests_session(adapter_kwargs: Optional[Mapping[str, Any]] = None):
with requests.Session() as session:
adapter = HTTPAdapterWithSocketOptions(**(adapter_kwargs or {}))
session.mount("https://", adapter)
session.mount("http://", adapter)
yield session
def create_agent_http_client(
session: requests.Session,
config_value: dict[str, Any],
scope: DagsterCloudInstanceScope = DagsterCloudInstanceScope.DEPLOYMENT,
):
return DagsterCloudAgentHttpClient(
headers=get_agent_headers(config_value, scope=scope),
verify=config_value.get("verify", True),
timeout=config_value.get("timeout", DEFAULT_TIMEOUT),
cookies=config_value.get("cookies", {}),
# Requests library modifies proxies dictionary so create a copy
proxies=(
check.is_dict(config_value.get("proxies")).copy() if config_value.get("proxies") else {}
),
session=session,
max_retries=config_value.get("retries", DEFAULT_RETRIES),
backoff_factor=config_value.get("backoff_factor", DEFAULT_BACKOFF_FACTOR),
)
def create_agent_graphql_client(
session: requests.Session,
url: str,
config_value: dict[str, Any],
scope: DagsterCloudInstanceScope = DagsterCloudInstanceScope.DEPLOYMENT,
):
return DagsterCloudGraphQLClient(
url=url,
headers=get_agent_headers(config_value, scope=scope),
verify=config_value.get("verify", True),
timeout=config_value.get("timeout", DEFAULT_TIMEOUT),
cookies=config_value.get("cookies", {}),
# Requests library modifies proxies dictionary so create a copy
proxies=(
check.is_dict(config_value.get("proxies")).copy() if config_value.get("proxies") else {}
),
session=session,
max_retries=config_value.get("retries", DEFAULT_RETRIES),
backoff_factor=config_value.get("backoff_factor", DEFAULT_BACKOFF_FACTOR),
)
def _get_retry_after_sleep_time(headers):
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
retry_after = headers.get("Retry-After")
if retry_after is None:
return None
if re.match(r"^\s*[0-9]+\s*$", retry_after):
seconds = int(retry_after)
else:
retry_date = parsedate_tz(retry_after)
if retry_date is None:
return None
retry_date = mktime_tz(retry_date)
seconds = retry_date - time.time()
return max(seconds, 0)
@contextmanager
def create_cloud_webserver_client(
url: str,
api_token: str,
retries=3,
deployment_name: Optional[str] = None,
headers: Optional[dict[str, Any]] = None,
):
with create_graphql_requests_session(adapter_kwargs={}) as session:
yield DagsterCloudGraphQLClient(
session=session,
url=f"{url}/graphql",
headers={
**get_dagster_cloud_api_headers(
api_token,
scope=DagsterCloudInstanceScope.DEPLOYMENT,
deployment_name=deployment_name,
),
**(headers if headers else {}),
},
max_retries=retries,
)
| HTTPAdapterWithSocketOptions |
python | encode__django-rest-framework | tests/test_routers.py | {
"start": 13009,
"end": 13675
} | class ____(TestCase):
"""
Ensure `@action` decorator raises an except when applied
to an existing route
"""
def test_exception_raised_when_action_applied_to_existing_route(self):
class TestViewSet(viewsets.ModelViewSet):
@action(methods=['post'], detail=True)
def retrieve(self, request, *args, **kwargs):
return Response({
'hello': 'world'
})
self.router = SimpleRouter()
self.router.register(r'test', TestViewSet, basename='test')
with pytest.raises(ImproperlyConfigured):
self.router.urls
| TestActionAppliedToExistingRoute |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/event/base.py | {
"start": 14905,
"end": 15247
} | class ____(dispatcher[_ET]):
def __get__(self, obj: Any, cls: Type[Any]) -> Any:
if obj is None:
return self.dispatch
if hasattr(obj, "_slots_dispatch"):
return obj._slots_dispatch
disp = self.dispatch._for_instance(obj)
obj._slots_dispatch = disp
return disp
| slots_dispatcher |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/hooks/dataform.py | {
"start": 1512,
"end": 27370
} | class ____(GoogleBaseHook):
"""Hook for Google Cloud DataForm APIs."""
def get_dataform_client(self) -> DataformClient:
"""Retrieve client library object that allow access to Cloud Dataform service."""
return DataformClient(credentials=self.get_credentials())
@GoogleBaseHook.fallback_to_default_project_id
def wait_for_workflow_invocation(
self,
workflow_invocation_id: str,
repository_id: str,
project_id: str,
region: str,
wait_time: int = 10,
timeout: int | None = None,
) -> None:
"""
Poll a job to check if it finishes.
:param workflow_invocation_id: Id of the Workflow Invocation
:param repository_id: Id of the Dataform repository
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param region: Required. The Cloud Dataproc region in which to handle the request.
:param wait_time: Number of seconds between checks
:param timeout: How many seconds wait for job to be ready. Used only if ``asynchronous`` is False
"""
if region is None:
raise TypeError("missing 1 required keyword argument: 'region'")
state = None
start = time.monotonic()
while state not in (
WorkflowInvocation.State.FAILED,
WorkflowInvocation.State.SUCCEEDED,
WorkflowInvocation.State.CANCELLED,
):
if timeout and start + timeout < time.monotonic():
raise AirflowException(
f"Timeout: workflow invocation {workflow_invocation_id} is not ready after {timeout}s"
)
time.sleep(wait_time)
try:
workflow_invocation = self.get_workflow_invocation(
project_id=project_id,
region=region,
repository_id=repository_id,
workflow_invocation_id=workflow_invocation_id,
)
state = workflow_invocation.state
except Exception as err:
self.log.info(
"Retrying. Dataform API returned error when waiting for workflow invocation: %s", err
)
if state == WorkflowInvocation.State.FAILED:
raise AirflowException(f"Workflow Invocation failed:\n{workflow_invocation}")
if state == WorkflowInvocation.State.CANCELLED:
raise AirflowException(f"Workflow Invocation was cancelled:\n{workflow_invocation}")
@GoogleBaseHook.fallback_to_default_project_id
def create_compilation_result(
self,
project_id: str,
region: str,
repository_id: str,
compilation_result: CompilationResult | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> CompilationResult:
"""
Create a new CompilationResult in a given project and location.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param compilation_result: Required. The compilation result to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
parent = f"projects/{project_id}/locations/{region}/repositories/{repository_id}"
return client.create_compilation_result(
request={
"parent": parent,
"compilation_result": compilation_result,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_compilation_result(
self,
project_id: str,
region: str,
repository_id: str,
compilation_result_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> CompilationResult:
"""
Fetch a single CompilationResult.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param compilation_result_id: The Id of the Dataform Compilation Result
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
name = (
f"projects/{project_id}/locations/{region}/repositories/"
f"{repository_id}/compilationResults/{compilation_result_id}"
)
return client.get_compilation_result(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def create_workflow_invocation(
self,
project_id: str,
region: str,
repository_id: str,
workflow_invocation: WorkflowInvocation | dict,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> WorkflowInvocation:
"""
Create a new WorkflowInvocation in a given Repository.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param workflow_invocation: Required. The workflow invocation resource to create.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
parent = f"projects/{project_id}/locations/{region}/repositories/{repository_id}"
return client.create_workflow_invocation(
request={"parent": parent, "workflow_invocation": workflow_invocation},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_workflow_invocation(
self,
project_id: str,
region: str,
repository_id: str,
workflow_invocation_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> WorkflowInvocation:
"""
Fetch a single WorkflowInvocation.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param workflow_invocation_id: Required. The workflow invocation resource's id.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
name = (
f"projects/{project_id}/locations/{region}/repositories/"
f"{repository_id}/workflowInvocations/{workflow_invocation_id}"
)
return client.get_workflow_invocation(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def query_workflow_invocation_actions(
self,
project_id: str,
region: str,
repository_id: str,
workflow_invocation_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> QueryWorkflowInvocationActionsPager:
"""
Fetch WorkflowInvocation actions.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param workflow_invocation_id: Required. The workflow invocation resource's id.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
name = (
f"projects/{project_id}/locations/{region}/repositories/"
f"{repository_id}/workflowInvocations/{workflow_invocation_id}"
)
response = client.query_workflow_invocation_actions(
request={
"name": name,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return response
@GoogleBaseHook.fallback_to_default_project_id
def cancel_workflow_invocation(
self,
project_id: str,
region: str,
repository_id: str,
workflow_invocation_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Request cancellation of a running WorkflowInvocation.
:param project_id: Required. The ID of the Google Cloud project that the task belongs to.
:param region: Required. The ID of the Google Cloud region that the task belongs to.
:param repository_id: Required. The ID of the Dataform repository that the task belongs to.
:param workflow_invocation_id: Required. The workflow invocation resource's id.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
name = (
f"projects/{project_id}/locations/{region}/repositories/"
f"{repository_id}/workflowInvocations/{workflow_invocation_id}"
)
try:
workflow_invocation = self.get_workflow_invocation(
project_id=project_id,
region=region,
repository_id=repository_id,
workflow_invocation_id=workflow_invocation_id,
)
state = workflow_invocation.state
except Exception as err:
raise AirflowException(
f"Dataform API returned error when waiting for workflow invocation:\n{err}"
)
if state == WorkflowInvocation.State.RUNNING:
client.cancel_workflow_invocation(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata
)
else:
self.log.info(
"Workflow is not active. Either the execution has already finished or has been canceled. "
"Please check the logs above for more details."
)
@GoogleBaseHook.fallback_to_default_project_id
def create_repository(
self,
*,
project_id: str,
region: str,
repository_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Repository:
"""
Create repository.
:param project_id: Required. The ID of the Google Cloud project where repository should be.
:param region: Required. The ID of the Google Cloud region where repository should be.
:param repository_id: Required. The ID of the new Dataform repository.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
parent = f"projects/{project_id}/locations/{region}"
request = {
"parent": parent,
"repository_id": repository_id,
}
repository = client.create_repository(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return repository
@GoogleBaseHook.fallback_to_default_project_id
def delete_repository(
self,
*,
project_id: str,
region: str,
repository_id: str,
force: bool = True,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> None:
"""
Delete repository.
:param project_id: Required. The ID of the Google Cloud project where repository located.
:param region: Required. The ID of the Google Cloud region where repository located.
:param repository_id: Required. The ID of the Dataform repository that should be deleted.
:param force: If set to true, any child resources of this repository will also be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
name = f"projects/{project_id}/locations/{region}/repositories/{repository_id}"
request = {
"name": name,
"force": force,
}
client.delete_repository(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_workspace(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Workspace:
"""
Create workspace.
:param project_id: Required. The ID of the Google Cloud project where workspace should be.
:param region: Required. The ID of the Google Cloud region where workspace should be.
:param repository_id: Required. The ID of the Dataform repository where workspace should be.
:param workspace_id: Required. The ID of the new Dataform workspace.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
parent = f"projects/{project_id}/locations/{region}/repositories/{repository_id}"
request = {"parent": parent, "workspace_id": workspace_id}
workspace = client.create_workspace(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return workspace
@GoogleBaseHook.fallback_to_default_project_id
def delete_workspace(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Delete workspace.
:param project_id: Required. The ID of the Google Cloud project where workspace located.
:param region: Required. The ID of the Google Cloud region where workspace located.
:param repository_id: Required. The ID of the Dataform repository where workspace located.
:param workspace_id: Required. The ID of the Dataform workspace that should be deleted.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
workspace_path = (
f"projects/{project_id}/locations/{region}/repositories/{repository_id}/workspaces/{workspace_id}"
)
request = {
"name": workspace_path,
}
client.delete_workspace(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def write_file(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
filepath: str,
contents: bytes,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> WriteFileResponse:
"""
Write a new file to the specified workspace.
:param project_id: Required. The ID of the Google Cloud project where workspace located.
:param region: Required. The ID of the Google Cloud region where workspace located.
:param repository_id: Required. The ID of the Dataform repository where workspace located.
:param workspace_id: Required. The ID of the Dataform workspace where files should be created.
:param filepath: Required. Path to file including name of the file relative to workspace root.
:param contents: Required. Content of the file to be written.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
workspace_path = (
f"projects/{project_id}/locations/{region}/repositories/{repository_id}/workspaces/{workspace_id}"
)
request = {
"workspace": workspace_path,
"path": filepath,
"contents": contents,
}
response = client.write_file(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return response
@GoogleBaseHook.fallback_to_default_project_id
def make_directory(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
path: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> dict:
"""
Make new directory in specified workspace.
:param project_id: Required. The ID of the Google Cloud project where workspace located.
:param region: Required. The ID of the Google Cloud region where workspace located.
:param repository_id: Required. The ID of the Dataform repository where workspace located.
:param workspace_id: Required. The ID of the Dataform workspace where directory should be created.
:param path: Required. The directory's full path including new directory name,
relative to the workspace root.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
workspace_path = (
f"projects/{project_id}/locations/{region}/repositories/{repository_id}/workspaces/{workspace_id}"
)
request = {
"workspace": workspace_path,
"path": path,
}
response = client.make_directory(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return response
@GoogleBaseHook.fallback_to_default_project_id
def remove_directory(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
path: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Remove directory in specified workspace.
:param project_id: Required. The ID of the Google Cloud project where workspace located.
:param region: Required. The ID of the Google Cloud region where workspace located.
:param repository_id: Required. The ID of the Dataform repository where workspace located.
:param workspace_id: Required. The ID of the Dataform workspace where directory located.
:param path: Required. The directory's full path including directory name,
relative to the workspace root.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
workspace_path = (
f"projects/{project_id}/locations/{region}/repositories/{repository_id}/workspaces/{workspace_id}"
)
request = {
"workspace": workspace_path,
"path": path,
}
client.remove_directory(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def remove_file(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
filepath: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
):
"""
Remove file in specified workspace.
:param project_id: Required. The ID of the Google Cloud project where workspace located.
:param region: Required. The ID of the Google Cloud region where workspace located.
:param repository_id: Required. The ID of the Dataform repository where workspace located.
:param workspace_id: Required. The ID of the Dataform workspace where directory located.
:param filepath: Required. The full path including name of the file, relative to the workspace root.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
workspace_path = (
f"projects/{project_id}/locations/{region}/repositories/{repository_id}/workspaces/{workspace_id}"
)
request = {
"workspace": workspace_path,
"path": filepath,
}
client.remove_file(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def install_npm_packages(
self,
*,
project_id: str,
region: str,
repository_id: str,
workspace_id: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> InstallNpmPackagesResponse:
"""
Install NPM dependencies in the provided workspace.
Requires "package.json" to be created in the workspace.
:param project_id: Required. The ID of the Google Cloud project where workspace located.
:param region: Required. The ID of the Google Cloud region where workspace located.
:param repository_id: Required. The ID of the Dataform repository where workspace located.
:param workspace_id: Required. The ID of the Dataform workspace.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_dataform_client()
workspace_path = (
f"projects/{project_id}/locations/{region}/repositories/{repository_id}/workspaces/{workspace_id}"
)
request = {
"workspace": workspace_path,
}
response = client.install_npm_packages(
request=request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return response
| DataformHook |
python | pallets__werkzeug | src/werkzeug/datastructures/range.py | {
"start": 4707,
"end": 7034
} | class ____:
"""Represents the content range header.
.. versionadded:: 0.7
"""
def __init__(
self,
units: str | None,
start: int | None,
stop: int | None,
length: int | None = None,
on_update: cabc.Callable[[ContentRange], None] | None = None,
) -> None:
self.on_update = on_update
self.set(start, stop, length, units)
#: The units to use, usually "bytes"
units: str | None = _CallbackProperty() # type: ignore[assignment]
#: The start point of the range or `None`.
start: int | None = _CallbackProperty() # type: ignore[assignment]
#: The stop point of the range (non-inclusive) or `None`. Can only be
#: `None` if also start is `None`.
stop: int | None = _CallbackProperty() # type: ignore[assignment]
#: The length of the range or `None`.
length: int | None = _CallbackProperty() # type: ignore[assignment]
def set(
self,
start: int | None,
stop: int | None,
length: int | None = None,
units: str | None = "bytes",
) -> None:
"""Simple method to update the ranges."""
assert http.is_byte_range_valid(start, stop, length), "Bad range provided"
self._units: str | None = units
self._start: int | None = start
self._stop: int | None = stop
self._length: int | None = length
if self.on_update is not None:
self.on_update(self)
def unset(self) -> None:
"""Sets the units to `None` which indicates that the header should
no longer be used.
"""
self.set(None, None, units=None)
def to_header(self) -> str:
if self._units is None:
return ""
if self._length is None:
length: str | int = "*"
else:
length = self._length
if self._start is None:
return f"{self._units} */{length}"
return f"{self._units} {self._start}-{self._stop - 1}/{length}" # type: ignore[operator]
def __bool__(self) -> bool:
return self._units is not None
def __str__(self) -> str:
return self.to_header()
def __repr__(self) -> str:
return f"<{type(self).__name__} {str(self)!r}>"
# circular dependencies
from .. import http # noqa: E402
| ContentRange |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql_tests/graphql/graphql_context_test_suite.py | {
"start": 30822,
"end": 36137
} | class ____(ABC):
@abstractmethod
@contextmanager
def yield_graphql_context(
self, class_scoped_context
) -> Generator[WorkspaceRequestContext, None, None]:
pass
@contextmanager
def graphql_context_for_request(self, request):
check.param_invariant(
isinstance(request.param, GraphQLContextVariant),
"request",
"params in fixture must be List[GraphQLContextVariant]",
)
with manage_graphql_context(request.param) as graphql_context:
yield graphql_context
def graphql_context_variants_fixture(context_variants):
check.list_param(context_variants, "context_variants", of_type=GraphQLContextVariant)
def _wrap(fn):
return pytest.fixture(
name="class_scoped_graphql_context",
scope="class",
params=[
pytest.param(
context_variant,
id=context_variant.test_id,
marks=context_variant.marks + [Marks.graphql_context_test_suite],
)
for context_variant in context_variants
],
)(fn)
return _wrap
def make_graphql_context_test_suite(context_variants):
"""Arguments:
context_variants (List[GraphQLContextVariant]): List of runs to run per test in this class.
This is the base class factory for test suites in the dagster-graphql test.
The goal of this suite is to make it straightforward to run tests
against multiple graphql_contexts, have a coherent lifecycle for those
contexts.
GraphQLContextVariant has a number of static methods to provide common run configurations
as well as common groups of run configuration
One can also make bespoke GraphQLContextVariants with specific implementations
of DagsterInstance, RepositoryLocation, and so forth. See that class
for more details.
Example:
class TestAThing(
make_graphql_context_test_suite(
context_variants=[GraphQLContextVariant.in_memory_in_process_start()]
)
):
def test_graphql_context_exists(self, graphql_context):
assert graphql_context
"""
check.list_param(context_variants, "context_variants", of_type=GraphQLContextVariant)
class _SpecificTestSuiteBase(_GraphQLContextTestSuite):
@graphql_context_variants_fixture(context_variants=context_variants)
def yield_class_scoped_graphql_context(self, request):
with self.graphql_context_for_request(request) as graphql_context:
yield graphql_context
@pytest.fixture(name="graphql_context")
def graphql_context_fixture(self, class_scoped_graphql_context):
with self.yield_graphql_context(class_scoped_graphql_context) as context:
yield context
@pytest.fixture(name="graphql_client")
def graphql_client_fixture(self, graphql_context):
with self.yield_graphql_client(graphql_context) as client:
yield client
@contextmanager
def yield_graphql_context(
self, class_scoped_context
) -> Generator[WorkspaceRequestContext, None, None]:
instance = class_scoped_context.instance
instance.wipe()
instance.wipe_all_schedules()
with class_scoped_context.create_request_context() as request_context:
yield request_context
# ensure that any runs launched by the test are cleaned up
# Since launcher is lazy loaded, we don't need to do anyting if it's None
if instance._run_launcher: # noqa: SLF001
instance._run_launcher.join() # noqa: SLF001
@contextmanager
def yield_graphql_client(self, context) -> Generator[DagsterGraphQLClient, None, None]:
class MockedGraphQLClient:
def execute(self, gql_query: DocumentNode, variable_values=None):
return execute_dagster_graphql(
context,
print_ast(gql_query), # convert doc back to str
variable_values,
).data
with patch("dagster_graphql.client.client.Client") as mock_client:
mock_client.return_value = MockedGraphQLClient()
yield DagsterGraphQLClient("localhost")
return _SpecificTestSuiteBase
ReadonlyGraphQLContextTestMatrix = make_graphql_context_test_suite(
context_variants=GraphQLContextVariant.all_readonly_variants()
)
NonLaunchableGraphQLContextTestMatrix = make_graphql_context_test_suite(
context_variants=GraphQLContextVariant.all_non_launchable_variants()
)
ExecutingGraphQLContextTestMatrix = make_graphql_context_test_suite(
context_variants=GraphQLContextVariant.all_executing_variants()
)
all_repos_loadable_target = LoadableTargetOrigin(
executable_path=sys.executable,
python_file=file_relative_path(__file__, "cross_repo_asset_deps.py"),
)
AllRepositoryGraphQLContextTestMatrix = make_graphql_context_test_suite(
context_variants=GraphQLContextVariant.all_executing_variants(
target=all_repos_loadable_target, location_name="cross_asset_repos"
)
)
| _GraphQLContextTestSuite |
python | jina-ai__jina | jina/serve/stream/helper.py | {
"start": 120,
"end": 535
} | class ____:
"""Class used to wrap a count integer so that it can be updated inside methods.
.. code-block:: python
def count_increment(i: int, rc: _RequestsCounter):
i += 1
rc.count += 1
c_int = 0
c_rc = _RequestsCounter()
count_increment(c_int, c_rc)
assert c_int == 0
assert c_rc.count == 1
"""
count = 0
| _RequestsCounter |
python | doocs__leetcode | solution/1300-1399/1365.How Many Numbers Are Smaller Than the Current Number/Solution2.py | {
"start": 0,
"end": 232
} | class ____:
def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]:
cnt = [0] * 102
for x in nums:
cnt[x + 1] += 1
s = list(accumulate(cnt))
return [s[x] for x in nums]
| Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 1117175,
"end": 1126906
} | class ____(FieldChannelMixin, core.SecondaryFieldDef):
r"""
YError schema wrapper.
A field definition of a secondary channel that shares a scale with another primary channel.
For example, ``x2``, ``xError`` and ``xError2`` share the same scale with ``x``.
Parameters
----------
shorthand : str, dict, Sequence[str], :class:`RepeatRef`
shorthand for field, aggregate, and type
aggregate : dict, :class:`Aggregate`, :class:`ArgmaxDef`, :class:`ArgminDef`, :class:`NonArgAggregateOp`, Literal['average', 'count', 'distinct', 'max', 'mean', 'median', 'min', 'missing', 'product', 'q1', 'q3', 'ci0', 'ci1', 'stderr', 'stdev', 'stdevp', 'sum', 'valid', 'values', 'variance', 'variancep', 'exponential', 'exponentialb']
Aggregation function for the field (e.g., ``"mean"``, ``"sum"``, ``"median"``,
``"min"``, ``"max"``, ``"count"``).
**Default value:** ``undefined`` (None)
**See also:** `aggregate <https://vega.github.io/vega-lite/docs/aggregate.html>`__
documentation.
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
bin : None
A flag for binning a ``quantitative`` field, `an object defining binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__, or indicating
that the data for ``x`` or ``y`` channel are binned before they are imported into
Vega-Lite (``"binned"``).
* If ``true``, default `binning parameters
<https://vega.github.io/vega-lite/docs/bin.html#bin-parameters>`__ will be
applied.
* If ``"binned"``, this indicates that the data for the ``x`` (or ``y``) channel are
already binned. You can map the bin-start field to ``x`` (or ``y``) and the
bin-end field to ``x2`` (or ``y2``). The scale and axis will be formatted similar
to binning in Vega-Lite. To adjust the axis ticks based on the bin step, you can
also set the axis's `tickMinStep
<https://vega.github.io/vega-lite/docs/axis.html#ticks>`__ property.
**Default value:** ``false``
**See also:** `bin <https://vega.github.io/vega-lite/docs/bin.html>`__
documentation.
field : str, dict, :class:`Field`, :class:`FieldName`, :class:`RepeatRef`
**Required.** A string defining the name of the field from which to pull a data
value or an object defining iterated values from the `repeat
<https://vega.github.io/vega-lite/docs/repeat.html>`__ operator.
**See also:** `field <https://vega.github.io/vega-lite/docs/field.html>`__
documentation.
**Notes:** 1) Dots (``.``) and brackets (``[`` and ``]``) can be used to access
nested objects (e.g., ``"field": "foo.bar"`` and ``"field": "foo['bar']"``). If
field names contain dots or brackets but are not nested, you can use ``\\`` to
escape dots and brackets (e.g., ``"a\\.b"`` and ``"a\\[0\\]"``). See more details
about escaping in the `field documentation
<https://vega.github.io/vega-lite/docs/field.html>`__. 2) ``field`` is not required
if ``aggregate`` is ``count``.
timeUnit : dict, :class:`TimeUnit`, :class:`MultiTimeUnit`, :class:`BinnedTimeUnit`, :class:`SingleTimeUnit`, :class:`TimeUnitParams`, :class:`UtcMultiTimeUnit`, :class:`UtcSingleTimeUnit`, :class:`LocalMultiTimeUnit`, :class:`LocalSingleTimeUnit`, Literal['binnedyear', 'binnedyearquarter', 'binnedyearquartermonth', 'binnedyearmonth', 'binnedyearmonthdate', 'binnedyearmonthdatehours', 'binnedyearmonthdatehoursminutes', 'binnedyearmonthdatehoursminutesseconds', 'binnedyearweek', 'binnedyearweekday', 'binnedyearweekdayhours', 'binnedyearweekdayhoursminutes', 'binnedyearweekdayhoursminutesseconds', 'binnedyeardayofyear', 'binnedutcyear', 'binnedutcyearquarter', 'binnedutcyearquartermonth', 'binnedutcyearmonth', 'binnedutcyearmonthdate', 'binnedutcyearmonthdatehours', 'binnedutcyearmonthdatehoursminutes', 'binnedutcyearmonthdatehoursminutesseconds', 'binnedutcyearweek', 'binnedutcyearweekday', 'binnedutcyearweekdayhours', 'binnedutcyearweekdayhoursminutes', 'binnedutcyearweekdayhoursminutesseconds', 'binnedutcyeardayofyear', 'utcyear', 'utcquarter', 'utcmonth', 'utcweek', 'utcday', 'utcdayofyear', 'utcdate', 'utchours', 'utcminutes', 'utcseconds', 'utcmilliseconds', 'year', 'quarter', 'month', 'week', 'day', 'dayofyear', 'date', 'hours', 'minutes', 'seconds', 'milliseconds', 'utcyearquarter', 'utcyearquartermonth', 'utcyearmonth', 'utcyearmonthdate', 'utcyearmonthdatehours', 'utcyearmonthdatehoursminutes', 'utcyearmonthdatehoursminutesseconds', 'utcyearweek', 'utcyearweekday', 'utcyearweekdayhours', 'utcyearweekdayhoursminutes', 'utcyearweekdayhoursminutesseconds', 'utcyeardayofyear', 'utcquartermonth', 'utcmonthdate', 'utcmonthdatehours', 'utcmonthdatehoursminutes', 'utcmonthdatehoursminutesseconds', 'utcweekday', 'utcweekdayhours', 'utcweekdayhoursminutes', 'utcweekdayhoursminutesseconds', 'utcdayhours', 'utcdayhoursminutes', 'utcdayhoursminutesseconds', 'utchoursminutes', 'utchoursminutesseconds', 'utcminutesseconds', 'utcsecondsmilliseconds', 'yearquarter', 
'yearquartermonth', 'yearmonth', 'yearmonthdate', 'yearmonthdatehours', 'yearmonthdatehoursminutes', 'yearmonthdatehoursminutesseconds', 'yearweek', 'yearweekday', 'yearweekdayhours', 'yearweekdayhoursminutes', 'yearweekdayhoursminutesseconds', 'yeardayofyear', 'quartermonth', 'monthdate', 'monthdatehours', 'monthdatehoursminutes', 'monthdatehoursminutesseconds', 'weekday', 'weekdayhours', 'weekdayhoursminutes', 'weekdayhoursminutesseconds', 'dayhours', 'dayhoursminutes', 'dayhoursminutesseconds', 'hoursminutes', 'hoursminutesseconds', 'minutesseconds', 'secondsmilliseconds']
Time unit (e.g., ``year``, ``yearmonth``, ``month``, ``hours``) for a temporal
field. or `a temporal field that gets casted as ordinal
<https://vega.github.io/vega-lite/docs/type.html#cast>`__.
**Default value:** ``undefined`` (None)
**See also:** `timeUnit <https://vega.github.io/vega-lite/docs/timeunit.html>`__
documentation.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "yError"
@overload
def aggregate(self, _: NonArgAggregateOp_T, /) -> YError: ...
@overload
def aggregate(
self, *, argmax: Optional[str | SchemaBase] = Undefined
) -> YError: ...
@overload
def aggregate(
self, *, argmin: Optional[str | SchemaBase] = Undefined
) -> YError: ...
@overload
def bandPosition(self, _: float, /) -> YError: ...
@overload
def bin(self, _: None, /) -> YError: ...
@overload
def field(self, _: str | RepeatRef, /) -> YError: ...
@overload
def field(
self,
*,
repeat: Optional[Literal["row", "column", "repeat", "layer"]] = Undefined,
) -> YError: ...
@overload
def timeUnit(
self,
_: TimeUnitParams | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T,
/,
) -> YError: ...
@overload
def timeUnit(
self,
*,
binned: Optional[bool] = Undefined,
maxbins: Optional[float] = Undefined,
step: Optional[float] = Undefined,
unit: Optional[SchemaBase | MultiTimeUnit_T | SingleTimeUnit_T] = Undefined,
utc: Optional[bool] = Undefined,
) -> YError: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> YError: ...
def __init__(
self,
shorthand: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
aggregate: Optional[SchemaBase | Map | NonArgAggregateOp_T] = Undefined,
bandPosition: Optional[float] = Undefined,
bin: Optional[None] = Undefined,
field: Optional[str | SchemaBase | Map] = Undefined,
timeUnit: Optional[
SchemaBase | Map | MultiTimeUnit_T | BinnedTimeUnit_T | SingleTimeUnit_T
] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
**kwds,
):
super().__init__(
shorthand=shorthand,
aggregate=aggregate,
bandPosition=bandPosition,
bin=bin,
field=field,
timeUnit=timeUnit,
title=title,
**kwds,
)
@with_property_setters
| YError |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/asset_selection.py | {
"start": 28311,
"end": 29483
} | class ____(AssetSelection):
selected_asset_check_keys: Sequence[AssetCheckKey]
def resolve_inner(
self, asset_graph: BaseAssetGraph, allow_missing: bool
) -> AbstractSet[AssetKey]:
return set()
def resolve_checks_inner( # pyright: ignore[reportIncompatibleMethodOverride]
self, asset_graph: AssetGraph, allow_missing: bool
) -> AbstractSet[AssetCheckKey]:
specified_keys = set(self.selected_asset_check_keys)
missing_keys = {key for key in specified_keys if key not in asset_graph.asset_check_keys}
if not allow_missing and missing_keys:
raise DagsterInvalidSubsetError(
f"AssetCheckKey(s) {[k.to_user_string() for k in missing_keys]} were selected, but "
"no definitions supply these keys. Make sure all keys are spelled "
"correctly, and all definitions are correctly added to the "
f"`Definitions`."
)
return specified_keys & asset_graph.asset_check_keys
def to_serializable_asset_selection(self, asset_graph: BaseAssetGraph) -> "AssetSelection":
return self
@record
| AssetCheckKeysSelection |
python | walkccc__LeetCode | solutions/1896. Minimum Cost to Change the Final Value of Expression/1896.py | {
"start": 0,
"end": 1787
} | class ____:
def minOperationsToFlip(self, expression: str) -> int:
stack = [] # [(the expression, the cost to toggle the expression)]
for e in expression:
if e in '(&|':
# These aren't expressions, so the cost is meaningless.
stack.append((e, 0))
continue
if e == ')':
lastPair = stack.pop()
stack.pop() # Pop '('.
else: # e == '0' or e == '1'
# Store the '0' or '1'. The cost to change their values is just 1,
# whether it's changing '0' to '1' or '1' to '0'.
lastPair = (e, 1)
if stack and stack[-1][0] in '&|':
op = stack.pop()[0]
a, costA = stack.pop()
b, costB = lastPair
# Determine the cost to toggle op(a, b).
if op == '&':
if a == '0' and b == '0':
# Change '&' to '|' and a|b to '1'.
lastPair = ('0', 1 + min(costA, costB))
elif a == '0' and b == '1':
# Change '&' to '|'.
lastPair = ('0', 1)
elif a == '1' and b == '0':
# Change '&' to '|'.
lastPair = ('0', 1)
else: # a == '1' and b == '1'
# Change a|b to '0'.
lastPair = ('1', min(costA, costB))
else: # op == '|'
if a == '0' and b == '0':
# Change a|b to '1'.
lastPair = ('0', min(costA, costB))
elif a == '0' and b == '1':
# Change '|' to '&'.
lastPair = ('1', 1)
elif a == '1' and b == '0':
# Change '|' to '&'.
lastPair = ('1', 1)
else: # a == '1' and b == '1'
# Change '|' to '&' and a|b to '0'.
lastPair = ('1', 1 + min(costA, costB))
stack.append(lastPair)
return stack[-1][1]
| Solution |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/constants.py | {
"start": 2686,
"end": 3270
} | class ____(SimpleNamespace):
"""Constants used by Solr clients."""
QUERY_ALL: Final[str] = "*:*"
"""Solr query requesting all documents to be returned."""
DEFAULT_TIMEOUT_SEC: Final[int] = 60
"""Default request timeout to Solr in seconds."""
SOLR_ISO8601_DATE_FORMAT: Final[str] = "%Y-%m-%dT%H:%M:%SZ"
"""A :py:meth:`datetime.datetime.strftime` format string for Solr-compatible datetimes.
See `Solr documentation
<https://solr.apache.org/guide/solr/latest/indexing-guide/date-formatting-math.html>`_
for more information.
"""
| SolrConstants |
python | ray-project__ray | python/ray/util/state/common.py | {
"start": 60431,
"end": 60720
} | class ____:
#: Node ID -> summary per node
#: If the data is not required to be orgnized per node, it will contain
#: a single key, "cluster".
node_id_to_summary: Dict[str, Union[TaskSummaries, ActorSummaries, ObjectSummaries]]
@dataclass(init=not IS_PYDANTIC_2)
| StateSummary |
python | plotly__plotly.py | _plotly_utils/basevalidators.py | {
"start": 13335,
"end": 19758
} | class ____(BaseValidator):
"""
"enumerated": {
"description": "Enumerated value type. The available values are
listed in `values`.",
"requiredOpts": [
"values"
],
"otherOpts": [
"dflt",
"coerceNumber",
"arrayOk"
]
},
"""
def __init__(
self,
plotly_name,
parent_name,
values,
array_ok=False,
coerce_number=False,
**kwargs,
):
super(EnumeratedValidator, self).__init__(
plotly_name=plotly_name, parent_name=parent_name, **kwargs
)
# Save params
# -----------
self.values = values
self.array_ok = array_ok
# coerce_number is rarely used and not implemented
self.coerce_number = coerce_number
self.kwargs = kwargs
# Handle regular expressions
# --------------------------
# Compiled regexs
self.val_regexs = []
# regex replacements that run before the matching regex
# So far, this is only used to cast 'x1' -> 'x' for anchor-style
# enumeration properties
self.regex_replacements = []
# Loop over enumeration values
# ----------------------------
# Look for regular expressions
for v in self.values:
if v and isinstance(v, str) and v[0] == "/" and v[-1] == "/" and len(v) > 1:
# String is a regex with leading and trailing '/' character
regex_str = v[1:-1]
self.val_regexs.append(re.compile(regex_str))
self.regex_replacements.append(
EnumeratedValidator.build_regex_replacement(regex_str)
)
else:
self.val_regexs.append(None)
self.regex_replacements.append(None)
def __deepcopy__(self, memodict={}):
"""
A custom deepcopy method is needed here because compiled regex
objects don't support deepcopy
"""
cls = self.__class__
return cls(self.plotly_name, self.parent_name, values=self.values)
@staticmethod
def build_regex_replacement(regex_str):
# Example: regex_str == r"^y([2-9]|[1-9][0-9]+)?$"
#
# When we see a regular expression like the one above, we want to
# build regular expression replacement params that will remove a
# suffix of 1 from the input string ('y1' -> 'y' in this example)
#
# Why?: Regular expressions like this one are used in enumeration
# properties that refer to subplotids (e.g. layout.annotation.xref)
# The regular expressions forbid suffixes of 1, like 'x1'. But we
# want to accept 'x1' and coerce it into 'x'
#
# To be cautious, we only perform this conversion for enumerated
# values that match the anchor-style regex
match = re.match(
r"\^(\w)\(\[2\-9\]\|\[1\-9\]\[0\-9\]\+\)\?\( domain\)\?\$", regex_str
)
if match:
anchor_char = match.group(1)
return "^" + anchor_char + "1$", anchor_char
else:
return None
def perform_replacemenet(self, v):
"""
Return v with any applicable regex replacements applied
"""
if isinstance(v, str):
for repl_args in self.regex_replacements:
if repl_args:
v = re.sub(repl_args[0], repl_args[1], v)
return v
def description(self):
# Separate regular values from regular expressions
enum_vals = []
enum_regexs = []
for v, regex in zip(self.values, self.val_regexs):
if regex is not None:
enum_regexs.append(regex.pattern)
else:
enum_vals.append(v)
desc = """\
The '{name}' property is an enumeration that may be specified as:""".format(
name=self.plotly_name
)
if enum_vals:
enum_vals_str = "\n".join(
textwrap.wrap(
repr(enum_vals),
initial_indent=" " * 12,
subsequent_indent=" " * 12,
break_on_hyphens=False,
)
)
desc = (
desc
+ """
- One of the following enumeration values:
{enum_vals_str}""".format(enum_vals_str=enum_vals_str)
)
if enum_regexs:
enum_regexs_str = "\n".join(
textwrap.wrap(
repr(enum_regexs),
initial_indent=" " * 12,
subsequent_indent=" " * 12,
break_on_hyphens=False,
)
)
desc = (
desc
+ """
- A string that matches one of the following regular expressions:
{enum_regexs_str}""".format(enum_regexs_str=enum_regexs_str)
)
if self.array_ok:
desc = (
desc
+ """
- A tuple, list, or one-dimensional numpy array of the above"""
)
return desc
def in_values(self, e):
"""
Return whether a value matches one of the enumeration options
"""
is_str = isinstance(e, str)
for v, regex in zip(self.values, self.val_regexs):
if is_str and regex:
in_values = fullmatch(regex, e) is not None
# in_values = regex.fullmatch(e) is not None
else:
in_values = e == v
if in_values:
return True
return False
def validate_coerce(self, v):
if is_none_or_typed_array_spec(v):
pass
elif self.array_ok and is_array(v):
v_replaced = [self.perform_replacemenet(v_el) for v_el in v]
invalid_els = [e for e in v_replaced if (not self.in_values(e))]
if invalid_els:
self.raise_invalid_elements(invalid_els[:10])
if is_homogeneous_array(v):
v = copy_to_readonly_numpy_array(v)
else:
v = to_scalar_or_list(v)
else:
v = self.perform_replacemenet(v)
if not self.in_values(v):
self.raise_invalid_val(v)
return v
| EnumeratedValidator |
python | scipy__scipy | benchmarks/benchmarks/spatial.py | {
"start": 2175,
"end": 2493
} | class ____(PresortedDataSetup):
params = PresortedDataSetup.params[:-1]
param_names = PresortedDataSetup.param_names[:-1]
def setup(self, *args):
super().setup(*args, None)
def time_build(self, mnr, balanced, order):
cKDTree(self.data.get(order), balanced_tree=balanced)
| BuildUnbalanced |
python | tensorflow__tensorflow | tensorflow/python/distribute/experimental/dtensor_util.py | {
"start": 3962,
"end": 12790
} | class ____(distribute_lib.ReplicaContext):
"""ReplicaContext for strategy that is backed by DTensor.
Since the DTensor is operated in the global context, most of the methods from
existing strategy ReplicaContext is not applicable since they need to access
local values. For now most of the methods in this class will raise explicit
error to user, and we will add more support for local values in future.
"""
_UNSUPPORTED_ERROR_MSG = (
"Strategy that is backed by DTensor is run with a global context, and "
"doesn't support operations for local context, like any call to merge/"
"gather/reduce or local replica ID. Please use any strategy that is not "
"backed by DTensor")
def __init__(self, strategy):
# Since DTensor strategy only runs in a global context, and we can't have
# a local replica ID in the sync group. For now we pass None to parent, and
# raise an explicit error when it is accessed.
super().__init__(strategy, replica_id_in_sync_group=None)
def __enter__(self):
# This is a copy of parent class, without any check about whether the
# current replica is the first one (since DTensor only has one).
distribute_lib._push_per_thread_mode(self._thread_context) # # pylint: disable=protected-access
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
self._summary_recording_distribution_strategy = (
summary_state.is_recording_distribution_strategy)
summary_state.is_recording_distribution_strategy = True
@property
def replica_id_in_sync_group(self):
# Since there is only one global context for DTensor, we always return a
# constant value here. This value is needed by the RNG which try to generate
# different seed for different replica.
return 0
@property
def _replica_id(self):
raise NotImplementedError(self._UNSUPPORTED_ERROR_MSG)
def merge_call(self, merge_fn, args=(), kwargs=None):
raise NotImplementedError(self._UNSUPPORTED_ERROR_MSG)
def all_reduce(self, reduce_op, value, options=None):
raise NotImplementedError(self._UNSUPPORTED_ERROR_MSG)
def all_gather(self, value, axis, options=None):
raise NotImplementedError(self._UNSUPPORTED_ERROR_MSG)
def _update(self, var, fn, args=(), kwargs=None, group=True):
raise NotImplementedError(self._UNSUPPORTED_ERROR_MSG)
def initialize_accelerator_system_once(device_type):
# Initialize the GPU/TPU before creating the mesh.
# Note that this method will also trigger the creation of the pairing
# virtual host CPUs, which is needed by dataset and checkpoint.
if not accelerator_util.is_initialized():
# TODO(feyu): Add a method in accelerator_util to check the initialized
# mesh device types.
accelerator_util.initialize_accelerator_system(
device_type,
experimental_reset_context=True)
def convert_inputs_to_dtensor(inputs, mesh):
"""Convert any input types to DTensor instance."""
if isinstance(inputs, DTensorDistributedValue):
return inputs.get_dtensor()
elif isinstance(inputs, values_lib.DistributedValues):
return convert_per_replica_to_dtensor(inputs, mesh)
elif isinstance(inputs, input_util._DTensorIterator): # pylint: disable=protected-access
return inputs
elif tensor_util.is_tensor(inputs):
if context.executing_eagerly():
if d_api.is_dtensor(inputs):
return inputs
else:
# For a non-dtensor input in eager context, we could choose to replica
# them into per-replica and then pack them into dtensor. However, this
# will cause an eager/graph discrepancy since we can't do this check in
# the graph context. For now, we will ask user to provide a distributed
# value for inputs.
_raise_unsupported_input_type_error(inputs)
else:
# For graph context, since we can't check if they are dtensor or not. We
# will assume the value is already distributed. This is a critical use
# case for keras, where all the inputs are pre-distributed via strategy,
# and the train function execute within graph context.
return inputs
else:
# For any other types.
_raise_unsupported_input_type_error(inputs)
def _raise_unsupported_input_type_error(inputs):
raise ValueError("Unsupported input types for MirroredStrategy. "
"Please use `strategy.distribute_dataset` or "
"`strategy.distribute_values_from_function` to "
f"distribute inputs. Received input type: {type(inputs)}")
def is_distributed_value(value):
return isinstance(
value, values_lib.DistributedValues) or d_api.is_dtensor(value)
def convert_per_replica_to_dtensor(per_replica_value, mesh):
"""Convert a PerReplica result to a DTensor instance.
Args:
per_replica_value: A PerReplica instance whose value will be converted
to DTensor.
mesh: The mesh used for layout creation.
Returns:
A DTensor instance that packed from per_replica_value with batch sharded
layout.
"""
values = per_replica_value.values
if isinstance(values[0], (float, int)):
rank = 0
else:
rank = len(values[0].shape)
if rank == 0:
result = []
# dtensor.pack requires each component to have same rank as the packed
# result. When the individual value is scalar, it needs to be expanded into
# 1D tensor.
for v in values:
result.append(array_ops.expand_dims_v2(v, axis=0))
rank += 1
else:
result = list(values) # dtensor.pack requires a list as input.
# TODO(scottzhu): Note that the result tensor could be a partial value and
# not always batch shard or fully replicaed. See
# http://screenshot/6ERkXyX95KqftCw as an example.
batch_layout = layout.Layout.batch_sharded(
mesh, batch_dim=DEFAULT_BATCH_MESH_DIM_NAME, rank=rank)
return d_api.pack(result, batch_layout)
def dtensor_reduce(strategy, reduce_op, value, axis):
"""Implement dtensor based strategy.reduce()."""
# Due to the limitation of using scalar in DTensor (e.g. the rank 0 tensor
# loss the batch shard information), we need to override the default
# reduce in addition to the strategy.extend._reduce_to()
# Most of the logic here is a mimic of the parent class, except for how
# mean and sum are calculated in a global context.
distribute_lib._require_cross_replica_or_default_context_extended( # pylint: disable=protected-access
strategy.extended)
if isinstance(reduce_op, str):
reduce_op = reduce_util.ReduceOp(reduce_op.upper())
distributed_input = is_distributed_value(value)
if not distributed_input and axis is None:
# For any value that isn't distributed and doesn't need a reduction within
# the replica.
destinations = (device_util.current() or
strategy.extended._default_device or # pylint: disable=protected-access
"/device:CPU:0")
devices = cross_device_ops_lib.get_devices_from(destinations)
with ops.device(devices[0]):
return array_ops.identity(
cross_device_ops_lib.reduce_non_distributed_value(
reduce_op, value, destinations, strategy.num_replicas_in_sync))
value = convert_inputs_to_dtensor(value, strategy._mesh) # pylint: disable=protected-access
# At this point, the value is a DTensor instance now.
# There will be a final reduction step cross replica. In order to maintain
# the shape of each local replica, we need to add a new dim to the front.
# E.g. 2 replica with local shape as (4, 5, 6), the global tensor shape
# should be (8, 5, 6), we will reshape into (2, 4, 5, 6) and then do a
# reduction on axis 0.
if reduce_op == reduce_util.ReduceOp.MEAN:
reduce_op = math_ops.reduce_mean
else:
reduce_op = math_ops.reduce_sum
# TODO(scottzhu): Make sure we handle dynamic/uneven shape in future.
if d_api.fetch_layout(value).is_fully_replicated():
# In case of fully mirrored dtensor, we only need to do one reduce, and
# don't need to care about any per-replica logic.
if axis is not None:
value = reduce_op(value, axis=axis)
else:
new_shape = [strategy.num_replicas_in_sync, -1]
if len(value.shape) > 1:
new_shape.extend(array_ops.shape(value)[1:])
value = array_ops.reshape(value, new_shape)
if axis is not None:
# we do a reduce_sum/mean within each of the replica when axis is not
# None. Add 1 to the axis since there is a new dim added by reshape in
# front.
value = reduce_op(value, axis=axis + 1)
value = reduce_op(value, axis=0)
# Note that we return a DTensor instance here, which should have the same
# value as the original MirroredStrategy, but with a different type. User
# might want a tf.Tensor for the status quo.
return value
| DTensorReplicaContext |
python | numpy__numpy | numpy/_core/tests/test_umath.py | {
"start": 108378,
"end": 110528
} | class ____(_FilterInvalids):
def test_reduce(self):
dflt = np.typecodes['AllFloat']
dint = np.typecodes['AllInteger']
seq1 = np.arange(11)
seq2 = seq1[::-1]
func = np.fmin.reduce
for dt in dint:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
for dt in dflt:
tmp1 = seq1.astype(dt)
tmp2 = seq2.astype(dt)
assert_equal(func(tmp1), 0)
assert_equal(func(tmp2), 0)
tmp1[::2] = np.nan
tmp2[::2] = np.nan
assert_equal(func(tmp1), 1)
assert_equal(func(tmp2), 1)
def test_reduce_complex(self):
assert_equal(np.fmin.reduce([1, 2j]), 2j)
assert_equal(np.fmin.reduce([1 + 3j, 2j]), 2j)
def test_float_nans(self):
nan = np.nan
arg1 = np.array([0, nan, nan])
arg2 = np.array([nan, 0, nan])
out = np.array([0, 0, nan])
assert_equal(np.fmin(arg1, arg2), out)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
arg1 = np.array([0, cnan, cnan], dtype=complex)
arg2 = np.array([cnan, 0, cnan], dtype=complex)
out = np.array([0, 0, nan], dtype=complex)
assert_equal(np.fmin(arg1, arg2), out)
def test_precision(self):
dtypes = [np.float16, np.float32, np.float64, np.longdouble]
for dt in dtypes:
dtmin = np.finfo(dt).min
dtmax = np.finfo(dt).max
d1 = dt(0.1)
d1_next = np.nextafter(d1, np.inf)
test_cases = [
# v1 v2 expected
(dtmin, np.inf, dtmin),
(dtmax, np.inf, dtmax),
(d1, d1_next, d1),
(dtmin, np.nan, dtmin),
]
for v1, v2, expected in test_cases:
assert_equal(np.fmin([v1], [v2]), [expected])
assert_equal(np.fmin.reduce([v1, v2]), expected)
| TestFmin |
python | doocs__leetcode | solution/3700-3799/3758.Convert Number Words to Digits/Solution.py | {
"start": 0,
"end": 608
} | class ____:
def convertNumber(self, s: str) -> str:
d = [
"zero",
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
]
i, n = 0, len(s)
ans = []
while i < n:
for j, t in enumerate(d):
m = len(t)
if i + m <= n and s[i : i + m] == t:
ans.append(str(j))
i += m - 1
break
i += 1
return "".join(ans)
| Solution |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/strings_ops/unsorted_segment_join_op_test.py | {
"start": 1376,
"end": 10885
} | class ____(UnicodeTestCase, parameterized.TestCase):
def test_basic_np_array(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids = [1, 0, 1]
num_segments = 2
separator = ':'
output_array = [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']]
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqual(res.shape, np.array(output_array).shape)
self.assertAllEqualUnicode(res, output_array)
def test_segment_id_and_input_empty(self):
inputs = np.array([], dtype=np.bytes_)
segment_ids = np.array([], dtype=np.int32)
num_segments = 3
separator = ':'
output_array = ['', '', '']
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqual(res.shape, np.array(output_array).shape)
self.assertAllEqualUnicode(res, output_array)
def test_type_check(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids = np.array([1, 0, 1], dtype=np.int32)
num_segments = np.array(2, dtype=np.int32)
separator = ':'
output_array = [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']]
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqual(res.shape, np.array(output_array).shape)
self.assertAllEqualUnicode(res, output_array)
segment_ids = np.array([1, 0, 1], dtype=np.int64)
num_segments = np.array(2, dtype=np.int64)
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqual(res.shape, np.array(output_array).shape)
self.assertAllEqualUnicode(res, output_array)
def test_basic_tensor(self):
inputs = constant_op.constant([['Y', 'q', 'c'], ['Y', '6', '6'],
['p', 'G', 'a']])
segment_ids = constant_op.constant([1, 0, 1])
num_segments = 2
separator = ':'
output_array = constant_op.constant([['Y', '6', '6'], ['Y:p', 'q:G',
'c:a']])
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqual(res, output_array)
self.assertAllEqual(res.shape, output_array.get_shape())
def test_multiple_segment_join(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids_1 = [1, 0, 1]
num_segments_1 = 2
separator_1 = ':'
output_array_1 = [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']]
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids_1,
num_segments=num_segments_1,
separator=separator_1))
self.assertAllEqualUnicode(res, output_array_1)
self.assertAllEqual(res.shape, np.array(output_array_1).shape)
segment_ids_2 = [1, 1]
num_segments_2 = 2
separator_2 = ''
output_array_2 = [['', '', ''], ['YY:p', '6q:G', '6c:a']]
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=res,
segment_ids=segment_ids_2,
num_segments=num_segments_2,
separator=separator_2))
self.assertAllEqualUnicode(res, output_array_2)
self.assertAllEqual(res.shape, np.array(output_array_2).shape)
@parameterized.parameters([
{
'inputs': [[[['q'], ['s']], [['f'], ['F']], [['h'], ['0']]],
[[['E'], ['j']], [['2'], ['k']], [['N'], ['d']]],
[[['G'], ['M']], [['1'], ['S']], [['N'], ['7']]],
[[['8'], ['W']], [['W'], ['G']], [['j'], ['d']]]],
'segment_ids': [1, 1, 0, 2],
'num_segments':
3,
'separator':
':',
'output_array': [[[['G'], ['M']], [['1'], ['S']], [['N'], ['7']]],
[[['q:E'], ['s:j']], [['f:2'], ['F:k']],
[['h:N'], ['0:d']]],
[[['8'], ['W']], [['W'], ['G']], [['j'], ['d']]]],
},
{
'inputs': [[['Q', 'b'], ['c', 'p']], [['i', '9'], ['n', 'b']],
[['T', 'h'], ['g', 'z']]],
'segment_ids': [[0, 1], [1, 0], [1, 0]],
'num_segments': 2,
'separator': ':',
'output_array': [['Q:n:g', 'b:b:z'], ['c:i:T', 'p:9:h']]
},
{
'inputs': [[['Q', 'b'], ['b', 'p']], [['i', '9'], ['n', 'b']],
[['T', 'h'], ['g', 'z']]],
'segment_ids': [[[2, 1], [0, 0]], [[2, 0], [2, 2]], [[0, 2], [1, 0]]],
'num_segments': 3,
'separator': ':',
'output_array': ['b:p:9:T:z', 'b:g', 'Q:i:n:b:h']
},
{
'inputs': [[['z'], ['h']], [['c'], ['z']], [['V'], ['T']]],
'segment_ids': [0, 1, 1],
'num_segments': 3,
'separator': ':',
'output_array': [[['z'], ['h']], [['c:V'], ['z:T']], [[''], ['']]]
},
])
def test_multiple_cases_with_different_dims(self, inputs, segment_ids,
num_segments, separator,
output_array):
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqualUnicode(res, output_array)
self.assertAllEqual(res.shape, np.array(output_array).shape)
@parameterized.parameters([
{
'separator': '',
'output_array': ['thisisatest']
},
{
'separator': ':',
'output_array': ['this:is:a:test']
},
{
'separator': 'UNK',
'output_array': ['thisUNKisUNKaUNKtest']
},
])
def testSeparator(self, separator, output_array):
inputs = ['this', 'is', 'a', 'test']
segment_ids = [0, 0, 0, 0]
num_segments = 1
res = self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
self.assertAllEqual(res.shape, np.array(output_array).shape)
self.assertAllEqualUnicode(res, output_array)
def test_fail_segment_id_exceeds_segment_nums(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids = [1, 0, 1]
num_segments = 1
separator = ':'
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
def test_fail_segment_id_dim_does_not_match(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids = [1, 0, 1, 1]
num_segments = 2
separator = ':'
if not context.executing_eagerly():
with self.assertRaises(ValueError):
self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
else:
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
def test_fail_segment_id_empty_input_non_empty(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids = np.array([], dtype=np.int32)
num_segments = 2
separator = ':'
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
def test_empty_input(self):
inputs = np.array([], dtype=np.bytes_)
segment_ids = [1, 0, 1]
num_segments = 2
separator = ':'
with self.assertRaises((ValueError, errors_impl.InvalidArgumentError)):
self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
def test_fail_negative_segment_id(self):
inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']]
segment_ids = [-1, 0, -1]
num_segments = 1
separator = ':'
with self.assertRaises(errors_impl.InvalidArgumentError):
self.evaluate(
string_ops.unsorted_segment_join(
inputs=inputs,
segment_ids=segment_ids,
num_segments=num_segments,
separator=separator))
if __name__ == '__main__':
test.main()
| UnsortedSegmentJoinOpTest |
python | kamyu104__LeetCode-Solutions | Python/check-if-an-original-string-exists-given-two-encoded-strings.py | {
"start": 3233,
"end": 5137
} | class ____(object):
def possiblyEquals(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: bool
"""
def memoization(s1, s2, i, j, k, lookup):
if (i, j, k) not in lookup:
if i == len(s1) and j == len(s2):
lookup[(i, j, k)] = (k == 0)
elif i != len(s1) and s1[i].isdigit():
lookup[(i, j, k)] = False
for ni in xrange(i+1, len(s1)+1):
if (ni == len(s1) or s1[ni] != '0') and memoization(s1, s2, ni, j, k+int(s1[i:ni]), lookup):
lookup[(i, j, k)] = True
break
if ni == len(s1) or not s1[ni].isdigit():
break
elif j != len(s2) and s2[j].isdigit():
lookup[(i, j, k)] = False
for nj in xrange(j+1, len(s2)+1):
if (nj == len(s2) or s2[nj] != '0') and memoization(s1, s2, i, nj, k-int(s2[j:nj]), lookup):
lookup[(i, j, k)] = True
break
if nj == len(s2) or not s2[nj].isdigit():
break
elif k < 0:
lookup[(i, j, k)] = memoization(s1, s2, i+1, j, k+1, lookup) if i != len(s1) else False
elif k > 0:
lookup[(i, j, k)] = memoization(s1, s2, i, j+1, k-1, lookup) if j != len(s2) else False
else:
lookup[(i, j, k)] = memoization(s1, s2, i+1, j+1, k, lookup) if i != len(s1) and j != len(s2) and s1[i] == s2[j] else False
return lookup[(i, j, k)]
return memoization(s1, s2, 0, 0, 0, {})
# Time: O(m * n * k), k is the max number of consecutive digits in s1 and s2
# Space: O(min(m, n) * k)
# bottom-up dp
| Solution2 |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 115618,
"end": 117943
} | class ____(Request):
"""
For each task, get a list of metrics for which the requested event type was reported
:param tasks: Task IDs
:type tasks: Sequence[str]
:param event_type: Event type
:type event_type: EventTypeEnum
"""
_service = "events"
_action = "get_task_metrics"
_version = "2.20"
_schema = {
"definitions": {
"event_type_enum": {
"enum": [
"training_stats_scalar",
"training_stats_vector",
"training_debug_image",
"plot",
"log",
],
"type": "string",
}
},
"properties": {
"event_type": {
"$ref": "#/definitions/event_type_enum",
"description": "Event type",
},
"tasks": {
"description": "Task IDs",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["tasks"],
"type": "object",
}
def __init__(self, tasks: List[str], event_type: Any = None, **kwargs: Any) -> None:
super(GetTaskMetricsRequest, self).__init__(**kwargs)
self.tasks = tasks
self.event_type = event_type
@schema_property("tasks")
def tasks(self) -> List[str]:
return self._property_tasks
@tasks.setter
def tasks(self, value: List[str]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
self._property_tasks = value
@schema_property("event_type")
def event_type(self) -> Any:
return self._property_event_type
@event_type.setter
def event_type(self, value: Any) -> None:
if value is None:
self._property_event_type = None
return
if isinstance(value, six.string_types):
try:
value = EventTypeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "event_type", enum.Enum)
self._property_event_type = value
| GetTaskMetricsRequest |
python | tornadoweb__tornado | tornado/locale.py | {
"start": 18832,
"end": 21120
} | class ____(Locale):
"""Locale implementation using the `gettext` module."""
def __init__(self, code: str, translations: gettext.NullTranslations) -> None:
self.ngettext = translations.ngettext
self.gettext = translations.gettext
# self.gettext must exist before __init__ is called, since it
# calls into self.translate
super().__init__(code)
def translate(
self,
message: str,
plural_message: Optional[str] = None,
count: Optional[int] = None,
) -> str:
if plural_message is not None:
assert count is not None
return self.ngettext(message, plural_message, count)
else:
return self.gettext(message)
def pgettext(
self,
context: str,
message: str,
plural_message: Optional[str] = None,
count: Optional[int] = None,
) -> str:
"""Allows to set context for translation, accepts plural forms.
Usage example::
pgettext("law", "right")
pgettext("good", "right")
Plural message example::
pgettext("organization", "club", "clubs", len(clubs))
pgettext("stick", "club", "clubs", len(clubs))
To generate POT file with context, add following options to step 1
of `load_gettext_translations` sequence::
xgettext [basic options] --keyword=pgettext:1c,2 --keyword=pgettext:1c,2,3
.. versionadded:: 4.2
"""
if plural_message is not None:
assert count is not None
msgs_with_ctxt = (
f"{context}{CONTEXT_SEPARATOR}{message}",
f"{context}{CONTEXT_SEPARATOR}{plural_message}",
count,
)
result = self.ngettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = self.ngettext(message, plural_message, count)
return result
else:
msg_with_ctxt = f"{context}{CONTEXT_SEPARATOR}{message}"
result = self.gettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = message
return result
| GettextLocale |
python | pytorch__pytorch | torch/_dynamo/variables/functions.py | {
"start": 64800,
"end": 66208
} | class ____(NestedUserFunctionVariable):
def __init__(
self,
wrapped: Any,
context: "ContextWrappingVariable",
**kwargs: Any,
) -> None:
kwargs.pop("fn_name", None)
kwargs.pop("code", None)
kwargs.pop("f_globals", None)
kwargs.pop("defaults", None)
kwargs.pop("kwdefaults", None)
kwargs.pop("annotations", None)
kwargs.pop("closure", None)
kwargs.pop("wrapped_fn", None)
super().__init__(
wrapped.fn_name,
wrapped.code,
wrapped.f_globals,
wrapped.defaults,
wrapped.kwdefaults,
wrapped.annotations,
wrapped.closure,
wrapped.wrapped_fn,
)
self.wrapped = wrapped
self.context = context
def call_function(
self,
tx: "InstructionTranslator",
args: Sequence[VariableTracker],
kwargs: dict[str, VariableTracker],
) -> VariableTracker:
self.context.enter(tx)
result = super().call_function(tx, args, kwargs)
self.context.exit(tx)
return result
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(lambda: codegen(self.context)) # type: ignore[arg-type]
codegen(self.wrapped)
codegen.extend_output(create_call_function(1, False))
| WrappedNestedUserFunctionVariable |
python | kamyu104__LeetCode-Solutions | Python/find-minimum-time-to-reach-last-room-ii.py | {
"start": 89,
"end": 1282
} | class ____(object):
def minTimeToReach(self, moveTime):
"""
:type moveTime: List[List[int]]
:rtype: int
"""
def dijkstra(start, target):
DIRECTIONS = [(1, 0), (0, 1), (-1, 0), (0, -1)]
dist = [[float("inf")]*len(moveTime[0]) for _ in xrange(len(moveTime))]
dist[start[0]][start[1]] = 0
min_heap = [(dist[start[0]][start[1]], start[0], start[1])]
while min_heap:
curr, i, j = heapq.heappop(min_heap)
if curr != dist[i][j]:
continue
if (i, j) == target:
break
for di, dj in DIRECTIONS:
ni, nj = i+di, j+dj
c = (i+j)%2+1
if not (0 <= ni < len(moveTime) and 0 <= nj < len(moveTime[0]) and dist[ni][nj] > max(moveTime[ni][nj], curr)+c):
continue
dist[ni][nj] = max(moveTime[ni][nj], curr)+c
heapq.heappush(min_heap, (dist[ni][nj], ni, nj))
return dist[target[0]][target[1]]
return dijkstra((0, 0), (len(moveTime)-1, len(moveTime[0])-1))
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-stripe/unit_tests/integration/test_bank_accounts.py | {
"start": 4675,
"end": 15393
} | class ____(TestCase):
@HttpMocker()
def test_given_one_page_when_read_then_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_customers_request().with_expands(_EXPANDS).with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_customers_response()
.with_record(
_a_customer()
.with_id("1")
.with_field(
_SOURCES_FIELD, _as_dict(_bank_accounts_response().with_record(_a_bank_account()).with_record(_a_bank_account()))
)
)
.with_record(
_a_customer().with_id("2").with_field(_SOURCES_FIELD, _as_dict(_bank_accounts_response().with_record(_a_bank_account())))
)
.build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 3
@HttpMocker()
def test_given_source_is_not_bank_account_when_read_then_filter_record(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_customers_request().with_expands(_EXPANDS).with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_customers_response()
.with_record(_a_customer().with_field(_SOURCES_FIELD, _as_dict(_bank_accounts_response().with_record(_NOT_A_BANK_ACCOUNT))))
.build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 0
@HttpMocker()
def test_given_multiple_bank_accounts_pages_when_read_then_query_pagination_on_child(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_customers_request().with_expands(_EXPANDS).with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_customers_response()
.with_record(
_a_customer()
.with_id("parent_id")
.with_field(
_SOURCES_FIELD,
_as_dict(_bank_accounts_response().with_pagination().with_record(_a_bank_account().with_id("latest_bank_account_id"))),
)
)
.build(),
)
http_mocker.get(
# we do not use slice boundaries here because:
# * there should be no duplicates parents (application fees) returned by the stripe API as it is using cursor pagination
# * it is implicitly lower bounder by the parent creation
# * the upper boundary is not configurable and is always <now>
_customers_bank_accounts_request("parent_id").with_limit(100).with_starting_after("latest_bank_account_id").build(),
_bank_accounts_response().with_record(_a_bank_account()).build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 2
@HttpMocker()
def test_given_multiple_customers_pages_when_read_then_query_pagination_on_parent(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_customers_request().with_expands(_EXPANDS).with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_customers_response()
.with_pagination()
.with_record(
_a_customer()
.with_id("parent_id")
.with_field(_SOURCES_FIELD, _as_dict(_bank_accounts_response().with_record(_a_bank_account())))
)
.build(),
)
http_mocker.get(
_customers_request()
.with_expands(_EXPANDS)
.with_starting_after("parent_id")
.with_created_gte(_A_START_DATE)
.with_created_lte(_NOW)
.with_limit(100)
.build(),
_customers_response()
.with_record(_a_customer().with_field(_SOURCES_FIELD, _as_dict(_bank_accounts_response().with_record(_a_bank_account()))))
.build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 2
@HttpMocker()
def test_given_parent_stream_without_bank_accounts_when_read_then_stream_did_not_run(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_customers_request().with_expands(_EXPANDS).with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_customers_response().build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert_stream_did_not_run(output, _STREAM_NAME)
@HttpMocker()
def test_given_slice_range_when_read_then_perform_multiple_requests(self, http_mocker: HttpMocker) -> None:
start_date = _NOW - timedelta(days=30)
slice_range = timedelta(days=20)
slice_datetime = start_date + slice_range
http_mocker.get(
_customers_request()
.with_expands(_EXPANDS)
.with_created_gte(start_date)
.with_created_lte(slice_datetime - _AVOIDING_INCLUSIVE_BOUNDARIES)
.with_limit(100)
.build(),
_customers_response()
.with_record(
_a_customer().with_id("1").with_field(_SOURCES_FIELD, _as_dict(_bank_accounts_response().with_record(_a_bank_account())))
)
.build(),
)
http_mocker.get(
_customers_request().with_expands(_EXPANDS).with_created_gte(slice_datetime).with_created_lte(_NOW).with_limit(100).build(),
_customers_response()
.with_record(
_a_customer().with_id("2").with_field(_SOURCES_FIELD, _as_dict(_bank_accounts_response().with_record(_a_bank_account())))
)
.build(),
)
output = self._read(_config().with_start_date(start_date).with_slice_range_in_days(slice_range.days))
assert len(output.records) == 2
@HttpMocker()
def test_given_slice_range_and_bank_accounts_pagination_when_read_then_do_not_slice_child(self, http_mocker: HttpMocker) -> None:
"""
This means that if the user attempt to configure the slice range, it will only apply on the parent stream
"""
start_date = _NOW - timedelta(days=30)
slice_range = timedelta(days=20)
slice_datetime = start_date + slice_range
http_mocker.get(
StripeRequestBuilder.customers_endpoint(_ACCOUNT_ID, _CLIENT_SECRET).with_any_query_params().build(),
_customers_response().build(),
) # catching subsequent slicing request that we don't really care for this test
http_mocker.get(
_customers_request()
.with_expands(_EXPANDS)
.with_created_gte(start_date)
.with_created_lte(slice_datetime - _AVOIDING_INCLUSIVE_BOUNDARIES)
.with_limit(100)
.build(),
_customers_response()
.with_record(
_a_customer()
.with_id("parent_id")
.with_field(
_SOURCES_FIELD,
_as_dict(_bank_accounts_response().with_pagination().with_record(_a_bank_account().with_id("latest_bank_account_id"))),
)
)
.build(),
)
http_mocker.get(
# slice range is not applied here
_customers_bank_accounts_request("parent_id").with_limit(100).with_starting_after("latest_bank_account_id").build(),
_bank_accounts_response().with_record(_a_bank_account()).build(),
)
self._read(_config().with_start_date(start_date).with_slice_range_in_days(slice_range.days))
# request matched http_mocker
@HttpMocker()
def test_given_no_state_when_read_then_return_ignore_lookback(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_customers_request().with_expands(_EXPANDS).with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_customers_response().with_record(_a_customer()).build(),
)
self._read(_config().with_start_date(_A_START_DATE).with_lookback_window_in_days(10))
# request matched http_mocker
@HttpMocker()
def test_given_one_page_when_read_then_cursor_field_is_set(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_customers_request().with_expands(_EXPANDS).with_created_gte(_A_START_DATE).with_created_lte(_NOW).with_limit(100).build(),
_customers_response()
.with_record(_a_customer().with_field(_SOURCES_FIELD, _as_dict(_bank_accounts_response().with_record(_a_bank_account()))))
.build(),
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert output.records[0].record.data["updated"] == int(_NOW.timestamp())
@HttpMocker()
def test_given_http_status_401_when_read_then_config_error(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_customers_request().with_any_query_params().build(),
a_response_with_status(401),
)
output = self._read(_config(), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
@HttpMocker()
def test_given_rate_limited_when_read_then_retry_and_return_records(self, http_mocker: HttpMocker) -> None:
http_mocker.get(
_customers_request().with_any_query_params().build(),
[
a_response_with_status(429),
_customers_response()
.with_record(_a_customer().with_field(_SOURCES_FIELD, _as_dict(_bank_accounts_response().with_record(_a_bank_account()))))
.build(),
],
)
output = self._read(_config().with_start_date(_A_START_DATE))
assert len(output.records) == 1
@HttpMocker()
def test_given_http_status_500_when_read_then_raise_config_error(self, http_mocker: HttpMocker) -> None:
request = _customers_request().with_any_query_params().build()
http_mocker.get(
request,
a_response_with_status(500),
)
with patch.object(HttpStatusErrorHandler, "max_retries", new=1):
output = self._read(_config(), expecting_exception=True)
assert output.errors[-1].trace.error.failure_type == FailureType.config_error
def _read(self, config: ConfigBuilder, expecting_exception: bool = False) -> EntrypointOutput:
return _read(config, SyncMode.full_refresh, expecting_exception=expecting_exception)
@freezegun.freeze_time(_NOW.isoformat())
| FullRefreshTest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/metadata.py | {
"start": 262,
"end": 470
} | class ____(graphene.ObjectType):
key = graphene.NonNull(graphene.String)
value = graphene.NonNull(graphene.String)
class Meta:
name = "MetadataItemDefinition"
| GrapheneMetadataItemDefinition |
python | django__django | tests/auth_tests/urls.py | {
"start": 2964,
"end": 3016
} | class ____(EmptyResponseBaseView):
pass
| PublicView |
python | streamlit__streamlit | lib/streamlit/elements/widgets/data_editor.py | {
"start": 4059,
"end": 20624
} | class ____:
"""DataEditorSerde is used to serialize and deserialize the data editor state."""
def deserialize(self, ui_value: str | None) -> EditingState:
data_editor_state: EditingState = cast(
"EditingState",
{
"edited_rows": {},
"added_rows": [],
"deleted_rows": [],
}
if ui_value is None
else json.loads(ui_value),
)
# Make sure that all editing state keys are present:
if "edited_rows" not in data_editor_state:
data_editor_state["edited_rows"] = {} # type: ignore[unreachable]
if "deleted_rows" not in data_editor_state:
data_editor_state["deleted_rows"] = [] # type: ignore[unreachable]
if "added_rows" not in data_editor_state:
data_editor_state["added_rows"] = [] # type: ignore[unreachable]
# Convert the keys (numerical row positions) to integers.
# The keys are strings because they are serialized to JSON.
data_editor_state["edited_rows"] = {
int(k): v
for k, v in data_editor_state["edited_rows"].items() # ty: ignore[possibly-missing-attribute]
}
return data_editor_state
def serialize(self, editing_state: EditingState) -> str:
return json.dumps(editing_state, default=str)
def _parse_value(
value: str | int | float | bool | list[str] | None,
column_data_kind: ColumnDataKind,
) -> Any:
"""Convert a value to the correct type.
Parameters
----------
value : str | int | float | bool | list[str] | None
The value to convert.
column_data_kind : ColumnDataKind
The determined data kind of the column. The column data kind refers to the
shared data type of the values in the column (e.g. int, float, str).
Returns
-------
The converted value.
"""
if value is None:
return None
import pandas as pd
try:
if column_data_kind in (ColumnDataKind.LIST, ColumnDataKind.EMPTY):
return list(value) if is_list_like(value) else [value] # ty: ignore
if column_data_kind == ColumnDataKind.STRING:
return str(value)
# List values aren't supported for anything else than list column data kind.
# To make the type checker happy, we raise a TypeError here. However,
# This isn't expected to happen.
if isinstance(value, list):
raise TypeError( # noqa: TRY301
"List values are only supported by list, string and empty columns."
)
if column_data_kind == ColumnDataKind.INTEGER:
return int(value)
if column_data_kind == ColumnDataKind.FLOAT:
return float(value)
if column_data_kind == ColumnDataKind.BOOLEAN:
return bool(value)
if column_data_kind == ColumnDataKind.DECIMAL:
# Decimal theoretically can also be initialized via number values.
# However, using number values here seems to cause issues with Arrow
# serialization, once you try to render the returned dataframe.
return Decimal(str(value))
if column_data_kind == ColumnDataKind.TIMEDELTA:
return pd.Timedelta(value)
if column_data_kind in [
ColumnDataKind.DATETIME,
ColumnDataKind.DATE,
ColumnDataKind.TIME,
]:
datetime_value = pd.Timestamp(value) # ty: ignore
if pd.isna(datetime_value):
return None # type: ignore[unreachable]
if column_data_kind == ColumnDataKind.DATETIME:
return datetime_value
if column_data_kind == ColumnDataKind.DATE:
return datetime_value.date()
if column_data_kind == ColumnDataKind.TIME:
return datetime_value.time()
except (ValueError, pd.errors.ParserError, TypeError) as ex:
_LOGGER.warning(
"Failed to parse value %s as %s.",
value,
column_data_kind,
exc_info=ex,
)
return None
return value
def _apply_cell_edits(
df: pd.DataFrame,
edited_rows: Mapping[
int, Mapping[str, str | int | float | bool | list[str] | None]
],
dataframe_schema: DataframeSchema,
) -> None:
"""Apply cell edits to the provided dataframe (inplace).
Parameters
----------
df : pd.DataFrame
The dataframe to apply the cell edits to.
edited_rows : Mapping[int, Mapping[str, str | int | float | bool | None]]
A hierarchical mapping based on row position -> column name -> value
dataframe_schema: DataframeSchema
The schema of the dataframe.
"""
for row_id, row_changes in edited_rows.items():
row_pos = int(row_id)
for col_name, value in row_changes.items():
if col_name == INDEX_IDENTIFIER:
# The edited cell is part of the index
# TODO(lukasmasuch): To support multi-index in the future:
# use a tuple of values here instead of a single value
old_idx_value = df.index[row_pos]
new_idx_value = _parse_value(value, dataframe_schema[INDEX_IDENTIFIER])
df.rename(
index={old_idx_value: new_idx_value},
inplace=True, # noqa: PD002
)
else:
col_pos = df.columns.get_loc(col_name)
df.iat[row_pos, col_pos] = _parse_value( # type: ignore
value, dataframe_schema[col_name]
)
def _parse_added_row(
df: pd.DataFrame,
added_row: dict[str, Any],
dataframe_schema: DataframeSchema,
) -> tuple[Any, list[Any]]:
"""Parse the added row into an optional index value and a list of row values."""
index_value = None
new_row: list[Any] = [None for _ in range(df.shape[1])]
for col_name, value in added_row.items():
if col_name == INDEX_IDENTIFIER:
# TODO(lukasmasuch): To support multi-index in the future:
# use a tuple of values here instead of a single value
index_value = _parse_value(value, dataframe_schema[INDEX_IDENTIFIER])
else:
col_pos = cast("int", df.columns.get_loc(col_name))
new_row[col_pos] = _parse_value(value, dataframe_schema[col_name])
return index_value, new_row
def _assign_row_values(
df: pd.DataFrame,
row_label: Any,
row_values: list[Any],
) -> None:
"""Assign values to a dataframe row via a mapping.
This avoids numpy attempting to coerce nested sequences (e.g. lists) into
multi-dimensional arrays when a column legitimately stores list values.
"""
df.loc[row_label] = dict(zip(df.columns, row_values, strict=True))
def _apply_row_additions(
df: pd.DataFrame,
added_rows: list[dict[str, Any]],
dataframe_schema: DataframeSchema,
) -> None:
"""Apply row additions to the provided dataframe (inplace).
Parameters
----------
df : pd.DataFrame
The dataframe to apply the row additions to.
added_rows : List[Dict[str, Any]]
A list of row additions. Each row addition is a dictionary with the
column position as key and the new cell value as value.
dataframe_schema: DataframeSchema
The schema of the dataframe.
"""
if not added_rows:
return
import pandas as pd
index_type: Literal["range", "integer", "other"] = "other"
# This is only used if the dataframe has a range or integer index that can be
# auto incremented:
index_stop: int | None = None
index_step: int | None = None
if isinstance(df.index, pd.RangeIndex):
# Extract metadata from the range index:
index_type = "range"
index_stop = df.index.stop
index_step = df.index.step
elif isinstance(df.index, pd.Index) and pd.api.types.is_integer_dtype(
df.index.dtype
):
# Get highest integer value and increment it by 1 to get unique index value.
index_type = "integer"
index_stop = 0 if df.index.empty else df.index.max() + 1
index_step = 1
for added_row in added_rows:
index_value, new_row = _parse_added_row(df, added_row, dataframe_schema)
if index_value is not None and index_type != "range":
# Case 1: Non-range index with an explicitly provided index value
# Add row using the user-provided index value.
# This handles any type of index that cannot be auto incremented.
# Note: this just overwrites the row in case the index value
# already exists. In the future, it would be better to
# require users to provide unique non-None values for the index with
# some kind of visual indications.
_assign_row_values(df, index_value, new_row)
continue
if index_stop is not None and index_step is not None:
# Case 2: Range or integer index that can be auto incremented.
# Add row using the next value in the sequence
_assign_row_values(df, index_stop, new_row)
# Increment to the next range index value
index_stop += index_step
continue
# Row cannot be added -> skip it and log a warning.
_LOGGER.warning(
"Cannot automatically add row for the index "
"of type %s without an explicit index value. Row addition skipped.",
type(df.index).__name__,
)
def _apply_row_deletions(df: pd.DataFrame, deleted_rows: list[int]) -> None:
"""Apply row deletions to the provided dataframe (inplace).
Parameters
----------
df : pd.DataFrame
The dataframe to apply the row deletions to.
deleted_rows : List[int]
A list of row numbers to delete.
"""
# Drop rows based in numeric row positions
df.drop(df.index[deleted_rows], inplace=True) # noqa: PD002
def _apply_dataframe_edits(
df: pd.DataFrame,
data_editor_state: EditingState,
dataframe_schema: DataframeSchema,
) -> None:
"""Apply edits to the provided dataframe (inplace).
This includes cell edits, row additions and row deletions.
Parameters
----------
df : pd.DataFrame
The dataframe to apply the edits to.
data_editor_state : EditingState
The editing state of the data editor component.
dataframe_schema: DataframeSchema
The schema of the dataframe.
"""
if data_editor_state.get("edited_rows"):
_apply_cell_edits(df, data_editor_state["edited_rows"], dataframe_schema)
if data_editor_state.get("deleted_rows"):
_apply_row_deletions(df, data_editor_state["deleted_rows"])
if data_editor_state.get("added_rows"):
# The addition of new rows needs to happen after the deletion to not have
# unexpected side-effects, like https://github.com/streamlit/streamlit/issues/8854
_apply_row_additions(df, data_editor_state["added_rows"], dataframe_schema)
def _is_supported_index(df_index: pd.Index[Any]) -> bool:
"""Check if the index is supported by the data editor component.
Parameters
----------
df_index : pd.Index
The index to check.
Returns
-------
bool
True if the index is supported, False otherwise.
"""
import pandas as pd
return (
type(df_index)
in [
pd.RangeIndex,
pd.Index,
pd.DatetimeIndex,
pd.CategoricalIndex,
# Interval type isn't editable currently:
# pd.IntervalIndex,
# Period type isn't editable currently:
# pd.PeriodIndex,
]
# We need to check these index types without importing, since they are
# deprecated and planned to be removed soon.
or is_type(df_index, "pandas.core.indexes.numeric.Int64Index")
or is_type(df_index, "pandas.core.indexes.numeric.Float64Index")
or is_type(df_index, "pandas.core.indexes.numeric.UInt64Index")
)
def _fix_column_headers(data_df: pd.DataFrame) -> None:
"""Fix the column headers of the provided dataframe inplace to work
correctly for data editing.
"""
import pandas as pd
if isinstance(data_df.columns, pd.MultiIndex):
# Flatten hierarchical column headers to a single level:
data_df.columns = [
"_".join(map(str, header))
for header in data_df.columns.to_flat_index() # type: ignore
]
elif pd.api.types.infer_dtype(data_df.columns) != "string":
# If the column names are not all strings, we need to convert them to strings
# to avoid issues with editing:
data_df.rename(
columns={column: str(column) for column in data_df.columns},
inplace=True, # noqa: PD002
)
def _check_column_names(data_df: pd.DataFrame) -> None:
"""Check if the column names in the provided dataframe are valid.
It's not allowed to have duplicate column names or column names that are
named ``_index``. If the column names are not valid, a ``StreamlitAPIException``
is raised.
"""
if data_df.columns.empty:
return
# Check if the column names are unique and raise an exception if not.
# Add the names of the duplicated columns to the exception message.
duplicated_columns = data_df.columns[data_df.columns.duplicated()]
if len(duplicated_columns) > 0:
raise StreamlitAPIException(
f"All column names are required to be unique for usage with data editor. "
f"The following column names are duplicated: {list(duplicated_columns)}. "
f"Please rename the duplicated columns in the provided data."
)
# Check if the column names are not named "_index" and raise an exception if so.
if INDEX_IDENTIFIER in data_df.columns:
raise StreamlitAPIException(
f"The column name '{INDEX_IDENTIFIER}' is reserved for the index column "
f"and can't be used for data columns. Please rename the column in the "
f"provided data."
)
def _check_type_compatibilities(
data_df: pd.DataFrame,
columns_config: ColumnConfigMapping,
dataframe_schema: DataframeSchema,
) -> None:
"""Check column type to data type compatibility.
Iterates the index and all columns of the dataframe to check if
the configured column types are compatible with the underlying data types.
Parameters
----------
data_df : pd.DataFrame
The dataframe to check the type compatibilities for.
columns_config : ColumnConfigMapping
A mapping of column to column configurations.
dataframe_schema : DataframeSchema
The schema of the dataframe.
Raises
------
StreamlitAPIException
If a configured column type is editable and not compatible with the
underlying data type.
"""
# TODO(lukasmasuch): Update this here to support multi-index in the future:
indices = [(INDEX_IDENTIFIER, data_df.index)]
for column in indices + list(data_df.items()):
column_name = str(column[0])
column_data_kind = dataframe_schema[column_name]
# TODO(lukasmasuch): support column config via numerical index here?
if column_name in columns_config:
column_config = columns_config[column_name]
if column_config.get("disabled") is True:
# Disabled columns are not checked for compatibility.
# This might change in the future.
continue
type_config = column_config.get("type_config")
if type_config is None:
continue
configured_column_type = type_config.get("type")
if configured_column_type is None:
continue
if is_type_compatible(configured_column_type, column_data_kind) is False:
raise StreamlitAPIException(
f"The configured column type `{configured_column_type}` for column "
f"`{column_name}` is not compatible for editing the underlying "
f"data type `{column_data_kind}`.\n\nYou have following options to "
f"fix this: 1) choose a compatible type 2) disable the column "
f"3) convert the column into a compatible data type."
)
| DataEditorSerde |
python | ansible__ansible | test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py | {
"start": 13976,
"end": 14502
} | class ____:
"""Configuration for the environment."""
def __init__(
self,
env_vars: t.Optional[dict[str, str]] = None,
ansible_vars: t.Optional[dict[str, t.Any]] = None,
module_defaults: t.Optional[dict[str, dict[str, t.Any]]] = None,
callback_plugins: t.Optional[list[str]] = None,
):
self.env_vars = env_vars
self.ansible_vars = ansible_vars
self.module_defaults = module_defaults
self.callback_plugins = callback_plugins
| CloudEnvironmentConfig |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 219227,
"end": 220043
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, datasetId: str, clean: Optional[bool] = None):
"""Airbyte Source for Apify Dataset.
Documentation can be found at https://docs.airbyte.com/integrations/sources/apify-dataset
Args:
name (str): The name of the destination.
datasetId (str): ID of the dataset you would like to load to Airbyte.
clean (Optional[bool]): If set to true, only clean items will be downloaded from the dataset. See description of what clean means in Apify API docs. If not sure, set clean to false.
"""
self.datasetId = check.str_param(datasetId, "datasetId")
self.clean = check.opt_bool_param(clean, "clean")
super().__init__("Apify Dataset", name)
| ApifyDatasetSource |
python | getsentry__sentry | src/sentry/sentry_apps/api/bases/sentryapps.py | {
"start": 4355,
"end": 7521
} | class ____(IntegrationPlatformEndpoint):
permission_classes: tuple[type[BasePermission], ...] = (SentryAppsAndStaffPermission,)
def _get_organization_slug(self, request: Request):
organization_slug = request.data.get("organization")
if not organization_slug or not isinstance(organization_slug, str):
error_message = "Please provide a valid value for the 'organization' field."
raise SentryAppError(message=error_message, status_code=404)
return organization_slug
def _get_organization_for_superuser_or_staff(
self, user: RpcUser | User, organization_slug: str
) -> RpcUserOrganizationContext:
context = organization_service.get_organization_by_slug(
slug=organization_slug, only_visible=False, user_id=user.id
)
if context is None:
error_message = f"Organization '{organization_slug}' does not exist."
raise SentryAppError(message=error_message, status_code=404)
return context
def _get_organization_for_user(
self, user: RpcUser | User, organization_slug: str
) -> RpcUserOrganizationContext:
context = organization_service.get_organization_by_slug(
slug=organization_slug, only_visible=True, user_id=user.id
)
if context is None or context.member is None:
error_message = f"User does not belong to the '{organization_slug}' organization."
raise SentryAppError(message=to_single_line_str(error_message), status_code=403)
return context
def _get_org_context(self, request: Request) -> RpcUserOrganizationContext:
organization_slug = self._get_organization_slug(request)
assert request.user.is_authenticated, "User must be authenticated to get organization"
if is_active_superuser(request) or is_active_staff(request):
return self._get_organization_for_superuser_or_staff(request.user, organization_slug)
else:
return self._get_organization_for_user(request.user, organization_slug)
def convert_args(self, request: Request, *args, **kwargs):
"""
This baseclass is the SentryApp collection endpoints:
[GET, POST] /sentry-apps
The GET endpoint is public and doesn't require (or handle) any query
params or request body.
The POST endpoint is for creating a Sentry App. Part of that creation
is associating it with the Organization that it's created within.
So in the case of POST requests, we want to pull the Organization out
of the request body so that we can ensure the User making the request
has access to it.
Since ``convert_args`` is conventionally where you materialize model
objects from URI params, we're applying the same logic for a param in
the request body.
"""
if not request.data:
return (args, kwargs)
context = self._get_org_context(request)
self.check_object_permissions(request, context)
kwargs["organization"] = context.organization
return (args, kwargs)
| SentryAppsBaseEndpoint |
python | huggingface__transformers | tests/models/fuyu/test_processing_fuyu.py | {
"start": 26304,
"end": 27684
} | class ____(unittest.TestCase):
def setUp(self):
"""
Adding a mix of present and absent images.
"""
self.image_input = torch.randn([1, 1, 3, 64, 64])
self.image_present = torch.tensor([[1]])
self.image_unpadded_h = torch.tensor([[45]]) # Adjusted for subsequence of 1
self.image_unpadded_w = torch.tensor([[50]]) # Adjusted for subsequence of 1
self.image_patch_dim_h = 16
self.image_patch_dim_w = 16
self.image_placeholder_id = 999
self.image_newline_id = 888
self.variable_sized = True
self.image_processor = FuyuImageProcessor(
patch_size={"height": self.image_patch_dim_h, "width": self.image_patch_dim_w}
)
def test_process_images_for_model_input_fixed_sized(self):
self.variable_sized = False
result = self.image_processor.preprocess_with_tokenizer_info(
image_input=self.image_input,
image_present=self.image_present,
image_unpadded_h=self.image_unpadded_h,
image_unpadded_w=self.image_unpadded_w,
image_placeholder_id=self.image_placeholder_id,
image_newline_id=self.image_newline_id,
variable_sized=self.variable_sized,
)
self.assertEqual(result["images"][0][0].shape, torch.Size([3, 64, 64]))
| TestProcessImagesForModelInput |
python | huggingface__transformers | tests/models/bit/test_modeling_bit.py | {
"start": 1353,
"end": 5463
} | class ____:
def __init__(
self,
parent,
batch_size=3,
image_size=32,
num_channels=3,
embeddings_size=10,
hidden_sizes=[8, 16, 32, 64],
depths=[1, 1, 2, 1],
is_training=True,
use_labels=True,
hidden_act="relu",
num_labels=3,
scope=None,
out_features=["stage2", "stage3", "stage4"],
out_indices=[2, 3, 4],
num_groups=1,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.embeddings_size = embeddings_size
self.hidden_sizes = hidden_sizes
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.num_labels = num_labels
self.scope = scope
self.num_stages = len(hidden_sizes)
self.out_features = out_features
self.out_indices = out_indices
self.num_groups = num_groups
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return BitConfig(
num_channels=self.num_channels,
embeddings_size=self.embeddings_size,
hidden_sizes=self.hidden_sizes,
depths=self.depths,
hidden_act=self.hidden_act,
num_labels=self.num_labels,
out_features=self.out_features,
out_indices=self.out_indices,
num_groups=self.num_groups,
)
def create_and_check_model(self, config, pixel_values, labels):
model = BitModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape,
(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
)
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = BitForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_backbone(self, config, pixel_values, labels):
model = BitBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
# verify backbone works with out_features=None
config.out_features = None
model = BitBackbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels), 1)
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| BitModelTester |
python | ansible__ansible | lib/ansible/modules/service_facts.py | {
"start": 15438,
"end": 17869
} | class ____(BaseService):
def query_rcctl(self, cmd):
svcs = []
rc, stdout, stderr = self.module.run_command("%s ls %s" % (self.rcctl_path, cmd))
if 'needs root privileges' in stderr.lower():
self.module.warn('rcctl requires root privileges')
else:
for svc in stdout.split('\n'):
if svc == '':
continue
else:
svcs.append(svc)
return svcs
def get_info(self, name):
info = {}
rc, stdout, stderr = self.module.run_command("%s get %s" % (self.rcctl_path, name))
if 'needs root privileges' in stderr.lower():
self.module.warn('rcctl requires root privileges')
else:
undy = '%s_' % name
for variable in stdout.split('\n'):
if variable == '' or '=' not in variable:
continue
else:
k, v = variable.replace(undy, '', 1).split('=', 1)
info[k] = v
return info
def gather_services(self):
services = {}
self.rcctl_path = self.module.get_bin_path("rcctl")
if self.rcctl_path:
# populate services will all possible
for svc in self.query_rcctl('all'):
services[svc] = {'name': svc, 'source': 'rcctl', 'rogue': False}
services[svc].update(self.get_info(svc))
for svc in self.query_rcctl('on'):
services[svc].update({'status': 'enabled'})
for svc in self.query_rcctl('started'):
services[svc].update({'state': 'running'})
# Override the state for services which are marked as 'failed'
for svc in self.query_rcctl('failed'):
services[svc].update({'state': 'failed'})
for svc in services.keys():
# Based on the list of services that are enabled/failed, determine which are disabled
if services[svc].get('status') is None:
services[svc].update({'status': 'disabled'})
# and do the same for those are aren't running
if services[svc].get('state') is None:
services[svc].update({'state': 'stopped'})
for svc in self.query_rcctl('rogue'):
services[svc]['rogue'] = True
return services
| OpenBSDScanService |
python | tensorflow__tensorflow | tensorflow/python/data/benchmarks/benchmark_base.py | {
"start": 1076,
"end": 9638
} | class ____(test.Benchmark):
"""Base class for dataset benchmarks."""
def _run_eager_benchmark(self, iterable, iters, warmup):
"""Benchmark the iterable in eager mode.
Runs the iterable `iters` times. In each iteration, the benchmark measures
the time it takes to go execute the iterable.
Args:
iterable: The tf op or tf.data Dataset to benchmark.
iters: Number of times to repeat the timing.
warmup: If true, warms up the session caches by running an untimed run.
Returns:
A float, representing the median time (with respect to `iters`)
it takes for the iterable to be executed `iters` num of times.
Raises:
RuntimeError: When executed in graph mode.
"""
deltas = []
if not context.executing_eagerly():
raise RuntimeError(
"Eager mode benchmarking is not supported in graph mode.")
for _ in range(iters):
if warmup:
iterator = iter(iterable)
next(iterator)
iterator = iter(iterable)
start = time.time()
next(iterator)
end = time.time()
deltas.append(end - start)
return np.median(deltas)
def _run_graph_benchmark(self,
iterable,
iters,
warmup,
session_config,
initializer=None):
"""Benchmarks the iterable in graph mode.
Runs the iterable `iters` times. In each iteration, the benchmark measures
the time it takes to go execute the iterable.
Args:
iterable: The tf op or tf.data Dataset to benchmark.
iters: Number of times to repeat the timing.
warmup: If true, warms up the session caches by running an untimed run.
session_config: A ConfigProto protocol buffer with configuration options
for the session. Applicable only for benchmarking in graph mode.
initializer: The initializer op required to initialize the iterable.
Returns:
A float, representing the median time (with respect to `iters`)
it takes for the iterable to be executed `iters` num of times.
Raises:
RuntimeError: When executed in eager mode.
"""
deltas = []
if context.executing_eagerly():
raise RuntimeError(
"Graph mode benchmarking is not supported in eager mode.")
for _ in range(iters):
with session.Session(config=session_config) as sess:
if warmup:
# Run once to warm up the session caches.
if initializer:
sess.run(initializer)
sess.run(iterable)
if initializer:
sess.run(initializer)
start = time.time()
sess.run(iterable)
end = time.time()
deltas.append(end - start)
return np.median(deltas)
def run_op_benchmark(self, op, iters=1, warmup=True, session_config=None):
"""Benchmarks the op.
Runs the op `iters` times. In each iteration, the benchmark measures
the time it takes to go execute the op.
Args:
op: The tf op to benchmark.
iters: Number of times to repeat the timing.
warmup: If true, warms up the session caches by running an untimed run.
session_config: A ConfigProto protocol buffer with configuration options
for the session. Applicable only for benchmarking in graph mode.
Returns:
A float, representing the per-execution wall time of the op in seconds.
This is the median time (with respect to `iters`) it takes for the op
to be executed `iters` num of times.
"""
if context.executing_eagerly():
return self._run_eager_benchmark(iterable=op, iters=iters, warmup=warmup)
return self._run_graph_benchmark(
iterable=op, iters=iters, warmup=warmup, session_config=session_config)
def run_benchmark(self,
dataset,
num_elements,
iters=1,
warmup=True,
apply_default_optimizations=False,
session_config=None):
"""Benchmarks the dataset.
Runs the dataset `iters` times. In each iteration, the benchmark measures
the time it takes to go through `num_elements` elements of the dataset.
Args:
dataset: Dataset to benchmark.
num_elements: Number of dataset elements to iterate through each benchmark
iteration.
iters: Number of times to repeat the timing.
warmup: If true, warms up the session caches by running an untimed run.
apply_default_optimizations: Determines whether default optimizations
should be applied.
session_config: A ConfigProto protocol buffer with configuration options
for the session. Applicable only for benchmarking in graph mode.
Returns:
A float, representing the per-element wall time of the dataset in seconds.
This is the median time (with respect to `iters`) it takes for the dataset
to go through `num_elements` elements, divided by `num_elements.`
"""
# The options that have been applied to the dataset are preserved so that
# they are not overwritten while benchmarking.
options = options_lib.Options()
options.experimental_optimization.apply_default_optimizations = (
apply_default_optimizations)
dataset = dataset.with_options(options)
# NOTE: We use `dataset.skip()` to perform the iterations in C++, avoiding
# the overhead of having to execute a TensorFlow op for each step of the
# input pipeline. Note that this relies on the underlying implementation of
# `skip` to execute upstream computation. If it is optimized in the future,
# we will have to change this code.
dataset = dataset.skip(num_elements - 1)
if context.executing_eagerly():
median_duration = self._run_eager_benchmark(
iterable=dataset, iters=iters, warmup=warmup)
return median_duration / float(num_elements)
iterator = dataset_ops.make_initializable_iterator(dataset)
next_element = iterator.get_next()
op = nest.flatten(next_element)[0].op
median_duration = self._run_graph_benchmark(
iterable=op,
iters=iters,
warmup=warmup,
session_config=session_config,
initializer=iterator.initializer)
return median_duration / float(num_elements)
def run_and_report_benchmark(self,
dataset,
num_elements,
name,
iters=5,
extras=None,
warmup=True,
apply_default_optimizations=False,
session_config=None):
"""Benchmarks the dataset and reports the stats.
Runs the dataset `iters` times. In each iteration, the benchmark measures
the time it takes to go through `num_elements` elements of the dataset.
This is followed by logging/printing the benchmark stats.
Args:
dataset: Dataset to benchmark.
num_elements: Number of dataset elements to iterate through each benchmark
iteration.
name: Name of the benchmark.
iters: Number of times to repeat the timing.
extras: A dict which maps string keys to additional benchmark info.
warmup: If true, warms up the session caches by running an untimed run.
apply_default_optimizations: Determines whether default optimizations
should be applied.
session_config: A ConfigProto protocol buffer with configuration options
for the session. Applicable only for benchmarking in graph mode.
Returns:
A float, representing the per-element wall time of the dataset in seconds.
This is the median time (with respect to `iters`) it takes for the dataset
to go through `num_elements` elements, divided by `num_elements.`
"""
wall_time = self.run_benchmark(
dataset=dataset,
num_elements=num_elements,
iters=iters,
warmup=warmup,
apply_default_optimizations=apply_default_optimizations,
session_config=session_config)
if extras is None:
extras = {}
if context.executing_eagerly():
name = "{}.eager".format(name)
extras["implementation"] = "eager"
else:
name = "{}.graph".format(name)
extras["implementation"] = "graph"
extras["num_elements"] = num_elements
self.report_benchmark(
wall_time=wall_time, iters=iters, name=name, extras=extras)
return wall_time
| DatasetBenchmarkBase |
python | django__django | django/db/backends/sqlite3/creation.py | {
"start": 207,
"end": 6784
} | class ____(BaseDatabaseCreation):
@staticmethod
def is_in_memory_db(database_name):
return not isinstance(database_name, Path) and (
database_name == ":memory:" or "mode=memory" in database_name
)
def _get_test_db_name(self):
test_database_name = self.connection.settings_dict["TEST"]["NAME"] or ":memory:"
if test_database_name == ":memory:":
return "file:memorydb_%s?mode=memory&cache=shared" % self.connection.alias
return test_database_name
def _create_test_db(self, verbosity, autoclobber, keepdb=False):
test_database_name = self._get_test_db_name()
if keepdb:
return test_database_name
if not self.is_in_memory_db(test_database_name):
# Erase the old test database
if verbosity >= 1:
self.log(
"Destroying old test database for alias %s..."
% (self._get_database_display_str(verbosity, test_database_name),)
)
if os.access(test_database_name, os.F_OK):
if not autoclobber:
confirm = input(
"Type 'yes' if you would like to try deleting the test "
"database '%s', or 'no' to cancel: " % test_database_name
)
if autoclobber or confirm == "yes":
try:
os.remove(test_database_name)
except Exception as e:
self.log("Got an error deleting the old test database: %s" % e)
sys.exit(2)
else:
self.log("Tests cancelled.")
sys.exit(1)
return test_database_name
def get_test_db_clone_settings(self, suffix):
orig_settings_dict = self.connection.settings_dict
source_database_name = orig_settings_dict["NAME"] or ":memory:"
if not self.is_in_memory_db(source_database_name):
root, ext = os.path.splitext(source_database_name)
return {**orig_settings_dict, "NAME": f"{root}_{suffix}{ext}"}
start_method = multiprocessing.get_start_method()
if start_method == "fork":
return orig_settings_dict
if start_method in {"forkserver", "spawn"}:
return {
**orig_settings_dict,
"NAME": f"{self.connection.alias}_{suffix}.sqlite3",
}
raise NotSupportedError(
f"Cloning with start method {start_method!r} is not supported."
)
def _clone_test_db(self, suffix, verbosity, keepdb=False):
source_database_name = self.connection.settings_dict["NAME"]
target_database_name = self.get_test_db_clone_settings(suffix)["NAME"]
if not self.is_in_memory_db(source_database_name):
# Erase the old test database
if os.access(target_database_name, os.F_OK):
if keepdb:
return
if verbosity >= 1:
self.log(
"Destroying old test database for alias %s..."
% (
self._get_database_display_str(
verbosity, target_database_name
),
)
)
try:
os.remove(target_database_name)
except Exception as e:
self.log("Got an error deleting the old test database: %s" % e)
sys.exit(2)
try:
shutil.copy(source_database_name, target_database_name)
except Exception as e:
self.log("Got an error cloning the test database: %s" % e)
sys.exit(2)
# Forking automatically makes a copy of an in-memory database.
# Forkserver and spawn require migrating to disk which will be
# re-opened in setup_worker_connection.
elif multiprocessing.get_start_method() in {"forkserver", "spawn"}:
ondisk_db = sqlite3.connect(target_database_name, uri=True)
self.connection.connection.backup(ondisk_db)
ondisk_db.close()
def _destroy_test_db(self, test_database_name, verbosity):
if test_database_name and not self.is_in_memory_db(test_database_name):
# Remove the SQLite database file
os.remove(test_database_name)
def test_db_signature(self):
"""
Return a tuple that uniquely identifies a test database.
This takes into account the special cases of ":memory:" and "" for
SQLite since the databases will be distinct despite having the same
TEST NAME. See https://www.sqlite.org/inmemorydb.html
"""
test_database_name = self._get_test_db_name()
sig = [self.connection.settings_dict["NAME"]]
if self.is_in_memory_db(test_database_name):
sig.append(self.connection.alias)
else:
sig.append(test_database_name)
return tuple(sig)
def setup_worker_connection(self, _worker_id):
settings_dict = self.get_test_db_clone_settings(_worker_id)
# connection.settings_dict must be updated in place for changes to be
# reflected in django.db.connections. Otherwise new threads would
# connect to the default database instead of the appropriate clone.
start_method = multiprocessing.get_start_method()
if start_method == "fork":
# Update settings_dict in place.
self.connection.settings_dict.update(settings_dict)
self.connection.close()
elif start_method in {"forkserver", "spawn"}:
alias = self.connection.alias
connection_str = (
f"file:memorydb_{alias}_{_worker_id}?mode=memory&cache=shared"
)
source_db = self.connection.Database.connect(
f"file:{alias}_{_worker_id}.sqlite3?mode=ro", uri=True
)
target_db = sqlite3.connect(connection_str, uri=True)
source_db.backup(target_db)
source_db.close()
# Update settings_dict in place.
self.connection.settings_dict.update(settings_dict)
self.connection.settings_dict["NAME"] = connection_str
# Re-open connection to in-memory database before closing copy
# connection.
self.connection.connect()
target_db.close()
| DatabaseCreation |
python | doocs__leetcode | solution/1400-1499/1461.Check If a String Contains All Binary Codes of Size K/Solution.py | {
"start": 0,
"end": 238
} | class ____:
def hasAllCodes(self, s: str, k: int) -> bool:
n = len(s)
m = 1 << k
if n - k + 1 < m:
return False
ss = {s[i : i + k] for i in range(n - k + 1)}
return len(ss) == m
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 530037,
"end": 530358
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("ProjectV2View", graphql_name="node")
| ProjectV2ViewEdge |
python | tensorflow__tensorflow | tensorflow/python/training/basic_session_run_hooks_test.py | {
"start": 51452,
"end": 53425
} | class ____(test.TestCase):
def test_not_wait_for_step_zero(self):
with ops.Graph().as_default():
training_util.get_or_create_global_step()
hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=0)
hook.begin()
with session_lib.Session() as sess:
# Before run should return without waiting gstep increment.
hook.before_run(
session_run_hook.SessionRunContext(
original_args=None, session=sess))
@test.mock.patch.object(time, 'sleep')
def test_wait_for_step(self, mock_sleep):
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
hook = basic_session_run_hooks.GlobalStepWaiterHook(wait_until_step=1000)
hook.begin()
with session_lib.Session() as sess:
# Mock out calls to time.sleep() to update the global step.
class Context:
counter = 0
def mock_sleep_side_effect(seconds):
del seconds # argument is ignored
Context.counter += 1
if Context.counter == 1:
# The first time sleep() is called, we update the global_step from
# 0 to 500.
sess.run(state_ops.assign(gstep, 500))
elif Context.counter == 2:
# The second time sleep() is called, we update the global_step from
# 500 to 1100.
sess.run(state_ops.assign(gstep, 1100))
else:
raise AssertionError(
'Expected before_run() to terminate after the second call to '
'time.sleep()')
mock_sleep.side_effect = mock_sleep_side_effect
# Run the mocked-out interaction with the hook.
self.evaluate(variables_lib.global_variables_initializer())
run_context = session_run_hook.SessionRunContext(
original_args=None, session=sess)
hook.before_run(run_context)
self.assertEqual(Context.counter, 2)
| GlobalStepWaiterHookTest |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 27058,
"end": 28761
} | class ____(test.Benchmark):
def _benchmarkAdjustSaturation(self, device, cpu_count):
image_shape = [299, 299, 3]
warmup_rounds = 100
benchmark_rounds = 1000
config = config_pb2.ConfigProto()
if cpu_count is not None:
config.inter_op_parallelism_threads = 1
config.intra_op_parallelism_threads = cpu_count
with self.benchmark_session(config=config, device=device) as sess:
inputs = variables.Variable(
random_ops.random_uniform(image_shape, dtype=dtypes.float32) * 255,
trainable=False,
dtype=dtypes.float32)
delta = constant_op.constant(0.1, dtype=dtypes.float32)
outputs = image_ops.adjust_saturation(inputs, delta)
run_op = control_flow_ops.group(outputs)
self.evaluate(variables.global_variables_initializer())
for _ in range(warmup_rounds):
self.evaluate(run_op)
start = time.time()
for _ in range(benchmark_rounds):
self.evaluate(run_op)
end = time.time()
step_time = (end - start) / benchmark_rounds
tag = device + "_%s" % (cpu_count if cpu_count is not None else "_all")
print("benchmarkAdjustSaturation_299_299_3_%s step_time: %.2f us" %
(tag, step_time * 1e6))
self.report_benchmark(
name="benchmarkAdjustSaturation_299_299_3_%s" % (tag),
iters=benchmark_rounds,
wall_time=step_time)
def benchmarkAdjustSaturationCpu1(self):
self._benchmarkAdjustSaturation("/cpu:0", 1)
def benchmarkAdjustSaturationCpuAll(self):
self._benchmarkAdjustSaturation("/cpu:0", None)
def benchmarkAdjustSaturationGpu(self):
self._benchmarkAdjustSaturation(test.gpu_device_name(), None)
| AdjustSaturationBenchmark |
python | pydantic__pydantic | tests/mypy/modules/frozen_field.py | {
"start": 291,
"end": 434
} | class ____(Parent):
child_attr: str = Field(exclude=True)
@property
def parent_attr(self) -> str:
return self.child_attr
| Child |
python | ray-project__ray | doc/source/serve/doc_code/http_guide/websockets_example.py | {
"start": 189,
"end": 895
} | class ____:
@app.websocket("/")
async def echo(self, ws: WebSocket):
await ws.accept()
try:
while True:
text = await ws.receive_text()
await ws.send_text(text)
except WebSocketDisconnect:
print("Client disconnected.")
serve_app = serve.run(EchoServer.bind())
# __websocket_serve_app_end__
# __websocket_serve_client_start__
from websockets.sync.client import connect
with connect("ws://localhost:8000") as websocket:
websocket.send("Eureka!")
assert websocket.recv() == "Eureka!"
websocket.send("I've found it!")
assert websocket.recv() == "I've found it!"
# __websocket_serve_client_end__
| EchoServer |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/external.py | {
"start": 5398,
"end": 6434
} | class ____(graphene.ObjectType):
id = graphene.NonNull(graphene.ID)
name = graphene.NonNull(graphene.String)
loadStatus = graphene.NonNull(GrapheneRepositoryLocationLoadStatus)
updateTimestamp = graphene.NonNull(graphene.Float)
versionKey = graphene.NonNull(graphene.String)
permissions = graphene.Field(non_null_list(GraphenePermission))
class Meta:
name = "WorkspaceLocationStatusEntry"
def __init__(
self,
id,
name,
load_status,
update_timestamp,
version_key,
):
super().__init__(
id=id,
name=name,
loadStatus=load_status,
updateTimestamp=update_timestamp,
versionKey=version_key,
)
def resolve_permissions(self, graphene_info):
permissions = graphene_info.context.permissions_for_location(location_name=self.name)
return [GraphenePermission(permission, value) for permission, value in permissions.items()]
| GrapheneWorkspaceLocationStatusEntry |
python | PrefectHQ__prefect | src/prefect/server/database/configurations.py | {
"start": 1437,
"end": 3523
} | class ____:
"""A test utility which tracks the connections given out by a connection pool, to
make it easy to see which connections are currently checked out and open."""
all_connections: dict[AdaptedConnection, list[str]]
open_connections: dict[AdaptedConnection, list[str]]
left_field_closes: dict[AdaptedConnection, list[str]]
connects: int
closes: int
active: bool
def __init__(self) -> None:
self.active = False
self.all_connections = {}
self.open_connections = {}
self.left_field_closes = {}
self.connects = 0
self.closes = 0
def track_pool(self, pool: sa.pool.Pool) -> None:
event.listen(pool, "connect", self.on_connect)
event.listen(pool, "close", self.on_close)
event.listen(pool, "close_detached", self.on_close_detached)
def on_connect(
self,
adapted_connection: AdaptedConnection,
connection_record: ConnectionPoolEntry,
) -> None:
self.all_connections[adapted_connection] = traceback.format_stack()
self.open_connections[adapted_connection] = traceback.format_stack()
self.connects += 1
def on_close(
self,
adapted_connection: AdaptedConnection,
connection_record: ConnectionPoolEntry,
) -> None:
try:
del self.open_connections[adapted_connection]
except KeyError:
self.left_field_closes[adapted_connection] = traceback.format_stack()
self.closes += 1
def on_close_detached(
self,
adapted_connection: AdaptedConnection,
) -> None:
try:
del self.open_connections[adapted_connection]
except KeyError:
self.left_field_closes[adapted_connection] = traceback.format_stack()
self.closes += 1
def clear(self) -> None:
self.all_connections.clear()
self.open_connections.clear()
self.left_field_closes.clear()
self.connects = 0
self.closes = 0
TRACKER: ConnectionTracker = ConnectionTracker()
| ConnectionTracker |
python | tensorflow__tensorflow | tensorflow/python/framework/stack.py | {
"start": 965,
"end": 4338
} | class ____(threading.local, Generic[T]):
"""A thread-local stack of objects for providing implicit defaults."""
def __init__(self):
super().__init__()
self._enforce_nesting = True
self.stack: list[T] = []
def get_default(self) -> Optional[T]:
return self.stack[-1] if self.stack else None
def reset(self) -> None:
self.stack = []
def is_cleared(self) -> bool:
return not self.stack
@property
def enforce_nesting(self) -> bool:
return self._enforce_nesting
@enforce_nesting.setter
def enforce_nesting(self, value: bool):
self._enforce_nesting = value
@tf_contextlib.contextmanager
def get_controller(self, default: T) -> Iterator[T]:
"""A context manager for manipulating a default stack."""
self.stack.append(default)
try:
yield default
finally:
# stack may be empty if reset() was called
if self.stack:
if self._enforce_nesting:
if self.stack[-1] is not default:
raise AssertionError(
"Nesting violated for default stack of %s objects" %
type(default))
self.stack.pop()
else:
self.stack.remove(default)
_default_session_stack = DefaultStack()
def default_session(session):
"""Python "with" handler for defining a default session.
This function provides a means of registering a session for handling
Tensor.eval() and Operation.run() calls. It is primarily intended for use
by session.Session, but can be used with any object that implements
the Session.run() interface.
Use with the "with" keyword to specify that Tensor.eval() and Operation.run()
invocations within the scope of a block should be executed by a particular
session.
The default session applies to the current thread only, so it is always
possible to inspect the call stack and determine the scope of a default
session. If you create a new thread, and wish to use the default session
in that thread, you must explicitly add a "with ops.default_session(sess):"
block in that thread's function.
Example:
The following code examples are equivalent:
# 1. Using the Session object directly:
sess = ...
c = tf.constant(5.0)
sess.run(c)
# 2. Using default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
result = c.eval()
# 3. Overriding default_session():
sess = ...
with ops.default_session(sess):
c = tf.constant(5.0)
with ops.default_session(...):
c.eval(session=sess)
Args:
session: The session to be installed as the default session.
Returns:
A context manager for the default session.
"""
return _default_session_stack.get_controller(session)
@tf_export(v1=["get_default_session"])
def get_default_session():
"""Returns the default session for the current thread.
The returned `Session` will be the innermost session on which a
`Session` or `Session.as_default()` context has been entered.
NOTE: The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
Returns:
The default `Session` being used in the current thread.
"""
return _default_session_stack.get_default()
| DefaultStack |
python | mahmoud__boltons | boltons/statsutils.py | {
"start": 6396,
"end": 29923
} | class ____:
"""The ``Stats`` type is used to represent a group of unordered
statistical datapoints for calculations such as mean, median, and
variance.
Args:
data (list): List or other iterable containing numeric values.
default (float): A value to be returned when a given
statistical measure is not defined. 0.0 by default, but
``float('nan')`` is appropriate for stricter applications.
use_copy (bool): By default Stats objects copy the initial
data into a new list to avoid issues with
modifications. Pass ``False`` to disable this behavior.
is_sorted (bool): Presorted data can skip an extra sorting
step for a little speed boost. Defaults to False.
"""
def __init__(self, data, default=0.0, use_copy=True, is_sorted=False):
self._use_copy = use_copy
self._is_sorted = is_sorted
if use_copy:
self.data = list(data)
else:
self.data = data
self.default = default
cls = self.__class__
self._prop_attr_names = [a for a in dir(self)
if isinstance(getattr(cls, a, None),
_StatsProperty)]
self._pearson_precision = 0
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def _get_sorted_data(self):
"""When using a copy of the data, it's better to have that copy be
sorted, but we do it lazily using this method, in case no
sorted measures are used. I.e., if median is never called,
sorting would be a waste.
When not using a copy, it's presumed that all optimizations
are on the user.
"""
if not self._use_copy:
return sorted(self.data)
elif not self._is_sorted:
self.data.sort()
return self.data
def clear_cache(self):
"""``Stats`` objects automatically cache intermediary calculations
that can be reused. For instance, accessing the ``std_dev``
attribute after the ``variance`` attribute will be
significantly faster for medium-to-large datasets.
If you modify the object by adding additional data points,
call this function to have the cached statistics recomputed.
"""
for attr_name in self._prop_attr_names:
attr_name = getattr(self.__class__, attr_name).internal_name
if not hasattr(self, attr_name):
continue
delattr(self, attr_name)
return
def _calc_count(self):
"""The number of items in this Stats object. Returns the same as
:func:`len` on a Stats object, but provided for pandas terminology
parallelism.
>>> Stats(range(20)).count
20
"""
return len(self.data)
count = _StatsProperty('count', _calc_count)
def _calc_mean(self):
"""
The arithmetic mean, or "average". Sum of the values divided by
the number of values.
>>> mean(range(20))
9.5
>>> mean(list(range(19)) + [949]) # 949 is an arbitrary outlier
56.0
"""
return sum(self.data, 0.0) / len(self.data)
mean = _StatsProperty('mean', _calc_mean)
def _calc_max(self):
"""
The maximum value present in the data.
>>> Stats([2, 1, 3]).max
3
"""
if self._is_sorted:
return self.data[-1]
return max(self.data)
max = _StatsProperty('max', _calc_max)
def _calc_min(self):
"""
The minimum value present in the data.
>>> Stats([2, 1, 3]).min
1
"""
if self._is_sorted:
return self.data[0]
return min(self.data)
min = _StatsProperty('min', _calc_min)
def _calc_median(self):
"""
The median is either the middle value or the average of the two
middle values of a sample. Compared to the mean, it's generally
more resilient to the presence of outliers in the sample.
>>> median([2, 1, 3])
2
>>> median(range(97))
48
>>> median(list(range(96)) + [1066]) # 1066 is an arbitrary outlier
48
"""
return self._get_quantile(self._get_sorted_data(), 0.5)
median = _StatsProperty('median', _calc_median)
def _calc_iqr(self):
"""Inter-quartile range (IQR) is the difference between the 75th
percentile and 25th percentile. IQR is a robust measure of
dispersion, like standard deviation, but safer to compare
between datasets, as it is less influenced by outliers.
>>> iqr([1, 2, 3, 4, 5])
2
>>> iqr(range(1001))
500
"""
return self.get_quantile(0.75) - self.get_quantile(0.25)
iqr = _StatsProperty('iqr', _calc_iqr)
def _calc_trimean(self):
"""The trimean is a robust measure of central tendency, like the
median, that takes the weighted average of the median and the
upper and lower quartiles.
>>> trimean([2, 1, 3])
2.0
>>> trimean(range(97))
48.0
>>> trimean(list(range(96)) + [1066]) # 1066 is an arbitrary outlier
48.0
"""
sorted_data = self._get_sorted_data()
gq = lambda q: self._get_quantile(sorted_data, q)
return (gq(0.25) + (2 * gq(0.5)) + gq(0.75)) / 4.0
trimean = _StatsProperty('trimean', _calc_trimean)
def _calc_variance(self):
"""\
Variance is the average of the squares of the difference between
each value and the mean.
>>> variance(range(97))
784.0
"""
global mean # defined elsewhere in this file
return mean(self._get_pow_diffs(2))
variance = _StatsProperty('variance', _calc_variance)
def _calc_std_dev(self):
"""\
Standard deviation. Square root of the variance.
>>> std_dev(range(97))
28.0
"""
return self.variance ** 0.5
std_dev = _StatsProperty('std_dev', _calc_std_dev)
def _calc_median_abs_dev(self):
"""\
Median Absolute Deviation is a robust measure of statistical
dispersion: http://en.wikipedia.org/wiki/Median_absolute_deviation
>>> median_abs_dev(range(97))
24.0
"""
global median # defined elsewhere in this file
sorted_vals = sorted(self.data)
x = float(median(sorted_vals))
return median([abs(x - v) for v in sorted_vals])
median_abs_dev = _StatsProperty('median_abs_dev', _calc_median_abs_dev)
mad = median_abs_dev # convenience
def _calc_rel_std_dev(self):
"""\
Standard deviation divided by the absolute value of the average.
http://en.wikipedia.org/wiki/Relative_standard_deviation
>>> print('%1.3f' % rel_std_dev(range(97)))
0.583
"""
abs_mean = abs(self.mean)
if abs_mean:
return self.std_dev / abs_mean
else:
return self.default
rel_std_dev = _StatsProperty('rel_std_dev', _calc_rel_std_dev)
def _calc_skewness(self):
"""\
Indicates the asymmetry of a curve. Positive values mean the bulk
of the values are on the left side of the average and vice versa.
http://en.wikipedia.org/wiki/Skewness
See the module docstring for more about statistical moments.
>>> skewness(range(97)) # symmetrical around 48.0
0.0
>>> left_skewed = skewness(list(range(97)) + list(range(10)))
>>> right_skewed = skewness(list(range(97)) + list(range(87, 97)))
>>> round(left_skewed, 3), round(right_skewed, 3)
(0.114, -0.114)
"""
data, s_dev = self.data, self.std_dev
if len(data) > 1 and s_dev > 0:
return (sum(self._get_pow_diffs(3)) /
float((len(data) - 1) * (s_dev ** 3)))
else:
return self.default
skewness = _StatsProperty('skewness', _calc_skewness)
def _calc_kurtosis(self):
"""\
Indicates how much data is in the tails of the distribution. The
result is always positive, with the normal "bell-curve"
distribution having a kurtosis of 3.
http://en.wikipedia.org/wiki/Kurtosis
See the module docstring for more about statistical moments.
>>> kurtosis(range(9))
1.99125
With a kurtosis of 1.99125, [0, 1, 2, 3, 4, 5, 6, 7, 8] is more
centrally distributed than the normal curve.
"""
data, s_dev = self.data, self.std_dev
if len(data) > 1 and s_dev > 0:
return (sum(self._get_pow_diffs(4)) /
float((len(data) - 1) * (s_dev ** 4)))
else:
return 0.0
kurtosis = _StatsProperty('kurtosis', _calc_kurtosis)
def _calc_pearson_type(self):
precision = self._pearson_precision
skewness = self.skewness
kurtosis = self.kurtosis
beta1 = skewness ** 2.0
beta2 = kurtosis * 1.0
# TODO: range checks?
c0 = (4 * beta2) - (3 * beta1)
c1 = skewness * (beta2 + 3)
c2 = (2 * beta2) - (3 * beta1) - 6
if round(c1, precision) == 0:
if round(beta2, precision) == 3:
return 0 # Normal
else:
if beta2 < 3:
return 2 # Symmetric Beta
elif beta2 > 3:
return 7
elif round(c2, precision) == 0:
return 3 # Gamma
else:
k = c1 ** 2 / (4 * c0 * c2)
if k < 0:
return 1 # Beta
raise RuntimeError('missed a spot')
pearson_type = _StatsProperty('pearson_type', _calc_pearson_type)
@staticmethod
def _get_quantile(sorted_data, q):
data, n = sorted_data, len(sorted_data)
idx = q / 1.0 * (n - 1)
idx_f, idx_c = int(floor(idx)), int(ceil(idx))
if idx_f == idx_c:
return data[idx_f]
return (data[idx_f] * (idx_c - idx)) + (data[idx_c] * (idx - idx_f))
def get_quantile(self, q):
"""Get a quantile from the dataset. Quantiles are floating point
values between ``0.0`` and ``1.0``, with ``0.0`` representing
the minimum value in the dataset and ``1.0`` representing the
maximum. ``0.5`` represents the median:
>>> Stats(range(100)).get_quantile(0.5)
49.5
"""
q = float(q)
if not 0.0 <= q <= 1.0:
raise ValueError('expected q between 0.0 and 1.0, not %r' % q)
elif not self.data:
return self.default
return self._get_quantile(self._get_sorted_data(), q)
def get_zscore(self, value):
"""Get the z-score for *value* in the group. If the standard deviation
is 0, 0 inf or -inf will be returned to indicate whether the value is
equal to, greater than or below the group's mean.
"""
mean = self.mean
if self.std_dev == 0:
if value == mean:
return 0
if value > mean:
return float('inf')
if value < mean:
return float('-inf')
return (float(value) - mean) / self.std_dev
def trim_relative(self, amount=0.15):
"""A utility function used to cut a proportion of values off each end
of a list of values. This has the effect of limiting the
effect of outliers.
Args:
amount (float): A value between 0.0 and 0.5 to trim off of
each side of the data.
.. note:
This operation modifies the data in-place. It does not
make or return a copy.
"""
trim = float(amount)
if not 0.0 <= trim < 0.5:
raise ValueError('expected amount between 0.0 and 0.5, not %r'
% trim)
size = len(self.data)
size_diff = int(size * trim)
if size_diff == 0.0:
return
self.data = self._get_sorted_data()[size_diff:-size_diff]
self.clear_cache()
def _get_pow_diffs(self, power):
"""
A utility function used for calculating statistical moments.
"""
m = self.mean
return [(v - m) ** power for v in self.data]
def _get_bin_bounds(self, count=None, with_max=False):
if not self.data:
return [0.0] # TODO: raise?
data = self.data
len_data, min_data, max_data = len(data), min(data), max(data)
if len_data < 4:
if not count:
count = len_data
dx = (max_data - min_data) / float(count)
bins = [min_data + (dx * i) for i in range(count)]
elif count is None:
# freedman algorithm for fixed-width bin selection
q25, q75 = self.get_quantile(0.25), self.get_quantile(0.75)
dx = 2 * (q75 - q25) / (len_data ** (1 / 3.0))
bin_count = max(1, int(ceil((max_data - min_data) / dx)))
bins = [min_data + (dx * i) for i in range(bin_count + 1)]
bins = [b for b in bins if b < max_data]
else:
dx = (max_data - min_data) / float(count)
bins = [min_data + (dx * i) for i in range(count)]
if with_max:
bins.append(float(max_data))
return bins
def get_histogram_counts(self, bins=None, **kw):
"""Produces a list of ``(bin, count)`` pairs comprising a histogram of
the Stats object's data, using fixed-width bins. See
:meth:`Stats.format_histogram` for more details.
Args:
bins (int): maximum number of bins, or list of
floating-point bin boundaries. Defaults to the output of
Freedman's algorithm.
bin_digits (int): Number of digits used to round down the
bin boundaries. Defaults to 1.
The output of this method can be stored and/or modified, and
then passed to :func:`statsutils.format_histogram_counts` to
achieve the same text formatting as the
:meth:`~Stats.format_histogram` method. This can be useful for
snapshotting over time.
"""
bin_digits = int(kw.pop('bin_digits', 1))
if kw:
raise TypeError('unexpected keyword arguments: %r' % kw.keys())
if not bins:
bins = self._get_bin_bounds()
else:
try:
bin_count = int(bins)
except TypeError:
try:
bins = [float(x) for x in bins]
except Exception:
raise ValueError('bins expected integer bin count or list'
' of float bin boundaries, not %r' % bins)
if self.min < bins[0]:
bins = [self.min] + bins
else:
bins = self._get_bin_bounds(bin_count)
# floor and ceil really should have taken ndigits, like round()
round_factor = 10.0 ** bin_digits
bins = [floor(b * round_factor) / round_factor for b in bins]
bins = sorted(set(bins))
idxs = [bisect.bisect(bins, d) - 1 for d in self.data]
count_map = Counter(idxs)
bin_counts = [(b, count_map.get(i, 0)) for i, b in enumerate(bins)]
return bin_counts
def format_histogram(self, bins=None, **kw):
"""Produces a textual histogram of the data, using fixed-width bins,
allowing for simple visualization, even in console environments.
>>> data = list(range(20)) + list(range(5, 15)) + [10]
>>> print(Stats(data).format_histogram(width=30))
0.0: 5 #########
4.4: 8 ###############
8.9: 11 ####################
13.3: 5 #########
17.8: 2 ####
In this histogram, five values are between 0.0 and 4.4, eight
are between 4.4 and 8.9, and two values lie between 17.8 and
the max.
You can specify the number of bins, or provide a list of
bin boundaries themselves. If no bins are provided, as in the
example above, `Freedman's algorithm`_ for bin selection is
used.
Args:
bins (int): Maximum number of bins for the
histogram. Also accepts a list of floating-point
bin boundaries. If the minimum boundary is still
greater than the minimum value in the data, that
boundary will be implicitly added. Defaults to the bin
boundaries returned by `Freedman's algorithm`_.
bin_digits (int): Number of digits to round each bin
to. Note that bins are always rounded down to avoid
clipping any data. Defaults to 1.
width (int): integer number of columns in the longest line
in the histogram. Defaults to console width on Python
3.3+, or 80 if that is not available.
format_bin (callable): Called on each bin to create a
label for the final output. Use this function to add
units, such as "ms" for milliseconds.
Should you want something more programmatically reusable, see
the :meth:`~Stats.get_histogram_counts` method, the output of
is used by format_histogram. The :meth:`~Stats.describe`
method is another useful summarization method, albeit less
visual.
.. _Freedman's algorithm: https://en.wikipedia.org/wiki/Freedman%E2%80%93Diaconis_rule
"""
width = kw.pop('width', None)
format_bin = kw.pop('format_bin', None)
bin_counts = self.get_histogram_counts(bins=bins, **kw)
return format_histogram_counts(bin_counts,
width=width,
format_bin=format_bin)
def describe(self, quantiles=None, format=None):
"""Provides standard summary statistics for the data in the Stats
object, in one of several convenient formats.
Args:
quantiles (list): A list of numeric values to use as
quantiles in the resulting summary. All values must be
0.0-1.0, with 0.5 representing the median. Defaults to
``[0.25, 0.5, 0.75]``, representing the standard
quartiles.
format (str): Controls the return type of the function,
with one of three valid values: ``"dict"`` gives back
a :class:`dict` with the appropriate keys and
values. ``"list"`` is a list of key-value pairs in an
order suitable to pass to an OrderedDict or HTML
table. ``"text"`` converts the values to text suitable
for printing, as seen below.
Here is the information returned by a default ``describe``, as
presented in the ``"text"`` format:
>>> stats = Stats(range(1, 8))
>>> print(stats.describe(format='text'))
count: 7
mean: 4.0
std_dev: 2.0
mad: 2.0
min: 1
0.25: 2.5
0.5: 4
0.75: 5.5
max: 7
For more advanced descriptive statistics, check out my blog
post on the topic `Statistics for Software
<https://www.paypal-engineering.com/2016/04/11/statistics-for-software/>`_.
"""
if format is None:
format = 'dict'
elif format not in ('dict', 'list', 'text'):
raise ValueError('invalid format for describe,'
' expected one of "dict"/"list"/"text", not %r'
% format)
quantiles = quantiles or [0.25, 0.5, 0.75]
q_items = []
for q in quantiles:
q_val = self.get_quantile(q)
q_items.append((str(q), q_val))
items = [('count', self.count),
('mean', self.mean),
('std_dev', self.std_dev),
('mad', self.mad),
('min', self.min)]
items.extend(q_items)
items.append(('max', self.max))
if format == 'dict':
ret = dict(items)
elif format == 'list':
ret = items
elif format == 'text':
ret = '\n'.join(['{}{}'.format((label + ':').ljust(10), val)
for label, val in items])
return ret
def describe(data, quantiles=None, format=None):
"""A convenience function to get standard summary statistics useful
for describing most data. See :meth:`Stats.describe` for more
details.
>>> print(describe(range(7), format='text'))
count: 7
mean: 3.0
std_dev: 2.0
mad: 2.0
min: 0
0.25: 1.5
0.5: 3
0.75: 4.5
max: 6
See :meth:`Stats.format_histogram` for another very useful
summarization that uses textual visualization.
"""
return Stats(data).describe(quantiles=quantiles, format=format)
def _get_conv_func(attr_name):
def stats_helper(data, default=0.0):
return getattr(Stats(data, default=default, use_copy=False),
attr_name)
return stats_helper
for attr_name, attr in list(Stats.__dict__.items()):
if isinstance(attr, _StatsProperty):
if attr_name in ('max', 'min', 'count'): # don't shadow builtins
continue
if attr_name in ('mad',): # convenience aliases
continue
func = _get_conv_func(attr_name)
func.__doc__ = attr.func.__doc__
globals()[attr_name] = func
delattr(Stats, '_calc_' + attr_name)
# cleanup
del attr
del attr_name
del func
def format_histogram_counts(bin_counts, width=None, format_bin=None):
"""The formatting logic behind :meth:`Stats.format_histogram`, which
takes the output of :meth:`Stats.get_histogram_counts`, and passes
them to this function.
Args:
bin_counts (list): A list of bin values to counts.
width (int): Number of character columns in the text output,
defaults to 80 or console width in Python 3.3+.
format_bin (callable): Used to convert bin values into string
labels.
"""
lines = []
if not format_bin:
format_bin = lambda v: v
if not width:
try:
import shutil # python 3 convenience
width = shutil.get_terminal_size()[0]
except Exception:
width = 80
bins = [b for b, _ in bin_counts]
count_max = max([count for _, count in bin_counts])
count_cols = len(str(count_max))
labels = ['%s' % format_bin(b) for b in bins]
label_cols = max([len(l) for l in labels])
tmp_line = '{}: {} #'.format('x' * label_cols, count_max)
bar_cols = max(width - len(tmp_line), 3)
line_k = float(bar_cols) / count_max
tmpl = "{label:>{label_cols}}: {count:>{count_cols}} {bar}"
for label, (bin_val, count) in zip(labels, bin_counts):
bar_len = int(round(count * line_k))
bar = ('#' * bar_len) or '|'
line = tmpl.format(label=label,
label_cols=label_cols,
count=count,
count_cols=count_cols,
bar=bar)
lines.append(line)
return '\n'.join(lines)
| Stats |
python | pytorch__pytorch | .github/scripts/runner_determinator.py | {
"start": 3053,
"end": 3455
} | class ____(NamedTuple):
rollout_perc: float = (
0 # Percentage of workflows to experiment on when user is not opted-in.
)
all_branches: bool = (
False # If True, the experiment is also enabled on the exception branches
)
default: bool = (
True # If True, the experiment is enabled by default for all queries
)
# Add more fields as needed
| Experiment |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 27456,
"end": 32191
} | class ____(nn.Module):
"""
Multiscale deformable attention as proposed in Deformable DETR.
"""
def __init__(self, config: GroundingDinoConfig, num_heads: int, n_points: int):
super().__init__()
self.attn = MultiScaleDeformableAttention()
if config.d_model % num_heads != 0:
raise ValueError(
f"embed_dim (d_model) must be divisible by num_heads, but got {config.d_model} and {num_heads}"
)
dim_per_head = config.d_model // num_heads
# check if dim_per_head is power of 2
if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0):
warnings.warn(
"You'd better set embed_dim (d_model) in GroundingDinoMultiscaleDeformableAttention to make the"
" dimension of each attention head a power of 2 which is more efficient in the authors' CUDA"
" implementation."
)
self.im2col_step = 64
self.d_model = config.d_model
self.n_levels = config.num_feature_levels
self.n_heads = num_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(config.d_model, num_heads * self.n_levels * n_points * 2)
self.attention_weights = nn.Linear(config.d_model, num_heads * self.n_levels * n_points)
self.value_proj = nn.Linear(config.d_model, config.d_model)
self.output_proj = nn.Linear(config.d_model, config.d_model)
self.disable_custom_kernels = config.disable_custom_kernels
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states=None,
encoder_attention_mask=None,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes=None,
spatial_shapes_list=None,
level_start_index=None,
output_attentions: bool = False,
):
# add position embeddings to the hidden states before projecting to queries and keys
if position_embeddings is not None:
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
batch_size, num_queries, _ = hidden_states.shape
batch_size, sequence_length, _ = encoder_hidden_states.shape
# Ignore copy
if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length:
raise ValueError(
"Make sure to align the spatial shapes with the sequence length of the encoder hidden states"
)
value = self.value_proj(encoder_hidden_states)
if attention_mask is not None:
# we invert the attention_mask
value = value.masked_fill(~attention_mask[..., None], float(0))
value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(hidden_states).view(
batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2
)
attention_weights = self.attention_weights(hidden_states).view(
batch_size, num_queries, self.n_heads, self.n_levels * self.n_points
)
attention_weights = F.softmax(attention_weights, -1).view(
batch_size, num_queries, self.n_heads, self.n_levels, self.n_points
)
# batch_size, num_queries, n_heads, n_levels, n_points, 2
num_coordinates = reference_points.shape[-1]
if num_coordinates == 2:
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = (
reference_points[:, :, None, :, None, :]
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
elif num_coordinates == 4:
sampling_locations = (
reference_points[:, :, None, :, None, :2]
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
)
else:
raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}")
output = self.attn(
value,
spatial_shapes,
spatial_shapes_list,
level_start_index,
sampling_locations,
attention_weights,
self.im2col_step,
)
output = self.output_proj(output)
return output, attention_weights
| GroundingDinoMultiscaleDeformableAttention |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/waiters/test_comprehend.py | {
"start": 1145,
"end": 1394
} | class ____:
def test_service_waiters(self):
assert "pii_entities_detection_job_complete" in ComprehendHook().list_waiters()
assert "create_document_classifier_complete" in ComprehendHook().list_waiters()
| TestComprehendCustomWaiters |
python | catalyst-team__catalyst | catalyst/contrib/data/reader.py | {
"start": 1245,
"end": 3088
} | class ____(IReader):
"""
Numeric data reader abstraction.
Reads a single float, int, str or other from data
"""
def __init__(
self,
input_key: str,
output_key: Optional[str] = None,
dtype: Type = np.float32,
default_value: float = None,
one_hot_classes: int = None,
smoothing: float = None,
):
"""
Args:
input_key: input key to use from annotation dict
output_key: output key to use to store the result,
default: ``input_key``
dtype: datatype of scalar values to use
default_value: default value to use if something goes wrong
one_hot_classes: number of one-hot classes
smoothing (float, optional): if specified applies label smoothing
to one_hot classes
"""
super().__init__(input_key, output_key or input_key)
self.dtype = dtype
self.default_value = default_value
self.one_hot_classes = one_hot_classes
self.smoothing = smoothing
if self.one_hot_classes is not None and self.smoothing is not None:
assert 0.0 < smoothing < 1.0, (
"If smoothing is specified it must be in (0; 1), " + f"got {smoothing}"
)
def __call__(self, element):
"""
Reads a row from your annotations dict and
transfer it to a single value
Args:
element: elem in your dataset
Returns:
dtype: Scalar value
"""
scalar = self.dtype(element.get(self.input_key, self.default_value))
if self.one_hot_classes is not None:
scalar = get_one_hot(scalar, self.one_hot_classes, smoothing=self.smoothing)
output = {self.output_key: scalar}
return output
| ScalarReader |
python | mahmoud__boltons | boltons/funcutils.py | {
"start": 24018,
"end": 35994
} | class ____:
"""The FunctionBuilder type provides an interface for programmatically
creating new functions, either based on existing functions or from
scratch.
Values are passed in at construction or set as attributes on the
instance. For creating a new function based of an existing one,
see the :meth:`~FunctionBuilder.from_func` classmethod. At any
point, :meth:`~FunctionBuilder.get_func` can be called to get a
newly compiled function, based on the values configured.
>>> fb = FunctionBuilder('return_five', doc='returns the integer 5',
... body='return 5')
>>> f = fb.get_func()
>>> f()
5
>>> fb.varkw = 'kw'
>>> f_kw = fb.get_func()
>>> f_kw(ignored_arg='ignored_val')
5
Note that function signatures themselves changed quite a bit in
Python 3, so several arguments are only applicable to
FunctionBuilder in Python 3. Except for *name*, all arguments to
the constructor are keyword arguments.
Args:
name (str): Name of the function.
doc (str): `Docstring`_ for the function, defaults to empty.
module (str): Name of the module from which this function was
imported. Defaults to None.
body (str): String version of the code representing the body
of the function. Defaults to ``'pass'``, which will result
in a function which does nothing and returns ``None``.
args (list): List of argument names, defaults to empty list,
denoting no arguments.
varargs (str): Name of the catch-all variable for positional
arguments. E.g., "args" if the resultant function is to have
``*args`` in the signature. Defaults to None.
varkw (str): Name of the catch-all variable for keyword
arguments. E.g., "kwargs" if the resultant function is to have
``**kwargs`` in the signature. Defaults to None.
defaults (tuple): A tuple containing default argument values for
those arguments that have defaults.
kwonlyargs (list): Argument names which are only valid as
keyword arguments. **Python 3 only.**
kwonlydefaults (dict): A mapping, same as normal *defaults*,
but only for the *kwonlyargs*. **Python 3 only.**
annotations (dict): Mapping of type hints and so
forth. **Python 3 only.**
filename (str): The filename that will appear in
tracebacks. Defaults to "boltons.funcutils.FunctionBuilder".
indent (int): Number of spaces with which to indent the
function *body*. Values less than 1 will result in an error.
dict (dict): Any other attributes which should be added to the
functions compiled with this FunctionBuilder.
All of these arguments are also made available as attributes which
can be mutated as necessary.
.. _Docstring: https://en.wikipedia.org/wiki/Docstring#Python
"""
_argspec_defaults = {'args': list,
'varargs': lambda: None,
'varkw': lambda: None,
'defaults': lambda: None,
'kwonlyargs': list,
'kwonlydefaults': dict,
'annotations': dict}
@classmethod
def _argspec_to_dict(cls, f):
argspec = inspect.getfullargspec(f)
return {attr: getattr(argspec, attr)
for attr in cls._argspec_defaults}
_defaults = {'doc': str,
'dict': dict,
'is_async': lambda: False,
'module': lambda: None,
'body': lambda: 'pass',
'indent': lambda: 4,
"annotations": dict,
'filename': lambda: 'boltons.funcutils.FunctionBuilder'}
_defaults.update(_argspec_defaults)
_compile_count = itertools.count()
def __init__(self, name, **kw):
self.name = name
for a, default_factory in self._defaults.items():
val = kw.pop(a, None)
if val is None:
val = default_factory()
setattr(self, a, val)
if kw:
raise TypeError('unexpected kwargs: %r' % kw.keys())
return
# def get_argspec(self): # TODO
def get_sig_str(self, with_annotations=True):
"""Return function signature as a string.
with_annotations is ignored on Python 2. On Python 3 signature
will omit annotations if it is set to False.
"""
if with_annotations:
annotations = self.annotations
else:
annotations = {}
return inspect_formatargspec(self.args,
self.varargs,
self.varkw,
[],
self.kwonlyargs,
{},
annotations)
_KWONLY_MARKER = re.compile(r"""
\* # a star
\s* # followed by any amount of whitespace
, # followed by a comma
\s* # followed by any amount of whitespace
""", re.VERBOSE)
def get_invocation_str(self):
kwonly_pairs = None
formatters = {}
if self.kwonlyargs:
kwonly_pairs = {arg: arg
for arg in self.kwonlyargs}
formatters['formatvalue'] = lambda value: '=' + value
sig = inspect_formatargspec(self.args,
self.varargs,
self.varkw,
[],
kwonly_pairs,
kwonly_pairs,
{},
**formatters)
sig = self._KWONLY_MARKER.sub('', sig)
return sig[1:-1]
@classmethod
def from_func(cls, func):
"""Create a new FunctionBuilder instance based on an existing
function. The original function will not be stored or
modified.
"""
# TODO: copy_body? gonna need a good signature regex.
# TODO: might worry about __closure__?
if not callable(func):
raise TypeError(f'expected callable object, not {func!r}')
if isinstance(func, functools.partial):
kwargs = {'name': func.func.__name__,
'doc': func.func.__doc__,
'module': getattr(func.func, '__module__', None), # e.g., method_descriptor
'annotations': getattr(func.func, "__annotations__", {}),
'dict': getattr(func.func, '__dict__', {})}
else:
kwargs = {'name': func.__name__,
'doc': func.__doc__,
'module': getattr(func, '__module__', None), # e.g., method_descriptor
'annotations': getattr(func, "__annotations__", {}),
'dict': getattr(func, '__dict__', {})}
kwargs.update(cls._argspec_to_dict(func))
if inspect.iscoroutinefunction(func):
kwargs['is_async'] = True
return cls(**kwargs)
def get_func(self, execdict=None, add_source=True, with_dict=True):
"""Compile and return a new function based on the current values of
the FunctionBuilder.
Args:
execdict (dict): The dictionary representing the scope in
which the compilation should take place. Defaults to an empty
dict.
add_source (bool): Whether to add the source used to a
special ``__source__`` attribute on the resulting
function. Defaults to True.
with_dict (bool): Add any custom attributes, if
applicable. Defaults to True.
To see an example of usage, see the implementation of
:func:`~boltons.funcutils.wraps`.
"""
execdict = execdict or {}
body = self.body or self._default_body
tmpl = 'def {name}{sig_str}:'
tmpl += '\n{body}'
if self.is_async:
tmpl = 'async ' + tmpl
body = _indent(self.body, ' ' * self.indent)
name = self.name.replace('<', '_').replace('>', '_') # lambdas
src = tmpl.format(name=name, sig_str=self.get_sig_str(with_annotations=False),
doc=self.doc, body=body)
self._compile(src, execdict)
func = execdict[name]
func.__name__ = self.name
func.__doc__ = self.doc
func.__defaults__ = self.defaults
func.__kwdefaults__ = self.kwonlydefaults
func.__annotations__ = self.annotations
if with_dict:
func.__dict__.update(self.dict)
func.__module__ = self.module
# TODO: caller module fallback?
if add_source:
func.__source__ = src
return func
def get_defaults_dict(self):
"""Get a dictionary of function arguments with defaults and the
respective values.
"""
ret = dict(reversed(list(zip(reversed(self.args),
reversed(self.defaults or [])))))
kwonlydefaults = getattr(self, 'kwonlydefaults', None)
if kwonlydefaults:
ret.update(kwonlydefaults)
return ret
def get_arg_names(self, only_required=False):
arg_names = tuple(self.args) + tuple(getattr(self, 'kwonlyargs', ()))
if only_required:
defaults_dict = self.get_defaults_dict()
arg_names = tuple([an for an in arg_names if an not in defaults_dict])
return arg_names
def add_arg(self, arg_name, default=NO_DEFAULT, kwonly=False):
"""Add an argument with optional *default* (defaults to
``funcutils.NO_DEFAULT``). Pass *kwonly=True* to add a
keyword-only argument
"""
if arg_name in self.args:
raise ExistingArgument(f'arg {arg_name!r} already in func {self.name} arg list')
if arg_name in self.kwonlyargs:
raise ExistingArgument(f'arg {arg_name!r} already in func {self.name} kwonly arg list')
if not kwonly:
self.args.append(arg_name)
if default is not NO_DEFAULT:
self.defaults = (self.defaults or ()) + (default,)
else:
self.kwonlyargs.append(arg_name)
if default is not NO_DEFAULT:
self.kwonlydefaults[arg_name] = default
def remove_arg(self, arg_name):
"""Remove an argument from this FunctionBuilder's argument list. The
resulting function will have one less argument per call to
this function.
Args:
arg_name (str): The name of the argument to remove.
Raises a :exc:`ValueError` if the argument is not present.
"""
args = self.args
d_dict = self.get_defaults_dict()
try:
args.remove(arg_name)
except ValueError:
try:
self.kwonlyargs.remove(arg_name)
except (AttributeError, ValueError):
# missing from both
exc = MissingArgument('arg %r not found in %s argument list:'
' %r' % (arg_name, self.name, args))
exc.arg_name = arg_name
raise exc
else:
self.kwonlydefaults.pop(arg_name, None)
else:
d_dict.pop(arg_name, None)
self.defaults = tuple([d_dict[a] for a in args if a in d_dict])
return
def _compile(self, src, execdict):
filename = ('<%s-%d>'
% (self.filename, next(self._compile_count),))
try:
code = compile(src, filename, 'single')
exec(code, execdict)
except Exception:
raise
return execdict
| FunctionBuilder |
python | conda__conda | tests/plugins/test_subcommands.py | {
"start": 645,
"end": 5942
} | class ____:
name: str
summary: str
configure_parser: Callable | None = None
def custom_command(self, args):
pass
@plugins.hookimpl
def conda_subcommands(self):
yield CondaSubcommand(
name=self.name,
summary=self.summary,
action=self.custom_command,
configure_parser=self.configure_parser,
)
def test_invoked(plugin_manager, conda_cli: CondaCLIFixture, mocker: MockerFixture):
"""Ensure we are able to invoke our command after creating it."""
# mocks
mocked = mocker.patch.object(SubcommandPlugin, "custom_command")
# setup
plugin_manager.register(SubcommandPlugin(name="custom", summary="Summary."))
# test
conda_cli("custom", "some-arg", "some-other-arg")
# assertions; make sure our command was invoked with the right arguments
mocked.assert_called_with(("some-arg", "some-other-arg"))
def test_help(plugin_manager, conda_cli: CondaCLIFixture, capsys: CaptureFixture):
"""Ensures the command appears on the help page."""
# setup
plugin_manager.register(SubcommandPlugin(name="custom", summary="Summary."))
# test
with pytest.raises(SystemExit, match="0"):
conda_cli("--help")
stdout, stderr = capsys.readouterr()
# assertions; make sure our command appears with the help blurb
assert re.search(r"custom\s+Summary.", stdout) is not None
assert not stderr
def test_duplicated(plugin_manager, conda_cli: CondaCLIFixture):
"""
Ensures we get an error when attempting to register commands with the same `name` property.
"""
# setup
plugin = SubcommandPlugin(name="custom", summary="Summary.")
assert plugin_manager.load_plugins(plugin) == 1
# invalid, identical plugins, error ignored
assert plugin_manager.load_plugins(plugin) == 0
# invalid, similar plugins, error ignored
plugin2 = SubcommandPlugin(name="custom", summary="Summary.")
assert plugin_manager.load_plugins(plugin2) == 0
@pytest.mark.parametrize("command", BUILTIN_COMMANDS)
def test_cannot_override_builtin_commands(command, plugin_manager, mocker, conda_cli):
"""
Ensures that plugin subcommands do not override the builtin conda commands
"""
# mocks
mocked = mocker.patch.object(SubcommandPlugin, "custom_command")
mock_log = mocker.patch("conda.cli.conda_argparse.log")
# setup
plugin_manager.register(SubcommandPlugin(name=command, summary="Summary."))
# test
with pytest.raises(SystemExit, match="0"):
conda_cli(command, "--help")
# assertions; make sure we got the right error messages and didn't invoke the custom command
assert mock_log.error.mock_calls == [
mocker.call(
dals(
f"""
The plugin '{command}' is trying to override the built-in command
with the same name, which is not allowed.
Please uninstall the plugin to stop seeing this error message.
"""
)
)
]
assert mocked.mock_calls == []
def test_parser_no_plugins(plugin_manager):
subcommand_plugin = SubcommandPlugin(name="custom", summary="Summary.")
assert plugin_manager.load_plugins(subcommand_plugin) == 1
assert plugin_manager.is_registered(subcommand_plugin)
parser = generate_parser()
with pytest.raises(SystemExit, match="2"):
parser.parse_args(["foobar"])
args = parser.parse_args(["custom"])
assert args.cmd == "custom"
plugin_manager.disable_external_plugins()
parser = generate_parser()
with pytest.raises(SystemExit, match="2"):
parser.parse_args(["foobar"])
with pytest.raises(SystemExit, match="2"):
args = parser.parse_args(["custom"])
def test_custom_plugin_not_extend_parser(
plugin_manager,
conda_cli: CondaCLIFixture,
mocker: MockerFixture,
):
subcommand_plugin = SubcommandPlugin(name="custom", summary="Summary.")
assert plugin_manager.load_plugins(subcommand_plugin) == 1
assert plugin_manager.is_registered(subcommand_plugin)
mocker.patch(
"conda.base.context.Context.plugin_manager",
return_value=plugin_manager,
new_callable=mocker.PropertyMock,
)
assert context.plugin_manager is plugin_manager
stdout, stderr, err = conda_cli("custom", "--help")
# configure_parser is undefined and action don't do anything, so this subcommand does not have any help text
assert not stdout
assert not stderr
assert not err
def test_custom_plugin_extend_parser(
plugin_manager,
conda_cli: CondaCLIFixture,
mocker: MockerFixture,
):
def configure_parser(subparser):
pass
subcommand_plugin = SubcommandPlugin(
name="custom",
summary="Summary.",
configure_parser=configure_parser,
)
assert plugin_manager.load_plugins(subcommand_plugin) == 1
assert plugin_manager.is_registered(subcommand_plugin)
mocker.patch(
"conda.base.context.Context.plugin_manager",
return_value=plugin_manager,
new_callable=mocker.PropertyMock,
)
assert context.plugin_manager is plugin_manager
with pytest.raises(SystemExit, match="0"):
conda_cli("custom", "--help")
| SubcommandPlugin |
python | getsentry__sentry | src/sentry/issues/grouptype.py | {
"start": 11375,
"end": 11811
} | class ____(GroupType):
type_id = 1004
slug = "performance_render_blocking_asset_span"
description = "Large Render Blocking Asset"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.FRONTEND.value
noise_config = NoiseConfig()
default_priority = PriorityLevel.LOW
released = True
use_flagpole_for_all_features = True
@dataclass(frozen=True)
| PerformanceRenderBlockingAssetSpanGroupType |
python | astropy__astropy | astropy/cosmology/_src/funcs/optimize.py | {
"start": 1181,
"end": 19640
} | class ____(TypedDict): # noqa: PYI049
"""Keyword arguments for :func:`~astropy.cosmology.z_at_value`.
Note that :func:`~astropy.cosmology.z_at_value` can accept most of these
arguments as positional arguments. This TypedDict is useful for
type annotating arguments to other functions that pass them to
:func:`~astropy.cosmology.z_at_value`.
"""
zmin: NotRequired[ArrayLike]
"""The lower search limit for ``z``."""
zmax: NotRequired[ArrayLike]
"""The upper search limit for ``z``."""
ztol: NotRequired[float | FArray]
"""The relative error in ``z`` acceptable for convergence."""
maxfun: NotRequired[int | npt.NDArray[np.integer]]
"""The maximum number of function evaluations allowed in the optimization routine."""
method: NotRequired[str | _CustomSolverCallable]
"""The type of solver to pass to the minimizer.
The built-in options provided by :func:`~scipy.optimize.minimize_scalar` are 'Brent'
(default),'Golden' and 'Bounded' with names case insensitive - see documentation
there for details. It also accepts a custom solver by passing any user-provided
callable object that meets the requirements listed therein under the Notes on
"Custom minimizers" - or in more detail in :doc:`scipy:tutorial/optimize` - although
their use is currently untested.
"""
bracket: NotRequired[npt.NDArray[np.void] | _BracketSingle | None]
"""The search bracket, with semantics depending on the solver.
For methods 'Brent' and 'Golden', ``bracket`` defines the bracketing
interval and can either have three items (z1, z2, z3) so that
z1 < z2 < z3 and ``func(z2) < func (z1), func(z3)`` or two items z1
and z3 which are assumed to be a starting interval for a downhill
bracket search. For non-monotonic functions such as angular diameter
distance this may be used to start the search on the desired side of
the maximum, but see Examples below for usage notes.
"""
verbose: NotRequired[bool]
"""Print diagnostic output from solver."""
def _z_at_scalar_value(
func,
fval: float | FArray | Quantity,
zmin: float | FArray | Quantity = 1e-8,
zmax: float | FArray | Quantity = 1000,
ztol: float | FArray = 1e-8,
maxfun: int | npt.NDArray[np.integer] = 500,
method: str | _CustomSolverCallable = "Brent",
bracket: _BracketSingle | None = None,
verbose: bool = False,
) -> float:
"""Find the redshift ``z`` at which ``func(z) = fval``.
See :func:`astropy.cosmology.z_at_value`.
"""
from scipy.optimize import minimize_scalar
opt = {"maxiter": maxfun, "xtol": ztol}
# Assume custom methods support the same options as default; otherwise user
# will see warnings.
if callable(method): # can skip callables
pass
elif str(method).lower() == "bounded":
opt["xatol"] = opt.pop("xtol")
if bracket is not None:
warnings.warn(f"Option 'bracket' is ignored by method {method}.")
bracket = None
# fval falling inside the interval of bracketing function values does not
# guarantee it has a unique solution, but for Standard Cosmological
# quantities normally should (being monotonic or having a single extremum).
# In these cases keep solver from returning solutions outside of bracket.
fval_zmin, fval_zmax = func(zmin), func(zmax)
nobracket = False
if np.sign(fval - fval_zmin) != np.sign(fval_zmax - fval):
if bracket is None:
nobracket = True
else:
fval_brac = func(np.asanyarray(bracket))
if np.sign(fval - fval_brac[0]) != np.sign(fval_brac[-1] - fval):
nobracket = True
else:
zmin, zmax = bracket[0], bracket[-1]
fval_zmin, fval_zmax = fval_brac[[0, -1]]
if nobracket:
warnings.warn(
f"fval is not bracketed by func(zmin)={fval_zmin} and "
f"func(zmax)={fval_zmax}. This means either there is no "
"solution, or that there is more than one solution "
"between zmin and zmax satisfying fval = func(z).",
AstropyUserWarning,
)
if isinstance(fval_zmin, Quantity):
val = fval.to_value(fval_zmin.unit)
else:
val = fval
# Construct bounds (Brent and Golden fail if bounds are not None)
if callable(method) or str(method).lower() not in {"brent", "golden"}:
bounds = (zmin, zmax)
else:
bounds = None
# Objective function to minimize.
# 'Brent' and 'Golden' ignore `bounds` but this keeps the domain witihin the bounds.
def f(z):
if z > zmax:
return 1.0e300 * (1.0 + z - zmax)
elif z < zmin:
return 1.0e300 * (1.0 + zmin - z)
elif isinstance(fval_zmin, Quantity):
return abs(func(z).value - val)
else:
return abs(func(z) - val)
# Perform the minimization
res = minimize_scalar(f, method=method, bounds=bounds, bracket=bracket, options=opt)
# Scipy docs state that `OptimizeResult` always has 'status' and 'message'
# attributes, but only `_minimize_scalar_bounded()` seems to have really
# implemented them.
if not res.success:
warnings.warn(
f"Solver returned {res.get('status')}:"
f" {res.get('message', 'Unsuccessful')}\nPrecision {res.fun} reached after"
f" {res.nfev} function calls.",
AstropyUserWarning,
)
if verbose:
print(res)
if np.allclose(res.x, zmax):
raise CosmologyError(
f"Best guess z={res.x} is very close to the upper z limit {zmax}."
"\nTry re-running with a different zmax."
)
elif np.allclose(res.x, zmin):
raise CosmologyError(
f"Best guess z={res.x} is very close to the lower z limit {zmin}."
"\nTry re-running with a different zmin."
)
return res.x
def z_at_value(
func,
fval,
zmin: ArrayLike = 1e-8,
zmax: ArrayLike = 1000,
ztol: ArrayLike = 1e-8,
maxfun: int | npt.NDArray[np.integer] = 500,
method: str | _CustomSolverCallable = "Brent",
bracket: npt.NDArray[np.void] | _BracketSingle | None = None,
*,
verbose: bool = False,
) -> Quantity:
"""Find the redshift ``z`` at which ``func(z) = fval``.
This finds the redshift at which one of the cosmology functions or
methods (for example Planck13.distmod) is equal to a known value.
.. warning::
Make sure you understand the behavior of the function that you are
trying to invert! Depending on the cosmology, there may not be a
unique solution. For example, in the standard Lambda CDM cosmology,
there are two redshifts which give an angular diameter distance of
1500 Mpc, z ~ 0.7 and z ~ 3.8. To force ``z_at_value`` to find the
solution you are interested in, use the ``zmin`` and ``zmax`` keywords
to limit the search range (see the example below).
Parameters
----------
func : function or method
A function that takes a redshift as input.
fval : Quantity
The (scalar or array) value of ``func(z)`` to recover.
zmin : float or array-like['dimensionless'] or quantity-like, optional
The lower search limit for ``z``. Beware of divergences
in some cosmological functions, such as distance moduli,
at z=0 (default 1e-8).
zmax : float or array-like['dimensionless'] or quantity-like, optional
The upper search limit for ``z`` (default 1000).
ztol : float or array-like['dimensionless'], optional
The relative error in ``z`` acceptable for convergence.
maxfun : int or array-like, optional
The maximum number of function evaluations allowed in the
optimization routine (default 500).
method : str or callable, optional
Type of solver to pass to the minimizer. The built-in options provided
by :func:`~scipy.optimize.minimize_scalar` are 'Brent' (default),
'Golden' and 'Bounded' with names case insensitive - see documentation
there for details. It also accepts a custom solver by passing any
user-provided callable object that meets the requirements listed
therein under the Notes on "Custom minimizers" - or in more detail in
:doc:`scipy:tutorial/optimize` - although their use is currently
untested.
.. versionadded:: 4.3
bracket : sequence or object array[sequence], optional
For methods 'Brent' and 'Golden', ``bracket`` defines the bracketing
interval and can either have three items (z1, z2, z3) so that
z1 < z2 < z3 and ``func(z2) < func (z1), func(z3)`` or two items z1
and z3 which are assumed to be a starting interval for a downhill
bracket search. For non-monotonic functions such as angular diameter
distance this may be used to start the search on the desired side of
the maximum, but see Examples below for usage notes.
.. versionadded:: 4.3
verbose : bool, optional keyword-only
Print diagnostic output from solver (default `False`).
.. versionadded:: 4.3
.. versionchanged:: 6.1
Changed to keyword-only.
Returns
-------
z : Quantity ['redshift']
The redshift ``z`` satisfying ``zmin < z < zmax`` and ``func(z) =
fval`` within ``ztol``. Has units of cosmological redshift.
Warns
-----
:class:`~astropy.utils.exceptions.AstropyUserWarning`
If ``fval`` is not bracketed by ``func(zmin)=fval(zmin)`` and
``func(zmax)=fval(zmax)``.
If the solver was not successful.
Raises
------
:class:`astropy.cosmology.CosmologyError`
If the result is very close to either ``zmin`` or ``zmax``.
ValueError
If ``bracket`` is not an array nor a 2 (or 3) element sequence.
TypeError
If ``bracket`` is not an object array. 2 (or 3) element sequences will
be turned into object arrays, so this error should only occur if a
non-object array is used for ``bracket``.
Notes
-----
This works for any arbitrary input cosmology, but is inefficient if you
want to invert a large number of values for the same cosmology. In this
case, it is faster to instead generate an array of values at many
closely-spaced redshifts that cover the relevant redshift range, and then
use interpolation to find the redshift at each value you are interested
in. For example, to efficiently find the redshifts corresponding to 10^6
values of the distance modulus in a Planck13 cosmology, you could do the
following:
>>> import astropy.units as u
>>> from astropy.cosmology import Planck13, z_at_value
Generate 10^6 distance moduli between 24 and 44 for which we
want to find the corresponding redshifts:
>>> Dvals = (24 + np.random.rand(1000000) * 20) * u.mag
Make a grid of distance moduli covering the redshift range we
need using 50 equally log-spaced values between zmin and
zmax. We use log spacing to adequately sample the steep part of
the curve at low distance moduli:
>>> zmin = z_at_value(Planck13.distmod, Dvals.min())
>>> zmax = z_at_value(Planck13.distmod, Dvals.max())
>>> zgrid = np.geomspace(zmin, zmax, 50)
>>> Dgrid = Planck13.distmod(zgrid)
Finally interpolate to find the redshift at each distance modulus:
>>> zvals = np.interp(Dvals.value, Dgrid.value, zgrid)
Examples
--------
>>> import astropy.units as u
>>> from astropy.cosmology import Planck13, Planck18, z_at_value
The age and lookback time are monotonic with redshift, and so a
unique solution can be found:
>>> z_at_value(Planck13.age, 2 * u.Gyr) # doctest: +FLOAT_CMP
<Quantity 3.19812268 redshift>
The angular diameter is not monotonic however, and there are two
redshifts that give a value of 1500 Mpc. You can use the zmin and
zmax keywords to find the one you are interested in:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, zmax=1.5) # doctest: +FLOAT_CMP
<Quantity 0.68044452 redshift>
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, zmin=2.5) # doctest: +FLOAT_CMP
<Quantity 3.7823268 redshift>
Alternatively the ``bracket`` option may be used to initialize the
function solver on a desired region, but one should be aware that this
does not guarantee it will remain close to this starting bracket.
For the example of angular diameter distance, which has a maximum near
a redshift of 1.6 in this cosmology, defining a bracket on either side
of this maximum will often return a solution on the same side:
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
... method="Brent", bracket=(1.0, 1.2)) # doctest: +FLOAT_CMP +IGNORE_WARNINGS
<Quantity 0.68044452 redshift>
But this is not ascertained especially if the bracket is chosen too wide
and/or too close to the turning point:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(0.1, 1.5)) # doctest: +SKIP
<Quantity 3.7823268 redshift> # doctest: +SKIP
Likewise, even for the same minimizer and same starting conditions different
results can be found depending on architecture or library versions:
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP
<Quantity 3.7823268 redshift> # doctest: +SKIP
>>> z_at_value(Planck18.angular_diameter_distance,
... 1500 * u.Mpc, bracket=(2.0, 2.5)) # doctest: +SKIP
<Quantity 0.68044452 redshift> # doctest: +SKIP
It is therefore generally safer to use the 3-parameter variant to ensure
the solution stays within the bracketing limits:
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc, method="Brent",
... bracket=(0.1, 1.0, 1.5)) # doctest: +FLOAT_CMP
<Quantity 0.68044452 redshift>
Also note that the luminosity distance and distance modulus (two
other commonly inverted quantities) are monotonic in flat and open
universes, but not in closed universes.
All the arguments except ``func``, ``method`` and ``verbose`` accept array
inputs. This does NOT use interpolation tables or any method to speed up
evaluations, rather providing a convenient means to broadcast arguments
over an element-wise scalar evaluation.
The most common use case for non-scalar input is to evaluate 'func' for an
array of ``fval``:
>>> z_at_value(Planck13.age, [2, 7] * u.Gyr) # doctest: +FLOAT_CMP
<Quantity [3.19812061, 0.75620443] redshift>
``fval`` can be any shape:
>>> z_at_value(Planck13.age, [[2, 7], [1, 3]]*u.Gyr) # doctest: +FLOAT_CMP
<Quantity [[3.19812061, 0.75620443],
[5.67661227, 2.19131955]] redshift>
Other arguments can be arrays. For non-monotic functions -- for example,
the angular diameter distance -- this can be useful to find all solutions.
>>> z_at_value(Planck13.angular_diameter_distance, 1500 * u.Mpc,
... zmin=[0, 2.5], zmax=[2, 4]) # doctest: +FLOAT_CMP
<Quantity [0.68127747, 3.79149062] redshift>
The ``bracket`` argument can likewise be be an array. However, since
bracket must already be a sequence (or None), it MUST be given as an
object `numpy.ndarray`. Importantly, the depth of the array must be such
that each bracket subsequence is an object. Errors or unexpected results
will happen otherwise. A convenient means to ensure the right depth is by
including a length-0 tuple as a bracket and then truncating the object
array to remove the placeholder. This can be seen in the following
example:
>>> bracket=np.array([(1.0, 1.2),(2.0, 2.5), ()], dtype=object)[:-1]
>>> z_at_value(Planck18.angular_diameter_distance, 1500 * u.Mpc,
... bracket=bracket) # doctest: +SKIP
<Quantity [0.68044452, 3.7823268] redshift>
"""
# `fval` can be a Quantity, which isn't (yet) compatible w/ `numpy.nditer`
# so we strip it of units for broadcasting and restore the units when
# passing the elements to `_z_at_scalar_value`.
fval = np.asanyarray(fval)
unit = getattr(fval, "unit", 1) # can be unitless
zmin = Quantity(zmin, cu.redshift).value # must be unitless
zmax = Quantity(zmax, cu.redshift).value
# bracket must be an object array (assumed to be correct) or a 'scalar'
# bracket: 2 or 3 elt sequence
if not isinstance(bracket, np.ndarray): # 'scalar' bracket
if bracket is not None and len(bracket) not in (2, 3):
raise ValueError(
"`bracket` is not an array nor a 2 (or 3) element sequence."
)
else: # munge bracket into a 1-elt object array
bracket = np.array([bracket, ()], dtype=object)[:1].squeeze()
if bracket.dtype != np.object_:
raise TypeError(f"`bracket` has dtype {bracket.dtype}, not 'O'")
# make multi-dimensional iterator for all but `method`, `verbose`
with np.nditer(
[fval, zmin, zmax, ztol, maxfun, bracket, None],
flags=["refs_ok"],
op_flags=[
*[["readonly"]] * 6, # ← inputs output ↓
["writeonly", "allocate", "no_subtype"],
],
op_dtypes=(*(None,) * 6, fval.dtype),
casting="no",
) as it:
for fv, zmn, zmx, zt, mfe, bkt, zs in it: # ← eltwise unpack & eval ↓
zs[...] = _z_at_scalar_value(
func,
fv * unit, # type: ignore[arg-type]
zmin=zmn,
zmax=zmx,
ztol=zt,
maxfun=mfe,
bracket=bkt.item(),
# not broadcasted
method=method,
verbose=verbose,
)
# since bracket is an object array, the output will be too, so it is
# cast to the same type as the function value.
result = it.operands[-1] # zs
return result << cu.redshift
| _ZAtValueKWArgs |
python | google__pytype | pytype/rewrite/frame_test.py | {
"start": 2623,
"end": 5477
} | class ____(FrameTestBase):
def test_store_local_in_module_frame(self):
frame = self._make_frame('', name='__main__')
frame.step()
var = self._const_var(5)
frame.store_local('x', var)
stored = frame.load_local('x')
self.assertEqual(stored, var.with_name('x'))
self.assertEqual(stored, frame.load_global('x'))
def test_store_local_in_nonmodule_frame(self):
frame = self._make_frame('', name='f')
frame.step()
var = self._const_var(5)
frame.store_local('x', var)
stored = frame.load_local('x')
self.assertEqual(stored, var.with_name('x'))
with self.assertRaises(KeyError):
frame.load_global('x')
def test_store_global_in_module_frame(self):
frame = self._make_frame('', name='__main__')
frame.step()
var = self._const_var(5)
frame.store_global('x', var)
stored = frame.load_global('x')
self.assertEqual(stored, var.with_name('x'))
self.assertEqual(stored, frame.load_local('x'))
def test_store_global_in_nonmodule_frame(self):
frame = self._make_frame('', name='f')
frame.step()
var = self._const_var(5)
frame.store_global('x', var)
stored = frame.load_global('x')
self.assertEqual(stored, var.with_name('x'))
with self.assertRaises(KeyError):
frame.load_local('x')
def test_overwrite_global_in_module_frame(self):
code = test_utils.parse('')
var = self._const_var(5)
frame = frame_lib.Frame(
self.ctx,
'__main__',
code,
initial_locals={'x': var},
initial_globals={'x': var},
)
frame.step()
self.assertEqual(frame.load_global('x'), var.with_name('x'))
self.assertEqual(frame.load_local('x'), var.with_name('x'))
var2 = self._const_var(10)
frame.store_global('x', var2)
self.assertEqual(frame.load_global('x'), var2.with_name('x'))
self.assertEqual(frame.load_local('x'), var2.with_name('x'))
def test_overwrite_global_in_nonmodule_frame(self):
code = test_utils.parse('')
var = self._const_var(5)
frame = frame_lib.Frame(self.ctx, 'f', code, initial_globals={'x': var})
frame.step()
self.assertEqual(frame.load_global('x'), var.with_name('x'))
with self.assertRaises(KeyError):
frame.load_local('x')
var2 = self._const_var(10)
frame.store_global('x', var2)
self.assertEqual(frame.load_global('x'), var2.with_name('x'))
with self.assertRaises(KeyError):
frame.load_local('x')
def test_enclosing(self):
code = test_utils.parse('')
frame = frame_lib.Frame(self.ctx, 'f', code)
frame.step()
x = self._const_var(5)
frame.store_enclosing('x', x)
with self.assertRaises(KeyError):
frame.load_local('x')
with self.assertRaises(KeyError):
frame.load_global('x')
self.assertEqual(frame.load_enclosing('x'), x.with_name('x'))
| LoadStoreTest |
python | huggingface__transformers | src/transformers/models/git/modeling_git.py | {
"start": 12854,
"end": 15346
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([GitLayer(config, i) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
pixel_values_present: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPast]:
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask,
past_key_values,
output_attentions,
pixel_values_present,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
past_key_values,
all_hidden_states,
all_self_attentions,
]
if v is not None
)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
| GitEncoder |
python | jpadilla__pyjwt | tests/test_compressed_jwt.py | {
"start": 49,
"end": 1237
} | class ____(PyJWT):
def _decode_payload(self, decoded):
return json.loads(
# wbits=-15 has zlib not worry about headers of crc's
zlib.decompress(decoded["payload"], wbits=-15).decode("utf-8")
)
def test_decodes_complete_valid_jwt_with_compressed_payload():
# Test case from https://github.com/jpadilla/pyjwt/pull/753/files
example_payload = {"hello": "world"}
example_secret = "secret"
# payload made with the pako (https://nodeca.github.io/pako/) library in Javascript:
# Buffer.from(pako.deflateRaw('{"hello": "world"}')).toString('base64')
example_jwt = (
b"eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9"
b".q1bKSM3JyVeyUlAqzy/KSVGqBQA="
b".08wHYeuh1rJXmcBcMrz6NxmbxAnCQp2rGTKfRNIkxiw="
)
decoded = CompressedPyJWT().decode_complete(
example_jwt, example_secret, algorithms=["HS256"]
)
assert decoded == {
"header": {"alg": "HS256", "typ": "JWT"},
"payload": example_payload,
"signature": (
b"\xd3\xcc\x07a\xeb\xa1\xd6\xb2W\x99\xc0\\2\xbc\xfa7"
b"\x19\x9b\xc4\t\xc2B\x9d\xab\x192\x9fD\xd2$\xc6,"
),
}
| CompressedPyJWT |
python | doocs__leetcode | solution/0700-0799/0710.Random Pick with Blacklist/Solution.py | {
"start": 0,
"end": 555
} | class ____:
def __init__(self, n: int, blacklist: List[int]):
self.k = n - len(blacklist)
self.d = {}
i = self.k
black = set(blacklist)
for b in blacklist:
if b < self.k:
while i in black:
i += 1
self.d[b] = i
i += 1
def pick(self) -> int:
x = randrange(self.k)
return self.d.get(x, x)
# Your Solution object will be instantiated and called as such:
# obj = Solution(n, blacklist)
# param_1 = obj.pick()
| Solution |
python | doocs__leetcode | solution/0600-0699/0654.Maximum Binary Tree/Solution2.py | {
"start": 192,
"end": 663
} | class ____:
def constructMaximumBinaryTree(self, nums: List[int]) -> Optional[TreeNode]:
def dfs(l, r):
if l > r:
return None
val = tree.query(1, l, r)
root = TreeNode(val)
root.left = dfs(l, d[val] - 1)
root.right = dfs(d[val] + 1, r)
return root
d = {v: i for i, v in enumerate(nums, 1)}
tree = SegmentTree(nums)
return dfs(1, len(nums))
| Solution |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_distribution_to_match_benfords_law.py | {
"start": 1326,
"end": 6181
} | class ____(ColumnAggregateMetricProvider):
"""
MetricProvider tests whether data matches Benford's Law Fraud Detection
Algorithm.
Uses a Chi-Square Goodness of Fit test with an 80@ p-value
"""
metric_name = "column.custom.DistributionMatchesBenfordsLaw"
value_keys = tuple()
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
totalVals = (column.apply(lambda x: 1.0 if x is not None else 0.0)).sum()
num1 = (column.apply(lambda x: matchFirstDigit(x, 1) if x is not None else 0.0)).sum()
num2 = (column.apply(lambda x: matchFirstDigit(x, 2) if x is not None else 0.0)).sum()
num3 = (column.apply(lambda x: matchFirstDigit(x, 3) if x is not None else 0.0)).sum()
num4 = (column.apply(lambda x: matchFirstDigit(x, 4) if x is not None else 0.0)).sum()
num5 = (column.apply(lambda x: matchFirstDigit(x, 5) if x is not None else 0.0)).sum()
num6 = (column.apply(lambda x: matchFirstDigit(x, 6) if x is not None else 0.0)).sum()
num7 = (column.apply(lambda x: matchFirstDigit(x, 7) if x is not None else 0.0)).sum()
num8 = (column.apply(lambda x: matchFirstDigit(x, 8) if x is not None else 0.0)).sum()
num9 = (column.apply(lambda x: matchFirstDigit(x, 9) if x is not None else 0.0)).sum()
listdata = [
num1 / totalVals,
num2 / totalVals,
num3 / totalVals,
num4 / totalVals,
num5 / totalVals,
num6 / totalVals,
num7 / totalVals,
num8 / totalVals,
num9 / totalVals,
]
matchvalues = []
for x in range(1, 10):
matchvalues.append(math.log(1.0 + 1.0 / x) / math.log(10))
"""
listdata: length 10
matchvalues: length 10
chi square them with 90 percent confidence
"""
stat = 0
for i in range(9):
stat += ((listdata[i] - matchvalues[i]) ** 2) / (matchvalues[i])
return not stat >= 5.071
# @metric_value(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(
# cls,
# execution_engine: "SqlAlchemyExecutionEngine",
# metric_domain_kwargs: Dict,
# metric_value_kwargs: Dict,
# metrics: Dict[Tuple, Any],
# runtime_configuration: Dict,
# ):
# (
# selectable,
# compute_domain_kwargs,
# accessor_domain_kwargs,
# ) = execution_engine.get_compute_domain(
# metric_domain_kwargs, MetricDomainTypes.COLUMN
# )
# column_name = accessor_domain_kwargs["column"]
# column = sa.column(column_name)
# sqlalchemy_engine = execution_engine.engine
# dialect = sqlalchemy_engine.dialect
# column_median = None
# # TODO: compute the value and return it
# return column_median
# @metric_value(engine=SparkDFExecutionEngine)
# def _spark(
# cls,
# execution_engine: "SqlAlchemyExecutionEngine",
# metric_domain_kwargs: Dict,
# metric_value_kwargs: Dict,
# metrics: Dict[Tuple, Any],
# runtime_configuration: Dict,
# ):
# (
# df,
# compute_domain_kwargs,
# accessor_domain_kwargs,
# ) = execution_engine.get_compute_domain(
# metric_domain_kwargs, MetricDomainTypes.COLUMN
# )
# column = accessor_domain_kwargs["column"]
# column_median = None
# # TODO: compute the value and return it
# return column_median
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
"""This should return a dictionary:
{
"dependency_name": MetricConfiguration,
...
}
"""
dependencies = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
table_domain_kwargs = {
k: v for k, v in metric.metric_domain_kwargs.items() if k != "column"
}
dependencies.update(
{"table.row_count": MetricConfiguration("table.row_count", table_domain_kwargs)}
)
if isinstance(execution_engine, SqlAlchemyExecutionEngine):
dependencies["column_values.nonnull.count"] = MetricConfiguration(
"column_values.nonnull.count", metric.metric_domain_kwargs
)
return dependencies
| ColumnDistributionMatchesBenfordsLaw |
python | numpy__numpy | numpy/polynomial/tests/test_symbol.py | {
"start": 219,
"end": 1533
} | class ____:
"""
Test polynomial creation with symbol kwarg.
"""
c = [1, 2, 3]
def test_default_symbol(self):
p = poly.Polynomial(self.c)
assert_equal(p.symbol, 'x')
@pytest.mark.parametrize(('bad_input', 'exception'), (
('', ValueError),
('3', ValueError),
(None, TypeError),
(1, TypeError),
))
def test_symbol_bad_input(self, bad_input, exception):
with pytest.raises(exception):
p = poly.Polynomial(self.c, symbol=bad_input)
@pytest.mark.parametrize('symbol', (
'x',
'x_1',
'A',
'xyz',
'β',
))
def test_valid_symbols(self, symbol):
"""
Values for symbol that should pass input validation.
"""
p = poly.Polynomial(self.c, symbol=symbol)
assert_equal(p.symbol, symbol)
def test_property(self):
"""
'symbol' attribute is read only.
"""
p = poly.Polynomial(self.c, symbol='x')
with pytest.raises(AttributeError):
p.symbol = 'z'
def test_change_symbol(self):
p = poly.Polynomial(self.c, symbol='y')
# Create new polynomial from p with different symbol
pt = poly.Polynomial(p.coef, symbol='t')
assert_equal(pt.symbol, 't')
| TestInit |
python | django__django | django/views/generic/list.py | {
"start": 5177,
"end": 6310
} | class ____(MultipleObjectMixin, View):
"""
Base view for displaying a list of objects.
This requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object_list = self.get_queryset()
allow_empty = self.get_allow_empty()
if not allow_empty:
# When pagination is enabled and object_list is a queryset,
# it's better to do a cheap query than to load the unpaginated
# queryset in memory.
if self.get_paginate_by(self.object_list) is not None and hasattr(
self.object_list, "exists"
):
is_empty = not self.object_list.exists()
else:
is_empty = not self.object_list
if is_empty:
raise Http404(
_("Empty list and “%(class_name)s.allow_empty” is False.")
% {
"class_name": self.__class__.__name__,
}
)
context = self.get_context_data()
return self.render_to_response(context)
| BaseListView |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/executor_definition.py | {
"start": 1489,
"end": 2687
} | class ____(PyEnum):
"""An ExecutorDefinition can include a list of requirements that the system uses to
check whether the executor will be able to work for a particular job execution.
"""
# The passed in IJob must be reconstructable across process boundaries
RECONSTRUCTABLE_PIPELINE = ( # This needs to still exist for folks who may have written their own executor
"RECONSTRUCTABLE_PIPELINE"
)
RECONSTRUCTABLE_JOB = "RECONSTRUCTABLE_PIPELINE"
# The DagsterInstance must be loadable in a different process
NON_EPHEMERAL_INSTANCE = "NON_EPHEMERAL_INSTANCE"
# Any op outputs on the job must be persisted
PERSISTENT_OUTPUTS = "PERSISTENT_OUTPUTS"
def multiple_process_executor_requirements() -> Sequence[ExecutorRequirement]:
return [
ExecutorRequirement.RECONSTRUCTABLE_JOB,
ExecutorRequirement.NON_EPHEMERAL_INSTANCE,
ExecutorRequirement.PERSISTENT_OUTPUTS,
]
ExecutorConfig = Mapping[str, object]
ExecutorCreationFunction: TypeAlias = Callable[["InitExecutorContext"], "Executor"]
ExecutorRequirementsFunction: TypeAlias = Callable[[ExecutorConfig], Sequence[ExecutorRequirement]]
@public
| ExecutorRequirement |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numeric.py | {
"start": 108845,
"end": 109619
} | class ____(TestCase):
def test_zero_dimension(self):
# Test resolution to issue #5663
a = np.zeros((3, 0))
b = np.zeros((0, 4))
td = np.tensordot(a, b, (1, 0))
assert_array_equal(td, np.dot(a, b))
def test_zero_dimension_einsum(self):
# Test resolution to issue #5663
a = np.zeros((3, 0))
b = np.zeros((0, 4))
td = np.tensordot(a, b, (1, 0))
assert_array_equal(td, np.einsum("ij,jk", a, b))
def test_zero_dimensional(self):
# gh-12130
arr_0d = np.array(1)
ret = np.tensordot(
arr_0d, arr_0d, ([], [])
) # contracting no axes is well defined
assert_array_equal(ret, arr_0d)
if __name__ == "__main__":
run_tests()
| TestTensordot |
python | numpy__numpy | numpy/lib/tests/test_io.py | {
"start": 107817,
"end": 108005
} | class ____:
def __init__(self, base):
self.base = base
def write(self, s):
return self.base.write(s)
def flush(self):
return self.base.flush()
| JustWriter |
python | google__jax | jax/_src/api.py | {
"start": 71451,
"end": 77723
} | class ____(NamedTuple):
flat_fun: lu.WrappedFun
in_tree: PyTreeDef
out_tree: Callable[[], PyTreeDef]
flat_args: Sequence[Any]
donated_invars: Sequence[bool]
in_axes_flat: Sequence[int | None]
local_axis_size: int
out_axes_thunk: Callable
devices: Sequence[xc.Device] | None
global_axis_size: int
is_explicit_global_axis_size: bool
def _get_global_axis_size(local_axis_size: int, in_devices, backend_name: str,
global_axis_size: int | None):
"""Determine global_axis_size for multi-host pmap."""
# TODO(mattjj,skyewm): revive this check (inner_pmap always False now)
# if xb.process_count() > 1 and global_axis_size is None and inner_pmap:
# raise ValueError("'axis_size' must be specified for nested multi-host pmaps")
if (xb.process_count() == 1 and global_axis_size is not None and
global_axis_size != local_axis_size):
raise ValueError(
f"Specified axis_size {global_axis_size} doesn't match received "
f"axis_size {local_axis_size}.")
if in_devices is not None and backend_name is None:
backend = xb.get_device_backend(in_devices[0])
else:
backend = xb.get_backend(backend_name)
if global_axis_size is None:
if xb.process_count(backend) == 1:
global_axis_size = local_axis_size
elif in_devices is not None:
global_axis_size = len(in_devices)
else:
global_axis_size = local_axis_size * xb.process_count(backend)
assert all(
len(xb.local_devices(pi, backend)) == xb.local_device_count(backend)
for pi in range(xb.process_count(backend)))
return global_axis_size
def _prepare_pmap(fun: Callable, in_axes, out_axes, static_broadcasted_tuple,
donate_tuple, in_devices, backend_name,
axis_size, args, kwargs):
if in_devices is not None and len(in_devices) == 0:
raise ValueError("'devices' argument to pmap must be non-empty, or None.")
dbg = debug_info(
"pmap", fun, args, kwargs,
static_argnums=static_broadcasted_tuple)
f = lu.wrap_init(fun, debug_info=dbg)
del dbg
if static_broadcasted_tuple:
if max(static_broadcasted_tuple) >= len(args):
raise ValueError(
f"pmapped function has static_broadcasted_argnums={static_broadcasted_tuple}"
f" but was called with only {len(args)} positional "
f"argument{'s' if len(args) > 1 else ''}. "
"All static broadcasted arguments must be passed positionally.")
dyn_argnums = [i for i in range(len(args))
if i not in static_broadcasted_tuple]
f, dyn_args = argnums_partial(f, dyn_argnums, args)
if isinstance(in_axes, tuple):
dyn_in_axes = tuple(in_axes[i] for i in dyn_argnums)
else:
dyn_in_axes = in_axes
else:
dyn_args, dyn_in_axes = args, in_axes
args, in_tree = tree_flatten((dyn_args, kwargs))
if donate_tuple and not config.debug_nans.value:
donated_invars = donation_vector(donate_tuple, (), in_tree)
else:
donated_invars = (False,) * len(args)
try:
in_axes_flat = tuple(broadcast_prefix((dyn_in_axes, 0), (dyn_args, kwargs),
is_leaf=lambda x: x is None))
except ValueError:
e, *_ = prefix_errors((dyn_in_axes, 0), (dyn_args, kwargs))
ex = e('pmap in_axes')
msg, = ex.args
msg += ("\n\nThe 'full pytree' here is the tuple of arguments passed "
"positionally to the pmapped function, and the value of `in_axes` "
"must be a tree prefix of that tuple. But it was not a prefix.")
if kwargs:
msg += ("\n\nWhen some arguments are passed by keyword to the pmapped "
"function, they are not included in the comparison to `in_axes`. "
"Instead, each argument passed by keyword is mapped over its "
"leading axis. See the description of `in_axes` in the `pmap` "
"docstring: "
"https://docs.jax.dev/en/latest/_autosummary/jax.pmap.html#jax.pmap")
msg += ("\n\nCheck that the value of the `in_axes` argument to `pmap` "
"is a tree prefix of the tuple of arguments passed positionally to "
"the pmapped function.")
raise ValueError(msg) from None
local_axis_size = _mapped_axis_size(fun, in_tree, args, in_axes_flat, "pmap")
f, out_axes_thunk = flat_out_axes(f, out_axes)
flat_fun, out_tree = flatten_fun(f, in_tree)
is_explicit_global_axis_size = axis_size is not None
global_axis_size = _get_global_axis_size(local_axis_size, in_devices,
backend_name, axis_size)
return PmapCallInfo(flat_fun=flat_fun,
in_tree=in_tree,
out_tree=out_tree,
flat_args=args,
donated_invars=donated_invars,
in_axes_flat=in_axes_flat,
local_axis_size=local_axis_size,
out_axes_thunk=out_axes_thunk,
devices=None if in_devices is None else tuple(in_devices),
global_axis_size=global_axis_size,
is_explicit_global_axis_size=is_explicit_global_axis_size)
def _shared_code_pmap(fun, axis_name, static_broadcasted_argnums,
donate_argnums, in_axes, out_axes):
# axis_size is an optional integer representing the global axis size. The
# aggregate size (across all processes) size of the mapped axis must match the
# given value.
check_callable(fun)
axis_name = core._TempAxisName(fun) if axis_name is None else axis_name
static_broadcasted_tuple = _ensure_index_tuple(static_broadcasted_argnums)
donate_tuple = rebase_donate_argnums(
_ensure_index_tuple(donate_argnums), static_broadcasted_tuple)
if not all(type(l) is int for l in tree_leaves(in_axes)):
raise TypeError("pmap in_axes must be an int, None, or (nested) container "
f"with those types as leaves, but got {in_axes}.")
if not all(type(l) is int for l in tree_leaves(out_axes)):
raise TypeError("pmap out_axes must be an int, None, or (nested) container "
f"with those types as leaves, but got {out_axes}.")
return axis_name, static_broadcasted_tuple, donate_tuple
| PmapCallInfo |
python | SmileyChris__easy-thumbnails | easy_thumbnails/management/commands/thumbnail_cleanup.py | {
"start": 5216,
"end": 6342
} | class ____(BaseCommand):
help = """ Deletes thumbnails that no longer have an original file. """
def add_arguments(self, parser):
parser.add_argument(
'--dry-run',
action='store_true',
dest='dry_run',
default=False,
help='Dry run the execution.')
parser.add_argument(
'--last-n-days',
action='store',
dest='last_n_days',
default=0,
type=int,
help='The number of days back in time to clean thumbnails for.')
parser.add_argument(
'--path',
action='store',
dest='cleanup_path',
type=str,
help='Specify a path to clean up.')
def handle(self, *args, **options):
tcc = ThumbnailCollectionCleaner(self.stdout, self.stderr)
tcc.clean_up(
dry_run=options.get('dry_run', False),
verbosity=int(options.get('verbosity', 1)),
last_n_days=int(options.get('last_n_days', 0)),
cleanup_path=options.get('cleanup_path'))
tcc.print_stats()
| Command |
python | tensorflow__tensorflow | tensorflow/python/dlpack/dlpack_test.py | {
"start": 1899,
"end": 4492
} | class ____(parameterized.TestCase, test.TestCase):
@parameterized.named_parameters(GetNamedTestParameters())
def testRoundTrip(self, dtype, shape):
np.random.seed(42)
if dtype == np.bool_:
np_array = np.random.randint(0, 1, shape, np.bool_)
else:
np_array = np.random.randint(0, 10, shape)
# copy to gpu if available
tf_tensor = array_ops.identity(constant_op.constant(np_array, dtype=dtype))
tf_tensor_device = tf_tensor.device
tf_tensor_dtype = tf_tensor.dtype
dlcapsule = dlpack.to_dlpack(tf_tensor)
del tf_tensor # should still work
tf_tensor2 = dlpack.from_dlpack(dlcapsule)
self.assertAllClose(np_array, tf_tensor2)
if tf_tensor_dtype == dtypes.int32:
# int32 tensor is always on cpu for now
self.assertEqual(tf_tensor2.device,
"/job:localhost/replica:0/task:0/device:CPU:0")
else:
self.assertEqual(tf_tensor_device, tf_tensor2.device)
def testRoundTripWithoutToDlpack(self):
np_array = np.random.randint(0, 10, [42])
self.assertAllEqual(
np.from_dlpack(constant_op.constant(np_array).cpu()), np_array
)
def testTensorsCanBeConsumedOnceOnly(self):
np.random.seed(42)
np_array = np.random.randint(0, 10, (2, 3, 4))
tf_tensor = constant_op.constant(np_array, dtype=np.float32)
dlcapsule = dlpack.to_dlpack(tf_tensor)
del tf_tensor # should still work
_ = dlpack.from_dlpack(dlcapsule)
def ConsumeDLPackTensor():
dlpack.from_dlpack(dlcapsule) # Should can be consumed only once
self.assertRaisesRegex(Exception,
".*a DLPack tensor may be consumed at most once.*",
ConsumeDLPackTensor)
def testDLPackFromWithoutContextInitialization(self):
tf_tensor = constant_op.constant(1)
dlcapsule = dlpack.to_dlpack(tf_tensor)
# Resetting the context doesn't cause an error.
context._reset_context()
_ = dlpack.from_dlpack(dlcapsule)
def testUnsupportedTypeToDLPack(self):
def UnsupportedQint16():
tf_tensor = constant_op.constant([[1, 4], [5, 2]], dtype=dtypes.qint16)
_ = dlpack.to_dlpack(tf_tensor)
self.assertRaisesRegex(Exception, ".* is not supported by dlpack",
UnsupportedQint16)
def testMustPassTensorArgumentToDLPack(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"The argument to `to_dlpack` must be a TF tensor, not Python object"):
dlpack.to_dlpack([1])
if __name__ == "__main__":
ops.enable_eager_execution()
test.main()
| DLPackTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 328753,
"end": 330795
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of UpdateCheckRun"""
__schema__ = github_schema
__field_names__ = (
"repository_id",
"check_run_id",
"name",
"details_url",
"external_id",
"status",
"started_at",
"conclusion",
"completed_at",
"output",
"actions",
"client_mutation_id",
)
repository_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="repositoryId")
"""The node ID of the repository."""
check_run_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="checkRunId")
"""The node of the check."""
name = sgqlc.types.Field(String, graphql_name="name")
"""The name of the check."""
details_url = sgqlc.types.Field(URI, graphql_name="detailsUrl")
"""The URL of the integrator's site that has the full details of the
check.
"""
external_id = sgqlc.types.Field(String, graphql_name="externalId")
"""A reference for the run on the integrator's system."""
status = sgqlc.types.Field(RequestableCheckStatusState, graphql_name="status")
"""The current status."""
started_at = sgqlc.types.Field(DateTime, graphql_name="startedAt")
"""The time that the check run began."""
conclusion = sgqlc.types.Field(CheckConclusionState, graphql_name="conclusion")
"""The final conclusion of the check."""
completed_at = sgqlc.types.Field(DateTime, graphql_name="completedAt")
"""The time that the check run finished."""
output = sgqlc.types.Field(CheckRunOutput, graphql_name="output")
"""Descriptive details about the run."""
actions = sgqlc.types.Field(sgqlc.types.list_of(sgqlc.types.non_null(CheckRunAction)), graphql_name="actions")
"""Possible further actions the integrator can perform, which a user
may trigger.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| UpdateCheckRunInput |
python | py-pdf__pypdf | pypdf/constants.py | {
"start": 8011,
"end": 8317
} | class ____:
"""
Table 4.4.
Table 8 in the 2.0 reference.
"""
PREDICTOR = "/Predictor" # integer
COLORS = "/Colors" # integer
BITS_PER_COMPONENT = "/BitsPerComponent" # integer
COLUMNS = "/Columns" # integer
EARLY_CHANGE = "/EarlyChange" # integer
| LzwFilterParameters |
python | sphinx-doc__sphinx | sphinx/builders/linkcheck.py | {
"start": 26528,
"end": 27327
} | class ____(HTMLParser):
"""Specialised HTML parser that looks for a specific anchor."""
def __init__(self, search_anchor: str) -> None:
super().__init__()
self.search_anchor = search_anchor
self.found = False
def handle_starttag(self, tag: Any, attrs: Any) -> None:
for key, value in attrs:
if key in {'id', 'name'} and value == self.search_anchor:
self.found = True
break
def _allowed_redirect(
url: str, new_url: str, allowed_redirects: dict[re.Pattern[str], re.Pattern[str]]
) -> bool:
if allowed_redirects is _SENTINEL_LAR:
return False
return any(
from_url.match(url) and to_url.match(new_url)
for from_url, to_url in allowed_redirects.items()
)
| AnchorCheckParser |
python | apache__airflow | task-sdk/src/airflow/sdk/exceptions.py | {
"start": 5624,
"end": 5718
} | class ____(BaseException):
"""Raise when the task execution times-out."""
| AirflowTaskTimeout |
python | mlflow__mlflow | mlflow/pyfunc/scoring_server/__init__.py | {
"start": 11426,
"end": 15558
} | class ____(NamedTuple):
response: str
status: int
mimetype: str
def invocations(data, content_type, model, input_schema):
type_parts = list(map(str.strip, content_type.split(";")))
mime_type = type_parts[0]
parameter_value_pairs = type_parts[1:]
parameter_values = {
key: value for pair in parameter_value_pairs for key, _, value in [pair.partition("=")]
}
charset = parameter_values.get("charset", "utf-8").lower()
if charset != "utf-8":
return InvocationsResponse(
response="The scoring server only supports UTF-8",
status=415,
mimetype="text/plain",
)
if unexpected_content_parameters := set(parameter_values.keys()).difference({"charset"}):
return InvocationsResponse(
response=(
f"Unrecognized content type parameters: "
f"{', '.join(unexpected_content_parameters)}. "
f"{SCORING_PROTOCOL_CHANGE_INFO}"
),
status=415,
mimetype="text/plain",
)
# The traditional JSON request/response format, wraps the data with one of the supported keys
# like "dataframe_split" and "predictions". For LLM use cases, we also support unwrapped JSON
# payload, to provide unified prediction interface.
should_parse_as_unified_llm_input = False
if mime_type == CONTENT_TYPE_CSV:
# Convert from CSV to pandas
if isinstance(data, bytes):
data = data.decode("utf-8")
csv_input = StringIO(data)
data = parse_csv_input(csv_input=csv_input, schema=input_schema)
params = None
elif mime_type == CONTENT_TYPE_JSON:
parsed_json_input = _parse_json_data(data, model.metadata, input_schema)
data = parsed_json_input.data
params = parsed_json_input.params
should_parse_as_unified_llm_input = parsed_json_input.is_unified_llm_input
else:
return InvocationsResponse(
response=(
"This predictor only supports the following content types:"
f" Types: {CONTENT_TYPES}."
f" Got '{content_type}'."
),
status=415,
mimetype="text/plain",
)
# Do the prediction
# NB: utils._validate_serving_input mimic the scoring process here to validate input_example
# work for serving, so any changes here should be reflected there as well
try:
if "params" in inspect.signature(model.predict).parameters:
raw_predictions = model.predict(data, params=params)
else:
_log_warning_if_params_not_in_predict_signature(_logger, params)
raw_predictions = model.predict(data)
except MlflowException as e:
if "Failed to enforce schema" in e.message:
_logger.warning(
"If using `instances` as input key, we internally convert "
"the data type from `records` (List[Dict]) type to "
"`list` (Dict[str, List]) type if the data is a pandas "
"dataframe representation. This might cause schema changes. "
"Please use `inputs` to avoid this conversion.\n"
)
e.message = f"Failed to predict data '{data}'. \nError: {e.message}"
raise e
except Exception:
raise MlflowException(
message=(
"Encountered an unexpected error while evaluating the model. Verify"
" that the serialized input Dataframe is compatible with the model for"
" inference."
),
error_code=BAD_REQUEST,
stack_trace=traceback.format_exc(),
)
result = StringIO()
# if the data was formatted using the unified LLM format,
# then return the data without the "predictions" key
if should_parse_as_unified_llm_input:
unwrapped_predictions_to_json(raw_predictions, result)
else:
predictions_to_json(raw_predictions, result)
return InvocationsResponse(response=result.getvalue(), status=200, mimetype="application/json")
| InvocationsResponse |
python | tensorflow__tensorflow | tensorflow/tools/ci_build/osx/arm64/tensorflow_metal_plugin_test.py | {
"start": 141897,
"end": 145361
} | class ____(test.TestCase):
def _testRandom(self, dtype):
# Random dims of rank 5
shape = np.random.randint(1, 5, size=5)
# Random number of tensors, but always > 1.
num_tensors = np.random.randint(2, 10)
# Random dim to concat on
concat_dim = np.random.randint(5)
params = {}
if dtype == dtypes.bfloat16:
dtype_feed = dtypes.float32
else:
dtype_feed = dtype
with self.cached_session(use_gpu=True):
p = []
for i in np.arange(num_tensors):
input_shape = shape
input_shape[concat_dim] = np.random.randint(1, 5)
placeholder = array_ops.placeholder(dtype_feed, shape=input_shape)
p.append(placeholder)
t = dtype_feed.as_numpy_dtype
params[placeholder] = np.random.rand(*input_shape).astype(t)
if dtype != dtype_feed:
concat_inputs = [math_ops.cast(p_i, dtype) for p_i in p]
else:
concat_inputs = p
c = array_ops.concat(concat_inputs, concat_dim)
if dtype != dtype_feed:
c = math_ops.cast(c, dtype_feed)
result = c.eval(feed_dict=params)
self.assertEqual(result.shape, c.get_shape())
cur_offset = 0
for i in np.arange(num_tensors):
# The index into the result is the ':' along all dimensions
# except the concat_dim. slice(0, size) is used for ':', and
# a list of slices is used to index into result.
ind = [slice(0, params[p[i]].shape[j]) for j in np.arange(5)]
ind[concat_dim] = slice(
cur_offset, cur_offset + params[p[i]].shape[concat_dim]
)
cur_offset += params[p[i]].shape[concat_dim]
if dtype == dtype_feed:
self.assertAllEqual(result[tuple(ind)], params[p[i]])
else:
self.assertAllClose(result[tuple(ind)], params[p[i]], 0.01)
@test_util.run_deprecated_v1
def testRandom(self):
self._testRandom(dtypes.bfloat16.as_numpy_dtype)
self._testRandom(dtypes.float16)
self._testRandom(dtypes.float32)
self._testRandom(dtypes.int32)
self._testRandom(dtypes.int64)
def _RunAndVerifyGradientsRandom(self, dtype=dtypes.float32.as_numpy_dtype):
# Random dims of rank 5
input_shape = np.random.randint(1, 5, size=5)
# Random number of tensors
num_tensors = np.random.randint(12, 20)
# Random dim to concat on
concat_dim = np.random.randint(5)
concat_dim_sizes = np.random.randint(1, 5, size=num_tensors)
with test_util.use_gpu():
inp = []
inp_tensors = []
for x in concat_dim_sizes:
shape = input_shape
shape[concat_dim] = x
t = np.random.rand(*shape).astype(dtype)
inp.append(t)
inp_tensors.append(
constant_op.constant(t.flatten(), shape=shape, dtype=dtype)
)
c = array_ops.concat(inp_tensors, concat_dim)
output_shape = input_shape
output_shape[concat_dim] = concat_dim_sizes.sum()
grad_inp = np.random.rand(*output_shape).astype(dtype)
grad_tensor = constant_op.constant(grad_inp.flatten(), shape=output_shape)
grad = gradients_impl.gradients([c], inp_tensors, [grad_tensor])
concated_grad = array_ops.concat(grad, concat_dim)
result = self.evaluate(concated_grad)
self.assertAllEqual(result, grad_inp)
@test_util.run_deprecated_v1
def testGradientsRandom(self):
for _ in range(5):
self._RunAndVerifyGradientsRandom()
self._RunAndVerifyGradientsRandom(dtypes.bfloat16.as_numpy_dtype)
| ConcatOpTest |
python | getsentry__sentry | tests/sentry/tasks/test_code_owners.py | {
"start": 805,
"end": 12006
} | class ____(TestCase):
def setUp(self) -> None:
self.login_as(user=self.user)
self.team = self.create_team(
organization=self.organization, slug="tiger-team", members=[self.user]
)
self.project = self.project = self.create_project(
organization=self.organization, teams=[self.team], slug="bengal"
)
self.repo = Repository.objects.create(
name="example", organization_id=self.organization.id, integration_id=self.integration.id
)
self.code_mapping = self.create_code_mapping(
repo=self.repo,
project=self.project,
)
self.data = {
"raw": "docs/* @NisanthanNanthakumar @getsentry/ecosystem\n",
}
self.ownership = ProjectOwnership.objects.create(
project=self.project, auto_assignment=True, codeowners_auto_sync=True
)
self.code_owners = self.create_codeowners(
self.project, self.code_mapping, raw=self.data["raw"]
)
def test_simple(self) -> None:
with self.tasks() and self.feature({"organizations:integrations-codeowners": True}):
# new external team mapping
self.external_team = self.create_external_team(integration=self.integration)
update_code_owners_schema(
organization=self.organization.id, integration=self.integration.id
)
code_owners = ProjectCodeOwners.objects.get(id=self.code_owners.id)
assert code_owners.schema == {
"$version": 1,
"rules": [
{
"matcher": {"type": "codeowners", "pattern": "docs/*"},
"owners": [
{"type": "team", "identifier": "tiger-team", "id": self.team.id},
],
}
],
}
with self.tasks() and self.feature({"organizations:integrations-codeowners": True}):
# delete external team mapping
ExternalActor.objects.get(id=self.external_team.id).delete()
update_code_owners_schema(
organization=self.organization.id, integration=self.integration.id
)
code_owners = ProjectCodeOwners.objects.get(id=self.code_owners.id)
assert code_owners.schema == {"$version": 1, "rules": []}
@freeze_time("2023-01-01 00:00:00")
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_codeowner_file",
return_value=LATEST_GITHUB_CODEOWNERS,
)
def test_codeowners_auto_sync_successful(self, mock_get_codeowner_file: MagicMock) -> None:
code_owners = ProjectCodeOwners.objects.get(id=self.code_owners.id)
assert code_owners.raw == self.data["raw"]
with self.tasks() and self.feature({"organizations:integrations-codeowners": True}):
self.create_external_team()
self.create_external_user(external_name="@NisanthanNanthakumar")
commit = Commit.objects.create(
repository_id=self.repo.id,
organization_id=self.organization.id,
key="1234",
message="Initial commit",
)
CommitFileChange.objects.create(
organization_id=self.organization.id,
commit_id=commit.id,
filename=".github/CODEOWNERS",
type="A",
)
code_owners_auto_sync(commit.id)
code_owners = ProjectCodeOwners.objects.get(id=self.code_owners.id)
assert code_owners.raw == LATEST_GITHUB_CODEOWNERS["raw"]
assert code_owners.schema == {
"$version": 1,
"rules": [
{
"matcher": {"pattern": "docs/*", "type": "codeowners"},
"owners": [
{"identifier": "admin@localhost", "type": "user", "id": self.user.id},
{"identifier": "tiger-team", "type": "team", "id": self.team.id},
],
},
{
"matcher": {"pattern": "*", "type": "codeowners"},
"owners": [
{"identifier": "admin@localhost", "type": "user", "id": self.user.id}
],
},
],
}
assert code_owners.date_updated.strftime("%Y-%m-%d %H:%M:%S") == "2023-01-01 00:00:00"
@patch(
"sentry.integrations.github.integration.GitHubIntegration.get_codeowner_file",
return_value=None,
)
@patch("sentry.notifications.notifications.codeowners_auto_sync.AutoSyncNotification.send")
def test_codeowners_auto_sync_failed_to_fetch_file(
self,
mock_send_email: MagicMock,
mock_get_codeowner_file: MagicMock,
) -> None:
code_owners = ProjectCodeOwners.objects.get(id=self.code_owners.id)
assert code_owners.raw == self.data["raw"]
with self.tasks() and self.feature({"organizations:integrations-codeowners": True}):
commit = Commit.objects.create(
repository_id=self.repo.id,
organization_id=self.organization.id,
key="1234",
message="Initial commit",
)
CommitFileChange.objects.create(
organization_id=self.organization.id,
commit_id=commit.id,
filename=".github/CODEOWNERS",
type="A",
)
code_owners_auto_sync(commit.id)
code_owners = ProjectCodeOwners.objects.get(id=self.code_owners.id)
assert code_owners.raw == self.data["raw"]
mock_send_email.assert_called_once_with()
@patch("sentry.tasks.codeowners.code_owners_auto_sync")
def test_commit_file_change_triggers_auto_sync_task(
self, mock_code_owners_auto_sync: MagicMock
) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
commit = Commit.objects.create(
repository_id=self.repo.id,
organization_id=self.organization.id,
key="1234",
message="Initial commit",
)
CommitFileChange.objects.create(
organization_id=self.organization.id,
commit_id=commit.id,
filename=".github/CODEOWNERS",
type="A",
)
mock_code_owners_auto_sync.delay.assert_called_once_with(commit_id=commit.id)
@patch("sentry.tasks.codeowners.code_owners_auto_sync")
def test_commit_file_change_triggers_auto_sync_task_modified(
self, mock_code_owners_auto_sync: MagicMock
) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
commit = Commit.objects.create(
repository_id=self.repo.id,
organization_id=self.organization.id,
key="1234",
message="Initial commit",
)
CommitFileChange.objects.create(
organization_id=self.organization.id,
commit_id=commit.id,
filename="CODEOWNERS",
type="M",
)
mock_code_owners_auto_sync.delay.assert_called_once_with(commit_id=commit.id)
@patch("sentry.tasks.codeowners.code_owners_auto_sync")
def test_commit_file_change_does_not_trigger_auto_sync_for_deleted_file(
self, mock_code_owners_auto_sync: MagicMock
) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
commit = Commit.objects.create(
repository_id=self.repo.id,
organization_id=self.organization.id,
key="1234",
message="Initial commit",
)
CommitFileChange.objects.create(
organization_id=self.organization.id,
commit_id=commit.id,
filename=".github/CODEOWNERS",
type="D",
)
mock_code_owners_auto_sync.delay.assert_not_called()
@patch("sentry.tasks.codeowners.code_owners_auto_sync")
def test_commit_file_change_does_not_trigger_auto_sync_for_non_codeowners_file(
self, mock_code_owners_auto_sync: MagicMock
) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
commit = Commit.objects.create(
repository_id=self.repo.id,
organization_id=self.organization.id,
key="1234",
message="Initial commit",
)
CommitFileChange.objects.create(
organization_id=self.organization.id,
commit_id=commit.id,
filename="src/main.py",
type="A",
)
mock_code_owners_auto_sync.delay.assert_not_called()
@patch("sentry.tasks.codeowners.code_owners_auto_sync")
def test_bulk_create_commit_file_changes_does_not_trigger_auto_sync_task(
self, mock_code_owners_auto_sync: MagicMock
) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
commit = Commit.objects.create(
repository_id=self.repo.id,
organization_id=self.organization.id,
key="1234",
message="Initial commit",
)
change1 = CommitFileChange(
organization_id=self.organization.id,
commit_id=commit.id,
filename=".github/CODEOWNERS",
type="M",
)
change2 = CommitFileChange(
organization_id=self.organization.id,
commit_id=commit.id,
filename="src/main.py",
type="A",
)
CommitFileChange.objects.bulk_create([change1, change2])
mock_code_owners_auto_sync.delay.assert_not_called()
@patch("sentry.tasks.codeowners.code_owners_auto_sync")
def test_post_bulk_create_commit_file_changes_triggers_auto_sync_task(
self, mock_code_owners_auto_sync: MagicMock
) -> None:
with self.feature({"organizations:integrations-codeowners": True}):
commit = Commit.objects.create(
repository_id=self.repo.id,
organization_id=self.organization.id,
key="1234",
message="Initial commit",
)
change1 = CommitFileChange(
organization_id=self.organization.id,
commit_id=commit.id,
filename=".github/CODEOWNERS",
type="M",
)
change2 = CommitFileChange(
organization_id=self.organization.id,
commit_id=commit.id,
filename="src/main.py",
type="A",
)
file_changes = [change1, change2]
CommitFileChange.objects.bulk_create(file_changes)
post_bulk_create(file_changes)
mock_code_owners_auto_sync.delay.assert_called_once_with(commit_id=commit.id)
| CodeOwnersTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 925315,
"end": 925882
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of RemoveEnterpriseIdentityProvider"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "identity_provider")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
identity_provider = sgqlc.types.Field("EnterpriseIdentityProvider", graphql_name="identityProvider")
"""The identity provider that was removed from the enterprise."""
| RemoveEnterpriseIdentityProviderPayload |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 284420,
"end": 288444
} | class ____(TypedDict, total=False):
"""
:class:`altair.TitleConfig` ``TypedDict`` wrapper.
Parameters
----------
align
Horizontal text alignment for title text. One of ``"left"``, ``"center"``, or
``"right"``.
anchor
The anchor position for placing the title and subtitle text. One of ``"start"``,
``"middle"``, or ``"end"``. For example, with an orientation of top these anchor
positions map to a left-, center-, or right-aligned title.
angle
Angle in degrees of title and subtitle text.
aria
A boolean flag indicating if `ARIA attributes
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ should be
included (SVG output only). If ``false``, the "aria-hidden" attribute will be set on
the output SVG group, removing the title from the ARIA accessibility tree.
**Default value:** ``true``
baseline
Vertical text baseline for title and subtitle text. One of ``"alphabetic"``
(default), ``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, or
``"line-bottom"``. The ``"line-top"`` and ``"line-bottom"`` values operate similarly
to ``"top"`` and ``"bottom"``, but are calculated relative to the *lineHeight*
rather than *fontSize* alone.
color
Text color for title text.
dx
Delta offset for title and subtitle text x-coordinate.
dy
Delta offset for title and subtitle text y-coordinate.
font
Font name for title text.
fontSize
Font size in pixels for title text.
fontStyle
Font style for title text.
fontWeight
Font weight for title text. This can be either a string (e.g ``"bold"``,
``"normal"``) or a number (``100``, ``200``, ``300``, ..., ``900`` where
``"normal"`` = ``400`` and ``"bold"`` = ``700``).
frame
The reference frame for the anchor position, one of ``"bounds"`` (to anchor relative
to the full bounding box) or ``"group"`` (to anchor relative to the group width or
height).
limit
The maximum allowed length in pixels of title and subtitle text.
lineHeight
Line height in pixels for multi-line title text or title text with ``"line-top"`` or
``"line-bottom"`` baseline.
offset
The orthogonal offset in pixels by which to displace the title group from its
position along the edge of the chart.
orient
Default title orientation (``"top"``, ``"bottom"``, ``"left"``, or ``"right"``)
subtitleColor
Text color for subtitle text.
subtitleFont
Font name for subtitle text.
subtitleFontSize
Font size in pixels for subtitle text.
subtitleFontStyle
Font style for subtitle text.
subtitleFontWeight
Font weight for subtitle text. This can be either a string (e.g ``"bold"``,
``"normal"``) or a number (``100``, ``200``, ``300``, ..., ``900`` where
``"normal"`` = ``400`` and ``"bold"`` = ``700``).
subtitleLineHeight
Line height in pixels for multi-line subtitle text.
subtitlePadding
The padding in pixels between title and subtitle text.
zindex
The integer z-index indicating the layering of the title group relative to other
axis, mark, and legend groups.
**Default value:** ``0``.
"""
align: Align_T
anchor: TitleAnchor_T
angle: float
aria: bool
baseline: TextBaseline_T
color: ColorHex | ColorName_T | None
dx: float
dy: float
font: str
fontSize: float
fontStyle: str
fontWeight: FontWeight_T
frame: str | TitleFrame_T
limit: float
lineHeight: float
offset: float
orient: TitleOrient_T
subtitleColor: ColorHex | ColorName_T | None
subtitleFont: str
subtitleFontSize: float
subtitleFontStyle: str
subtitleFontWeight: FontWeight_T
subtitleLineHeight: float
subtitlePadding: float
zindex: float
| TitleConfigKwds |
python | ApeWorX__ape | src/ape/pytest/warnings.py | {
"start": 111,
"end": 483
} | class ____(Warning):
"""
Occurs when fixtures disrupt isolation causing performance degradation.
"""
def warn_invalid_isolation():
message = (
"Invalid isolation; Ensure session|package|module|class scoped fixtures "
"run earlier. Rebasing fixtures is costly."
)
warnings.warn(message, InvalidIsolationWarning)
| InvalidIsolationWarning |
python | scrapy__scrapy | scrapy/spidermiddlewares/referer.py | {
"start": 3313,
"end": 3794
} | class ____(ReferrerPolicy):
"""
https://www.w3.org/TR/referrer-policy/#referrer-policy-no-referrer
The simplest policy is "no-referrer", which specifies that no referrer information
is to be sent along with requests made from a particular request client to any origin.
The header will be omitted entirely.
"""
name: str = POLICY_NO_REFERRER
def referrer(self, response_url: str, request_url: str) -> str | None:
return None
| NoReferrerPolicy |
python | ApeWorX__ape | src/ape/managers/converters.py | {
"start": 903,
"end": 1549
} | class ____(ConverterAPI):
"""
A converter that converts ``str`` to ``HexBytes``.
NOTE: This utility converter ensures that all bytes args can accept hex too
"""
def is_convertible(self, value: Any) -> bool:
return (
(isinstance(value, str) and is_hex(value))
or isinstance(value, bytes)
or isinstance(value, int)
)
def convert(self, value: str) -> bytes:
"""
Convert the given value to ``HexBytes``.
Args:
value (str): The value to convert.
Returns:
bytes
"""
return HexBytes(value)
| HexConverter |
python | google__pytype | pytype/pytd/main_test.py | {
"start": 213,
"end": 2527
} | class ____(unittest.TestCase):
"""Test pytd/main.py."""
def setUp(self):
super().setUp()
# Save the value of sys.argv (which will be restored in tearDown), so that
# tests can overwrite it.
self._sys_argv = sys.argv
def tearDown(self):
super().tearDown()
sys.argv = self._sys_argv
def test_parse_opts(self):
argument_parser = pytd_tool.make_parser()
opts = argument_parser.parse_args([
"--optimize",
"--lossy",
"--max-union=42",
"--use-abcs",
"--remove-mutable",
"--python_version=3.9",
"in.pytd",
"out.pytd",
])
self.assertTrue(opts.optimize)
self.assertTrue(opts.lossy)
self.assertEqual(opts.max_union, 42)
self.assertTrue(opts.use_abcs)
self.assertTrue(opts.remove_mutable)
self.assertEqual(opts.python_version, "3.9")
self.assertEqual(opts.input, "in.pytd")
self.assertEqual(opts.output, "out.pytd")
def test_version_error(self):
sys.argv = ["main.py", "--python_version=4.0"]
with self.assertRaises(SystemExit):
pytd_tool.main()
def test_missing_input(self):
sys.argv = ["main.py"]
with self.assertRaises(SystemExit):
pytd_tool.main()
def test_parse_error(self):
with test_utils.Tempdir() as d:
inpath = d.create_file("in.pytd", "def f(x): str") # malformed pytd
sys.argv = ["main.py", inpath]
with self.assertRaises(SystemExit):
pytd_tool.main()
def test_no_output(self):
with test_utils.Tempdir() as d:
inpath = d.create_file("in.pytd", "def f(x) -> str: ...")
# Not specifying an output is fine; the tool simply checks that the input
# file is parseable.
sys.argv = ["main.py", inpath]
pytd_tool.main()
def test_output(self):
with test_utils.Tempdir() as d:
src = textwrap.dedent("""
from typing import overload
@overload
def f(x: int) -> str: ...
@overload
def f(x: str) -> str: ...
""").strip()
inpath = d.create_file("in.pytd", src)
outpath = path_utils.join(d.path, "out.pytd")
sys.argv = ["main.py", inpath, outpath]
pytd_tool.main()
with open(outpath) as f:
self.assertMultiLineEqual(f.read(), src)
if __name__ == "__main__":
unittest.main()
| TestPytdTool |
python | pytorch__pytorch | torch/_inductor/runtime/triton_heuristics.py | {
"start": 141480,
"end": 141951
} | class ____(GridExpr):
def generate(self, meta: dict[str, int]) -> None:
for candidate in self.inductor_meta["precomputed_grids"]:
if all(meta.get(k) == v for k, v in candidate["config"].items()):
self.x_grid, self.y_grid, self.z_grid = candidate[self.mode]
return
raise AssertionError(
f"Precomputed grid not found for {meta} in {self.inductor_meta['precomputed_grids']}"
)
| PrecomputedGrid |
python | google__pytype | pytype/pytd/codegen/namedtuple.py | {
"start": 133,
"end": 1427
} | class ____:
"""Construct a class for a new named tuple."""
# This is called from the pyi parser, to convert a namedtuple constructed by a
# functional constructor into a NamedTuple subclass.
def __init__(self, base_name, fields, generated_classes):
# Handle previously defined NamedTuples with the same name
index = len(generated_classes[base_name])
self.name = escape.pack_namedtuple_base_class(base_name, index)
self.cls = self._new_named_tuple(self.name, fields)
def _new_named_tuple(
self, class_name: str, fields: list[tuple[str, Any]]
) -> pytd.Class:
"""Generates a pytd class for a named tuple.
Args:
class_name: The name of the generated class
fields: A list of (name, type) tuples.
Returns:
A generated class that describes the named tuple.
"""
class_base = pytd.NamedType("typing.NamedTuple")
class_constants = tuple(pytd.Constant(n, t) for n, t in fields)
return pytd.Class(name=class_name,
keywords=(),
bases=(class_base,),
methods=(),
constants=class_constants,
decorators=(),
classes=(),
slots=None,
template=())
| NamedTuple |
python | python__mypy | mypy/nodes.py | {
"start": 102349,
"end": 134397
} | class ____(SymbolNode):
"""The type structure of a single class.
Each TypeInfo corresponds one-to-one to a ClassDef, which
represents the AST of the class.
In type-theory terms, this is a "type constructor", and if the
class is generic then it will be a type constructor of higher kind.
Where the class is used in an actual type, it's in the form of an
Instance, which amounts to a type application of the tycon to
the appropriate number of arguments.
"""
__slots__ = (
"_fullname",
"module_name",
"defn",
"mro",
"_mro_refs",
"bad_mro",
"is_final",
"is_disjoint_base",
"declared_metaclass",
"metaclass_type",
"names",
"is_abstract",
"is_protocol",
"runtime_protocol",
"abstract_attributes",
"deletable_attributes",
"slots",
"assuming",
"assuming_proper",
"inferring",
"is_enum",
"fallback_to_any",
"meta_fallback_to_any",
"type_vars",
"has_param_spec_type",
"bases",
"_promote",
"tuple_type",
"special_alias",
"is_named_tuple",
"typeddict_type",
"is_newtype",
"is_intersection",
"metadata",
"alt_promote",
"has_type_var_tuple_type",
"type_var_tuple_prefix",
"type_var_tuple_suffix",
"self_type",
"dataclass_transform_spec",
"is_type_check_only",
"deprecated",
"type_object_type",
)
_fullname: str # Fully qualified name
# Fully qualified name for the module this type was defined in. This
# information is also in the fullname, but is harder to extract in the
# case of nested class definitions.
module_name: str
defn: ClassDef # Corresponding ClassDef
# Method Resolution Order: the order of looking up attributes. The first
# value always to refers to this class.
mro: list[TypeInfo]
# Used to stash the names of the mro classes temporarily between
# deserialization and fixup. See deserialize() for why.
_mro_refs: list[str] | None
bad_mro: bool # Could not construct full MRO
is_final: bool
is_disjoint_base: bool
declared_metaclass: mypy.types.Instance | None
metaclass_type: mypy.types.Instance | None
names: SymbolTable # Names defined directly in this type
is_abstract: bool # Does the class have any abstract attributes?
is_protocol: bool # Is this a protocol class?
runtime_protocol: bool # Does this protocol support isinstance checks?
# List of names of abstract attributes together with their abstract status.
# The abstract status must be one of `NOT_ABSTRACT`, `IS_ABSTRACT`, `IMPLICITLY_ABSTRACT`.
abstract_attributes: list[tuple[str, int]]
deletable_attributes: list[str] # Used by mypyc only
# Does this type have concrete `__slots__` defined?
# If class does not have `__slots__` defined then it is `None`,
# if it has empty `__slots__` then it is an empty set.
slots: set[str] | None
# The attributes 'assuming' and 'assuming_proper' represent structural subtype matrices.
#
# In languages with structural subtyping, one can keep a global subtype matrix like this:
# . A B C .
# A 1 0 0
# B 1 1 1
# C 1 0 1
# .
# where 1 indicates that the type in corresponding row is a subtype of the type
# in corresponding column. This matrix typically starts filled with all 1's and
# a typechecker tries to "disprove" every subtyping relation using atomic (or nominal) types.
# However, we don't want to keep this huge global state. Instead, we keep the subtype
# information in the form of list of pairs (subtype, supertype) shared by all Instances
# with given supertype's TypeInfo. When we enter a subtype check we push a pair in this list
# thus assuming that we started with 1 in corresponding matrix element. Such algorithm allows
# to treat recursive and mutually recursive protocols and other kinds of complex situations.
#
# If concurrent/parallel type checking will be added in future,
# then there should be one matrix per thread/process to avoid false negatives
# during the type checking phase.
assuming: list[tuple[mypy.types.Instance, mypy.types.Instance]]
assuming_proper: list[tuple[mypy.types.Instance, mypy.types.Instance]]
# Ditto for temporary 'inferring' stack of recursive constraint inference.
# It contains Instances of protocol types that appeared as an argument to
# constraints.infer_constraints(). We need 'inferring' to avoid infinite recursion for
# recursive and mutually recursive protocols.
#
# We make 'assuming' and 'inferring' attributes here instead of passing they as kwargs,
# since this would require to pass them in many dozens of calls. In particular,
# there is a dependency infer_constraint -> is_subtype -> is_callable_subtype ->
# -> infer_constraints.
inferring: list[mypy.types.Instance]
# 'inferring' and 'assuming' can't be made sets, since we need to use
# is_same_type to correctly treat unions.
# Classes inheriting from Enum shadow their true members with a __getattr__, so we
# have to treat them as a special case.
is_enum: bool
# If true, any unknown attributes should have type 'Any' instead
# of generating a type error. This would be true if there is a
# base class with type 'Any', but other use cases may be
# possible. This is similar to having __getattr__ that returns Any
# (and __setattr__), but without the __getattr__ method.
fallback_to_any: bool
# Same as above but for cases where metaclass has type Any. This will suppress
# all attribute errors only for *class object* access.
meta_fallback_to_any: bool
# Information related to type annotations.
# Generic type variable names (full names)
type_vars: list[str]
# Whether this class has a ParamSpec type variable
has_param_spec_type: bool
# Direct base classes.
bases: list[mypy.types.Instance]
# Another type which this type will be treated as a subtype of,
# even though it's not a subclass in Python. The non-standard
# `@_promote` decorator introduces this, and there are also
# several builtin examples, in particular `int` -> `float`.
_promote: list[mypy.types.ProperType]
# This is used for promoting native integer types such as 'i64' to
# 'int'. (_promote is used for the other direction.) This only
# supports one-step promotions (e.g., i64 -> int, not
# i64 -> int -> float, and this isn't used to promote in joins.
#
# This results in some unintuitive results, such as that even
# though i64 is compatible with int and int is compatible with
# float, i64 is *not* compatible with float.
alt_promote: mypy.types.Instance | None
# Representation of a Tuple[...] base class, if the class has any
# (e.g., for named tuples). If this is not None, the actual Type
# object used for this class is not an Instance but a TupleType;
# the corresponding Instance is set as the fallback type of the
# tuple type.
tuple_type: mypy.types.TupleType | None
# Is this a named tuple type?
is_named_tuple: bool
# If this class is defined by the TypedDict type constructor,
# then this is not None.
typeddict_type: mypy.types.TypedDictType | None
# Is this a newtype type?
is_newtype: bool
# Is this a synthesized intersection type?
is_intersection: bool
# This is a dictionary that will be serialized and un-serialized as is.
# It is useful for plugins to add their data to save in the cache.
metadata: dict[str, JsonDict]
# Store type alias representing this type (for named tuples and TypedDicts).
# Although definitions of these types are stored in symbol tables as TypeInfo,
# when a type analyzer will find them, it should construct a TupleType, or
# a TypedDict type. However, we can't use the plain types, since if the definition
# is recursive, this will create an actual recursive structure of types (i.e. as
# internal Python objects) causing infinite recursions everywhere during type checking.
# To overcome this, we create a TypeAlias node, that will point to these types.
# We store this node in the `special_alias` attribute, because it must be the same node
# in case we are doing multiple semantic analysis passes.
special_alias: TypeAlias | None
# Shared type variable for typing.Self in this class (if used, otherwise None).
self_type: mypy.types.TypeVarType | None
# Added if the corresponding class is directly decorated with `typing.dataclass_transform`
dataclass_transform_spec: DataclassTransformSpec | None
# Is set to `True` when class is decorated with `@typing.type_check_only`
is_type_check_only: bool
# The type's deprecation message (in case it is deprecated)
deprecated: str | None
# Cached value of class constructor type, i.e. the type of class object when it
# appears in runtime context.
type_object_type: mypy.types.FunctionLike | None
FLAGS: Final = [
"is_abstract",
"is_enum",
"fallback_to_any",
"meta_fallback_to_any",
"is_named_tuple",
"is_newtype",
"is_protocol",
"runtime_protocol",
"is_final",
"is_disjoint_base",
"is_intersection",
]
def __init__(self, names: SymbolTable, defn: ClassDef, module_name: str) -> None:
"""Initialize a TypeInfo."""
super().__init__()
self._fullname = defn.fullname
self.names = names
self.defn = defn
self.module_name = module_name
self.type_vars = []
self.has_param_spec_type = False
self.has_type_var_tuple_type = False
self.bases = []
self.mro = []
self._mro_refs = None
self.bad_mro = False
self.declared_metaclass = None
self.metaclass_type = None
self.is_abstract = False
self.abstract_attributes = []
self.deletable_attributes = []
self.slots = None
self.assuming = []
self.assuming_proper = []
self.inferring = []
self.is_protocol = False
self.runtime_protocol = False
self.type_var_tuple_prefix: int | None = None
self.type_var_tuple_suffix: int | None = None
self.add_type_vars()
self.is_final = False
self.is_disjoint_base = False
self.is_enum = False
self.fallback_to_any = False
self.meta_fallback_to_any = False
self._promote = []
self.alt_promote = None
self.tuple_type = None
self.special_alias = None
self.is_named_tuple = False
self.typeddict_type = None
self.is_newtype = False
self.is_intersection = False
self.metadata = {}
self.self_type = None
self.dataclass_transform_spec = None
self.is_type_check_only = False
self.deprecated = None
self.type_object_type = None
def add_type_vars(self) -> None:
self.has_type_var_tuple_type = False
if self.defn.type_vars:
for i, vd in enumerate(self.defn.type_vars):
if isinstance(vd, mypy.types.ParamSpecType):
self.has_param_spec_type = True
if isinstance(vd, mypy.types.TypeVarTupleType):
assert not self.has_type_var_tuple_type
self.has_type_var_tuple_type = True
self.type_var_tuple_prefix = i
self.type_var_tuple_suffix = len(self.defn.type_vars) - i - 1
self.type_vars.append(vd.name)
@property
def name(self) -> str:
"""Short name."""
return self.defn.name
@property
def fullname(self) -> str:
return self._fullname
def is_generic(self) -> bool:
"""Is the type generic (i.e. does it have type variables)?"""
return len(self.type_vars) > 0
def get(self, name: str) -> SymbolTableNode | None:
for cls in self.mro:
n = cls.names.get(name)
if n:
return n
return None
def get_containing_type_info(self, name: str) -> TypeInfo | None:
for cls in self.mro:
if name in cls.names:
return cls
return None
@property
def protocol_members(self) -> list[str]:
# Protocol members are names of all attributes/methods defined in a protocol
# and in all its supertypes (except for 'object').
members: set[str] = set()
assert self.mro, "This property can be only accessed after MRO is (re-)calculated"
for base in self.mro[:-1]: # we skip "object" since everyone implements it
if base.is_protocol:
for name, node in base.names.items():
if isinstance(node.node, (TypeAlias, TypeVarExpr, MypyFile)):
# These are auxiliary definitions (and type aliases are prohibited).
continue
if name in EXCLUDED_PROTOCOL_ATTRIBUTES:
continue
members.add(name)
return sorted(members)
@property
def enum_members(self) -> list[str]:
# TODO: cache the results?
members = []
for name, sym in self.names.items():
# Case 1:
#
# class MyEnum(Enum):
# @member
# def some(self): ...
if isinstance(sym.node, Decorator):
if any(
dec.fullname == "enum.member"
for dec in sym.node.decorators
if isinstance(dec, RefExpr)
):
members.append(name)
continue
# Case 2:
#
# class MyEnum(Enum):
# x = 1
#
# Case 3:
#
# class MyEnum(Enum):
# class Other: ...
elif isinstance(sym.node, (Var, TypeInfo)):
if (
# TODO: properly support ignored names from `_ignore_`
name in EXCLUDED_ENUM_ATTRIBUTES
or is_sunder(name)
or name.startswith("__") # dunder and private
):
continue # name is excluded
if isinstance(sym.node, Var):
if not sym.node.has_explicit_value:
continue # unannotated value not a member
typ = mypy.types.get_proper_type(sym.node.type)
if (
isinstance(typ, mypy.types.FunctionLike) and not typ.is_type_obj()
) or ( # explicit `@member` is required
isinstance(typ, mypy.types.Instance)
and typ.type.fullname == "enum.nonmember"
):
continue # name is not a member
members.append(name)
return members
def __getitem__(self, name: str) -> SymbolTableNode:
n = self.get(name)
if n:
return n
else:
raise KeyError(name)
def __repr__(self) -> str:
return f"<TypeInfo {self.fullname}>"
def __bool__(self) -> bool:
# We defined this here instead of just overriding it in
# FakeInfo so that mypyc can generate a direct call instead of
# using the generic bool handling.
return not isinstance(self, FakeInfo)
def has_readable_member(self, name: str) -> bool:
return self.get(name) is not None
def get_method(self, name: str) -> FuncBase | Decorator | None:
for cls in self.mro:
if name in cls.names:
node = cls.names[name].node
elif possible_redefinitions := sorted(
[n for n in cls.names.keys() if n.startswith(f"{name}-redefinition")]
):
node = cls.names[possible_redefinitions[-1]].node
else:
continue
if isinstance(node, SYMBOL_FUNCBASE_TYPES):
return node
elif isinstance(node, Decorator): # Two `if`s make `mypyc` happy
return node
else:
return None
return None
def calculate_metaclass_type(self) -> mypy.types.Instance | None:
declared = self.declared_metaclass
if declared is not None and not declared.type.has_base("builtins.type"):
return declared
if self._fullname == "builtins.type":
return mypy.types.Instance(self, [])
winner = declared
for super_class in self.mro[1:]:
super_meta = super_class.declared_metaclass
if super_meta is None or super_meta.type is None:
continue
if winner is None:
winner = super_meta
continue
if winner.type.has_base(super_meta.type.fullname):
continue
if super_meta.type.has_base(winner.type.fullname):
winner = super_meta
continue
# metaclass conflict
winner = None
break
return winner
def explain_metaclass_conflict(self) -> str | None:
# Compare to logic in calculate_metaclass_type
declared = self.declared_metaclass
if declared is not None and not declared.type.has_base("builtins.type"):
return None
if self._fullname == "builtins.type":
return None
winner = declared
if declared is None:
resolution_steps = []
else:
resolution_steps = [f'"{declared.type.fullname}" (metaclass of "{self.fullname}")']
for super_class in self.mro[1:]:
super_meta = super_class.declared_metaclass
if super_meta is None or super_meta.type is None:
continue
if winner is None:
winner = super_meta
resolution_steps.append(
f'"{winner.type.fullname}" (metaclass of "{super_class.fullname}")'
)
continue
if winner.type.has_base(super_meta.type.fullname):
continue
if super_meta.type.has_base(winner.type.fullname):
winner = super_meta
resolution_steps.append(
f'"{winner.type.fullname}" (metaclass of "{super_class.fullname}")'
)
continue
# metaclass conflict
conflict = f'"{super_meta.type.fullname}" (metaclass of "{super_class.fullname}")'
return f"{' > '.join(resolution_steps)} conflicts with {conflict}"
return None
def is_metaclass(self, *, precise: bool = False) -> bool:
return (
self.has_base("builtins.type")
or self.fullname == "abc.ABCMeta"
or (self.fallback_to_any and not precise)
)
def has_base(self, fullname: str) -> bool:
"""Return True if type has a base type with the specified name.
This can be either via extension or via implementation.
"""
for cls in self.mro:
if cls.fullname == fullname:
return True
return False
def direct_base_classes(self) -> list[TypeInfo]:
"""Return a direct base classes.
Omit base classes of other base classes.
"""
return [base.type for base in self.bases]
def update_tuple_type(self, typ: mypy.types.TupleType) -> None:
"""Update tuple_type and special_alias as needed."""
self.tuple_type = typ
alias = TypeAlias.from_tuple_type(self)
if not self.special_alias:
self.special_alias = alias
else:
self.special_alias.target = alias.target
# Invalidate recursive status cache in case it was previously set.
self.special_alias._is_recursive = None
def update_typeddict_type(self, typ: mypy.types.TypedDictType) -> None:
"""Update typeddict_type and special_alias as needed."""
self.typeddict_type = typ
alias = TypeAlias.from_typeddict_type(self)
if not self.special_alias:
self.special_alias = alias
else:
self.special_alias.target = alias.target
# Invalidate recursive status cache in case it was previously set.
self.special_alias._is_recursive = None
def __str__(self) -> str:
"""Return a string representation of the type.
This includes the most important information about the type.
"""
options = Options()
return self.dump(
str_conv=mypy.strconv.StrConv(options=options),
type_str_conv=mypy.types.TypeStrVisitor(options=options),
)
def dump(
self, str_conv: mypy.strconv.StrConv, type_str_conv: mypy.types.TypeStrVisitor
) -> str:
"""Return a string dump of the contents of the TypeInfo."""
base: str = ""
def type_str(typ: mypy.types.Type) -> str:
return typ.accept(type_str_conv)
head = "TypeInfo" + str_conv.format_id(self)
if self.bases:
base = f"Bases({', '.join(type_str(base) for base in self.bases)})"
mro = "Mro({})".format(
", ".join(item.fullname + str_conv.format_id(item) for item in self.mro)
)
names = []
for name in sorted(self.names):
description = name + str_conv.format_id(self.names[name].node)
node = self.names[name].node
if isinstance(node, Var) and node.type:
description += f" ({type_str(node.type)})"
names.append(description)
items = [f"Name({self.fullname})", base, mro, ("Names", names)]
if self.declared_metaclass:
items.append(f"DeclaredMetaclass({type_str(self.declared_metaclass)})")
if self.metaclass_type:
items.append(f"MetaclassType({type_str(self.metaclass_type)})")
return mypy.strconv.dump_tagged(items, head, str_conv=str_conv)
def serialize(self) -> JsonDict:
# NOTE: This is where all ClassDefs originate, so there shouldn't be duplicates.
data = {
".class": "TypeInfo",
"module_name": self.module_name,
"fullname": self.fullname,
"names": self.names.serialize(self.fullname),
"defn": self.defn.serialize(),
"abstract_attributes": self.abstract_attributes,
"type_vars": self.type_vars,
"has_param_spec_type": self.has_param_spec_type,
"bases": [b.serialize() for b in self.bases],
"mro": [c.fullname for c in self.mro],
"_promote": [p.serialize() for p in self._promote],
"alt_promote": None if self.alt_promote is None else self.alt_promote.serialize(),
"declared_metaclass": (
None if self.declared_metaclass is None else self.declared_metaclass.serialize()
),
"metaclass_type": (
None if self.metaclass_type is None else self.metaclass_type.serialize()
),
"tuple_type": None if self.tuple_type is None else self.tuple_type.serialize(),
"typeddict_type": (
None if self.typeddict_type is None else self.typeddict_type.serialize()
),
"flags": get_flags(self, TypeInfo.FLAGS),
"metadata": self.metadata,
"slots": sorted(self.slots) if self.slots is not None else None,
"deletable_attributes": self.deletable_attributes,
"self_type": self.self_type.serialize() if self.self_type is not None else None,
"dataclass_transform_spec": (
self.dataclass_transform_spec.serialize()
if self.dataclass_transform_spec is not None
else None
),
"deprecated": self.deprecated,
}
return data
@classmethod
def deserialize(cls, data: JsonDict) -> TypeInfo:
names = SymbolTable.deserialize(data["names"])
defn = ClassDef.deserialize(data["defn"])
module_name = data["module_name"]
ti = TypeInfo(names, defn, module_name)
ti._fullname = data["fullname"]
ti.abstract_attributes = [(attr[0], attr[1]) for attr in data["abstract_attributes"]]
ti.type_vars = data["type_vars"]
ti.has_param_spec_type = data["has_param_spec_type"]
ti.bases = [mypy.types.Instance.deserialize(b) for b in data["bases"]]
_promote = []
for p in data["_promote"]:
t = mypy.types.deserialize_type(p)
assert isinstance(t, mypy.types.ProperType)
_promote.append(t)
ti._promote = _promote
ti.alt_promote = (
None
if data["alt_promote"] is None
else mypy.types.Instance.deserialize(data["alt_promote"])
)
ti.declared_metaclass = (
None
if data["declared_metaclass"] is None
else mypy.types.Instance.deserialize(data["declared_metaclass"])
)
ti.metaclass_type = (
None
if data["metaclass_type"] is None
else mypy.types.Instance.deserialize(data["metaclass_type"])
)
# NOTE: ti.mro will be set in the fixup phase based on these
# names. The reason we need to store the mro instead of just
# recomputing it from base classes has to do with a subtle
# point about fine-grained incremental: the cache files might
# not be loaded until after a class in the mro has changed its
# bases, which causes the mro to change. If we recomputed our
# mro, we would compute the *new* mro, which leaves us with no
# way to detect that the mro has changed! Thus we need to make
# sure to load the original mro so that once the class is
# rechecked, it can tell that the mro has changed.
ti._mro_refs = data["mro"]
ti.tuple_type = (
None
if data["tuple_type"] is None
else mypy.types.TupleType.deserialize(data["tuple_type"])
)
ti.typeddict_type = (
None
if data["typeddict_type"] is None
else mypy.types.TypedDictType.deserialize(data["typeddict_type"])
)
ti.metadata = data["metadata"]
ti.slots = set(data["slots"]) if data["slots"] is not None else None
ti.deletable_attributes = data["deletable_attributes"]
set_flags(ti, data["flags"])
st = data["self_type"]
ti.self_type = mypy.types.TypeVarType.deserialize(st) if st is not None else None
if data.get("dataclass_transform_spec") is not None:
ti.dataclass_transform_spec = DataclassTransformSpec.deserialize(
data["dataclass_transform_spec"]
)
ti.deprecated = data.get("deprecated")
return ti
def write(self, data: WriteBuffer) -> None:
write_tag(data, TYPE_INFO)
self.names.write(data, self.fullname)
self.defn.write(data)
write_str(data, self.module_name)
write_str(data, self.fullname)
write_str_list(data, [a for a, _ in self.abstract_attributes])
write_int_list(data, [s for _, s in self.abstract_attributes])
write_str_list(data, self.type_vars)
write_bool(data, self.has_param_spec_type)
mypy.types.write_type_list(data, self.bases)
write_str_list(data, [c.fullname for c in self.mro])
mypy.types.write_type_list(data, self._promote)
mypy.types.write_type_opt(data, self.alt_promote)
mypy.types.write_type_opt(data, self.declared_metaclass)
mypy.types.write_type_opt(data, self.metaclass_type)
mypy.types.write_type_opt(data, self.tuple_type)
mypy.types.write_type_opt(data, self.typeddict_type)
write_flags(data, self, TypeInfo.FLAGS)
write_json(data, self.metadata)
if self.slots is None:
write_tag(data, LITERAL_NONE)
else:
write_str_list(data, sorted(self.slots))
write_str_list(data, self.deletable_attributes)
mypy.types.write_type_opt(data, self.self_type)
if self.dataclass_transform_spec is None:
write_tag(data, LITERAL_NONE)
else:
self.dataclass_transform_spec.write(data)
write_str_opt(data, self.deprecated)
write_tag(data, END_TAG)
@classmethod
def read(cls, data: ReadBuffer) -> TypeInfo:
names = SymbolTable.read(data)
assert read_tag(data) == CLASS_DEF
defn = ClassDef.read(data)
module_name = read_str(data)
ti = TypeInfo(names, defn, module_name)
ti._fullname = read_str(data)
attrs = read_str_list(data)
statuses = read_int_list(data)
ti.abstract_attributes = list(zip(attrs, statuses))
ti.type_vars = read_str_list(data)
ti.has_param_spec_type = read_bool(data)
ti.bases = []
assert read_tag(data) == LIST_GEN
for _ in range(read_int_bare(data)):
assert read_tag(data) == mypy.types.INSTANCE
ti.bases.append(mypy.types.Instance.read(data))
# NOTE: ti.mro will be set in the fixup phase based on these
# names. The reason we need to store the mro instead of just
# recomputing it from base classes has to do with a subtle
# point about fine-grained incremental: the cache files might
# not be loaded until after a class in the mro has changed its
# bases, which causes the mro to change. If we recomputed our
# mro, we would compute the *new* mro, which leaves us with no
# way to detect that the mro has changed! Thus, we need to make
# sure to load the original mro so that once the class is
# rechecked, it can tell that the mro has changed.
ti._mro_refs = read_str_list(data)
ti._promote = cast(list[mypy.types.ProperType], mypy.types.read_type_list(data))
if (tag := read_tag(data)) != LITERAL_NONE:
assert tag == mypy.types.INSTANCE
ti.alt_promote = mypy.types.Instance.read(data)
if (tag := read_tag(data)) != LITERAL_NONE:
assert tag == mypy.types.INSTANCE
ti.declared_metaclass = mypy.types.Instance.read(data)
if (tag := read_tag(data)) != LITERAL_NONE:
assert tag == mypy.types.INSTANCE
ti.metaclass_type = mypy.types.Instance.read(data)
if (tag := read_tag(data)) != LITERAL_NONE:
assert tag == mypy.types.TUPLE_TYPE
ti.tuple_type = mypy.types.TupleType.read(data)
if (tag := read_tag(data)) != LITERAL_NONE:
assert tag == mypy.types.TYPED_DICT_TYPE
ti.typeddict_type = mypy.types.TypedDictType.read(data)
read_flags(data, ti, TypeInfo.FLAGS)
ti.metadata = read_json(data)
tag = read_tag(data)
if tag != LITERAL_NONE:
assert tag == LIST_STR
ti.slots = {read_str_bare(data) for _ in range(read_int_bare(data))}
ti.deletable_attributes = read_str_list(data)
if (tag := read_tag(data)) != LITERAL_NONE:
assert tag == mypy.types.TYPE_VAR_TYPE
ti.self_type = mypy.types.TypeVarType.read(data)
tag = read_tag(data)
if tag != LITERAL_NONE:
assert tag == DT_SPEC
ti.dataclass_transform_spec = DataclassTransformSpec.read(data)
ti.deprecated = read_str_opt(data)
assert read_tag(data) == END_TAG
return ti
| TypeInfo |
python | kamyu104__LeetCode-Solutions | Python/stable-subarrays-with-equal-boundary-and-interior-sum.py | {
"start": 75,
"end": 715
} | class ____(object):
def countStableSubarrays(self, capacity):
"""
:type capacity: List[int]
:rtype: int
"""
L = 3
cnt = collections.defaultdict(lambda: collections.defaultdict(int))
result = prefix = prefix2 = 0
for i in xrange(len(capacity)):
result += cnt[capacity[i]][prefix-capacity[i]]
prefix += capacity[i]
if (i+1)-L+1 >= 0:
prefix2 += capacity[(i+1)-L+1]
cnt[capacity[(i+1)-L+1]][prefix2] += 1
return result
# Time: O(n)
# Space: O(n)
import collections
# freq table, prefix sum
| Solution |
python | numba__numba | numba/core/errors.py | {
"start": 1972,
"end": 2099
} | class ____(NumbaWarning):
"""
Warning category for using an experimental feature.
"""
| NumbaExperimentalFeatureWarning |
python | django__django | tests/utils_tests/test_module_loading.py | {
"start": 327,
"end": 2852
} | class ____(unittest.TestCase):
def test_loader(self):
"Normal module existence can be tested"
test_module = import_module("utils_tests.test_module")
test_no_submodule = import_module("utils_tests.test_no_submodule")
# An importable child
self.assertTrue(module_has_submodule(test_module, "good_module"))
mod = import_module("utils_tests.test_module.good_module")
self.assertEqual(mod.content, "Good Module")
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(test_module, "bad_module"))
with self.assertRaises(ImportError):
import_module("utils_tests.test_module.bad_module")
# A child that doesn't exist
self.assertFalse(module_has_submodule(test_module, "no_such_module"))
with self.assertRaises(ImportError):
import_module("utils_tests.test_module.no_such_module")
# A child that doesn't exist, but is the name of a package on the path
self.assertFalse(module_has_submodule(test_module, "django"))
with self.assertRaises(ImportError):
import_module("utils_tests.test_module.django")
# Don't be confused by caching of import misses
import types # NOQA: causes attempted import of utils_tests.types
self.assertFalse(module_has_submodule(sys.modules["utils_tests"], "types"))
# A module which doesn't have a __path__ (so no submodules)
self.assertFalse(module_has_submodule(test_no_submodule, "anything"))
with self.assertRaises(ImportError):
import_module("utils_tests.test_no_submodule.anything")
def test_has_sumbodule_with_dotted_path(self):
"""Nested module existence can be tested."""
test_module = import_module("utils_tests.test_module")
# A grandchild that exists.
self.assertIs(
module_has_submodule(test_module, "child_module.grandchild_module"), True
)
# A grandchild that doesn't exist.
self.assertIs(
module_has_submodule(test_module, "child_module.no_such_module"), False
)
# A grandchild whose parent doesn't exist.
self.assertIs(
module_has_submodule(test_module, "no_such_module.grandchild_module"), False
)
# A grandchild whose parent is not a package.
self.assertIs(
module_has_submodule(test_module, "good_module.no_such_module"), False
)
| DefaultLoader |
python | PrefectHQ__prefect | tests/test_schedules.py | {
"start": 3089,
"end": 3826
} | class ____:
def test_rrule_schedule_creation(self):
rrule = "RRULE:FREQ=DAILY;INTERVAL=1"
schedule = RRule(rrule)
assert schedule.rrule == rrule
assert schedule.timezone is None
assert schedule.active is True
assert schedule.parameters == {}
def test_rrule_schedule_with_all_parameters(self):
rrule = "RRULE:FREQ=WEEKLY;BYDAY=MO,WE,FR"
params = {"key": "value"}
schedule = RRule(
rrule, timezone="Europe/London", active=False, parameters=params
)
assert schedule.rrule == rrule
assert schedule.timezone == "Europe/London"
assert schedule.active is False
assert schedule.parameters == params
| TestRRuleSchedule |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/base.py | {
"start": 107691,
"end": 107950
} | class ____(TypedDict):
"""Represents a reflected named type."""
name: str
"""Name of the type."""
schema: str
"""The schema of the type."""
visible: bool
"""Indicates if this type is in the current search path."""
| ReflectedNamedType |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 42455,
"end": 47941
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
dbt_commands: Optional[list[str]] = Field(
None,
description=(
"An array of commands to execute for jobs with the dbt task, for example"
' `"dbt_commands": ["dbt deps", "dbt seed", "dbt run"]`'
),
examples=[["dbt deps", "dbt seed", "dbt run"]],
)
jar_params: Optional[List[str]] = Field(
None,
description=(
"A list of parameters for jobs with Spark JAR tasks, for example"
' `"jar_params": ["john doe", "35"]`. The parameters are used to invoke the'
" main function of the main class specified in the Spark JAR task. If not"
" specified upon `run-now`, it defaults to an empty list. jar_params cannot"
" be specified in conjunction with notebook_params. The JSON representation"
' of this field (for example `{"jar_params":["john doe","35"]}`) cannot'
" exceed 10,000 bytes.\n\nUse [Task parameter"
" variables](https://docs.databricks.com/jobs.html#parameter-variables) to"
" set parameters containing information about job runs."
),
examples=[["john", "doe", "35"]],
)
notebook_params: Optional[Dict[str, Any]] = Field(
default=None,
description=(
"A map from keys to values for jobs with notebook task, for example"
' `"notebook_params": {"name": "john doe", "age": "35"}`. The map is passed'
" to the notebook and is accessible through the"
" [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets)"
" function.\n\nIf not specified upon `run-now`, the triggered run uses the"
" job’s base parameters.\n\nnotebook_params cannot be specified in"
" conjunction with jar_params.\n\nUse [Task parameter"
" variables](https://docs.databricks.com/jobs.html#parameter-variables) to"
" set parameters containing information about job runs.\n\nThe JSON"
" representation of this field (for example"
' `{"notebook_params":{"name":"john doe","age":"35"}}`) cannot exceed'
" 10,000 bytes."
),
examples=[{"age": "35", "name": "john doe"}],
)
pipeline_params: Optional[PipelineParams] = None
python_named_params: Optional[Dict[str, Any]] = Field(
default=None,
description=(
"A map from keys to values for jobs with Python wheel task, for example"
' `"python_named_params": {"name": "task", "data":'
' "dbfs:/path/to/data.json"}`.'
),
examples=[{"data": "dbfs:/path/to/data.json", "name": "task"}],
)
python_params: Optional[List[str]] = Field(
default=None,
description=(
"A list of parameters for jobs with Python tasks, for example"
' `"python_params": ["john doe", "35"]`. The parameters are passed to'
" Python file as command-line parameters. If specified upon `run-now`, it"
" would overwrite the parameters specified in job setting. The JSON"
' representation of this field (for example `{"python_params":["john'
' doe","35"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter'
" variables](https://docs.databricks.com/jobs.html#parameter-variables) to"
" set parameters containing information about job"
" runs.\n\nImportant\n\nThese parameters accept only Latin characters"
" (ASCII character set). Using non-ASCII characters returns an error."
" Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis,"
" and emojis."
),
examples=[["john doe", "35"]],
)
spark_submit_params: Optional[List[str]] = Field(
default=None,
description=(
"A list of parameters for jobs with spark submit task, for example"
' `"spark_submit_params": ["--class",'
' "org.apache.spark.examples.SparkPi"]`. The parameters are passed to'
" spark-submit script as command-line parameters. If specified upon"
" `run-now`, it would overwrite the parameters specified in job setting."
" The JSON representation of this field (for example"
' `{"python_params":["john doe","35"]}`) cannot exceed 10,000 bytes.\n\nUse'
" [Task parameter"
" variables](https://docs.databricks.com/jobs.html#parameter-variables) to"
" set parameters containing information about job"
" runs.\n\nImportant\n\nThese parameters accept only Latin characters"
" (ASCII character set). Using non-ASCII characters returns an error."
" Examples of invalid, non-ASCII characters are Chinese, Japanese kanjis,"
" and emojis."
),
examples=[["--class", "org.apache.spark.examples.SparkPi"]],
)
sql_params: Optional[Dict[str, Any]] = Field(
default=None,
description=(
'A map from keys to values for SQL tasks, for example `"sql_params":'
' {"name": "john doe", "age": "35"}`. The SQL alert task does not support'
" custom parameters."
),
examples=[{"age": "35", "name": "john doe"}],
)
| RunParameters |
python | Netflix__metaflow | metaflow/plugins/argo/argo_workflows.py | {
"start": 214962,
"end": 215846
} | class ____(object):
# https://github.com/argoproj/argo-events/blob/master/api/sensor.md#argoproj.io/v1alpha1.TriggerTemplate
def __init__(self, name):
tree = lambda: defaultdict(tree)
self.payload = tree()
self.payload["name"] = name
def k8s_trigger(self, k8s_trigger):
self.payload["k8s"] = k8s_trigger.to_json()
return self
def argo_workflow_trigger(self, argo_workflow_trigger):
self.payload["argoWorkflow"] = argo_workflow_trigger.to_json()
return self
def conditions_reset(self, cron, timezone):
if cron:
self.payload["conditionsReset"] = [
{"byTime": {"cron": cron, "timezone": timezone}}
]
return self
def to_json(self):
return self.payload
def __str__(self):
return json.dumps(self.payload, indent=4)
| TriggerTemplate |
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 6603,
"end": 10612
} | class ____(StringField):
"""A field that validates input as an email address."""
USER_REGEX = LazyRegexCompiler(
# `dot-atom` defined in RFC 5322 Section 3.2.3.
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*\Z"
# `quoted-string` defined in RFC 5322 Section 3.2.4.
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)',
re.IGNORECASE,
)
UTF8_USER_REGEX = LazyRegexCompiler(
(
# RFC 6531 Section 3.3 extends `atext` (used by dot-atom) to
# include `UTF8-non-ascii`.
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z\u0080-\U0010FFFF]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z\u0080-\U0010FFFF]+)*\Z"
# `quoted-string`
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"\Z)'
),
re.IGNORECASE | re.UNICODE,
)
DOMAIN_REGEX = LazyRegexCompiler(
r"((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+)(?:[A-Z0-9-]{2,63}(?<!-))\Z",
re.IGNORECASE,
)
error_msg = "Invalid email address: %s"
def __init__(
self,
domain_whitelist=None,
allow_utf8_user=False,
allow_ip_domain=False,
*args,
**kwargs,
):
"""
:param domain_whitelist: (optional) list of valid domain names applied during validation
:param allow_utf8_user: Allow user part of the email to contain utf8 char
:param allow_ip_domain: Allow domain part of the email to be an IPv4 or IPv6 address
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.StringField`
"""
self.domain_whitelist = domain_whitelist or []
self.allow_utf8_user = allow_utf8_user
self.allow_ip_domain = allow_ip_domain
super().__init__(*args, **kwargs)
def validate_user_part(self, user_part):
"""Validate the user part of the email address. Return True if
valid and False otherwise.
"""
if self.allow_utf8_user:
return self.UTF8_USER_REGEX.match(user_part)
return self.USER_REGEX.match(user_part)
def validate_domain_part(self, domain_part):
"""Validate the domain part of the email address. Return True if
valid and False otherwise.
"""
# Skip domain validation if it's in the whitelist.
if domain_part in self.domain_whitelist:
return True
if self.DOMAIN_REGEX.match(domain_part):
return True
# Validate IPv4/IPv6, e.g. user@[192.168.0.1]
if self.allow_ip_domain and domain_part[0] == "[" and domain_part[-1] == "]":
for addr_family in (socket.AF_INET, socket.AF_INET6):
try:
socket.inet_pton(addr_family, domain_part[1:-1])
return True
except (OSError, UnicodeEncodeError):
pass
return False
def validate(self, value):
super().validate(value)
if "@" not in value:
self.error(self.error_msg % value)
user_part, domain_part = value.rsplit("@", 1)
# Validate the user part.
if not self.validate_user_part(user_part):
self.error(self.error_msg % value)
# Validate the domain and, if invalid, see if it's IDN-encoded.
if not self.validate_domain_part(domain_part):
try:
domain_part = domain_part.encode("idna").decode("ascii")
except UnicodeError:
self.error(
"{} {}".format(
self.error_msg % value, "(domain failed IDN encoding)"
)
)
else:
if not self.validate_domain_part(domain_part):
self.error(
"{} {}".format(
self.error_msg % value, "(domain validation failed)"
)
)
| EmailField |
python | sqlalchemy__sqlalchemy | test/orm/test_query.py | {
"start": 42237,
"end": 49556
} | class ____(QueryTest, AssertsCompiledSQL):
@testing.combinations(
lambda s, User: s.query(User).limit(2),
lambda s, User: s.query(User).filter(User.id == 1).offset(2),
lambda s, User: s.query(User).limit(2).offset(2),
)
def test_no_limit_offset(self, test_case):
User = self.classes.User
s = fixture_session()
q = testing.resolve_lambda(test_case, User=User, s=s)
assert_raises(sa_exc.InvalidRequestError, q.join, User.addresses)
assert_raises(sa_exc.InvalidRequestError, q.filter, User.name == "ed")
assert_raises(sa_exc.InvalidRequestError, q.filter_by, name="ed")
assert_raises(sa_exc.InvalidRequestError, q.order_by, "foo")
assert_raises(sa_exc.InvalidRequestError, q.group_by, "foo")
assert_raises(sa_exc.InvalidRequestError, q.having, "foo")
q.enable_assertions(False).join(User.addresses)
q.enable_assertions(False).filter(User.name == "ed")
q.enable_assertions(False).order_by("foo")
q.enable_assertions(False).group_by("foo")
def test_no_from(self):
users, User = self.tables.users, self.classes.User
s = fixture_session()
q = s.query(User).select_from(users)
assert_raises(sa_exc.InvalidRequestError, q.select_from, users)
q = s.query(User).join(User.addresses)
assert_raises(sa_exc.InvalidRequestError, q.select_from, users)
q = s.query(User).order_by(User.id)
assert_raises(sa_exc.InvalidRequestError, q.select_from, users)
assert_raises(sa_exc.InvalidRequestError, q.select_from, users)
q.enable_assertions(False).select_from(users)
def test_invalid_select_from(self):
User = self.classes.User
s = fixture_session()
q = s.query(User)
assert_raises(sa_exc.ArgumentError, q.select_from, User.id == 5)
assert_raises(sa_exc.ArgumentError, q.select_from, User.id)
def test_invalid_from_statement(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
s = fixture_session()
q = s.query(User)
assert_raises(sa_exc.ArgumentError, q.from_statement, User.id == 5)
assert_raises(
sa_exc.ArgumentError, q.from_statement, users.join(addresses)
)
def test_invalid_column(self):
User = self.classes.User
s = fixture_session()
q = s.query(User)
assert_raises(sa_exc.ArgumentError, q.add_columns, object())
def test_invalid_column_tuple(self):
User = self.classes.User
s = fixture_session()
q = s.query(User)
assert_raises(sa_exc.ArgumentError, q.add_columns, (1, 1))
def test_distinct(self):
"""test that a distinct() call is not valid before 'clauseelement'
conditions."""
User = self.classes.User
s = fixture_session()
q = s.query(User).distinct()
assert_raises(sa_exc.InvalidRequestError, q.select_from, User)
assert_raises(
sa_exc.InvalidRequestError,
q.from_statement,
text("select * from table"),
)
def test_order_by(self):
"""test that an order_by() call is not valid before 'clauseelement'
conditions."""
User = self.classes.User
s = fixture_session()
q = s.query(User).order_by(User.id)
assert_raises(sa_exc.InvalidRequestError, q.select_from, User)
assert_raises(
sa_exc.InvalidRequestError,
q.from_statement,
text("select * from table"),
)
def test_entity_or_mapper_zero_from_context(self):
User, Address = self.classes.User, self.classes.Address
s = fixture_session()
q = s.query(User, Address)._compile_state()
is_(q._mapper_zero(), inspect(User))
is_(q._entity_zero(), inspect(User))
u1 = aliased(User)
q = s.query(u1, Address)._compile_state()
is_(q._mapper_zero(), inspect(User))
is_(q._entity_zero(), inspect(u1))
q = s.query(User).select_from(Address)._compile_state()
is_(q._mapper_zero(), inspect(User))
is_(q._entity_zero(), inspect(Address))
q = s.query(User.name, Address)._compile_state()
is_(q._mapper_zero(), inspect(User))
is_(q._entity_zero(), inspect(User))
q = s.query(u1.name, Address)._compile_state()
is_(q._mapper_zero(), inspect(User))
is_(q._entity_zero(), inspect(u1))
q1 = s.query(User).exists()
q = s.query(q1)._compile_state()
is_(q._mapper_zero(), None)
is_(q._entity_zero(), None)
q1 = s.query(Bundle("b1", User.id, User.name))._compile_state()
is_(q1._mapper_zero(), inspect(User))
is_(q1._entity_zero(), inspect(User))
@testing.combinations(
lambda s, User: s.query(User).filter(User.id == 5),
lambda s, User: s.query(User).filter_by(id=5),
lambda s, User: s.query(User).limit(5),
lambda s, User: s.query(User).group_by(User.name),
lambda s, User: s.query(User).order_by(User.name),
)
def test_from_statement(self, test_case):
User = self.classes.User
s = fixture_session()
q = testing.resolve_lambda(test_case, User=User, s=s)
assert_raises(sa_exc.InvalidRequestError, q.from_statement, text("x"))
@testing.combinations(
(Query.filter, lambda meth, User: meth(User.id == 5)),
(Query.filter_by, lambda meth: meth(id=5)),
(Query.limit, lambda meth: meth(5)),
(Query.group_by, lambda meth, User: meth(User.name)),
(Query.order_by, lambda meth, User: meth(User.name)),
)
def test_from_statement_text(self, meth, test_case):
User = self.classes.User
s = fixture_session()
q = s.query(User)
q = q.from_statement(text("x"))
m = functools.partial(meth, q)
assert_raises(
sa_exc.InvalidRequestError,
testing.resolve_lambda,
test_case,
meth=m,
User=User,
s=s,
)
def test_illegal_coercions(self):
User = self.classes.User
assert_raises_message(
sa_exc.ArgumentError,
"SQL expression element expected, got .*User",
distinct,
User,
)
ua = aliased(User)
assert_raises_message(
sa_exc.ArgumentError,
"SQL expression element expected, got .*User",
distinct,
ua,
)
s = fixture_session()
assert_raises_message(
sa_exc.ArgumentError,
"SQL expression element or literal value expected, got .*User",
lambda: s.query(User).filter(User.name == User),
)
u1 = User()
assert_raises_message(
sa_exc.ArgumentError,
"SQL expression element expected, got .*User",
distinct,
u1,
)
assert_raises_message(
sa_exc.ArgumentError,
"SQL expression element or literal value expected, got .*User",
lambda: s.query(User).filter(User.name == u1),
)
| InvalidGenerationsTest |
python | kubernetes-client__python | kubernetes/client/models/v1_job_list.py | {
"start": 383,
"end": 6726
class ____(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> declared OpenAPI type.
    openapi_types = {
        'api_version': 'str',
        'items': 'list[V1Job]',
        'kind': 'str',
        'metadata': 'V1ListMeta'
    }

    # attribute_map: attribute name -> key in the JSON wire representation.
    attribute_map = {
        'api_version': 'apiVersion',
        'items': 'items',
        'kind': 'kind',
        'metadata': 'metadata'
    }

    def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None):  # noqa: E501
        """V1JobList - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._api_version = None
        self._items = None
        self._kind = None
        self._metadata = None
        self.discriminator = None

        # `items` is required: it is always routed through its validating
        # setter. The optional fields are only assigned when provided.
        if api_version is not None:
            self.api_version = api_version
        self.items = items
        if kind is not None:
            self.kind = kind
        if metadata is not None:
            self.metadata = metadata

    @property
    def api_version(self):
        """Gets the api_version of this V1JobList.

        APIVersion defines the versioned schema of this representation of an
        object. Servers should convert recognized schemas to the latest
        internal value, and may reject unrecognized values.

        :return: The api_version of this V1JobList.
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this V1JobList.

        :param api_version: The api_version of this V1JobList.
        :type: str
        """
        self._api_version = api_version

    @property
    def items(self):
        """Gets the items of this V1JobList.

        items is the list of Jobs.

        :return: The items of this V1JobList.
        :rtype: list[V1Job]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this V1JobList.

        :param items: The items of this V1JobList. Required; rejected when
            None and client-side validation is enabled.
        :type: list[V1Job]
        """
        if self.local_vars_configuration.client_side_validation and items is None:  # noqa: E501
            raise ValueError("Invalid value for `items`, must not be `None`")  # noqa: E501
        self._items = items

    @property
    def kind(self):
        """Gets the kind of this V1JobList.

        Kind is a string value representing the REST resource this object
        represents. Servers may infer this from the endpoint the client
        submits requests to. Cannot be updated. In CamelCase.

        :return: The kind of this V1JobList.
        :rtype: str
        """
        return self._kind

    @kind.setter
    def kind(self, kind):
        """Sets the kind of this V1JobList.

        :param kind: The kind of this V1JobList.
        :type: str
        """
        self._kind = kind

    @property
    def metadata(self):
        """Gets the metadata of this V1JobList.

        :return: The metadata of this V1JobList.
        :rtype: V1ListMeta
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """Sets the metadata of this V1JobList.

        :param metadata: The metadata of this V1JobList.
        :type: V1ListMeta
        """
        self._metadata = metadata

    def to_dict(self):
        """Returns the model properties as a dict"""

        def _serialize(value):
            # Recursively convert nested models; pass plain values through.
            if isinstance(value, list):
                return [
                    v.to_dict() if hasattr(v, "to_dict") else v for v in value
                ]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            return value

        return {
            attr: _serialize(getattr(self, attr))
            for attr in self.openapi_types
        }

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (
            isinstance(other, V1JobList)
            and self.to_dict() == other.to_dict()
        )

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return (
            not isinstance(other, V1JobList)
            or self.to_dict() != other.to_dict()
        )
| V1JobList |
python | dagster-io__dagster | python_modules/libraries/dagster-dg-cli/dagster_dg_cli/api_layer/schemas/run_event.py | {
"start": 121,
"end": 297
class ____(str, Enum):
    """Event severity levels.

    Subclassing ``str`` means each member IS its own string value, so
    members serialize directly and compare equal to the plain strings
    (e.g. ``"ERROR"``).
    """

    # Ordered from most to least severe; values mirror the member names.
    CRITICAL = "CRITICAL"
    ERROR = "ERROR"
    WARNING = "WARNING"
    INFO = "INFO"
    DEBUG = "DEBUG"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.