| Column | Type | Length / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4–721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] (nullable) | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] (nullable) | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6 – 10.2M |
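Each row pairs the raw text of one source file (`content`) with its Git blob/revision identifiers, repository metadata, GitHub activity counts, and language/license detection results. As a minimal sketch of how rows like these might be consumed (the dataset path `org/source-files-dataset` below is a placeholder, not a real repository name), the metadata columns can be used to filter files before reading `content`:

```python
from datasets import load_dataset

# Placeholder path -- substitute the actual dataset repository name.
ds = load_dataset("org/source-files-dataset", split="train", streaming=True)

# Keep only permissively licensed, non-vendored, non-generated Python
# files and inspect the first match.
for row in ds:
    if (
        row["license_type"] == "permissive"
        and row["language"] == "Python"
        and not row["is_vendor"]
        and not row["is_generated"]
    ):
        print(row["repo_name"], row["path"], row["length_bytes"])
        print(row["content"][:200])
        break
```

The example rows below follow this schema.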
| Field | Value |
|---|---|
| blob_id | c75bfdc06dfb4e402a4db55629be9e86aba8c74d |
| directory_id | 7b7c570b30d6d7a0e9b904c7cb378cfb0d0f0e07 |
| path | /tests/resources/mlflow-test-plugin/mlflow_test_plugin/dummy_backend.py |
| content_id | 2417846989c7adc574682b462c86ad90345fe275 |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | mlflow/mlflow |
| snapshot_id | ca97bfbbf32f8e59f454e428f5e46eb3d34d062f |
| revision_id | 37298ffafcd34002352d01d579d4524790544267 |
| branch_name | refs/heads/master |
| visit_date | 2023-09-01T13:15:53.902815 |
| revision_date | 2023-09-01T09:00:42 |
| committer_date | 2023-09-01T09:00:42 |
| github_id | 136,202,695 |
| star_events_count | 14,102 |
| fork_events_count | 3,748 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-14T21:52:42 |
| gha_created_at | 2018-06-05T16:05:58 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 1,109 |
| extension | py |
| filename | dummy_backend.py |
`content`:

```python
from mlflow.entities import RunStatus
from mlflow.projects.backend.abstract_backend import AbstractBackend
from mlflow.projects.submitted_run import SubmittedRun
from mlflow.projects.utils import fetch_and_validate_project, get_or_create_run


class DummySubmittedRun(SubmittedRun):
    """
    A run that just does nothing
    """

    def __init__(self, run_id):
        self._run_id = run_id

    def wait(self):
        return True

    def get_status(self):
        return RunStatus.FINISHED

    def cancel(self):
        pass

    @property
    def run_id(self):
        return self._run_id


class PluginDummyProjectBackend(AbstractBackend):
    def run(
        self,
        project_uri,
        entry_point,
        params,
        version,
        backend_config,
        tracking_uri,
        experiment_id,
    ):
        work_dir = fetch_and_validate_project(project_uri, version, entry_point, params)
        active_run = get_or_create_run(
            None, project_uri, experiment_id, work_dir, version, entry_point, params
        )
        return DummySubmittedRun(active_run.info.run_id)
```
| Field | Value |
|---|---|
| blob_id | ce109b652247ba75087cd3ce085fbc978a88a557 |
| directory_id | 045ec3ae16fc554a05510abc3697557ebc5ce304 |
| path | /CIME/SystemTests/seq.py |
| content_id | 304932d7d1484f9af3c575b6d94cba054fe6d4a1 |
| detected_licenses | ["LicenseRef-scancode-unknown-license-reference", "BSD-3-Clause"] |
| license_type | permissive |
| repo_name | ESMCI/cime |
| snapshot_id | c09223ee9b8a463bd00741ff39f60fda7639af89 |
| revision_id | 02fad90a379cdbd3c1106cbd63324480f0bf7a22 |
| branch_name | refs/heads/master |
| visit_date | 2023-08-16T07:03:22.224344 |
| revision_date | 2023-08-03T19:47:53 |
| committer_date | 2023-08-03T19:47:53 |
| github_id | 31,605,662 |
| star_events_count | 159 |
| fork_events_count | 179 |
| gha_license_id | NOASSERTION |
| gha_event_created_at | 2023-09-12T18:38:42 |
| gha_created_at | 2015-03-03T15:33:00 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 1,790 |
| extension | py |
| filename | seq.py |
"""
sequencing bfb test (10 day seq,conc tests)
"""
from CIME.XML.standard_module_setup import *
from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
logger = logging.getLogger(__name__)
class SEQ(SystemTestsCompareTwo):
def __init__(self, case, **kwargs):
"""
initialize an object interface to file env_test.xml in the case directory
"""
SystemTestsCompareTwo.__init__(
self,
case,
separate_builds=True,
run_two_suffix="seq",
run_one_description="base",
run_two_description="sequence",
**kwargs
)
def _case_one_setup(self):
pass
def _case_two_setup(self):
comp_classes = self._case.get_values("COMP_CLASSES")
any_changes = False
for comp in comp_classes:
any_changes |= self._case.get_value("ROOTPE_{}".format(comp)) != 0
if any_changes:
for comp in comp_classes:
self._case.set_value("ROOTPE_{}".format(comp), 0)
else:
totalpes = self._case.get_value("TOTALPES")
newntasks = max(1, totalpes // len(comp_classes))
rootpe = newntasks
for comp in comp_classes:
# here we set the cpl to have the first 2 tasks
# and each component to have a different ROOTPE
if comp == "CPL":
self._case.set_value("NTASKS_CPL", newntasks)
else:
self._case.set_value("NTASKS_{}".format(comp), newntasks)
self._case.set_value("ROOTPE_{}".format(comp), rootpe)
rootpe += newntasks
self._case.flush()
self._case.case_setup(test_mode=True, reset=True)
|
| Field | Value |
|---|---|
| blob_id | a87d53d07e9ef74429960ff9b6353b1f40a77c59 |
| directory_id | 8e6bb9c1a620a162b7d017c2373dd01be54ea86d |
| path | /bio/pyroe/makeunspliceunspliced/test/Snakefile |
| content_id | 7b0ffdd046aaa1261091016a82690e86bb8e0652 |
| detected_licenses | [] |
| license_type | no_license |
| repo_name | snakemake/snakemake-wrappers |
| snapshot_id | 5d0963502c26eb709513567e25422871fe477cf2 |
| revision_id | 996bdcf2a96535b967dfa483c363a5496f4b3906 |
| branch_name | refs/heads/master |
| visit_date | 2023-08-19T05:18:44.337503 |
| revision_date | 2023-08-18T12:03:38 |
| committer_date | 2023-08-18T12:03:38 |
| github_id | 213,319,194 |
| star_events_count | 184 |
| fork_events_count | 189 |
| gha_license_id | null |
| gha_event_created_at | 2023-09-12T11:38:35 |
| gha_created_at | 2019-10-07T07:20:59 |
| gha_language | CAP CDS |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 664 |
| filename | Snakefile |
`content`:

```
rule test_pyroe_makesplicedunspliced:
    input:
        fasta="genome.fasta",
        gtf="annotation.gtf",
        spliced="extra_spliced.fasta",  # Optional path to additional spliced sequences (FASTA)
        unspliced="extra_unspliced.fasta",  # Optional path to additional unspliced sequences (FASTA)
    output:
        gene_id_to_name="gene_id_to_name.tsv",
        fasta="spliceu.fa",
        g2g="spliceu_g2g.tsv",
        t2g_3col="spliceu_t2g_3col.tsv",
        t2g="spliceu_t2g.tsv",
    threads: 1
    log:
        "logs/pyroe.log",
    params:
        extra="",  # Optional parameters
    wrapper:
        "master/bio/pyroe/makeunspliceunspliced/"
```
| Field | Value |
|---|---|
| blob_id | 93ddf6c7774c6bdd1aa8e82e782cc3f7508e893e |
| directory_id | f487532281c1c6a36a5c62a29744d8323584891b |
| path | /sdk/python/pulumi_azure/appplatform/outputs.py |
| content_id | 6dab0c986249cef9444f16192288cd4f2bacacde |
| detected_licenses | ["BSD-3-Clause", "MPL-2.0", "Apache-2.0"] |
| license_type | permissive |
| repo_name | pulumi/pulumi-azure |
| snapshot_id | a8f8f21c46c802aecf1397c737662ddcc438a2db |
| revision_id | c16962e5c4f5810efec2806b8bb49d0da960d1ea |
| branch_name | refs/heads/master |
| visit_date | 2023-08-25T00:17:05.290397 |
| revision_date | 2023-08-24T06:11:55 |
| committer_date | 2023-08-24T06:11:55 |
| github_id | 103,183,737 |
| star_events_count | 129 |
| fork_events_count | 57 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-13T05:44:10 |
| gha_created_at | 2017-09-11T20:19:15 |
| gha_language | Java |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 124,866 |
| extension | py |
| filename | outputs.py |
`content`:

```python
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs

__all__ = [
    'SpringCloudApiPortalSso',
    'SpringCloudAppCustomPersistentDisk',
    'SpringCloudAppIdentity',
    'SpringCloudAppIngressSettings',
    'SpringCloudAppPersistentDisk',
    'SpringCloudBuildDeploymentQuota',
    'SpringCloudBuildPackBindingLaunch',
    'SpringCloudBuilderBuildPackGroup',
    'SpringCloudBuilderStack',
    'SpringCloudConfigurationServiceRepository',
    'SpringCloudConnectionAuthentication',
    'SpringCloudConnectionSecretStore',
    'SpringCloudContainerDeploymentQuota',
    'SpringCloudCustomizedAcceleratorGitRepository',
    'SpringCloudCustomizedAcceleratorGitRepositoryBasicAuth',
    'SpringCloudCustomizedAcceleratorGitRepositorySshAuth',
    'SpringCloudDevToolPortalSso',
    'SpringCloudGatewayApiMetadata',
    'SpringCloudGatewayClientAuthorization',
    'SpringCloudGatewayCors',
    'SpringCloudGatewayQuota',
    'SpringCloudGatewayRouteConfigOpenApi',
    'SpringCloudGatewayRouteConfigRoute',
    'SpringCloudGatewaySso',
    'SpringCloudJavaDeploymentQuota',
    'SpringCloudServiceConfigServerGitSetting',
    'SpringCloudServiceConfigServerGitSettingHttpBasicAuth',
    'SpringCloudServiceConfigServerGitSettingRepository',
    'SpringCloudServiceConfigServerGitSettingRepositoryHttpBasicAuth',
    'SpringCloudServiceConfigServerGitSettingRepositorySshAuth',
    'SpringCloudServiceConfigServerGitSettingSshAuth',
    'SpringCloudServiceContainerRegistry',
    'SpringCloudServiceDefaultBuildService',
    'SpringCloudServiceMarketplace',
    'SpringCloudServiceNetwork',
    'SpringCloudServiceRequiredNetworkTrafficRule',
    'SpringCloudServiceTrace',
    'GetSpringCloudAppIdentityResult',
    'GetSpringCloudAppPersistentDiskResult',
    'GetSpringCloudServiceConfigServerGitSettingResult',
    'GetSpringCloudServiceConfigServerGitSettingHttpBasicAuthResult',
    'GetSpringCloudServiceConfigServerGitSettingRepositoryResult',
    'GetSpringCloudServiceConfigServerGitSettingRepositoryHttpBasicAuthResult',
    'GetSpringCloudServiceConfigServerGitSettingRepositorySshAuthResult',
    'GetSpringCloudServiceConfigServerGitSettingSshAuthResult',
    'GetSpringCloudServiceRequiredNetworkTrafficRuleResult',
]

@pulumi.output_type
class SpringCloudApiPortalSso(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "clientId":
            suggest = "client_id"
        elif key == "clientSecret":
            suggest = "client_secret"
        elif key == "issuerUri":
            suggest = "issuer_uri"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudApiPortalSso. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudApiPortalSso.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudApiPortalSso.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 client_id: Optional[str] = None,
                 client_secret: Optional[str] = None,
                 issuer_uri: Optional[str] = None,
                 scopes: Optional[Sequence[str]] = None):
        """
        :param str client_id: The public identifier for the application.
        :param str client_secret: The secret known only to the application and the authorization server.
        :param str issuer_uri: The URI of Issuer Identifier.
        :param Sequence[str] scopes: It defines the specific actions applications can be allowed to do on a user's behalf.
        """
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if client_secret is not None:
            pulumi.set(__self__, "client_secret", client_secret)
        if issuer_uri is not None:
            pulumi.set(__self__, "issuer_uri", issuer_uri)
        if scopes is not None:
            pulumi.set(__self__, "scopes", scopes)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[str]:
        """
        The public identifier for the application.
        """
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> Optional[str]:
        """
        The secret known only to the application and the authorization server.
        """
        return pulumi.get(self, "client_secret")

    @property
    @pulumi.getter(name="issuerUri")
    def issuer_uri(self) -> Optional[str]:
        """
        The URI of Issuer Identifier.
        """
        return pulumi.get(self, "issuer_uri")

    @property
    @pulumi.getter
    def scopes(self) -> Optional[Sequence[str]]:
        """
        It defines the specific actions applications can be allowed to do on a user's behalf.
        """
        return pulumi.get(self, "scopes")

@pulumi.output_type
class SpringCloudAppCustomPersistentDisk(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "mountPath":
            suggest = "mount_path"
        elif key == "shareName":
            suggest = "share_name"
        elif key == "storageName":
            suggest = "storage_name"
        elif key == "mountOptions":
            suggest = "mount_options"
        elif key == "readOnlyEnabled":
            suggest = "read_only_enabled"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudAppCustomPersistentDisk. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudAppCustomPersistentDisk.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudAppCustomPersistentDisk.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 mount_path: str,
                 share_name: str,
                 storage_name: str,
                 mount_options: Optional[Sequence[str]] = None,
                 read_only_enabled: Optional[bool] = None):
        """
        :param str mount_path: The mount path of the persistent disk.
        :param str share_name: The share name of the Azure File share.
        :param str storage_name: The name of the Spring Cloud Storage.
        :param Sequence[str] mount_options: These are the mount options for a persistent disk.
        :param bool read_only_enabled: Indicates whether the persistent disk is a readOnly one.
        """
        pulumi.set(__self__, "mount_path", mount_path)
        pulumi.set(__self__, "share_name", share_name)
        pulumi.set(__self__, "storage_name", storage_name)
        if mount_options is not None:
            pulumi.set(__self__, "mount_options", mount_options)
        if read_only_enabled is not None:
            pulumi.set(__self__, "read_only_enabled", read_only_enabled)

    @property
    @pulumi.getter(name="mountPath")
    def mount_path(self) -> str:
        """
        The mount path of the persistent disk.
        """
        return pulumi.get(self, "mount_path")

    @property
    @pulumi.getter(name="shareName")
    def share_name(self) -> str:
        """
        The share name of the Azure File share.
        """
        return pulumi.get(self, "share_name")

    @property
    @pulumi.getter(name="storageName")
    def storage_name(self) -> str:
        """
        The name of the Spring Cloud Storage.
        """
        return pulumi.get(self, "storage_name")

    @property
    @pulumi.getter(name="mountOptions")
    def mount_options(self) -> Optional[Sequence[str]]:
        """
        These are the mount options for a persistent disk.
        """
        return pulumi.get(self, "mount_options")

    @property
    @pulumi.getter(name="readOnlyEnabled")
    def read_only_enabled(self) -> Optional[bool]:
        """
        Indicates whether the persistent disk is a readOnly one.
        """
        return pulumi.get(self, "read_only_enabled")


@pulumi.output_type
class SpringCloudAppIdentity(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "identityIds":
            suggest = "identity_ids"
        elif key == "principalId":
            suggest = "principal_id"
        elif key == "tenantId":
            suggest = "tenant_id"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudAppIdentity. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudAppIdentity.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudAppIdentity.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 type: str,
                 identity_ids: Optional[Sequence[str]] = None,
                 principal_id: Optional[str] = None,
                 tenant_id: Optional[str] = None):
        """
        :param str type: Specifies the type of Managed Service Identity that should be configured on this Spring Cloud Application. Possible values are `SystemAssigned`, `UserAssigned`, `SystemAssigned, UserAssigned` (to enable both).
        :param Sequence[str] identity_ids: A list of User Assigned Managed Identity IDs to be assigned to this Spring Cloud Application.
            > **NOTE:** This is required when `type` is set to `UserAssigned` or `SystemAssigned, UserAssigned`.
        :param str principal_id: The Principal ID for the Service Principal associated with the Managed Service Identity of this Spring Cloud Application.
        :param str tenant_id: The Tenant ID for the Service Principal associated with the Managed Service Identity of this Spring Cloud Application.
        """
        pulumi.set(__self__, "type", type)
        if identity_ids is not None:
            pulumi.set(__self__, "identity_ids", identity_ids)
        if principal_id is not None:
            pulumi.set(__self__, "principal_id", principal_id)
        if tenant_id is not None:
            pulumi.set(__self__, "tenant_id", tenant_id)

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Specifies the type of Managed Service Identity that should be configured on this Spring Cloud Application. Possible values are `SystemAssigned`, `UserAssigned`, `SystemAssigned, UserAssigned` (to enable both).
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter(name="identityIds")
    def identity_ids(self) -> Optional[Sequence[str]]:
        """
        A list of User Assigned Managed Identity IDs to be assigned to this Spring Cloud Application.
        > **NOTE:** This is required when `type` is set to `UserAssigned` or `SystemAssigned, UserAssigned`.
        """
        return pulumi.get(self, "identity_ids")

    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> Optional[str]:
        """
        The Principal ID for the Service Principal associated with the Managed Service Identity of this Spring Cloud Application.
        """
        return pulumi.get(self, "principal_id")

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[str]:
        """
        The Tenant ID for the Service Principal associated with the Managed Service Identity of this Spring Cloud Application.
        """
        return pulumi.get(self, "tenant_id")

@pulumi.output_type
class SpringCloudAppIngressSettings(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "backendProtocol":
            suggest = "backend_protocol"
        elif key == "readTimeoutInSeconds":
            suggest = "read_timeout_in_seconds"
        elif key == "sendTimeoutInSeconds":
            suggest = "send_timeout_in_seconds"
        elif key == "sessionAffinity":
            suggest = "session_affinity"
        elif key == "sessionCookieMaxAge":
            suggest = "session_cookie_max_age"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudAppIngressSettings. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudAppIngressSettings.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudAppIngressSettings.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 backend_protocol: Optional[str] = None,
                 read_timeout_in_seconds: Optional[int] = None,
                 send_timeout_in_seconds: Optional[int] = None,
                 session_affinity: Optional[str] = None,
                 session_cookie_max_age: Optional[int] = None):
        """
        :param str backend_protocol: Specifies how ingress should communicate with this app backend service. Allowed values are `GRPC` and `Default`. Defaults to `Default`.
        :param int read_timeout_in_seconds: Specifies the ingress read time out in seconds. Defaults to `300`.
        :param int send_timeout_in_seconds: Specifies the ingress send time out in seconds. Defaults to `60`.
        :param str session_affinity: Specifies the type of the affinity, set this to `Cookie` to enable session affinity. Allowed values are `Cookie` and `None`. Defaults to `None`.
        :param int session_cookie_max_age: Specifies the time in seconds until the cookie expires.
        """
        if backend_protocol is not None:
            pulumi.set(__self__, "backend_protocol", backend_protocol)
        if read_timeout_in_seconds is not None:
            pulumi.set(__self__, "read_timeout_in_seconds", read_timeout_in_seconds)
        if send_timeout_in_seconds is not None:
            pulumi.set(__self__, "send_timeout_in_seconds", send_timeout_in_seconds)
        if session_affinity is not None:
            pulumi.set(__self__, "session_affinity", session_affinity)
        if session_cookie_max_age is not None:
            pulumi.set(__self__, "session_cookie_max_age", session_cookie_max_age)

    @property
    @pulumi.getter(name="backendProtocol")
    def backend_protocol(self) -> Optional[str]:
        """
        Specifies how ingress should communicate with this app backend service. Allowed values are `GRPC` and `Default`. Defaults to `Default`.
        """
        return pulumi.get(self, "backend_protocol")

    @property
    @pulumi.getter(name="readTimeoutInSeconds")
    def read_timeout_in_seconds(self) -> Optional[int]:
        """
        Specifies the ingress read time out in seconds. Defaults to `300`.
        """
        return pulumi.get(self, "read_timeout_in_seconds")

    @property
    @pulumi.getter(name="sendTimeoutInSeconds")
    def send_timeout_in_seconds(self) -> Optional[int]:
        """
        Specifies the ingress send time out in seconds. Defaults to `60`.
        """
        return pulumi.get(self, "send_timeout_in_seconds")

    @property
    @pulumi.getter(name="sessionAffinity")
    def session_affinity(self) -> Optional[str]:
        """
        Specifies the type of the affinity, set this to `Cookie` to enable session affinity. Allowed values are `Cookie` and `None`. Defaults to `None`.
        """
        return pulumi.get(self, "session_affinity")

    @property
    @pulumi.getter(name="sessionCookieMaxAge")
    def session_cookie_max_age(self) -> Optional[int]:
        """
        Specifies the time in seconds until the cookie expires.
        """
        return pulumi.get(self, "session_cookie_max_age")


@pulumi.output_type
class SpringCloudAppPersistentDisk(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "sizeInGb":
            suggest = "size_in_gb"
        elif key == "mountPath":
            suggest = "mount_path"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudAppPersistentDisk. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudAppPersistentDisk.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudAppPersistentDisk.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 size_in_gb: int,
                 mount_path: Optional[str] = None):
        """
        :param int size_in_gb: Specifies the size of the persistent disk in GB. Possible values are between `0` and `50`.
        :param str mount_path: Specifies the mount path of the persistent disk. Defaults to `/persistent`.
        """
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        if mount_path is not None:
            pulumi.set(__self__, "mount_path", mount_path)

    @property
    @pulumi.getter(name="sizeInGb")
    def size_in_gb(self) -> int:
        """
        Specifies the size of the persistent disk in GB. Possible values are between `0` and `50`.
        """
        return pulumi.get(self, "size_in_gb")

    @property
    @pulumi.getter(name="mountPath")
    def mount_path(self) -> Optional[str]:
        """
        Specifies the mount path of the persistent disk. Defaults to `/persistent`.
        """
        return pulumi.get(self, "mount_path")

@pulumi.output_type
class SpringCloudBuildDeploymentQuota(dict):
    def __init__(__self__, *,
                 cpu: Optional[str] = None,
                 memory: Optional[str] = None):
        """
        :param str cpu: Specifies the required cpu of the Spring Cloud Deployment. Possible Values are `500m`, `1`, `2`, `3` and `4`. Defaults to `1` if not specified.
            > **Note:** `cpu` supports `500m` and `1` for Basic tier, `500m`, `1`, `2`, `3` and `4` for Standard tier.
        :param str memory: Specifies the required memory size of the Spring Cloud Deployment. Possible Values are `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi`. Defaults to `1Gi` if not specified.
            > **Note:** `memory` supports `512Mi`, `1Gi` and `2Gi` for Basic tier, `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi` for Standard tier.
        """
        if cpu is not None:
            pulumi.set(__self__, "cpu", cpu)
        if memory is not None:
            pulumi.set(__self__, "memory", memory)

    @property
    @pulumi.getter
    def cpu(self) -> Optional[str]:
        """
        Specifies the required cpu of the Spring Cloud Deployment. Possible Values are `500m`, `1`, `2`, `3` and `4`. Defaults to `1` if not specified.
        > **Note:** `cpu` supports `500m` and `1` for Basic tier, `500m`, `1`, `2`, `3` and `4` for Standard tier.
        """
        return pulumi.get(self, "cpu")

    @property
    @pulumi.getter
    def memory(self) -> Optional[str]:
        """
        Specifies the required memory size of the Spring Cloud Deployment. Possible Values are `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi`. Defaults to `1Gi` if not specified.
        > **Note:** `memory` supports `512Mi`, `1Gi` and `2Gi` for Basic tier, `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi` for Standard tier.
        """
        return pulumi.get(self, "memory")


@pulumi.output_type
class SpringCloudBuildPackBindingLaunch(dict):
    def __init__(__self__, *,
                 properties: Optional[Mapping[str, str]] = None,
                 secrets: Optional[Mapping[str, str]] = None):
        """
        :param Mapping[str, str] properties: Specifies a map of non-sensitive properties for launchProperties.
        :param Mapping[str, str] secrets: Specifies a map of sensitive properties for launchProperties.
        """
        if properties is not None:
            pulumi.set(__self__, "properties", properties)
        if secrets is not None:
            pulumi.set(__self__, "secrets", secrets)

    @property
    @pulumi.getter
    def properties(self) -> Optional[Mapping[str, str]]:
        """
        Specifies a map of non-sensitive properties for launchProperties.
        """
        return pulumi.get(self, "properties")

    @property
    @pulumi.getter
    def secrets(self) -> Optional[Mapping[str, str]]:
        """
        Specifies a map of sensitive properties for launchProperties.
        """
        return pulumi.get(self, "secrets")


@pulumi.output_type
class SpringCloudBuilderBuildPackGroup(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "buildPackIds":
            suggest = "build_pack_ids"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudBuilderBuildPackGroup. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudBuilderBuildPackGroup.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudBuilderBuildPackGroup.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 name: str,
                 build_pack_ids: Optional[Sequence[str]] = None):
        """
        :param str name: The name which should be used for this build pack group.
        :param Sequence[str] build_pack_ids: Specifies a list of the build pack's ID.
        """
        pulumi.set(__self__, "name", name)
        if build_pack_ids is not None:
            pulumi.set(__self__, "build_pack_ids", build_pack_ids)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name which should be used for this build pack group.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="buildPackIds")
    def build_pack_ids(self) -> Optional[Sequence[str]]:
        """
        Specifies a list of the build pack's ID.
        """
        return pulumi.get(self, "build_pack_ids")


@pulumi.output_type
class SpringCloudBuilderStack(dict):
    def __init__(__self__, *,
                 id: str,
                 version: str):
        """
        :param str id: Specifies the ID of the ClusterStack.
        :param str version: Specifies the version of the ClusterStack
        """
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Specifies the ID of the ClusterStack.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def version(self) -> str:
        """
        Specifies the version of the ClusterStack
        """
        return pulumi.get(self, "version")

@pulumi.output_type
class SpringCloudConfigurationServiceRepository(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "caCertificateId":
            suggest = "ca_certificate_id"
        elif key == "hostKey":
            suggest = "host_key"
        elif key == "hostKeyAlgorithm":
            suggest = "host_key_algorithm"
        elif key == "privateKey":
            suggest = "private_key"
        elif key == "searchPaths":
            suggest = "search_paths"
        elif key == "strictHostKeyChecking":
            suggest = "strict_host_key_checking"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudConfigurationServiceRepository. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudConfigurationServiceRepository.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudConfigurationServiceRepository.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 label: str,
                 name: str,
                 patterns: Sequence[str],
                 uri: str,
                 ca_certificate_id: Optional[str] = None,
                 host_key: Optional[str] = None,
                 host_key_algorithm: Optional[str] = None,
                 password: Optional[str] = None,
                 private_key: Optional[str] = None,
                 search_paths: Optional[Sequence[str]] = None,
                 strict_host_key_checking: Optional[bool] = None,
                 username: Optional[str] = None):
        """
        :param str label: Specifies the label of the repository.
        :param str name: Specifies the name which should be used for this repository.
        :param Sequence[str] patterns: Specifies the collection of patterns of the repository.
        :param str uri: Specifies the URI of the repository.
        :param str ca_certificate_id: Specifies the ID of the Certificate Authority used when retrieving the Git Repository via HTTPS.
        :param str host_key: Specifies the SSH public key of git repository.
        :param str host_key_algorithm: Specifies the SSH key algorithm of git repository.
        :param str password: Specifies the password of git repository basic auth.
        :param str private_key: Specifies the SSH private key of git repository.
        :param Sequence[str] search_paths: Specifies a list of search paths of the repository.
        :param bool strict_host_key_checking: Specifies whether to enable strict host key checking.
        :param str username: Specifies the username of git repository basic auth.
        """
        pulumi.set(__self__, "label", label)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "patterns", patterns)
        pulumi.set(__self__, "uri", uri)
        if ca_certificate_id is not None:
            pulumi.set(__self__, "ca_certificate_id", ca_certificate_id)
        if host_key is not None:
            pulumi.set(__self__, "host_key", host_key)
        if host_key_algorithm is not None:
            pulumi.set(__self__, "host_key_algorithm", host_key_algorithm)
        if password is not None:
            pulumi.set(__self__, "password", password)
        if private_key is not None:
            pulumi.set(__self__, "private_key", private_key)
        if search_paths is not None:
            pulumi.set(__self__, "search_paths", search_paths)
        if strict_host_key_checking is not None:
            pulumi.set(__self__, "strict_host_key_checking", strict_host_key_checking)
        if username is not None:
            pulumi.set(__self__, "username", username)

    @property
    @pulumi.getter
    def label(self) -> str:
        """
        Specifies the label of the repository.
        """
        return pulumi.get(self, "label")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Specifies the name which should be used for this repository.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def patterns(self) -> Sequence[str]:
        """
        Specifies the collection of patterns of the repository.
        """
        return pulumi.get(self, "patterns")

    @property
    @pulumi.getter
    def uri(self) -> str:
        """
        Specifies the URI of the repository.
        """
        return pulumi.get(self, "uri")

    @property
    @pulumi.getter(name="caCertificateId")
    def ca_certificate_id(self) -> Optional[str]:
        """
        Specifies the ID of the Certificate Authority used when retrieving the Git Repository via HTTPS.
        """
        return pulumi.get(self, "ca_certificate_id")

    @property
    @pulumi.getter(name="hostKey")
    def host_key(self) -> Optional[str]:
        """
        Specifies the SSH public key of git repository.
        """
        return pulumi.get(self, "host_key")

    @property
    @pulumi.getter(name="hostKeyAlgorithm")
    def host_key_algorithm(self) -> Optional[str]:
        """
        Specifies the SSH key algorithm of git repository.
        """
        return pulumi.get(self, "host_key_algorithm")

    @property
    @pulumi.getter
    def password(self) -> Optional[str]:
        """
        Specifies the password of git repository basic auth.
        """
        return pulumi.get(self, "password")

    @property
    @pulumi.getter(name="privateKey")
    def private_key(self) -> Optional[str]:
        """
        Specifies the SSH private key of git repository.
        """
        return pulumi.get(self, "private_key")

    @property
    @pulumi.getter(name="searchPaths")
    def search_paths(self) -> Optional[Sequence[str]]:
        """
        Specifies a list of search paths of the repository.
        """
        return pulumi.get(self, "search_paths")

    @property
    @pulumi.getter(name="strictHostKeyChecking")
    def strict_host_key_checking(self) -> Optional[bool]:
        """
        Specifies whether to enable strict host key checking.
        """
        return pulumi.get(self, "strict_host_key_checking")

    @property
    @pulumi.getter
    def username(self) -> Optional[str]:
        """
        Specifies the username of git repository basic auth.
        """
        return pulumi.get(self, "username")

@pulumi.output_type
class SpringCloudConnectionAuthentication(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "clientId":
            suggest = "client_id"
        elif key == "principalId":
            suggest = "principal_id"
        elif key == "subscriptionId":
            suggest = "subscription_id"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudConnectionAuthentication. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudConnectionAuthentication.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudConnectionAuthentication.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 type: str,
                 certificate: Optional[str] = None,
                 client_id: Optional[str] = None,
                 name: Optional[str] = None,
                 principal_id: Optional[str] = None,
                 secret: Optional[str] = None,
                 subscription_id: Optional[str] = None):
        """
        :param str type: The authentication type. Possible values are `systemAssignedIdentity`, `userAssignedIdentity`, `servicePrincipalSecret`, `servicePrincipalCertificate`, `secret`. Changing this forces a new resource to be created.
        :param str certificate: Service principal certificate for `servicePrincipal` auth. Should be specified when `type` is set to `servicePrincipalCertificate`.
        :param str client_id: Client ID for `userAssignedIdentity` or `servicePrincipal` auth. Should be specified when `type` is set to `servicePrincipalSecret` or `servicePrincipalCertificate`. When `type` is set to `userAssignedIdentity`, `client_id` and `subscription_id` should be either both specified or both not specified.
        :param str name: Username or account name for secret auth. `name` and `secret` should be either both specified or both not specified when `type` is set to `secret`.
        :param str principal_id: Principal ID for `servicePrincipal` auth. Should be specified when `type` is set to `servicePrincipalSecret` or `servicePrincipalCertificate`.
        :param str secret: Password or account key for secret auth. `secret` and `name` should be either both specified or both not specified when `type` is set to `secret`.
        :param str subscription_id: Subscription ID for `userAssignedIdentity`. `subscription_id` and `client_id` should be either both specified or both not specified.
        """
        pulumi.set(__self__, "type", type)
        if certificate is not None:
            pulumi.set(__self__, "certificate", certificate)
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if principal_id is not None:
            pulumi.set(__self__, "principal_id", principal_id)
        if secret is not None:
            pulumi.set(__self__, "secret", secret)
        if subscription_id is not None:
            pulumi.set(__self__, "subscription_id", subscription_id)

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The authentication type. Possible values are `systemAssignedIdentity`, `userAssignedIdentity`, `servicePrincipalSecret`, `servicePrincipalCertificate`, `secret`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def certificate(self) -> Optional[str]:
        """
        Service principal certificate for `servicePrincipal` auth. Should be specified when `type` is set to `servicePrincipalCertificate`.
        """
        return pulumi.get(self, "certificate")

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[str]:
        """
        Client ID for `userAssignedIdentity` or `servicePrincipal` auth. Should be specified when `type` is set to `servicePrincipalSecret` or `servicePrincipalCertificate`. When `type` is set to `userAssignedIdentity`, `client_id` and `subscription_id` should be either both specified or both not specified.
        """
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Username or account name for secret auth. `name` and `secret` should be either both specified or both not specified when `type` is set to `secret`.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> Optional[str]:
        """
        Principal ID for `servicePrincipal` auth. Should be specified when `type` is set to `servicePrincipalSecret` or `servicePrincipalCertificate`.
        """
        return pulumi.get(self, "principal_id")

    @property
    @pulumi.getter
    def secret(self) -> Optional[str]:
        """
        Password or account key for secret auth. `secret` and `name` should be either both specified or both not specified when `type` is set to `secret`.
        """
        return pulumi.get(self, "secret")

    @property
    @pulumi.getter(name="subscriptionId")
    def subscription_id(self) -> Optional[str]:
        """
        Subscription ID for `userAssignedIdentity`. `subscription_id` and `client_id` should be either both specified or both not specified.
        """
        return pulumi.get(self, "subscription_id")


@pulumi.output_type
class SpringCloudConnectionSecretStore(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "keyVaultId":
            suggest = "key_vault_id"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudConnectionSecretStore. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudConnectionSecretStore.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudConnectionSecretStore.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 key_vault_id: str):
        """
        :param str key_vault_id: The key vault id to store secret.
        """
        pulumi.set(__self__, "key_vault_id", key_vault_id)

    @property
    @pulumi.getter(name="keyVaultId")
    def key_vault_id(self) -> str:
        """
        The key vault id to store secret.
        """
        return pulumi.get(self, "key_vault_id")

@pulumi.output_type
class SpringCloudContainerDeploymentQuota(dict):
    def __init__(__self__, *,
                 cpu: Optional[str] = None,
                 memory: Optional[str] = None):
        """
        :param str cpu: Specifies the required cpu of the Spring Cloud Deployment. Possible Values are `500m`, `1`, `2`, `3` and `4`. Defaults to `1` if not specified.
            > **Note:** `cpu` supports `500m` and `1` for Basic tier, `500m`, `1`, `2`, `3` and `4` for Standard tier.
        :param str memory: Specifies the required memory size of the Spring Cloud Deployment. Possible Values are `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi`. Defaults to `1Gi` if not specified.
            > **Note:** `memory` supports `512Mi`, `1Gi` and `2Gi` for Basic tier, `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi` for Standard tier.
        """
        if cpu is not None:
            pulumi.set(__self__, "cpu", cpu)
        if memory is not None:
            pulumi.set(__self__, "memory", memory)

    @property
    @pulumi.getter
    def cpu(self) -> Optional[str]:
        """
        Specifies the required cpu of the Spring Cloud Deployment. Possible Values are `500m`, `1`, `2`, `3` and `4`. Defaults to `1` if not specified.
        > **Note:** `cpu` supports `500m` and `1` for Basic tier, `500m`, `1`, `2`, `3` and `4` for Standard tier.
        """
        return pulumi.get(self, "cpu")

    @property
    @pulumi.getter
    def memory(self) -> Optional[str]:
        """
        Specifies the required memory size of the Spring Cloud Deployment. Possible Values are `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi`. Defaults to `1Gi` if not specified.
        > **Note:** `memory` supports `512Mi`, `1Gi` and `2Gi` for Basic tier, `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi` for Standard tier.
        """
        return pulumi.get(self, "memory")


@pulumi.output_type
class SpringCloudCustomizedAcceleratorGitRepository(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "basicAuth":
            suggest = "basic_auth"
        elif key == "caCertificateId":
            suggest = "ca_certificate_id"
        elif key == "gitTag":
            suggest = "git_tag"
        elif key == "intervalInSeconds":
            suggest = "interval_in_seconds"
        elif key == "sshAuth":
            suggest = "ssh_auth"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudCustomizedAcceleratorGitRepository. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudCustomizedAcceleratorGitRepository.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudCustomizedAcceleratorGitRepository.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 url: str,
                 basic_auth: Optional['outputs.SpringCloudCustomizedAcceleratorGitRepositoryBasicAuth'] = None,
                 branch: Optional[str] = None,
                 ca_certificate_id: Optional[str] = None,
                 commit: Optional[str] = None,
                 git_tag: Optional[str] = None,
                 interval_in_seconds: Optional[int] = None,
                 ssh_auth: Optional['outputs.SpringCloudCustomizedAcceleratorGitRepositorySshAuth'] = None):
        """
        :param str url: Specifies Git repository URL for the accelerator.
        :param 'SpringCloudCustomizedAcceleratorGitRepositoryBasicAuthArgs' basic_auth: A `basic_auth` block as defined below. Conflicts with `git_repository.0.ssh_auth`. Changing this forces a new Spring Cloud Customized Accelerator to be created.
        :param str branch: Specifies the Git repository branch to be used.
        :param str ca_certificate_id: Specifies the ID of the CA Spring Cloud Certificate for https URL of Git repository.
        :param str commit: Specifies the Git repository commit to be used.
        :param str git_tag: Specifies the Git repository tag to be used.
        :param int interval_in_seconds: Specifies the interval for checking for updates to Git or image repository. It should be greater than 10.
        :param 'SpringCloudCustomizedAcceleratorGitRepositorySshAuthArgs' ssh_auth: A `ssh_auth` block as defined below. Conflicts with `git_repository.0.basic_auth`. Changing this forces a new Spring Cloud Customized Accelerator to be created.
        """
        pulumi.set(__self__, "url", url)
        if basic_auth is not None:
            pulumi.set(__self__, "basic_auth", basic_auth)
        if branch is not None:
            pulumi.set(__self__, "branch", branch)
        if ca_certificate_id is not None:
            pulumi.set(__self__, "ca_certificate_id", ca_certificate_id)
        if commit is not None:
            pulumi.set(__self__, "commit", commit)
        if git_tag is not None:
            pulumi.set(__self__, "git_tag", git_tag)
        if interval_in_seconds is not None:
            pulumi.set(__self__, "interval_in_seconds", interval_in_seconds)
        if ssh_auth is not None:
            pulumi.set(__self__, "ssh_auth", ssh_auth)

    @property
    @pulumi.getter
    def url(self) -> str:
        """
        Specifies Git repository URL for the accelerator.
        """
        return pulumi.get(self, "url")

    @property
    @pulumi.getter(name="basicAuth")
    def basic_auth(self) -> Optional['outputs.SpringCloudCustomizedAcceleratorGitRepositoryBasicAuth']:
        """
        A `basic_auth` block as defined below. Conflicts with `git_repository.0.ssh_auth`. Changing this forces a new Spring Cloud Customized Accelerator to be created.
        """
        return pulumi.get(self, "basic_auth")

    @property
    @pulumi.getter
    def branch(self) -> Optional[str]:
        """
        Specifies the Git repository branch to be used.
        """
        return pulumi.get(self, "branch")

    @property
    @pulumi.getter(name="caCertificateId")
    def ca_certificate_id(self) -> Optional[str]:
        """
        Specifies the ID of the CA Spring Cloud Certificate for https URL of Git repository.
        """
        return pulumi.get(self, "ca_certificate_id")

    @property
    @pulumi.getter
    def commit(self) -> Optional[str]:
        """
        Specifies the Git repository commit to be used.
        """
        return pulumi.get(self, "commit")

    @property
    @pulumi.getter(name="gitTag")
    def git_tag(self) -> Optional[str]:
        """
        Specifies the Git repository tag to be used.
        """
        return pulumi.get(self, "git_tag")

    @property
    @pulumi.getter(name="intervalInSeconds")
    def interval_in_seconds(self) -> Optional[int]:
        """
        Specifies the interval for checking for updates to Git or image repository. It should be greater than 10.
        """
        return pulumi.get(self, "interval_in_seconds")

    @property
    @pulumi.getter(name="sshAuth")
    def ssh_auth(self) -> Optional['outputs.SpringCloudCustomizedAcceleratorGitRepositorySshAuth']:
        """
        A `ssh_auth` block as defined below. Conflicts with `git_repository.0.basic_auth`. Changing this forces a new Spring Cloud Customized Accelerator to be created.
        """
        return pulumi.get(self, "ssh_auth")

@pulumi.output_type
class SpringCloudCustomizedAcceleratorGitRepositoryBasicAuth(dict):
    def __init__(__self__, *,
                 password: str,
                 username: str):
        """
        :param str password: Specifies the password of git repository basic auth.
        :param str username: Specifies the username of git repository basic auth.
        """
        pulumi.set(__self__, "password", password)
        pulumi.set(__self__, "username", username)

    @property
    @pulumi.getter
    def password(self) -> str:
        """
        Specifies the password of git repository basic auth.
        """
        return pulumi.get(self, "password")

    @property
    @pulumi.getter
    def username(self) -> str:
        """
        Specifies the username of git repository basic auth.
        """
        return pulumi.get(self, "username")


@pulumi.output_type
class SpringCloudCustomizedAcceleratorGitRepositorySshAuth(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "privateKey":
            suggest = "private_key"
        elif key == "hostKey":
            suggest = "host_key"
        elif key == "hostKeyAlgorithm":
            suggest = "host_key_algorithm"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudCustomizedAcceleratorGitRepositorySshAuth. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudCustomizedAcceleratorGitRepositorySshAuth.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudCustomizedAcceleratorGitRepositorySshAuth.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 private_key: str,
                 host_key: Optional[str] = None,
                 host_key_algorithm: Optional[str] = None):
        """
        :param str private_key: Specifies the Private SSH Key of git repository basic auth.
        :param str host_key: Specifies the Public SSH Key of git repository basic auth.
        :param str host_key_algorithm: Specifies the SSH Key algorithm of git repository basic auth.
        """
        pulumi.set(__self__, "private_key", private_key)
        if host_key is not None:
            pulumi.set(__self__, "host_key", host_key)
        if host_key_algorithm is not None:
            pulumi.set(__self__, "host_key_algorithm", host_key_algorithm)

    @property
    @pulumi.getter(name="privateKey")
    def private_key(self) -> str:
        """
        Specifies the Private SSH Key of git repository basic auth.
        """
        return pulumi.get(self, "private_key")

    @property
    @pulumi.getter(name="hostKey")
    def host_key(self) -> Optional[str]:
        """
        Specifies the Public SSH Key of git repository basic auth.
        """
        return pulumi.get(self, "host_key")

    @property
    @pulumi.getter(name="hostKeyAlgorithm")
    def host_key_algorithm(self) -> Optional[str]:
        """
        Specifies the SSH Key algorithm of git repository basic auth.
        """
        return pulumi.get(self, "host_key_algorithm")


@pulumi.output_type
class SpringCloudDevToolPortalSso(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "clientId":
            suggest = "client_id"
        elif key == "clientSecret":
            suggest = "client_secret"
        elif key == "metadataUrl":
            suggest = "metadata_url"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudDevToolPortalSso. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudDevToolPortalSso.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudDevToolPortalSso.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 client_id: Optional[str] = None,
                 client_secret: Optional[str] = None,
                 metadata_url: Optional[str] = None,
                 scopes: Optional[Sequence[str]] = None):
        """
        :param str client_id: Specifies the public identifier for the application.
        :param str client_secret: Specifies the secret known only to the application and the authorization server.
        :param str metadata_url: Specifies the URI of a JSON file with generic OIDC provider configuration.
        :param Sequence[str] scopes: Specifies a list of specific actions applications can be allowed to do on a user's behalf.
        """
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if client_secret is not None:
            pulumi.set(__self__, "client_secret", client_secret)
        if metadata_url is not None:
            pulumi.set(__self__, "metadata_url", metadata_url)
        if scopes is not None:
            pulumi.set(__self__, "scopes", scopes)

    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[str]:
        """
        Specifies the public identifier for the application.
        """
        return pulumi.get(self, "client_id")

    @property
    @pulumi.getter(name="clientSecret")
    def client_secret(self) -> Optional[str]:
        """
        Specifies the secret known only to the application and the authorization server.
        """
        return pulumi.get(self, "client_secret")

    @property
    @pulumi.getter(name="metadataUrl")
    def metadata_url(self) -> Optional[str]:
        """
        Specifies the URI of a JSON file with generic OIDC provider configuration.
        """
        return pulumi.get(self, "metadata_url")

    @property
    @pulumi.getter
    def scopes(self) -> Optional[Sequence[str]]:
        """
        Specifies a list of specific actions applications can be allowed to do on a user's behalf.
        """
        return pulumi.get(self, "scopes")

@pulumi.output_type
class SpringCloudGatewayApiMetadata(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "documentationUrl":
            suggest = "documentation_url"
        elif key == "serverUrl":
            suggest = "server_url"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudGatewayApiMetadata. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudGatewayApiMetadata.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudGatewayApiMetadata.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 description: Optional[str] = None,
                 documentation_url: Optional[str] = None,
                 server_url: Optional[str] = None,
                 title: Optional[str] = None,
                 version: Optional[str] = None):
        """
        :param str description: Detailed description of the APIs available on the Gateway instance.
        :param str documentation_url: Location of additional documentation for the APIs available on the Gateway instance.
        :param str server_url: Base URL that API consumers will use to access APIs on the Gateway instance.
        :param str title: Specifies the title describing the context of the APIs available on the Gateway instance.
        :param str version: Specifies the version of APIs available on this Gateway instance.
        """
        if description is not None:
            pulumi.set(__self__, "description", description)
        if documentation_url is not None:
            pulumi.set(__self__, "documentation_url", documentation_url)
        if server_url is not None:
            pulumi.set(__self__, "server_url", server_url)
        if title is not None:
            pulumi.set(__self__, "title", title)
        if version is not None:
            pulumi.set(__self__, "version", version)

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        Detailed description of the APIs available on the Gateway instance.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="documentationUrl")
    def documentation_url(self) -> Optional[str]:
        """
        Location of additional documentation for the APIs available on the Gateway instance.
        """
        return pulumi.get(self, "documentation_url")

    @property
    @pulumi.getter(name="serverUrl")
    def server_url(self) -> Optional[str]:
        """
        Base URL that API consumers will use to access APIs on the Gateway instance.
        """
        return pulumi.get(self, "server_url")

    @property
    @pulumi.getter
    def title(self) -> Optional[str]:
        """
        Specifies the title describing the context of the APIs available on the Gateway instance.
        """
        return pulumi.get(self, "title")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        Specifies the version of APIs available on this Gateway instance.
        """
        return pulumi.get(self, "version")


@pulumi.output_type
class SpringCloudGatewayClientAuthorization(dict):
    @staticmethod
    def __key_warning(key: str):
        suggest = None
        if key == "certificateIds":
            suggest = "certificate_ids"
        elif key == "verificationEnabled":
            suggest = "verification_enabled"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in SpringCloudGatewayClientAuthorization. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        SpringCloudGatewayClientAuthorization.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        SpringCloudGatewayClientAuthorization.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 certificate_ids: Optional[Sequence[str]] = None,
                 verification_enabled: Optional[bool] = None):
        """
        :param Sequence[str] certificate_ids: Specifies the Spring Cloud Certificate IDs of the Spring Cloud Gateway.
        :param bool verification_enabled: Specifies whether the client certificate verification is enabled.
        """
        if certificate_ids is not None:
            pulumi.set(__self__, "certificate_ids", certificate_ids)
        if verification_enabled is not None:
            pulumi.set(__self__, "verification_enabled", verification_enabled)

    @property
    @pulumi.getter(name="certificateIds")
    def certificate_ids(self) -> Optional[Sequence[str]]:
        """
        Specifies the Spring Cloud Certificate IDs of the Spring Cloud Gateway.
        """
        return pulumi.get(self, "certificate_ids")

    @property
    @pulumi.getter(name="verificationEnabled")
    def verification_enabled(self) -> Optional[bool]:
        """
        Specifies whether the client certificate verification is enabled.
        """
        return pulumi.get(self, "verification_enabled")

@pulumi.output_type
class SpringCloudGatewayCors(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "allowedHeaders":
suggest = "allowed_headers"
elif key == "allowedMethods":
suggest = "allowed_methods"
elif key == "allowedOriginPatterns":
suggest = "allowed_origin_patterns"
elif key == "allowedOrigins":
suggest = "allowed_origins"
elif key == "credentialsAllowed":
suggest = "credentials_allowed"
elif key == "exposedHeaders":
suggest = "exposed_headers"
elif key == "maxAgeSeconds":
suggest = "max_age_seconds"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SpringCloudGatewayCors. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SpringCloudGatewayCors.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SpringCloudGatewayCors.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
allowed_headers: Optional[Sequence[str]] = None,
allowed_methods: Optional[Sequence[str]] = None,
allowed_origin_patterns: Optional[Sequence[str]] = None,
allowed_origins: Optional[Sequence[str]] = None,
credentials_allowed: Optional[bool] = None,
exposed_headers: Optional[Sequence[str]] = None,
max_age_seconds: Optional[int] = None):
"""
:param Sequence[str] allowed_headers: Allowed headers in cross-site requests. The special value `*` allows actual requests to send any header.
:param Sequence[str] allowed_methods: Allowed HTTP methods on cross-site requests. The special value `*` allows all methods. If not set, `GET` and `HEAD` are allowed by default. Possible values are `DELETE`, `GET`, `HEAD`, `MERGE`, `POST`, `OPTIONS` and `PUT`.
:param Sequence[str] allowed_origin_patterns: Allowed origin patterns to make cross-site requests.
:param Sequence[str] allowed_origins: Allowed origins to make cross-site requests. The special value `*` allows all domains.
:param bool credentials_allowed: is user credentials are supported on cross-site requests?
:param Sequence[str] exposed_headers: HTTP response headers to expose for cross-site requests.
:param int max_age_seconds: How long, in seconds, the response from a pre-flight request can be cached by clients.
"""
if allowed_headers is not None:
pulumi.set(__self__, "allowed_headers", allowed_headers)
if allowed_methods is not None:
pulumi.set(__self__, "allowed_methods", allowed_methods)
if allowed_origin_patterns is not None:
pulumi.set(__self__, "allowed_origin_patterns", allowed_origin_patterns)
if allowed_origins is not None:
pulumi.set(__self__, "allowed_origins", allowed_origins)
if credentials_allowed is not None:
pulumi.set(__self__, "credentials_allowed", credentials_allowed)
if exposed_headers is not None:
pulumi.set(__self__, "exposed_headers", exposed_headers)
if max_age_seconds is not None:
pulumi.set(__self__, "max_age_seconds", max_age_seconds)
@property
@pulumi.getter(name="allowedHeaders")
def allowed_headers(self) -> Optional[Sequence[str]]:
"""
Allowed headers in cross-site requests. The special value `*` allows actual requests to send any header.
"""
return pulumi.get(self, "allowed_headers")
@property
@pulumi.getter(name="allowedMethods")
def allowed_methods(self) -> Optional[Sequence[str]]:
"""
Allowed HTTP methods on cross-site requests. The special value `*` allows all methods. If not set, `GET` and `HEAD` are allowed by default. Possible values are `DELETE`, `GET`, `HEAD`, `MERGE`, `POST`, `OPTIONS` and `PUT`.
"""
return pulumi.get(self, "allowed_methods")
@property
@pulumi.getter(name="allowedOriginPatterns")
def allowed_origin_patterns(self) -> Optional[Sequence[str]]:
"""
Allowed origin patterns to make cross-site requests.
"""
return pulumi.get(self, "allowed_origin_patterns")
@property
@pulumi.getter(name="allowedOrigins")
def allowed_origins(self) -> Optional[Sequence[str]]:
"""
Allowed origins to make cross-site requests. The special value `*` allows all domains.
"""
return pulumi.get(self, "allowed_origins")
@property
@pulumi.getter(name="credentialsAllowed")
def credentials_allowed(self) -> Optional[bool]:
"""
is user credentials are supported on cross-site requests?
"""
return pulumi.get(self, "credentials_allowed")
@property
@pulumi.getter(name="exposedHeaders")
def exposed_headers(self) -> Optional[Sequence[str]]:
"""
HTTP response headers to expose for cross-site requests.
"""
return pulumi.get(self, "exposed_headers")
@property
@pulumi.getter(name="maxAgeSeconds")
def max_age_seconds(self) -> Optional[int]:
"""
How long, in seconds, the response from a pre-flight request can be cached by clients.
"""
return pulumi.get(self, "max_age_seconds")
@pulumi.output_type
class SpringCloudGatewayQuota(dict):
def __init__(__self__, *,
cpu: Optional[str] = None,
memory: Optional[str] = None):
"""
        :param str cpu: Specifies the required cpu of the Spring Cloud Deployment. Possible values are `500m`, `1`, `2`, `3` and `4`. Defaults to `1` if not specified.
> **Note:** `cpu` supports `500m` and `1` for Basic tier, `500m`, `1`, `2`, `3` and `4` for Standard tier.
        :param str memory: Specifies the required memory size of the Spring Cloud Deployment. Possible values are `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi`. Defaults to `1Gi` if not specified.
> **Note:** `memory` supports `512Mi`, `1Gi` and `2Gi` for Basic tier, `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi` for Standard tier.
"""
if cpu is not None:
pulumi.set(__self__, "cpu", cpu)
if memory is not None:
pulumi.set(__self__, "memory", memory)
@property
@pulumi.getter
def cpu(self) -> Optional[str]:
"""
        Specifies the required cpu of the Spring Cloud Deployment. Possible values are `500m`, `1`, `2`, `3` and `4`. Defaults to `1` if not specified.
> **Note:** `cpu` supports `500m` and `1` for Basic tier, `500m`, `1`, `2`, `3` and `4` for Standard tier.
"""
return pulumi.get(self, "cpu")
@property
@pulumi.getter
def memory(self) -> Optional[str]:
"""
        Specifies the required memory size of the Spring Cloud Deployment. Possible values are `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi`. Defaults to `1Gi` if not specified.
> **Note:** `memory` supports `512Mi`, `1Gi` and `2Gi` for Basic tier, `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi` for Standard tier.
"""
return pulumi.get(self, "memory")
@pulumi.output_type
class SpringCloudGatewayRouteConfigOpenApi(dict):
def __init__(__self__, *,
uri: Optional[str] = None):
"""
        :param str uri: The URI of the OpenAPI specification.
"""
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter
def uri(self) -> Optional[str]:
"""
        The URI of the OpenAPI specification.
"""
return pulumi.get(self, "uri")
@pulumi.output_type
class SpringCloudGatewayRouteConfigRoute(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "classificationTags":
suggest = "classification_tags"
elif key == "ssoValidationEnabled":
suggest = "sso_validation_enabled"
elif key == "tokenRelay":
suggest = "token_relay"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SpringCloudGatewayRouteConfigRoute. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SpringCloudGatewayRouteConfigRoute.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SpringCloudGatewayRouteConfigRoute.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
order: int,
classification_tags: Optional[Sequence[str]] = None,
description: Optional[str] = None,
filters: Optional[Sequence[str]] = None,
predicates: Optional[Sequence[str]] = None,
sso_validation_enabled: Optional[bool] = None,
title: Optional[str] = None,
token_relay: Optional[bool] = None,
uri: Optional[str] = None):
"""
:param int order: Specifies the route processing order.
:param Sequence[str] classification_tags: Specifies the classification tags which will be applied to methods in the generated OpenAPI documentation.
:param str description: Specifies the description which will be applied to methods in the generated OpenAPI documentation.
:param Sequence[str] filters: Specifies a list of filters which are used to modify the request before sending it to the target endpoint, or the received response.
:param Sequence[str] predicates: Specifies a list of conditions to evaluate a route for each request. Each predicate may be evaluated against request headers and parameter values. All of the predicates associated with a route must evaluate to true for the route to be matched to the request.
        :param bool sso_validation_enabled: Should SSO validation be enabled?
:param str title: Specifies the title which will be applied to methods in the generated OpenAPI documentation.
        :param bool token_relay: Should the currently-authenticated user's identity token be passed to the application service?
:param str uri: Specifies the full uri which will override `appName`.
"""
pulumi.set(__self__, "order", order)
if classification_tags is not None:
pulumi.set(__self__, "classification_tags", classification_tags)
if description is not None:
pulumi.set(__self__, "description", description)
if filters is not None:
pulumi.set(__self__, "filters", filters)
if predicates is not None:
pulumi.set(__self__, "predicates", predicates)
if sso_validation_enabled is not None:
pulumi.set(__self__, "sso_validation_enabled", sso_validation_enabled)
if title is not None:
pulumi.set(__self__, "title", title)
if token_relay is not None:
pulumi.set(__self__, "token_relay", token_relay)
if uri is not None:
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter
def order(self) -> int:
"""
Specifies the route processing order.
"""
return pulumi.get(self, "order")
@property
@pulumi.getter(name="classificationTags")
def classification_tags(self) -> Optional[Sequence[str]]:
"""
Specifies the classification tags which will be applied to methods in the generated OpenAPI documentation.
"""
return pulumi.get(self, "classification_tags")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Specifies the description which will be applied to methods in the generated OpenAPI documentation.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence[str]]:
"""
Specifies a list of filters which are used to modify the request before sending it to the target endpoint, or the received response.
"""
return pulumi.get(self, "filters")
@property
@pulumi.getter
def predicates(self) -> Optional[Sequence[str]]:
"""
Specifies a list of conditions to evaluate a route for each request. Each predicate may be evaluated against request headers and parameter values. All of the predicates associated with a route must evaluate to true for the route to be matched to the request.
"""
return pulumi.get(self, "predicates")
@property
@pulumi.getter(name="ssoValidationEnabled")
def sso_validation_enabled(self) -> Optional[bool]:
"""
        Should SSO validation be enabled?
"""
return pulumi.get(self, "sso_validation_enabled")
@property
@pulumi.getter
def title(self) -> Optional[str]:
"""
Specifies the title which will be applied to methods in the generated OpenAPI documentation.
"""
return pulumi.get(self, "title")
@property
@pulumi.getter(name="tokenRelay")
def token_relay(self) -> Optional[bool]:
"""
        Should the currently-authenticated user's identity token be passed to the application service?
"""
return pulumi.get(self, "token_relay")
@property
@pulumi.getter
def uri(self) -> Optional[str]:
"""
Specifies the full uri which will override `appName`.
"""
return pulumi.get(self, "uri")
@pulumi.output_type
class SpringCloudGatewaySso(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "clientId":
suggest = "client_id"
elif key == "clientSecret":
suggest = "client_secret"
elif key == "issuerUri":
suggest = "issuer_uri"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SpringCloudGatewaySso. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SpringCloudGatewaySso.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SpringCloudGatewaySso.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
client_id: Optional[str] = None,
client_secret: Optional[str] = None,
issuer_uri: Optional[str] = None,
scopes: Optional[Sequence[str]] = None):
"""
:param str client_id: The public identifier for the application.
:param str client_secret: The secret known only to the application and the authorization server.
:param str issuer_uri: The URI of Issuer Identifier.
:param Sequence[str] scopes: It defines the specific actions applications can be allowed to do on a user's behalf.
"""
if client_id is not None:
pulumi.set(__self__, "client_id", client_id)
if client_secret is not None:
pulumi.set(__self__, "client_secret", client_secret)
if issuer_uri is not None:
pulumi.set(__self__, "issuer_uri", issuer_uri)
if scopes is not None:
pulumi.set(__self__, "scopes", scopes)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> Optional[str]:
"""
The public identifier for the application.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter(name="clientSecret")
def client_secret(self) -> Optional[str]:
"""
The secret known only to the application and the authorization server.
"""
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="issuerUri")
def issuer_uri(self) -> Optional[str]:
"""
The URI of Issuer Identifier.
"""
return pulumi.get(self, "issuer_uri")
@property
@pulumi.getter
def scopes(self) -> Optional[Sequence[str]]:
"""
It defines the specific actions applications can be allowed to do on a user's behalf.
"""
return pulumi.get(self, "scopes")
@pulumi.output_type
class SpringCloudJavaDeploymentQuota(dict):
def __init__(__self__, *,
cpu: Optional[str] = None,
memory: Optional[str] = None):
"""
        :param str cpu: Specifies the required cpu of the Spring Cloud Deployment. Possible values are `500m`, `1`, `2`, `3` and `4`. Defaults to `1` if not specified.
> **Note:** `cpu` supports `500m` and `1` for Basic tier, `500m`, `1`, `2`, `3` and `4` for Standard tier.
        :param str memory: Specifies the required memory size of the Spring Cloud Deployment. Possible values are `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi`. Defaults to `1Gi` if not specified.
> **Note:** `memory` supports `512Mi`, `1Gi` and `2Gi` for Basic tier, `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi` for Standard tier.
"""
if cpu is not None:
pulumi.set(__self__, "cpu", cpu)
if memory is not None:
pulumi.set(__self__, "memory", memory)
@property
@pulumi.getter
def cpu(self) -> Optional[str]:
"""
        Specifies the required cpu of the Spring Cloud Deployment. Possible values are `500m`, `1`, `2`, `3` and `4`. Defaults to `1` if not specified.
> **Note:** `cpu` supports `500m` and `1` for Basic tier, `500m`, `1`, `2`, `3` and `4` for Standard tier.
"""
return pulumi.get(self, "cpu")
@property
@pulumi.getter
def memory(self) -> Optional[str]:
"""
        Specifies the required memory size of the Spring Cloud Deployment. Possible values are `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi`. Defaults to `1Gi` if not specified.
> **Note:** `memory` supports `512Mi`, `1Gi` and `2Gi` for Basic tier, `512Mi`, `1Gi`, `2Gi`, `3Gi`, `4Gi`, `5Gi`, `6Gi`, `7Gi`, and `8Gi` for Standard tier.
"""
return pulumi.get(self, "memory")
@pulumi.output_type
class SpringCloudServiceConfigServerGitSetting(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "httpBasicAuth":
suggest = "http_basic_auth"
elif key == "searchPaths":
suggest = "search_paths"
elif key == "sshAuth":
suggest = "ssh_auth"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SpringCloudServiceConfigServerGitSetting. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SpringCloudServiceConfigServerGitSetting.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SpringCloudServiceConfigServerGitSetting.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
uri: str,
http_basic_auth: Optional['outputs.SpringCloudServiceConfigServerGitSettingHttpBasicAuth'] = None,
label: Optional[str] = None,
repositories: Optional[Sequence['outputs.SpringCloudServiceConfigServerGitSettingRepository']] = None,
search_paths: Optional[Sequence[str]] = None,
ssh_auth: Optional['outputs.SpringCloudServiceConfigServerGitSettingSshAuth'] = None):
"""
        :param str uri: The URI of the default Git repository used as the Config Server back end; it should start with `http://`, `https://`, `git@`, or `ssh://`.
:param 'SpringCloudServiceConfigServerGitSettingHttpBasicAuthArgs' http_basic_auth: A `http_basic_auth` block as defined below.
:param str label: The default label of the Git repository, should be the branch name, tag name, or commit-id of the repository.
:param Sequence['SpringCloudServiceConfigServerGitSettingRepositoryArgs'] repositories: One or more `repository` blocks as defined below.
:param Sequence[str] search_paths: An array of strings used to search subdirectories of the Git repository.
:param 'SpringCloudServiceConfigServerGitSettingSshAuthArgs' ssh_auth: A `ssh_auth` block as defined below.
"""
pulumi.set(__self__, "uri", uri)
if http_basic_auth is not None:
pulumi.set(__self__, "http_basic_auth", http_basic_auth)
if label is not None:
pulumi.set(__self__, "label", label)
if repositories is not None:
pulumi.set(__self__, "repositories", repositories)
if search_paths is not None:
pulumi.set(__self__, "search_paths", search_paths)
if ssh_auth is not None:
pulumi.set(__self__, "ssh_auth", ssh_auth)
@property
@pulumi.getter
def uri(self) -> str:
"""
        The URI of the default Git repository used as the Config Server back end; it should start with `http://`, `https://`, `git@`, or `ssh://`.
"""
return pulumi.get(self, "uri")
@property
@pulumi.getter(name="httpBasicAuth")
def http_basic_auth(self) -> Optional['outputs.SpringCloudServiceConfigServerGitSettingHttpBasicAuth']:
"""
A `http_basic_auth` block as defined below.
"""
return pulumi.get(self, "http_basic_auth")
@property
@pulumi.getter
def label(self) -> Optional[str]:
"""
The default label of the Git repository, should be the branch name, tag name, or commit-id of the repository.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def repositories(self) -> Optional[Sequence['outputs.SpringCloudServiceConfigServerGitSettingRepository']]:
"""
One or more `repository` blocks as defined below.
"""
return pulumi.get(self, "repositories")
@property
@pulumi.getter(name="searchPaths")
def search_paths(self) -> Optional[Sequence[str]]:
"""
An array of strings used to search subdirectories of the Git repository.
"""
return pulumi.get(self, "search_paths")
@property
@pulumi.getter(name="sshAuth")
def ssh_auth(self) -> Optional['outputs.SpringCloudServiceConfigServerGitSettingSshAuth']:
"""
A `ssh_auth` block as defined below.
"""
return pulumi.get(self, "ssh_auth")
@pulumi.output_type
class SpringCloudServiceConfigServerGitSettingHttpBasicAuth(dict):
def __init__(__self__, *,
password: str,
username: str):
"""
:param str password: The password used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication.
:param str username: The username that's used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication.
"""
pulumi.set(__self__, "password", password)
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def password(self) -> str:
"""
The password used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter
def username(self) -> str:
"""
The username that's used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication.
"""
return pulumi.get(self, "username")
@pulumi.output_type
class SpringCloudServiceConfigServerGitSettingRepository(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "httpBasicAuth":
suggest = "http_basic_auth"
elif key == "searchPaths":
suggest = "search_paths"
elif key == "sshAuth":
suggest = "ssh_auth"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SpringCloudServiceConfigServerGitSettingRepository. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SpringCloudServiceConfigServerGitSettingRepository.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SpringCloudServiceConfigServerGitSettingRepository.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
name: str,
uri: str,
http_basic_auth: Optional['outputs.SpringCloudServiceConfigServerGitSettingRepositoryHttpBasicAuth'] = None,
label: Optional[str] = None,
patterns: Optional[Sequence[str]] = None,
search_paths: Optional[Sequence[str]] = None,
ssh_auth: Optional['outputs.SpringCloudServiceConfigServerGitSettingRepositorySshAuth'] = None):
"""
        :param str name: A name used to identify the Git repository; required only when multiple `repository` blocks are defined.
        :param str uri: The URI of the Git repository that's used as the Config Server back end; it should start with `http://`, `https://`, `git@`, or `ssh://`.
:param 'SpringCloudServiceConfigServerGitSettingRepositoryHttpBasicAuthArgs' http_basic_auth: A `http_basic_auth` block as defined below.
:param str label: The default label of the Git repository, should be the branch name, tag name, or commit-id of the repository.
:param Sequence[str] patterns: An array of strings used to match an application name. For each pattern, use the `{application}/{profile}` format with wildcards.
:param Sequence[str] search_paths: An array of strings used to search subdirectories of the Git repository.
:param 'SpringCloudServiceConfigServerGitSettingRepositorySshAuthArgs' ssh_auth: A `ssh_auth` block as defined below.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "uri", uri)
if http_basic_auth is not None:
pulumi.set(__self__, "http_basic_auth", http_basic_auth)
if label is not None:
pulumi.set(__self__, "label", label)
if patterns is not None:
pulumi.set(__self__, "patterns", patterns)
if search_paths is not None:
pulumi.set(__self__, "search_paths", search_paths)
if ssh_auth is not None:
pulumi.set(__self__, "ssh_auth", ssh_auth)
@property
@pulumi.getter
def name(self) -> str:
"""
        A name used to identify the Git repository; required only when multiple `repository` blocks are defined.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def uri(self) -> str:
"""
        The URI of the Git repository that's used as the Config Server back end; it should start with `http://`, `https://`, `git@`, or `ssh://`.
"""
return pulumi.get(self, "uri")
@property
@pulumi.getter(name="httpBasicAuth")
def http_basic_auth(self) -> Optional['outputs.SpringCloudServiceConfigServerGitSettingRepositoryHttpBasicAuth']:
"""
A `http_basic_auth` block as defined below.
"""
return pulumi.get(self, "http_basic_auth")
@property
@pulumi.getter
def label(self) -> Optional[str]:
"""
The default label of the Git repository, should be the branch name, tag name, or commit-id of the repository.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def patterns(self) -> Optional[Sequence[str]]:
"""
An array of strings used to match an application name. For each pattern, use the `{application}/{profile}` format with wildcards.
"""
return pulumi.get(self, "patterns")
@property
@pulumi.getter(name="searchPaths")
def search_paths(self) -> Optional[Sequence[str]]:
"""
An array of strings used to search subdirectories of the Git repository.
"""
return pulumi.get(self, "search_paths")
@property
@pulumi.getter(name="sshAuth")
def ssh_auth(self) -> Optional['outputs.SpringCloudServiceConfigServerGitSettingRepositorySshAuth']:
"""
A `ssh_auth` block as defined below.
"""
return pulumi.get(self, "ssh_auth")
@pulumi.output_type
class SpringCloudServiceConfigServerGitSettingRepositoryHttpBasicAuth(dict):
def __init__(__self__, *,
password: str,
username: str):
"""
:param str password: The password used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication.
:param str username: The username that's used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication.
"""
pulumi.set(__self__, "password", password)
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def password(self) -> str:
"""
The password used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter
def username(self) -> str:
"""
The username that's used to access the Git repository server, required when the Git repository server supports HTTP Basic Authentication.
"""
return pulumi.get(self, "username")
@pulumi.output_type
class SpringCloudServiceConfigServerGitSettingRepositorySshAuth(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "privateKey":
suggest = "private_key"
elif key == "hostKey":
suggest = "host_key"
elif key == "hostKeyAlgorithm":
suggest = "host_key_algorithm"
elif key == "strictHostKeyCheckingEnabled":
suggest = "strict_host_key_checking_enabled"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SpringCloudServiceConfigServerGitSettingRepositorySshAuth. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SpringCloudServiceConfigServerGitSettingRepositorySshAuth.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SpringCloudServiceConfigServerGitSettingRepositorySshAuth.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
private_key: str,
host_key: Optional[str] = None,
host_key_algorithm: Optional[str] = None,
strict_host_key_checking_enabled: Optional[bool] = None):
"""
:param str private_key: The SSH private key to access the Git repository, required when the URI starts with `git@` or `ssh://`.
:param str host_key: The host key of the Git repository server, should not include the algorithm prefix as covered by `host-key-algorithm`.
        :param str host_key_algorithm: The host key algorithm; one of `ssh-dss`, `ssh-rsa`, `ecdsa-sha2-nistp256`, `ecdsa-sha2-nistp384`, or `ecdsa-sha2-nistp521`. Required only if `host-key` exists.
:param bool strict_host_key_checking_enabled: Indicates whether the Config Server instance will fail to start if the host_key does not match. Defaults to `true`.
"""
pulumi.set(__self__, "private_key", private_key)
if host_key is not None:
pulumi.set(__self__, "host_key", host_key)
if host_key_algorithm is not None:
pulumi.set(__self__, "host_key_algorithm", host_key_algorithm)
if strict_host_key_checking_enabled is not None:
pulumi.set(__self__, "strict_host_key_checking_enabled", strict_host_key_checking_enabled)
@property
@pulumi.getter(name="privateKey")
def private_key(self) -> str:
"""
The SSH private key to access the Git repository, required when the URI starts with `git@` or `ssh://`.
"""
return pulumi.get(self, "private_key")
@property
@pulumi.getter(name="hostKey")
def host_key(self) -> Optional[str]:
"""
The host key of the Git repository server, should not include the algorithm prefix as covered by `host-key-algorithm`.
"""
return pulumi.get(self, "host_key")
@property
@pulumi.getter(name="hostKeyAlgorithm")
def host_key_algorithm(self) -> Optional[str]:
"""
        The host key algorithm; one of `ssh-dss`, `ssh-rsa`, `ecdsa-sha2-nistp256`, `ecdsa-sha2-nistp384`, or `ecdsa-sha2-nistp521`. Required only if `host-key` exists.
"""
return pulumi.get(self, "host_key_algorithm")
@property
@pulumi.getter(name="strictHostKeyCheckingEnabled")
def strict_host_key_checking_enabled(self) -> Optional[bool]:
"""
Indicates whether the Config Server instance will fail to start if the host_key does not match. Defaults to `true`.
"""
return pulumi.get(self, "strict_host_key_checking_enabled")
@pulumi.output_type
class SpringCloudServiceConfigServerGitSettingSshAuth(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "privateKey":
suggest = "private_key"
elif key == "hostKey":
suggest = "host_key"
elif key == "hostKeyAlgorithm":
suggest = "host_key_algorithm"
elif key == "strictHostKeyCheckingEnabled":
suggest = "strict_host_key_checking_enabled"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SpringCloudServiceConfigServerGitSettingSshAuth. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SpringCloudServiceConfigServerGitSettingSshAuth.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SpringCloudServiceConfigServerGitSettingSshAuth.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
private_key: str,
host_key: Optional[str] = None,
host_key_algorithm: Optional[str] = None,
strict_host_key_checking_enabled: Optional[bool] = None):
"""
:param str private_key: The SSH private key to access the Git repository, required when the URI starts with `git@` or `ssh://`.
:param str host_key: The host key of the Git repository server, should not include the algorithm prefix as covered by `host-key-algorithm`.
        :param str host_key_algorithm: The host key algorithm; one of `ssh-dss`, `ssh-rsa`, `ecdsa-sha2-nistp256`, `ecdsa-sha2-nistp384`, or `ecdsa-sha2-nistp521`. Required only if `host-key` exists.
:param bool strict_host_key_checking_enabled: Indicates whether the Config Server instance will fail to start if the host_key does not match. Defaults to `true`.
"""
pulumi.set(__self__, "private_key", private_key)
if host_key is not None:
pulumi.set(__self__, "host_key", host_key)
if host_key_algorithm is not None:
pulumi.set(__self__, "host_key_algorithm", host_key_algorithm)
if strict_host_key_checking_enabled is not None:
pulumi.set(__self__, "strict_host_key_checking_enabled", strict_host_key_checking_enabled)
@property
@pulumi.getter(name="privateKey")
def private_key(self) -> str:
"""
The SSH private key to access the Git repository, required when the URI starts with `git@` or `ssh://`.
"""
return pulumi.get(self, "private_key")
@property
@pulumi.getter(name="hostKey")
def host_key(self) -> Optional[str]:
"""
The host key of the Git repository server, should not include the algorithm prefix as covered by `host-key-algorithm`.
"""
return pulumi.get(self, "host_key")
@property
@pulumi.getter(name="hostKeyAlgorithm")
def host_key_algorithm(self) -> Optional[str]:
"""
        The host key algorithm; one of `ssh-dss`, `ssh-rsa`, `ecdsa-sha2-nistp256`, `ecdsa-sha2-nistp384`, or `ecdsa-sha2-nistp521`. Required only if `host-key` exists.
"""
return pulumi.get(self, "host_key_algorithm")
@property
@pulumi.getter(name="strictHostKeyCheckingEnabled")
def strict_host_key_checking_enabled(self) -> Optional[bool]:
"""
Indicates whether the Config Server instance will fail to start if the host_key does not match. Defaults to `true`.
"""
return pulumi.get(self, "strict_host_key_checking_enabled")
@pulumi.output_type
class SpringCloudServiceContainerRegistry(dict):
def __init__(__self__, *,
name: str,
password: str,
server: str,
username: str):
"""
:param str name: Specifies the name of the container registry.
:param str password: Specifies the password of the container registry.
:param str server: Specifies the login server of the container registry.
:param str username: Specifies the username of the container registry.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "password", password)
pulumi.set(__self__, "server", server)
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def name(self) -> str:
"""
Specifies the name of the container registry.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def password(self) -> str:
"""
Specifies the password of the container registry.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter
def server(self) -> str:
"""
Specifies the login server of the container registry.
"""
return pulumi.get(self, "server")
@property
@pulumi.getter
def username(self) -> str:
"""
Specifies the username of the container registry.
"""
return pulumi.get(self, "username")
@pulumi.output_type
class SpringCloudServiceDefaultBuildService(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "containerRegistryName":
suggest = "container_registry_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SpringCloudServiceDefaultBuildService. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SpringCloudServiceDefaultBuildService.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SpringCloudServiceDefaultBuildService.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
container_registry_name: Optional[str] = None):
"""
:param str container_registry_name: Specifies the name of the container registry used in the default build service.
"""
if container_registry_name is not None:
pulumi.set(__self__, "container_registry_name", container_registry_name)
@property
@pulumi.getter(name="containerRegistryName")
def container_registry_name(self) -> Optional[str]:
"""
Specifies the name of the container registry used in the default build service.
"""
return pulumi.get(self, "container_registry_name")
@pulumi.output_type
class SpringCloudServiceMarketplace(dict):
def __init__(__self__, *,
plan: str,
product: str,
publisher: str):
"""
:param str plan: Specifies the plan ID of the 3rd Party Artifact that is being procured.
:param str product: Specifies the 3rd Party artifact that is being procured.
:param str publisher: Specifies the publisher ID of the 3rd Party Artifact that is being procured.
"""
pulumi.set(__self__, "plan", plan)
pulumi.set(__self__, "product", product)
pulumi.set(__self__, "publisher", publisher)
@property
@pulumi.getter
def plan(self) -> str:
"""
Specifies the plan ID of the 3rd Party Artifact that is being procured.
"""
return pulumi.get(self, "plan")
@property
@pulumi.getter
def product(self) -> str:
"""
Specifies the 3rd Party artifact that is being procured.
"""
return pulumi.get(self, "product")
@property
@pulumi.getter
def publisher(self) -> str:
"""
Specifies the publisher ID of the 3rd Party Artifact that is being procured.
"""
return pulumi.get(self, "publisher")
@pulumi.output_type
class SpringCloudServiceNetwork(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "appSubnetId":
suggest = "app_subnet_id"
elif key == "cidrRanges":
suggest = "cidr_ranges"
elif key == "serviceRuntimeSubnetId":
suggest = "service_runtime_subnet_id"
elif key == "appNetworkResourceGroup":
suggest = "app_network_resource_group"
elif key == "outboundType":
suggest = "outbound_type"
elif key == "readTimeoutSeconds":
suggest = "read_timeout_seconds"
elif key == "serviceRuntimeNetworkResourceGroup":
suggest = "service_runtime_network_resource_group"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SpringCloudServiceNetwork. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SpringCloudServiceNetwork.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SpringCloudServiceNetwork.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
app_subnet_id: str,
cidr_ranges: Sequence[str],
service_runtime_subnet_id: str,
app_network_resource_group: Optional[str] = None,
outbound_type: Optional[str] = None,
read_timeout_seconds: Optional[int] = None,
service_runtime_network_resource_group: Optional[str] = None):
"""
:param str app_subnet_id: Specifies the ID of the Subnet which should host the Spring Boot Applications deployed in this Spring Cloud Service. Changing this forces a new resource to be created.
:param Sequence[str] cidr_ranges: A list of (at least 3) CIDR ranges (at least /16) which are used to host the Spring Cloud infrastructure, which must not overlap with any existing CIDR ranges in the Subnet. Changing this forces a new resource to be created.
:param str service_runtime_subnet_id: Specifies the ID of the Subnet where the Service Runtime components of the Spring Cloud Service will exist. Changing this forces a new resource to be created.
:param str app_network_resource_group: Specifies the Name of the resource group containing network resources of Azure Spring Cloud Apps. Changing this forces a new resource to be created.
:param str outbound_type: Specifies the egress traffic type of the Spring Cloud Service. Possible values are `loadBalancer` and `userDefinedRouting`. Defaults to `loadBalancer`. Changing this forces a new resource to be created.
        :param int read_timeout_seconds: Ingress read timeout in seconds.
:param str service_runtime_network_resource_group: Specifies the Name of the resource group containing network resources of Azure Spring Cloud Service Runtime. Changing this forces a new resource to be created.
"""
pulumi.set(__self__, "app_subnet_id", app_subnet_id)
pulumi.set(__self__, "cidr_ranges", cidr_ranges)
pulumi.set(__self__, "service_runtime_subnet_id", service_runtime_subnet_id)
if app_network_resource_group is not None:
pulumi.set(__self__, "app_network_resource_group", app_network_resource_group)
if outbound_type is not None:
pulumi.set(__self__, "outbound_type", outbound_type)
if read_timeout_seconds is not None:
pulumi.set(__self__, "read_timeout_seconds", read_timeout_seconds)
if service_runtime_network_resource_group is not None:
pulumi.set(__self__, "service_runtime_network_resource_group", service_runtime_network_resource_group)
@property
@pulumi.getter(name="appSubnetId")
def app_subnet_id(self) -> str:
"""
Specifies the ID of the Subnet which should host the Spring Boot Applications deployed in this Spring Cloud Service. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "app_subnet_id")
@property
@pulumi.getter(name="cidrRanges")
def cidr_ranges(self) -> Sequence[str]:
"""
A list of (at least 3) CIDR ranges (at least /16) which are used to host the Spring Cloud infrastructure, which must not overlap with any existing CIDR ranges in the Subnet. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "cidr_ranges")
@property
@pulumi.getter(name="serviceRuntimeSubnetId")
def service_runtime_subnet_id(self) -> str:
"""
Specifies the ID of the Subnet where the Service Runtime components of the Spring Cloud Service will exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "service_runtime_subnet_id")
@property
@pulumi.getter(name="appNetworkResourceGroup")
def app_network_resource_group(self) -> Optional[str]:
"""
Specifies the Name of the resource group containing network resources of Azure Spring Cloud Apps. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "app_network_resource_group")
@property
@pulumi.getter(name="outboundType")
def outbound_type(self) -> Optional[str]:
"""
Specifies the egress traffic type of the Spring Cloud Service. Possible values are `loadBalancer` and `userDefinedRouting`. Defaults to `loadBalancer`. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "outbound_type")
@property
@pulumi.getter(name="readTimeoutSeconds")
def read_timeout_seconds(self) -> Optional[int]:
"""
        Ingress read timeout in seconds.
"""
return pulumi.get(self, "read_timeout_seconds")
@property
@pulumi.getter(name="serviceRuntimeNetworkResourceGroup")
def service_runtime_network_resource_group(self) -> Optional[str]:
"""
Specifies the Name of the resource group containing network resources of Azure Spring Cloud Service Runtime. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "service_runtime_network_resource_group")
@pulumi.output_type
class SpringCloudServiceRequiredNetworkTrafficRule(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "ipAddresses":
suggest = "ip_addresses"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SpringCloudServiceRequiredNetworkTrafficRule. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SpringCloudServiceRequiredNetworkTrafficRule.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SpringCloudServiceRequiredNetworkTrafficRule.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
direction: Optional[str] = None,
fqdns: Optional[Sequence[str]] = None,
ip_addresses: Optional[Sequence[str]] = None,
port: Optional[int] = None,
protocol: Optional[str] = None):
"""
:param str direction: The direction of required traffic. Possible values are `Inbound`, `Outbound`.
:param Sequence[str] fqdns: The FQDN list of required traffic.
:param Sequence[str] ip_addresses: The IP list of required traffic.
:param int port: The port of required traffic.
:param str protocol: The protocol of required traffic.
"""
if direction is not None:
pulumi.set(__self__, "direction", direction)
if fqdns is not None:
pulumi.set(__self__, "fqdns", fqdns)
if ip_addresses is not None:
pulumi.set(__self__, "ip_addresses", ip_addresses)
if port is not None:
pulumi.set(__self__, "port", port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
@property
@pulumi.getter
def direction(self) -> Optional[str]:
"""
The direction of required traffic. Possible values are `Inbound`, `Outbound`.
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter
def fqdns(self) -> Optional[Sequence[str]]:
"""
The FQDN list of required traffic.
"""
return pulumi.get(self, "fqdns")
@property
@pulumi.getter(name="ipAddresses")
def ip_addresses(self) -> Optional[Sequence[str]]:
"""
The IP list of required traffic.
"""
return pulumi.get(self, "ip_addresses")
@property
@pulumi.getter
def port(self) -> Optional[int]:
"""
The port of required traffic.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> Optional[str]:
"""
The protocol of required traffic.
"""
return pulumi.get(self, "protocol")
@pulumi.output_type
class SpringCloudServiceTrace(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "connectionString":
suggest = "connection_string"
elif key == "sampleRate":
suggest = "sample_rate"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SpringCloudServiceTrace. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SpringCloudServiceTrace.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SpringCloudServiceTrace.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
connection_string: Optional[str] = None,
sample_rate: Optional[float] = None):
"""
:param str connection_string: The connection string used for Application Insights.
:param float sample_rate: The sampling rate of Application Insights Agent. Must be between `0.0` and `100.0`. Defaults to `10.0`.
"""
if connection_string is not None:
pulumi.set(__self__, "connection_string", connection_string)
if sample_rate is not None:
pulumi.set(__self__, "sample_rate", sample_rate)
@property
@pulumi.getter(name="connectionString")
def connection_string(self) -> Optional[str]:
"""
The connection string used for Application Insights.
"""
return pulumi.get(self, "connection_string")
@property
@pulumi.getter(name="sampleRate")
def sample_rate(self) -> Optional[float]:
"""
The sampling rate of Application Insights Agent. Must be between `0.0` and `100.0`. Defaults to `10.0`.
"""
return pulumi.get(self, "sample_rate")
@pulumi.output_type
class GetSpringCloudAppIdentityResult(dict):
def __init__(__self__, *,
identity_ids: Sequence[str],
principal_id: str,
tenant_id: str,
type: str):
"""
:param str principal_id: The Principal ID for the Service Principal associated with the Managed Service Identity of this Spring Cloud Application.
:param str tenant_id: The Tenant ID for the Service Principal associated with the Managed Service Identity of this Spring Cloud Application.
:param str type: The Type of Managed Identity assigned to the Spring Cloud Application.
"""
pulumi.set(__self__, "identity_ids", identity_ids)
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "tenant_id", tenant_id)
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="identityIds")
def identity_ids(self) -> Sequence[str]:
return pulumi.get(self, "identity_ids")
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The Principal ID for the Service Principal associated with the Managed Service Identity of this Spring Cloud Application.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The Tenant ID for the Service Principal associated with the Managed Service Identity of this Spring Cloud Application.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
The Type of Managed Identity assigned to the Spring Cloud Application.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class GetSpringCloudAppPersistentDiskResult(dict):
def __init__(__self__, *,
mount_path: str,
size_in_gb: int):
"""
:param str mount_path: The mount path of the persistent disk.
:param int size_in_gb: The size of the persistent disk in GB.
"""
pulumi.set(__self__, "mount_path", mount_path)
pulumi.set(__self__, "size_in_gb", size_in_gb)
@property
@pulumi.getter(name="mountPath")
def mount_path(self) -> str:
"""
The mount path of the persistent disk.
"""
return pulumi.get(self, "mount_path")
@property
@pulumi.getter(name="sizeInGb")
def size_in_gb(self) -> int:
"""
The size of the persistent disk in GB.
"""
return pulumi.get(self, "size_in_gb")
@pulumi.output_type
class GetSpringCloudServiceConfigServerGitSettingResult(dict):
def __init__(__self__, *,
http_basic_auths: Sequence['outputs.GetSpringCloudServiceConfigServerGitSettingHttpBasicAuthResult'],
label: str,
repositories: Sequence['outputs.GetSpringCloudServiceConfigServerGitSettingRepositoryResult'],
search_paths: Sequence[str],
ssh_auths: Sequence['outputs.GetSpringCloudServiceConfigServerGitSettingSshAuthResult'],
uri: str):
"""
:param Sequence['GetSpringCloudServiceConfigServerGitSettingHttpBasicAuthArgs'] http_basic_auths: A `http_basic_auth` block as defined below.
        :param str label: The default label of the Git repository, which is a branch name, tag name, or commit-id of the repository.
:param Sequence['GetSpringCloudServiceConfigServerGitSettingRepositoryArgs'] repositories: One or more `repository` blocks as defined below.
:param Sequence[str] search_paths: An array of strings used to search subdirectories of the Git repository.
:param Sequence['GetSpringCloudServiceConfigServerGitSettingSshAuthArgs'] ssh_auths: A `ssh_auth` block as defined below.
        :param str uri: The URI of the Git repository.
"""
pulumi.set(__self__, "http_basic_auths", http_basic_auths)
pulumi.set(__self__, "label", label)
pulumi.set(__self__, "repositories", repositories)
pulumi.set(__self__, "search_paths", search_paths)
pulumi.set(__self__, "ssh_auths", ssh_auths)
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter(name="httpBasicAuths")
def http_basic_auths(self) -> Sequence['outputs.GetSpringCloudServiceConfigServerGitSettingHttpBasicAuthResult']:
"""
A `http_basic_auth` block as defined below.
"""
return pulumi.get(self, "http_basic_auths")
@property
@pulumi.getter
def label(self) -> str:
"""
        The default label of the Git repository, which is a branch name, tag name, or commit-id of the repository.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def repositories(self) -> Sequence['outputs.GetSpringCloudServiceConfigServerGitSettingRepositoryResult']:
"""
One or more `repository` blocks as defined below.
"""
return pulumi.get(self, "repositories")
@property
@pulumi.getter(name="searchPaths")
def search_paths(self) -> Sequence[str]:
"""
An array of strings used to search subdirectories of the Git repository.
"""
return pulumi.get(self, "search_paths")
@property
@pulumi.getter(name="sshAuths")
def ssh_auths(self) -> Sequence['outputs.GetSpringCloudServiceConfigServerGitSettingSshAuthResult']:
"""
A `ssh_auth` block as defined below.
"""
return pulumi.get(self, "ssh_auths")
@property
@pulumi.getter
def uri(self) -> str:
"""
        The URI of the Git repository.
"""
return pulumi.get(self, "uri")
@pulumi.output_type
class GetSpringCloudServiceConfigServerGitSettingHttpBasicAuthResult(dict):
def __init__(__self__, *,
password: str,
username: str):
"""
:param str password: The password used to access the HTTP Basic Authentication Git repository server.
:param str username: The username used to access the HTTP Basic Authentication Git repository server.
"""
pulumi.set(__self__, "password", password)
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def password(self) -> str:
"""
The password used to access the HTTP Basic Authentication Git repository server.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter
def username(self) -> str:
"""
The username used to access the HTTP Basic Authentication Git repository server.
"""
return pulumi.get(self, "username")
@pulumi.output_type
class GetSpringCloudServiceConfigServerGitSettingRepositoryResult(dict):
def __init__(__self__, *,
http_basic_auths: Sequence['outputs.GetSpringCloudServiceConfigServerGitSettingRepositoryHttpBasicAuthResult'],
label: str,
name: str,
patterns: Sequence[str],
search_paths: Sequence[str],
ssh_auths: Sequence['outputs.GetSpringCloudServiceConfigServerGitSettingRepositorySshAuthResult'],
uri: str):
"""
:param Sequence['GetSpringCloudServiceConfigServerGitSettingRepositoryHttpBasicAuthArgs'] http_basic_auths: A `http_basic_auth` block as defined below.
        :param str label: The default label of the Git repository, which is a branch name, tag name, or commit-id of the repository.
        :param str name: Specifies the name of the Spring Cloud Service resource.
:param Sequence[str] patterns: An array of strings used to match an application name. For each pattern, use the `{application}/{profile}` format with wildcards.
:param Sequence[str] search_paths: An array of strings used to search subdirectories of the Git repository.
:param Sequence['GetSpringCloudServiceConfigServerGitSettingRepositorySshAuthArgs'] ssh_auths: A `ssh_auth` block as defined below.
        :param str uri: The URI of the Git repository.
"""
pulumi.set(__self__, "http_basic_auths", http_basic_auths)
pulumi.set(__self__, "label", label)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "patterns", patterns)
pulumi.set(__self__, "search_paths", search_paths)
pulumi.set(__self__, "ssh_auths", ssh_auths)
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter(name="httpBasicAuths")
def http_basic_auths(self) -> Sequence['outputs.GetSpringCloudServiceConfigServerGitSettingRepositoryHttpBasicAuthResult']:
"""
A `http_basic_auth` block as defined below.
"""
return pulumi.get(self, "http_basic_auths")
@property
@pulumi.getter
def label(self) -> str:
"""
        The default label of the Git repository, which is a branch name, tag name, or commit-id of the repository.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter
def name(self) -> str:
"""
        Specifies the name of the Spring Cloud Service resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def patterns(self) -> Sequence[str]:
"""
An array of strings used to match an application name. For each pattern, use the `{application}/{profile}` format with wildcards.
"""
return pulumi.get(self, "patterns")
@property
@pulumi.getter(name="searchPaths")
def search_paths(self) -> Sequence[str]:
"""
An array of strings used to search subdirectories of the Git repository.
"""
return pulumi.get(self, "search_paths")
@property
@pulumi.getter(name="sshAuths")
def ssh_auths(self) -> Sequence['outputs.GetSpringCloudServiceConfigServerGitSettingRepositorySshAuthResult']:
"""
A `ssh_auth` block as defined below.
"""
return pulumi.get(self, "ssh_auths")
@property
@pulumi.getter
def uri(self) -> str:
"""
        The URI of the Git repository.
"""
return pulumi.get(self, "uri")
@pulumi.output_type
class GetSpringCloudServiceConfigServerGitSettingRepositoryHttpBasicAuthResult(dict):
def __init__(__self__, *,
password: str,
username: str):
"""
:param str password: The password used to access the HTTP Basic Authentication Git repository server.
:param str username: The username used to access the HTTP Basic Authentication Git repository server.
"""
pulumi.set(__self__, "password", password)
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def password(self) -> str:
"""
The password used to access the HTTP Basic Authentication Git repository server.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter
def username(self) -> str:
"""
The username used to access the HTTP Basic Authentication Git repository server.
"""
return pulumi.get(self, "username")
@pulumi.output_type
class GetSpringCloudServiceConfigServerGitSettingRepositorySshAuthResult(dict):
def __init__(__self__, *,
host_key: str,
host_key_algorithm: str,
private_key: str,
strict_host_key_checking_enabled: bool):
"""
:param str host_key: The host key of the Git repository server.
:param str host_key_algorithm: The host key algorithm.
:param str private_key: The SSH private key to access the Git repository, needed when the URI starts with `git@` or `ssh://`.
:param bool strict_host_key_checking_enabled: Indicates whether the Config Server instance will fail to start if the host_key does not match.
"""
pulumi.set(__self__, "host_key", host_key)
pulumi.set(__self__, "host_key_algorithm", host_key_algorithm)
pulumi.set(__self__, "private_key", private_key)
pulumi.set(__self__, "strict_host_key_checking_enabled", strict_host_key_checking_enabled)
@property
@pulumi.getter(name="hostKey")
def host_key(self) -> str:
"""
The host key of the Git repository server.
"""
return pulumi.get(self, "host_key")
@property
@pulumi.getter(name="hostKeyAlgorithm")
def host_key_algorithm(self) -> str:
"""
The host key algorithm.
"""
return pulumi.get(self, "host_key_algorithm")
@property
@pulumi.getter(name="privateKey")
def private_key(self) -> str:
"""
The SSH private key to access the Git repository, needed when the URI starts with `git@` or `ssh://`.
"""
return pulumi.get(self, "private_key")
@property
@pulumi.getter(name="strictHostKeyCheckingEnabled")
def strict_host_key_checking_enabled(self) -> bool:
"""
Indicates whether the Config Server instance will fail to start if the host_key does not match.
"""
return pulumi.get(self, "strict_host_key_checking_enabled")
@pulumi.output_type
class GetSpringCloudServiceConfigServerGitSettingSshAuthResult(dict):
def __init__(__self__, *,
host_key: str,
host_key_algorithm: str,
private_key: str,
strict_host_key_checking_enabled: bool):
"""
:param str host_key: The host key of the Git repository server.
:param str host_key_algorithm: The host key algorithm.
:param str private_key: The SSH private key to access the Git repository, needed when the URI starts with `git@` or `ssh://`.
:param bool strict_host_key_checking_enabled: Indicates whether the Config Server instance will fail to start if the host_key does not match.
"""
pulumi.set(__self__, "host_key", host_key)
pulumi.set(__self__, "host_key_algorithm", host_key_algorithm)
pulumi.set(__self__, "private_key", private_key)
pulumi.set(__self__, "strict_host_key_checking_enabled", strict_host_key_checking_enabled)
@property
@pulumi.getter(name="hostKey")
def host_key(self) -> str:
"""
The host key of the Git repository server.
"""
return pulumi.get(self, "host_key")
@property
@pulumi.getter(name="hostKeyAlgorithm")
def host_key_algorithm(self) -> str:
"""
The host key algorithm.
"""
return pulumi.get(self, "host_key_algorithm")
@property
@pulumi.getter(name="privateKey")
def private_key(self) -> str:
"""
The SSH private key to access the Git repository, needed when the URI starts with `git@` or `ssh://`.
"""
return pulumi.get(self, "private_key")
@property
@pulumi.getter(name="strictHostKeyCheckingEnabled")
def strict_host_key_checking_enabled(self) -> bool:
"""
Indicates whether the Config Server instance will fail to start if the host_key does not match.
"""
return pulumi.get(self, "strict_host_key_checking_enabled")
@pulumi.output_type
class GetSpringCloudServiceRequiredNetworkTrafficRuleResult(dict):
def __init__(__self__, *,
direction: str,
fqdns: Sequence[str],
ip_addresses: Sequence[str],
port: int,
protocol: str):
"""
:param str direction: The direction of required traffic. Possible values are `Inbound`, `Outbound`.
:param Sequence[str] fqdns: The FQDN list of required traffic.
:param Sequence[str] ip_addresses: The IP list of required traffic.
:param int port: The port of required traffic.
:param str protocol: The protocol of required traffic.
"""
pulumi.set(__self__, "direction", direction)
pulumi.set(__self__, "fqdns", fqdns)
pulumi.set(__self__, "ip_addresses", ip_addresses)
pulumi.set(__self__, "port", port)
pulumi.set(__self__, "protocol", protocol)
@property
@pulumi.getter
def direction(self) -> str:
"""
The direction of required traffic. Possible values are `Inbound`, `Outbound`.
"""
return pulumi.get(self, "direction")
@property
@pulumi.getter
def fqdns(self) -> Sequence[str]:
"""
The FQDN list of required traffic.
"""
return pulumi.get(self, "fqdns")
@property
@pulumi.getter(name="ipAddresses")
def ip_addresses(self) -> Sequence[str]:
"""
The IP list of required traffic.
"""
return pulumi.get(self, "ip_addresses")
@property
@pulumi.getter
def port(self) -> int:
"""
The port of required traffic.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> str:
"""
The protocol of required traffic.
"""
return pulumi.get(self, "protocol")
|
blob_id: b5c1b3397a031519bb2e3488c1d1f8f16a73cda4
directory_id: 0760fb4901a75766921a205b55686d6d6f049b30
path: /rllib/algorithms/pg/tests/test_pg.py
content_id: 9a289c827bd0b98cced58d854192d91f390bffbc
detected_licenses: ["MIT", "BSD-3-Clause", "Apache-2.0"]
license_type: permissive
repo_name: ray-project/ray
snapshot_id: a4bb6940b08b59a61ef0b8e755a52d8563a2f867
revision_id: edba68c3e7cf255d1d6479329f305adb7fa4c3ed
branch_name: refs/heads/master
visit_date: 2023-08-31T03:36:48.164405
revision_date: 2023-08-31T03:20:38
committer_date: 2023-08-31T03:20:38
github_id: 71,932,349
star_events_count: 29,482
fork_events_count: 5,669
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T21:48:14
gha_created_at: 2016-10-25T19:38:30
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 8,122
extension: py
filename: test_pg.py
|
from gymnasium.spaces import Box, Dict, Discrete, Tuple
import numpy as np
import unittest
import ray
import ray.rllib.algorithms.pg as pg
from ray.rllib.evaluation.postprocessing import Postprocessing
from ray.rllib.examples.env.random_env import RandomEnv
from ray.rllib.models.tf.tf_action_dist import Categorical
from ray.rllib.models.torch.torch_action_dist import TorchCategorical
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils.numpy import fc
from ray.rllib.utils.test_utils import (
check,
check_compute_single_action,
check_train_results,
framework_iterator,
)
from ray import tune
from ray.rllib.utils.metrics.learner_info import (
LEARNER_INFO,
LEARNER_STATS_KEY,
DEFAULT_POLICY_ID,
)
class TestPG(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_pg_compilation(self):
"""Test whether PG can be built with all frameworks."""
config = pg.PGConfig()
        # Test with an observation filter to see whether filters work w/o preprocessing.
config.rollouts(
num_rollout_workers=1,
observation_filter="MeanStdFilter",
).training(train_batch_size=500)
num_iterations = 1
image_space = Box(-1.0, 1.0, shape=(84, 84, 3))
simple_space = Box(-1.0, 1.0, shape=(3,))
tune.register_env(
"random_dict_env",
lambda _: RandomEnv(
{
"observation_space": Dict(
{
"a": simple_space,
"b": Discrete(2),
"c": image_space,
}
),
"action_space": Box(-1.0, 1.0, shape=(1,)),
}
),
)
tune.register_env(
"random_tuple_env",
lambda _: RandomEnv(
{
"observation_space": Tuple(
[simple_space, Discrete(2), image_space]
),
"action_space": Box(-1.0, 1.0, shape=(1,)),
}
),
)
for _ in framework_iterator(config):
# Test for different env types (discrete w/ and w/o image, + cont).
for env in [
"random_dict_env",
"random_tuple_env",
"ALE/MsPacman-v5",
"CartPole-v1",
"FrozenLake-v1",
]:
print(f"env={env}")
config.environment(env)
algo = config.build()
for i in range(num_iterations):
results = algo.train()
check_train_results(results)
print(results)
check_compute_single_action(algo, include_prev_action_reward=True)
def test_pg_loss_functions(self):
"""Tests the PG loss function math."""
config = (
pg.PGConfig()
.rollouts(num_rollout_workers=0)
.training(
gamma=0.99,
model={
"fcnet_hiddens": [10],
"fcnet_activation": "linear",
},
)
)
# Fake CartPole episode of n time steps.
train_batch = SampleBatch(
{
SampleBatch.OBS: np.array(
[[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 1.0, 1.1, 1.2]]
),
SampleBatch.ACTIONS: np.array([0, 1, 1]),
SampleBatch.REWARDS: np.array([1.0, 1.0, 1.0]),
SampleBatch.TERMINATEDS: np.array([False, False, True]),
SampleBatch.EPS_ID: np.array([1234, 1234, 1234]),
SampleBatch.AGENT_INDEX: np.array([0, 0, 0]),
}
)
for fw, sess in framework_iterator(config, session=True):
dist_cls = Categorical if fw != "torch" else TorchCategorical
algo = config.build(env="CartPole-v1")
policy = algo.get_policy()
vars = policy.model.trainable_variables()
if sess:
vars = policy.get_session().run(vars)
# Post-process (calculate simple (non-GAE) advantages) and attach
# to train_batch dict.
# A = [0.99^2 * 1.0 + 0.99 * 1.0 + 1.0, 0.99 * 1.0 + 1.0, 1.0] =
# [2.9701, 1.99, 1.0]
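            # (In general the advantage here is the plain discounted return
            # G_t = sum_k gamma^k * r_{t+k}; vanilla PG uses no baseline/GAE,
            # which matches the arithmetic above.)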
train_batch_ = pg.post_process_advantages(policy, train_batch.copy())
if fw == "torch":
train_batch_ = policy._lazy_tensor_dict(train_batch_)
# Check Advantage values.
check(train_batch_[Postprocessing.ADVANTAGES], [2.9701, 1.99, 1.0])
# Actual loss results.
if sess:
results = policy.get_session().run(
policy._loss,
feed_dict=policy._get_loss_inputs_dict(train_batch_, shuffle=False),
)
else:
results = policy.loss(
policy.model, dist_class=dist_cls, train_batch=train_batch_
)
# Calculate expected results.
if fw != "torch":
expected_logits = fc(
fc(train_batch_[SampleBatch.OBS], vars[0], vars[1], framework=fw),
vars[2],
vars[3],
framework=fw,
)
else:
expected_logits = fc(
fc(train_batch_[SampleBatch.OBS], vars[2], vars[3], framework=fw),
vars[0],
vars[1],
framework=fw,
)
expected_logp = dist_cls(expected_logits, policy.model).logp(
train_batch_[SampleBatch.ACTIONS]
)
adv = train_batch_[Postprocessing.ADVANTAGES]
if sess:
expected_logp = sess.run(expected_logp)
elif fw == "torch":
expected_logp = expected_logp.detach().cpu().numpy()
adv = adv.detach().cpu().numpy()
else:
expected_logp = expected_logp.numpy()
expected_loss = -np.mean(expected_logp * adv)
check(results, expected_loss, decimals=4)
def test_pg_lr(self):
"""Test PG with learning rate schedule."""
config = pg.PGConfig()
config.reporting(
min_sample_timesteps_per_iteration=10,
# Make sure that results contain info on default policy
min_train_timesteps_per_iteration=10,
# 0 metrics reporting delay, this makes sure timestep,
# which lr depends on, is updated after each worker rollout.
min_time_s_per_iteration=0,
)
config.rollouts(
num_rollout_workers=1,
)
config.training(
lr=0.2,
lr_schedule=[[0, 0.2], [500, 0.001]],
train_batch_size=50,
)
def _step_n_times(algo, n: int):
"""Step Algorithm n times.
Returns:
learning rate at the end of the execution.
"""
for _ in range(n):
results = algo.train()
return results["info"][LEARNER_INFO][DEFAULT_POLICY_ID][LEARNER_STATS_KEY][
"cur_lr"
]
for _ in framework_iterator(config):
algo = config.build(env="CartPole-v1")
lr = _step_n_times(algo, 1) # 50 timesteps
# Close to 0.2
self.assertGreaterEqual(lr, 0.15)
lr = _step_n_times(algo, 8) # Close to 500 timesteps
            # LR is annealing toward 0.001; it should have decayed from 0.2 by now.
self.assertLessEqual(float(lr), 0.5)
lr = _step_n_times(algo, 2) # > 500 timesteps
# LR == 0.001
self.assertAlmostEqual(lr, 0.001)
algo.stop()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
8a5865032fa7cb33162cbb46b1dca9dc8b3131d0
|
8e68fa08e0275bb77a57f2dcb1b2082afbd928e9
|
/tests/test_tokenizer.py
|
6eddf6e81c974a8eb961439347987ce4e93401f4
|
[
"MIT"
] |
permissive
|
marcelm/cutadapt
|
a892a871ed8b5b8491c2177ede43a6ebe0b43810
|
cefb3e0bc6bacf5e15a842174502852466bd6990
|
refs/heads/main
| 2023-09-01T08:14:21.694545
| 2023-08-31T08:18:46
| 2023-08-31T08:18:46
| 4,577,614
| 450
| 165
|
MIT
| 2023-09-11T06:48:19
| 2012-06-06T20:26:25
|
Python
|
UTF-8
|
Python
| false
| false
| 633
|
py
|
test_tokenizer.py
|
import pytest
from cutadapt.tokenizer import tokenize_braces, StringToken, BraceToken, TokenizeError
def test_tokenize_braces():
tokenize = tokenize_braces
assert list(tokenize("")) == []
assert list(tokenize("text")) == [StringToken("text")]
assert list(tokenize("before {variable} after")) == [
StringToken("before "),
BraceToken("variable"),
StringToken(" after"),
]
def test_tokenize_unexpected_braces():
with pytest.raises(TokenizeError):
list(tokenize_braces("abc {def{ghi}"))
with pytest.raises(TokenizeError):
list(tokenize_braces("abc {def} gh} i"))
|
d1777ad1c9469b9d1654385378f064cb99d06564
|
484341c71d06ebe09adc1b8ee7691eb516e5bb3d
|
/src/fava/beans/helpers.py
|
84f824816405bfc61564c04eda8920d305f1b0e8
|
[
"MIT"
] |
permissive
|
beancount/fava
|
076aa5f279a6ef40bbd48eaac61e18afcdf8dac0
|
216d7ddf5796dd46f12515e8c2726692393e545b
|
refs/heads/main
| 2023-09-06T05:13:11.284042
| 2023-09-04T19:01:37
| 2023-09-04T19:01:37
| 47,411,155
| 1,630
| 292
|
MIT
| 2023-09-13T16:18:23
| 2015-12-04T15:12:55
|
Python
|
UTF-8
|
Python
| false
| false
| 544
|
py
|
helpers.py
|
"""Helpers for Beancount entries."""
from __future__ import annotations
from typing import Any
from typing import TypeVar
from fava.beans.abc import Directive
from fava.beans.abc import Posting
T = TypeVar("T", Directive, Posting)
def replace(entry: T, **kwargs: Any) -> T:
"""Create a copy of the given directive, replacing some arguments."""
if isinstance(entry, tuple):
return entry._replace(**kwargs) # type: ignore[attr-defined,no-any-return]
raise TypeError(f"Could not replace attribute in type {type(entry)}")
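# Usage sketch (hypothetical transaction `txn`): Beancount directives are
# namedtuples, so `replace(txn, narration="edited")` returns a shallow copy
# with only that field changed.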
|
bc4bb118200d20030b8fa9645ea32e8462082464
|
83b8b30ebb633eecd29ca0a7a20cc43a293c9333
|
/tests/basics/frozenset_difference.py
|
bc8b9c21cc6684eb84b192ec77dfaa14f856dc82
|
[
"MIT",
"GPL-1.0-or-later"
] |
permissive
|
adafruit/circuitpython
|
430ec895149d1eb814b505db39b4977a35ee88a7
|
506dca71b0cbb7af749bb51f86b01021db5483b3
|
refs/heads/main
| 2023-08-21T16:30:46.781068
| 2023-08-20T00:39:44
| 2023-08-20T00:39:44
| 66,166,069
| 3,806
| 1,560
|
MIT
| 2023-09-14T19:23:51
| 2016-08-20T20:10:40
|
C
|
UTF-8
|
Python
| false
| false
| 434
|
py
|
frozenset_difference.py
|
try:
frozenset
except NameError:
print("SKIP")
raise SystemExit
l = [1, 2, 3, 4]
s = frozenset(l)
outs = [s.difference(),
s.difference(frozenset({1})),
s.difference(frozenset({1}), [1, 2]),
s.difference(frozenset({1}), {1, 2}, {2, 3})]
for out in outs:
print(type(out), sorted(out))
s = frozenset(l)
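# frozenset is immutable, so the in-place difference_update must not exist;
# conforming implementations are expected to raise AttributeError here.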
try:
print(s.difference_update({1}))
except AttributeError:
print("AttributeError")
|
79013f8f960cff6c8de0b30e0b69ce0d82559b0f
|
7dfc958846a98c9dc90829ace237adb0f4e79057
|
/tests/tasks/test_link.py
|
f56692a42bb0afef7b0a759c2926e7574f87e74c
|
[
"Apache-2.0"
] |
permissive
|
ploomber/ploomber
|
b4fad542106ed85bb3a195f38ee547da111bd049
|
516b64e531b13eeda36b747a268506fa8dd4dc98
|
refs/heads/master
| 2023-08-08T07:18:12.419524
| 2023-08-03T04:40:19
| 2023-08-03T04:40:19
| 235,190,220
| 3,076
| 219
|
Apache-2.0
| 2023-08-29T03:56:08
| 2020-01-20T20:13:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,820
|
py
|
test_link.py
|
from pathlib import Path
import pytest
from ploomber import DAG
from ploomber.tasks import Link, PythonCallable
from ploomber.products import File
# TODO: merge this with test_task_input.py and parametrize tests
def touch_root(product):
Path(str(product)).touch()
def touch(product, upstream):
Path(str(product)).touch()
def test_link_is_up_to_date_before_build(tmp_directory):
dag = DAG()
Path("some_file.txt").touch()
t1 = Link(File("some_file.txt"), dag, name="some_file")
assert not t1.product._is_outdated()
def test_downstream_from_link_is_up_to_date_after_build(tmp_directory):
# Link.metadata.timestamp is patched to return 0, hence checking timestamps
# from upstream dependencies in t2 should not mark it as outdated
dag = DAG()
Path("some_file.txt").touch()
t1 = Link(File("some_file.txt"), dag, name="some_file")
t2 = PythonCallable(touch, File("another_file"), dag)
t1 >> t2
dag.build()
assert not t2.product._is_outdated()
def test_error_raised_if_link_has_upstream_dependencies(tmp_directory):
dag = DAG()
t0 = PythonCallable(touch_root, File("another_file"), dag)
Path("some_file.txt").touch()
t1 = Link(File("some_file.txt"), dag, name="some_file")
with pytest.raises(RuntimeError) as excinfo:
t0 >> t1
msg = "Link tasks should not have upstream dependencies"
assert msg in str(excinfo.getrepr())
def test_error_raised_if_link_product_does_not_exist(tmp_directory):
dag = DAG()
with pytest.raises(RuntimeError) as excinfo:
Link(File("some_file.txt"), dag, name="some_file")
msg = (
"Link tasks should point to Products that already exist. "
'"some_file" task product "some_file.txt" does not exist'
)
assert msg in str(excinfo.getrepr())
|
19f10fe377cbe9664f9abfaf5b645863844f7037
|
0d9b5e2fa71b51b532f38d92ac7caff7d70f0110
|
/playlistor/urls.py
|
007ccd809e3a07657a5e8216154bae2bef43c0a0
|
[
"MIT"
] |
permissive
|
akornor/playlistor
|
7edf05739bfb1acc51965c6724cba4e074c08387
|
2b9a6774bcdc3f4a788392f246f0b1a38d32064d
|
refs/heads/master
| 2023-07-12T12:09:31.793107
| 2023-07-11T22:04:27
| 2023-07-11T22:04:27
| 161,928,541
| 362
| 29
|
MIT
| 2023-01-25T13:14:12
| 2018-12-15T17:30:44
|
Python
|
UTF-8
|
Python
| false
| false
| 490
|
py
|
urls.py
|
from django.urls import path, include
from main import views
urlpatterns = [
path("celery-progress/", include("celery_progress.urls"), name="celery-progress"),
path("login", views.login, name="login"),
path("expand", views.expand, name="expand"),
path("playlist", views.playlist, name="playlist"),
path("callback", views.callback, name="spotify-callback"),
path("subscribers", views.add_subscriber, name="add_subscriber"),
path("", views.index, name="home"),
]
|
0a903d7d221bb7016a5f9ae99953eeb7552f5324
|
45434808785c1e76ee5992523cd5098eec138686
|
/tests/test_readme.py
|
ef4338616a930c34ba2ee9f576e09a00312bb558
|
[
"MIT"
] |
permissive
|
EmilStenstrom/conllu
|
7a14e77a44d95c165a107555a76eaaa08dd454cd
|
1650915fd7edd883a63c3660d4c42277d8ca748b
|
refs/heads/master
| 2023-09-01T15:41:20.388099
| 2023-08-02T02:22:52
| 2023-08-02T14:25:52
| 65,674,992
| 310
| 72
|
MIT
| 2023-08-02T14:25:54
| 2016-08-14T16:12:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,656
|
py
|
test_readme.py
|
import contextlib
import doctest
import os
import re
import tempfile
from doctest import OutputChecker
from pathlib import Path
from unittest import mock
class ReadmeOutputChecker(OutputChecker):
def check_output(self, want, got, optionflags):
# Allow dicts and lists to be formatted with whitespace around brackets
if want.startswith("{") or want.startswith("["):
want = re.sub(r"\s*([\{\[\]\}])\s*", r"\1", want, flags=re.MULTILINE)
got = re.sub(r"\s*([\{\[\]\}])\s*", r"\1", got, flags=re.MULTILINE)
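            # e.g. "{ 'a': 1 }" and "{'a': 1}" both normalize to "{'a': 1}",
            # so README examples may format dicts/lists with extra whitespace.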
return OutputChecker.check_output(self, want, got, optionflags)
@contextlib.contextmanager
def temporary_chdir():
try:
old = os.getcwd()
with tempfile.TemporaryDirectory() as tempdir:
os.chdir(tempdir)
yield
finally:
os.chdir(old)
def test_readme():
readme_file = Path("README.md")
# Copy contents of README, to remove ``` and run all code in one session
readme_data = ""
with open(readme_file, "r") as f:
for line in f:
if line == "```\n":
readme_data += "\n"
else:
readme_data += line
with temporary_chdir():
with mock.patch('doctest.OutputChecker', ReadmeOutputChecker):
doctest.run_docstring_examples(
readme_data,
globs={},
name=readme_file.name,
optionflags=(
doctest.ELLIPSIS
| doctest.NORMALIZE_WHITESPACE
| doctest.FAIL_FAST
| doctest.REPORT_NDIFF
),
)
|
34f151cd2138994b2f7b1edd732ca8f2de57f74d
|
944a49e62bc79622fe01abee62403397a1b0504d
|
/openstackclient/tests/functional/network/v2/test_network_ndp_proxy.py
|
62eecf869ffaaeec45fcb7445a73771af24f3cfb
|
[
"Apache-2.0"
] |
permissive
|
openstack/python-openstackclient
|
1c22984f9b29ae8ff9bbea26067981e2130ed039
|
78988d1786c0634ee055714910d1e6187f941673
|
refs/heads/master
| 2023-08-28T15:10:05.542862
| 2023-08-26T12:44:20
| 2023-08-26T12:44:20
| 4,170,310
| 286
| 224
|
Apache-2.0
| 2022-09-19T13:29:49
| 2012-04-28T21:07:25
|
Python
|
UTF-8
|
Python
| false
| false
| 9,357
|
py
|
test_network_ndp_proxy.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstackclient.tests.functional.network.v2 import common
class L3NDPProxyTests(common.NetworkTests):
def setUp(self):
super().setUp()
# Nothing in this class works with Nova Network
if not self.haz_network:
self.skipTest("No Network service present")
if not self.is_extension_enabled('l3-ndp-proxy'):
self.skipTest("No l3-ndp-proxy extension present")
self.ROT_NAME = self.getUniqueString()
self.EXT_NET_NAME = self.getUniqueString()
self.EXT_SUB_NAME = self.getUniqueString()
self.INT_NET_NAME = self.getUniqueString()
self.INT_SUB_NAME = self.getUniqueString()
self.INT_PORT_NAME = self.getUniqueString()
self.ADDR_SCOPE_NAME = self.getUniqueString()
self.SUBNET_P_NAME = self.getUniqueString()
self.created_ndp_proxies = []
json_output = self.openstack(
'address scope create --ip-version 6 '
'%(address_s_name)s' % {'address_s_name': self.ADDR_SCOPE_NAME},
parse_output=True,
)
self.assertIsNotNone(json_output['id'])
self.ADDRESS_SCOPE_ID = json_output['id']
json_output = self.openstack(
'subnet pool create %(subnet_p_name)s '
'--address-scope %(address_scope)s '
'--pool-prefix 2001:db8::/96 --default-prefix-length 112'
% {
'subnet_p_name': self.SUBNET_P_NAME,
'address_scope': self.ADDRESS_SCOPE_ID,
},
parse_output=True,
)
self.assertIsNotNone(json_output['id'])
self.SUBNET_POOL_ID = json_output['id']
json_output = self.openstack(
'network create --external ' + self.EXT_NET_NAME,
parse_output=True,
)
self.assertIsNotNone(json_output['id'])
self.EXT_NET_ID = json_output['id']
json_output = self.openstack(
'subnet create --ip-version 6 --subnet-pool '
'%(subnet_pool)s --network %(net_id)s %(sub_name)s'
% {
'subnet_pool': self.SUBNET_POOL_ID,
'net_id': self.EXT_NET_ID,
'sub_name': self.EXT_SUB_NAME,
},
parse_output=True,
)
self.assertIsNotNone(json_output['id'])
self.EXT_SUB_ID = json_output['id']
json_output = self.openstack(
'router create ' + self.ROT_NAME,
parse_output=True,
)
self.assertIsNotNone(json_output['id'])
self.ROT_ID = json_output['id']
output = self.openstack(
'router set %(router_id)s --external-gateway %(net_id)s'
% {'router_id': self.ROT_ID, 'net_id': self.EXT_NET_ID}
)
self.assertEqual('', output)
output = self.openstack('router set --enable-ndp-proxy ' + self.ROT_ID)
self.assertEqual('', output)
json_output = self.openstack(
'router show -c enable_ndp_proxy ' + self.ROT_ID,
parse_output=True,
)
self.assertTrue(json_output['enable_ndp_proxy'])
json_output = self.openstack(
'network create ' + self.INT_NET_NAME,
parse_output=True,
)
self.assertIsNotNone(json_output['id'])
self.INT_NET_ID = json_output['id']
json_output = self.openstack(
'subnet create --ip-version 6 --subnet-pool '
'%(subnet_pool)s --network %(net_id)s %(sub_name)s'
% {
'subnet_pool': self.SUBNET_POOL_ID,
'net_id': self.INT_NET_ID,
'sub_name': self.INT_SUB_NAME,
},
parse_output=True,
)
self.assertIsNotNone(json_output['id'])
self.INT_SUB_ID = json_output['id']
json_output = self.openstack(
'port create --network %(net_id)s '
'%(port_name)s'
% {
'net_id': self.INT_NET_ID,
'port_name': self.INT_PORT_NAME,
},
parse_output=True,
)
self.assertIsNotNone(json_output['id'])
self.INT_PORT_ID = json_output['id']
self.INT_PORT_ADDRESS = json_output['fixed_ips'][0]['ip_address']
output = self.openstack(
'router add subnet ' + self.ROT_ID + ' ' + self.INT_SUB_ID
)
self.assertEqual('', output)
def tearDown(self):
for ndp_proxy in self.created_ndp_proxies:
output = self.openstack(
'router ndp proxy delete ' + ndp_proxy['id']
)
self.assertEqual('', output)
output = self.openstack('port delete ' + self.INT_PORT_ID)
self.assertEqual('', output)
output = self.openstack(
'router set --disable-ndp-proxy ' + self.ROT_ID
)
self.assertEqual('', output)
output = self.openstack(
'router remove subnet ' + self.ROT_ID + ' ' + self.INT_SUB_ID
)
self.assertEqual('', output)
output = self.openstack('subnet delete ' + self.INT_SUB_ID)
self.assertEqual('', output)
output = self.openstack('network delete ' + self.INT_NET_ID)
self.assertEqual('', output)
output = self.openstack(
'router unset ' + self.ROT_ID + ' ' + '--external-gateway'
)
self.assertEqual('', output)
output = self.openstack('router delete ' + self.ROT_ID)
self.assertEqual('', output)
output = self.openstack('subnet delete ' + self.EXT_SUB_ID)
self.assertEqual('', output)
output = self.openstack('network delete ' + self.EXT_NET_ID)
self.assertEqual('', output)
output = self.openstack('subnet pool delete ' + self.SUBNET_POOL_ID)
self.assertEqual('', output)
output = self.openstack(
'address scope delete ' + self.ADDRESS_SCOPE_ID
)
self.assertEqual('', output)
super().tearDown()
def _create_ndp_proxies(self, ndp_proxies):
for ndp_proxy in ndp_proxies:
output = self.openstack(
'router ndp proxy create %(router)s --name %(name)s '
'--port %(port)s --ip-address %(address)s'
% {
'router': ndp_proxy['router_id'],
'name': ndp_proxy['name'],
'port': ndp_proxy['port_id'],
'address': ndp_proxy['address'],
},
parse_output=True,
)
self.assertEqual(ndp_proxy['router_id'], output['router_id'])
self.assertEqual(ndp_proxy['port_id'], output['port_id'])
self.assertEqual(ndp_proxy['address'], output['ip_address'])
self.created_ndp_proxies.append(output)
def test_create_ndp_proxy(self):
ndp_proxies = [
{
'name': self.getUniqueString(),
'router_id': self.ROT_ID,
'port_id': self.INT_PORT_ID,
'address': self.INT_PORT_ADDRESS,
}
]
self._create_ndp_proxies(ndp_proxies)
def test_ndp_proxy_list(self):
ndp_proxies = {
'name': self.getUniqueString(),
'router_id': self.ROT_ID,
'port_id': self.INT_PORT_ID,
'address': self.INT_PORT_ADDRESS,
}
self._create_ndp_proxies([ndp_proxies])
ndp_proxy = self.openstack(
'router ndp proxy list',
parse_output=True,
)[0]
self.assertEqual(ndp_proxies['name'], ndp_proxy['Name'])
self.assertEqual(ndp_proxies['router_id'], ndp_proxy['Router ID'])
self.assertEqual(ndp_proxies['address'], ndp_proxy['IP Address'])
def test_ndp_proxy_set_and_show(self):
ndp_proxies = {
'name': self.getUniqueString(),
'router_id': self.ROT_ID,
'port_id': self.INT_PORT_ID,
'address': self.INT_PORT_ADDRESS,
}
description = 'balala'
self._create_ndp_proxies([ndp_proxies])
ndp_proxy_id = self.created_ndp_proxies[0]['id']
output = self.openstack(
'router ndp proxy set --description %s %s'
% (description, ndp_proxy_id)
)
self.assertEqual('', output)
json_output = self.openstack(
'router ndp proxy show ' + ndp_proxy_id,
parse_output=True,
)
self.assertEqual(ndp_proxies['name'], json_output['name'])
self.assertEqual(ndp_proxies['router_id'], json_output['router_id'])
self.assertEqual(ndp_proxies['port_id'], json_output['port_id'])
self.assertEqual(ndp_proxies['address'], json_output['ip_address'])
self.assertEqual(description, json_output['description'])
|
02e6206cd87c7b4cf77c1f7a35ae1814b9824952
|
b40d1a26ea04a19ec0da7bf55db84b7ee36cc898
|
/leetcode.com/python/406_Queue_Reconstruction_by_Height.py
|
14327dae0b3f28a53d8f9fe35a3456521ab1e01c
|
[
"MIT"
] |
permissive
|
partho-maple/coding-interview-gym
|
5e8af7d404c28d4b9b52e5cffc540fd51d8025cf
|
20ae1a048eddbc9a32c819cf61258e2b57572f05
|
refs/heads/master
| 2022-09-11T16:36:01.702626
| 2022-03-14T08:39:47
| 2022-03-14T08:39:47
| 69,802,909
| 862
| 438
|
MIT
| 2022-08-18T06:42:46
| 2016-10-02T14:51:31
|
Python
|
UTF-8
|
Python
| false
| false
| 443
|
py
|
406_Queue_Reconstruction_by_Height.py
|
class Solution(object):
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
        people.sort(key=lambda x: (-x[0], x[1]))
output = []
for i in people:
output.insert(i[1], i)
return output
sol = Solution()
input = [[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]
output = sol.reconstructQueue(input)
print("Res: ", output)
|
62144df50ed7c6ba5700ec9ac33d62cd1beac6b8
|
9bc2e2961f6f7c4a47e6eebc897fbf14a4e85d61
|
/studio/storage/storage_type.py
|
099e1dd0e416543edd6bc7bfa47d08d104500ca6
|
[
"Apache-2.0"
] |
permissive
|
studioml/studio
|
9ee8fd12a9fa0ac09144e2520b1a3be8756ab2fa
|
e8aedf9c15baa872eb7aee4d6b28ad6208a9fca2
|
refs/heads/master
| 2023-08-16T01:54:59.184542
| 2023-01-09T20:06:01
| 2023-01-09T20:06:01
| 91,284,550
| 406
| 59
|
Apache-2.0
| 2023-09-06T17:23:10
| 2017-05-15T01:49:28
|
Python
|
UTF-8
|
Python
| false
| false
| 200
|
py
|
storage_type.py
|
from enum import Enum
class StorageType(Enum):
storageHTTP = 1
storageS3 = 2
storageLocal = 3
storageFirebase = 4
storageDockerHub = 5
storageSHub = 6
storageInvalid = 99
|
e4e4d461df9e02b6412c7847a3b41fe3a858f5b4
|
420e7db695f82c7cf9d29735df956fa86bc0f14f
|
/tests/test_dns_server.py
|
4d2d0aa41fe2c8c9add4c5409980edfa80619c2f
|
[
"BSD-3-Clause"
] |
permissive
|
Kkevsterrr/geneva
|
bf929e3056dc6215bca079f1fd587866907a1cd5
|
6b091060ed0946b98a2ff9196dfbf93d85cbb28a
|
refs/heads/master
| 2023-08-23T22:30:49.750259
| 2023-05-18T21:24:14
| 2023-05-18T21:24:14
| 221,001,148
| 1,771
| 168
|
BSD-3-Clause
| 2023-05-26T10:04:58
| 2019-11-11T14:37:39
|
Python
|
UTF-8
|
Python
| false
| false
| 17,303
|
py
|
test_dns_server.py
|
# Scapy modules (IP, UDP, raw, and struct are re-exported by scapy.layers.dns)
from scapy.layers.dns import IP, UDP, raw, DNS as DNS_, DNSQR, struct
# DNS Modules
import dns.zone
# Import the root of the project: used to import DNSServer
import os
import sys
import inspect
import logging
import pytest
basepath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(basepath)
sys.path.insert(0, parent_dir)
import evolve
from plugins.dns.server import DNSServer
# Default values
INTERFACE = 'lo'
LISTENER = DNSServer.socket_UDP
PORT = 53
AUTHORITY = False
DNS_RESOLVER = "1.1.1.1"
LOG_DIR = basepath + "/DNS/"
ZONES_DIR = basepath + "/DNS/zones/"
LOGGING_LEVEL = logging.INFO
# Error definitions
RECORD_COUNT_ERROR = "record_count_error"
RECORD_VALUE_ERROR = "record_value_error"
@pytest.mark.skip()
@pytest.mark.parametrize("listener", [DNSServer.socket_UDP, DNSServer.socket_TCP, DNSServer.netfilter_queue])
def test_dns_server(listener, logger):
"""
Tests the main method
"""
# TODO test is currently disabled, will be replaced by a test that
# tests the full functionality of receiving DNS queries
args = {
'interface': INTERFACE,
'port': PORT,
'authority': AUTHORITY,
'zones_dir': ZONES_DIR,
'log_dir': LOG_DIR,
'dry_run': True,
'listener': listener
}
server = DNSServer.server.main(args)
@pytest.mark.parametrize("listener", [DNSServer.socket_UDP, DNSServer.socket_TCP, DNSServer.netfilter_queue])
def test_zone_records(listener, logger):
"""
Tests if it can read the information in the zones file correctly
"""
args = {
"interface": INTERFACE,
"listener": listener,
"port": PORT,
"authority": AUTHORITY,
"dns_resolver": DNS_RESOLVER,
"zones_dir": ZONES_DIR
}
# Testing variable
server = DNSServer(args, logger=logger)
server.load_zones()
# Answer variables
example_com = dns.zone.from_file(ZONES_DIR + "example.com", "example.com", rdclass=1, relativize=False)
example2_com = dns.zone.from_file(ZONES_DIR + "example2.com", "example2.com", rdclass=1, relativize=False)
# ---------------- Testing A records -----------------
# No errors
check_records(server, example_com, "example.com.", "A")
check_records(server, example_com, "ns1.example.com.", "A")
check_records(server, example_com, "ns2.example.com.", "A")
check_records(server, example_com, "mail.example.com.", "A")
check_records(server, example_com, "mail2.example.com.", "A")
check_records(server, example_com, "www2.example.com.", "A")
# Errors
# ns1.example.com. has 2 A records while ns2.example.com. has 1 A record
check_records(server, example_com, "ns2.example.com.", "A", False, RECORD_COUNT_ERROR, "ns1.example.com.")
# Both example.com. and ns1.example.com. have 2 A records but the value of those records are different
check_records(server, example_com, "example.com.", "A", False, RECORD_VALUE_ERROR, "ns1.example.com.")
# No errors with a different zone file
check_records(server, example2_com, "example2.com.", "A")
check_records(server, example2_com, "ns1.example2.com.", "A")
check_records(server, example2_com, "ns2.example2.com.", "A")
check_records(server, example2_com, "mail.example2.com.", "A")
check_records(server, example2_com, "mail2.example2.com.", "A")
check_records(server, example2_com, "www2.example2.com.", "A")
# Errors with a different zone
# ns1.example.com. has 2 A records while ns2.example.com. has 1 A record
check_records(server, example2_com, "ns2.example2.com.", "A", False, RECORD_COUNT_ERROR, "ns1.example2.com.")
# Both example.com. and ns1.example.com. have 2 A records but the value of those records are different
check_records(server, example2_com, "example2.com.", "A", False, RECORD_VALUE_ERROR, "ns1.example2.com.")
# ---------------- Testing TXT records -----------------
# No errors
check_records(server, example_com, "example.com.", "TXT")
check_records(server, example2_com, "example2.com.", "TXT")
# ---------------- Testing MX records -----------------
# No errors
check_records(server, example_com, "example.com.", "MX")
check_records(server, example2_com, "example2.com.", "MX")
# ---------------- Testing NS records -----------------
# No errors
check_records(server, example_com, "example.com.", "NS")
check_records(server, example2_com, "example2.com.", "NS")
# ---------------- Testing CNAME records -----------------
# No errors
check_records(server, example_com, "www.example.com.", "CNAME")
check_records(server, example2_com, "www.example2.com.", "CNAME")
# ---------------- Testing NXDOMAIN -----------------
# No errors
check_nxdomain(server, "www3.example.com.", "A")
check_nxdomain(server, "www3.example.com.", "TXT")
check_nxdomain(server, "www3.example.com.", "NS")
check_nxdomain(server, "www3.example.com.", "MX")
check_nxdomain(server, "www3.example.com.", "CNAME")
def test_forwarding(logger):
"""
Tests if DNSServer properly enables and disables forwarding of DNS queries that it does not have answers to
"""
args = {
"interface": INTERFACE,
"listener": LISTENER,
"port": PORT,
"authority": AUTHORITY,
"dns_resolver": DNS_RESOLVER,
"zones_dir": ZONES_DIR
}
args_no_forward = {
"interface": INTERFACE,
"listener": LISTENER,
"port": PORT,
"authority": AUTHORITY,
"dns_resolver": None,
"zones_dir": ZONES_DIR
}
# Testing variable
server = DNSServer(args, logger=logger)
server_no_forward = DNSServer(args_no_forward, logger=logger)
# Zone loading happens during actual startup, so load it here
server.load_zones()
server_no_forward.load_zones()
# Answer variables
example_com = dns.zone.from_file(ZONES_DIR + "example.com", "example.com", rdclass=1, relativize=False)
example2_com = dns.zone.from_file(ZONES_DIR + "example2.com", "example2.com", rdclass=1, relativize=False)
# Test if it can forward a query
check_record_exists(server, "google.com.", "A")
check_record_exists(server, "msn.com.", "A")
# ------------- NXDOMAIN ---------------
# NXDOMAIN for all domains outside of the zones configured
check_nxdomain(server_no_forward, "google.com.", "A")
check_nxdomain(server_no_forward, "google.com.", "TXT")
check_nxdomain(server_no_forward, "google.com.", "NS")
check_nxdomain(server_no_forward, "google.com.", "MX")
check_nxdomain(server_no_forward, "google.com.", "CNAME")
check_nxdomain(server_no_forward, "msn.com.", "A")
check_nxdomain(server_no_forward, "msn.com.", "TXT")
check_nxdomain(server_no_forward, "msn.com.", "NS")
check_nxdomain(server_no_forward, "msn.com.", "MX")
check_nxdomain(server_no_forward, "msn.com.", "CNAME")
# NXDOMAIN for domains declared in the zones but does not exist
check_nxdomain(server_no_forward, "www3.example.com.", "A")
check_nxdomain(server_no_forward, "www3.example.com.", "TXT")
check_nxdomain(server_no_forward, "www3.example.com.", "NS")
check_nxdomain(server_no_forward, "www3.example.com.", "MX")
check_nxdomain(server_no_forward, "www3.example.com.", "CNAME")
# ------------- Resource Records ---------------
# Resource Records declared in the zones
check_records(server_no_forward, example_com, "example.com.", "A")
check_records(server_no_forward, example_com, "example.com.", "TXT")
check_records(server_no_forward, example_com, "example.com.", "MX")
check_records(server_no_forward, example_com, "example.com.", "NS")
check_records(server_no_forward, example_com, "www.example.com.", "CNAME")
check_records(server_no_forward, example2_com, "example2.com.", "A")
check_records(server_no_forward, example2_com, "example2.com.", "TXT")
check_records(server_no_forward, example2_com, "example2.com.", "MX")
check_records(server_no_forward, example2_com, "example2.com.", "NS")
check_records(server_no_forward, example2_com, "www.example2.com.", "CNAME")
def test_authority_reply(logger):
"""
Tests that the DNS responses correctly include the authority flag when set
"""
args = {
"interface": INTERFACE,
"listener": LISTENER,
"port": PORT,
"authority": True,
"dns_resolver": DNS_RESOLVER,
"zones_dir": ZONES_DIR
}
args_no_auth = {
"interface": INTERFACE,
"listener": LISTENER,
"port": PORT,
"authority": False,
"dns_resolver": DNS_RESOLVER,
"zones_dir": ZONES_DIR
}
server = DNSServer(args, logger=logger)
server_no_auth = DNSServer(args_no_auth, logger=logger)
# Zone loading happens during actual startup, so load it here
server.load_zones()
server_no_auth.load_zones()
example_com = dns.zone.from_file(ZONES_DIR + "example.com", "example.com", rdclass=1, relativize=False)
# Test with authority - Zones configuration
check_records(server, example_com, "example.com.", "A", authority=True)
check_records(server, example_com, "example.com.", "TXT", authority=True)
check_records(server, example_com, "example.com.", "MX", authority=True)
check_records(server, example_com, "example.com.", "NS", authority=True)
check_records(server, example_com, "www.example.com.", "CNAME", authority=True)
# Test with no authority - Zone configuration
check_records(server_no_auth, example_com, "example.com.", "A", authority=False)
check_records(server_no_auth, example_com, "example.com.", "TXT", authority=False)
check_records(server_no_auth, example_com, "example.com.", "MX", authority=False)
check_records(server_no_auth, example_com, "example.com.", "NS", authority=False)
check_records(server_no_auth, example_com, "www.example.com.", "CNAME", authority=False)
# Test with authority - Zone configuration - NXDOMAIN
check_nxdomain(server, "www3.example.com.", "A", authority=True)
check_nxdomain(server, "www3.example.com.", "TXT", authority=True)
check_nxdomain(server, "www3.example.com.", "NS", authority=True)
check_nxdomain(server, "www3.example.com.", "MX", authority=True)
check_nxdomain(server, "www3.example.com.", "CNAME", authority=True)
# Test without authority - Zone configuration - NXDOMAIN
check_nxdomain(server_no_auth, "www3.example.com.", "A", authority=False)
check_nxdomain(server_no_auth, "www3.example.com.", "TXT", authority=False)
check_nxdomain(server_no_auth, "www3.example.com.", "NS", authority=False)
check_nxdomain(server_no_auth, "www3.example.com.", "MX", authority=False)
check_nxdomain(server_no_auth, "www3.example.com.", "CNAME", authority=False)
# Test with authority - DNS Forwarding - Exists
check_record_exists(server, "google.com.", "A", authority=True)
check_record_exists(server, "msn.com.", "A", authority=True)
# Test without authority - DNS Forwarding - Exists
check_record_exists(server_no_auth, "google.com.", "A", authority=False)
check_record_exists(server_no_auth, "msn.com.", "A", authority=False)
# Test with authority - DNS Forwarding - NXDOMAIN
check_nxdomain(server, "12398.google.com.", "A", authority=True)
check_record_exists(server, "12398.msn.com.", "A", authority=True)
# Test without authority - DNS Forwarding - NXDOMAIN
check_nxdomain(server_no_auth, "12398.google.com.", "A", authority=False)
check_record_exists(server_no_auth, "12398.msn.com.", "A", authority=False)
def test_tld_does_not_exist(logger):
"""
Tests that if one queries for a TLD that does not exist, the program will simply respond with NXDOMAIN
:return:
"""
args = {
"interface": INTERFACE,
"listener": LISTENER,
"port": PORT,
"authority": AUTHORITY,
"dns_resolver": DNS_RESOLVER,
"zones_dir": ZONES_DIR
}
args_no_auth = {
"interface": INTERFACE,
"listener": LISTENER,
"port": PORT,
"authority": AUTHORITY,
"dns_resolver": None,
"zones_dir": ZONES_DIR
}
server = DNSServer(args, logger=logger)
server_no_forward = DNSServer(args_no_auth, logger=logger)
# Zone loading happens during actual startup, so load it here
server.load_zones()
server_no_forward.load_zones()
check_nxdomain(server_no_forward, "google.tp.", "A")
check_nxdomain(server_no_forward, "google.techn.", "CNAME")
check_nxdomain(server_no_forward, "google.techno.", "MX")
check_nxdomain(server_no_forward, "google.technol.", "TXT")
check_nxdomain(server_no_forward, "google.technolo.", "NS")
check_nxdomain(server, "google.tp.", "A")
check_nxdomain(server, "google.techn.", "CNAME")
check_nxdomain(server, "google.techno.", "MX")
check_nxdomain(server, "google.technol.", "TXT")
check_nxdomain(server, "google.technolo.", "NS")
def check_nxdomain(server, query, query_type, authority=False):
"""
Tests that the DNS response marks the query as NXDOMAIN
"""
dns_query = IP(dst="127.0.0.1") / UDP(dport=53) / \
DNS_(rd=1, qd=DNSQR(qname=query, qtype=query_type))
dns_query = IP(raw(dns_query))
response = server.build_dns_response(dns_query)
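    # rcode 3 is NXDOMAIN in RFC 1035 terms; ancount 0 means no answer records.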
assert response[DNS_].rcode == 3
assert response[DNS_].ancount == 0
if authority is True:
assert response[DNS_].aa == 1
def get_value(record, query_type):
"""
Gets the value (rdata) of a specific resource record
"""
if query_type == "TXT":
return dns.rdata._escapify(record.strings[0])
elif query_type == "MX":
return (struct.pack("!H", record.preference) + record.exchange.to_wire(None, None)).decode('utf-8')
return record.to_text()
def check_record_exists(server, query, query_type, authority=False):
"""
Checks if there is at least one resource record.
Optionally, check if the DNS response has the "Authoritative Answer" flag set
"""
dns_query = IP(dst="127.0.0.1") / UDP(dport=53) / \
DNS_(rd=1, qd=DNSQR(qname=query, qtype=query_type))
dns_query = IP(raw(dns_query))
response = server.build_dns_response(dns_query)
assert response[DNS_].rcode == 0
assert response[DNS_].ancount > 0
assert response[DNS_].an[0].rdata != ''
if authority is True:
assert response[DNS_].aa == 1
def check_records(server, answer, query, query_type, authority=False, error=None, other_query=None):
"""
    Checks that the record values & record count match (if error is None)
Otherwise, if error is specified, then it checks to make sure that the error is achieved
Optionally, check if the DNS response has the "Authoritative Answer" flag set
"""
dns_query = IP(dst="127.0.0.1") / UDP(dport=53) / \
DNS_(rd=1, qd=DNSQR(qname=query, qtype=query_type))
dns_query = IP(raw(dns_query))
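    # Round-tripping through raw() makes scapy compute lengths/checksums as if
    # the packet had been read off the wire, matching what the server parses.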
response = server.build_dns_response(dns_query)
if other_query is None:
data = answer.find_rdataset(query, query_type)
else:
data = answer.find_rdataset(other_query, query_type)
if error is None:
assert len(data) == response[DNS_].ancount
for i in range(response[DNS_].ancount):
# DEBUGGING REQUIRED FOR SCAPY UPGRADES to field types
# print("Comparison check")
# print(type(response[DNS_].an[i].type))
# print(response[DNS_].an[i].type)
# print(response[DNS_].an[i].show())
# print(type(response[DNS_].an[i].rdata))
# print(response[DNS_].an[i].rdata)
if response[DNS_].an[i].type == 16: # TXT
assert get_value(data[i], query_type) == response[DNS_].an[i].rdata[0]
continue
elif response[DNS_].an[i].type == 1: # A
assert get_value(data[i], query_type) == response[DNS_].an[i].rdata
continue
assert get_value(data[i], query_type) == response[DNS_].an[i].rdata.decode('utf-8')
elif error == RECORD_COUNT_ERROR:
assert len(data) != response[DNS_].ancount
elif error == RECORD_VALUE_ERROR:
assert len(data) == response[DNS_].ancount
for i in range(response[DNS_].ancount):
# DEBUGGING REQUIRED FOR SCAPY UPGRADES to field types
# print("Comparison check")
# print(type(response[DNS_].an[i].type))
# print(response[DNS_].an[i].type)
# print(response[DNS_].an[i].show())
# print(type(response[DNS_].an[i].rdata))
# print(response[DNS_].an[i].rdata)
if response[DNS_].an[i].type == 16: # TXT
assert get_value(data[i], query_type) != response[DNS_].an[i].rdata[0]
continue
elif response[DNS_].an[i].type == 1: # A
assert get_value(data[i], query_type) != response[DNS_].an[i].rdata
continue
assert get_value(data[i], query_type) != response[DNS_].an[i].rdata.decode('utf-8')
if authority is True:
assert response[DNS_].aa == 1
|
33773c27a58711669b1b0ad2fcb0706a1f0e504b
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/cv/AttGAN/preprocess.py
|
eb91b27f116dcc7ac00e8ff9142b6c828673e5ea
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 4,882
|
py
|
preprocess.py
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""pre process for 310 inference"""
import os
from os.path import join
import argparse
import numpy as np
selected_attrs = [
'Bald', 'Bangs', 'Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Bushy_Eyebrows',
'Eyeglasses', 'Male', 'Mouth_Slightly_Open', 'Mustache', 'No_Beard', 'Pale_Skin', 'Young'
]
def parse(arg=None):
"""Define configuration of preprocess"""
parser = argparse.ArgumentParser()
parser.add_argument('--attrs', dest='attrs', default=selected_attrs, nargs='+', help='attributes to learn')
parser.add_argument('--attrs_path', type=str, default='../data/list_attr_custom.txt')
parser.add_argument('--test_int', dest='test_int', type=float, default=1.0)
parser.add_argument('--thres_int', dest='thres_int', type=float, default=0.5)
return parser.parse_args(arg)
args = parse()
args.n_attrs = len(args.attrs)
def check_attribute_conflict(att_batch, att_name, att_names):
"""Check Attributes"""
def _set(att, att_name):
if att_name in att_names:
att[att_names.index(att_name)] = 0.0
att_id = att_names.index(att_name)
for att in att_batch:
if att_name in ['Bald', 'Receding_Hairline'] and att[att_id] != 0:
_set(att, 'Bangs')
elif att_name == 'Bangs' and att[att_id] != 0:
_set(att, 'Bald')
_set(att, 'Receding_Hairline')
elif att_name in ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair'] and att[att_id] != 0:
for n in ['Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Gray_Hair']:
if n != att_name:
_set(att, n)
elif att_name in ['Straight_Hair', 'Wavy_Hair'] and att[att_id] != 0:
for n in ['Straight_Hair', 'Wavy_Hair']:
if n != att_name:
_set(att, n)
elif att_name in ['Mustache', 'No_Beard'] and att[att_id] != 0:
for n in ['Mustache', 'No_Beard']:
if n != att_name:
_set(att, n)
return att_batch
def read_cfg_file(attr_path):
"""Read configuration from attribute file"""
    # Read the header once instead of opening the file twice without closing.
    with open(attr_path, "r", encoding="utf-8") as f:
        header = f.readlines()
    attr_number = int(header[0])
    attr_list = header[1].split()
    atts = [attr_list.index(att) + 1 for att in selected_attrs]
    # np.int was removed in NumPy 1.24; the builtin int behaves the same here.
    labels = np.loadtxt(attr_path, skiprows=2, usecols=atts, dtype=int)
labels = [labels] if attr_number == 1 else labels[0:]
new_attr = []
for index in range(attr_number):
att = [np.asarray((labels[index] + 1) // 2)]
new_attr.append(att)
new_attr = np.array(new_attr)
return new_attr, attr_number
def preprocess_cfg(attrs, numbers):
"""Preprocess attribute file"""
new_attr = []
for index in range(numbers):
attr = attrs[index]
att_b_list = [attr]
for i in range(args.n_attrs):
tmp = attr.copy()
tmp[:, i] = 1 - tmp[:, i]
tmp = check_attribute_conflict(tmp, selected_attrs[i], selected_attrs)
att_b_list.append(tmp)
for i, att_b in enumerate(att_b_list):
att_b_ = (att_b * 2 - 1) * args.thres_int
if i > 0:
att_b_[..., i - 1] = att_b_[..., i - 1] * args.test_int / args.thres_int
new_attr.append(att_b_)
return new_attr
def write_cfg_file(attrs, numbers):
"""Write attribute file"""
cur_dir = os.getcwd()
print(cur_dir)
path = join(cur_dir, 'attrs.txt')
with open(path, "w") as f:
f.writelines(str(numbers))
f.writelines("\n")
f.writelines(str(args.n_attrs))
f.writelines("\n")
counts = numbers * args.n_attrs
for index in range(counts):
attrs_list = attrs[index][0]
new_attrs_list = ["%s" % x for x in attrs_list]
sequence = " ".join(new_attrs_list)
f.writelines(sequence)
f.writelines("\n")
print("Generate cfg file successfully.")
if __name__ == "__main__":
if args.attrs_path is None:
print("Path is not correct!")
attributes, n_images = read_cfg_file(args.attrs_path)
new_attrs = preprocess_cfg(attributes, n_images)
write_cfg_file(new_attrs, n_images)
|
8da89c18915a97732a6e79d90ada3a3e3efdf1cf
|
b28019656242b4245d31b4c9179c5aa20cf69801
|
/lsp_model/__init__.py
|
20e879b7ff0dedd01490b38a126e3d310edd4910
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
microsoft/DialoGPT
|
e45f859f4a9ce167b5f6be787323302d6535e9fe
|
61194e44363a48b4cb323752a4bbb33c8a84a8a4
|
refs/heads/master
| 2023-09-04T06:39:30.550296
| 2022-10-17T23:41:52
| 2022-10-17T23:41:52
| 205,250,704
| 2,274
| 377
|
MIT
| 2022-09-09T20:29:15
| 2019-08-29T21:07:46
|
Python
|
UTF-8
|
Python
| false
| false
| 401
|
py
|
__init__.py
|
__version__ = "0.0.1"
from pytorch_pretrained_bert.tokenization_gpt2 import GPT2Tokenizer
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, cached_path
from pytorch_pretrained_bert.modeling_gpt2 import GPT2Config, GPT2Model
from .modeling_gpt2 import GPT2LMHeadModel
from .optim import Adam
|
c1f6f234780cfd4eab1e763d52bb775415a87adc
|
8f2c55a2530c3e59dab5907c0044c618b88dd09b
|
/third_party/isort_container/isort/__main__.py
|
91cc154dcb62bae3eed88014d0e0d03028af4c6e
|
[
"Apache-2.0",
"EPL-1.0"
] |
permissive
|
fabioz/PyDev.Debugger
|
5a9c6d4c09be85a0e2d9fb93567fd65faf04c81d
|
26864816cbfcf002a99913bcc31ebef48042a4ac
|
refs/heads/main
| 2023-08-18T01:08:34.323363
| 2023-04-15T11:15:47
| 2023-04-15T11:15:47
| 21,870,144
| 363
| 126
|
Apache-2.0
| 2023-07-30T23:03:31
| 2014-07-15T18:01:12
|
Python
|
UTF-8
|
Python
| false
| false
| 205
|
py
|
__main__.py
|
from __future__ import absolute_import
from isort.pie_slice import apply_changes_to_python_environment
apply_changes_to_python_environment()
from isort.main import main # noqa: E402 isort:skip
main()
|
b1bfa44078ab2dc1366638a28053dffd310976c4
|
e7f662e18141c11bb452a31b844dd5555cca11dd
|
/nitime/analysis/correlation.py
|
27359c4fd03be61a664269c77e6856aa0621102e
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
nipy/nitime
|
75c7786e17514ecc7045c42760cd42b1c16ca0d6
|
4411b3047e37d21354e399aaaca77810f50c5fe2
|
refs/heads/master
| 2023-06-29T21:13:24.231303
| 2023-06-15T22:33:13
| 2023-06-15T22:33:13
| 294,865
| 198
| 69
|
BSD-3-Clause
| 2023-08-25T20:49:40
| 2009-09-02T00:29:57
|
Python
|
UTF-8
|
Python
| false
| false
| 5,327
|
py
|
correlation.py
|
import numpy as np
from nitime import descriptors as desc
from nitime import timeseries as ts
from nitime import algorithms as tsa
# To support older versions of numpy that don't have tril_indices:
from nitime.index_utils import tril_indices
from .base import BaseAnalyzer
class CorrelationAnalyzer(BaseAnalyzer):
"""Analyzer object for correlation analysis. Has the same API as the
CoherenceAnalyzer"""
def __init__(self, input=None):
"""
Parameters
----------
input : TimeSeries object
Containing the data to analyze.
Examples
--------
>>> np.set_printoptions(precision=4) # for doctesting
>>> t1 = ts.TimeSeries(data = np.sin(np.arange(0,
... 10*np.pi,10*np.pi/100)).reshape(2,50),
... sampling_rate=np.pi)
        >>> c1 = CorrelationAnalyzer(t1)
>>> c1.corrcoef
array([[ 1., -1.],
[-1., 1.]])
>>> c1.xcorr.sampling_rate # doctest: +ELLIPSIS
3.141592653... Hz
>>> c1.xcorr.t0 # doctest: +ELLIPSIS
-15.91549430915... s
"""
BaseAnalyzer.__init__(self, input)
@desc.setattr_on_read
def corrcoef(self):
"""The correlation coefficient between every pairwise combination of
time-series contained in the object"""
return np.corrcoef(self.input.data)
@desc.setattr_on_read
def xcorr(self):
"""The cross-correlation between every pairwise combination time-series
in the object. Uses np.correlation('full').
Returns
-------
TimeSeries : the time-dependent cross-correlation, with zero-lag
at time=0
"""
tseries_length = self.input.data.shape[0]
t_points = self.input.data.shape[-1]
xcorr = np.zeros((tseries_length,
tseries_length,
t_points * 2 - 1))
data = self.input.data
for i in range(tseries_length):
data_i = data[i]
for j in range(i, tseries_length):
xcorr[i, j] = np.correlate(data_i,
data[j],
mode='full')
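        # Only pairs with j >= i were computed; mirror them into the lower
        # triangle so xcorr[j, i] is populated as well.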
idx = tril_indices(tseries_length, -1)
xcorr[idx[0], idx[1], ...] = xcorr[idx[1], idx[0], ...]
return ts.TimeSeries(xcorr,
sampling_interval=self.input.sampling_interval,
t0=-self.input.sampling_interval * t_points)
@desc.setattr_on_read
def xcorr_norm(self):
"""The cross-correlation between every pairwise combination time-series
in the object, where the zero lag correlation is normalized to be equal
to the correlation coefficient between the time-series
Returns
-------
TimeSeries : A TimeSeries object
the time-dependent cross-correlation, with zero-lag at time=0
"""
tseries_length = self.input.data.shape[0]
t_points = self.input.data.shape[-1]
xcorr = np.zeros((tseries_length,
tseries_length,
t_points * 2 - 1))
data = self.input.data
for i in range(tseries_length):
data_i = data[i]
for j in range(i, tseries_length):
xcorr[i, j] = np.correlate(data_i,
data[j],
mode='full')
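                # Scale each pair so its zero-lag correlation matches the
                # Pearson correlation coefficient (see the docstring above).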
xcorr[i, j] /= (xcorr[i, j, t_points])
xcorr[i, j] *= self.corrcoef[i, j]
idx = tril_indices(tseries_length, -1)
xcorr[idx[0], idx[1], ...] = xcorr[idx[1], idx[0], ...]
return ts.TimeSeries(xcorr,
sampling_interval=self.input.sampling_interval,
t0=-self.input.sampling_interval * t_points)
class SeedCorrelationAnalyzer(object):
"""
This analyzer takes two time-series. The first is designated as a
time-series of seeds. The other is designated as a time-series of targets.
The analyzer performs a correlation analysis between each of the channels
in the seed time-series and *all* of the channels in the target
time-series.
"""
def __init__(self, seed_time_series=None, target_time_series=None):
"""
Parameters
----------
seed_time_series : a TimeSeries object
target_time_series : a TimeSeries object
"""
self.seed = seed_time_series
self.target = target_time_series
@desc.setattr_on_read
def corrcoef(self):
#If there is more than one channel in the seed time-series:
if len(self.seed.shape) > 1:
# Preallocate results
Cxy = np.empty((self.seed.data.shape[0],
self.target.data.shape[0]), dtype=float)
for seed_idx, this_seed in enumerate(self.seed.data):
Cxy[seed_idx] = tsa.seed_corrcoef(this_seed, self.target.data)
#In the case where there is only one channel in the seed time-series:
else:
Cxy = tsa.seed_corrcoef(self.seed.data, self.target.data)
return Cxy.squeeze()
|
971592a22bb06af22e8ff0e53d84c8ccc44ca9e7
|
ff5ef4d13a31943d8b22342eb3d6b3291b44f15f
|
/pychromecast/controllers/plex.py
|
f610522272728dacb5e31483860b1be0abe11291
|
[
"MIT"
] |
permissive
|
home-assistant-libs/pychromecast
|
d158252ca2d3ef58c75153ed451ba5de141c0611
|
a21d3ac9c2e55d34b3834251186877a17ea869ce
|
refs/heads/master
| 2023-09-05T12:28:13.014909
| 2023-09-05T11:16:20
| 2023-09-05T11:16:20
| 13,265,524
| 574
| 137
|
MIT
| 2023-09-12T09:55:29
| 2013-10-02T07:58:34
|
Python
|
UTF-8
|
Python
| false
| false
| 18,187
|
py
|
plex.py
|
"""
Controller to interface with the Plex-app.
"""
import json
import threading
from copy import deepcopy
from urllib.parse import urlparse
from . import BaseController
from ..const import MESSAGE_TYPE
STREAM_TYPE_UNKNOWN = "UNKNOWN"
STREAM_TYPE_BUFFERED = "BUFFERED"
STREAM_TYPE_LIVE = "LIVE"
SEEK_KEY = "currentTime"
TYPE_PLAY = "PLAY"
TYPE_PAUSE = "PAUSE"
TYPE_STOP = "STOP"
TYPE_STEPFORWARD = "STEPFORWARD"
TYPE_STEPBACKWARD = "STEPBACK"
TYPE_PREVIOUS = "PREVIOUS"
TYPE_NEXT = "NEXT"
TYPE_LOAD = "LOAD"
TYPE_DETAILS = "SHOWDETAILS"
TYPE_SEEK = "SEEK"
TYPE_MEDIA_STATUS = "MEDIA_STATUS"
TYPE_GET_STATUS = "GET_STATUS"
TYPE_EDIT_TRACKS_INFO = "EDIT_TRACKS_INFO"
def media_to_chromecast_command(
media=None,
type="LOAD", # pylint: disable=redefined-builtin
requestId=1,
offset=0,
directPlay=True,
directStream=True,
subtitleSize=100,
audioBoost=100,
transcoderVideo=True,
transcoderVideoRemuxOnly=False,
transcoderAudio=True,
isVerifiedHostname=True,
contentType="video",
myPlexSubscription=True,
contentId=None,
streamType=STREAM_TYPE_BUFFERED,
port=32400,
protocol="http",
address=None,
username=None,
autoplay=True,
currentTime=0,
playQueue=None,
playQueueID=None,
startItem=None,
version="1.10.1.4602",
**kwargs,
): # pylint: disable=invalid-name, too-many-locals, protected-access
"""Create the message that chromecast requires. Use pass of plexapi media object or
set all the needed kwargs manually. See the code for what to set.
Args:
        media (None, optional): a :class:`~plexapi.base.Playable` object.
type (str): Default LOAD, SHOWDETAILS.
requestId (int): The requestId, Chromecasts may use this.
offset (int): Offset of the playback in seconds.
directPlay (bool): Default True
directStream (bool): Default True
subtitleSize (int): Set the subtitle size, possibly only 100 & 200.
audioBoost (int): Default 100
transcoderVideo (bool): Default True
transcoderVideoRemuxOnly (bool): Default False
transcoderAudio (bool): Default True
isVerifiedHostname (bool): Default True
contentType (str): Default 'video', 'audio'
myPlexSubscription (bool): True if user has a PlexPass.
contentId (str): The key Chromecasts use to start playback.
streamType (str): Default BUFFERED, LIVE
port (int): PMS port
address (str): PMS host, without scheme.
username (None): Username of the user that started playback.
autoplay (bool): Auto play after the video is done.
currentTime (int): Set playback from this time. default 0
version (str): PMS version. Default 1.10.1.4602
startItem (:class:`~plexapi.media.Media`, optional): Media item in list/playlist/playqueue where playback should
start. Overrides existing startItem for playqueues if set.
**kwargs: To allow overrides, this will be merged with the rest of the msg.
Returns:
        dict: Returns a dict formatted correctly to start playback on a Chromecast.
"""
if media is not None:
# Lets set some params for the user if they use plexapi.
server = media[0]._server if isinstance(media, list) else media._server
server_url = urlparse(server._baseurl)
protocol = server_url.scheme
address = server_url.hostname
port = server_url.port
machineIdentifier = server.machineIdentifier
token = server._token
username = server.myPlexUsername
myPlexSubscription = server.myPlexSubscription
if getattr(media, "TYPE", None) == "playqueue":
if startItem:
media = media.items
else:
playQueue = media
if playQueue is None:
playQueue = server.createPlayQueue(media, startItem=startItem)
playQueueID = playQueue.playQueueID
contentId = playQueue.selectedItem.key
contentType = playQueue.items[0].listType
version = server.version
# Chromecasts seem to start playback 5 seconds before the offset.
if offset != 0:
currentTime = offset
msg = {
"type": type,
"requestId": requestId,
"media": {
"contentId": contentId,
"streamType": streamType,
"contentType": contentType,
"customData": {
"offset": offset,
"directPlay": directPlay,
"directStream": directStream,
"subtitleSize": subtitleSize,
"audioBoost": audioBoost,
"server": {
"machineIdentifier": machineIdentifier,
"transcoderVideo": transcoderVideo,
"transcoderVideoRemuxOnly": transcoderVideoRemuxOnly,
"transcoderAudio": transcoderAudio,
"version": version,
"myPlexSubscription": myPlexSubscription,
"isVerifiedHostname": isVerifiedHostname,
"protocol": protocol,
"address": address,
"port": port,
"accessToken": token,
"user": {"username": username},
},
"containerKey": f"/playQueues/{playQueueID}?own=1&window=200",
},
"autoplay": autoplay,
"currentTime": currentTime,
"activeTrackIds": None,
},
}
# Allow passing of kwargs to the dict.
msg.update(kwargs)
return msg
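# Usage sketch (illustrative names, assuming a connected plexapi PlexServer):
#   movie = plex.library.section("Movies").get("Some Movie")
#   msg = media_to_chromecast_command(movie, requestId=1, offset=30)
# Note that the kwargs-only path (media=None) still references the locals
# machineIdentifier/token above, which are only bound when media is passed.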
@property
def episode_title(self):
"""Return episode title."""
return self.media_metadata.get("subtitle")
class PlexController(BaseController):
# pylint: disable=too-many-public-methods
"""Controller to interact with Plex namespace."""
def __init__(self):
super().__init__("urn:x-cast:plex", "9AC194DC")
self.app_id = "9AC194DC"
self.namespace = "urn:x-cast:plex"
self.request_id = 0
self.play_media_event = threading.Event()
self._last_play_msg = {}
def _send_cmd(
self,
msg,
namespace=None,
inc_session_id=False,
callback_function=None,
inc=True,
): # pylint: disable=too-many-arguments
"""Wrapper for the commands.
Args:
msg (dict): The actual command that will be sent.
namespace (None, optional): What namespace should be used to send this.
inc_session_id (bool, optional): Include session ID.
callback_function (None, optional): If callback is provided it is
executed after the command.
inc (bool, optional): Increase the requestsId.
"""
self.logger.debug(
"Sending msg %r %s %s %s %s.",
msg,
namespace,
inc_session_id,
callback_function,
inc,
)
if inc:
self._inc_request()
if namespace:
old = self.namespace
try:
self.namespace = namespace
self.send_message(
msg,
inc_session_id=inc_session_id,
callback_function=callback_function,
)
finally:
self.namespace = old
else:
self.send_message(
msg, inc_session_id=inc_session_id, callback_function=callback_function
)
def _inc_request(self):
# Is this getting passed to Plex?
self.request_id += 1
return self.request_id
def channel_connected(self):
"""Updates status when a media channel is connected."""
self.update_status()
def receive_message(self, _message, data: dict):
"""Called when a message from Plex to our controller is received.
        I haven't seen any message for it yet, but let's keep it for now;
        it has had only minimal testing.
        Args:
            _message: The raw cast message (unused here).
data (dict): message.payload_utf8 interpreted as a JSON dict.
Returns:
bool: True if the message is handled.
"""
if data[MESSAGE_TYPE] == TYPE_MEDIA_STATUS:
self.logger.debug("(PlexController) MESSAGE RECEIVED: %r.", data)
return True
return False
def update_status(self, callback_function_param=False):
"""Send message to update status."""
self.send_message(
{MESSAGE_TYPE: TYPE_GET_STATUS}, callback_function=callback_function_param
)
def stop(self):
"""Send stop command."""
self._send_cmd({MESSAGE_TYPE: TYPE_STOP})
def pause(self):
"""Send pause command."""
self._send_cmd({MESSAGE_TYPE: TYPE_PAUSE})
def play(self):
"""Send play command."""
self._send_cmd({MESSAGE_TYPE: TYPE_PLAY})
def previous(self):
"""Send previous command."""
self._send_cmd({MESSAGE_TYPE: TYPE_PREVIOUS})
def next(self):
"""Send next command."""
self._send_cmd({MESSAGE_TYPE: TYPE_NEXT})
def seek(self, position, resume_state="PLAYBACK_START"):
"""Send seek command.
Args:
position (int): Offset in seconds.
resume_state (str, default): PLAYBACK_START
"""
self._send_cmd(
{MESSAGE_TYPE: TYPE_SEEK, SEEK_KEY: position, "resumeState": resume_state}
)
def rewind(self):
"""Rewind back to the start."""
self.seek(0)
def set_volume(self, percent):
"""Set the volume in percent (1-100).
Args:
percent (int): Percent of volume to be set.
"""
self._socket_client.receiver_controller.set_volume(float(percent / 100))
def volume_up(self, delta=0.1):
"""Increment volume by 0.1 (or delta) unless at max.
Returns the new volume.
"""
if delta <= 0:
raise ValueError(f"volume delta must be greater than zero, not {delta}")
return self.set_volume(self.status.volume_level + delta)
def volume_down(self, delta=0.1):
"""Decrement the volume by 0.1 (or delta) unless at 0.
Returns the new volume.
"""
if delta <= 0:
raise ValueError(f"volume delta must be greater than zero, not {delta}")
return self.set_volume(self.status.volume_level - delta)
def mute(self, status=None):
"""Toggle muting of audio.
Args:
status (None, optional): Override for on/off.
"""
if status is None:
status = not self.status.volume_muted
self._socket_client.receiver_controller.set_volume_muted(status)
def show_media(self, media=None, **kwargs):
"""Show media item's info on screen."""
msg = media_to_chromecast_command(
media, type=TYPE_DETAILS, requestId=self._inc_request(), **kwargs
)
def callback(): # pylint: disable=missing-docstring
self._send_cmd(msg, inc_session_id=True, inc=False)
self.launch(callback)
def quit_app(self):
"""Quit the Plex app."""
self._socket_client.receiver_controller.stop_app()
@property
def status(self):
"""Get the Chromecast's playing status.
Returns:
pychromecast.controllers.media.MediaStatus: Slightly modified status with patched
method for episode_title.
"""
status = self._socket_client.media_controller.status
status.episode_title = episode_title
return status
def _reset_playback(self, offset=None):
"""Reset playback.
Args:
offset (None, optional): Start playback from this offset in seconds,
otherwise playback will start from current time.
"""
if self._last_play_msg:
offset_now = self.status.adjusted_current_time
msg = deepcopy(self._last_play_msg)
msg["media"]["customData"]["offset"] = (
offset_now if offset is None else offset
)
msg["current_time"] = offset_now
self._send_cmd(
msg,
namespace="urn:x-cast:com.google.cast.media",
inc_session_id=True,
inc=False,
)
else:
self.logger.debug(
"Can not reset the stream, _last_play_msg "
"was not set with _send_start_play."
)
def _send_start_play(self, media=None, **kwargs):
"""Helper to send a playback command.
Args:
            media (None, optional): A :class:`~plexapi.base.Playable`.
            **kwargs: See the media_to_chromecast_command docstring.
"""
msg = media_to_chromecast_command(
            media, requestId=self._inc_request(), **kwargs
)
self.logger.debug("Create command: \n%r\n", json.dumps(msg, indent=4))
self._last_play_msg = msg
self._send_cmd(
msg,
namespace="urn:x-cast:com.google.cast.media",
inc_session_id=True,
inc=False,
)
def block_until_playing(self, media=None, timeout=None, **kwargs):
"""Block until media is playing, typically useful in a script.
Another way to do the same is to check if the
controller is_active or by using self.status.player_state.
Args:
            media (None, optional): Can also be a :class:`~plexapi.base.Playable`;
                if not, you need to fill out all the kwargs.
            timeout (None, optional): Seconds to wait for playback to start.
            **kwargs: See the media_to_chromecast_command docstring.
"""
        # In case media isn't playing.
self.play_media_event.clear()
self.play_media(media, **kwargs)
self.play_media_event.wait(timeout)
self.play_media_event.clear()
def play_media(self, media=None, **kwargs):
"""Start playback on the Chromecast.
Args:
            media (None, optional): Can also be a :class:`~plexapi.base.Playable`;
                if not, you need to fill out all the kwargs.
            **kwargs: See the media_to_chromecast_command docstring.
"""
self.play_media_event.clear()
def app_launched_callback(): # pylint: disable=missing-docstring
try:
self._send_start_play(media, **kwargs)
finally:
self.play_media_event.set()
self.launch(app_launched_callback)
def join(self, timeout=None):
"""Join the thread."""
self._socket_client.join(timeout=timeout)
def disconnect(self, timeout=None, blocking=True):
"""Disconnect the controller."""
self._socket_client.disconnect()
if blocking:
self.join(timeout=timeout)
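# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal, hedged example of wiring PlexController to a Chromecast via
# pychromecast. `media_item` is assumed to be a plexapi Playable fetched
# elsewhere; discovery details may differ between pychromecast versions.
#
#   import pychromecast
#
#   chromecasts, browser = pychromecast.get_chromecasts()
#   cast = chromecasts[0]
#   cast.wait()                          # block until the device is ready
#   plex = PlexController()
#   cast.register_handler(plex)          # route the Plex namespace to us
#   plex.block_until_playing(media_item, timeout=30)
#   plex.pause()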
# pylint: disable=too-many-public-methods
class PlexApiController(PlexController):
"""A controller that can use PlexAPI."""
def __init__(self, pms):
super().__init__()
self.pms = pms
def _get_current_media(self):
"""Get current media_item, media, & part for PMS."""
key = int(self.status.content_id.split("/")[-1])
media_item = self.pms.fetchItem(key).reload()
media_idx = self.status.media_custom_data.get("mediaIndex", 0)
part_idx = self.status.media_custom_data.get("partIndex", 0)
media = media_item.media[media_idx]
part = media.parts[part_idx]
return media_item, media, part
def _change_track(self, track, type_="subtitle", reset_playback=True):
"""Sets a new default audio/subtitle track.
Args:
            track: The chosen track (index, language, or language code).
type_ (str): The type of track.
reset_playback (bool, optional): Reset playback after the track has
been changed.
Raises:
ValueError: If type isn't subtitle or audio.
"""
item, _, part = self._get_current_media()
if type_ == "subtitle":
method = part.subtitleStreams()
default = part.setDefaultSubtitleStream
elif type_ == "audio":
method = part.audioStreams()
default = part.setDefaultAudioStream
else:
raise ValueError("Set type parameter as subtitle or audio.")
for track_ in method:
if track in (track_.index, track_.language, track_.languageCode):
self.logger.debug("Change %s to %s.", type_, track)
default(track_)
break
item.reload()
if reset_playback:
self._reset_playback()
def enable_audiotrack(self, audio):
"""Enable an audiotrack.
Args:
audio (str): Can be index, language or languageCode.
"""
        self._change_track(audio, "audio")
def disable_subtitle(self):
"""Disable a subtitle track."""
        _, __, part = self._get_current_media()
part.resetDefaultSubtitleStream()
self._reset_playback()
def enable_subtitle(self, subtitle):
"""Enable a subtitle track.
Args:
subtitle (str): Can be index, language or languageCode.
"""
self._change_track(subtitle)
def play_media(self, media=None, **kwargs):
"""Start playback on the Chromecast.
Args:
            media (None, optional): Can also be a :class:`~plexapi.base.Playable`;
                if not, you need to fill out all the kwargs.
            **kwargs: See the media_to_chromecast_command docstring. `version` is set
to the version of the PMS reported by the API by default.
"""
args = {"version": self.pms.version}
args.update(kwargs)
super().play_media(media, **args)
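# --- Usage sketch (illustrative addition, not part of the original module) ---
# Hedged example: PlexApiController needs a plexapi server handle so it can
# resolve the currently playing item. `baseurl`, `token` and `cast` are
# placeholders (see the PlexController sketch above for `cast`).
#
#   from plexapi.server import PlexServer
#
#   pms = PlexServer(baseurl, token)
#   controller = PlexApiController(pms)
#   cast.register_handler(controller)
#   controller.enable_subtitle("en")     # by index, language or language code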
|
77fd3911f197a4a0ee00880b0d571f0efe159116
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/cloudformation/graph_manager.py
|
df8881456a366d2c39c462f657fbf9982b94fa75
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,905
|
py
|
graph_manager.py
|
from __future__ import annotations
import json
import logging
from typing import List, Dict, Optional, Tuple, TYPE_CHECKING, Any
from checkov.cloudformation.cfn_utils import get_folder_definitions
from checkov.cloudformation.context_parser import ContextParser
from checkov.cloudformation.graph_builder.graph_to_definitions import convert_graph_vertices_to_definitions
from checkov.cloudformation.graph_builder.local_graph import CloudformationLocalGraph
from checkov.common.graph.graph_builder.consts import GraphSource
from checkov.common.graph.graph_manager import GraphManager
if TYPE_CHECKING:
from checkov.common.typing import LibraryGraphConnector
class CloudformationGraphManager(GraphManager[CloudformationLocalGraph, "dict[str, dict[str, Any]]"]):
def __init__(self, db_connector: LibraryGraphConnector, source: str = GraphSource.CLOUDFORMATION) -> None:
super().__init__(db_connector=db_connector, parser=None, source=source)
def build_graph_from_source_directory(
self,
source_dir: str,
local_graph_class: type[CloudformationLocalGraph] = CloudformationLocalGraph,
render_variables: bool = True,
parsing_errors: Optional[Dict[str, Exception]] = None,
download_external_modules: bool = False,
excluded_paths: Optional[List[str]] = None,
) -> Tuple[CloudformationLocalGraph, dict[str, dict[str, Any]]]:
logging.info(f"[CloudformationGraphManager] Parsing files in source dir {source_dir}")
parsing_errors = {} if parsing_errors is None else parsing_errors
definitions, definitions_raw = get_folder_definitions(source_dir, excluded_paths, parsing_errors) # type:ignore[arg-type]
local_graph = self.build_graph_from_definitions(definitions, render_variables)
rendered_definitions, _ = convert_graph_vertices_to_definitions(local_graph.vertices, source_dir)
# TODO: replace with real graph rendering
for cf_file in rendered_definitions.keys():
file_definition = rendered_definitions.get(cf_file, None)
file_definition_raw = definitions_raw.get(cf_file, None)
if file_definition is not None and file_definition_raw is not None:
cf_context_parser = ContextParser(cf_file, file_definition, file_definition_raw)
logging.debug(
f"Template Dump for {cf_file}: {json.dumps(file_definition, indent=2, default=str)}"
)
cf_context_parser.evaluate_default_refs()
return local_graph, rendered_definitions
def build_graph_from_definitions(
self, definitions: dict[str, dict[str, Any]], render_variables: bool = True
) -> CloudformationLocalGraph:
local_graph = CloudformationLocalGraph(definitions, source=self.source)
local_graph.build_graph(render_variables=render_variables)
return local_graph
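# --- Usage sketch (illustrative addition, not part of the original module) ---
# Hedged example of building a graph from a directory of CloudFormation
# templates. NetworkxConnector is assumed to satisfy LibraryGraphConnector;
# the exact import path may vary between checkov versions.
#
#   from checkov.common.graph.db_connectors.networkx.networkx_db_connector import (
#       NetworkxConnector,
#   )
#
#   manager = CloudformationGraphManager(db_connector=NetworkxConnector())
#   local_graph, definitions = manager.build_graph_from_source_directory("./templates")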
|
19eb938b94c3774e01a31859abe556c7885ee383
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/light/test_significant_change.py
|
6bececc02443581530c1b3eec68f0e8075c09f76
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
test_significant_change.py
|
"""Test the Light significant change platform."""
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
)
from homeassistant.components.light.significant_change import (
async_check_significant_change,
)
async def test_significant_change() -> None:
"""Detect Light significant changes."""
assert not async_check_significant_change(None, "on", {}, "on", {})
assert async_check_significant_change(None, "on", {}, "off", {})
# Brightness
assert not async_check_significant_change(
None, "on", {ATTR_BRIGHTNESS: 60}, "on", {ATTR_BRIGHTNESS: 61}
)
assert async_check_significant_change(
None, "on", {ATTR_BRIGHTNESS: 60}, "on", {ATTR_BRIGHTNESS: 63}
)
# Color temp
assert not async_check_significant_change(
None, "on", {ATTR_COLOR_TEMP: 60}, "on", {ATTR_COLOR_TEMP: 64}
)
assert async_check_significant_change(
None, "on", {ATTR_COLOR_TEMP: 60}, "on", {ATTR_COLOR_TEMP: 65}
)
# Effect
for eff1, eff2, expected in (
(None, None, False),
(None, "colorloop", True),
("colorloop", None, True),
("colorloop", "jump", True),
("colorloop", "colorloop", False),
):
result = async_check_significant_change(
None, "on", {ATTR_EFFECT: eff1}, "on", {ATTR_EFFECT: eff2}
)
assert result is expected
# Hue
assert not async_check_significant_change(
None, "on", {ATTR_HS_COLOR: [120, 20]}, "on", {ATTR_HS_COLOR: [124, 20]}
)
assert async_check_significant_change(
None, "on", {ATTR_HS_COLOR: [120, 20]}, "on", {ATTR_HS_COLOR: [125, 20]}
)
    # Saturation
assert not async_check_significant_change(
None, "on", {ATTR_HS_COLOR: [120, 20]}, "on", {ATTR_HS_COLOR: [120, 22]}
)
assert async_check_significant_change(
None, "on", {ATTR_HS_COLOR: [120, 20]}, "on", {ATTR_HS_COLOR: [120, 23]}
)
|
5ac08a48ebf06ac4d913e553dcb291bb08a8edfd
|
f79dec3c4033ca3cbb55d8a51a748cc7b8b6fbab
|
/lang/nodejs4/patches/patch-tools_install.py
|
76368b1e3760eb8fc0111e030cdedd7c8ade34f7
|
[] |
no_license
|
jsonn/pkgsrc
|
fb34c4a6a2d350e8e415f3c4955d4989fcd86881
|
c1514b5f4a3726d90e30aa16b0c209adbc276d17
|
refs/heads/trunk
| 2021-01-24T09:10:01.038867
| 2017-07-07T15:49:43
| 2017-07-07T15:49:43
| 2,095,004
| 106
| 47
| null | 2016-09-19T09:26:01
| 2011-07-23T23:49:04
|
Makefile
|
UTF-8
|
Python
| false
| false
| 599
|
py
|
patch-tools_install.py
|
$NetBSD: patch-tools_install.py,v 1.2 2016/01/21 11:43:44 fhajny Exp $
Install man pages under the right directory.
--- tools/install.py.orig 2016-01-20 19:09:38.000000000 +0000
+++ tools/install.py
@@ -140,10 +140,7 @@ def files(action):
action(['deps/v8/tools/gdbinit'], 'share/doc/node/')
- if 'freebsd' in sys.platform or 'openbsd' in sys.platform:
- action(['doc/node.1'], 'man/man1/')
- else:
- action(['doc/node.1'], 'share/man/man1/')
+ action(['doc/node.1'], os.environ.get('PKGMANDIR') + '/man1/')
if 'true' == variables.get('node_install_npm'): npm_files(action)
|
e331d176914c4bf46767f51d45394f29b3a777b5
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/peco_opower/__init__.py
|
a0d26cf7b136acec26684658b6f70f8e3af1b2d9
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 55
|
py
|
__init__.py
|
"""Virtual integration: PECO Energy Company (PECO)."""
|
b3e23f9891d89f173d3ed8c1faf9e8d1f4fc8a82
|
7427adca4c012c4efea43a96486a246bfc518f66
|
/toppra/algorithm/algorithm.py
|
45805a3959c55726d5de922e0743044c384b6ed2
|
[
"MIT"
] |
permissive
|
hungpham2511/toppra
|
a2f77608f6ab2be7bcceb5842fe876f3e0fcf542
|
7fd6e900897d811311f22e2e877aae565ee710ae
|
refs/heads/develop
| 2023-07-23T21:14:56.992296
| 2023-06-14T08:19:35
| 2023-06-14T08:19:35
| 97,661,949
| 497
| 161
|
MIT
| 2023-09-13T08:54:18
| 2017-07-19T02:03:19
|
Python
|
UTF-8
|
Python
| false
| false
| 7,757
|
py
|
algorithm.py
|
"""
toppra.algorithm.algorithm
^^^^^^^^^^^^^^^^^^^^^^^^^^
This module defines the abstract data types that define TOPP algorithms.
"""
from typing import Dict, Any, List, Tuple, Optional
import typing as T
import abc
import enum
import numpy as np
import time
import matplotlib.pyplot as plt
from toppra.constants import TINY
from toppra.interpolator import SplineInterpolator, AbstractGeometricPath
from toppra.constraint import Constraint
import toppra.interpolator as interpolator
import toppra.parametrizer as tparam
import logging
logger = logging.getLogger(__name__)
class ParameterizationData(object):
"""Internal data and output.
"""
def __init__(self, *arg, **kwargs) -> None:
self.return_code: ParameterizationReturnCode = ParameterizationReturnCode.ErrUnknown
"ParameterizationReturnCode: Return code of the last parametrization attempt."
self.gridpoints: Optional[np.ndarray] = None
"np.ndarray: Shape (N+1, 1). Gridpoints"
self.sd_vec: Optional[np.ndarray] = None
"np.ndarray: Shape (N+1, 1). Path velocities"
self.sdd_vec: Optional[np.ndarray] = None
"np.ndarray: Shape (N+1, 1). Path acceleration"
self.K: Optional[np.ndarray] = None
"np.ndarray: Shape (N+1, 2). Controllable sets."
self.X: Optional[np.ndarray] = None
"np.ndarray: Shape (N+1, 2). Feasible sets."
def __repr__(self):
return "ParameterizationData(return_code:={}, N={:d})".format(
self.return_code, self.gridpoints.shape[0])
class ParameterizationReturnCode(enum.Enum):
"""Return codes from a parametrization attempt.
"""
Ok = "Ok: Successful parametrization"
ErrUnknown = "Error: Unknown issue"
ErrShortPath = "Error: Input path is very short"
FailUncontrollable = "Error: Instance is not controllable"
    ErrForwardPassFail = "Error: Forward pass fail. Numerical errors occurred"
def __repr__(self):
return super(ParameterizationReturnCode, self).__repr__()
def __str__(self):
return super(ParameterizationReturnCode, self).__repr__()
class ParameterizationAlgorithm(object):
"""Base parametrization algorithm class.
This class specifies the generic behavior for parametrization algorithms. For details on how
to *construct* a :class:`ParameterizationAlgorithm` instance, as well as configure it, refer
to the specific class.
Example usage:
.. code-block:: python
# usage
instance.compute_parametrization(0, 0)
output = instance.problem_data
# do this if you only want the final trajectory
traj = instance.compute_trajectory(0, 0)
.. seealso::
:class:`toppra.algorithm.TOPPRA`,
:class:`toppra.algorithm.TOPPRAsd`,
:class:`~ParameterizationReturnCode`,
:class:`~ParameterizationData`
"""
def __init__(self, constraint_list, path, gridpoints=None, parametrizer=None,
gridpt_max_err_threshold: float=1e-3, gridpt_min_nb_points: int=100):
self.constraints = constraint_list
self.path = path # Attr
self._problem_data = ParameterizationData()
# Handle gridpoints
if gridpoints is None:
gridpoints = interpolator.propose_gridpoints(
path,
max_err_threshold=gridpt_max_err_threshold,
min_nb_points=gridpt_min_nb_points
)
logger.info(
"No gridpoint specified. Automatically choose a gridpoint with %d points",
len(gridpoints)
)
if (
path.path_interval[0] != gridpoints[0]
or path.path_interval[1] != gridpoints[-1]
):
raise ValueError("Invalid manually supplied gridpoints.")
self.gridpoints = np.array(gridpoints)
self._problem_data.gridpoints = np.array(gridpoints)
        self._N = len(gridpoints) - 1  # Number of stages. Number of points is _N + 1
for i in range(self._N):
if gridpoints[i + 1] <= gridpoints[i]:
logger.fatal("Input gridpoints are not monotonically increasing.")
raise ValueError("Bad input gridpoints.")
if parametrizer is None or parametrizer == "ParametrizeSpline":
# TODO: What is the best way to type parametrizer?
self.parametrizer: T.Any = tparam.ParametrizeSpline
elif parametrizer == "ParametrizeConstAccel":
self.parametrizer = tparam.ParametrizeConstAccel
@property
def constraints(self) -> List[Constraint]:
"""Constraints of interests."""
return self._constraints
@constraints.setter
def constraints(self, value: List[Constraint]) -> None:
# TODO: Validate constraints.
self._constraints = value
@property
def problem_data(self) -> ParameterizationData:
"""Data obtained when solving the path parametrization."""
return self._problem_data
@abc.abstractmethod
def compute_parameterization(self, sd_start: float, sd_end: float, return_data: bool=False):
"""Compute the path parameterization subject to starting and ending conditions.
After this method terminates, the attribute
:attr:`~problem_data` will contain algorithm output, as well
as the result. This is the preferred way of retrieving problem
output.
Parameters
----------
sd_start:
Starting path velocity. Must be positive.
sd_end:
Goal path velocity. Must be positive.
return_data:
If true also return the problem data.
"""
raise NotImplementedError
def compute_trajectory(self, sd_start: float = 0, sd_end: float = 0) -> Optional[AbstractGeometricPath]:
"""Compute the resulting joint trajectory and auxilliary trajectory.
This is a convenient method if only the final output is wanted.
Parameters
----------
sd_start:
Starting path velocity.
sd_end:
Goal path velocity.
Returns
-------
:
Time-parameterized joint position trajectory or
None If unable to parameterize.
"""
t0 = time.time()
self.compute_parameterization(sd_start, sd_end)
if self.problem_data.return_code != ParameterizationReturnCode.Ok:
logger.warning("Fail to parametrize path. Return code: %s", self.problem_data.return_code)
return None
outputtraj = self.parametrizer(self.path, self.problem_data.gridpoints, self.problem_data.sd_vec)
logger.info("Successfully parametrize path. Duration: %.3f, previously %.3f)",
outputtraj.path_interval[1], self.path.path_interval[1])
logger.info("Finish parametrization in %.3f secs", time.time() - t0)
return outputtraj
def inspect(self, compute=True):
"""Inspect the problem internal data."""
K = self.problem_data.K
X = self.problem_data.X
if X is not None:
plt.plot(X[:, 0], c="green", label="Feasible sets")
plt.plot(X[:, 1], c="green")
if K is not None:
plt.plot(K[:, 0], "--", c="red", label="Controllable sets")
plt.plot(K[:, 1], "--", c="red")
if self.problem_data.sd_vec is not None:
plt.plot(self.problem_data.sd_vec ** 2, label="Velocity profile")
plt.title("Path-position path-velocity plot")
plt.xlabel("Path position")
plt.ylabel("Path velocity square")
plt.legend()
plt.tight_layout()
plt.show()
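# --- Usage sketch (illustrative addition, not part of the original module) ---
# Hedged example of driving a concrete subclass (TOPPRA). The constructors
# shown exist in toppra's public API, but argument details may differ between
# versions; the waypoints and velocity limits are made-up placeholders.
#
#   import numpy as np
#   import toppra as ta
#
#   path = ta.SplineInterpolator(np.linspace(0, 1, 5), np.random.rand(5, 2))
#   vlim = np.array([[-1.0, 1.0], [-1.0, 1.0]])
#   pc_vel = ta.constraint.JointVelocityConstraint(vlim)
#   instance = ta.algorithm.TOPPRA([pc_vel], path)
#   traj = instance.compute_trajectory(0, 0)   # None if parametrization fails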
|
7927f50f1c65b5ee49486ced76661ee78c5261ff
|
64695990d45b3adbc812089c75fb498c267e10d2
|
/examples/bench_diskless.py
|
dd7a78315bcc95e826072d081e1e21282b2c90f4
|
[
"MIT"
] |
permissive
|
Unidata/netcdf4-python
|
f9e3df1b67143bfef541a949f5c5cba49d66d0e9
|
e1d3c1f3aaa30643be5b71c94a4b6cf095d59dee
|
refs/heads/master
| 2023-08-09T22:09:27.304569
| 2023-07-10T00:43:11
| 2023-07-10T00:43:11
| 15,884,459
| 696
| 294
|
MIT
| 2023-09-05T03:59:29
| 2014-01-13T22:41:52
|
Cython
|
UTF-8
|
Python
| false
| false
| 2,750
|
py
|
bench_diskless.py
|
# benchmark reads and writes, with and without compression.
# tests all four supported file formats.
from numpy.random.mtrand import uniform
import netCDF4
from timeit import Timer
import os, sys
# create an n1dim by n2dim by n3dim random array.
n1dim = 30
n2dim = 15
n3dim = 73
n4dim = 144
ntrials = 10
sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim))
array = uniform(size=(n1dim,n2dim,n3dim,n4dim))
def write_netcdf(filename,zlib=False,least_significant_digit=None,format='NETCDF4',closeit=False):
file = netCDF4.Dataset(filename,'w',format=format,diskless=True,persist=True)
file.createDimension('n1', n1dim)
file.createDimension('n2', n2dim)
file.createDimension('n3', n3dim)
file.createDimension('n4', n4dim)
foo = file.createVariable('data',\
               'f8',('n1','n2','n3','n4'),zlib=zlib,least_significant_digit=least_significant_digit)
foo.testme="hi I am an attribute"
foo.testme1="hi I am an attribute"
foo.testme2="hi I am an attribute"
foo.testme3="hi I am an attribute"
foo.testme4="hi I am an attribute"
foo.testme5="hi I am an attribute"
foo[:] = array
if closeit: file.close()
return file
def read_netcdf(ncfile):
data = ncfile.variables['data'][:]
for format in ['NETCDF4','NETCDF3_CLASSIC','NETCDF3_64BIT']:
sys.stdout.write('testing file format %s ...\n' % format)
# writing, no compression.
t = Timer("write_netcdf('test1.nc',closeit=True,format='%s')" % format,"from __main__ import write_netcdf")
sys.stdout.write('writing took %s seconds\n' %\
repr(sum(t.repeat(ntrials,1))/ntrials))
# test reading.
ncfile = write_netcdf('test1.nc',format=format)
t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
sys.stdout.write('reading took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
# test diskless=True in nc_open
format='NETCDF3_CLASSIC'
trials=50
sys.stdout.write('test caching of file in memory on open for %s\n' % format)
sys.stdout.write('testing file format %s ...\n' % format)
write_netcdf('test1.nc',format=format,closeit=True)
ncfile = netCDF4.Dataset('test1.nc',diskless=False)
t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
sys.stdout.write('reading (from disk) took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
ncfile.close()
ncfile = netCDF4.Dataset('test1.nc',diskless=True)
# setting diskless=True should cache the file in memory,
# resulting in faster reads.
t = Timer("read_netcdf(ncfile)","from __main__ import read_netcdf,ncfile")
sys.stdout.write('reading (cached in memory) took %s seconds\n' %
repr(sum(t.repeat(ntrials,1))/ntrials))
ncfile.close()
|
94787a41272be1848cd01f8d5c28320c8b8e87e8
|
98f1a0bfa5b20a0b81e9e555d76e706c62d949c9
|
/python/dgl/cuda/__init__.py
|
f478e131c2c29d5c844a07a1d2fbb039f59c45b1
|
[
"Apache-2.0"
] |
permissive
|
dmlc/dgl
|
3a8fbca3a7f0e9adf6e69679ad62948df48dfc42
|
bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1
|
refs/heads/master
| 2023-08-31T16:33:21.139163
| 2023-08-31T07:49:22
| 2023-08-31T07:49:22
| 130,375,797
| 12,631
| 3,482
|
Apache-2.0
| 2023-09-14T15:48:24
| 2018-04-20T14:49:09
|
Python
|
UTF-8
|
Python
| false
| false
| 150
|
py
|
__init__.py
|
""" CUDA wrappers """
from .. import backend as F
from .gpu_cache import GPUCache
if F.get_preferred_backend() == "pytorch":
from . import nccl
|
c2e4a5bdfba4cc7d6726d2bde176129fc5b76594
|
d7b9b490c954c7a9160b69f8ce2c907ef4681ecb
|
/nominations/templatetags/nominations.py
|
8e449dfcc6f310e4dc0869f6b6778e0ca663b8e1
|
[
"Apache-2.0"
] |
permissive
|
python/pythondotorg
|
00db93a4b1789a4d438806d106d9cee3349ad78c
|
c4ee749942227ca75c8e670546afe67232d647b2
|
refs/heads/main
| 2023-08-28T20:04:24.735314
| 2023-08-03T19:12:29
| 2023-08-03T19:12:29
| 6,127,047
| 1,131
| 646
|
Apache-2.0
| 2023-08-24T15:57:04
| 2012-10-08T16:00:15
|
Python
|
UTF-8
|
Python
| false
| false
| 172
|
py
|
nominations.py
|
import random
from django import template
register = template.Library()
@register.filter
def shuffle(arg):
    aux = list(arg)
random.shuffle(aux)
return aux
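# --- Usage sketch (illustrative addition, not part of the original module) ---
# Hedged example of using the filter in a Django template, assuming this tag
# library is loadable under the name "nominations":
#
#   {% load nominations %}
#   {% for nominee in nominees|shuffle %}
#       {{ nominee }}
#   {% endfor %}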
|
879dc2be79fc064ffac86ea60b159a0c08d3172a
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/telemetry/telemetry/internal/snap_page_util_unittest.py
|
2ac0175a1d2c1cd6d1fb61081c190733567d053b
|
[
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,689
|
py
|
snap_page_util_unittest.py
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import hashlib
import os
import shutil
import tempfile
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import mock
from telemetry.internal import snap_page_util
from telemetry.testing import options_for_unittests
from telemetry.testing import tab_test_case
from telemetry.internal.browser import browser_finder
from telemetry.internal.util import path
class FakeResponse(object):
def __init__(self):
self.content = None
def read(self):
return self.content
class SnapPageTest(unittest.TestCase):
def setUp(self):
self.finder_options = options_for_unittests.GetCopy()
browser_to_create = browser_finder.FindBrowser(self.finder_options)
self.platform = browser_to_create.platform
self.platform.network_controller.Open()
def tearDown(self):
self.platform.network_controller.Close()
def _SnapWithDummyValuesExceptPath(self, snapshot_path):
snap_page_util.SnapPage(self.finder_options, 'url', interactive=False,
snapshot_path=snapshot_path,
enable_browser_log=False)
def testSnappingToInvalidSnapshotPath(self):
with self.assertRaises(ValueError):
self._SnapWithDummyValuesExceptPath('nosuffix')
with self.assertRaises(ValueError):
self._SnapWithDummyValuesExceptPath('')
with self.assertRaises(ValueError):
self._SnapWithDummyValuesExceptPath('foohtml')
with self.assertRaises(ValueError):
self._SnapWithDummyValuesExceptPath('foo.svg')
with self.assertRaises(ValueError):
self._SnapWithDummyValuesExceptPath('foo.xhtml')
def testSnappingSimplePage(self):
self.platform.SetHTTPServerDirectories(path.GetUnittestDataDir())
html_file_path = os.path.join(path.GetUnittestDataDir(), 'green_rect.html')
url = self.platform.http_server.UrlOf(html_file_path)
outfile = StringIO()
test_dir = tempfile.mkdtemp()
try:
snap_page_util._SnapPageToFile(
self.finder_options, url, interactive=False,
snapshot_path=os.path.join(test_dir, 'page.html'),
snapshot_file=outfile, enable_browser_log=False)
self.assertIn('id="green"', outfile.getvalue())
finally:
shutil.rmtree(test_dir)
@mock.patch('urllib2.urlopen')
def testSnappingPageWithImage(self, mock_urlopen):
test_dir = tempfile.mkdtemp()
try:
src_html_filename = 'image.html'
dest_html_path = os.path.join(test_dir, src_html_filename)
shutil.copyfile(os.path.join(path.GetUnittestDataDir(),
src_html_filename),
dest_html_path)
self.platform.SetHTTPServerDirectories(path.GetUnittestDataDir())
url = self.platform.http_server.UrlOf(
os.path.join(path.GetUnittestDataDir(), src_html_filename))
outfile = StringIO()
# Load the test image file's content so that we can return it
# from the mocked url request as if we'd actually fetched the
# image from an external source.
request_response = FakeResponse()
expected_image_path = os.path.join(path.GetUnittestDataDir(), 'image.png')
with open(expected_image_path, 'rb') as image_file:
request_response.content = image_file.read()
# Mock out the external image url fetch to return the loaded
# test image content.
mock_urlopen.return_value = request_response
snap_page_util._SnapPageToFile(
self.finder_options, url, interactive=False,
snapshot_path=os.path.join(test_dir, src_html_filename),
snapshot_file=outfile, enable_browser_log=False)
self.assertIn('id="target"', outfile.getvalue())
# Validate that the 'fetched' image was written to the
# destination local image path.
expected_fetched_md5 = hashlib.md5(request_response.content).hexdigest()
actual_fetched_md5 = None
with open(os.path.join(test_dir, 'image', '0-target.png'), 'rb') as f:
actual_fetched_md5 = hashlib.md5(f.read()).hexdigest()
self.assertEqual(expected_fetched_md5, actual_fetched_md5)
finally:
shutil.rmtree(test_dir)
class JSONTransmissionTest(tab_test_case.TabTestCase):
def testTransmittingLargeObject(self):
# Create a large array of 1 million elements
json_obj = [1] * 1000000
snap_page_util._TransmitLargeJSONToTab(
self._tab, json_obj, 'big_array')
        self.assertEqual(self._tab.EvaluateJavaScript('big_array.length'), 1000000)
|
8e49a4c01d3a9268876010f6ee045b672566d112
|
6647c484a6601f70dd348076c484843807238ddf
|
/google/datalab/bigquery/__init__.py
|
bf708a7f633f9d24cf4ad43c24a6bdfc42eed8ef
|
[
"Apache-2.0"
] |
permissive
|
googledatalab/pydatalab
|
43624c271e25edfd97ac0ecf39ec4f55e9ad27b2
|
8bf007da3e43096aa3a3dca158fc56b286ba6f5c
|
refs/heads/master
| 2022-09-13T10:23:59.112507
| 2022-09-02T21:16:23
| 2022-09-02T21:16:23
| 58,776,721
| 200
| 91
|
Apache-2.0
| 2023-03-28T20:55:15
| 2016-05-13T22:42:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,437
|
py
|
__init__.py
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
"""Google Cloud Platform library - BigQuery Functionality."""
from __future__ import absolute_import
from ._csv_options import CSVOptions
from ._dataset import Dataset, Datasets
from ._external_data_source import ExternalDataSource
from ._query import Query
from ._query_output import QueryOutput
from ._query_results_table import QueryResultsTable
from ._query_stats import QueryStats
from ._sampling import Sampling
from ._schema import Schema, SchemaField
from ._table import Table, TableMetadata
from ._udf import UDF
from ._utils import TableName, DatasetName
from ._view import View
__all__ = ['CSVOptions', 'Dataset', 'Datasets', 'ExternalDataSource', 'Query', 'QueryOutput',
'QueryResultsTable', 'QueryStats', 'Sampling', 'Schema', 'SchemaField', 'Table',
'TableMetadata', 'UDF', 'TableName', 'DatasetName', 'View']
|
7e7c20fdfae97028e99baa9172fb58d838aa1176
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/Confluera/Scripts/ConflueraProgressionsDataWarroom/ConflueraProgressionsDataWarroom.py
|
2a63dedcc077a1c32d8bfb8d02e7fd14886038ca
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 927
|
py
|
ConflueraProgressionsDataWarroom.py
|
from CommonServerPython import *
from CommonServerUserPython import *
# Executes confluera-fetch-progressions command/script
progressions_data = demisto.executeCommand('confluera-fetch-progressions', {'hours': '72'})
if progressions_data[1] and progressions_data[1]['Contents']:
progressions = progressions_data[1]['Contents']
else:
progressions = []
data = []
for idx, progression in enumerate(progressions):
if progression['riskScore'] == 0:
color = "blue"
elif progression['riskScore'] < 25:
color = "green"
else:
color = "red"
temp_dct = {
"name": 'AP-' + str(progression['attackId']),
"data": [progression['riskScore']],
"color": color
}
data.append(temp_dct)
return_results({
"Type": 17,
"ContentsFormat": "bar",
"Contents": {
"stats": data,
"params": {
"layout": "vertical"
}
}
})
|
63513803ba1ecd11581321797cc1103e1f1f5d3d
|
0d40af0c10fd47af5ea88675e5be3b7ee98cbcd8
|
/tools/lint/test/test_rst.py
|
e540081a94168d63b89566764185461517153f83
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
mozilla/gecko-dev
|
f02cdc9e61197c1d2a038cf1ad8cb5339e8cb62a
|
304e9eaa3d7dbfab38d5b5401cbae227b20efe37
|
refs/heads/master
| 2023-09-01T20:40:41.588946
| 2023-09-01T11:38:17
| 2023-09-01T11:38:17
| 13,509,108
| 3,025
| 1,882
|
NOASSERTION
| 2023-09-05T04:11:13
| 2013-10-11T20:35:42
| null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
test_rst.py
|
import mozunit
import pytest
from mozfile import which
LINTER = "rst"
pytestmark = pytest.mark.skipif(
not which("rstcheck"), reason="rstcheck is not installed"
)
def test_basic(lint, paths):
results = lint(paths())
assert len(results) == 2
assert "Title underline too short" in results[0].message
assert results[0].level == "error"
assert results[0].relpath == "bad.rst"
assert "Title overline & underline mismatch" in results[1].message
assert results[1].level == "error"
assert results[1].relpath == "bad2.rst"
if __name__ == "__main__":
mozunit.main()
|
c88b8e87a4ed3a8bd8c017765562ebc167b88fa7
|
6793f3b093478fdde550d8669b9b955081af5e0e
|
/nbconvert/postprocessors/base.py
|
06aee67497219d7c4966ed1c7a4e49a5a33afc53
|
[
"BSD-3-Clause"
] |
permissive
|
jupyter/nbconvert
|
0afe110c4ec39b68661c601f8f3b20fd21a9ba13
|
51c6e0a7d40918366e2a68c5ea471fd2c65722cb
|
refs/heads/main
| 2023-09-03T16:05:25.981152
| 2023-08-29T13:57:58
| 2023-08-29T13:57:58
| 33,653,617
| 1,645
| 654
|
BSD-3-Clause
| 2023-09-11T10:42:26
| 2015-04-09T06:58:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
base.py
|
"""
Basic post processor
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from nbconvert.utils.base import NbConvertBase
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class PostProcessorBase(NbConvertBase):
"""The base class for post processors."""
def __call__(self, input_):
"""
        Call the post processor on the given input; see :meth:`postprocess`.
"""
self.postprocess(input_)
def postprocess(self, input_):
"""
Post-process output from a writer.
"""
msg = "postprocess"
raise NotImplementedError(msg)
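# --- Usage sketch (illustrative addition, not part of the original module) ---
# Hedged example of the subclass contract: override postprocess() and do the
# work there. The `log` attribute is assumed to come from the traitlets
# LoggingConfigurable machinery that NbConvertBase builds on.
#
#   class PrintingPostProcessor(PostProcessorBase):
#       """Toy post processor that just reports the written file."""
#
#       def postprocess(self, input_):
#           self.log.info("wrote %s", input_)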
|
b535a26b0ea9d2a66892c16e60503d7abe4b71e5
|
3dc647cd07a7361ed401e40d2b7cce8c826c8f6c
|
/Lib/_weakrefset.py
|
2a27684324d80ab32f576040fb80ec666a8f7ff8
|
[
"Python-2.0",
"CC-BY-4.0",
"MIT"
] |
permissive
|
RustPython/RustPython
|
5ddce4a9848b9de8c041ffd2634f83c0105d3f39
|
b864e5da1f18897fc884180b7093df5aa170024f
|
refs/heads/main
| 2023-09-04T12:38:29.458699
| 2023-09-03T12:33:42
| 2023-09-03T12:33:42
| 135,201,145
| 15,815
| 1,302
|
MIT
| 2023-09-14T08:11:45
| 2018-05-28T19:27:01
|
Rust
|
UTF-8
|
Python
| false
| false
| 5,923
|
py
|
_weakrefset.py
|
# Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
from types import GenericAlias
__all__ = ['WeakSet']
class _IterationGuard:
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet:
def __init__(self, data=None):
self.data = set()
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
pop = self._pending_removals.pop
discard = self.data.discard
while True:
try:
item = pop()
except IndexError:
return
discard(item)
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
# Caveat: the iterator will keep a strong reference to
# `item` until it is resumed or closed.
yield item
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, item):
try:
wr = ref(item)
except TypeError:
return False
return wr in self.data
def __reduce__(self):
return (self.__class__, (list(self),),
getattr(self, '__dict__', None))
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet') from None
item = itemref()
if item is not None:
return item
def remove(self, item):
if self._pending_removals:
self._commit_removals()
self.data.remove(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
def difference(self, other):
newset = self.copy()
newset.difference_update(other)
return newset
__sub__ = difference
def difference_update(self, other):
self.__isub__(other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self.__class__(item for item in other if item in self)
__and__ = intersection
def intersection_update(self, other):
self.__iand__(other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__le__ = issubset
def __lt__(self, other):
return self.data < set(map(ref, other))
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__ge__ = issuperset
def __gt__(self, other):
return self.data > set(map(ref, other))
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(map(ref, other))
def symmetric_difference(self, other):
newset = self.copy()
newset.symmetric_difference_update(other)
return newset
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
self.__ixor__(other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item, self._remove) for item in other)
return self
def union(self, other):
return self.__class__(e for s in (self, other) for e in s)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
def __repr__(self):
return repr(self.data)
__class_getitem__ = classmethod(GenericAlias)
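# --- Usage sketch (illustrative addition, not part of the original module) ---
# Hedged demo of the weak-reference semantics: an entry vanishes once the last
# strong reference to the element goes away.
#
#   class Token:
#       pass
#
#   t = Token()
#   ws = WeakSet([t])
#   assert t in ws and len(ws) == 1
#   del t                    # drop the only strong reference
#   import gc; gc.collect()  # refcounting alone suffices on CPython
#   assert len(ws) == 0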
|
01db71c35f46c287487fa9e038438af6d4b579f8
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/find-smallest-letter-greater-than-target.py
|
f9971d734dc8093e560190433aa6fa75df9a7b45
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 339
|
py
|
find-smallest-letter-greater-than-target.py
|
# Time: O(logn)
# Space: O(1)
import bisect
class Solution(object):
def nextGreatestLetter(self, letters, target):
"""
:type letters: List[str]
:type target: str
:rtype: str
"""
i = bisect.bisect_right(letters, target)
return letters[0] if i == len(letters) else letters[i]
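# Example (illustrative addition): letters wrap around, so a target at or past
# the last letter maps back to letters[0].
#
#   assert Solution().nextGreatestLetter(["c", "f", "j"], "a") == "c"
#   assert Solution().nextGreatestLetter(["c", "f", "j"], "c") == "f"
#   assert Solution().nextGreatestLetter(["c", "f", "j"], "j") == "c"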
|
20e636e454be5bb719cdb5ff624eecf8fbad6f1d
|
7e6afb4986a53c420d40a2039240f8c5ed3f9549
|
/python/stubs-out/mrpt/pymrpt/mrpt/opengl/stock_objects.pyi
|
11fb46aa9505efb5465f85cd99456a656c5a7642
|
[
"BSD-3-Clause"
] |
permissive
|
MRPT/mrpt
|
9ea3c39a76de78eacaca61a10e7e96646647a6da
|
34077ec74a90b593b587f2057d3280ea520a3609
|
refs/heads/develop
| 2023-08-17T23:37:29.722496
| 2023-08-17T15:39:54
| 2023-08-17T15:39:54
| 13,708,826
| 1,695
| 646
|
BSD-3-Clause
| 2023-09-12T22:02:53
| 2013-10-19T21:09:23
|
C++
|
UTF-8
|
Python
| false
| false
| 533
|
pyi
|
stock_objects.pyi
|
from typing import Any
def BumblebeeCamera(*args, **kwargs) -> Any: ...
def CornerXYSimple(*args, **kwargs) -> Any: ...
def CornerXYZ(*args, **kwargs) -> Any: ...
def CornerXYZEye(*args, **kwargs) -> Any: ...
def CornerXYZSimple(*args, **kwargs) -> Any: ...
def Hokuyo_URG(*args, **kwargs) -> Any: ...
def Hokuyo_UTM(*args, **kwargs) -> Any: ...
def Househam_Sprayer(*args, **kwargs) -> Any: ...
def RobotGiraff(*args, **kwargs) -> Any: ...
def RobotPioneer(*args, **kwargs) -> Any: ...
def RobotRhodon(*args, **kwargs) -> Any: ...
|
8db5ea29af3e053a332bf214fc258aa2d05f66e9
|
ad61cc119a42abfd3d64224a753817ae0f9ba058
|
/tests/unit/customizations/s3/test_filegenerator.py
|
c69c1f5dc12d5e1d97001d7b555cc5038534cde9
|
[
"Apache-2.0"
] |
permissive
|
aws/aws-cli
|
30b0e5b0fb6d736f1540990955f0a7351ee7a908
|
147d16dfdb72dc9cf362b676a57e46a49375afbd
|
refs/heads/develop
| 2023-09-03T19:52:07.955543
| 2023-09-01T20:37:50
| 2023-09-01T20:37:50
| 6,780,767
| 13,038
| 4,107
|
NOASSERTION
| 2023-09-13T19:48:11
| 2012-11-20T16:07:36
|
Python
|
UTF-8
|
Python
| false
| false
| 28,544
|
py
|
test_filegenerator.py
|
# Copyright 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import platform
from awscli.testutils import mock, unittest, FileCreator, BaseAWSCommandParamsTest
from awscli.testutils import skip_if_windows
import stat
import tempfile
import shutil
import socket
from botocore.exceptions import ClientError
from awscli.compat import six
from awscli.customizations.s3.filegenerator import FileGenerator, \
FileDecodingError, FileStat, is_special_file, is_readable
from awscli.customizations.s3.utils import get_file_stat, EPOCH_TIME
from tests.unit.customizations.s3 import make_loc_files, clean_loc_files, \
compare_files
@skip_if_windows('Special files only supported on mac/linux')
class TestIsSpecialFile(unittest.TestCase):
def setUp(self):
self.files = FileCreator()
self.filename = 'foo'
def tearDown(self):
self.files.remove_all()
def test_is_character_device(self):
file_path = os.path.join(self.files.rootdir, self.filename)
self.files.create_file(self.filename, contents='')
with mock.patch('stat.S_ISCHR') as mock_class:
mock_class.return_value = True
self.assertTrue(is_special_file(file_path))
def test_is_block_device(self):
file_path = os.path.join(self.files.rootdir, self.filename)
self.files.create_file(self.filename, contents='')
with mock.patch('stat.S_ISBLK') as mock_class:
mock_class.return_value = True
self.assertTrue(is_special_file(file_path))
def test_is_fifo(self):
file_path = os.path.join(self.files.rootdir, self.filename)
mode = 0o600 | stat.S_IFIFO
os.mknod(file_path, mode)
self.assertTrue(is_special_file(file_path))
def test_is_socket(self):
file_path = os.path.join(self.files.rootdir, self.filename)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(file_path)
self.assertTrue(is_special_file(file_path))
class TestIsReadable(unittest.TestCase):
def setUp(self):
self.files = FileCreator()
self.filename = 'foo'
self.full_path = os.path.join(self.files.rootdir, self.filename)
def tearDown(self):
self.files.remove_all()
def test_unreadable_file(self):
self.files.create_file(self.filename, contents="foo")
open_function = 'awscli.customizations.s3.filegenerator._open'
with mock.patch(open_function) as mock_class:
mock_class.side_effect = OSError()
self.assertFalse(is_readable(self.full_path))
def test_unreadable_directory(self):
os.mkdir(self.full_path)
with mock.patch('os.listdir') as mock_class:
mock_class.side_effect = OSError()
self.assertFalse(is_readable(self.full_path))
class LocalFileGeneratorTest(unittest.TestCase):
def setUp(self):
self.client = None
self.file_creator = FileCreator()
self.files = make_loc_files(self.file_creator)
self.local_file = self.files[0]
self.local_dir = self.files[3] + os.sep
def tearDown(self):
clean_loc_files(self.file_creator)
def test_local_file(self):
"""
Generate a single local file.
"""
input_local_file = {'src': {'path': self.local_file,
'type': 'local'},
'dest': {'path': 'bucket/text1.txt',
'type': 's3'},
'dir_op': False, 'use_src_name': False}
params = {'region': 'us-east-1'}
files = FileGenerator(self.client, '').call(input_local_file)
result_list = []
for filename in files:
result_list.append(filename)
size, last_update = get_file_stat(self.local_file)
file_stat = FileStat(src=self.local_file, dest='bucket/text1.txt',
compare_key='text1.txt', size=size,
last_update=last_update, src_type='local',
dest_type='s3', operation_name='')
ref_list = [file_stat]
self.assertEqual(len(result_list), len(ref_list))
for i in range(len(result_list)):
compare_files(self, result_list[i], ref_list[i])
def test_local_directory(self):
"""
Generate an entire local directory.
"""
input_local_dir = {'src': {'path': self.local_dir,
'type': 'local'},
'dest': {'path': 'bucket/',
'type': 's3'},
'dir_op': True, 'use_src_name': True}
params = {'region': 'us-east-1'}
files = FileGenerator(self.client, '').call(input_local_dir)
result_list = []
for filename in files:
result_list.append(filename)
size, last_update = get_file_stat(self.local_file)
file_stat = FileStat(src=self.local_file, dest='bucket/text1.txt',
compare_key='text1.txt', size=size,
last_update=last_update, src_type='local',
dest_type='s3', operation_name='')
path = self.local_dir + 'another_directory' + os.sep \
+ 'text2.txt'
size, last_update = get_file_stat(path)
file_stat2 = FileStat(src=path,
dest='bucket/another_directory/text2.txt',
compare_key='another_directory/text2.txt',
size=size, last_update=last_update,
src_type='local',
dest_type='s3', operation_name='')
ref_list = [file_stat2, file_stat]
self.assertEqual(len(result_list), len(ref_list))
for i in range(len(result_list)):
compare_files(self, result_list[i], ref_list[i])
@skip_if_windows('Symlink tests only supported on mac/linux')
class TestIgnoreFilesLocally(unittest.TestCase):
"""
This class tests the ability to ignore particular files. This includes
skipping symlink when desired.
"""
def setUp(self):
self.client = None
self.files = FileCreator()
def tearDown(self):
self.files.remove_all()
def test_warning(self):
path = os.path.join(self.files.rootdir, 'badsymlink')
os.symlink('non-existent-file', path)
filegenerator = FileGenerator(self.client, '', True)
self.assertTrue(filegenerator.should_ignore_file(path))
def test_skip_symlink(self):
filename = 'foo.txt'
self.files.create_file(os.path.join(self.files.rootdir,
filename),
contents='foo.txt contents')
sym_path = os.path.join(self.files.rootdir, 'symlink')
os.symlink(filename, sym_path)
filegenerator = FileGenerator(self.client, '', False)
self.assertTrue(filegenerator.should_ignore_file(sym_path))
def test_no_skip_symlink(self):
filename = 'foo.txt'
path = self.files.create_file(os.path.join(self.files.rootdir,
filename),
contents='foo.txt contents')
sym_path = os.path.join(self.files.rootdir, 'symlink')
os.symlink(path, sym_path)
filegenerator = FileGenerator(self.client, '', True)
self.assertFalse(filegenerator.should_ignore_file(sym_path))
self.assertFalse(filegenerator.should_ignore_file(path))
def test_no_skip_symlink_dir(self):
filename = 'dir'
path = os.path.join(self.files.rootdir, 'dir/')
os.mkdir(path)
sym_path = os.path.join(self.files.rootdir, 'symlink')
os.symlink(path, sym_path)
filegenerator = FileGenerator(self.client, '', True)
self.assertFalse(filegenerator.should_ignore_file(sym_path))
self.assertFalse(filegenerator.should_ignore_file(path))
class TestThrowsWarning(unittest.TestCase):
def setUp(self):
self.files = FileCreator()
self.root = self.files.rootdir
self.client = None
def tearDown(self):
self.files.remove_all()
def test_no_warning(self):
file_gen = FileGenerator(self.client, '', False)
self.files.create_file("foo.txt", contents="foo")
full_path = os.path.join(self.root, "foo.txt")
return_val = file_gen.triggers_warning(full_path)
self.assertFalse(return_val)
self.assertTrue(file_gen.result_queue.empty())
def test_no_exists(self):
file_gen = FileGenerator(self.client, '', False)
filename = os.path.join(self.root, 'file')
return_val = file_gen.triggers_warning(filename)
self.assertTrue(return_val)
warning_message = file_gen.result_queue.get()
self.assertEqual(warning_message.message,
("warning: Skipping file %s. File does not exist." %
filename))
def test_no_read_access(self):
file_gen = FileGenerator(self.client, '', False)
self.files.create_file("foo.txt", contents="foo")
full_path = os.path.join(self.root, "foo.txt")
open_function = 'awscli.customizations.s3.filegenerator._open'
with mock.patch(open_function) as mock_class:
mock_class.side_effect = OSError()
return_val = file_gen.triggers_warning(full_path)
self.assertTrue(return_val)
warning_message = file_gen.result_queue.get()
self.assertEqual(warning_message.message,
("warning: Skipping file %s. File/Directory is "
"not readable." % full_path))
@skip_if_windows('Special files only supported on mac/linux')
def test_is_special_file_warning(self):
file_gen = FileGenerator(self.client, '', False)
file_path = os.path.join(self.files.rootdir, 'foo')
# Use socket for special file.
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind(file_path)
return_val = file_gen.triggers_warning(file_path)
self.assertTrue(return_val)
warning_message = file_gen.result_queue.get()
self.assertEqual(warning_message.message,
("warning: Skipping file %s. File is character "
"special device, block special device, FIFO, or "
"socket." % file_path))
@skip_if_windows('Symlink tests only supported on mac/linux')
class TestSymlinksIgnoreFiles(unittest.TestCase):
"""
This class tests the ability to list out the correct local files
depending on if symlinks are being followed. Also tests to ensure
broken symlinks fail.
"""
def setUp(self):
self.client = None
self.files = FileCreator()
# List of local filenames.
self.filenames = []
self.root = self.files.rootdir
self.bucket = 'bucket/'
filename_1 = self.files.create_file('foo.txt',
contents='foo.txt contents')
self.filenames.append(filename_1)
nested_dir = os.path.join(self.root, 'realfiles')
os.mkdir(nested_dir)
filename_2 = self.files.create_file(os.path.join(nested_dir,
'bar.txt'),
contents='bar.txt contents')
self.filenames.append(filename_2)
# Names of symlinks.
self.symlinks = []
# Names of files if symlinks are followed.
self.symlink_files = []
# Create symlink to file foo.txt.
symlink_1 = os.path.join(self.root, 'symlink_1')
os.symlink(filename_1, symlink_1)
self.symlinks.append(symlink_1)
self.symlink_files.append(symlink_1)
# Create a symlink to a file that does not exist.
symlink_2 = os.path.join(self.root, 'symlink_2')
os.symlink('non-existent-file', symlink_2)
self.symlinks.append(symlink_2)
# Create a symlink to directory realfiles
symlink_3 = os.path.join(self.root, 'symlink_3')
os.symlink(nested_dir, symlink_3)
self.symlinks.append(symlink_3)
self.symlink_files.append(os.path.join(symlink_3, 'bar.txt'))
def tearDown(self):
self.files.remove_all()
def test_no_follow_symlink(self):
abs_root = six.text_type(os.path.abspath(self.root) + os.sep)
input_local_dir = {'src': {'path': abs_root,
'type': 'local'},
'dest': {'path': self.bucket,
'type': 's3'},
'dir_op': True, 'use_src_name': True}
file_stats = FileGenerator(self.client, '', False).call(input_local_dir)
self.filenames.sort()
result_list = []
for file_stat in file_stats:
result_list.append(getattr(file_stat, 'src'))
self.assertEqual(len(result_list), len(self.filenames))
# Just check to make sure the right local files are generated.
for i in range(len(result_list)):
filename = six.text_type(os.path.abspath(self.filenames[i]))
self.assertEqual(result_list[i], filename)
def test_warn_bad_symlink(self):
"""
This tests to make sure it fails when following bad symlinks.
"""
abs_root = six.text_type(os.path.abspath(self.root) + os.sep)
input_local_dir = {'src': {'path': abs_root,
'type': 'local'},
'dest': {'path': self.bucket,
'type': 's3'},
'dir_op': True, 'use_src_name': True}
file_stats = FileGenerator(self.client, '', True).call(input_local_dir)
file_gen = FileGenerator(self.client, '', True)
file_stats = file_gen.call(input_local_dir)
all_filenames = self.filenames + self.symlink_files
all_filenames.sort()
result_list = []
for file_stat in file_stats:
result_list.append(getattr(file_stat, 'src'))
self.assertEqual(len(result_list), len(all_filenames))
# Just check to make sure the right local files are generated.
for i in range(len(result_list)):
filename = six.text_type(os.path.abspath(all_filenames[i]))
self.assertEqual(result_list[i], filename)
self.assertFalse(file_gen.result_queue.empty())
def test_follow_symlink(self):
# First remove the bad symlink.
os.remove(os.path.join(self.root, 'symlink_2'))
abs_root = six.text_type(os.path.abspath(self.root) + os.sep)
input_local_dir = {'src': {'path': abs_root,
'type': 'local'},
'dest': {'path': self.bucket,
'type': 's3'},
'dir_op': True, 'use_src_name': True}
file_stats = FileGenerator(self.client, '', True).call(input_local_dir)
all_filenames = self.filenames + self.symlink_files
all_filenames.sort()
result_list = []
for file_stat in file_stats:
result_list.append(getattr(file_stat, 'src'))
self.assertEqual(len(result_list), len(all_filenames))
# Just check to make sure the right local files are generated.
for i in range(len(result_list)):
filename = six.text_type(os.path.abspath(all_filenames[i]))
self.assertEqual(result_list[i], filename)
class TestListFilesLocally(unittest.TestCase):
maxDiff = None
def setUp(self):
self.directory = six.text_type(tempfile.mkdtemp())
def tearDown(self):
shutil.rmtree(self.directory)
@mock.patch('os.listdir')
def test_error_raised_on_decoding_error(self, listdir_mock):
# On Python3, sys.getdefaultencoding
file_generator = FileGenerator(None, None, None)
# utf-8 encoding for U+2713.
listdir_mock.return_value = [b'\xe2\x9c\x93']
list(file_generator.list_files(self.directory, dir_op=True))
# Ensure the message was added to the result queue and is
# being skipped.
self.assertFalse(file_generator.result_queue.empty())
warning_message = file_generator.result_queue.get()
self.assertIn("warning: Skipping file ", warning_message.message)
self.assertIn("Please check your locale settings.",
warning_message.message)
def test_list_files_is_in_sorted_order(self):
p = os.path.join
open(p(self.directory, 'test-123.txt'), 'w').close()
open(p(self.directory, 'test-321.txt'), 'w').close()
open(p(self.directory, 'test123.txt'), 'w').close()
open(p(self.directory, 'test321.txt'), 'w').close()
os.mkdir(p(self.directory, 'test'))
open(p(self.directory, 'test', 'foo.txt'), 'w').close()
file_generator = FileGenerator(None, None, None)
values = list(el[0] for el in file_generator.list_files(
self.directory, dir_op=True))
ref_vals = list(sorted(values,
key=lambda items: items.replace(os.sep, '/')))
self.assertEqual(values, ref_vals)
@mock.patch('awscli.customizations.s3.filegenerator.get_file_stat')
def test_list_files_with_invalid_timestamp(self, stat_mock):
stat_mock.return_value = 9, None
open(os.path.join(self.directory, 'test'), 'w').close()
file_generator = FileGenerator(None, None, None)
value = list(file_generator.list_files(self.directory, dir_op=True))[0]
self.assertIs(value[1]['LastModified'], EPOCH_TIME)
def test_list_local_files_with_unicode_chars(self):
p = os.path.join
open(p(self.directory, u'a'), 'w').close()
open(p(self.directory, u'a\u0300'), 'w').close()
open(p(self.directory, u'a\u0300-1'), 'w').close()
open(p(self.directory, u'a\u03001'), 'w').close()
open(p(self.directory, u'z'), 'w').close()
open(p(self.directory, u'\u00e6'), 'w').close()
os.mkdir(p(self.directory, u'a\u0300a'))
open(p(self.directory, u'a\u0300a', u'a'), 'w').close()
open(p(self.directory, u'a\u0300a', u'z'), 'w').close()
open(p(self.directory, u'a\u0300a', u'\u00e6'), 'w').close()
file_generator = FileGenerator(None, None, None)
values = list(el[0] for el in file_generator.list_files(
self.directory, dir_op=True))
expected_order = [os.path.join(self.directory, el) for el in [
u"a",
u"a\u0300",
u"a\u0300-1",
u"a\u03001",
u"a\u0300a%sa" % os.path.sep,
u"a\u0300a%sz" % os.path.sep,
u"a\u0300a%s\u00e6" % os.path.sep,
u"z",
u"\u00e6"
]]
self.assertEqual(values, expected_order)
class TestNormalizeSort(unittest.TestCase):
def test_normalize_sort(self):
names = ['xyz123456789',
'xyz1' + os.path.sep + 'test',
'xyz' + os.path.sep + 'test']
ref_names = [names[2], names[1], names[0]]
filegenerator = FileGenerator(None, None, None)
filegenerator.normalize_sort(names, os.path.sep, '/')
for i in range(len(ref_names)):
self.assertEqual(ref_names[i], names[i])
def test_normalize_sort_backslash(self):
names = ['xyz123456789',
'xyz1\\test',
'xyz\\test']
ref_names = [names[2], names[1], names[0]]
filegenerator = FileGenerator(None, None, None)
filegenerator.normalize_sort(names, '\\', '/')
for i in range(len(ref_names)):
self.assertEqual(ref_names[i], names[i])
class S3FileGeneratorTest(BaseAWSCommandParamsTest):
def setUp(self):
super(S3FileGeneratorTest, self).setUp()
self.client = self.driver.session.create_client('s3')
self.bucket = 'foo'
self.file1 = self.bucket + '/' + 'text1.txt'
self.file2 = self.bucket + '/' + 'another_directory/text2.txt'
def test_s3_file(self):
"""
Generate a single s3 file
Note: Size and last update are not tested because s3 generates them.
"""
input_s3_file = {'src': {'path': self.file1, 'type': 's3'},
'dest': {'path': 'text1.txt', 'type': 'local'},
'dir_op': False, 'use_src_name': False}
params = {'region': 'us-east-1'}
self.parsed_responses = [{"ETag": "abcd", "ContentLength": 100,
"LastModified": "2014-01-09T20:45:49.000Z"}]
self.patch_make_request()
file_gen = FileGenerator(self.client, '')
files = file_gen.call(input_s3_file)
result_list = []
for filename in files:
result_list.append(filename)
file_stat = FileStat(src=self.file1, dest='text1.txt',
compare_key='text1.txt',
size=result_list[0].size,
last_update=result_list[0].last_update,
src_type='s3',
dest_type='local', operation_name='')
ref_list = [file_stat]
self.assertEqual(len(result_list), len(ref_list))
for i in range(len(result_list)):
compare_files(self, result_list[i], ref_list[i])
def test_s3_single_file_404(self):
"""
Test the error message for a 404 ClientError for a single file listing
"""
input_s3_file = {'src': {'path': self.file1, 'type': 's3'},
'dest': {'path': 'text1.txt', 'type': 'local'},
'dir_op': False, 'use_src_name': False}
params = {'region': 'us-east-1'}
self.client = mock.Mock()
self.client.head_object.side_effect = \
ClientError(
{'Error': {'Code': '404', 'Message': 'Not Found'}},
'HeadObject',
)
file_gen = FileGenerator(self.client, '')
files = file_gen.call(input_s3_file)
# The error should include 404 and should include the key name.
with self.assertRaisesRegex(ClientError, '404.*text1.txt'):
list(files)
def test_s3_single_file_delete(self):
input_s3_file = {'src': {'path': self.file1, 'type': 's3'},
'dest': {'path': '', 'type': 'local'},
'dir_op': False, 'use_src_name': True}
self.client = mock.Mock()
file_gen = FileGenerator(self.client, 'delete')
result_list = list(file_gen.call(input_s3_file))
self.assertEqual(len(result_list), 1)
compare_files(
self,
result_list[0],
FileStat(src=self.file1, dest='text1.txt',
compare_key='text1.txt',
size=None, last_update=None,
src_type='s3', dest_type='local',
operation_name='delete')
)
self.client.head_object.assert_not_called()
def test_s3_directory(self):
"""
        Generates s3 files under a common prefix. It also ensures that
        zero-size files are ignored.
Note: Size and last update are not tested because s3 generates them.
"""
input_s3_file = {'src': {'path': self.bucket + '/', 'type': 's3'},
'dest': {'path': '', 'type': 'local'},
'dir_op': True, 'use_src_name': True}
params = {'region': 'us-east-1'}
files = FileGenerator(self.client, '').call(input_s3_file)
self.parsed_responses = [{
"CommonPrefixes": [], "Contents": [
{"Key": "another_directory/text2.txt", "Size": 100,
"LastModified": "2014-01-09T20:45:49.000Z"},
{"Key": "text1.txt", "Size": 10,
"LastModified": "2013-01-09T20:45:49.000Z"}]}]
self.patch_make_request()
result_list = []
for filename in files:
result_list.append(filename)
file_stat = FileStat(src=self.file2,
dest='another_directory' + os.sep +
'text2.txt',
compare_key='another_directory/text2.txt',
size=result_list[0].size,
last_update=result_list[0].last_update,
src_type='s3',
dest_type='local', operation_name='')
file_stat2 = FileStat(src=self.file1,
dest='text1.txt',
compare_key='text1.txt',
size=result_list[1].size,
last_update=result_list[1].last_update,
src_type='s3',
dest_type='local', operation_name='')
ref_list = [file_stat, file_stat2]
self.assertEqual(len(result_list), len(ref_list))
for i in range(len(result_list)):
compare_files(self, result_list[i], ref_list[i])
def test_s3_delete_directory(self):
"""
        Generates s3 files under a common prefix. It also ensures that
        the directory itself is included because this is a delete command.
Note: Size and last update are not tested because s3 generates them.
"""
input_s3_file = {'src': {'path': self.bucket + '/', 'type': 's3'},
'dest': {'path': '', 'type': 'local'},
'dir_op': True, 'use_src_name': True}
self.parsed_responses = [{
"CommonPrefixes": [], "Contents": [
{"Key": "another_directory/", "Size": 0,
"LastModified": "2012-01-09T20:45:49.000Z"},
{"Key": "another_directory/text2.txt", "Size": 100,
"LastModified": "2014-01-09T20:45:49.000Z"},
{"Key": "text1.txt", "Size": 10,
"LastModified": "2013-01-09T20:45:49.000Z"}]}]
self.patch_make_request()
files = FileGenerator(self.client, 'delete').call(input_s3_file)
result_list = []
for filename in files:
result_list.append(filename)
file_stat1 = FileStat(src=self.bucket + '/another_directory/',
dest='another_directory' + os.sep,
compare_key='another_directory/',
size=result_list[0].size,
last_update=result_list[0].last_update,
src_type='s3',
dest_type='local', operation_name='delete')
file_stat2 = FileStat(src=self.file2,
dest='another_directory' + os.sep + 'text2.txt',
compare_key='another_directory/text2.txt',
size=result_list[1].size,
last_update=result_list[1].last_update,
src_type='s3',
dest_type='local', operation_name='delete')
file_stat3 = FileStat(src=self.file1,
dest='text1.txt',
compare_key='text1.txt',
size=result_list[2].size,
last_update=result_list[2].last_update,
src_type='s3',
dest_type='local', operation_name='delete')
ref_list = [file_stat1, file_stat2, file_stat3]
self.assertEqual(len(result_list), len(ref_list))
for i in range(len(result_list)):
compare_files(self, result_list[i], ref_list[i])
if __name__ == "__main__":
unittest.main()
|
d1e9c3e2156f612d36a4e87bcf7d15a4417dc1ea
|
d05c946e345baa67e7894ee33ca21e24b8d26028
|
/gui-programming/hangman-game-gui/hangman.py
|
1763c3bd91207ac4aa96498472921c013306f9d1
|
[
"MIT"
] |
permissive
|
x4nth055/pythoncode-tutorials
|
327255550812f84149841d56f2d13eaa84efd42e
|
d6ba5d672f7060ba88384db5910efab1768c7230
|
refs/heads/master
| 2023-09-01T02:36:58.442748
| 2023-08-19T14:04:34
| 2023-08-19T14:04:34
| 199,449,624
| 1,858
| 2,055
|
MIT
| 2023-08-25T20:41:56
| 2019-07-29T12:35:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,418
|
py
|
hangman.py
|
import pygame
from pygame.locals import *
import random
from string import ascii_letters
pygame.init()
pygame.font.init()
screen = pygame.display.set_mode((400, 500))
pygame.display.set_caption("Hangman")
class Hangman():
def __init__(self):
with open("./words.txt", "r") as file:
# picks secret word
words = file.read().split("\n")
self.secret_word = random.choice(words)
        # use the secret word's length to make the letter blanks
self.guessed_word = "*" * len(self.secret_word)
self.wrong_guesses = []
self.wrong_guess_count = 0
self.taking_guess = True
self.running = True
self.background_color = (155, 120, 70)
self.gallow_color = (0,0,0)
self.body_color = (255,253,175)
self.font = pygame.font.SysFont("Courier New", 20)
self.FPS = pygame.time.Clock()
    # draws the gallows
def _gallow(self):
stand = pygame.draw.rect(screen, self.gallow_color, pygame.Rect(75, 280, 120, 10))
body = pygame.draw.rect(screen, self.gallow_color, pygame.Rect(128, 40, 10, 240))
hanger = pygame.draw.rect(screen, self.gallow_color, pygame.Rect(128, 40, 80, 10))
rope = pygame.draw.rect(screen, self.gallow_color, pygame.Rect(205, 40,10, 30))
    # draws one body part of the man for each wrong guess
def _man_pieces(self):
if self.wrong_guess_count == 1:
head = pygame.draw.circle(screen, self.body_color, [210, 85], 20, 0)
elif self.wrong_guess_count == 2:
body = pygame.draw.rect(screen, self.body_color, pygame.Rect(206, 105, 8, 45))
elif self.wrong_guess_count == 3:
r_arm = pygame.draw.line(screen, self.body_color, [183, 149], [200, 107], 6)
elif self.wrong_guess_count == 4:
            l_arm = pygame.draw.line(screen, self.body_color, [231, 149], [218, 107], 6)
elif self.wrong_guess_count == 5:
            r_leg = pygame.draw.line(screen, self.body_color, [189, 198], [208, 148], 6)
elif self.wrong_guess_count == 6:
l_leg = pygame.draw.line(screen, self.body_color, [224, 198], [210, 148], 6)
def _right_guess(self, guess_letter):
index_positions = [index for index, item in enumerate(self.secret_word) if item == guess_letter]
for i in index_positions:
self.guessed_word = self.guessed_word[0:i] + guess_letter + self.guessed_word[i+1:]
        # paint over the previously drawn guessed word so repeated renders do not stack
screen.fill(pygame.Color(self.background_color), (10, 370, 390, 20))
def _wrong_guess(self, guess_letter):
self.wrong_guesses.append(guess_letter)
self.wrong_guess_count += 1
self._man_pieces()
def _guess_taker(self, guess_letter):
if guess_letter in ascii_letters:
if guess_letter in self.secret_word and guess_letter not in self.guessed_word:
self._right_guess(guess_letter)
elif guess_letter not in self.secret_word and guess_letter not in self.wrong_guesses:
self._wrong_guess(guess_letter)
def _message(self):
# win situation
if self.guessed_word == self.secret_word:
self.taking_guess = False
screen.fill(pygame.Color(0,0,79), (40, 218, 320, 30))
message = self.font.render("YOU WIN!!", True, (255,235,0))
screen.blit(message,(152,224))
# lose situation
elif self.wrong_guess_count == 6:
self.taking_guess = False
screen.fill(pygame.Color("grey"), (40, 218, 320, 30))
message = self.font.render("GAME OVER YOU LOSE!!", True, (150,0,10))
screen.blit(message,(78,224))
            # shows the secret word if the player loses
word = self.font.render(f"secret word: {self.secret_word}", True, (255,255,255))
screen.blit(word,(10,300))
# removes the instruction message if not taking guesses anymore
if not self.taking_guess:
screen.fill(pygame.Color(self.background_color), (35, 460, 390, 20))
def main(self):
# game's main components (no need to update)
screen.fill(self.background_color)
self._gallow()
instructions = self.font.render('Press any key to take Guess', True, (9,255,78))
screen.blit(instructions,(35,460))
while self.running:
# shows the guessed word in the game window
guessed_word = self.font.render(f"guessed word: {self.guessed_word}", True, (0,0,138))
screen.blit(guessed_word,(10,370))
# shows the wrong guesses in the game window
wrong_guesses = self.font.render(f"wrong guesses: {' '.join(map(str, self.wrong_guesses))}", True, (125,0,0))
screen.blit(wrong_guesses,(10,420))
# checking game state
self._message()
for self.event in pygame.event.get():
if self.event.type == pygame.QUIT:
self.running = False
# manages keys pressed
elif self.event.type == pygame.KEYDOWN:
if self.taking_guess:
self._guess_taker(self.event.unicode)
pygame.display.flip()
self.FPS.tick(60)
pygame.quit()
if __name__ == "__main__":
h = Hangman()
h.main()
|
a8b96532682ad95ff2cf1d38fa06b6908ef22640
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/ProdLORelationVO.py
|
5a04ea1b0ec542fae1b594b05f8972beeafbb4a3
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,311
|
py
|
ProdLORelationVO.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ProdLORelationVO(object):
def __init__(self):
self._lo_code = None
self._lo_type = None
self._prod_code = None
self._prod_version = None
@property
def lo_code(self):
return self._lo_code
@lo_code.setter
def lo_code(self, value):
self._lo_code = value
@property
def lo_type(self):
return self._lo_type
@lo_type.setter
def lo_type(self, value):
self._lo_type = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def prod_version(self):
return self._prod_version
@prod_version.setter
def prod_version(self, value):
self._prod_version = value
def to_alipay_dict(self):
params = dict()
if self.lo_code:
if hasattr(self.lo_code, 'to_alipay_dict'):
params['lo_code'] = self.lo_code.to_alipay_dict()
else:
params['lo_code'] = self.lo_code
if self.lo_type:
if hasattr(self.lo_type, 'to_alipay_dict'):
params['lo_type'] = self.lo_type.to_alipay_dict()
else:
params['lo_type'] = self.lo_type
if self.prod_code:
if hasattr(self.prod_code, 'to_alipay_dict'):
params['prod_code'] = self.prod_code.to_alipay_dict()
else:
params['prod_code'] = self.prod_code
if self.prod_version:
if hasattr(self.prod_version, 'to_alipay_dict'):
params['prod_version'] = self.prod_version.to_alipay_dict()
else:
params['prod_version'] = self.prod_version
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ProdLORelationVO()
if 'lo_code' in d:
o.lo_code = d['lo_code']
if 'lo_type' in d:
o.lo_type = d['lo_type']
if 'prod_code' in d:
o.prod_code = d['prod_code']
if 'prod_version' in d:
o.prod_version = d['prod_version']
return o
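# A minimal round-trip sketch of the to/from dict API (illustrative values only,
# not taken from any real Alipay payload):
if __name__ == '__main__':
    vo = ProdLORelationVO()
    vo.lo_code, vo.lo_type = 'LO001', 'BASE'
    vo.prod_code, vo.prod_version = 'PROD001', '1.0.0'
    assert ProdLORelationVO.from_alipay_dict(vo.to_alipay_dict()).prod_code == 'PROD001'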
|
dd581cdbddc00b4b4c9b113fccbb89260fa88bc6
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/13_回溯算法/221028天池-02. 巡检周期.py
|
a00360277171c5a4cb1e820779bb4df16332a07d
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
221028天池-02. 巡检周期.py
|
# 221028 Tianchi-02. Patrol Cycles
# https://leetcode.cn/problems/zL2zJU/
# 2 <= record.length <= 12
# 1 <= robot.length <= 5
# 0 <= record[i] <= robot.length
# 0 <= robot[i] < record.length
# Given the patrol records over a period of time, record[i] is the number of
# robots that set out on patrol at time i, and robot[j] is the time of the
# j-th robot's first patrol.
# Separate each robot's records out of record and return each robot's patrol
# cycle in the order given by robot. If multiple valid separations exist,
# return any one of them.
# Cartesian product
from itertools import product
from typing import List
class Solution:
def observingPeriodicity(self, record: List[int], robot: List[int]) -> List[int]:
        # !Enumerate every robot's candidate patrol cycle
n, m = len(record), len(robot)
for periods in product(range(1, n + 1), repeat=m):
counter = [0] * n
for start, period in zip(robot, periods):
for i in range(start, n, period):
counter[i] += 1
if counter == record:
return list(periods)
raise Exception("No solution")
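# Quick sanity check (hypothetical input, not from the judge): robots starting at
# t=0 and t=1 with cycles 2 and 3 patrol at times {0, 2, 4} and {1, 4}, which
# produces the record [1, 1, 1, 0, 2].
if __name__ == "__main__":
    assert Solution().observingPeriodicity([1, 1, 1, 0, 2], [0, 1]) == [2, 3]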
|
bb9aeb2798377b96b857d0f694f12681c4ae34eb
|
704976ea552111c6a5af9cd7cb62b9d9abaf3996
|
/pypy/module/_collections/test/test_deque.py
|
21fcfcd401420acce726ba0f75fde715f3514192
|
[
"BSD-3-Clause"
] |
permissive
|
mesalock-linux/mesapy
|
4f02c5819ce7f2f6e249d34840f1aa097577645d
|
ed546d59a21b36feb93e2309d5c6b75aa0ad95c9
|
refs/heads/mesapy2.7
| 2023-08-16T21:33:02.239581
| 2019-08-13T10:29:43
| 2019-08-13T18:06:45
| 136,080,721
| 396
| 33
|
NOASSERTION
| 2020-04-01T03:05:18
| 2018-06-04T20:45:17
|
Python
|
UTF-8
|
Python
| false
| false
| 8,946
|
py
|
test_deque.py
|
class AppTestBasic:
spaceconfig = dict(usemodules=['_collections'])
def test_basics(self):
from _collections import deque
assert deque.__module__ == 'collections'
d = deque(xrange(-5125, -5000))
d.__init__(xrange(200))
for i in xrange(200, 400):
d.append(i)
for i in reversed(xrange(-200, 0)):
d.appendleft(i)
assert list(d) == range(-200, 400)
assert len(d) == 600
left = [d.popleft() for i in xrange(250)]
assert left == range(-200, 50)
assert list(d) == range(50, 400)
right = [d.pop() for i in xrange(250)]
right.reverse()
assert right == range(150, 400)
assert list(d) == range(50, 150)
def test_maxlen(self):
from _collections import deque
raises(ValueError, deque, 'abc', -1)
raises(ValueError, deque, 'abc', -2)
it = iter(range(10))
d = deque(it, maxlen=3)
assert list(it) == []
assert repr(d) == 'deque([7, 8, 9], maxlen=3)'
assert list(d) == range(7, 10)
d.appendleft(3)
assert list(d) == [3, 7, 8]
d.extend([20, 21])
assert list(d) == [8, 20, 21]
d.extendleft([-7, -6])
assert list(d) == [-6, -7, 8]
def test_maxlen_zero(self):
from _collections import deque
it = iter(range(100))
d = deque(it, maxlen=0)
assert list(d) == []
assert list(it) == []
d.extend(range(100))
assert list(d) == []
d.extendleft(range(100))
assert list(d) == []
def test_maxlen_attribute(self):
from _collections import deque
assert deque().maxlen is None
assert deque('abc').maxlen is None
assert deque('abc', maxlen=4).maxlen == 4
assert deque('abc', maxlen=0).maxlen == 0
raises((AttributeError, TypeError), "deque('abc').maxlen = 10")
def test_runtimeerror(self):
from _collections import deque
d = deque('abcdefg')
it = iter(d)
d.pop()
raises(RuntimeError, it.next)
#
d = deque('abcdefg')
it = iter(d)
d.append(d.pop())
raises(RuntimeError, it.next)
#
d = deque()
it = iter(d)
d.append(10)
raises(RuntimeError, it.next)
def test_count(self):
from _collections import deque
for s in ('', 'abracadabra', 'simsalabim'*50+'abc'):
s = list(s)
d = deque(s)
for letter in 'abcdeilmrs':
assert s.count(letter) == d.count(letter)
class MutatingCompare:
def __eq__(self, other):
d.pop()
return True
m = MutatingCompare()
d = deque([1, 2, 3, m, 4, 5])
raises(RuntimeError, d.count, 3)
def test_comparisons(self):
from _collections import deque
d = deque('xabc'); d.popleft()
for e in [d, deque('abc'), deque('ab'), deque(), list(d)]:
assert (d==e) == (type(d)==type(e) and list(d)==list(e))
assert (d!=e) == (not(type(d)==type(e) and list(d)==list(e)))
args = map(deque, ('', 'a', 'b', 'ab', 'ba', 'abc', 'xba', 'xabc', 'cba'))
for x in args:
for y in args:
assert (x == y) == (list(x) == list(y))
assert (x != y) == (list(x) != list(y))
assert (x < y) == (list(x) < list(y))
assert (x <= y) == (list(x) <= list(y))
assert (x > y) == (list(x) > list(y))
assert (x >= y) == (list(x) >= list(y))
assert cmp(x,y) == cmp(list(x),list(y))
def test_extend(self):
from _collections import deque
d = deque('a')
d.extend('bcd')
assert list(d) == list('abcd')
d.extend(d)
assert list(d) == list('abcdabcd')
def test_iadd(self):
from _collections import deque
d = deque('a')
original_d = d
d += 'bcd'
assert list(d) == list('abcd')
d += d
assert list(d) == list('abcdabcd')
assert original_d is d
def test_extendleft(self):
from _collections import deque
d = deque('a')
d.extendleft('bcd')
assert list(d) == list(reversed('abcd'))
d.extendleft(d)
assert list(d) == list('abcddcba')
def test_getitem(self):
from _collections import deque
n = 200
l = xrange(1000, 1000 + n)
d = deque(l)
for j in xrange(-n, n):
assert d[j] == l[j]
raises(IndexError, "d[-n-1]")
raises(IndexError, "d[n]")
def test_setitem(self):
from _collections import deque
n = 200
d = deque(xrange(n))
for i in xrange(n):
d[i] = 10 * i
assert list(d) == [10*i for i in xrange(n)]
l = list(d)
for i in xrange(1-n, 0, -3):
d[i] = 7*i
l[i] = 7*i
assert list(d) == l
def test_delitem(self):
from _collections import deque
d = deque("abcdef")
del d[-2]
assert list(d) == list("abcdf")
def test_reverse(self):
from _collections import deque
d = deque(xrange(1000, 1200))
d.reverse()
assert list(d) == list(reversed(range(1000, 1200)))
#
n = 100
data = map(str, range(n))
for i in range(n):
d = deque(data[:i])
r = d.reverse()
assert list(d) == list(reversed(data[:i]))
assert r is None
d.reverse()
assert list(d) == data[:i]
def test_rotate(self):
from _collections import deque
s = tuple('abcde')
n = len(s)
d = deque(s)
d.rotate(1) # verify rot(1)
assert ''.join(d) == 'eabcd'
d = deque(s)
d.rotate(-1) # verify rot(-1)
assert ''.join(d) == 'bcdea'
d.rotate() # check default to 1
assert tuple(d) == s
d.rotate(500000002)
assert tuple(d) == tuple('deabc')
d.rotate(-5000002)
assert tuple(d) == tuple(s)
def test_len(self):
from _collections import deque
d = deque('ab')
assert len(d) == 2
d.popleft()
assert len(d) == 1
d.pop()
assert len(d) == 0
raises(IndexError, d.pop)
raises(IndexError, d.popleft)
assert len(d) == 0
d.append('c')
assert len(d) == 1
d.appendleft('d')
assert len(d) == 2
d.clear()
assert len(d) == 0
assert list(d) == []
def test_remove(self):
from _collections import deque
d = deque('abcdefghcij')
d.remove('c')
assert d == deque('abdefghcij')
d.remove('c')
assert d == deque('abdefghij')
raises(ValueError, d.remove, 'c')
assert d == deque('abdefghij')
def test_repr(self):
from _collections import deque
d = deque(xrange(20))
e = eval(repr(d))
assert d == e
d.append(d)
assert '...' in repr(d)
def test_hash(self):
from _collections import deque
raises(TypeError, hash, deque('abc'))
def test_roundtrip_iter_init(self):
from _collections import deque
d = deque(xrange(200))
e = deque(d)
assert d is not e
assert d == e
assert list(d) == list(e)
def test_reduce(self):
from _collections import deque
#
d = deque('hello world')
r = d.__reduce__()
assert r == (deque, (list('hello world'),))
#
d = deque('hello world', 42)
r = d.__reduce__()
assert r == (deque, (list('hello world'), 42))
#
class D(deque):
pass
d = D('hello world')
d.a = 5
r = d.__reduce__()
assert r == (D, (list('hello world'), None), {'a': 5})
#
class D(deque):
pass
d = D('hello world', 42)
d.a = 5
r = d.__reduce__()
assert r == (D, (list('hello world'), 42), {'a': 5})
def test_copy(self):
from _collections import deque
import copy
mut = [10]
d = deque([mut])
e = copy.copy(d)
assert d is not e
assert d == e
mut[0] = 11
assert d == e
def test_reversed(self):
from _collections import deque
for s in ('abcd', xrange(200)):
assert list(reversed(deque(s))) == list(reversed(s))
def test_free(self):
import gc
from _collections import deque
class X(object):
freed = False
def __del__(self):
X.freed = True
d = deque()
d.append(X())
d.pop()
gc.collect(); gc.collect(); gc.collect()
assert X.freed
|
f512d32a7f315ff4bca8c48566256aa057f776a5
|
5eb52c07e5b1bd00af77306f927f382b684cd6ff
|
/indy_node/test/auth_rule/auth_framework/node_services.py
|
a0491dfe66a30123783100376638e9a7bb709932
|
[
"Apache-2.0"
] |
permissive
|
hyperledger/indy-node
|
bce39486988f5114581cff4f6d14fc1b7684143c
|
e6bb87d4c605aff9914491d062248b6ec857334c
|
refs/heads/main
| 2023-09-03T15:33:08.187153
| 2023-05-08T22:48:21
| 2023-05-08T22:48:21
| 77,021,566
| 691
| 783
|
Apache-2.0
| 2023-05-09T15:42:43
| 2016-12-21T05:45:04
|
Python
|
UTF-8
|
Python
| false
| false
| 10,460
|
py
|
node_services.py
|
import pytest
from abc import abstractmethod
from plenum.test.test_node import ensureElectionsDone
from indy_common.authorize.auth_actions import split_action_id
from indy_common.authorize.auth_constraints import AuthConstraint
from indy_node.test.auth_rule.auth_framework.basic import AuthTest
from plenum.common.constants import STEWARD_STRING, TRUSTEE, TRUSTEE_STRING, VALIDATOR
from plenum.common.exceptions import RequestRejectedException
from plenum.test.helper import waitForViewChange
from plenum.test.pool_transactions.helper import sdk_add_new_nym
from indy_node.test.helper import build_auth_rule_request_json
from stp_core.loop.eventually import eventually
class NodeAuthTest(AuthTest):
def __init__(self, env, action_id):
super().__init__(env, action_id)
self.tconf = env.tconf
self.tdir = env.tdir
self.sdk_pool_handle = env.sdk_pool_handle
self.client_wallet = env.sdk_wallet_client
self.trustee_wallet = self._create_trustee(env.sdk_wallet_trustee)
self.new_nodes = {}
self.txnPoolNodeSet = env.txnPoolNodeSet
def prepare(self):
pass
def run(self):
# Step 1. Change auth rule
self.send_and_check(self.changed_auth_rule, self.trustee_wallet)
        # Step 2. Check that we cannot send a NODE txn the old way
with pytest.raises(RequestRejectedException):
self.send_and_check(*self.node_req_1)
        # Step 3. Check that the new way works
self.send_and_check(*self.node_req_for_new_rule)
# Step 4. Return default auth rule
self.send_and_check(self.default_auth_rule, self.trustee_wallet)
        # Step 5. Check that the default auth rule works
self.send_and_check(*self.node_req_2)
self._demote_new_nodes()
def result(self):
pass
def get_changed_auth_rule(self):
constraint = AuthConstraint(role=TRUSTEE,
sig_count=1,
need_to_be_owner=False)
params = self._generate_auth_rule_params(constraint)
return build_auth_rule_request_json(
self.looper, self.trustee_wallet[1], **params
)
def _create_steward(self):
return sdk_add_new_nym(self.looper, self.sdk_pool_handle,
self.trustee_wallet,
role=STEWARD_STRING)
def _create_trustee(self, trustee_wallet):
return sdk_add_new_nym(self.looper, self.sdk_pool_handle,
trustee_wallet,
role=TRUSTEE_STRING)
def _add_node(self, wallet=None, services=[VALIDATOR]):
if not wallet:
wallet = self._create_steward()
req, node_data, node_name = self._build_node(wallet,
self.tconf,
self.tdir,
services=services)
self.new_nodes[wallet] = (node_data, node_name)
return req, node_data, node_name, wallet
def _demote_new_nodes(self):
view_no = self.txnPoolNodeSet[0].viewNo
for wallet, (node_data, node_name) in self.new_nodes.items():
print("demote {}".format(node_name))
req1, node_data1, node_name1 = self._build_node(wallet,
self.tconf,
self.tdir,
services=[],
node_name=node_name,
node_data=node_data)
self.send_and_check(req1, wallet)
view_no = self._wait_view_change_finish(view_no)
def _wait_view_change_finish(self, view_no):
view_no += 1
waitForViewChange(looper=self.looper, txnPoolNodeSet=self.txnPoolNodeSet,
expectedViewNo=view_no)
def check_not_in_view_change():
assert all([not n.master_replica._consensus_data.waiting_for_new_view
for n in self.txnPoolNodeSet])
# we may have multiple view changes since we can select the same Primary as in previous view,
# or select a demoted node as a Primary
self.looper.run(eventually(check_not_in_view_change, timeout=100))
return view_no
@abstractmethod
def _generate_auth_rule_params(self, constraint):
pass
class AddNodeTest(NodeAuthTest):
def __init__(self, env, action_id):
super().__init__(env,
action_id=action_id)
def prepare(self):
self.node_req_1 = self.get_node_req()
self.new_nodes.pop(self.node_req_1[1])
self.node_req_2 = self.get_node_req()
self.node_req_for_new_rule = self.get_node_req(self.trustee_wallet)
self.default_auth_rule = self.get_default_auth_rule()
self.changed_auth_rule = self.get_changed_auth_rule()
def get_node_req(self, steward_wallet=None):
req, node_data, node_name, wallet = self._add_node(steward_wallet)
return req, wallet
def _generate_auth_rule_params(self, constraint):
return dict(
auth_action=self.action.prefix,
auth_type=self.action.txn_type,
field=self.action.field,
new_value=self.action.new_value,
constraint=constraint.as_dict
)
class EditNodeTest(NodeAuthTest):
def __init__(self, env, action_id):
super().__init__(env,
action_id=action_id)
def _generate_auth_rule_params(self, constraint):
return dict(
auth_action=self.action.prefix,
auth_type=self.action.txn_type,
field=self.action.field,
new_value=self.action.new_value,
old_value=self.action.old_value,
constraint=constraint.as_dict
)
class EditNodeServicesTest(EditNodeTest):
def __init__(self, env, action_id):
super().__init__(env, action_id)
self.new_services = self.action.new_value
def prepare(self):
req, node_data, node_name, wallet = self._edit_node()
self.node_req_1 = req, wallet
req, node_data, node_name, wallet = self._edit_node(node_name=node_name,
node_data=node_data,
wallet=self.trustee_wallet)
self.node_req_for_new_rule = req, self.trustee_wallet
req, node_data, node_name, wallet = self._edit_node()
self.node_req_2 = req, wallet
self.default_auth_rule = self.get_default_auth_rule()
self.changed_auth_rule = self.get_changed_auth_rule()
def run(self):
# Step 1. Change auth rule
self.send_and_check(self.changed_auth_rule, self.trustee_wallet)
        # Step 2. Check that we cannot send a NODE txn the old way
with pytest.raises(RequestRejectedException):
self.send_and_check(*self.node_req_1)
view_no = self.txnPoolNodeSet[0].viewNo
        # Step 3. Check that the new way works
self.send_and_check(*self.node_req_for_new_rule)
view_no = self._wait_view_change_finish(view_no)
# Step 4. Return default auth rule
self.send_and_check(self.default_auth_rule, self.trustee_wallet)
        # Step 5. Check that the default auth rule works
self.send_and_check(*self.node_req_2)
view_no = self._wait_view_change_finish(view_no)
self._demote_new_nodes()
def _edit_node(self, wallet=None, services=[VALIDATOR], node_name=None, node_data=None):
if not (node_name and node_data and wallet):
req, node_data, node_name, wallet = self._add_node()
self.send_and_check(req, wallet)
req, node_data, node_name = self._build_node(wallet,
self.tconf,
self.tdir,
services=services,
node_name=node_name,
node_data=node_data)
return req, node_data, node_name, wallet
class AddNewNodeTest(AddNodeTest):
def __init__(self, env, action_id):
super().__init__(env,
action_id)
def run(self):
# Step 1. Change auth rule
self.send_and_check(self.changed_auth_rule, self.trustee_wallet)
        # Step 2. Check that we cannot send a NODE txn the old way
with pytest.raises(RequestRejectedException):
self.send_and_check(*self.node_req_1)
prev_view_no = self.txnPoolNodeSet[0].viewNo
new_node_count = len(self.new_nodes)
        # Step 3. Check that the new way works
self.send_and_check(*self.node_req_for_new_rule)
prev_view_no = self._wait_view_change_finish(prev_view_no)
# Step 4. Return default auth rule
self.send_and_check(self.default_auth_rule, self.trustee_wallet)
        # Step 5. Check that the default auth rule works
self.send_and_check(*self.node_req_2)
prev_view_no = self._wait_view_change_finish(prev_view_no)
self._demote_new_nodes()
class AddNewNodeEmptyServiceTest(AddNodeTest):
def __init__(self, env, action_id):
super().__init__(env,
action_id)
def _add_node(self, wallet=None, services=[]):
return super()._add_node(wallet, services)
def _demote_new_nodes(self):
        # Skip _demote_new_nodes because in this test
        # all nodes are created with services=[]
pass
class DemoteNodeTest(EditNodeServicesTest):
def __init__(self, env, action_id):
super().__init__(env,
action_id)
def _edit_node(self, wallet=None, services=[], node_name=None, node_data=None):
return super()._edit_node(wallet, services, node_name, node_data)
def _demote_new_nodes(self):
pass
class PromoteNodeTest(EditNodeServicesTest):
def __init__(self, env, action_id):
super().__init__(env,
action_id)
def _add_node(self, wallet=None, services=[]):
return super()._add_node(wallet, services)
|
770841e975ebad8e175065db1e02275668956bc7
|
450b551bd5f5c99bcf175ebdd114ecfb55788f3e
|
/actions/mail.py
|
c51e6f350f9e22f059de23ec3385dbe5cfefe74f
|
[] |
no_license
|
LogicJake/MLCompetitionHub
|
2482f8ce3ac7eb9d46a5fc62fa92147405cb1561
|
ab5e68b5aa424b98bd9ea98cf094b35bd12bb49d
|
refs/heads/master
| 2023-09-03T17:54:33.843632
| 2023-09-03T08:01:13
| 2023-09-03T08:01:13
| 234,690,463
| 144
| 27
| null | 2023-05-22T22:38:21
| 2020-01-18T06:10:35
|
Python
|
UTF-8
|
Python
| false
| false
| 2,671
|
py
|
mail.py
|
import copy
import os
import smtplib
from email.header import Header
from email.mime.text import MIMEText
from jinja2 import Environment, PackageLoader
STANDARD_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S+08:00'
def generate(datas_):
datas = copy.deepcopy(datas_)
try:
with open('urls.txt', 'r') as f:
urls = f.readlines()
except Exception:
urls = []
urls = [url.strip() for url in urls]
new_datas = []
for data in datas:
competitions = []
for c in data['competitions']:
start_time = c['start_time']
deadline = c['deadline']
url = c['url']
            # only include competitions that have not been sent before
if url not in urls:
                # convert to a standard-format time string
                if start_time is None:
                    start_time = '未给出具体时间'  # "no specific time given"
                else:
                    start_time = start_time.strftime(STANDARD_TIME_FORMAT)
                if deadline is None:
                    deadline = '未给出具体时间'  # "no specific time given"
                else:
                    deadline = deadline.strftime(STANDARD_TIME_FORMAT)
cp = {
'name': c['name'],
'url': url,
'description': c['description'],
'deadline': deadline,
'reward': c['reward'],
'start_time': start_time,
}
competitions.append(cp)
if len(competitions) != 0:
cp = {}
cp['name'] = data['name']
cp['competitions'] = competitions
new_datas.append(cp)
if len(new_datas) == 0:
return
env = Environment(loader=PackageLoader('actions'))
template = env.get_template('mail.j2')
content = template.render(datas=new_datas)
content = content.strip()
mail_server = os.environ.get('mail_server')
mail_port = os.environ.get('mail_port')
mail_username = os.environ.get('mail_username')
mail_password = os.environ.get('mail_password')
mail_sender = os.environ.get('mail_sender')
with open('mails.txt', 'r') as f:
receivers = f.readlines()
receivers = [receiver.strip() for receiver in receivers]
receivers = [mail_sender] + receivers
message = MIMEText(content, 'html', 'utf-8')
message['From'] = mail_sender
message['To'] = ','.join(receivers)
    message['Subject'] = Header('MLCompetitionHub: 有新的比赛了!', 'utf-8')  # "new competitions!"
smtpObj = smtplib.SMTP_SSL(mail_server, mail_port)
smtpObj.login(mail_username, mail_password)
smtpObj.sendmail(mail_sender, receivers, message.as_string())
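# Hedged sketch of the input shape generate() expects (hypothetical values; left
# as comments because generate() would actually render and send mail):
#     from datetime import datetime
#     generate([{
#         'name': 'Tianchi',
#         'competitions': [{
#             'name': 'Some Contest', 'url': 'https://example.com/c/1',
#             'description': '...', 'reward': '$10,000',
#             'start_time': datetime(2023, 1, 1), 'deadline': None,
#         }],
#     }])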
|
8d86ea63554a44f405869e00f25338accb3f06f9
|
495809d075e5faeb35dd519e411a583359aa69de
|
/algebric expression.py
|
fa8c3a75e312c272079279808530d5d06d210ddb
|
[] |
no_license
|
samirthapaliya/python-file
|
f09b833431a584bb64f40fb440e6258c4352ffc5
|
47ae0b720f8ec48b4173756c0be9f2e5b17f7d42
|
refs/heads/master
| 2021-06-26T07:21:51.964423
| 2021-03-15T12:52:19
| 2021-03-15T12:52:19
| 220,886,454
| 123
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
algebric expression.py
|
a = int(input("x: "))
b = int(input("y: "))
# expand (x + y)**3 using the binomial formula
c = a**3 + 3*a**2*b + 3*a*b**2 + b**3
print(c)
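# sanity check: the expansion above must equal (x + y)**3 for any integer inputs
assert c == (a + b)**3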
|
e7b87fe3ab1800e756db7c0543fb99510e0bda01
|
f2034c76a11ce6296131d2bab89a5dae7d59edfe
|
/python/nano/src/bigdl/nano/deps/automl/optuna_backend.py
|
dfa42c1f81df8e33a33e71566e6a0df0c76ef5bc
|
[
"Apache-2.0"
] |
permissive
|
intel-analytics/BigDL
|
e22cd917eecc7340bda3df4356acba0623a62ef6
|
4ffa012a426e0d16ed13b707b03d8787ddca6aa4
|
refs/heads/main
| 2023-08-22T06:31:37.923091
| 2023-08-22T02:58:42
| 2023-08-22T02:58:42
| 66,823,715
| 4,913
| 1,327
|
Apache-2.0
| 2023-09-14T10:41:50
| 2016-08-29T07:59:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 6,876
|
py
|
optuna_backend.py
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.nano.automl.hpo.backend import PrunerType, SamplerType
from bigdl.nano.utils.common import invalidInputError
from bigdl.nano.automl.hpo.space import SimpleSpace, NestedSpace, AutoObject
from bigdl.nano.automl.hpo.space import (
AutoObject, Space, SingleParam,
_get_hp_prefix)
import optuna
class OptunaBackend(object):
"""A Wrapper to shield user from Optuna specific configurations and API\
Later may support other HPO search engines."""
pruner_map = {
PrunerType.HyperBand: optuna.pruners.HyperbandPruner,
PrunerType.Median: optuna.pruners.MedianPruner,
PrunerType.Nop: optuna.pruners.NopPruner,
PrunerType.Patient: optuna.pruners.PatientPruner,
PrunerType.Percentile: optuna.pruners.PercentilePruner,
PrunerType.SuccessiveHalving: optuna.pruners.SuccessiveHalvingPruner,
PrunerType.Threshold: optuna.pruners.ThresholdPruner,
}
sampler_map = {
SamplerType.TPE: optuna.samplers.TPESampler,
SamplerType.CmaEs: optuna.samplers.CmaEsSampler,
SamplerType.Grid: optuna.samplers.GridSampler,
SamplerType.Random: optuna.samplers.RandomSampler,
SamplerType.PartialFixed: optuna.samplers.PartialFixedSampler,
SamplerType.NSGAII: optuna.samplers.NSGAIISampler,
SamplerType.MOTPE: optuna.samplers.MOTPESampler,
}
SPLITTER = u':' # For splitting prefix and names of hyperparam
@staticmethod
def get_other_args(kwargs, kwspaces):
"""Get key-word arguments which are not search spaces."""
return {k: kwargs[k] for k in set(kwargs) - set(kwspaces)}
@staticmethod
def _sample_space(trial, hp_name, hp_obj):
hp_type = str(type(hp_obj)).lower() # type of hyperparam
if 'integer' in hp_type or 'float' in hp_type or \
'categorical' in hp_type or 'ordinal' in hp_type:
try:
if 'integer' in hp_type:
hp_dimension = trial.suggest_int(
name=hp_name, low=int(hp_obj.lower), high=int(hp_obj.upper))
elif 'float' in hp_type:
                    if hp_obj.log:  # log-scale hyperparameter
hp_dimension = trial.suggest_loguniform(
name=hp_name, low=float(hp_obj.lower), high=float(hp_obj.upper))
else:
hp_dimension = trial.suggest_float(
name=hp_name, low=float(hp_obj.lower), high=float(hp_obj.upper))
elif 'categorical' in hp_type:
hp_dimension = trial.suggest_categorical(
name=hp_name, choices=hp_obj.choices)
elif 'ordinal' in hp_type:
hp_dimension = trial.suggest_categorical(
name=hp_name, choices=hp_obj.sequence)
            except RuntimeError:
                # TODO ValueErrors might be thrown due to other reasons.
invalidInputError(False,
"If you set search space in model, "
"you must call model.search before model.fit.")
else:
invalidInputError(False,
"unknown hyperparameter type %s for param %s" %
(hp_type, hp_name))
return hp_dimension
@staticmethod
def get_hpo_config(trial, configspace):
"""Get hyper parameter suggestions from search space settings."""
# TODO better ways to map ConfigSpace to optuna spaces
hp_ordering = configspace.get_hyperparameter_names()
config = {}
# hp_prefix = _get_cs_prefix(configspace)
for hp_name in hp_ordering:
hp = configspace.get_hyperparameter(hp_name)
# TODO generate meaningful prefix for user in AutoObj
hp_prefix = _get_hp_prefix(hp)
optuna_hp_name = OptunaBackend._format_hp_name(hp_prefix, hp_name)
hp_dimension = OptunaBackend._sample_space(trial, optuna_hp_name, hp)
config[hp_name] = hp_dimension
return config
@staticmethod
def _format_hp_name(prefix, hp_name):
if prefix:
return "{}{}{}".format(
prefix, OptunaBackend.SPLITTER, hp_name)
else:
return hp_name
@staticmethod
def instantiate_param(trial, kwargs, arg_name):
"""
Instantiate auto objects in kwargs with trial params at runtime.
Note the params are replaced IN-PLACE
"""
        # instantiate auto objects found in the runtime params
v = kwargs.get(arg_name, None)
if not v:
return kwargs
if not isinstance(v, Space):
value = v
elif isinstance(v, AutoObject):
value = OptunaBackend.instantiate(trial, v)
else:
pobj = SingleParam(arg_name, v)
config = OptunaBackend.get_hpo_config(trial, pobj.cs)
value = pobj.sample(**config)
kwargs[arg_name] = value
return kwargs
@staticmethod
def instantiate(trial, lazyobj):
"""Instantiate a lazyobject from a trial's sampled param set."""
config = OptunaBackend.gen_config(trial, lazyobj)
return lazyobj.sample(**config)
@staticmethod
def gen_config(trial, automl_obj):
"""Generate the param config from a trial's sampled param set."""
configspace = automl_obj.cs
config = OptunaBackend.get_hpo_config(trial, configspace)
other_kwargs = OptunaBackend.get_other_args(
automl_obj.kwargs, automl_obj.kwspaces)
config.update(other_kwargs)
return config
@staticmethod
def create_sampler(sampler_type, kwargs):
"""Create a hyperparameter sampler by type."""
sampler_class = OptunaBackend.sampler_map.get(sampler_type)
        return sampler_class(**kwargs)
@staticmethod
def create_pruner(pruner_type, kwargs):
"""Create a pruner by type."""
pruner_class = OptunaBackend.pruner_map.get(pruner_type)
return pruner_class(**kwargs)
@staticmethod
def create_study(**kwargs):
"""Create a study to drive the hyperparameter search."""
return optuna.create_study(**kwargs)
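# Hedged usage sketch (assumes optuna is installed; the objective is hypothetical
# and not part of this module):
#     study = OptunaBackend.create_study(
#         direction='minimize',
#         sampler=OptunaBackend.create_sampler(SamplerType.Random, {}))
#     study.optimize(lambda t: t.suggest_float('x', -1.0, 1.0) ** 2, n_trials=10)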
|
b880d753ed18370f2de87c0e6aa7dac73f166df5
|
4960036fd0b875527c2e749b75680fe4a80177d6
|
/server/server/common/queries.py
|
a75a8a1d7905dd0f82ec850e7bcdbbdc02ef0801
|
[] |
no_license
|
suttacentral/suttacentral
|
ab62257d56c056b361d39a27029a94c2accb58ae
|
f6c4eb768c0ad8714a6b0a93d0160144048c5e7c
|
refs/heads/master
| 2023-08-31T21:51:06.761377
| 2023-08-29T03:35:25
| 2023-08-29T03:35:25
| 89,286,697
| 133
| 31
| null | 2023-09-14T12:09:05
| 2017-04-24T21:00:39
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 54,662
|
py
|
queries.py
|
LANGUAGES = '''
FOR l in language
SORT l.name
RETURN {
"uid": l.uid,
"name": l.name,
"iso_code": l.iso_code,
"is_root": l.is_root,
"localized": !!l.localized,
"localized_percent": l.localized_percent ? l.localized_percent : 0
}
'''
TEXTS_BY_LANG = '''
FOR text IN html_text
FILTER text.lang == @lang
LET nav_doc = (
RETURN DOCUMENT(CONCAT('super_nav_details/', text.uid))
)[0]
RETURN {
file_path: text.file_path,
uid: text.uid,
mtime: text.mtime,
author: text.author,
author_uid: text.author_uid,
author_short: text.author_short,
root_lang: nav_doc.root_lang,
acronym: nav_doc.acronym
}
'''
TEXTS_BY_LANG_FOR_SEARCH = '''
FOR text IN html_text
FILTER text.lang == @lang
LET nav_doc = (
RETURN DOCUMENT(CONCAT('super_nav_details/', text.uid))
)[0]
LET full_lang = (
FOR lang IN language
FILTER lang.uid == text.lang
RETURN lang.name
)[0]
RETURN {
file_path: text.file_path,
uid: text.uid,
author: text.author,
author_uid: text.author_uid,
author_short: text.author_short,
root_lang: nav_doc.root_lang,
lang: text.lang,
full_lang: full_lang,
acronym: nav_doc.acronym
}
'''
BILARA_TEXT_BY_LANG = '''
FOR text IN sc_bilara_texts
FILTER text.lang == @lang AND ('root' IN text.muids OR 'translation' IN text.muids)
LET nav_doc = (
RETURN DOCUMENT(CONCAT('super_nav_details/', text.uid))
)[0]
LET name_doc = (
FOR name IN names
FILTER name.uid == text.uid AND name.lang == text.lang
LIMIT 1
RETURN name
)[0]
LET root_name_doc = (
FOR name IN names
FILTER name.uid == text.uid AND name.is_root == true
LIMIT 1
RETURN name
)[0]
LET author_doc = (
FOR author IN bilara_author_edition
FILTER author.uid IN text.muids
LIMIT 1
RETURN author
)[0]
LET mtime_doc = (
RETURN DOCUMENT(CONCAT('mtimes/', REGEX_REPLACE(SUBSTRING(text.file_path, FIND_FIRST(text.file_path, 'sc_bilara_data')), '/', '_')))
)[0]
RETURN {
uid: text.uid,
title: name_doc.name ? name_doc.name : root_name_doc.name,
strings_path: text.file_path,
author: author_doc.long_name,
author_uid: author_doc.uid,
author_short: author_doc.short_name,
root_lang: nav_doc.root_lang,
acronym: nav_doc.acronym,
mtime: mtime_doc.mtime / 1000000000
}
'''
BILARA_TEXT_BY_LANG_FOR_SEARCH = '''
FOR text IN sc_bilara_texts
FILTER text.lang == @lang AND ('root' IN text.muids OR 'translation' IN text.muids) AND ('site' NOT IN text.muids)
AND NOT CONTAINS(text.file_path, 'blurb')
AND NOT CONTAINS(text.file_path, '-name')
LET nav_doc = (
RETURN DOCUMENT(CONCAT('super_nav_details/', text.uid))
)[0]
LET name_doc = (
FOR name IN names
FILTER name.uid == text.uid AND name.lang == text.lang
LIMIT 1
RETURN name
)[0]
LET root_name_doc = (
FOR name IN names
FILTER name.uid == text.uid AND name.is_root == true
LIMIT 1
RETURN name
)[0]
LET author_doc = (
FOR author IN bilara_author_edition
FILTER author.uid IN text.muids
LIMIT 1
RETURN author
)[0]
LET mtime_doc = (
RETURN DOCUMENT(CONCAT('mtimes/', REGEX_REPLACE(SUBSTRING(text.file_path, FIND_FIRST(text.file_path, 'sc_bilara_data')), '/', '_')))
)[0]
LET full_lang = (
FOR lang IN language
FILTER lang.uid == text.lang
RETURN lang.name
)[0]
RETURN {
uid: text.uid,
title: name_doc.name ? name_doc.name : root_name_doc.name,
strings_path: text.file_path,
author: author_doc.long_name,
author_uid: author_doc.uid,
author_short: author_doc.short_name,
lang: text.lang,
full_lang: full_lang,
root_lang: nav_doc.root_lang,
acronym: nav_doc.acronym
}
'''
TEXT_REFERENCES = '''
FOR text IN sc_bilara_texts
FILTER text.lang == @lang AND ('reference' IN text.muids)
LET nav_doc = (
RETURN DOCUMENT(CONCAT('super_nav_details/', text.uid))
)[0]
LET name_doc = (
FOR name IN names
FILTER name.uid == text.uid AND name.lang == text.lang
LIMIT 1
RETURN name
)[0]
LET root_name_doc = (
FOR name IN names
FILTER name.uid == text.uid AND name.is_root == true
LIMIT 1
RETURN name
)[0]
LET mtime_doc = (
RETURN DOCUMENT(CONCAT('mtimes/', REGEX_REPLACE(SUBSTRING(text.file_path, FIND_FIRST(text.file_path, 'sc_bilara_data')), '/', '_')))
)[0]
LET full_lang = (
FOR lang IN language
FILTER lang.uid == text.lang
RETURN lang.name
)[0]
RETURN {
uid: text.uid,
title: name_doc.name ? name_doc.name : root_name_doc.name,
strings_path: text.file_path,
lang: text.lang,
full_lang: full_lang,
root_lang: nav_doc.root_lang,
acronym: nav_doc.acronym
}
'''
# Returns all uids in proper order assuming num is set correctly in data
UIDS_IN_ORDER_BY_DIVISION = '''
FOR division IN super_nav_details
FILTER division.type == 'branch'
LET division_uids = (
FOR doc, edge, path IN 0..10 OUTBOUND division super_nav_details_edges OPTIONS {order: 'dfs'}
RETURN doc.uid
)
RETURN {'division': division.uid, 'uids': division_uids}
'''
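# Illustrative execution sketch (assumes a python-arango ``db`` handle, which this
# module does not create itself):
#     cursor = db.aql.execute(UIDS_IN_ORDER_BY_DIVISION)
#     uids_by_division = {row['division']: row['uids'] for row in cursor}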
CURRENT_MTIMES = '''
WITH @@collection /* With statement forces query optimizer to work */
FOR text IN @@collection
FILTER text.lang == @lang
RETURN {uid: text.uid, author_uid: text.author_uid, mtime: text.mtime}
'''
CURRENT_BILARA_MTIMES = '''
WITH @@collection /* With statement forces query optimizer to work */
FOR text IN @@collection
FILTER text.lang == @lang AND ('root' IN text.muids OR 'translation' IN text.muids)
LET mtime_doc = (
RETURN DOCUMENT(CONCAT('mtimes/', REGEX_REPLACE(SUBSTRING(text.file_path, FIND_FIRST(text.file_path, "sc_bilara_data")), "/", "_")))
)[0]
RETURN {
uid: text.uid,
author_uid: text.muids[2],
mtime: mtime_doc.mtime
}
'''
MENU = '''
FOR navigation_doc IN super_nav_details
FILTER navigation_doc.type == 'root'
// Node children
LET descendants = (
FOR descendant IN OUTBOUND navigation_doc super_nav_details_edges OPTIONS {order: 'dfs'}
        // Look up the doc's root language name in the language collection
LET lang_name = DOCUMENT('language', descendant.root_lang)['name']
LET child_range = DOCUMENT('child_range', descendant.uid)['range']
LET translated_name = DOCUMENT('names', CONCAT_SEPARATOR('_', descendant.uid, @language))['name']
        // Try to get two blurbs: the English one and the user-language translation
LET en_and_language_blurbs = (
FOR blurb IN blurbs
FILTER blurb.uid == descendant.uid AND (blurb.lang == @language OR blurb.lang == 'en')
LIMIT 2
RETURN blurb
)
        // Try to get the blurb in the user's language, falling back to English if it does not exist
LET blurb = (
RETURN LENGTH(en_and_language_blurbs) == 2 ?
(FOR blurb IN en_and_language_blurbs FILTER blurb.lang == @language RETURN blurb)[0] :
en_and_language_blurbs[0]
)[0].blurb
LET yellow_brick_road = DOCUMENT('yellow_brick_road', CONCAT_SEPARATOR('_', descendant.uid, @language))
RETURN {
uid: descendant.uid,
root_name: descendant.name,
translated_name: translated_name,
acronym: descendant.acronym,
blurb: blurb,
node_type: descendant.type,
root_lang_iso: descendant.root_lang,
root_lang_name: lang_name,
child_range: child_range,
yellow_brick_road: !!yellow_brick_road,
yellow_brick_road_count: yellow_brick_road ? yellow_brick_road.count : 0,
}
)
LET lang_name = DOCUMENT('language', navigation_doc.root_lang)['name']
LET child_range = DOCUMENT('child_range', navigation_doc.uid)['range']
LET translated_name = DOCUMENT('names', CONCAT_SEPARATOR('_', navigation_doc.uid, @language))['name']
LET en_and_language_blurbs = (
FOR blurb IN blurbs
FILTER blurb.uid == navigation_doc.uid AND (blurb.lang == @language OR blurb.lang == 'en')
LIMIT 2
RETURN blurb
)
LET blurb = (
RETURN LENGTH(en_and_language_blurbs) == 2 ?
(FOR blurb IN en_and_language_blurbs FILTER blurb.lang == @language RETURN blurb)[0] :
en_and_language_blurbs[0]
)[0].blurb
LET yellow_brick_road = DOCUMENT('yellow_brick_road', CONCAT_SEPARATOR('_', navigation_doc.uid, @language))
RETURN {
uid: navigation_doc.uid,
root_name: navigation_doc.name,
translated_name: translated_name,
blurb: blurb,
acronym: navigation_doc.acronym,
node_type: navigation_doc.type,
root_lang_iso: navigation_doc.root_lang,
root_lang_name: lang_name,
child_range: child_range,
yellow_brick_road: !!yellow_brick_road,
yellow_brick_road_count: yellow_brick_road ? yellow_brick_road.count : 0,
children: descendants,
}
'''
SUBMENU = '''
LET navigation_doc = DOCUMENT('super_nav_details', @submenu_id)
LET descendants = (
FOR descendant IN OUTBOUND navigation_doc super_nav_details_edges OPTIONS {order: 'dfs'}
LET lang_name = DOCUMENT('language', descendant.root_lang)['name']
LET child_range = DOCUMENT('child_range', descendant.uid)['range']
LET translated_name = DOCUMENT('names', CONCAT_SEPARATOR('_', descendant.uid, @language))['name']
LET en_and_language_blurbs = (
FOR blurb IN blurbs
FILTER blurb.uid == descendant.uid AND (blurb.lang == @language OR blurb.lang == 'en')
LIMIT 2
RETURN blurb
)
LET blurb = (
RETURN LENGTH(en_and_language_blurbs) == 2 ?
(FOR blurb IN en_and_language_blurbs FILTER blurb.lang == @language RETURN blurb)[0] :
en_and_language_blurbs[0]
)[0].blurb
LET yellow_brick_road = DOCUMENT('yellow_brick_road', CONCAT_SEPARATOR('_', descendant.uid, @language))
RETURN {
uid: descendant.uid,
root_name: descendant.name,
translated_name: translated_name,
acronym: descendant.acronym,
blurb: blurb,
node_type: descendant.type,
root_lang_iso: descendant.root_lang,
root_lang_name: lang_name,
child_range: child_range,
yellow_brick_road: !!yellow_brick_road,
yellow_brick_road_count: yellow_brick_road ? yellow_brick_road.count : 0,
}
)
LET lang_name = DOCUMENT('language', navigation_doc.root_lang)['name']
LET child_range = DOCUMENT('child_range', navigation_doc.uid)['range']
LET translated_name = DOCUMENT('names', CONCAT_SEPARATOR('_', navigation_doc.uid, @language))['name']
LET en_and_language_blurbs = (
FOR blurb IN blurbs
FILTER blurb.uid == navigation_doc.uid AND (blurb.lang == @language OR blurb.lang == 'en')
LIMIT 2
RETURN blurb
)
LET blurb = (
RETURN LENGTH(en_and_language_blurbs) == 2 ?
(FOR blurb IN en_and_language_blurbs FILTER blurb.lang == @language RETURN blurb)[0] :
en_and_language_blurbs[0]
)[0].blurb
LET yellow_brick_road = DOCUMENT('yellow_brick_road', CONCAT_SEPARATOR('_', navigation_doc.uid, @language))
RETURN {
uid: navigation_doc.uid,
root_name: navigation_doc.name,
translated_name: translated_name,
node_type: navigation_doc.type,
blurb: blurb,
acronym: navigation_doc.acronym,
root_lang_iso: navigation_doc.root_lang,
root_lang_name: lang_name,
child_range: child_range,
yellow_brick_road: !!yellow_brick_road,
yellow_brick_road_count: yellow_brick_road ? yellow_brick_road.count : 0,
children: descendants,
}
'''
TIPITAKA_MENU = '''
FOR navigation_doc IN super_nav_details
FILTER navigation_doc.type == 'root' AND navigation_doc.uid in ['sutta', 'vinaya', 'abhidhamma']
LET lang_name = DOCUMENT('language', navigation_doc.root_lang)['name']
LET child_range = DOCUMENT('child_range', navigation_doc.uid)['range']
LET translated_name = DOCUMENT('names', CONCAT_SEPARATOR('_', navigation_doc.uid, @language))['name']
LET en_and_language_blurbs = (
FOR blurb IN blurbs
FILTER blurb.uid == navigation_doc.uid AND (blurb.lang == @language OR blurb.lang == 'en')
LIMIT 2
RETURN blurb
)
LET blurb = (
RETURN LENGTH(en_and_language_blurbs) == 2 ?
(FOR blurb IN en_and_language_blurbs FILTER blurb.lang == @language RETURN blurb)[0] :
en_and_language_blurbs[0]
)[0].blurb
LET yellow_brick_road = DOCUMENT('yellow_brick_road', CONCAT_SEPARATOR('_', navigation_doc.uid, @language))
RETURN {
uid: navigation_doc.uid,
root_name: navigation_doc.name,
translated_name: translated_name,
blurb: blurb,
acronym: navigation_doc.acronym,
node_type: navigation_doc.type,
yellow_brick_road: !!yellow_brick_road,
yellow_brick_road_count: yellow_brick_road ? yellow_brick_road.count : 0,
}
'''
SET_SUPER_NAV_DETAILS_ROOT_LANGUAGES = '''
FOR doc IN super_nav_details
FILTER doc.root_lang
FOR child IN 1..100 OUTBOUND doc super_nav_details_edges OPTIONS {order: 'dfs'}
FILTER child
UPDATE child WITH { root_lang: doc.root_lang } IN super_nav_details
'''
SET_SUPER_NAV_DETAILS_NODES_TYPES = '''
FOR doc IN super_nav_details
LET child = (
FOR child IN OUTBOUND doc super_nav_details_edges OPTIONS {order: 'dfs'}
LIMIT 1
RETURN child
)[0]
LET parent = (
FOR parent IN INBOUND doc super_nav_details_edges OPTIONS {order: 'dfs'}
LIMIT 1
RETURN parent
)[0]
LET node_type_using_child = child ? 'branch' : 'leaf'
LET node_type = parent ? node_type_using_child : 'root'
UPDATE doc WITH { type: node_type } IN super_nav_details
'''
BUILD_YELLOW_BRICK_ROAD = '''
FOR lang IN language
LET lang_code = lang.iso_code
LET translated_uids = (
FOR doc IN v_text
SEARCH doc.lang == lang_code
RETURN DISTINCT doc.uid
)
FOR t_uid IN translated_uids
LET nav_doc = DOCUMENT('super_nav_details', t_uid)
FILTER nav_doc
LET translations_count = COUNT(
FOR doc IN v_text
SEARCH doc.lang == lang_code AND doc.uid == t_uid
RETURN doc
)
FOR doc IN 0..100 INBOUND nav_doc super_nav_details_edges OPTIONS {order: 'dfs'}
LET yellow_brick_doc = {
_key: CONCAT_SEPARATOR('_', doc.uid, lang_code),
uid: doc.uid,
lang: lang_code,
type: doc.type,
count: translations_count,
}
INSERT yellow_brick_doc INTO yellow_brick_road OPTIONS { overwriteMode: 'ignore' }
'''
COUNT_YELLOW_BRICK_ROAD = '''
FOR yb_doc IN yellow_brick_road
FILTER yb_doc.type == 'branch' OR yb_doc.type == 'root'
LET translated_leaf_count = SUM(
FOR child IN 1..100 OUTBOUND DOCUMENT('super_nav_details', yb_doc.uid) super_nav_details_edges OPTIONS {order: 'dfs'}
FILTER child.type == 'leaf'
LET key = CONCAT_SEPARATOR('_', child.uid, yb_doc.lang)
LET yb_child = DOCUMENT('yellow_brick_road', key)
FILTER yb_child
RETURN yb_child.count
)
FILTER translated_leaf_count != 0
UPDATE yb_doc WITH { count: translated_leaf_count } IN yellow_brick_road
'''
# Takes 2 bind_vars: `language` and `uid` of root element
SUTTAPLEX_LIST = '''
FOR v, e, p IN 0..6 OUTBOUND CONCAT('super_nav_details/', @uid) super_nav_details_edges OPTIONS {order: 'dfs'}
LET legacy_translations = (
FOR text IN html_text
FILTER text.uid == v.uid
LET lang_doc = DOCUMENT('language', text.lang)
LET res = {
lang: text.lang,
lang_name: lang_doc.name,
is_root: lang_doc.is_root,
author: text.author,
author_short: text.author_short,
author_uid: text.author_uid,
publication_date: text.publication_date,
id: text._key,
segmented: false,
volpage: text.volpage,
has_comment: false
}
// Add title if it is in desired language
RETURN (text.lang == @language) ? MERGE(res, {title: text.name}) : res
)
LET bilara_translations = (
FOR text IN sc_bilara_texts
FILTER text.uid == v.uid AND ('root' IN text.muids OR 'translation' IN text.muids)
SORT text.lang
LET lang_doc = DOCUMENT('language', text.lang)
LET author_doc = (
FOR author IN bilara_author_edition
FILTER author.uid IN text.muids
LIMIT 1
RETURN author
)[0]
LET name_doc = (
FOR name IN names
FILTER name.uid == v.uid AND name.lang == text.lang
LIMIT 1
RETURN name
)[0]
LET text_comment = (
FOR doc IN sc_bilara_texts
FILTER doc.uid == v.uid AND 'comment' IN doc.muids AND author_doc.uid in doc.muids
RETURN doc.muids
)
RETURN {
lang: text.lang,
lang_name: lang_doc.name,
is_root: lang_doc.is_root,
author: author_doc.long_name,
author_short: author_doc.short_name,
author_uid: author_doc.uid,
publication_date: null,
id: text._key,
segmented: true,
title: name_doc.name,
volpage: null,
has_comment: LENGTH(text_comment) > 0
}
)
LET blurbs_by_uid = (
FOR blurb IN blurbs
FILTER blurb.uid == v.uid AND (blurb.lang == @language OR blurb.lang == 'en')
LIMIT 2
RETURN blurb
)
LET blurb = (
RETURN LENGTH(blurbs_by_uid) == 2 ?
(FOR blurb IN blurbs_by_uid FILTER blurb.lang == @language RETURN blurb.blurb)[0] :
blurbs_by_uid[0].blurb
)[0]
LET difficulty = (
FOR difficulty IN difficulties
FILTER difficulty.uid == v.uid
LIMIT 1
RETURN difficulty.difficulty
)[0]
LET translations = FLATTEN([bilara_translations, legacy_translations])
LET volpages = (
FOR volpages IN text_extra_info
FILTER volpages.uid == v.uid
LIMIT 1
RETURN volpages.volpage
)[0]
LET is_segmented_original = (
FOR translation IN translations
FILTER translation.lang == v.root_lang AND translation.segmented == true
LIMIT 1
RETURN true
)[0]
LET filtered_translations = (
FOR translation IN translations
FILTER translation.lang != v.root_lang OR translation.segmented == true OR is_segmented_original == null
RETURN translation
)
LET translated_titles = (
FOR translation IN translations
FILTER translation.lang == @language AND HAS(translation, 'title') AND translation.title != null
LIMIT 1
RETURN translation.title
)[0]
LET name_title = (
FOR name IN names
FILTER name.uid == v.uid AND name.lang == @language
LIMIT 1
RETURN name.name
)[0]
LET parallel_count = LENGTH(
FOR rel IN relationship
FILTER rel._from == v._id
RETURN rel
)
LET biblio = (
FOR biblio IN biblios
FILTER biblio.uid == v.biblio_uid
LIMIT 1
RETURN biblio.text
)[0]
LET original_titles = (
FOR nav_item IN super_nav_details
FILTER nav_item.uid == v.uid
LIMIT 1
RETURN nav_item.name
)[0]
LET alt_volpages = (
FOR altVolpages IN text_extra_info
FILTER altVolpages.uid == v.uid
LIMIT 1
RETURN altVolpages.alt_volpage
)[0]
LET path_docs = (
FOR doc IN 1..100 INBOUND DOCUMENT('super_nav_details', @uid) super_nav_details_edges OPTIONS {order: 'dfs'}
RETURN doc.uid
)
LET priority_author = (
FOR priority IN prioritize
            FILTER priority.tree == path_docs[LENGTH(path_docs) - 1]
AND priority.translation_lang == @language
AND priority.root_lang == v.root_lang
return priority.creator
)[0]
LET references = (
FOR volpage IN text_references
FILTER volpage.uid == v.uid
RETURN volpage.volpage
)[0]
RETURN {
acronym: v.acronym,
volpages: volpages,
alt_volpages: alt_volpages,
uid: v.uid,
blurb: blurb,
difficulty: difficulty,
original_title: original_titles,
root_lang: v.root_lang,
root_lang_name: DOCUMENT('language', v.root_lang).name,
type: v.type,
from: e._from,
translated_title: translated_titles ? translated_titles : name_title,
translations: filtered_translations,
parallel_count: parallel_count,
biblio: biblio,
priority_author_uid: priority_author,
verseNo: references,
}
'''
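# A minimal sketch of running one of these parameterised queries with the
# python-arango driver. The connection details and the 'dn' uid below are
# illustrative assumptions, not values defined in this module.
def _example_suttaplex_query():
    from arango import ArangoClient  # assumed driver, not a dependency declared here
    db = ArangoClient().db('suttacentral', username='root', password='')
    cursor = db.aql.execute(SUTTAPLEX_LIST, bind_vars={'language': 'en', 'uid': 'dn'})
    return list(cursor)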
FALLEN_LEAVES_SUTTAPLEX_LIST = '''
FOR doc IN fallen_leaves
FOR leaves IN doc.fallen_leaves
FILTER HAS(leaves, @uid)
FOR leaf_uid in leaves[@uid]
LET nav_detail = DOCUMENT('super_nav_details', leaf_uid)
LET legacy_translations = (
FOR text IN html_text
FILTER text.uid == leaf_uid
LET lang_doc = DOCUMENT('language', text.lang)
LET res = {
lang: text.lang,
lang_name: lang_doc.name,
is_root: lang_doc.is_root,
author: text.author,
author_short: text.author_short,
author_uid: text.author_uid,
publication_date: text.publication_date,
id: text._key,
segmented: false,
volpage: text.volpage
}
RETURN (text.lang == @language) ? MERGE(res, {title: text.name}) : res
)
LET bilara_translations = (
FOR text IN sc_bilara_texts
FILTER text.uid == leaf_uid AND ('root' IN text.muids OR 'translation' IN text.muids)
SORT text.lang
LET lang_doc = DOCUMENT('language', text.lang)
LET author_doc = (
FOR author IN bilara_author_edition
FILTER author.uid IN text.muids
LIMIT 1
RETURN author
)[0]
LET name_doc = (
FOR name IN names
FILTER name.uid == leaf_uid AND name.lang == text.lang
LIMIT 1
RETURN name
)[0]
RETURN {
lang: text.lang,
lang_name: lang_doc.name,
is_root: lang_doc.is_root,
author: author_doc.long_name,
author_short: author_doc.short_name,
author_uid: author_doc.uid,
publication_date: null,
id: text._key,
segmented: true,
title: name_doc.name,
volpage: null
}
)
LET translations = FLATTEN([bilara_translations, legacy_translations])
LET is_segmented_original = (
FOR translation IN translations
FILTER translation.lang == nav_detail.root_lang AND translation.segmented == true
LIMIT 1
RETURN true
)[0]
LET filtered_translations = (
FOR translation IN translations
FILTER translation.lang != nav_detail.root_lang OR translation.segmented == true OR is_segmented_original == null
RETURN translation
)
LET name_title = (
FOR name IN names
FILTER name.uid == leaf_uid AND name.lang == @language
LIMIT 1
RETURN name.name
)[0]
LET original_titles = (
FOR nav_item IN super_nav_details
FILTER nav_item.uid == leaf_uid
LIMIT 1
RETURN nav_item.name
)[0]
LET parallel_count = LENGTH(
FOR rel IN relationship
FILTER rel._from == nav_detail._id
RETURN rel
)
RETURN {
acronym: null,
volpages: null,
alt_volpages: null,
uid: leaf_uid,
blurb: null,
difficulty: null,
original_title: original_titles,
root_lang: nav_detail.root_lang,
            root_lang_name: DOCUMENT('language', nav_detail.root_lang).name,
type: nav_detail.type,
from: null,
translated_title: name_title,
translations: filtered_translations,
parallel_count: parallel_count,
biblio: null,
priority_author_uid: null,
}
'''
PARALLELS = '''
FOR v, e, p IN OUTBOUND CONCAT('super_nav_details/', @uid) relationship
LET target = DOCUMENT(e._to)
LET legacy_translations = (
FOR text IN html_text
FILTER text.uid == target.uid
LET res = {
lang: text.lang,
lang_name: (FOR lang in language FILTER lang.uid == text.lang LIMIT 1 RETURN lang.name)[0],
author: text.author,
author_short: text.author_short,
author_uid: text.author_uid,
id: text._key,
segmented: false,
volpage: text.volpage
}
// Add title if it is in desired language
RETURN (text.lang == @language) ? MERGE(res, {title: text.name}) : res
)
LET bilara_translations = (
FOR text IN sc_bilara_texts
FILTER text.uid == target.uid AND 'root' IN text.muids
LET author_doc = (
FOR author IN bilara_author_edition
FILTER author.uid IN text.muids
LIMIT 1
RETURN author
)[0]
LET text_title = (
FOR name IN names
FILTER name.uid == @uid AND name.is_root == true
RETURN name.name
)[0]
LET res = {
lang: text.lang,
lang_name: (FOR lang in language FILTER lang.uid == text.lang LIMIT 1 RETURN lang.name)[0],
author: author_doc.long_name,
author_short: author_doc.short_name,
author_uid: author_doc.uid,
id: text._key,
segmented: true,
volpage: (FOR doc IN super_nav_details FILTER doc.uid == target.uid RETURN doc.volpage)[0]
}
RETURN (text.lang == @language) ? MERGE(res, {title: text_title}) : res
)
SORT e.resembling
LET biblio = (
FOR biblio IN biblios
FILTER biblio.uid == v.biblio_uid
LIMIT 1
RETURN biblio.text
)[0]
LET translations = FLATTEN([bilara_translations, legacy_translations])
LET volpages = (
FOR volpages IN text_extra_info
FILTER volpages.uid == v.uid
LIMIT 1
RETURN volpages.volpage
)[0]
LET alt_volpages = (
FOR altVolpages IN text_extra_info
FILTER altVolpages.uid == v.uid
LIMIT 1
RETURN altVolpages.alt_volpage
)[0]
LET translated_titles = (
FOR translation IN translations
FILTER translation.lang == @language AND HAS(translation, 'title')
LIMIT 1
RETURN translation.title
)[0]
LET original_titles = (
FOR original_name IN names
FILTER original_name.uid == v.uid AND original_name.is_root == true
LIMIT 1
RETURN original_name.name
)[0]
SORT e.number, e.to
RETURN {
from: e.from,
enumber: e.number,
to: {
to: e.to,
volpages: volpages,
alt_volpages: alt_volpages,
acronym: v.acronym,
uid: v.uid ? v.uid : 'orphan',
root_lang: v.root_lang,
original_title: original_titles,
translated_title: translated_titles,
type: e.type,
from: e._from,
biblio: biblio,
translations: translations
},
type: e.type,
remark: e.remark,
resembling: e.resembling
}
'''
SUTTA_VIEW = (
'''
LET root_text = DOCUMENT('super_nav_details', @uid)
LET legacy_html = (
FOR html IN html_text
FILTER html.uid == @uid AND ((html.lang == @language AND LOWER(html.author_uid) == @author_uid)
OR html.lang == root_text.root_lang)
RETURN {
uid: html.uid,
lang: html.lang,
is_root: html.lang == root_text.root_lang,
title: html.name,
author: html.author,
author_short: html.author_short,
author_uid: html.author_uid,
file_path: html.file_path,
next: html.next,
previous: html.prev
}
)
LET root_bilara_obj = (
FOR doc IN sc_bilara_texts
FILTER doc.uid == @uid AND 'root' IN doc.muids
LIMIT 1
LET author_doc = (
FOR author IN bilara_author_edition
FILTER author.uid IN doc.muids
LIMIT 1
RETURN author
)[0]
LET name_doc = (
FOR name IN names
FILTER name.uid == doc.uid AND name.is_root == true
LIMIT 1
RETURN name
)[0]
RETURN {
uid: doc.uid,
author: author_doc.long_name,
author_short: author_doc.short_name,
author_uid: author_doc.uid,
lang: doc.lang,
title: name_doc.name,
previous: {
author_uid: author_doc.uid,
lang: doc.lang,
name: null,
uid: null,
},
next: {
author_uid: author_doc.uid,
lang: doc.lang,
name: null,
uid: null,
},
}
)[0]
LET translated_bilara_obj = (
FOR doc IN sc_bilara_texts
FILTER doc.uid == @uid AND doc.lang == @language AND @author_uid IN doc.muids
LIMIT 1
LET author_doc = (
FOR author IN bilara_author_edition
FILTER author.uid IN doc.muids
LIMIT 1
RETURN author
)[0]
LET name_doc = (
FOR name IN names
FILTER name.uid == doc.uid AND name.lang == doc.lang
LIMIT 1
RETURN name
)[0]
RETURN {
uid: doc.uid,
lang: doc.lang,
author_uid: author_doc.uid,
author: author_doc.long_name,
author_short: author_doc.short_name,
title: name_doc.name,
previous: {
author_uid: author_doc.uid,
lang: doc.lang,
name: null,
uid: null,
},
next: {
author_uid: author_doc.uid,
lang: doc.lang,
name: null,
uid: null,
},
}
)[0]
LET suttaplex = ('''
+ SUTTAPLEX_LIST
+ ''')[0]
RETURN {
root_text: translated_bilara_obj ? root_bilara_obj : legacy_html[0],
translation: translated_bilara_obj ? (root_bilara_obj == translated_bilara_obj ? null : translated_bilara_obj)
: (FOR html IN legacy_html FILTER html.lang == @language LIMIT 1 RETURN html)[0],
segmented: translated_bilara_obj ? true : false,
suttaplex: suttaplex,
bilara_root_text: root_bilara_obj,
bilara_translated_text: translated_bilara_obj
}
'''
)
SUTTA_NAME = '''
LET translated_name = (
FOR name IN names
FILTER name.uid == @uid AND name.is_root == false AND name.lang == @lang
LIMIT 1
RETURN name.name
)[0]
LET root_name = (
FOR name IN names
FILTER name.uid == @uid AND name.is_root == true
LIMIT 1
RETURN name.name
)[0]
RETURN translated_name ? translated_name : root_name
'''
VAGGA_CHILDREN = '''
FOR doc IN 1..100 OUTBOUND DOCUMENT('super_nav_details', @uid) super_nav_details_edges
FILTER doc.type == 'leaf'
RETURN doc.uid
'''
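# In sc_bilara_texts the first entry of `muids` names the kind of file
# (root, translation, comment, reference and so on); the two queries below use
# it to key the returned file paths, e.g. {'translation_text': <file_path>}.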
SEGMENTED_SUTTA_VIEW = '''
LET result = MERGE(
FOR doc IN sc_bilara_texts
FILTER doc.uid == @uid
FILTER 'translation' NOT IN doc.muids OR @author_uid IN doc.muids
FILTER 'comment' NOT IN doc.muids OR @author_uid IN doc.muids
LET type = doc.muids[0]
RETURN {
[CONCAT(type, '_text')]: doc.file_path
}
)
RETURN result
'''
SEGMENTED_TRANSLATION_TEXT = '''
LET result = MERGE(
FOR doc IN sc_bilara_texts
FILTER doc.uid == @uid
FILTER 'translation_text' IN doc.muids OR @language IN doc.muids
LET type = doc.muids[0]
RETURN {
[CONCAT(type, '_text')]: doc.file_path
}
)
RETURN result
'''
CURRENCIES = '''
FOR currency IN currencies
FILTER currency.use == true
LET expected_name = DOCUMENT(CONCAT('currency_names/', currency.symbol, '_', @language)).name
LET name = expected_name ? expected_name : DOCUMENT(CONCAT('currency_names/', currency.symbol, '_', 'en')).name
SORT name
RETURN {
name: name,
symbol: currency.symbol,
american_express: currency.american_express,
decimal: currency.decimal
}
'''
PARAGRAPHS = '''
FOR paragraph IN paragraphs
RETURN {
uid: paragraph.uid,
description: paragraph.description
}
'''
IMAGES = '''
FOR image IN images
FILTER image.division == @division AND image.vol == @vol
FILTER image.page_number < @page+3
FILTER image.page_number > @page-3
SORT image.page_number
RETURN {name: image.name,
pageNumber: image.page_number}
'''
EPIGRAPHS = '''
FOR epigraph IN epigraphs
SORT RAND()
LIMIT @number
RETURN KEEP(epigraph, ['uid', 'epigraph'])
'''
WHY_WE_READ = '''
FOR text IN why_we_read
SORT RAND()
LIMIT @number
RETURN text.text
'''
DICTIONARY_ADJACENT = '''
LET word_number = (
FOR dictionary IN dictionaries_complex
FILTER dictionary.word == @word
LIMIT 1
RETURN dictionary.num
)
LET adjacent_words = (
FOR selected IN dictionaries_complex
FILTER selected.num < word_number+6
FILTER selected.num > word_number-6
SORT selected.num
RETURN selected.word
)
RETURN UNIQUE(adjacent_words)
'''
DICTIONARY_FULL = '''
LET dict_simple = (
FOR dict IN dictionaries_simple
FILTER dict.entry == @word AND dict.to == @language
RETURN {
from: dict.from,
to: dict.to,
entry: dict.entry,
grammar: dict.grammar,
definition: dict.definition,
xr: dict.xr,
dictname: dict.dictname,
text: null
}
)
LET dict_complex = (
FOR dict IN dictionaries_complex
FILTER dict.word == @word AND dict.to == @language
RETURN {
from: dict.from,
to: dict.to,
entry: dict.word,
grammar: null,
definition: null,
xr: null,
dictname: dict.dictname,
text: dict.text,
}
)
RETURN APPEND(dict_simple, dict_complex)
'''
DICTIONARY_SEARCH_RESULT_FULL = '''
LET dic_complex = (
FOR doc IN dictionaries_complex
FILTER doc.to == @language AND (doc.word == LOWER(@word) OR doc.word_ascii == LOWER(@word))
RETURN {
dictname: doc.dictname,
lang_to: doc.to,
lang_from: doc.from,
word: doc.word,
word_ascii: doc.word_ascii,
text: doc.text,
grammar: null,
definition: null,
xr: null
}
)
LET dic_simple = (
FOR doc IN dictionaries_simple
FILTER doc.to == @language AND doc.entry == LOWER(@word)
RETURN {
dictname: doc.dictname,
lang_to: doc.to,
lang_from: doc.from,
word: doc.entry,
word_ascii: null,
text: '',
grammar: doc.grammar,
definition: doc.definition,
xr: doc.xr
}
)
RETURN APPEND(dic_complex, dic_simple)
'''
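# DICTIONARY_SIMILAR suggests near matches: candidates sharing the query's
# first ascii letter are ranked by a weighted Levenshtein score (twice the
# ascii-folded distance plus the exact distance), keeping only words whose
# exact distance is under MAX(1, half the query length), ten results at most.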
DICTIONARY_SIMILAR = '''
LET words = FLATTEN(
FOR doc IN v_dict SEARCH STARTS_WITH(doc.word_ascii, LEFT(@word_ascii, 1))
FILTER doc.word != @word
LET ed1 = LEVENSHTEIN_DISTANCE(@word_ascii, doc.word_ascii) * 2
LET ed2 = LEVENSHTEIN_DISTANCE(@word, doc.word)
FILTER ed2 < MAX([1, LENGTH(@word) / 2])
SORT ed1 + ed2
RETURN DISTINCT doc.word
)
RETURN SLICE(words, 0, 10)
'''
DICTIONARY_SIMPLE = '''
FOR dict IN dictionaries_simple
FILTER dict.from == @from AND dict.to == @to
RETURN {
entry: dict.entry,
grammar: dict.grammar,
definition: dict.definition,
xr: dict.xr,
pronunciation: dict.pronunciation
}
'''
EXPANSION = '''
LET expansion_item = (
FOR entry IN uid_expansion
RETURN { [ entry.uid ]: [ entry.acronym, entry.name ] }
)
RETURN MERGE(expansion_item)
'''
class PWA:
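    # MENU gathers everything a progressive web app needs for offline caching:
    # the division uids that form the menu, the branch uids that get suttaplex
    # pages, and, per leaf text, which translation authors exist for each of
    # the requested languages.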
MENU = '''
LET langs = UNION(@languages ? @languages : [], @include_root ? (
FOR lang IN language FILTER lang.is_root RETURN lang.uid
) : [])
LET menu = (
FOR div IN 1..6 OUTBOUND DOCUMENT('super_nav_details', 'sutta') super_nav_details_edges OPTIONS {order: 'dfs'}
LET has_subdivisions = LENGTH(
FOR d, d_edge, d_path IN 1..1 OUTBOUND div super_nav_details_edges OPTIONS {order: 'dfs'}
FILTER d_edge.type != 'leaf'
LIMIT 1
RETURN 1
)
FILTER has_subdivisions
RETURN div.uid
)
LET grouped_children = MERGE(
FOR d, d_edge, d_path IN 1..20 OUTBOUND DOCUMENT('super_nav_details', 'sutta') super_nav_details_edges OPTIONS {order: 'dfs'}
COLLECT is_div = d.type != 'leaf' INTO uids = d.uid
RETURN {[is_div ? 'branch' : 'leaf']: uids}
)
LET suttaplex = REMOVE_VALUES(grouped_children['branch'], ['long', 'middle', 'linked', 'numbered', 'minor', 'other-group',
'dn', 'da', 'mn', 'ma', 'sn', 'sa', 'an', 'ea', 'kn', 'thig', 'thag',
'kn', 'dhp', 'iti', 'snp', 'vv', 'pv', 'tha-ap', 'thi-ap', 'bv', 'cp',
'ja', 'mnd', 'cnd', 'ps', 'ne', 'pe', 'mil', 'uv', 'uvs', 'pdhp', 'gdhp',
'minor-lzh', 'avs', 'other-xct'])
LET texts = (
FOR text IN v_text FILTER text.lang IN langs AND text.uid IN grouped_children['leaf']
FILTER HAS(text, "author_uid") or LENGTH(text.muids) >= 3
COLLECT uid = text.uid INTO groups = {lang: text.lang, author_uid: HAS(text, 'author_uid') ? text.author_uid : text.muids[2]}
RETURN {uid, translations:(
FOR text IN groups
COLLECT lang = text.lang INTO authorsOfLang = text.author_uid
LET authors = UNIQUE(authorsOfLang)
RETURN {lang, authors}
)}
)
RETURN {
menu,
suttaplex,
texts
}
'''
SIZES = '''
LET languages = (FOR s IN pwa_sizes
RETURN { [s.lang]: KEEP(s, ['parallels', 'base', 'lookup'])})
RETURN MERGE(languages)
'''
# The translation count queries below use COLLECT/AGGREGATE,
# which keeps them very fast.
TRANSLATION_COUNT_BY_LANGUAGE = '''
LET root_langs = (FOR lang IN language FILTER lang.is_root RETURN lang.uid)
LET root_lang_total = COUNT(FOR text IN v_text SEARCH text.lang IN root_langs
RETURN 1)
LET langs = (
FOR text IN v_text
COLLECT lang_code = text.lang WITH COUNT INTO total
LET lang = DOCUMENT('language', lang_code)
LET translated = total / root_lang_total
RETURN {
num: lang.num,
iso_code: lang.iso_code,
is_root: lang.is_root,
name: lang.name,
total: total,
percent: translated > 0.01 ? CEIL(100 * translated) : CEIL(1000 * translated) / 10
}
)
LET sorted_langs = MERGE(
FOR lang IN langs
COLLECT is_root = lang.is_root INTO groupings
RETURN {
[is_root]: groupings[*].lang
}
)
RETURN {
ancient: (
FOR doc IN sorted_langs["true"]
SORT doc.total DESC
RETURN UNSET(doc, 'is_root', 'num', 'percent')
),
modern: (
FOR doc IN sorted_langs["false"]
SORT doc.total DESC
RETURN UNSET(doc, 'is_root', 'num')
)
}
'''
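# The `percent` value above rounds up to whole percents when coverage exceeds
# 1%, and to tenths of a percent below that, so tiny nonzero coverage never
# displays as 0. A pure-Python mirror of the AQL ternary, for illustration:
def _coverage_percent(translated):
    import math
    return math.ceil(100 * translated) if translated > 0.01 else math.ceil(1000 * translated) / 10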
TRANSLATION_COUNT_BY_DIVISION = '''
/* First we count the number of texts by (sub)division uid based on pattern matching */
LET counts = MERGE(
FOR doc IN v_text
SEARCH doc.lang == @lang
COLLECT division_uid = REGEX_REPLACE(doc.uid, '([a-z]+(?:-[a-z]+|-[0-9]+)*).*', '$1') WITH COUNT INTO div_count
SORT null
RETURN {
[division_uid]: div_count
}
)
LET keys = ATTRIBUTES(counts)
FOR key IN keys
LET doc = DOCUMENT('super_nav_details', key)
FILTER doc
/* Determine the highest division level */
LET highest_div = LAST(
FOR v, e, p IN 0..10 INBOUND doc super_nav_details_edges OPTIONS {order: 'dfs'}
FILTER v.type == 'branch'
RETURN {
uid: v.uid,
name: v.name,
root_lang: v.root_lang
}
)
COLLECT div = highest_div /* Filter out the subdivisions */
/* But accumulate their counts */
AGGREGATE total = SUM(counts[key])
RETURN {
uid: div.uid,
name: div.name,
root_lang: div.root_lang,
total: total
}
'''
TRANSLATION_COUNT_BY_AUTHOR = '''
LET legacy_counts = (
FOR doc IN html_text
FILTER doc.lang == @lang
COLLECT author = doc.author WITH COUNT INTO total
SORT null
RETURN {
author,
total
}
)
LET segmented_counts = (
FOR doc IN sc_bilara_texts
FILTER doc.lang == @lang AND ('root' IN doc.muids OR 'translation' IN doc.muids)
COLLECT author = doc.muids[2] WITH COUNT INTO total
SORT null
RETURN {
author,
total
}
)
FOR subcount IN APPEND(legacy_counts, segmented_counts)
    /* If there are multiple authors, split them and count separately */
FOR author_name IN SPLIT(subcount.author, ', ')
COLLECT name = author_name
AGGREGATE total = SUM(subcount.total)
SORT total DESC
RETURN {name, total}
'''
SUTTA_SINGLE_PALI_TEXT = '''
FOR doc IN sc_bilara_texts
FILTER doc.uid == @uid AND doc.lang == 'pli' AND 'root' IN doc.muids
LIMIT 1
RETURN {@uid: doc.file_path}
'''
SUTTA_PATH = '''
LET path_docs = (
FOR doc IN 1..100 INBOUND DOCUMENT('super_nav_details', @uid) super_nav_details_edges OPTIONS {order: 'dfs'}
RETURN doc.uid
)
RETURN {
'full_path': CONCAT_SEPARATOR('/', REVERSE(APPEND(path_docs, '/pitaka')))
}
'''
ALL_DOC_UID_BY_ROOT_UID = '''
LET root_uid = REVERSE(POP(
FOR doc IN 1..10 INBOUND DOCUMENT('super_nav_details', @uid) super_nav_details_edges OPTIONS {order: 'dfs'}
FILTER doc.type == 'branch'
RETURN doc.uid
))[0]
FOR docs IN 1..10 OUTBOUND DOCUMENT('super_nav_details', root_uid) super_nav_details_edges OPTIONS {order: 'dfs'}
FILTER docs.type == 'leaf'
RETURN docs.uid
'''
CANDIDATE_AUTHORS = '''
LET bilara_translations = (
FOR doc IN sc_bilara_texts
FILTER doc.uid == @uid AND doc.lang == @lang AND 'translation' IN doc.muids AND doc.muids[2] != @author_uid
SORT RAND()
return doc.muids[2]
)
LET legacy_translations = (
FOR html IN html_text
FILTER html.uid == @uid AND html.lang == @lang AND html.author_uid != @author_uid
SORT RAND()
RETURN html.author_uid
)
RETURN UNION(bilara_translations, legacy_translations)
'''
SUTTA_PALI_REFERENCE = '''
FOR pali IN pali_reference_edition
COLLECT edition_set = pali.edition_set, name = pali.name, short_name = pali.short_name
RETURN {
edition_set: edition_set,
name: NOT_NULL(name, short_name)
}
'''
ALL_TEXTS_BY_LANGUAGES = '''
FOR doc IN v_text
SEARCH doc.lang IN @languages
LET langs = REMOVE_VALUE(@languages, 'pli')
FILTER doc.lang IN langs OR (doc.lang == 'pli' AND 'root' IN doc.muids)
RETURN doc
'''
SUTTA_PUBLICATION_INFO = '''
LET path_docs = (
FOR doc IN 1..100 INBOUND DOCUMENT('super_nav_details', @uid) super_nav_details_edges OPTIONS {order: 'dfs'}
RETURN doc.uid
)
FOR pub_doc IN publications
FILTER (pub_doc.text_uid IN path_docs OR pub_doc.text_uid == @uid) AND pub_doc.translation_lang_iso == @lang
AND (pub_doc.author_uid == @authorUid OR @authorUid IN pub_doc.collaborator[*].collaborator_uid)
RETURN pub_doc
'''
PLI_SUTTA_PUBLICATION_INFO = '''
FOR pliPublication IN publications
FILTER pliPublication.publication_number == 'scpub64'
RETURN pliPublication
'''
AVAILABLE_VOICES = '''
FOR v IN available_voices
FILTER v.uid == @uid
RETURN {
uid: v.uid,
voices: v.voices
}
'''
BILARA_REFERENCES = '''
FOR references IN sc_bilara_texts
FILTER 'reference' IN references.muids
RETURN {
'uid': references.uid,
'file_path': references.file_path
}
'''
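# The UPSERT queries below keep exactly one row per uid in text_extra_info:
# the first write inserts a row with every field nulled, and later writes
# patch only the field being set.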
UPDATE_TEXT_EXTRA_INFO_VOLPAGE = '''
UPSERT { uid: @uid }
INSERT { uid: @uid, acronym: null, alt_acronym: null, volpage: @ref, alt_volpage: null, alt_name: null, biblio_uid: null }
UPDATE {
volpage: @ref
} IN text_extra_info
'''
UPDATE_TEXT_EXTRA_INFO_ALT_VOLPAGE = '''
UPSERT { uid: @uid }
INSERT { uid: @uid, acronym: null, alt_acronym: null, volpage: null, alt_volpage: @ref, alt_name: null, biblio_uid: null }
UPDATE {
alt_volpage: @ref
} IN text_extra_info
'''
UPSERT_TEXT_EXTRA_ACRONYM_INFO = '''
UPSERT { uid: @uid }
INSERT { uid: @uid, acronym: @acronym, alt_acronym: null, volpage: null, alt_volpage: null, alt_name: null, biblio_uid: null }
UPDATE {
acronym: @acronym
} IN text_extra_info
'''
UPDATE_SUPER_NAV_DETAILS_ACRONYM_INFO = '''
FOR u IN super_nav_details
FILTER u.uid == @uid
UPDATE u WITH { acronym: @acronym } IN super_nav_details
'''
UPSERT_NAMES = '''
UPSERT { uid: @uid, lang: @lang }
INSERT { name: @name, is_root: false, lang: @lang, uid: @uid }
UPDATE {
} IN names
'''
UPSERT_ROOT_NAMES = '''
UPSERT { uid: @uid, is_root: true }
INSERT { name: @name, is_root: true, lang: null, uid: @uid }
UPDATE {
name: @name
} IN names
'''
PARALLELS_LITE = '''
FOR v IN 0..6 OUTBOUND CONCAT('super_nav_details/', @uid) super_nav_details_edges OPTIONS {order: 'dfs'}
FILTER v.type == 'leaf'
LET parallels = (
FOR k, e IN OUTBOUND CONCAT('super_nav_details/', v.uid) relationship
LET parallel_legacy_root = (
FOR text IN html_text
FILTER text.uid == k.uid AND text.lang == 'lzh'
LIMIT 1
LET res = {
uid: text.uid,
lang: text.lang,
author_uid: text.author_uid,
}
RETURN res
)
LET parallel_bilara_root = (
FOR text IN sc_bilara_texts
FILTER text.uid == k.uid AND 'root' IN text.muids
LET res = {
uid: text.uid,
lang: text.lang,
author_uid: text.muids[2],
}
RETURN res
)
LET parallel_root = FLATTEN([parallel_bilara_root, parallel_legacy_root])
RETURN {
from: e.from,
to: {
to: e.to,
uid: k.uid,
acronym: k.acronym,
parallel_root: parallel_root
}
}
)
LET original_legacy_root = (
FOR text IN html_text
FILTER text.uid == v.uid AND text.lang == 'lzh'
LIMIT 1
LET res = {
uid: text.uid,
lang: text.lang,
author_uid: text.author_uid,
}
RETURN res
)
LET original_bilara_root = (
FOR text IN sc_bilara_texts
FILTER text.uid == v.uid AND 'root' IN text.muids
LET res = {
uid: text.uid,
lang: text.lang,
author_uid: text.muids[2],
}
RETURN res
)
LET original_root = FLATTEN([original_bilara_root, original_legacy_root])
RETURN {
uid: v.uid,
name: v.name,
acronym: v.acronym,
parallels: parallels,
original_root: original_root
}
'''
EBOOK_DATA_QUERY = '''
LET translation_filter = SPLIT(@translation_muids, '-')
FOR doc, edge, path IN 0..10 OUTBOUND CONCAT('super_nav_details/', @uid) super_nav_details_edges OPTIONS {order: 'dfs'}
LET uid = doc.uid
LET name = FIRST(FOR name_doc IN names FILTER name_doc.uid == doc.uid AND name_doc.lang == @lang RETURN name_doc.name)
LET blurb = FIRST(
FOR blurb_doc in blurbs
FILTER blurb_doc.uid == uid
FILTER blurb_doc.lang == @lang
RETURN blurb_doc
)
LET files = MERGE(
FOR file_doc IN sc_bilara_texts
FILTER file_doc.uid == uid
FOR key IN ATTRIBUTES(@file_data)
LET value = SPLIT(@file_data[key], '-')
FILTER file_doc.muids ALL IN value
RETURN {[key]: file_doc.file_path}
)
RETURN {
uid,
type: doc.type,
name: name,
blurb: blurb.blurb,
files
}
'''
ACRONYM_IS_NULL_UIDS = '''
FOR doc IN super_nav_details
FILTER doc.type == 'leaf' AND doc.acronym == null
RETURN doc.uid
'''
SINGLE_ROOT_TEXT = '''
FOR root IN sc_bilara_texts
FILTER 'root' IN root.muids
AND root.lang == 'pli'
AND root.uid == @uid
RETURN root
'''
ABBREVIATION_SUPER_NAME_ACRONYM = '''
FOR name_doc IN super_name
LET acronym = (
FOR doc IN super_nav_details
FILTER doc.uid == name_doc.uid
RETURN doc.acronym
)[0]
RETURN {
name: name_doc.name,
acronym: acronym
}
'''
NAVIGATION_QUERY = '''
FOR uid IN @uids
LET navigation_doc = DOCUMENT('super_nav_details', uid)
LET descendants = (
FOR descendant IN OUTBOUND navigation_doc super_nav_details_edges OPTIONS {order: 'dfs'}
LET translated_name = DOCUMENT('names', CONCAT_SEPARATOR('_', descendant.uid, @language))['name']
RETURN {
uid: descendant.uid,
root_name: descendant.name,
translated_name: translated_name,
acronym: descendant.acronym,
node_type: descendant.type,
}
)
LET translated_name = DOCUMENT('names', CONCAT_SEPARATOR('_', navigation_doc.uid, @language))['name']
RETURN {
uid: navigation_doc.uid,
root_name: navigation_doc.name,
translated_name: translated_name,
node_type: navigation_doc.type,
acronym: navigation_doc.acronym,
children: descendants,
}
'''
# ---- /google_or_tools/car_sat.py (repo: hakank/hakank, license: MIT) ----
# Copyright 2021 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Car sequencing in Google CP Solver.
This model is based on the car sequencing model in
Pascal Van Hentenryck
'The OPL Optimization Programming Language', page 184ff.
It is port of my of OR-tools CP model car.py
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
from cp_sat_utils import count_vars
class SolutionPrinter(cp.CpSolverSolutionCallback):
    def __init__(self, slot, Slots, Options, capacity, setup, num_sol):
cp.CpSolverSolutionCallback.__init__(self)
self.__slot = slot
self.__Slots = Slots
self.__Options = Options
self.__capacity = capacity
self.__setup = setup
self.__num_sol = num_sol
self.__solution_count = 0
def OnSolutionCallback(self):
self.__solution_count += 1
print(f"Solution #{self.__solution_count}")
print("slot:%s" % ",".join([str(self.Value(self.__slot[i])) for i in self.__Slots]))
print("setup:")
for o in self.__Options:
print("%i/%i:" % (self.__capacity[o][0], self.__capacity[o][1]), end=" ")
for s in self.__Slots:
print(self.Value(self.__setup[o, s]), end=" ")
print()
print()
if self.__solution_count >= self.__num_sol:
self.StopSearch()
def SolutionCount(self):
return self.__solution_count
def main(num_sol=3):
model = cp.CpModel()
# data
nbCars = 6
nbOptions = 5
nbSlots = 10
Cars = list(range(nbCars))
Options = list(range(nbOptions))
Slots = list(range(nbSlots))
# car 0 1 2 3 4 5
demand = [1, 1, 2, 2, 2, 2]
option = [
# car 0 1 2 3 4 5
[1, 0, 0, 0, 1, 1], # option 1
[0, 0, 1, 1, 0, 1], # option 2
[1, 0, 0, 0, 1, 0], # option 3
[1, 1, 0, 1, 0, 0], # option 4
[0, 0, 1, 0, 0, 0] # option 5
]
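    # capacity[o] = (p, q): at most p cars with option o in any q consecutive slots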
capacity = [(1, 2), (2, 3), (1, 3), (2, 5), (1, 5)]
optionDemand = [
sum([demand[j] * option[i][j] for j in Cars]) for i in Options
]
#
# variables
#
slot = [model.NewIntVar(0, nbCars - 1, "slot[%i]" % i) for i in Slots]
setup = {}
for i in Options:
for j in Slots:
setup[(i, j)] = model.NewIntVar(0, 1, "setup[%i,%i]" % (i, j))
#
# constraints
#
for c in Cars:
count_vars(model,slot,c,demand[c])
for o in Options:
for s in range(0, nbSlots - capacity[o][1] + 1):
b = [setup[o, j] for j in range(s, s + capacity[o][1] - 1)]
model.Add(sum(b) <= capacity[o][0])
for o in Options:
for s in Slots:
model.AddElement(slot[s], option[o], setup[(o, s)])
for o in Options:
for i in range(optionDemand[o]):
s_range = list(range(0, nbSlots - (i + 1) * capacity[o][1]))
ss = [setup[o, s] for s in s_range]
cc = optionDemand[o] - (i + 1) * capacity[o][0]
if len(ss) > 0 and cc >= 0:
model.Add(sum(ss) >= cc)
#
# search and result
#
solver = cp.CpSolver()
    solution_printer = SolutionPrinter(slot, Slots, Options, capacity, setup, num_sol)
status = solver.SearchForAllSolutions(model, solution_printer)
print("status:", solver.StatusName(status))
print()
print("NumSolutions:", solution_printer.SolutionCount())
print("NumConflicts:", solver.NumConflicts())
print("NumBranches:", solver.NumBranches())
print("WallTime:", solver.WallTime())
num_sol = 3
if __name__ == "__main__":
if len(sys.argv) > 1:
num_sol = int(sys.argv[1])
main(num_sol)
# ---- /pyglet/media/devices/base.py (repo: pyglet/pyglet, license: BSD-3-Clause) ----
from abc import ABCMeta, abstractmethod
from enum import Enum, auto
from typing import Dict, Optional
from pyglet import event
class DeviceState(Enum):
ACTIVE = auto()
DISABLED = auto()
MISSING = auto()
UNPLUGGED = auto()
class DeviceFlow(Enum):
OUTPUT = auto()
INPUT = auto()
INPUT_OUTPUT = auto()
class AudioDevice:
"""Base class for a platform independent audio device.
_platform_state and _platform_flow is used to make device state numbers."""
platform_state: Dict[int, DeviceState] = {} # Must be defined by the parent.
platform_flow: Dict[int, DeviceFlow] = {} # Must be defined by the parent.
def __init__(self, dev_id: str, name: str, description: str, flow: int, state: int):
self.id = dev_id
self.flow = flow # platform value
self.state = state # platform value
self.name = name
self.description = description
def __repr__(self):
return "{}(name='{}', state={}, flow={})".format(
self.__class__.__name__, self.name, self.platform_state[self.state].name, self.platform_flow[self.flow].name)
class AbstractAudioDeviceManager(event.EventDispatcher, metaclass=ABCMeta):
def __del__(self):
"""Required to remove handlers before exit, as it can cause problems with the event system's weakrefs."""
self.remove_handlers(self)
@abstractmethod
def get_default_output(self):
"""Returns a default active output device or None if none available."""
pass
@abstractmethod
def get_default_input(self):
"""Returns a default active input device or None if none available."""
pass
@abstractmethod
def get_output_devices(self):
"""Returns a list of all active output devices."""
pass
@abstractmethod
def get_input_devices(self):
"""Returns a list of all active input devices."""
pass
@abstractmethod
def get_all_devices(self):
"""Returns a list of all audio devices, no matter what state they are in."""
pass
def on_device_state_changed(self, device: AudioDevice, old_state: DeviceState, new_state: DeviceState):
"""Event, occurs when the state of a device changes, provides the old state and new state."""
pass
def on_device_added(self, device: AudioDevice):
"""Event, occurs when a new device is added to the system."""
pass
def on_device_removed(self, device: AudioDevice):
"""Event, occurs when an existing device is removed from the system."""
pass
def on_default_changed(self, device: Optional[AudioDevice], flow: DeviceFlow):
"""Event, occurs when the default audio device changes.
        If there is no device that can be the default on the system, this can be None.
        The flow determines whether an input or output device became its respective default.
"""
pass
AbstractAudioDeviceManager.register_event_type('on_device_state_changed')
AbstractAudioDeviceManager.register_event_type('on_device_added')
AbstractAudioDeviceManager.register_event_type('on_device_removed')
AbstractAudioDeviceManager.register_event_type('on_default_changed')
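# A minimal sketch of a concrete manager built on the classes above; the
# device values are invented for illustration and no platform backend is
# actually queried.
class _NullAudioDeviceManager(AbstractAudioDeviceManager):
    def __init__(self):
        # A single fake output device; flow/state ints are placeholders.
        self._devices = [AudioDevice('null0', 'Null Output', 'A dummy output device', flow=0, state=0)]
    def get_default_output(self):
        return self._devices[0]
    def get_default_input(self):
        return None
    def get_output_devices(self):
        return list(self._devices)
    def get_input_devices(self):
        return []
    def get_all_devices(self):
        return list(self._devices)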
# ---- /examples/test_example_context_manager_fail.py (repo: okken/pytest-check, license: MIT) ----
"""
Everything should fail in this file.
This test is useful for testing:
- messages
- stop on fail (-x)
- pseudo-tracebacks
- lack of tracebacks (--tb=no)
"""
from pytest_check import check
def test_3_failed_checks():
with check:
assert 1 == 0
with check:
assert 1 > 2
with check:
assert 1 < 5 < 4
def test_messages():
with check("first fail"):
assert 1 == 0
with check("second fail"):
assert 1 > 2
with check("third fail"):
assert 1 < 5 < 4
# ---- /tests/test_scripts.py (repo: InsightSoftwareConsortium/SimpleITK-Notebooks, license: Apache-2.0) ----
import os
import pytest
import pathlib
import hashlib
import sys
import pandas as pd
# Add the script source directory to the path so that we can import
sys.path.append(str(pathlib.Path(__file__).parent.parent.absolute() / "Python/scripts"))
from characterize_data import characterize_data
class TestScripts:
def setup_method(self):
# Path to testing data is expected in the following location:
self.data_path = pathlib.Path(__file__).parent.parent.absolute() / "Data"
def files_md5(self, ascii_file_list, binary_file_list):
"""
Compute a single/combined md5 hash for a list of ascii and binary files.
        We can't read all files as binary because of platform specific differences in
        ascii files. For ascii files we need to open in text mode and use the read() method, which,
        to quote the documentation:
        In text mode, the default when reading is to convert platform-specific line endings (\n on Unix, \r\n on
        Windows) to just \n.
        This ensures that we get the same md5 hash on all platforms. If we opened the text files as binary, the hashes
        would become platform dependent (\r\n vs. \n).
"""
md5 = hashlib.md5()
for file_name in ascii_file_list:
with open(file_name, "r") as fp:
file_contents = fp.read()
md5.update(file_contents.encode("utf-8"))
for file_name in binary_file_list:
with open(file_name, "rb") as fp:
file_contents = fp.read()
md5.update(file_contents)
return md5.hexdigest()
@pytest.mark.parametrize(
"output_file, analysis_type, result_md5hash",
[
(
"per_file_data_characteristics.csv",
"per_file",
"912ede9ecfe519346f3a519f59215f6d",
),
(
"per_series_data_characteristics.csv",
"per_series",
"8a806fa717739b9c6f2132a719b1ab8f",
),
],
)
def test_characterize_data(
self, output_file, analysis_type, result_md5hash, tmp_path
):
# NOTE: For now not testing pdf files. Setting the SOURCE_DATE_EPOCH
# didn't resolve the variability across platforms, getting different
# md5 hash values. Not sure if it is possible to do regression testing
# with the pdf files.
# Set the SOURCE_DATE_EPOCH environment variable value so that the pdf,ps files
# created have the same date. The file content includes the date time and we want
# to ignore that difference.
# https://github.com/matplotlib/matplotlib/issues/6317/
# os.environ["SOURCE_DATE_EPOCH"] = "42"
output_dir = tmp_path
# Run the script, output files are written to the output_path directory
# these are csv and pdf files
characterize_data(
[
str(self.data_path / "CIRS057A_MR_CT_DICOM"),
str(output_dir / output_file),
analysis_type,
]
)
# csv files needs to be modified as follows before comparing to expected values:
# 1. Modify absolute file paths to only include file name so that they are independent
# of file location.
# 2. Sort the file names in the "files" column, os.walk returns directories and file
# names in arbitrary order and the order is different across operating systems.
# 3. Sort the image entries (per series or per file) according to MD5 hash as the row order
# depends on the directory order which isn't consistent, same issue as in 2.
result_files = output_dir.glob("*.csv")
for file in result_files:
df = pd.read_csv(file).sort_values(by="MD5 intensity hash")
df["files"] = df["files"].apply(
lambda x: sorted([pathlib.Path(fname).name for fname in eval(x)])
)
df.to_csv(file, index=False)
assert (
self.files_md5(
ascii_file_list=output_dir.glob("*.csv"),
binary_file_list=[], # output_dir.glob("*.pdf"),
)
== result_md5hash
)
# ---- /3rdParty/boost/1.78.0/libs/mpl/doc/src/docutils/setup.py (repo: arangodb/arangodb, license: BSL-1.0) ----
#!/usr/bin/env python
# Copyright Aleksey Gurtovoy 2007-2009
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import sys, os
from distutils.core import setup
setup(
name="html_refdoc",
version=".1",
description="convert C++ rst documentation to a set of HTML pages/frames.",
author="Aleksey Gurtovoy",
author_email="agurtovoy@meta-comm.com",
packages=['docutils.writers.html4_refdoc', 'docutils.parsers.rst.directives'],
package_dir={'docutils.writers.html4_refdoc': 'writers/html4_refdoc'
,'docutils.parsers.rst.directives': 'parsers/rst/directives' },
package_data={'docutils.writers.html4_refdoc': ['frames.css']},
scripts=["tools/rst2htmlrefdoc.py"],
)
# ---- /api_tests/actions/views/test_action_detail.py (repo: CenterForOpenScience/osf.io, license: Apache-2.0) ----
import pytest
from api.base.settings.defaults import API_BASE
from api_tests.requests.mixins import NodeRequestTestMixin, PreprintRequestTestMixin
@pytest.mark.django_db
class TestActionDetailNodeRequests(NodeRequestTestMixin):
@pytest.fixture()
def url(self, node_request):
action = node_request.actions.last()
return '/{}actions/{}/'.format(API_BASE, action._id)
def test_admin_cannot_view_action(self, app, url, admin):
res = app.get(url, auth=admin.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to view this Action'
def test_requester_cannot_view_action(self, app, url, requester):
res = app.get(url, auth=requester.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to view this Action'
def test_write_contrib_cannot_view_action(self, app, url, write_contrib):
res = app.get(url, auth=write_contrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to view this Action'
def test_noncontrib_cannot_view_action(self, app, url, noncontrib):
res = app.get(url, auth=noncontrib.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to view this Action'
@pytest.mark.django_db
class TestActionDetailPreprintRequests(PreprintRequestTestMixin):
def url(self, request):
action = request.actions.last()
return '/{}actions/{}/'.format(API_BASE, action._id)
def test_no_one_can_view_these_actions(self, app, admin, write_contrib, noncontrib, moderator, pre_request, post_request, none_request):
pre_url = self.url(pre_request)
post_url = self.url(post_request)
none_url = self.url(none_request)
for url in [pre_url, post_url, none_url]:
for user in [admin, write_contrib, noncontrib, moderator]:
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 403
assert res.json['errors'][0]['detail'] == 'You do not have permission to view this Action'
# ---- /KG-Search-Flask/app.py (repo: xyjigsaw/Knowledge-Graph-And-Visualization-Demo, license: MIT) ----
from flask import Flask, render_template, flash, request
from flask_restful import reqparse, abort, Api, Resource
from forms import KeywordSearchForm
from neo4j_models import Neo4jTool
from jieba import posseg
import raw2json
import json
import cpca
app = Flask(__name__)
api = Api(app)
app.config.update(RESTFUL_JSON=dict(ensure_ascii=False))  # emit non-ASCII (e.g. Chinese) characters unescaped in API JSON responses
neo_con = Neo4jTool()
neo_con.connect2neo4j()
@app.route('/', methods=['GET', 'POST'])
def main():
print('Neo4j has connected...')
form = KeywordSearchForm(request.form)
try:
if form.validate_on_submit():
print("Submit successfully...")
keyword = form.keyword.data.strip()
res = entity_analysis(keyword)
try:
if len(res) == 0:
nothing = {'title': '<h1>Not Found</h1>'}
return render_template('entity.html', nothing=json.dumps(nothing, ensure_ascii=False), form=form)
else:
res_json = raw2json.analysis(json.loads(json.dumps(res, ensure_ascii=False)))
return render_template('entity.html', data=res_json['data'], links=res_json['links'],
format_triple=res_json['format_triple'], categories=res_json['categories'],
form=form)
except:
print("[log-neo4j] some error exist!!!")
flash("some error exist!!!")
else:
flash("valid form")
except:
print("[log-neo4j] empty form")
return render_template('entity.html', form=form)
def is_loc(loc):
d = cpca.transform([loc], open_warning=False)
if str(d['省'][0]):
return True
if str(d['市'][0]):
return True
if str(d['区'][0]):
return True
return False
def entity_analysis(entity):
db = neo_con
words = entity.split(' ')
if len(words) == 1:
if is_loc(words[0]):
return db.match_location4event_patient(entity)
else:
wordp = posseg.cut(words[0])
for w in wordp:
if w.flag in ['v', 'vd', 'vn', 'vg']:
return db.match_topic4event(entity)
elif w.flag in ['nr']:
return db.match_patient_name(entity)
elif len(words) == 2:
isloc_dict = {}
flag = 0
for word in words:
isloc_dict[word] = is_loc(word)
if isloc_dict[word]:
flag = 1
if isloc_dict[words[0]]:
wordp = posseg.cut(words[1])
for w in wordp:
if w.flag in ['v', 'vd', 'vn', 'vg']:
return db.match_location_topic4event(words[0], words[1])
elif w.flag in ['m']:
return db.match_location_time4event_patient(words[0], words[1])
else:
gender = words[1].replace('性', '').replace('生', '')
return db.match_location_gender4patient(words[0], gender)
else:
wordp = posseg.cut(words[0])
for w in wordp:
if w.flag in ['v', 'vd', 'vn', 'vg']:
return db.match_location_topic4event(words[1], words[0])
elif w.flag in ['m']:
return db.match_location_time4event_patient(words[1], words[0])
else:
gender = words[0].replace('性', '').replace('生', '')
return db.match_location_gender4patient(words[1], gender)
if not flag:
wordp = posseg.cut(words[0])
for w in wordp:
if w.flag in ['m']:
return db.match_name_time4location_event(words[1], words[0])
else:
return db.match_name_time4location_event(words[0], words[1])
elif len(words) == 3:
loc = ''
for word in words:
if is_loc(word):
loc = word
words.remove(word)
break
wordp = posseg.cut(words[0])
for w in wordp:
if w.flag in ['m']:
return db.match_location_time_topic4patient(loc, words[0], words[1])
else:
return db.match_location_time_topic4patient(loc, words[1], words[0])
else:
answer = db.match_location4event_patient(words[0])
if len(answer) == 0:
answer = db.match_topic4event(words[0])
return answer
# REST API resources
parser = reqparse.RequestParser()
parser.add_argument('string', type=str)
class post_data(Resource):
def post(self):
args = parser.parse_args()
print('@', args)
entity_json = raw2json.analysis(json.loads(json.dumps(entity_analysis(args['string']), ensure_ascii=False)))
return entity_json
class get_data(Resource):
    def get(self, string):  # look up the results for the given query string
entity_json = raw2json.analysis(json.loads(json.dumps(entity_analysis(string), ensure_ascii=False)))
return entity_json
api.add_resource(get_data, '/api/<string>')
api.add_resource(post_data, '/api')
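# Example calls against the REST endpoints once the app is running on the
# Flask default port; the query keyword below is only an illustration:
#   curl http://127.0.0.1:5000/api/武汉
#   curl -X POST -d 'string=武汉' http://127.0.0.1:5000/api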
if __name__ == '__main__':
app.run()
# ---- /setup.py (repo: SimonBiggs/scriptedforms, license: AGPL-3.0+) ----
import os
from glob import glob
from setuptools import setup
here = os.path.dirname(os.path.abspath(__file__))
name = 'scriptedforms'
pjoin = os.path.join
tar_path = pjoin(here, 'scriptedforms', '*.tgz')
version_ns = {}
with open(pjoin(here, name, '_version.py')) as file:
code = file.read()
exec(code, version_ns)
version = version_ns['__version__']
def get_data_files():
"""Get the data files for the package.
"""
return [
('share/jupyter/lab/extensions', [
os.path.relpath(f, '.') for f in glob(tar_path)
]),
('etc/jupyter/jupyter_notebook_config.d', [
os.path.relpath(
pjoin(here, 'scriptedforms', 'scriptedforms.json'), '.')
])
]
setup(
name="scriptedforms",
version=version,
author="Simon Biggs",
author_email="sbiggs@scriptedforms.com.au",
description="ScriptedForms.",
long_description=(
""
),
keywords=[],
packages=[
"scriptedforms"
],
entry_points={
'console_scripts': [
'scriptedforms=scriptedforms:main',
],
},
data_files=get_data_files(),
license='AGPL-3.0+',
python_requires='>=3.5',
install_requires=[
'notebook >= 5.5',
'numpy',
'pandas',
'watchdog',
'matplotlib',
'jupyterlab >= 0.32.0'
],
classifiers=[],
url="https://scriptedforms.com.au",
include_package_data=True
)
# ---- /opytimizer/optimizers/swarm/kh.py (repo: gugarosa/opytimizer, license: Apache-2.0) ----
"""Krill Herd.
"""
import copy
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import opytimizer.math.general as g
import opytimizer.math.random as r
import opytimizer.utils.constant as c
import opytimizer.utils.exception as e
from opytimizer.core import Optimizer
from opytimizer.core.agent import Agent
from opytimizer.core.function import Function
from opytimizer.core.space import Space
from opytimizer.utils import logging
logger = logging.get_logger(__name__)
class KH(Optimizer):
"""A KH class, inherited from Optimizer.
This is the designed class to define KH-related
variables and methods.
References:
A. Gandomi and A. Alavi. Krill herd: A new bio-inspired optimization algorithm.
Communications in Nonlinear Science and Numerical Simulation (2012).
"""
def __init__(self, params: Optional[Dict[str, Any]] = None) -> None:
"""Initialization method.
Args:
params: Contains key-value parameters to the meta-heuristics.
"""
super(KH, self).__init__()
self.N_max = 0.01
self.w_n = 0.42
self.NN = 5
self.V_f = 0.02
self.w_f = 0.38
self.D_max = 0.002
self.C_t = 0.5
self.Cr = 0.2
self.Mu = 0.05
self.build(params)
logger.info("Class overrided.")
@property
def N_max(self) -> float:
"""Maximum induced speed."""
return self._N_max
@N_max.setter
def N_max(self, N_max: float) -> None:
if not isinstance(N_max, (float, int)):
raise e.TypeError("`N_max` should be a float or integer")
if N_max < 0:
raise e.ValueError("`N_max` should be >= 0")
self._N_max = N_max
@property
def w_n(self) -> float:
"""Inertia weight of the neighbours' motion."""
return self._w_n
@w_n.setter
def w_n(self, w_n: float) -> None:
if not isinstance(w_n, (float, int)):
raise e.TypeError("`w_n` should be a float or integer")
if w_n < 0 or w_n > 1:
raise e.ValueError("`w_n` should be between 0 and 1")
self._w_n = w_n
@property
def NN(self) -> int:
"""Number of neighbours."""
return self._NN
@NN.setter
def NN(self, NN: int) -> None:
if not isinstance(NN, int):
raise e.TypeError("`NN` should be a integer")
if NN < 0:
raise e.ValueError("`NN` should be >= 0")
self._NN = NN
@property
def V_f(self) -> float:
"""Foraging speed."""
return self._V_f
@V_f.setter
def V_f(self, V_f: float) -> None:
if not isinstance(V_f, (float, int)):
raise e.TypeError("`V_f` should be a float or integer")
if V_f < 0:
raise e.ValueError("`V_f` should be >= 0")
self._V_f = V_f
@property
def w_f(self) -> float:
"""Inertia weight of the foraging motion."""
return self._w_f
@w_f.setter
def w_f(self, w_f: float) -> None:
if not isinstance(w_f, (float, int)):
raise e.TypeError("`w_f` should be a float or integer")
if w_f < 0 or w_f > 1:
raise e.ValueError("`w_f` should be between 0 and 1")
self._w_f = w_f
@property
def D_max(self) -> float:
"""Maximum diffusion speed."""
return self._D_max
@D_max.setter
def D_max(self, D_max: float) -> None:
if not isinstance(D_max, (float, int)):
raise e.TypeError("`D_max` should be a float or integer")
if D_max < 0:
raise e.ValueError("`D_max` should be >= 0")
self._D_max = D_max
@property
def C_t(self) -> float:
"""Position constant."""
return self._C_t
@C_t.setter
def C_t(self, C_t: float) -> None:
if not isinstance(C_t, (float, int)):
raise e.TypeError("`C_t` should be a float or integer")
if C_t < 0 or C_t > 2:
raise e.ValueError("`C_t` should be between 0 and 2")
self._C_t = C_t
@property
def Cr(self) -> float:
"""Crossover probability."""
return self._Cr
@Cr.setter
def Cr(self, Cr: float) -> None:
if not isinstance(Cr, (float, int)):
raise e.TypeError("`Cr` should be a float or integer")
if Cr < 0 or Cr > 1:
raise e.ValueError("`Cr` should be between 0 and 1")
self._Cr = Cr
@property
def Mu(self) -> float:
"""Mutation probability."""
return self._Mu
@Mu.setter
def Mu(self, Mu: float) -> None:
if not isinstance(Mu, (float, int)):
raise e.TypeError("`Mu` should be a float or integer")
if Mu < 0 or Mu > 1:
raise e.ValueError("`Mu` should be between 0 and 1")
self._Mu = Mu
@property
def motion(self) -> np.ndarray:
"""Array of motions."""
return self._motion
@motion.setter
def motion(self, motion: np.ndarray) -> None:
if not isinstance(motion, np.ndarray):
raise e.TypeError("`motion` should be a numpy array")
self._motion = motion
@property
def foraging(self) -> np.ndarray:
"""Array of foragings."""
return self._foraging
@foraging.setter
def foraging(self, foraging: np.ndarray) -> None:
if not isinstance(foraging, np.ndarray):
raise e.TypeError("`foraging` should be a numpy array")
self._foraging = foraging
def compile(self, space: Space) -> None:
"""Compiles additional information that is used by this optimizer.
Args:
space: A Space object containing meta-information.
"""
self.motion = np.zeros((space.n_agents, space.n_variables, space.n_dimensions))
self.foraging = np.zeros(
(space.n_agents, space.n_variables, space.n_dimensions)
)
def _food_location(self, agents: List[Agent], function: Function) -> Agent:
"""Calculates the food location.
Args:
agents: List of agents.
function: A Function object that will be used as the objective function.
Returns:
(Agent): A new food location.
"""
food = copy.deepcopy(agents[0])
sum_fitness_pos = np.sum(
[1 / (agent.fit + c.EPSILON) * agent.position for agent in agents], axis=0
)
sum_fitness = np.sum([1 / (agent.fit + c.EPSILON) for agent in agents])
food.position = sum_fitness_pos / sum_fitness
food.clip_by_bound()
food.fit = function(food.position)
return food
def _sensing_distance(self, agents: List[Agent], idx: int) -> Tuple[float, float]:
"""Calculates the sensing distance for an individual krill (eq. 7).
Args:
agents: List of agents.
idx: Selected agent.
Returns:
(Tuple[float, float]): The sensing distance for an individual krill.
"""
eucl_distance = [
g.euclidean_distance(agents[idx].position, agent.position)
for agent in agents
]
distance = np.sum(eucl_distance) / (self.NN * len(agents))
return distance, eucl_distance
def _get_neighbours(
self,
agents: List[Agent],
idx: int,
sensing_distance: float,
eucl_distance: List[float],
) -> List[Agent]:
"""Gathers the neighbours based on the sensing distance.
Args:
agents: List of agents.
idx: Selected agent.
sensing_distance: Sensing distanced used to gather the krill's neighbours.
eucl_distance: List of euclidean distances.
Returns:
(List[Agent]): A list containing the krill's neighbours.
"""
neighbours = []
for i, dist in enumerate(eucl_distance):
if idx != i and sensing_distance > dist:
neighbours.append(agents[i])
return neighbours
def _local_alpha(
self, agent: Agent, worst: Agent, best: Agent, neighbours: List[Agent]
) -> float:
"""Calculates the local alpha (eq. 4).
Args:
agent: Selected agent.
worst: Worst agent.
best: Best agent.
neighbours: List of neighbours.
Returns:
(float): The local alpha.
"""
fitness = [
(agent.fit - neighbour.fit) / (worst.fit - best.fit + c.EPSILON)
for neighbour in neighbours
]
position = [
(neighbour.position - agent.position)
/ (g.euclidean_distance(neighbour.position, agent.position) + c.EPSILON)
for neighbour in neighbours
]
alpha = np.sum([fit * pos for (fit, pos) in zip(fitness, position)], axis=0)
return alpha
def _target_alpha(
self, agent: Agent, worst: Agent, best: Agent, C_best: float
) -> float:
"""Calculates the target alpha (eq. 8).
Args:
agent: Selected agent.
worst: Worst agent.
best: Best agent.
C_best: Effectiveness coefficient.
Returns:
(float): The target alpha.
"""
fitness = (agent.fit - best.fit) / (worst.fit - best.fit + c.EPSILON)
position = (best.position - agent.position) / (
g.euclidean_distance(best.position, agent.position) + c.EPSILON
)
alpha = C_best * fitness * position
return alpha
def _neighbour_motion(
self,
agents: List[Agent],
idx: int,
iteration: int,
n_iterations: int,
motion: np.ndarray,
) -> np.ndarray:
"""Performs the motion induced by other krill individuals (eq. 2).
Args:
agents: List of agents.
idx: Selected agent.
iteration: Current iteration.
n_iterations: Maximum number of iterations.
motion: Array of motions.
Returns:
(np.ndarray): The krill's neighbour motion.
"""
# Calculates the sensing distance (eq. 7)
sensing_distance, eucl_distance = self._sensing_distance(agents, idx)
# Calculates the local alpha (eq. 4)
neighbours = self._get_neighbours(agents, idx, sensing_distance, eucl_distance)
alpha_l = self._local_alpha(agents[idx], agents[-1], agents[0], neighbours)
# Calculates the effective coefficient (eq. 9)
C_best = 2 * (r.generate_uniform_random_number() + iteration / n_iterations)
# Calculates the target alpha (eq. 8)
alpha_t = self._target_alpha(agents[idx], agents[-1], agents[0], C_best)
# Calculates the neighbour motion (eq. 2)
neighbour_motion = self.N_max * (alpha_l + alpha_t) + self.w_n * motion
return neighbour_motion
def _food_beta(
self, agent: Agent, worst: Agent, best: Agent, food: np.ndarray, C_food: float
) -> np.ndarray:
"""Calculates the food attraction (eq. 13).
Args:
agent: Selected agent.
worst: Worst agent.
best: Best agent.
food: Food location.
C_food: Food coefficient.
Returns:
(np.ndarray): The food attraction.
"""
fitness = (agent.fit - food.fit) / (worst.fit - best.fit + c.EPSILON)
position = (food.position - agent.position) / (
g.euclidean_distance(food.position, agent.position) + c.EPSILON
)
beta = C_food * fitness * position
return beta
def _best_beta(self, agent: Agent, worst: Agent, best: Agent) -> np.ndarray:
"""Calculates the best attraction (eq. 15).
Args:
agent: Selected agent.
worst: Worst agent.
best: Best agent.
Returns:
(np.ndarray): The best attraction.
"""
fitness = (agent.fit - best.fit) / (worst.fit - best.fit + c.EPSILON)
position = (best.position - agent.position) / (
g.euclidean_distance(best.position, agent.position) + c.EPSILON
)
beta = fitness * position
return beta
def _foraging_motion(
self,
agents: List[Agent],
idx: int,
iteration: int,
n_iterations: int,
food: np.ndarray,
foraging: np.ndarray,
) -> np.ndarray:
"""Performs the foraging induced by the food location (eq. 10).
Args:
agents: List of agents.
idx: Selected agent.
iteration: Current iteration.
n_iterations: Maximum number of iterations.
food: Food location.
foraging: Array of foraging motions.
Returns:
(np.ndarray): The krill's foraging motion.
"""
# Calculates the food coefficient (eq. 14)
C_food = 2 * (1 - iteration / n_iterations)
# Calculates the food attraction (eq. 13)
beta_f = self._food_beta(agents[idx], agents[-1], agents[0], food, C_food)
# Calculates the best attraction (eq. 15)
beta_b = self._best_beta(agents[idx], agents[-1], agents[0])
# Calculates the foraging motion (eq. 10)
foraging_motion = self.V_f * (beta_f + beta_b) + self.w_f * foraging
return foraging_motion
def _physical_diffusion(
self, n_variables: int, n_dimensions: int, iteration: int, n_iterations: int
    ) -> np.ndarray:
"""Performs the physical diffusion of individual krills (eq. 16-17).
Args:
n_variables: Number of decision variables.
n_dimensions: Number of dimensions.
iteration: Current iteration.
n_iterations: Maximum number of iterations.
Returns:
            (np.ndarray): The physical diffusion.
"""
# Calculates the physical diffusion (eq. 17)
r1 = r.generate_uniform_random_number(-1, 1, size=(n_variables, n_dimensions))
physical_diffusion = self.D_max * (1 - iteration / n_iterations) * r1
return physical_diffusion
def _update_position(
self,
agents: List[Agent],
idx: int,
iteration: int,
n_iterations: int,
food: np.ndarray,
motion: np.ndarray,
foraging: np.ndarray,
) -> np.ndarray:
"""Updates a single krill position (eq. 18-19).
Args:
agents: List of agents.
idx: Selected agent.
iteration: Current iteration.
n_iterations: Maximum number of iterations.
food: Food location.
motion: Array of motions.
foraging: Array of foraging motions.
Returns:
(np.ndarray): The updated position.
"""
neighbour_motion = self._neighbour_motion(
agents, idx, iteration, n_iterations, motion
)
foraging_motion = self._foraging_motion(
agents, idx, iteration, n_iterations, food, foraging
)
physical_diffusion = self._physical_diffusion(
agents[idx].n_variables, agents[idx].n_dimensions, iteration, n_iterations
)
# Calculates the delta (eq. 19)
delta_t = self.C_t * np.sum(agents[idx].ub - agents[idx].lb)
# Updates the current agent's position (eq. 18)
new_position = agents[idx].position + delta_t * (
neighbour_motion + foraging_motion + physical_diffusion
)
return new_position
def _crossover(self, agents: List[Agent], idx: int) -> Agent:
"""Performs the crossover between selected agent and a randomly agent (eq. 21).
Args:
agents: List of agents.
idx: Selected agent.
Returns:
            (Agent): The agent after the crossover operator has been applied.
"""
a = copy.deepcopy(agents[idx])
m = r.generate_integer_random_number(0, len(agents), exclude_value=idx)
Cr = self.Cr * (
(agents[idx].fit - agents[0].fit)
/ (agents[-1].fit - agents[0].fit + c.EPSILON)
)
for j in range(a.n_variables):
r1 = r.generate_uniform_random_number()
if r1 < Cr:
a.position[j] = copy.deepcopy(agents[m].position[j])
return a
def _mutation(self, agents: List[Agent], idx: int) -> Agent:
"""Performs the mutation between selected agent and randomly agents (eq. 22).
Args:
agents: List of agents.
idx: Selected agent.
Returns:
            (Agent): The agent after the mutation operator has been applied.
"""
a = copy.deepcopy(agents[idx])
p = r.generate_integer_random_number(0, len(agents), exclude_value=idx)
q = r.generate_integer_random_number(0, len(agents), exclude_value=idx)
Mu = self.Mu / (
(agents[idx].fit - agents[0].fit)
/ (agents[-1].fit - agents[0].fit + c.EPSILON)
+ c.EPSILON
)
for j in range(a.n_variables):
r1 = r.generate_uniform_random_number()
if r1 < Mu:
r2 = r.generate_uniform_random_number()
a.position[j] = agents[0].position[j] + r2 * (
agents[p].position[j] - agents[q].position[j]
)
return a
def update(
self, space: Space, function: Function, iteration: int, n_iterations: int
) -> None:
"""Wraps motion and genetic updates over all agents and variables.
Args:
space: Space containing agents and update-related information.
function: A Function object that will be used as the objective function.
iteration: Current iteration.
n_iterations: Maximum number of iterations.
"""
space.agents.sort(key=lambda x: x.fit)
# Calculates the food location (eq. 12)
food = self._food_location(space.agents, function)
for i, _ in enumerate(space.agents):
space.agents[i].position = self._update_position(
space.agents,
i,
iteration,
n_iterations,
food,
self.motion[i],
self.foraging[i],
)
space.agents[i] = self._crossover(space.agents, i)
space.agents[i] = self._mutation(space.agents, i)
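# A hypothetical end-to-end sketch (assumptions: this class is opytimizer's Krill
# Herd optimizer, exported as `KH`, and the package exposes Opytimizer, Function
# and SearchSpace as in recent opytimizer releases):
#
#   import numpy as np
#   from opytimizer import Opytimizer
#   from opytimizer.core import Function
#   from opytimizer.optimizers.swarm import KH
#   from opytimizer.spaces import SearchSpace
#
#   space = SearchSpace(n_agents=20, n_variables=2,
#                       lower_bound=[-10, -10], upper_bound=[10, 10])
#   Opytimizer(space, KH(), Function(lambda x: np.sum(x ** 2))).start(n_iterations=50)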
|
02f86f7dfbdeda363ab6c309ab6b50307c843f6c
|
c5952aa93ddede4cbe8e027f8ac507ffdbca6124
|
/factors.py
|
23aa72db5504bf14ef1e85456b0aee0386acf5fe
|
[] |
no_license
|
hackingmath/Math-Adventures
|
1cea476ee5aea807367670aefc062bc2530218ba
|
327751c5db4526e858ab240b29b9cc55bb742586
|
refs/heads/master
| 2023-07-06T00:25:25.391354
| 2023-06-23T15:57:18
| 2023-06-23T15:57:18
| 125,400,295
| 114
| 51
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 218
|
py
|
factors.py
|
#factors.py
def factors(num):
'''Returns a list of the factors of num'''
factorList = []
for i in range(1,num+1):
if num % i == 0:
factorList.append(i)
return factorList
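if __name__ == "__main__":
    # Quick demonstration of the helper above: 28 % i == 0 for i in {1, 2, 4, 7, 14, 28}.
    print(factors(28))  # [1, 2, 4, 7, 14, 28]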
|
8322d78ef9137a65018ac340482b6634e23ce8fa
|
33be4b8a076a83adb73ff6b267d8135dbbdd16fc
|
/chapter2/code/os module/platform_version.py
|
d6984892ef25dfbf9c1c281e89c74ebb6fa0cf56
|
[
"MIT"
] |
permissive
|
PacktPublishing/Mastering-Python-for-Networking-and-Security-Second-Edition
|
73bbe8bd6464585bb3cef788eb8fd62ec89f8983
|
b748cca5841b013b44c98f1884611b66676fef1a
|
refs/heads/master
| 2023-01-27T10:59:59.868438
| 2023-01-18T10:10:33
| 2023-01-18T10:10:33
| 254,302,959
| 117
| 67
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
platform_version.py
|
from platform import python_implementation, python_version_tuple
print(python_implementation())
for attribute in python_version_tuple():
print(attribute)
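# Sample output (varies with the interpreter; e.g. under CPython 3.11.4):
#   CPython
#   3
#   11
#   4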
|
d44aae2200f017c355aa197cc9c4ade4f8c4f0fc
|
f8b1bb830377785ac65be495795dd711cfc0fb17
|
/setup.py
|
68dbc78e2978b6a2f76208422886323eb4a21d2e
|
[
"MIT"
] |
permissive
|
bactopia/bactopia
|
97d0bed69cc4938e35f87908dae1035bc1d8a081
|
cee87a2ccecf2305850a915bbe8944942c477ce9
|
refs/heads/master
| 2023-08-17T08:16:07.862308
| 2022-11-28T21:30:51
| 2022-11-28T21:30:51
| 172,074,074
| 286
| 53
|
MIT
| 2023-09-11T04:37:35
| 2019-02-22T13:49:38
|
Nextflow
|
UTF-8
|
Python
| false
| false
| 651
|
py
|
setup.py
|
from setuptools import setup, find_packages
from bactopia import __version__
setup(
author="Robert A. Petit III",
author_email='robbie.petit@gmail.com',
description="A Python package for working with Bactopia.",
entry_points={
'console_scripts': [
'bactopia-summary=bactopia.commands.summary:main',
'bactopia-jsonify=bactopia.commands.jsonify:main'
],
},
keywords=[],
name='bactopia',
packages=find_packages(),
python_requires='>=3.6',
url='https://github.com/bactopia/bactopia',
version=__version__,
zip_safe=False
)
|
9b767fe7a96221c1a65240df09ba70076c22e6b2
|
db12b990924703cd74748d8585cd9c11fafa6746
|
/h2o-py/tests/testdir_algos/automl/pyunit_automl_algo_parameters.py
|
9fc3fe5c88e12133b8ef81663dd81d225d162de6
|
[
"Apache-2.0"
] |
permissive
|
h2oai/h2o-3
|
919019a8f297eec676011a9cfd2cc2d97891ce14
|
d817ab90c8c47f6787604a0b9639b66234158228
|
refs/heads/master
| 2023-08-17T18:50:17.732191
| 2023-08-17T16:44:42
| 2023-08-17T16:44:42
| 17,371,412
| 6,872
| 2,345
|
Apache-2.0
| 2023-09-14T18:05:40
| 2014-03-03T16:08:07
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,056
|
py
|
pyunit_automl_algo_parameters.py
|
import sys, os
import re
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
import h2o.exceptions
from h2o.automl import H2OAutoML
from tests import pyunit_utils as pu
from _automl_utils import import_dataset, get_partitioned_model_names
max_models = 5
def test_algo_parameter_can_be_applied_only_to_a_specific_algo():
ds = import_dataset()
aml = H2OAutoML(project_name="py_specific_algo_param",
algo_parameters=dict(
GBM__monotone_constraints=dict(AGE=1)
),
max_models=6,
seed=1)
aml.train(y=ds.target, training_frame=ds.train)
model_names = get_partitioned_model_names(aml.leaderboard).all
models_supporting_monotone_constraints = [n for n in model_names if re.match(r"GBM|XGBoost", n)]
assert next((m for m in models_supporting_monotone_constraints if m.startswith('GBM')), None), "There should be at least one GBM model"
for m in models_supporting_monotone_constraints:
model = h2o.get_model(m)
mc_value = next(v['actual'] for n, v in model.params.items() if n == 'monotone_constraints')
if m.startswith('GBM'):
assert isinstance(mc_value, list)
age = next((v for v in mc_value if v['key'] == 'AGE'), None)
assert age is not None
assert age['value'] == 1.0
else:
assert mc_value is None
def test_cannot_set_unauthorized_algo_parameter():
ds = import_dataset()
aml = H2OAutoML(project_name="py_unauthorized_algo_param",
algo_parameters=dict(
score_tree_interval=7
),
max_models=6,
seed=1)
try:
aml.train(y=ds.target, training_frame=ds.train)
except h2o.exceptions.H2OResponseError as e:
assert "algo_parameters: score_tree_interval" in str(e)
pu.run_tests([
test_algo_parameter_can_be_applied_only_to_a_specific_algo,
test_cannot_set_unauthorized_algo_parameter,
])
|
54a3cd3440645332642e17e38f46e236a3271b3d
|
2ac03b8c24df220ea32ea525e1d65aeb294cd1a4
|
/custom_components/waste_collection_schedule/waste_collection_schedule/source/circulus_nl.py
|
9cb68f185eac1e3144b60645a834a56f292269c9
|
[
"MIT"
] |
permissive
|
mampfes/hacs_waste_collection_schedule
|
a7b98319a7483dedc8cf78b724f93932934c1702
|
1dc9476efef9963a141b9ac987e2708224b9eaaf
|
refs/heads/master
| 2023-08-16T21:14:46.088962
| 2023-08-16T10:05:24
| 2023-08-16T10:05:24
| 254,347,436
| 495
| 428
|
MIT
| 2023-09-12T18:59:07
| 2020-04-09T11:02:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,017
|
py
|
circulus_nl.py
|
from datetime import date, datetime, timedelta
import requests
from waste_collection_schedule import Collection
TITLE = "Circulus"
DESCRIPTION = "Source for circulus.nl waste collection."
URL = "https://mijn.circulus.nl"
TEST_CASES = {
"Test1": {"postal_code": "7206AC", "house_number": "1"},
}
ICON_MAP = {
"REST": "mdi:trash-can",
"ZWAKRA": "mdi:recycle",
"GFT": "mdi:leaf",
"PAP": "mdi:newspaper-variant-multiple",
}
WASTE_MAP = {
"REST": "Zwarte Kliko",
"ZWAKRA": "Glas & Blik",
"GFT": "Groene Kliko",
"PAP": "Papier",
}
API_URL = "https://mijn.circulus.nl"
class Source:
def __init__(self, postal_code, house_number):
self._postal_code = postal_code
self._house_number = house_number
def fetch(self):
location_data = {"zipCode": self._postal_code, "number": self._house_number}
entries = []
# Make a post request and store the cookies
r = requests.post(f"{API_URL}/register/zipcode.json", data=location_data)
r.raise_for_status()
cookies = r.cookies
# Check if the CB_SESSION cookie exists
if "CB_SESSION" in cookies:
# Make a GET request and store the JSON data
req_params = {
"from": date.today().strftime("%Y-%m-%d"),
"till": (date.today() + timedelta(days=365)).strftime("%Y-%m-%d"),
}
r = requests.get(
f"{API_URL}/afvalkalender.json", params=req_params, cookies=cookies
)
r.raise_for_status()
for item in r.json()["customData"]["response"]["garbage"]:
for newdate in item["dates"]:
entries.append(
Collection(
date=datetime.strptime(newdate, "%Y-%m-%d").date(),
t=WASTE_MAP[item["code"]],
icon=ICON_MAP.get(item["code"]),
)
)
return entries
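# A minimal fetch sketch mirroring the TEST_CASES entry above (requires network
# access to mijn.circulus.nl; the `date`/`type` attribute names are assumed from
# the Collection helper):
#
#   source = Source(postal_code="7206AC", house_number="1")
#   for collection in source.fetch():
#       print(collection.date, collection.type)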
|
56af1e84ea0f783f577dd97031515caece893e41
|
c36d64932d3e5623923acb31d3b4a0d87fb5c00d
|
/src/repair/testing.py
|
0a1fee8528aa7c8d38614309c19ad92e713ce218
|
[
"MIT"
] |
permissive
|
mechtaev/angelix
|
25ccf8d80a698d1bedc19d857253d09ff473ebb2
|
d50f3234dc06d443d8c54dbab44d8bc3f2ac0722
|
refs/heads/master
| 2022-10-12T02:54:05.827587
| 2022-09-26T07:55:26
| 2022-09-26T07:55:26
| 42,175,803
| 102
| 43
|
MIT
| 2022-09-26T03:35:19
| 2015-09-09T12:00:57
|
Java
|
UTF-8
|
Python
| false
| false
| 3,236
|
py
|
testing.py
|
import os
from os.path import basename, join, exists
from utils import cd
import subprocess
import logging
import sys
import tempfile
from glob import glob
logger = logging.getLogger(__name__)
class Tester:
def __init__(self, config, oracle, workdir):
self.config = config
self.oracle = oracle
self.workdir = workdir
def __call__(self, project, test, dump=None, trace=None, load=None, klee=False, env=os.environ, check_instrumented=False):
src = basename(project.dir)
if klee:
logger.info('running test \'{}\' of {} source with KLEE'.format(test, src))
else:
if not self.config['mute_test_message']:
logger.info('running test \'{}\' of {} source'.format(test, src))
environment = dict(env)
if dump is not None:
environment['ANGELIX_WITH_DUMPING'] = dump
            reachable_dir = join(dump, 'reachable')  # maybe this should be done elsewhere?
os.mkdir(reachable_dir)
if trace is not None:
environment['ANGELIX_WITH_TRACING'] = trace
if (trace is not None) or (dump is not None) or (load is not None):
environment['ANGELIX_RUN'] = 'angelix-run-test'
if klee:
environment['ANGELIX_RUN'] = 'angelix-run-klee'
# using stub library to make lli work
environment['LLVMINTERP'] = 'lli -load {}/libkleeRuntest.so'.format(os.environ['KLEE_LIBRARY_PATH'])
if load is not None:
environment['ANGELIX_WITH_LOADING'] = load
environment['ANGELIX_WORKDIR'] = self.workdir
environment['ANGELIX_TEST_ID'] = test
dirpath = tempfile.mkdtemp()
executions = join(dirpath, 'executions')
environment['ANGELIX_RUN_EXECUTIONS'] = executions
if self.config['verbose'] and not self.config['mute_test_message']:
subproc_output = sys.stderr
else:
subproc_output = subprocess.DEVNULL
with cd(project.dir):
proc = subprocess.Popen(self.oracle + " " + test,
env=environment,
stdout=subproc_output,
stderr=subproc_output,
shell=True)
if klee or self.config['test_timeout'] is None: # KLEE has its own timeout
code = proc.wait()
else:
code = proc.wait(timeout=self.config['test_timeout'])
instrumented = True
if dump is not None or trace is not None or klee:
if exists(executions):
with open(executions) as file:
content = file.read()
if len(content) > 1:
logger.warning("ANGELIX_RUN is executed multiple times by test {}".format(test))
instrumented = False
else:
if not self.config['mute_test_message']:
logger.warning("ANGELIX_RUN is not executed by test {}".format(test))
instrumented = False
if check_instrumented:
return (code == 0, instrumented)
else:
return code == 0
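# A hypothetical invocation sketch (the config values, the oracle path, and the
# `project` object, which only needs a `dir` attribute here, are assumptions
# based on how this class reads them above):
#
#   config = {'mute_test_message': False, 'verbose': True, 'test_timeout': 60}
#   tester = Tester(config, oracle='./oracle', workdir='/tmp/angelix-workdir')
#   passed = tester(project, 'test-1')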
|
661385525be1f23b553b14116af286978e45c42f
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/python_modules/libraries/dagster-aws/dagster_aws_tests/ecs_tests/launcher_tests/repo.py
|
0a04980cd82db4f51d686eceb78defd6fb75ab71
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 162
|
py
|
repo.py
|
import dagster
@dagster.op
def node(_):
pass
@dagster.job
def job():
node()
@dagster.repository
def repository():
return {"jobs": {"job": job}}
|
f85de56aeb5cb07889983800a67b86779376ad3f
|
ace570f65d70e6ce9461bcb81aaaac31c57ec111
|
/host/greatfet/interfaces/led.py
|
a4448e5fbe8556d874400a8a2b0c5fc347e49fd8
|
[
"BSD-3-Clause"
] |
permissive
|
greatscottgadgets/greatfet
|
9ed060aec2d293844c0ac59612f09ecae9c7632b
|
2409575d28fc7c9cae44c9085c7457ddfb54f893
|
refs/heads/master
| 2023-05-12T18:12:31.748720
| 2023-05-05T07:18:21
| 2023-05-05T07:18:21
| 48,184,998
| 273
| 95
|
BSD-3-Clause
| 2023-05-05T07:18:22
| 2015-12-17T16:17:35
|
C
|
UTF-8
|
Python
| false
| false
| 1,104
|
py
|
led.py
|
#
# This file is part of GreatFET
#
from ..interface import GreatFETInterface
class LED(GreatFETInterface):
""" Simple periheral that allows control of an LED through the GreatFET HAL."""
def __init__(self, board, led_number):
"""Create a new object representing a GreatFET LED.
board -- The GreatFET board object that owns the given LED.
led_number -- The one-indexed LED number. On GreatFET boards, this
matches the number printed on the silkscreen.
"""
# Store a reference to the parent board.
self.board = board
# Store which of the four(?) LEDs we refer to.
# TODO: Validate this?
self.led_number = led_number
    # Toggle the relevant LED.
def toggle(self):
self.board.apis.leds.toggle(self.led_number)
    # Turn the relevant LED on.
def on(self):
self.board.apis.leds.on(self.led_number)
    # Turn the relevant LED off.
def off(self):
self.board.apis.leds.off(self.led_number)
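# A hypothetical usage sketch (assumes the host package exposes a `GreatFET`
# board class whose `leds` mapping yields these LED objects, one-indexed as
# the docstring above describes):
#
#   from greatfet import GreatFET
#   gf = GreatFET()
#   gf.leds[1].toggle()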
|
d08214f95c14556f13b375f787cf8cd2aaa58335
|
307d3837d31f9e3728af2b62ca51ebf63fe6ec6b
|
/hall_of_fame/lysuk96/List/LTC_105.py
|
93e8e54b53a56760589857e684bb63fa62fe57f9
|
[] |
no_license
|
ellynhan/challenge100-codingtest-study
|
905043497d154b8a7333ca536e536d013f6e7454
|
bcdc6d04f13b12ba80b42e066f9d244d7c2cc698
|
refs/heads/master
| 2023-09-01T14:10:13.481013
| 2023-08-27T14:38:52
| 2023-08-27T14:38:52
| 401,561,230
| 162
| 176
| null | 2023-09-09T14:56:25
| 2021-08-31T03:30:36
|
C++
|
UTF-8
|
Python
| false
| false
| 979
|
py
|
LTC_105.py
|
'''
Given a string s, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases.
Example 1:
Input: s = "A man, a plan, a canal: Panama"
Output: true
Explanation: "amanaplanacanalpanama" is a palindrome.
Example 2:
Input: s = "race a car"
Output: false
Explanation: "raceacar" is not a palindrome.
Constraints:
1 <= s.length <= 2 * 10^5
s consists only of printable ASCII characters.
'''
class Solution:
def isPalindrome(self, s: str) -> bool:
s = s.upper()
i = 0
j = len(s)-1
        while i < j:
            if not s[i].isalnum():
                i += 1
            elif not s[j].isalnum():
                j -= 1
            elif s[i] == s[j]:
                i += 1
                j -= 1
            else:
                return False
return True
solution = Solution()
s = input()
print(solution.isPalindrome(s))
|
34eded580a3d73d3c1eb18e0f6ce245890e0d086
|
5d55e0885bacd718588f25b71675c1127c93fc0a
|
/river/base/transformer.py
|
defc0c80f87a160ebb3d0387c646abd6c44d9ea1
|
[
"BSD-3-Clause"
] |
permissive
|
online-ml/river
|
5698b60e65493eba28d0c0c1992f19eb996c0bfa
|
c658393084ed4147a782daa6bcd4a467c3abb0cb
|
refs/heads/main
| 2023-09-03T00:12:55.121301
| 2023-08-29T12:04:20
| 2023-08-29T12:04:20
| 167,388,434
| 3,372
| 389
|
BSD-3-Clause
| 2023-09-12T08:11:15
| 2019-01-24T15:18:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,259
|
py
|
transformer.py
|
from __future__ import annotations
import abc
import typing
from river import base
if typing.TYPE_CHECKING:
import pandas as pd
class BaseTransformer:
def __add__(self, other):
"""Fuses with another Transformer into a TransformerUnion."""
from river import compose
return compose.TransformerUnion(self, other)
def __radd__(self, other):
"""Fuses with another Transformer into a TransformerUnion."""
from river import compose
return compose.TransformerUnion(other, self)
def __mul__(self, other):
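        """Creates a TransformerProduct when combined with a transformer or pipeline, otherwise a Grouper."""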
from river import compose
if isinstance(other, Transformer) or isinstance(other, compose.Pipeline):
return compose.TransformerProduct(self, other)
return compose.Grouper(transformer=self, by=other)
def __rmul__(self, other):
"""Creates a Grouper."""
return self * other
@abc.abstractmethod
def transform_one(self, x: dict) -> dict:
"""Transform a set of features `x`.
Parameters
----------
x
A dictionary of features.
Returns
-------
The transformed values.
"""
class Transformer(base.Estimator, BaseTransformer):
"""A transformer."""
@property
def _supervised(self):
return False
def learn_one(self, x: dict) -> Transformer:
"""Update with a set of features `x`.
A lot of transformers don't actually have to do anything during the `learn_one` step
because they are stateless. For this reason the default behavior of this function is to do
nothing. Transformers that however do something during the `learn_one` can override this
method.
Parameters
----------
x
A dictionary of features.
Returns
-------
self
"""
return self
class SupervisedTransformer(base.Estimator, BaseTransformer):
"""A supervised transformer."""
@property
def _supervised(self):
return True
def learn_one(self, x: dict, y: base.typing.Target) -> SupervisedTransformer:
"""Update with a set of features `x` and a target `y`.
Parameters
----------
x
A dictionary of features.
y
A target.
Returns
-------
self
"""
return self
class MiniBatchTransformer(Transformer):
"""A transform that can operate on mini-batches."""
@abc.abstractmethod
def transform_many(self, X: pd.DataFrame) -> pd.DataFrame:
"""Transform a mini-batch of features.
Parameters
----------
X
A DataFrame of features.
Returns
-------
A new DataFrame.
"""
def learn_many(self, X: pd.DataFrame) -> Transformer:
"""Update with a mini-batch of features.
A lot of transformers don't actually have to do anything during the `learn_many` step
because they are stateless. For this reason the default behavior of this function is to do
nothing. Transformers that however do something during the `learn_many` can override this
method.
Parameters
----------
X
A DataFrame of features.
Returns
-------
self
"""
return self
class MiniBatchSupervisedTransformer(Transformer):
"""A supervised transformer that can operate on mini-batches."""
@property
def _supervised(self):
return True
@abc.abstractmethod
def learn_many(self, X: pd.DataFrame, y: pd.Series) -> MiniBatchSupervisedTransformer:
"""Update the model with a mini-batch of features `X` and targets `y`.
Parameters
----------
X
A dataframe of features.
y
A series of boolean target values.
Returns
-------
self
"""
return self
@abc.abstractmethod
def transform_many(self, X: pd.DataFrame) -> pd.DataFrame:
"""Transform a mini-batch of features.
Parameters
----------
X
A DataFrame of features.
Returns
-------
A new DataFrame.
"""
|
73674f9d73d4a0246977ef7ddbdac14ac302df67
|
5e255ad1360c90478393744586663741a9569c21
|
/linebot/v3/module/__init__.py
|
d514b844b31f312b21a9e48e47d6fc1801bbc9c7
|
[
"Apache-2.0"
] |
permissive
|
line/line-bot-sdk-python
|
d76268e8b542060d6eccbacc5dbfab16960ecc35
|
cffd35948238ae24982173e30b1ea1e595bbefd9
|
refs/heads/master
| 2023-08-31T22:12:31.698183
| 2023-08-28T01:10:09
| 2023-08-28T01:10:09
| 70,553,423
| 1,898
| 1,181
|
Apache-2.0
| 2023-09-11T05:14:07
| 2016-10-11T03:42:26
|
Python
|
UTF-8
|
Python
| false
| false
| 1,382
|
py
|
__init__.py
|
# coding: utf-8
# flake8: noqa
"""
LINE Messaging API
This document describes LINE Messaging API. # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
__version__ = "3.3.0"
# import apis into sdk package
from linebot.v3.module.api.line_module import LineModule
from linebot.v3.module.api.async_line_module import AsyncLineModule
# import ApiClient
from linebot.v3.module.api_response import ApiResponse
from linebot.v3.module.api_client import ApiClient
from linebot.v3.module.async_api_client import AsyncApiClient
from linebot.v3.module.configuration import Configuration
from linebot.v3.module.exceptions import OpenApiException
from linebot.v3.module.exceptions import ApiTypeError
from linebot.v3.module.exceptions import ApiValueError
from linebot.v3.module.exceptions import ApiKeyError
from linebot.v3.module.exceptions import ApiAttributeError
from linebot.v3.module.exceptions import ApiException
# import models into sdk package
from linebot.v3.module.models.acquire_chat_control_request import AcquireChatControlRequest
from linebot.v3.module.models.detach_module_request import DetachModuleRequest
from linebot.v3.module.models.get_modules_response import GetModulesResponse
from linebot.v3.module.models.module_bot import ModuleBot
|
ea7f36d9f97a6a3e0f1f50c714827260ed48428a
|
d594f3926f6379ef7c382c608cb211f507240420
|
/csunplugged/resources/utils/get_resource_generator.py
|
6ce38edba388fc7a1947853a928373f4510ef246
|
[
"LicenseRef-scancode-secret-labs-2011",
"MIT",
"OFL-1.1",
"LGPL-2.0-or-later",
"AGPL-3.0-only",
"CC-BY-4.0",
"Apache-2.0",
"BSD-3-Clause",
"CC-BY-SA-4.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
uccser/cs-unplugged
|
0b9151f84dd490d5b90771a3706327a623d39edc
|
363e281ff17cefdef0ec61078b1718eef2eaf71a
|
refs/heads/develop
| 2023-08-25T08:45:29.833025
| 2023-08-22T02:58:35
| 2023-08-22T02:58:35
| 66,315,075
| 200
| 41
|
MIT
| 2023-09-14T02:15:40
| 2016-08-22T23:16:40
|
Python
|
UTF-8
|
Python
| false
| false
| 688
|
py
|
get_resource_generator.py
|
"""Module for importing a resource view module."""
import importlib
from django.conf import settings
def get_resource_generator(generator_module, requested_options=None):
"""Return view module for resource.
Args:
generator_module: Name of resource generator (str).
requested_options: QueryDict of requested_options (QueryDict).
Returns:
Instance of resource generator for given resource.
"""
generator_class_name = generator_module
module = importlib.import_module(settings.RESOURCE_GENERATORS_PACKAGE)
generator_class = getattr(module, generator_class_name)
generator = generator_class(requested_options)
return generator
|
53f40d06683c3dd7089366ebc78cfb1ef0fc2af6
|
7eb606a7957e5500f163c93dc4b19418cf9cf335
|
/tests/integration_tests/test_model_training_options.py
|
1e46a5a4b233af83c3c18a6c33fdc23d35d84e2e
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
ludwig-ai/ludwig
|
024f74da86567a57ec8e30efcb4600f0c52333a1
|
e1d023e41606c9b76b35e1d231c2f13368a30eca
|
refs/heads/master
| 2023-09-03T08:07:32.978301
| 2023-09-01T19:39:32
| 2023-09-01T19:39:32
| 163,346,054
| 2,567
| 285
|
Apache-2.0
| 2023-09-14T20:34:52
| 2018-12-27T23:58:12
|
Python
|
UTF-8
|
Python
| false
| false
| 17,878
|
py
|
test_model_training_options.py
|
import json
import logging
import os.path
import re
import numpy as np
import pandas as pd
import pytest
import torch
from ludwig import globals as global_vars
from ludwig.api import LudwigModel
from ludwig.backend import LOCAL_BACKEND
from ludwig.constants import (
BATCH_SIZE,
CATEGORY,
DEFAULTS,
EPOCHS,
INPUT_FEATURES,
OUTPUT_FEATURES,
PREPROCESSING,
TRAINER,
TRAINING,
)
from ludwig.contribs.mlflow import MlflowCallback
from ludwig.experiment import experiment_cli
from ludwig.features.number_feature import numeric_transformation_registry
from ludwig.globals import DESCRIPTION_FILE_NAME, TRAINING_PREPROC_FILE_NAME
from ludwig.schema.optimizers import optimizer_registry
from ludwig.utils.data_utils import load_json, replace_file_extension
from ludwig.utils.misc_utils import get_from_registry
from ludwig.utils.package_utils import LazyLoader
from tests.integration_tests import synthetic_test_data
from tests.integration_tests.utils import category_feature, generate_data, LocalTestBackend
mlflow = LazyLoader("mlflow", globals(), "mlflow")
RANDOM_SEED = 42
@pytest.mark.parametrize("early_stop", [3, 5])
def test_early_stopping(early_stop, tmp_path):
input_features, output_features = synthetic_test_data.get_feature_configs()
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat"},
TRAINER: {"epochs": 75, "early_stop": early_stop, "batch_size": 16},
}
# create sub-directory to store results
results_dir = tmp_path / "results"
results_dir.mkdir()
# run experiment
generated_data = synthetic_test_data.get_generated_data()
_, _, _, _, output_dir = experiment_cli(
training_set=generated_data.train_df,
validation_set=generated_data.validation_df,
test_set=generated_data.test_df,
output_directory=str(results_dir),
config=config,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
skip_save_model=True,
skip_save_log=True,
)
# test existence of required files
train_stats_fp = os.path.join(output_dir, "training_statistics.json")
metadata_fp = os.path.join(output_dir, DESCRIPTION_FILE_NAME)
assert os.path.isfile(train_stats_fp)
assert os.path.isfile(metadata_fp)
# retrieve results so we can validate early stopping
with open(train_stats_fp) as f:
train_stats = json.load(f)
with open(metadata_fp) as f:
metadata = json.load(f)
# get early stopping value
early_stop_value = metadata["config"][TRAINER]["early_stop"]
# retrieve validation losses
vald_losses_data = train_stats["validation"]["combined"]["loss"]
last_evaluation = len(vald_losses_data) - 1
best_evaluation = np.argmin(vald_losses_data)
assert last_evaluation - best_evaluation == early_stop_value
@pytest.mark.parametrize("skip_save_progress", [False])
@pytest.mark.parametrize("skip_save_model", [False, True])
def test_model_progress_save(skip_save_progress, skip_save_model, tmp_path):
input_features, output_features = synthetic_test_data.get_feature_configs()
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat"},
TRAINER: {"epochs": 5, BATCH_SIZE: 128},
}
# create sub-directory to store results
results_dir = tmp_path / "results"
results_dir.mkdir()
# run experiment
generated_data = synthetic_test_data.get_generated_data()
_, _, _, _, output_dir = experiment_cli(
training_set=generated_data.train_df,
validation_set=generated_data.validation_df,
test_set=generated_data.test_df,
output_directory=str(results_dir),
config=config,
skip_save_processed_input=True,
skip_save_progress=skip_save_progress,
skip_save_unprocessed_output=True,
skip_save_model=skip_save_model,
skip_save_log=True,
)
# ========== Check for required result data sets =============
model_dir = os.path.join(output_dir, "model")
files = [f for f in os.listdir(model_dir) if re.match(r"model_weights", f)]
if skip_save_model:
assert len(files) == 0
else:
assert len(files) == 1
training_checkpoints_dir = os.path.join(output_dir, "model", "training_checkpoints")
training_checkpoints = os.listdir(training_checkpoints_dir)
if skip_save_progress:
assert len(training_checkpoints) == 0
else:
assert len(training_checkpoints) > 0
@pytest.mark.parametrize("optimizer", ["sgd", "adam"])
def test_resume_training(optimizer, tmp_path):
input_features, output_features = synthetic_test_data.get_feature_configs()
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat"},
TRAINER: {"epochs": 2, "batch_size": 16, "optimizer": {"type": optimizer}},
}
# create sub-directory to store results
results_dir = tmp_path / "results"
results_dir.mkdir()
generated_data = synthetic_test_data.get_generated_data()
_, _, _, _, output_dir1 = experiment_cli(
config,
training_set=generated_data.train_df,
validation_set=generated_data.validation_df,
test_set=generated_data.test_df,
)
config[TRAINER]["epochs"] = 5
experiment_cli(
config,
training_set=generated_data.train_df,
validation_set=generated_data.validation_df,
test_set=generated_data.test_df,
model_resume_path=output_dir1,
)
_, _, _, _, output_dir2 = experiment_cli(
config,
training_set=generated_data.train_df,
validation_set=generated_data.validation_df,
test_set=generated_data.test_df,
)
# compare learning curves with and without resuming
ts1 = load_json(os.path.join(output_dir1, "training_statistics.json"))
ts2 = load_json(os.path.join(output_dir2, "training_statistics.json"))
print("ts1", ts1)
print("ts2", ts2)
assert ts1[TRAINING]["combined"]["loss"] == ts2[TRAINING]["combined"]["loss"]
# compare predictions with and without resuming
y_pred1 = np.load(os.path.join(output_dir1, "y_predictions.npy"))
y_pred2 = np.load(os.path.join(output_dir2, "y_predictions.npy"))
print("y_pred1", y_pred1)
print("y_pred2", y_pred2)
assert np.all(np.isclose(y_pred1, y_pred2))
@pytest.mark.parametrize("optimizer", ["sgd", "adam"])
def test_resume_training_mlflow(optimizer, tmp_path):
input_features, output_features = synthetic_test_data.get_feature_configs()
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat"},
TRAINER: {"epochs": 2, "batch_size": 16, "optimizer": {"type": optimizer}},
}
# create sub-directory to store results
results_dir = tmp_path / "results"
results_dir.mkdir()
mlflow_uri = f"file://{tmp_path}/mlruns"
experiment_name = optimizer + "_experiment"
generated_data = synthetic_test_data.get_generated_data()
_, _, _, _, output_dir1 = experiment_cli(
config,
training_set=generated_data.train_df,
validation_set=generated_data.validation_df,
test_set=generated_data.test_df,
callbacks=[MlflowCallback(mlflow_uri)],
experiment_name=experiment_name,
)
# Can't change any artifact spec on a run once it has been logged to mlflow, so skipping changing epochs
_, _, _, _, output_dir2 = experiment_cli(
config,
training_set=generated_data.train_df,
validation_set=generated_data.validation_df,
test_set=generated_data.test_df,
model_resume_path=output_dir1,
callbacks=[MlflowCallback(mlflow_uri)],
experiment_name=experiment_name,
)
# make sure there is only one mlflow run id
experiment = mlflow.get_experiment_by_name(experiment_name)
previous_runs = mlflow.search_runs([experiment.experiment_id])
assert len(previous_runs) == 1
@pytest.mark.parametrize("optimizer_type", optimizer_registry)
def test_optimizers(optimizer_type, tmp_path):
input_features, output_features = synthetic_test_data.get_feature_configs()
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat"},
TRAINER: {"epochs": 5, "batch_size": 16, "evaluate_training_set": True, "optimizer": {"type": optimizer_type}},
}
# special handling for adadelta and lbfgs, break out of local minima
if optimizer_type == "adadelta":
config[TRAINER]["learning_rate"] = 0.1
if optimizer_type == "lbfgs":
config[TRAINER]["learning_rate"] = 0.05
model = LudwigModel(config)
# create sub-directory to store results
results_dir = tmp_path / "results"
results_dir.mkdir()
# run experiment
generated_data = synthetic_test_data.get_generated_data_for_optimizer()
train_stats, preprocessed_data, output_directory = model.train(
training_set=generated_data.train_df,
output_directory=str(results_dir),
config=config,
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
skip_save_model=True,
skip_save_log=True,
)
# retrieve training losses for first and last entries.
train_losses = train_stats[TRAINING]["combined"]["loss"]
last_entry = len(train_losses)
    # ensure train loss for the last entry is less than the first entry.
np.testing.assert_array_less(train_losses[last_entry - 1], train_losses[0])
def test_regularization(tmp_path):
input_features, output_features = synthetic_test_data.get_feature_configs()
config = {
"input_features": input_features,
"output_features": output_features,
"combiner": {"type": "concat"},
TRAINER: {
"epochs": 1,
"batch_size": 16,
"regularization_lambda": 1,
},
}
# create sub-directory to store results
results_dir = tmp_path / "results"
results_dir.mkdir()
regularization_losses = []
generated_data = synthetic_test_data.get_generated_data()
for regularizer in [None, "l1", "l2", "l1_l2"]:
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
# setup regularization parameters
config[TRAINER]["regularization_type"] = regularizer
# run experiment
_, _, _, _, output_dir = experiment_cli(
training_set=generated_data.train_df,
validation_set=generated_data.validation_df,
test_set=generated_data.test_df,
output_directory=str(results_dir),
config=config,
experiment_name="regularization",
model_name=str(regularizer),
skip_save_processed_input=True,
skip_save_progress=True,
skip_save_unprocessed_output=True,
skip_save_model=True,
skip_save_log=True,
)
# test existence of required files
train_stats_fp = os.path.join(output_dir, "training_statistics.json")
metadata_fp = os.path.join(output_dir, DESCRIPTION_FILE_NAME)
assert os.path.isfile(train_stats_fp)
assert os.path.isfile(metadata_fp)
# retrieve results so we can compare training loss with regularization
with open(train_stats_fp) as f:
train_stats = json.load(f)
# retrieve training losses for all epochs
train_losses = train_stats[TRAINING]["combined"]["loss"]
regularization_losses.append(train_losses[0])
# create a set of losses
regularization_losses_set = set(regularization_losses)
# ensure all losses obtained with the different methods are different
assert len(regularization_losses) == len(regularization_losses_set)
# test cache checksum function
def test_cache_checksum(csv_filename, tmp_path):
# setup for training
input_features = [category_feature(encoder={"vocab_size": 5})]
output_features = [category_feature(decoder={"vocab_size": 2}, top_k=2)]
source_dataset = os.path.join(tmp_path, csv_filename)
source_dataset = generate_data(input_features, output_features, source_dataset)
config = {
INPUT_FEATURES: input_features,
OUTPUT_FEATURES: output_features,
DEFAULTS: {CATEGORY: {PREPROCESSING: {"fill_value": "<UNKNOWN>"}}},
TRAINER: {EPOCHS: 2, BATCH_SIZE: 128},
}
backend = LocalTestBackend()
cache_fname = replace_file_extension(source_dataset, TRAINING_PREPROC_FILE_NAME)
# conduct initial training
output_directory = os.path.join(tmp_path, "results")
model = LudwigModel(config, backend=backend)
model.train(dataset=source_dataset, output_directory=output_directory)
first_training_timestamp = os.path.getmtime(cache_fname)
# conduct second training, should not force recreating hdf5
model = LudwigModel(config, backend=backend)
model.train(dataset=source_dataset, output_directory=output_directory)
current_training_timestamp = os.path.getmtime(cache_fname)
# time stamps should be the same
assert first_training_timestamp == current_training_timestamp
# force recreating cache file by changing checksum by updating defaults
prior_training_timestamp = current_training_timestamp
config[DEFAULTS][CATEGORY][PREPROCESSING]["fill_value"] = "<EMPTY>"
model = LudwigModel(config, backend=backend)
model.train(dataset=source_dataset, output_directory=output_directory)
current_training_timestamp = os.path.getmtime(cache_fname)
# timestamp should differ
assert prior_training_timestamp < current_training_timestamp
# force recreating cache by updating modification time of source dataset
prior_training_timestamp = current_training_timestamp
os.utime(source_dataset)
model = LudwigModel(config, backend=backend)
model.train(dataset=source_dataset, output_directory=output_directory)
current_training_timestamp = os.path.getmtime(cache_fname)
# timestamps should be different
assert prior_training_timestamp < current_training_timestamp
# force change in feature preprocessing
prior_training_timestamp = current_training_timestamp
input_features = config[INPUT_FEATURES].copy()
input_features[0][PREPROCESSING] = {"lowercase": True}
config[INPUT_FEATURES] = input_features
model = LudwigModel(config, backend=backend)
model.train(dataset=source_dataset, output_directory=output_directory)
current_training_timestamp = os.path.getmtime(cache_fname)
# timestamps should be different
assert prior_training_timestamp < current_training_timestamp
# force change in features names (and properties)
prior_training_timestamp = current_training_timestamp
input_features = [category_feature(encoder={"vocab_size": 5}), category_feature()]
source_dataset = generate_data(input_features, output_features, source_dataset)
config[INPUT_FEATURES] = input_features
model = LudwigModel(config, backend=backend)
model.train(dataset=source_dataset, output_directory=output_directory)
current_training_timestamp = os.path.getmtime(cache_fname)
# timestamps should be different
assert prior_training_timestamp < current_training_timestamp
# force change in Ludwig version
prior_training_timestamp = current_training_timestamp
global_vars.LUDWIG_VERSION = "new_version"
model = LudwigModel(config, backend=backend)
model.train(dataset=source_dataset, output_directory=output_directory)
current_training_timestamp = os.path.getmtime(cache_fname)
# timestamps should be different
assert prior_training_timestamp < current_training_timestamp
@pytest.mark.parametrize("transformer_key", list(numeric_transformation_registry.keys()))
def test_numeric_transformer(transformer_key, tmpdir):
Transformer = get_from_registry(transformer_key, numeric_transformation_registry)
transformer_name = Transformer().__class__.__name__
if transformer_name == "Log1pTransformer":
raw_values = np.random.lognormal(5, 2, size=100)
else:
raw_values = np.random.normal(5, 2, size=100)
backend = LOCAL_BACKEND
parameters = Transformer.fit_transform_params(raw_values, backend)
if transformer_name in {"Log1pTransformer", "IdentityTransformer"}:
# should be empty
assert not bool(parameters)
else:
# should not be empty
assert bool(parameters)
# instantiate numeric transformer
    numeric_transformer = Transformer(**parameters)
    # transform values
    transformed_values = numeric_transformer.transform(raw_values)
    # inverse transform the prior transformed values
    reconstructed_values = numeric_transformer.inverse_transform(transformed_values)
# should now match
assert np.allclose(raw_values, reconstructed_values)
# now test numeric transformer with output feature
df = pd.DataFrame(np.array([raw_values, raw_values]).T, columns=["x", "y"])
config = {
"input_features": [{"name": "x", "type": "number"}],
"output_features": [{"name": "y", "type": "number", "preprocessing": {"normalization": transformer_key}}],
"combiner": {
"type": "concat",
},
TRAINER: {
"epochs": 2,
"batch_size": 16,
},
}
args = {
"config": config,
"skip_save_processed_input": True,
"output_directory": os.path.join(tmpdir, "results"),
"logging_level": logging.WARN,
}
# ensure no exceptions are raised
experiment_cli(dataset=df, **args)
|
61d54f5be1c6d684aa9d54695f21ad7f8007a597
|
dd221d1ab80a49190a0c93277e2471debaa2db95
|
/hanlp/common/structure.py
|
cc9c61c75ce3c5a6014eaf1f2017ddb377ba4f29
|
[
"Apache-2.0",
"CC-BY-NC-SA-4.0"
] |
permissive
|
hankcs/HanLP
|
29a22d4e240617e4dc67929c2f9760a822402cf7
|
be2f04905a12990a527417bd47b79b851874a201
|
refs/heads/doc-zh
| 2023-08-18T12:48:43.533453
| 2020-02-15T17:19:28
| 2023-03-14T02:46:03
| 24,976,755
| 32,454
| 9,770
|
Apache-2.0
| 2023-08-13T03:11:39
| 2014-10-09T06:36:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,454
|
py
|
structure.py
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-08-26 14:58
from typing import Dict
from hanlp_common.configurable import Configurable
from hanlp_common.reflection import classpath_of
from hanlp_common.structure import SerializableDict
class ConfigTracker(Configurable):
def __init__(self, locals_: Dict, exclude=('kwargs', 'self', '__class__', 'locals_')) -> None:
"""This base class helps sub-classes to capture their arguments passed to ``__init__``, and also their types so
that they can be deserialized from a config in dict form.
Args:
locals_: Obtained by :meth:`locals`.
exclude: Arguments to be excluded.
Examples:
>>> class MyClass(ConfigTracker):
>>> def __init__(self, i_need_this='yes') -> None:
>>> super().__init__(locals())
>>> obj = MyClass()
>>> print(obj.config)
{'i_need_this': 'yes', 'classpath': 'test_config_tracker.MyClass'}
"""
if 'kwargs' in locals_:
locals_.update(locals_['kwargs'])
self.config = SerializableDict(
(k, v.config if hasattr(v, 'config') else v) for k, v in locals_.items() if k not in exclude)
self.config['classpath'] = classpath_of(self)
class History(object):
def __init__(self):
""" A history of training context. It records how many steps have passed and provides methods to decide whether
        an update should be performed, and to calculate the number of training steps given the dataloader size and
``gradient_accumulation``.
"""
self.num_mini_batches = 0
def step(self, gradient_accumulation):
""" Whether the training procedure should perform an update.
Args:
gradient_accumulation: Number of batches per update.
Returns:
bool: ``True`` to update.
"""
self.num_mini_batches += 1
return self.num_mini_batches % gradient_accumulation == 0
def num_training_steps(self, num_batches, gradient_accumulation):
""" Caculate number of training steps.
Args:
num_batches: Size of dataloader.
gradient_accumulation: Number of batches per update.
        Returns:
            int: The number of optimizer updates that will be performed.
        """
return len(
[i for i in range(self.num_mini_batches + 1, self.num_mini_batches + num_batches + 1) if
i % gradient_accumulation == 0])
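if __name__ == "__main__":
    # A minimal sketch of the gradient-accumulation bookkeeping above: with 10
    # mini-batches and one update every 4 batches, exactly 2 updates fire.
    history = History()
    updates = sum(history.step(gradient_accumulation=4) for _ in range(10))
    print(updates)  # 2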
|
25ad2d47c5a793213cd4dba57e110f81b8e3ac91
|
63ace5832d453e325681d02f6496a0999b72edcb
|
/bip_utils/substrate/conf/substrate_conf_getter.py
|
4a653186e77beba8df917fd1e6e19bf415191cf0
|
[
"MIT"
] |
permissive
|
ebellocchia/bip_utils
|
c9ec04c687f4247e57434319e36b2abab78f0b32
|
d15c75ddd74e4838c396a0d036ef6faf11b06a4b
|
refs/heads/master
| 2023-09-01T13:38:55.567370
| 2023-08-16T17:04:14
| 2023-08-16T17:04:14
| 251,130,186
| 244
| 88
|
MIT
| 2023-08-23T13:46:19
| 2020-03-29T20:42:48
|
Python
|
UTF-8
|
Python
| false
| false
| 3,104
|
py
|
substrate_conf_getter.py
|
# Copyright (c) 2021 Emanuele Bellocchia
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Module for getting Substrate coins configuration."""
# Imports
from typing import Dict
from bip_utils.substrate.conf.substrate_coin_conf import SubstrateCoinConf
from bip_utils.substrate.conf.substrate_coins import SubstrateCoins
from bip_utils.substrate.conf.substrate_conf import SubstrateConf
class SubstrateConfGetterConst:
"""Class container for Substrate configuration getter constants."""
# Map from SubstrateCoins to configuration classes
COIN_TO_CONF: Dict[SubstrateCoins, SubstrateCoinConf] = {
SubstrateCoins.ACALA: SubstrateConf.Acala,
SubstrateCoins.BIFROST: SubstrateConf.Bifrost,
SubstrateCoins.CHAINX: SubstrateConf.ChainX,
SubstrateCoins.EDGEWARE: SubstrateConf.Edgeware,
SubstrateCoins.GENERIC: SubstrateConf.Generic,
SubstrateCoins.KARURA: SubstrateConf.Karura,
SubstrateCoins.KUSAMA: SubstrateConf.Kusama,
SubstrateCoins.MOONBEAM: SubstrateConf.Moonbeam,
SubstrateCoins.MOONRIVER: SubstrateConf.Moonriver,
SubstrateCoins.PHALA: SubstrateConf.Phala,
SubstrateCoins.PLASM: SubstrateConf.Plasm,
SubstrateCoins.POLKADOT: SubstrateConf.Polkadot,
SubstrateCoins.SORA: SubstrateConf.Sora,
SubstrateCoins.STAFI: SubstrateConf.Stafi,
}
class SubstrateConfGetter:
"""
Substrate configuration getter class.
It allows to get the Substrate configuration of a specific coin.
"""
@staticmethod
def GetConfig(coin_type: SubstrateCoins) -> SubstrateCoinConf:
"""
Get coin configuration.
Args:
coin_type (SubstrateCoins): Coin type
Returns:
SubstrateCoinConf: Coin configuration
Raises:
TypeError: If coin type is not of a SubstrateCoins enumerative
"""
if not isinstance(coin_type, SubstrateCoins):
raise TypeError("Coin type is not an enumerative of SubstrateCoins")
return SubstrateConfGetterConst.COIN_TO_CONF[coin_type]
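if __name__ == "__main__":
    # Minimal usage sketch: fetch the Polkadot configuration through the getter
    # defined above and show the configuration class it resolves to.
    conf = SubstrateConfGetter.GetConfig(SubstrateCoins.POLKADOT)
    print(type(conf).__name__)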
|
44a31d0154da99ed41c1d10be65b265fab0213ba
|
de181afb1d7286dd82b6253ab927bb6d2fa4b975
|
/src/bepasty/tests/test_app.py
|
7597b5065bd3c800a8dba4e0ef797a00b08f96e1
|
[
"BSD-2-Clause"
] |
permissive
|
bepasty/bepasty-server
|
a3bab0b4e9d0dcf18aabeec7d934b08f313f2cd5
|
b006fc27bc872d859c09a5d873c80c33de7eed7d
|
refs/heads/master
| 2023-08-31T14:53:22.594826
| 2023-08-28T18:48:58
| 2023-08-28T18:48:58
| 16,217,714
| 151
| 45
|
BSD-2-Clause
| 2023-08-28T18:49:00
| 2014-01-24T21:37:55
|
Python
|
UTF-8
|
Python
| false
| false
| 2,016
|
py
|
test_app.py
|
#
# app tests
#
from flask import request, url_for
from flask.views import MethodView
from ..app import create_app
from ..config import Config
def test_secret_key(monkeypatch):
monkeypatch.setattr(Config, 'PERMISSIONS', {
'admin': 'admin,list,create,read,delete',
'full': 'list,create,read,delete',
'none': '',
})
monkeypatch.setattr(Config, 'SECRET_KEY', 'secret')
app = create_app()
secret_key = app.config['SECRET_KEY']
assert len(secret_key) > len(Config.SECRET_KEY)
Config.PERMISSIONS = {
'admin': 'admin,list,create,read,delete',
'none': '',
}
app = create_app()
assert app.config['SECRET_KEY'] != secret_key
class TestView(MethodView):
callback = None
def get(self):
TestView.callback()
return 'done'
def prepare(callback):
app = create_app()
app.add_url_rule('/test_call', view_func=TestView.as_view('test.test_call'))
TestView.callback = staticmethod(callback)
client = app.test_client()
assert app.config['APP_BASE_PATH'] == Config.APP_BASE_PATH
return app, client
def test_none(monkeypatch):
monkeypatch.setattr(Config, 'APP_BASE_PATH', None)
def none_callback():
url = url_for('test.test_call')
assert url == request.path
app, client = prepare(none_callback)
response = client.get('/bepasty/test_call')
assert response.status_code == 404
response = client.get('/test_call')
assert response.status_code == 200
assert response.data == b'done'
def test_prefix(monkeypatch):
monkeypatch.setattr(Config, 'APP_BASE_PATH', '/bepasty')
def prefix_callback():
url = url_for('test.test_call')
assert url == Config.APP_BASE_PATH + request.path
app, client = prepare(prefix_callback)
response = client.get('/test_call')
assert response.status_code == 404
response = client.get('/bepasty/test_call')
assert response.status_code == 200
assert response.data == b'done'
|
5d646305b7e26fe40cfd69abecf152deca5a50b1
|
07df6279388a17192eb4e4e417383a1f56208839
|
/mmdet3d/datasets/pipelines/loading.py
|
96a3a8d0d41145c8c09d0616d1d3c03fc6738ccf
|
[
"Apache-2.0"
] |
permissive
|
HuangJunJie2017/BEVDet
|
11d4ca45286739c9bd099f715cb0edc9408a914f
|
f71858d02eb0fbd09860150ade67558d7984b1be
|
refs/heads/dev2.1
| 2023-05-23T15:35:45.216750
| 2023-05-07T16:35:04
| 2023-05-07T16:35:04
| 432,979,408
| 985
| 192
|
Apache-2.0
| 2023-04-28T15:06:51
| 2021-11-29T09:28:12
|
Python
|
UTF-8
|
Python
| false
| false
| 43,433
|
py
|
loading.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import mmcv
import numpy as np
import torch
from PIL import Image
from pyquaternion import Quaternion
from mmdet3d.core.points import BasePoints, get_points_type
from mmdet.datasets.pipelines import LoadAnnotations, LoadImageFromFile
from ...core.bbox import LiDARInstance3DBoxes
from ..builder import PIPELINES
@PIPELINES.register_module()
class LoadOccGTFromFile(object):
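    """Load occupancy ground truth from ``labels.npz`` under ``occ_gt_path``:
    semantic voxel labels plus lidar- and camera-visibility masks."""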
def __call__(self, results):
occ_gt_path = results['occ_gt_path']
occ_gt_path = os.path.join(occ_gt_path, "labels.npz")
occ_labels = np.load(occ_gt_path)
semantics = occ_labels['semantics']
mask_lidar = occ_labels['mask_lidar']
mask_camera = occ_labels['mask_camera']
results['voxel_semantics'] = semantics
results['mask_lidar'] = mask_lidar
results['mask_camera'] = mask_camera
return results
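# A hypothetical pipeline-config sketch (mmdet-style pipelines are assembled from
# dicts of this form; every step besides the occupancy loader defined above is an
# assumption for illustration):
#
#   train_pipeline = [
#       dict(type='LoadOccGTFromFile'),
#       # ... further loading / augmentation steps ...
#   ]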
@PIPELINES.register_module()
class LoadMultiViewImageFromFiles(object):
"""Load multi channel images from a list of separate channel files.
Expects results['img_filename'] to be a list of filenames.
Args:
to_float32 (bool, optional): Whether to convert the img to float32.
Defaults to False.
color_type (str, optional): Color type of the file.
Defaults to 'unchanged'.
"""
def __init__(self, to_float32=False, color_type='unchanged'):
self.to_float32 = to_float32
self.color_type = color_type
def __call__(self, results):
"""Call function to load multi-view image from files.
Args:
results (dict): Result dict containing multi-view image filenames.
Returns:
dict: The result dict containing the multi-view image data.
Added keys and values are described below.
- filename (str): Multi-view image filenames.
- img (np.ndarray): Multi-view image arrays.
- img_shape (tuple[int]): Shape of multi-view image arrays.
- ori_shape (tuple[int]): Shape of original image arrays.
- pad_shape (tuple[int]): Shape of padded image arrays.
- scale_factor (float): Scale factor.
- img_norm_cfg (dict): Normalization configuration of images.
"""
filename = results['img_filename']
# img is of shape (h, w, c, num_views)
img = np.stack(
[mmcv.imread(name, self.color_type) for name in filename], axis=-1)
if self.to_float32:
img = img.astype(np.float32)
results['filename'] = filename
# unravel to list, see `DefaultFormatBundle` in formatting.py
# which will transpose each image separately and then stack into array
results['img'] = [img[..., i] for i in range(img.shape[-1])]
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False)
return results
def __repr__(self):
"""str: Return a string that describes the module."""
repr_str = self.__class__.__name__
repr_str += f'(to_float32={self.to_float32}, '
repr_str += f"color_type='{self.color_type}')"
return repr_str
@PIPELINES.register_module()
class LoadImageFromFileMono3D(LoadImageFromFile):
"""Load an image from file in monocular 3D object detection. Compared to 2D
detection, additional camera parameters need to be loaded.
Args:
kwargs (dict): Arguments are the same as those in
:class:`LoadImageFromFile`.
"""
def __call__(self, results):
"""Call functions to load image and get image meta information.
Args:
results (dict): Result dict from :obj:`mmdet.CustomDataset`.
Returns:
dict: The dict contains loaded image and meta information.
"""
super().__call__(results)
results['cam2img'] = results['img_info']['cam_intrinsic']
return results
@PIPELINES.register_module()
class LoadPointsFromMultiSweeps(object):
"""Load points from multiple sweeps.
This is usually used for nuScenes dataset to utilize previous sweeps.
Args:
sweeps_num (int, optional): Number of sweeps. Defaults to 10.
load_dim (int, optional): Dimension number of the loaded points.
Defaults to 5.
use_dim (list[int], optional): Which dimension to use.
Defaults to [0, 1, 2, 4].
time_dim (int, optional): Which dimension to represent the timestamps
of each points. Defaults to 4.
file_client_args (dict, optional): Config dict of file clients,
refer to
https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py
for more details. Defaults to dict(backend='disk').
pad_empty_sweeps (bool, optional): Whether to repeat keyframe when
sweeps is empty. Defaults to False.
remove_close (bool, optional): Whether to remove close points.
Defaults to False.
test_mode (bool, optional): If `test_mode=True`, it will not
randomly sample sweeps but select the nearest N frames.
Defaults to False.
"""
def __init__(self,
sweeps_num=10,
load_dim=5,
use_dim=[0, 1, 2, 4],
time_dim=4,
file_client_args=dict(backend='disk'),
pad_empty_sweeps=False,
remove_close=False,
test_mode=False):
self.load_dim = load_dim
self.sweeps_num = sweeps_num
self.use_dim = use_dim
self.time_dim = time_dim
assert time_dim < load_dim, \
f'Expect the timestamp dimension < {load_dim}, got {time_dim}'
self.file_client_args = file_client_args.copy()
self.file_client = None
self.pad_empty_sweeps = pad_empty_sweeps
self.remove_close = remove_close
self.test_mode = test_mode
assert max(use_dim) < load_dim, \
f'Expect all used dimensions < {load_dim}, got {use_dim}'
def _load_points(self, pts_filename):
"""Private function to load point clouds data.
Args:
pts_filename (str): Filename of point clouds data.
Returns:
np.ndarray: An array containing point clouds data.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
try:
pts_bytes = self.file_client.get(pts_filename)
points = np.frombuffer(pts_bytes, dtype=np.float32)
except ConnectionError:
mmcv.check_file_exist(pts_filename)
if pts_filename.endswith('.npy'):
points = np.load(pts_filename)
else:
points = np.fromfile(pts_filename, dtype=np.float32)
return points
def _remove_close(self, points, radius=1.0):
"""Removes point too close within a certain radius from origin.
Args:
points (np.ndarray | :obj:`BasePoints`): Sweep points.
radius (float, optional): Radius below which points are removed.
Defaults to 1.0.
Returns:
np.ndarray: Points after removing.
"""
if isinstance(points, np.ndarray):
points_numpy = points
elif isinstance(points, BasePoints):
points_numpy = points.tensor.numpy()
else:
raise NotImplementedError
x_filt = np.abs(points_numpy[:, 0]) < radius
y_filt = np.abs(points_numpy[:, 1]) < radius
not_close = np.logical_not(np.logical_and(x_filt, y_filt))
return points[not_close]
def __call__(self, results):
"""Call function to load multi-sweep point clouds from files.
Args:
results (dict): Result dict containing multi-sweep point cloud
filenames.
Returns:
dict: The result dict containing the multi-sweep points data.
Added key and value are described below.
- points (np.ndarray | :obj:`BasePoints`): Multi-sweep point
cloud arrays.
"""
points = results['points']
points.tensor[:, self.time_dim] = 0
sweep_points_list = [points]
ts = results['timestamp']
if self.pad_empty_sweeps and len(results['sweeps']) == 0:
for i in range(self.sweeps_num):
if self.remove_close:
sweep_points_list.append(self._remove_close(points))
else:
sweep_points_list.append(points)
else:
if len(results['sweeps']) <= self.sweeps_num:
choices = np.arange(len(results['sweeps']))
elif self.test_mode:
choices = np.arange(self.sweeps_num)
else:
choices = np.random.choice(
len(results['sweeps']), self.sweeps_num, replace=False)
for idx in choices:
sweep = results['sweeps'][idx]
points_sweep = self._load_points(sweep['data_path'])
points_sweep = np.copy(points_sweep).reshape(-1, self.load_dim)
if self.remove_close:
points_sweep = self._remove_close(points_sweep)
sweep_ts = sweep['timestamp'] / 1e6
points_sweep[:, :3] = points_sweep[:, :3] @ sweep[
'sensor2lidar_rotation'].T
points_sweep[:, :3] += sweep['sensor2lidar_translation']
points_sweep[:, self.time_dim] = ts - sweep_ts
points_sweep = points.new_point(points_sweep)
sweep_points_list.append(points_sweep)
points = points.cat(sweep_points_list)
points = points[:, self.use_dim]
results['points'] = points
return results
def __repr__(self):
"""str: Return a string that describes the module."""
return f'{self.__class__.__name__}(sweeps_num={self.sweeps_num})'
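# --- Illustrative sanity check (not from the original code base).
# _remove_close keeps a point unless BOTH |x| and |y| fall below the
# radius, i.e. it carves out an axis-aligned box rather than a circle.
def _demo_remove_close():
    import numpy as np
    loader = LoadPointsFromMultiSweeps()
    pts = np.array([[0.2, 0.3, 0.0, 0.0, 0.0],
                    [5.0, 5.0, 0.0, 0.0, 0.0]])
    kept = loader._remove_close(pts, radius=1.0)
    assert kept.shape[0] == 1  # only the far point survives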
@PIPELINES.register_module()
class PointSegClassMapping(object):
"""Map original semantic class to valid category ids.
Map valid classes as 0~len(valid_cat_ids)-1 and
others as len(valid_cat_ids).
Args:
valid_cat_ids (tuple[int]): A tuple of valid category.
max_cat_id (int, optional): The max possible cat_id in input
segmentation mask. Defaults to 40.
"""
def __init__(self, valid_cat_ids, max_cat_id=40):
        assert max_cat_id >= np.max(valid_cat_ids), \
            'max_cat_id should be greater than or equal to the maximum id in valid_cat_ids'
self.valid_cat_ids = valid_cat_ids
self.max_cat_id = int(max_cat_id)
# build cat_id to class index mapping
neg_cls = len(valid_cat_ids)
        # np.int was removed in NumPy >= 1.24, so use an explicit dtype
        self.cat_id2class = np.ones(
            self.max_cat_id + 1, dtype=np.int64) * neg_cls
for cls_idx, cat_id in enumerate(valid_cat_ids):
self.cat_id2class[cat_id] = cls_idx
def __call__(self, results):
"""Call function to map original semantic class to valid category ids.
Args:
results (dict): Result dict containing point semantic masks.
Returns:
dict: The result dict containing the mapped category ids.
Updated key and value are described below.
- pts_semantic_mask (np.ndarray): Mapped semantic masks.
"""
assert 'pts_semantic_mask' in results
pts_semantic_mask = results['pts_semantic_mask']
converted_pts_sem_mask = self.cat_id2class[pts_semantic_mask]
results['pts_semantic_mask'] = converted_pts_sem_mask
return results
def __repr__(self):
"""str: Return a string that describes the module."""
repr_str = self.__class__.__name__
repr_str += f'(valid_cat_ids={self.valid_cat_ids}, '
repr_str += f'max_cat_id={self.max_cat_id})'
return repr_str
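# --- Illustrative usage sketch (values are made up for the demo).
# With valid ids (1, 3) and max_cat_id=5, id 1 maps to class 0, id 3 to
# class 1, and every other id to the "ignore" class len(valid_cat_ids)=2.
def _demo_seg_class_mapping():
    import numpy as np
    mapper = PointSegClassMapping(valid_cat_ids=(1, 3), max_cat_id=5)
    out = mapper(dict(pts_semantic_mask=np.array([0, 1, 2, 3, 5])))
    assert out['pts_semantic_mask'].tolist() == [2, 0, 2, 1, 2]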
@PIPELINES.register_module()
class NormalizePointsColor(object):
"""Normalize color of points.
Args:
color_mean (list[float]): Mean color of the point cloud.
"""
def __init__(self, color_mean):
self.color_mean = color_mean
def __call__(self, results):
"""Call function to normalize color of points.
Args:
results (dict): Result dict containing point clouds data.
Returns:
dict: The result dict containing the normalized points.
Updated key and value are described below.
- points (:obj:`BasePoints`): Points after color normalization.
"""
points = results['points']
assert points.attribute_dims is not None and \
'color' in points.attribute_dims.keys(), \
'Expect points have color attribute'
if self.color_mean is not None:
points.color = points.color - \
points.color.new_tensor(self.color_mean)
points.color = points.color / 255.0
results['points'] = points
return results
def __repr__(self):
"""str: Return a string that describes the module."""
repr_str = self.__class__.__name__
repr_str += f'(color_mean={self.color_mean})'
return repr_str
@PIPELINES.register_module()
class LoadPointsFromFile(object):
"""Load Points From File.
Load points from file.
Args:
coord_type (str): The type of coordinates of points cloud.
Available options includes:
- 'LIDAR': Points in LiDAR coordinates.
        - 'DEPTH': Points in depth coordinates, usually for indoor datasets.
- 'CAMERA': Points in camera coordinates.
load_dim (int, optional): The dimension of the loaded points.
Defaults to 6.
use_dim (list[int], optional): Which dimensions of the points to use.
Defaults to [0, 1, 2]. For KITTI dataset, set use_dim=4
or use_dim=[0, 1, 2, 3] to use the intensity dimension.
shift_height (bool, optional): Whether to use shifted height.
Defaults to False.
use_color (bool, optional): Whether to use color features.
Defaults to False.
file_client_args (dict, optional): Config dict of file clients,
refer to
https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py
for more details. Defaults to dict(backend='disk').
"""
def __init__(self,
coord_type,
load_dim=6,
use_dim=[0, 1, 2],
shift_height=False,
use_color=False,
file_client_args=dict(backend='disk')):
self.shift_height = shift_height
self.use_color = use_color
if isinstance(use_dim, int):
use_dim = list(range(use_dim))
assert max(use_dim) < load_dim, \
f'Expect all used dimensions < {load_dim}, got {use_dim}'
assert coord_type in ['CAMERA', 'LIDAR', 'DEPTH']
self.coord_type = coord_type
self.load_dim = load_dim
self.use_dim = use_dim
self.file_client_args = file_client_args.copy()
self.file_client = None
def _load_points(self, pts_filename):
"""Private function to load point clouds data.
Args:
pts_filename (str): Filename of point clouds data.
Returns:
np.ndarray: An array containing point clouds data.
"""
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
try:
pts_bytes = self.file_client.get(pts_filename)
points = np.frombuffer(pts_bytes, dtype=np.float32)
except ConnectionError:
mmcv.check_file_exist(pts_filename)
if pts_filename.endswith('.npy'):
points = np.load(pts_filename)
else:
points = np.fromfile(pts_filename, dtype=np.float32)
return points
def __call__(self, results):
"""Call function to load points data from file.
Args:
results (dict): Result dict containing point clouds data.
Returns:
dict: The result dict containing the point clouds data.
Added key and value are described below.
- points (:obj:`BasePoints`): Point clouds data.
"""
pts_filename = results['pts_filename']
points = self._load_points(pts_filename)
points = points.reshape(-1, self.load_dim)
points = points[:, self.use_dim]
attribute_dims = None
if self.shift_height:
            # note: 0.99 here is the 0.99th percentile (i.e. near the lowest
            # points), which matches the upstream mmdet3d behaviour
            floor_height = np.percentile(points[:, 2], 0.99)
height = points[:, 2] - floor_height
points = np.concatenate(
[points[:, :3],
np.expand_dims(height, 1), points[:, 3:]], 1)
attribute_dims = dict(height=3)
if self.use_color:
assert len(self.use_dim) >= 6
if attribute_dims is None:
attribute_dims = dict()
attribute_dims.update(
dict(color=[
points.shape[1] - 3,
points.shape[1] - 2,
points.shape[1] - 1,
]))
points_class = get_points_type(self.coord_type)
points = points_class(
points, points_dim=points.shape[-1], attribute_dims=attribute_dims)
results['points'] = points
return results
def __repr__(self):
"""str: Return a string that describes the module."""
repr_str = self.__class__.__name__ + '('
repr_str += f'shift_height={self.shift_height}, '
repr_str += f'use_color={self.use_color}, '
repr_str += f'file_client_args={self.file_client_args}, '
repr_str += f'load_dim={self.load_dim}, '
repr_str += f'use_dim={self.use_dim})'
return repr_str
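# --- Illustrative usage sketch (file name and sizes are made up).
# Writes a synthetic float32 .bin point cloud and keeps the first four
# dimensions (x, y, z, intensity).
def _demo_load_points_from_file():
    import os
    import tempfile
    import numpy as np
    path = os.path.join(tempfile.mkdtemp(), 'pts.bin')
    np.random.rand(100, 6).astype(np.float32).tofile(path)
    loader = LoadPointsFromFile(coord_type='LIDAR', load_dim=6, use_dim=4)
    out = loader(dict(pts_filename=path))
    assert out['points'].tensor.shape == (100, 4)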
@PIPELINES.register_module()
class LoadPointsFromDict(LoadPointsFromFile):
"""Load Points From Dict."""
def __call__(self, results):
assert 'points' in results
return results
@PIPELINES.register_module()
class LoadAnnotations3D(LoadAnnotations):
"""Load Annotations3D.
Load instance mask and semantic mask of points and
encapsulate the items into related fields.
Args:
with_bbox_3d (bool, optional): Whether to load 3D boxes.
Defaults to True.
with_label_3d (bool, optional): Whether to load 3D labels.
Defaults to True.
with_attr_label (bool, optional): Whether to load attribute label.
Defaults to False.
        with_mask_3d (bool, optional): Whether to load 3D instance masks
            for points. Defaults to False.
        with_seg_3d (bool, optional): Whether to load 3D semantic masks
            for points. Defaults to False.
with_bbox (bool, optional): Whether to load 2D boxes.
Defaults to False.
with_label (bool, optional): Whether to load 2D labels.
Defaults to False.
with_mask (bool, optional): Whether to load 2D instance masks.
Defaults to False.
with_seg (bool, optional): Whether to load 2D semantic masks.
Defaults to False.
with_bbox_depth (bool, optional): Whether to load 2.5D boxes.
Defaults to False.
poly2mask (bool, optional): Whether to convert polygon annotations
to bitmasks. Defaults to True.
        seg_3d_dtype (dtype, optional): Dtype of 3D semantic masks.
            Defaults to np.int64.
file_client_args (dict): Config dict of file clients, refer to
https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py
for more details.
"""
def __init__(self,
with_bbox_3d=True,
with_label_3d=True,
with_attr_label=False,
with_mask_3d=False,
with_seg_3d=False,
with_bbox=False,
with_label=False,
with_mask=False,
with_seg=False,
with_bbox_depth=False,
poly2mask=True,
seg_3d_dtype=np.int64,
file_client_args=dict(backend='disk')):
super().__init__(
with_bbox,
with_label,
with_mask,
with_seg,
poly2mask,
file_client_args=file_client_args)
self.with_bbox_3d = with_bbox_3d
self.with_bbox_depth = with_bbox_depth
self.with_label_3d = with_label_3d
self.with_attr_label = with_attr_label
self.with_mask_3d = with_mask_3d
self.with_seg_3d = with_seg_3d
self.seg_3d_dtype = seg_3d_dtype
def _load_bboxes_3d(self, results):
"""Private function to load 3D bounding box annotations.
Args:
results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
Returns:
dict: The dict containing loaded 3D bounding box annotations.
"""
results['gt_bboxes_3d'] = results['ann_info']['gt_bboxes_3d']
results['bbox3d_fields'].append('gt_bboxes_3d')
return results
def _load_bboxes_depth(self, results):
"""Private function to load 2.5D bounding box annotations.
Args:
results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
Returns:
dict: The dict containing loaded 2.5D bounding box annotations.
"""
results['centers2d'] = results['ann_info']['centers2d']
results['depths'] = results['ann_info']['depths']
return results
def _load_labels_3d(self, results):
"""Private function to load label annotations.
Args:
results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
Returns:
dict: The dict containing loaded label annotations.
"""
results['gt_labels_3d'] = results['ann_info']['gt_labels_3d']
return results
def _load_attr_labels(self, results):
"""Private function to load label annotations.
Args:
results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
Returns:
dict: The dict containing loaded label annotations.
"""
results['attr_labels'] = results['ann_info']['attr_labels']
return results
def _load_masks_3d(self, results):
"""Private function to load 3D mask annotations.
Args:
results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
Returns:
dict: The dict containing loaded 3D mask annotations.
"""
pts_instance_mask_path = results['ann_info']['pts_instance_mask_path']
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
try:
mask_bytes = self.file_client.get(pts_instance_mask_path)
pts_instance_mask = np.frombuffer(mask_bytes, dtype=np.int64)
except ConnectionError:
mmcv.check_file_exist(pts_instance_mask_path)
pts_instance_mask = np.fromfile(
pts_instance_mask_path, dtype=np.int64)
results['pts_instance_mask'] = pts_instance_mask
results['pts_mask_fields'].append('pts_instance_mask')
return results
def _load_semantic_seg_3d(self, results):
"""Private function to load 3D semantic segmentation annotations.
Args:
results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
Returns:
dict: The dict containing the semantic segmentation annotations.
"""
pts_semantic_mask_path = results['ann_info']['pts_semantic_mask_path']
if self.file_client is None:
self.file_client = mmcv.FileClient(**self.file_client_args)
try:
mask_bytes = self.file_client.get(pts_semantic_mask_path)
# add .copy() to fix read-only bug
pts_semantic_mask = np.frombuffer(
mask_bytes, dtype=self.seg_3d_dtype).copy()
except ConnectionError:
mmcv.check_file_exist(pts_semantic_mask_path)
pts_semantic_mask = np.fromfile(
pts_semantic_mask_path, dtype=np.int64)
results['pts_semantic_mask'] = pts_semantic_mask
results['pts_seg_fields'].append('pts_semantic_mask')
return results
def __call__(self, results):
"""Call function to load multiple types annotations.
Args:
results (dict): Result dict from :obj:`mmdet3d.CustomDataset`.
Returns:
dict: The dict containing loaded 3D bounding box, label, mask and
semantic segmentation annotations.
"""
results = super().__call__(results)
if self.with_bbox_3d:
results = self._load_bboxes_3d(results)
if results is None:
return None
if self.with_bbox_depth:
results = self._load_bboxes_depth(results)
if results is None:
return None
if self.with_label_3d:
results = self._load_labels_3d(results)
if self.with_attr_label:
results = self._load_attr_labels(results)
if self.with_mask_3d:
results = self._load_masks_3d(results)
if self.with_seg_3d:
results = self._load_semantic_seg_3d(results)
return results
def __repr__(self):
"""str: Return a string that describes the module."""
indent_str = ' '
repr_str = self.__class__.__name__ + '(\n'
repr_str += f'{indent_str}with_bbox_3d={self.with_bbox_3d}, '
repr_str += f'{indent_str}with_label_3d={self.with_label_3d}, '
repr_str += f'{indent_str}with_attr_label={self.with_attr_label}, '
repr_str += f'{indent_str}with_mask_3d={self.with_mask_3d}, '
repr_str += f'{indent_str}with_seg_3d={self.with_seg_3d}, '
repr_str += f'{indent_str}with_bbox={self.with_bbox}, '
repr_str += f'{indent_str}with_label={self.with_label}, '
repr_str += f'{indent_str}with_mask={self.with_mask}, '
repr_str += f'{indent_str}with_seg={self.with_seg}, '
repr_str += f'{indent_str}with_bbox_depth={self.with_bbox_depth}, '
repr_str += f'{indent_str}poly2mask={self.poly2mask})'
return repr_str
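# --- Illustrative configuration sketch (values are made up).
# In mmdet3d-style configs this transform is usually declared as a dict in
# the train pipeline; only the flags you need have to be set.
example_load_annotations_3d = dict(
    type='LoadAnnotations3D',
    with_bbox_3d=True,
    with_label_3d=True,
    with_attr_label=False)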
@PIPELINES.register_module()
class PointToMultiViewDepth(object):
    """Project LiDAR points into each camera view to build per-view
    ground-truth depth maps, stored under results['gt_depth']."""
    def __init__(self, grid_config, downsample=1):
self.downsample = downsample
self.grid_config = grid_config
def points2depthmap(self, points, height, width):
height, width = height // self.downsample, width // self.downsample
depth_map = torch.zeros((height, width), dtype=torch.float32)
coor = torch.round(points[:, :2] / self.downsample)
depth = points[:, 2]
kept1 = (coor[:, 0] >= 0) & (coor[:, 0] < width) & (
coor[:, 1] >= 0) & (coor[:, 1] < height) & (
depth < self.grid_config['depth'][1]) & (
depth >= self.grid_config['depth'][0])
coor, depth = coor[kept1], depth[kept1]
ranks = coor[:, 0] + coor[:, 1] * width
sort = (ranks + depth / 100.).argsort()
coor, depth, ranks = coor[sort], depth[sort], ranks[sort]
kept2 = torch.ones(coor.shape[0], device=coor.device, dtype=torch.bool)
kept2[1:] = (ranks[1:] != ranks[:-1])
coor, depth = coor[kept2], depth[kept2]
coor = coor.to(torch.long)
depth_map[coor[:, 1], coor[:, 0]] = depth
return depth_map
def __call__(self, results):
points_lidar = results['points']
imgs, rots, trans, intrins = results['img_inputs'][:4]
post_rots, post_trans, bda = results['img_inputs'][4:]
depth_map_list = []
for cid in range(len(results['cam_names'])):
cam_name = results['cam_names'][cid]
lidar2lidarego = np.eye(4, dtype=np.float32)
lidar2lidarego[:3, :3] = Quaternion(
results['curr']['lidar2ego_rotation']).rotation_matrix
lidar2lidarego[:3, 3] = results['curr']['lidar2ego_translation']
lidar2lidarego = torch.from_numpy(lidar2lidarego)
lidarego2global = np.eye(4, dtype=np.float32)
lidarego2global[:3, :3] = Quaternion(
results['curr']['ego2global_rotation']).rotation_matrix
lidarego2global[:3, 3] = results['curr']['ego2global_translation']
lidarego2global = torch.from_numpy(lidarego2global)
cam2camego = np.eye(4, dtype=np.float32)
cam2camego[:3, :3] = Quaternion(
results['curr']['cams'][cam_name]
['sensor2ego_rotation']).rotation_matrix
cam2camego[:3, 3] = results['curr']['cams'][cam_name][
'sensor2ego_translation']
cam2camego = torch.from_numpy(cam2camego)
camego2global = np.eye(4, dtype=np.float32)
camego2global[:3, :3] = Quaternion(
results['curr']['cams'][cam_name]
['ego2global_rotation']).rotation_matrix
camego2global[:3, 3] = results['curr']['cams'][cam_name][
'ego2global_translation']
camego2global = torch.from_numpy(camego2global)
cam2img = np.eye(4, dtype=np.float32)
cam2img = torch.from_numpy(cam2img)
cam2img[:3, :3] = intrins[cid]
lidar2cam = torch.inverse(camego2global.matmul(cam2camego)).matmul(
lidarego2global.matmul(lidar2lidarego))
lidar2img = cam2img.matmul(lidar2cam)
points_img = points_lidar.tensor[:, :3].matmul(
lidar2img[:3, :3].T) + lidar2img[:3, 3].unsqueeze(0)
points_img = torch.cat(
[points_img[:, :2] / points_img[:, 2:3], points_img[:, 2:3]],
1)
points_img = points_img.matmul(
post_rots[cid].T) + post_trans[cid:cid + 1, :]
depth_map = self.points2depthmap(points_img, imgs.shape[2],
imgs.shape[3])
depth_map_list.append(depth_map)
depth_map = torch.stack(depth_map_list)
results['gt_depth'] = depth_map
return results
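# --- Illustrative sanity check (values are made up).
# When two points project to the same pixel, the rank/sort trick in
# points2depthmap keeps the smaller (closer) depth.
def _demo_points2depthmap():
    import torch
    p2d = PointToMultiViewDepth(grid_config={'depth': [1.0, 60.0]})
    pts = torch.tensor([[1.0, 1.0, 5.0],
                        [1.2, 1.2, 2.0]])  # both round to pixel (1, 1)
    depth_map = p2d.points2depthmap(pts, height=4, width=4)
    assert depth_map[1, 1].item() == 2.0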
def mmlabNormalize(img):
    """Normalize an image with the standard ImageNet mean/std (converting
    BGR to RGB) and return a contiguous CHW float tensor."""
    from mmcv.image.photometric import imnormalize
mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
std = np.array([58.395, 57.12, 57.375], dtype=np.float32)
to_rgb = True
img = imnormalize(np.array(img), mean, std, to_rgb)
img = torch.tensor(img).float().permute(2, 0, 1).contiguous()
return img
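# --- Illustrative sanity check (not from the original code base).
# Feeds a dummy image through mmlabNormalize and checks the HWC -> CHW
# conversion.
def _demo_mmlab_normalize():
    import numpy as np
    out = mmlabNormalize(np.zeros((8, 8, 3), dtype=np.uint8))
    assert tuple(out.shape) == (3, 8, 8)  # CHW float tensor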
@PIPELINES.register_module()
class PrepareImageInputs(object):
"""Load multi channel images from a list of separate channel files.
Expects results['img_filename'] to be a list of filenames.
Args:
to_float32 (bool): Whether to convert the img to float32.
Defaults to False.
color_type (str): Color type of the file. Defaults to 'unchanged'.
"""
def __init__(
self,
data_config,
is_train=False,
sequential=False,
):
self.is_train = is_train
self.data_config = data_config
self.normalize_img = mmlabNormalize
self.sequential = sequential
def get_rot(self, h):
return torch.Tensor([
[np.cos(h), np.sin(h)],
[-np.sin(h), np.cos(h)],
])
def img_transform(self, img, post_rot, post_tran, resize, resize_dims,
crop, flip, rotate):
# adjust image
img = self.img_transform_core(img, resize_dims, crop, flip, rotate)
# post-homography transformation
post_rot *= resize
post_tran -= torch.Tensor(crop[:2])
if flip:
A = torch.Tensor([[-1, 0], [0, 1]])
b = torch.Tensor([crop[2] - crop[0], 0])
post_rot = A.matmul(post_rot)
post_tran = A.matmul(post_tran) + b
A = self.get_rot(rotate / 180 * np.pi)
b = torch.Tensor([crop[2] - crop[0], crop[3] - crop[1]]) / 2
b = A.matmul(-b) + b
post_rot = A.matmul(post_rot)
post_tran = A.matmul(post_tran) + b
return img, post_rot, post_tran
def img_transform_core(self, img, resize_dims, crop, flip, rotate):
# adjust image
img = img.resize(resize_dims)
img = img.crop(crop)
if flip:
img = img.transpose(method=Image.FLIP_LEFT_RIGHT)
img = img.rotate(rotate)
return img
def choose_cams(self):
if self.is_train and self.data_config['Ncams'] < len(
self.data_config['cams']):
cam_names = np.random.choice(
self.data_config['cams'],
self.data_config['Ncams'],
replace=False)
else:
cam_names = self.data_config['cams']
return cam_names
def sample_augmentation(self, H, W, flip=None, scale=None):
fH, fW = self.data_config['input_size']
if self.is_train:
resize = float(fW) / float(W)
resize += np.random.uniform(*self.data_config['resize'])
resize_dims = (int(W * resize), int(H * resize))
newW, newH = resize_dims
crop_h = int((1 - np.random.uniform(*self.data_config['crop_h'])) *
newH) - fH
crop_w = int(np.random.uniform(0, max(0, newW - fW)))
crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)
flip = self.data_config['flip'] and np.random.choice([0, 1])
rotate = np.random.uniform(*self.data_config['rot'])
else:
resize = float(fW) / float(W)
if scale is not None:
resize += scale
else:
resize += self.data_config.get('resize_test', 0.0)
resize_dims = (int(W * resize), int(H * resize))
newW, newH = resize_dims
crop_h = int((1 - np.mean(self.data_config['crop_h'])) * newH) - fH
crop_w = int(max(0, newW - fW) / 2)
crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)
flip = False if flip is None else flip
rotate = 0
return resize, resize_dims, crop, flip, rotate
def get_sensor_transforms(self, cam_info, cam_name):
w, x, y, z = cam_info['cams'][cam_name]['sensor2ego_rotation']
# sweep sensor to sweep ego
sensor2ego_rot = torch.Tensor(
Quaternion(w, x, y, z).rotation_matrix)
sensor2ego_tran = torch.Tensor(
cam_info['cams'][cam_name]['sensor2ego_translation'])
sensor2ego = sensor2ego_rot.new_zeros((4, 4))
sensor2ego[3, 3] = 1
sensor2ego[:3, :3] = sensor2ego_rot
sensor2ego[:3, -1] = sensor2ego_tran
# sweep ego to global
w, x, y, z = cam_info['cams'][cam_name]['ego2global_rotation']
ego2global_rot = torch.Tensor(
Quaternion(w, x, y, z).rotation_matrix)
ego2global_tran = torch.Tensor(
cam_info['cams'][cam_name]['ego2global_translation'])
ego2global = ego2global_rot.new_zeros((4, 4))
ego2global[3, 3] = 1
ego2global[:3, :3] = ego2global_rot
ego2global[:3, -1] = ego2global_tran
return sensor2ego, ego2global
def get_inputs(self, results, flip=None, scale=None):
imgs = []
sensor2egos = []
ego2globals = []
intrins = []
post_rots = []
post_trans = []
cam_names = self.choose_cams()
results['cam_names'] = cam_names
canvas = []
for cam_name in cam_names:
cam_data = results['curr']['cams'][cam_name]
filename = cam_data['data_path']
img = Image.open(filename)
post_rot = torch.eye(2)
post_tran = torch.zeros(2)
intrin = torch.Tensor(cam_data['cam_intrinsic'])
sensor2ego, ego2global = \
self.get_sensor_transforms(results['curr'], cam_name)
# image view augmentation (resize, crop, horizontal flip, rotate)
img_augs = self.sample_augmentation(
H=img.height, W=img.width, flip=flip, scale=scale)
resize, resize_dims, crop, flip, rotate = img_augs
img, post_rot2, post_tran2 = \
self.img_transform(img, post_rot,
post_tran,
resize=resize,
resize_dims=resize_dims,
crop=crop,
flip=flip,
rotate=rotate)
# for convenience, make augmentation matrices 3x3
post_tran = torch.zeros(3)
post_rot = torch.eye(3)
post_tran[:2] = post_tran2
post_rot[:2, :2] = post_rot2
canvas.append(np.array(img))
imgs.append(self.normalize_img(img))
if self.sequential:
assert 'adjacent' in results
for adj_info in results['adjacent']:
filename_adj = adj_info['cams'][cam_name]['data_path']
img_adjacent = Image.open(filename_adj)
img_adjacent = self.img_transform_core(
img_adjacent,
resize_dims=resize_dims,
crop=crop,
flip=flip,
rotate=rotate)
imgs.append(self.normalize_img(img_adjacent))
intrins.append(intrin)
sensor2egos.append(sensor2ego)
ego2globals.append(ego2global)
post_rots.append(post_rot)
post_trans.append(post_tran)
if self.sequential:
for adj_info in results['adjacent']:
post_trans.extend(post_trans[:len(cam_names)])
post_rots.extend(post_rots[:len(cam_names)])
intrins.extend(intrins[:len(cam_names)])
# align
for cam_name in cam_names:
sensor2ego, ego2global = \
self.get_sensor_transforms(adj_info, cam_name)
sensor2egos.append(sensor2ego)
ego2globals.append(ego2global)
imgs = torch.stack(imgs)
sensor2egos = torch.stack(sensor2egos)
ego2globals = torch.stack(ego2globals)
intrins = torch.stack(intrins)
post_rots = torch.stack(post_rots)
post_trans = torch.stack(post_trans)
results['canvas'] = canvas
return (imgs, sensor2egos, ego2globals, intrins, post_rots, post_trans)
def __call__(self, results):
results['img_inputs'] = self.get_inputs(results)
return results
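# --- Illustrative sanity check (values are made up).
# get_rot builds the 2x2 in-image rotation used by img_transform; a
# 90-degree rotation (pi/2 radians) maps x to -y and y to x.
def _demo_get_rot():
    import numpy as np
    prep = PrepareImageInputs(data_config={}, is_train=False)
    rot = prep.get_rot(np.pi / 2)
    assert np.allclose(rot.numpy(), [[0., 1.], [-1., 0.]], atol=1e-6)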
@PIPELINES.register_module()
class LoadAnnotationsBEVDepth(object):
def __init__(self, bda_aug_conf, classes, is_train=True):
self.bda_aug_conf = bda_aug_conf
self.is_train = is_train
self.classes = classes
def sample_bda_augmentation(self):
"""Generate bda augmentation values based on bda_config."""
if self.is_train:
rotate_bda = np.random.uniform(*self.bda_aug_conf['rot_lim'])
scale_bda = np.random.uniform(*self.bda_aug_conf['scale_lim'])
flip_dx = np.random.uniform() < self.bda_aug_conf['flip_dx_ratio']
flip_dy = np.random.uniform() < self.bda_aug_conf['flip_dy_ratio']
else:
rotate_bda = 0
scale_bda = 1.0
flip_dx = False
flip_dy = False
return rotate_bda, scale_bda, flip_dx, flip_dy
def bev_transform(self, gt_boxes, rotate_angle, scale_ratio, flip_dx,
flip_dy):
rotate_angle = torch.tensor(rotate_angle / 180 * np.pi)
rot_sin = torch.sin(rotate_angle)
rot_cos = torch.cos(rotate_angle)
rot_mat = torch.Tensor([[rot_cos, -rot_sin, 0], [rot_sin, rot_cos, 0],
[0, 0, 1]])
scale_mat = torch.Tensor([[scale_ratio, 0, 0], [0, scale_ratio, 0],
[0, 0, scale_ratio]])
flip_mat = torch.Tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if flip_dx:
flip_mat = flip_mat @ torch.Tensor([[-1, 0, 0], [0, 1, 0],
[0, 0, 1]])
if flip_dy:
flip_mat = flip_mat @ torch.Tensor([[1, 0, 0], [0, -1, 0],
[0, 0, 1]])
rot_mat = flip_mat @ (scale_mat @ rot_mat)
if gt_boxes.shape[0] > 0:
gt_boxes[:, :3] = (
rot_mat @ gt_boxes[:, :3].unsqueeze(-1)).squeeze(-1)
gt_boxes[:, 3:6] *= scale_ratio
gt_boxes[:, 6] += rotate_angle
            if flip_dx:
                # 2 * asin(1) == pi, so this reflects the yaw: pi - yaw
                gt_boxes[:, 6] = 2 * torch.asin(torch.tensor(1.0)) - gt_boxes[:, 6]
if flip_dy:
gt_boxes[:, 6] = -gt_boxes[:, 6]
gt_boxes[:, 7:] = (
rot_mat[:2, :2] @ gt_boxes[:, 7:].unsqueeze(-1)).squeeze(-1)
return gt_boxes, rot_mat
def __call__(self, results):
gt_boxes, gt_labels = results['ann_infos']
gt_boxes, gt_labels = torch.Tensor(gt_boxes), torch.tensor(gt_labels)
rotate_bda, scale_bda, flip_dx, flip_dy = self.sample_bda_augmentation(
)
bda_mat = torch.zeros(4, 4)
bda_mat[3, 3] = 1
gt_boxes, bda_rot = self.bev_transform(gt_boxes, rotate_bda, scale_bda,
flip_dx, flip_dy)
bda_mat[:3, :3] = bda_rot
if len(gt_boxes) == 0:
gt_boxes = torch.zeros(0, 9)
results['gt_bboxes_3d'] = \
LiDARInstance3DBoxes(gt_boxes, box_dim=gt_boxes.shape[-1],
origin=(0.5, 0.5, 0.5))
results['gt_labels_3d'] = gt_labels
imgs, rots, trans, intrins = results['img_inputs'][:4]
post_rots, post_trans = results['img_inputs'][4:]
results['img_inputs'] = (imgs, rots, trans, intrins, post_rots,
post_trans, bda_rot)
if 'voxel_semantics' in results:
if flip_dx:
results['voxel_semantics'] = results['voxel_semantics'][::-1,...].copy()
results['mask_lidar'] = results['mask_lidar'][::-1,...].copy()
results['mask_camera'] = results['mask_camera'][::-1,...].copy()
if flip_dy:
results['voxel_semantics'] = results['voxel_semantics'][:,::-1,...].copy()
results['mask_lidar'] = results['mask_lidar'][:,::-1,...].copy()
results['mask_camera'] = results['mask_camera'][:,::-1,...].copy()
return results
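# --- Illustrative sanity check (values are made up).
# A 90-degree BDA rotation moves a box center from (1, 0, 0) to (0, 1, 0)
# and rotates its velocity the same way.
def _demo_bev_transform():
    import torch
    ann = LoadAnnotationsBEVDepth(bda_aug_conf={}, classes=[], is_train=False)
    box = torch.tensor([[1., 0., 0., 1., 1., 1., 0., 1., 0.]])
    out, _ = ann.bev_transform(box, rotate_angle=90, scale_ratio=1.0,
                               flip_dx=False, flip_dy=False)
    assert torch.allclose(out[0, :2], torch.tensor([0., 1.]), atol=1e-6)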
|
0480d17bd3be0401fab48ec4d85543f87658473a
|
5917ffcb780cfcfe4e2b87b11fca1f68f387b239
|
/plenum/server/quota_control.py
|
3356c0ba46e195a4d7acf3be38c9d03632269b18
|
[
"Apache-2.0"
] |
permissive
|
hyperledger/indy-plenum
|
6ff9f705af80dfa28d4cb92743683f78bb937aa3
|
698b9500ad3a7a15993af72a1c35a406c5673262
|
refs/heads/main
| 2023-08-29T01:32:26.384729
| 2023-06-20T16:42:11
| 2023-06-20T16:42:11
| 51,585,028
| 171
| 420
|
Apache-2.0
| 2023-06-20T16:42:14
| 2016-02-12T12:03:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,169
|
py
|
quota_control.py
|
from abc import ABC, abstractmethod
from stp_zmq.zstack import Quota
class QuotaControl(ABC):
@abstractmethod
def update_state(self, state: dict):
pass
@property
@abstractmethod
def node_quota(self) -> Quota:
pass
@property
@abstractmethod
def client_quota(self) -> Quota:
pass
class StaticQuotaControl(QuotaControl):
def __init__(self, node_quota: Quota, client_quota: Quota):
self._node_quota = node_quota
self._client_quota = client_quota
def update_state(self, state: dict):
pass
@property
def node_quota(self) -> Quota:
return self._node_quota
@property
def client_quota(self) -> Quota:
return self._client_quota
class CompositeQuotaControl(QuotaControl):
def __init__(self, *args):
self._controls = [*args]
def update_state(self, state: dict):
for qc in self._controls:
qc.update_state(state)
@property
def node_quota(self) -> Quota:
return Quota(count=min(qc.node_quota.count for qc in self._controls),
size=min(qc.node_quota.size for qc in self._controls))
@property
def client_quota(self) -> Quota:
return Quota(count=min(qc.client_quota.count for qc in self._controls),
size=min(qc.client_quota.size for qc in self._controls))
class RequestQueueQuotaControl(QuotaControl):
def __init__(self,
max_request_queue_size: int,
max_node_quota: Quota,
max_client_quota: Quota):
self._max_request_queue_size = max_request_queue_size
self._max_node_quota = max_node_quota
self._max_client_quota = max_client_quota
self._request_queue_overflow = False
def update_state(self, state: dict):
self._request_queue_overflow = state.get('request_queue_size', 0) >= self._max_request_queue_size
@property
def node_quota(self) -> Quota:
return self._max_node_quota
@property
def client_quota(self) -> Quota:
return Quota(count=0, size=0) if self._request_queue_overflow else self._max_client_quota
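# --- Illustrative usage sketch (values are made up).
# Composes a static quota with a queue-aware one: as soon as the request
# queue overflows, the composite client quota collapses to zero while the
# node quota is unaffected.
def _demo_quota_control():
    qc = CompositeQuotaControl(
        StaticQuotaControl(node_quota=Quota(count=100, size=1024),
                           client_quota=Quota(count=100, size=1024)),
        RequestQueueQuotaControl(max_request_queue_size=10,
                                 max_node_quota=Quota(count=100, size=1024),
                                 max_client_quota=Quota(count=100, size=1024)))
    qc.update_state({'request_queue_size': 10})
    assert qc.node_quota.count == 100
    assert qc.client_quota.count == 0 and qc.client_quota.size == 0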
|
1d503e8dc046beb558a62da0cbb28f1b2342e656
|
0d543b6f877114fc7ff7f5c2485230f606f6d98d
|
/2022/13.py
|
e7fde3adb06a8efcb7bf1f55c91ab868f0adb95a
|
[] |
no_license
|
jonathanpaulson/AdventOfCode
|
eca9d1732ec80dd640d6eed01b3a18d3b3ee455b
|
215f18d7d5b9761ec181954d2e62b6fed3bd12f5
|
refs/heads/master
| 2023-01-08T00:25:09.651009
| 2022-12-25T05:39:11
| 2022-12-25T05:39:11
| 321,228,487
| 227
| 103
| null | 2022-12-01T09:31:36
| 2020-12-14T04:03:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,474
|
py
|
13.py
|
#!/usr/bin/python3
import sys
import math
from copy import deepcopy
from collections import defaultdict, deque
infile = sys.argv[1] if len(sys.argv)>1 else '13.in'
data = open(infile).read().strip()
lines = [x for x in data.split('\n')]
def compare(p1,p2):
if isinstance(p1, int) and isinstance(p2,int):
if p1 < p2:
return -1
elif p1 == p2:
return 0
else:
return 1
elif isinstance(p1, list) and isinstance(p2, list):
i = 0
while i<len(p1) and i<len(p2):
c = compare(p1[i], p2[i])
if c==-1:
return -1
if c==1:
return 1
i += 1
if i==len(p1) and i<len(p2):
return -1
elif i==len(p2) and i<len(p1):
return 1
else:
return 0
elif isinstance(p1, int) and isinstance(p2, list):
return compare([p1], p2)
else:
return compare(p1, [p2])
packets = []
part1 = 0
for i,group in enumerate(data.split('\n\n')):
p1,p2 = group.split('\n')
    p1 = eval(p1)  # puzzle input is trusted, so eval is a quick list parser
    p2 = eval(p2)
packets.append(p1)
packets.append(p2)
if compare(p1, p2)==-1:
part1 += 1+i
print(part1)
packets.append([[2]])
packets.append([[6]])
from functools import cmp_to_key
packets = sorted(packets, key=cmp_to_key(lambda p1,p2: compare(p1,p2)))
part2 = 1
for i,p in enumerate(packets):
if p==[[2]] or p==[[6]]:
part2 *= i+1
print(part2)
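# Sanity checks for compare() using pairs from the published example
# (illustrative only; not needed for the answers above):
assert compare([1, 1, 3, 1, 1], [1, 1, 5, 1, 1]) == -1   # left is smaller
assert compare([9], [[8, 7, 6]]) == 1                    # right is smaller
assert compare([[4, 4], 4, 4], [[4, 4], 4, 4, 4]) == -1  # left runs out first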
|
51a0260aac1b11b11ebfdf787105a9d27c89f62b
|
15f0514701a78e12750f68ba09d68095172493ee
|
/Python3/65.py
|
c08d2daa8e181900e7f2c32de4187e2a5ecb7fd0
|
[
"MIT"
] |
permissive
|
strengthen/LeetCode
|
5e38c8c9d3e8f27109b9124ae17ef8a4139a1518
|
3ffa6dcbeb787a6128641402081a4ff70093bb61
|
refs/heads/master
| 2022-12-04T21:35:17.872212
| 2022-11-30T06:23:24
| 2022-11-30T06:23:24
| 155,958,163
| 936
| 365
|
MIT
| 2021-11-15T04:02:45
| 2018-11-03T06:47:38
| null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
65.py
|
__________________________________________________________________________________________________
sample 20 ms submission
class Solution:
def isNumber(self, s: str) -> bool:
try:
float(s)
return True
except:
return False
__________________________________________________________________________________________________
sample 13072 kb submission
class Solution:
def isNumber(self, s: str) -> bool:
try:
float(s.strip())
return True
except ValueError:
return False
__________________________________________________________________________________________________
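# Illustrative sanity checks for the float()-based approach above. Caveat:
# float() also accepts "inf" and "nan", which problem 65's grammar rejects,
# so this shortcut can misclassify those inputs.
_s = Solution()
assert _s.isNumber("0.1") is True
assert _s.isNumber("abc") is False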
|
10c435fe36af9c75467c3925474260c9570eb7b7
|
ac235a23f22be0d6f1818bb53902177f9969813a
|
/tests/integration/test_encoding.py
|
0f0deb991da87a4422172e444c85447017f3897d
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
DataDog/dd-trace-py
|
f09d6d48c4c69aea68f999fc8a458ade5c6150cf
|
1e3bd6d4edef5cda5a0831a6a7ec8e4046659d17
|
refs/heads/1.x
| 2023-09-01T20:25:26.746324
| 2023-09-01T18:54:37
| 2023-09-01T18:54:37
| 61,572,326
| 461
| 426
|
NOASSERTION
| 2023-09-14T20:38:57
| 2016-06-20T18:52:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,457
|
py
|
test_encoding.py
|
# -*- coding: utf-8 -*-
import os
import mock
import pytest
from ddtrace import Tracer
AGENT_VERSION = os.environ.get("AGENT_VERSION")
class TestTraceAcceptedByAgent:
def test_simple_trace_accepted_by_agent(self):
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.writer.log") as log:
with tracer.trace("root"):
for _ in range(999):
with tracer.trace("child"):
pass
tracer.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
@pytest.mark.parametrize(
"tags",
[
({"env": "my-env", "tag1": "some_str_1", "tag2": "some_str_2", "tag3": "some_str_3"}),
({"env": "test-env", b"tag1": "some_str_1", b"tag2": "some_str_2", b"tag3": "some_str_3"}),
({"env": "my-test-env", u"😐": "some_str_1", b"tag2": "some_str_2", "unicode": u"😐"}),
],
)
def test_trace_with_meta_accepted_by_agent(self, tags):
"""Meta tags should be text types."""
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.writer.log") as log:
with tracer.trace("root", service="test_encoding", resource="test_resource") as root:
root.set_tags(tags)
for _ in range(999):
with tracer.trace("child") as child:
child.set_tags(tags)
tracer.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
@pytest.mark.parametrize(
"metrics",
[
({"num1": 12345, "num2": 53421, "num3": 1, "num4": 10}),
({b"num1": 123.45, b"num2": 543.21, b"num3": 11.0, b"num4": 1.20}),
({u"😐": "123.45", b"num2": "1", "num3": "999.99", "num4": "12345"}),
],
)
def test_trace_with_metrics_accepted_by_agent(self, metrics):
"""Metric tags should be numeric types - i.e. int, float, long (py3), and str numbers."""
tracer = Tracer()
with mock.patch("ddtrace.internal.writer.writer.log") as log:
with tracer.trace("root") as root:
root.set_metrics(metrics)
for _ in range(999):
with tracer.trace("child") as child:
child.set_metrics(metrics)
tracer.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
|
152171b86ea00e2d4fe95703358cf1abcbab9938
|
524619551b87670456f06d417c514e28778963bc
|
/federation/tests/entities/activitypub/test_mappers.py
|
566503fd5f9e25a0029233881cb3ef610ebcc54d
|
[
"BSD-3-Clause"
] |
permissive
|
jaywink/federation
|
a91979c3d64eb3d2822d1cffa41ce93f13794ad4
|
60694662a5a323d28c82c44609935638e4d55eab
|
refs/heads/master
| 2023-08-08T00:04:03.472996
| 2023-06-04T13:08:23
| 2023-06-04T13:08:23
| 38,371,381
| 109
| 11
|
BSD-3-Clause
| 2021-09-13T23:04:00
| 2015-07-01T13:01:20
|
Python
|
UTF-8
|
Python
| false
| false
| 18,502
|
py
|
test_mappers.py
|
from datetime import datetime
from unittest.mock import patch, Mock, DEFAULT
import json
import pytest
#from federation.entities.activitypub.entities import (
# models.Follow, models.Accept, models.Person, models.Note, models.Note,
# models.Delete, models.Announce)
import federation.entities.activitypub.models as models
from federation.entities.activitypub.mappers import message_to_objects, get_outbound_entity
from federation.entities.base import Accept, Follow, Profile, Post, Comment, Image, Share, Retraction
from federation.tests.fixtures.payloads import (
ACTIVITYPUB_FOLLOW, ACTIVITYPUB_PROFILE, ACTIVITYPUB_PROFILE_INVALID, ACTIVITYPUB_UNDO_FOLLOW, ACTIVITYPUB_POST,
ACTIVITYPUB_COMMENT, ACTIVITYPUB_RETRACTION, ACTIVITYPUB_SHARE, ACTIVITYPUB_RETRACTION_SHARE,
ACTIVITYPUB_POST_IMAGES, ACTIVITYPUB_POST_WITH_SOURCE_MARKDOWN, ACTIVITYPUB_POST_WITH_TAGS,
ACTIVITYPUB_POST_WITH_SOURCE_BBCODE, ACTIVITYPUB_POST_WITH_MENTIONS, ACTIVITYPUB_PROFILE_WITH_DIASPORA_GUID,
ACTIVITYPUB_REMOTE_PROFILE, ACTIVITYPUB_COLLECTION)
from federation.types import UserType, ReceiverVariant
class TestActivitypubEntityMappersReceive:
@patch.object(models.Follow, "post_receive", autospec=True)
def test_message_to_objects__calls_post_receive_hook(self, mock_post_receive):
message_to_objects(ACTIVITYPUB_FOLLOW, "https://example.com/actor")
assert mock_post_receive.called
def test_message_to_objects__announce(self):
entities = message_to_objects(ACTIVITYPUB_SHARE, "https://mastodon.social/users/jaywink")
assert len(entities) == 1
entity = entities[0]
assert isinstance(entity, models.Announce)
assert entity.actor_id == "https://mastodon.social/users/jaywink"
assert entity.target_id == "https://mastodon.social/users/Gargron/statuses/102559779793316012"
assert entity.id == "https://mastodon.social/users/jaywink/statuses/102560701449465612/activity"
assert entity.public is True
assert entity.entity_type == "Post"
assert entity.raw_content == ""
def test_message_to_objects__follow(self):
entities = message_to_objects(ACTIVITYPUB_FOLLOW, "https://example.com/actor")
assert len(entities) == 1
entity = entities[0]
assert isinstance(entity, models.Follow)
assert entity.actor_id == "https://example.com/actor"
assert entity.target_id == "https://example.org/actor"
assert entity.following is True
def test_message_to_objects__unfollow(self):
entities = message_to_objects(ACTIVITYPUB_UNDO_FOLLOW, "https://example.com/actor")
assert len(entities) == 1
entity = entities[0]
assert isinstance(entity, models.Follow)
assert entity.actor_id == "https://example.com/actor"
assert entity.target_id == "https://example.org/actor"
assert entity.following is False
@pytest.mark.skip
def test_message_to_objects_mentions_are_extracted(self):
entities = message_to_objects(
DIASPORA_POST_SIMPLE_WITH_MENTION, "alice@alice.diaspora.example.org"
)
assert len(entities) == 1
post = entities[0]
assert post._mentions == {'jaywink@jasonrobinson.me'}
def test_message_to_objects_simple_post(self):
entities = message_to_objects(ACTIVITYPUB_POST, "https://diaspodon.fr/users/jaywink")
assert len(entities) == 1
post = entities[0]
assert isinstance(post, models.Note)
assert isinstance(post, Post)
assert post.raw_content == '<p><span class="h-card"><a class="u-url mention" ' \
'href="https://dev.jasonrobinson.me/u/jaywink/">' \
'@<span>jaywink</span></a></span> boom</p>'
assert post.rendered_content == '<p><span class="h-card"><a class="u-url mention" href="https://dev.jasonrobinson.me/u/jaywink/">' \
'@<span>jaywink</span></a></span> boom</p>'
assert post.id == "https://diaspodon.fr/users/jaywink/statuses/102356911717767237"
assert post.actor_id == "https://diaspodon.fr/users/jaywink"
assert post.public is True
assert post._media_type == "text/html"
assert getattr(post, "target_id", None) is None
def test_message_to_objects_simple_post__with_tags(self):
entities = message_to_objects(ACTIVITYPUB_POST_WITH_TAGS, "https://diaspodon.fr/users/jaywink")
assert len(entities) == 1
post = entities[0]
assert isinstance(post, models.Note)
assert isinstance(post, Post)
assert post.raw_content == '<p>boom #test</p>'
# TODO: fix this test
@pytest.mark.skip
def test_message_to_objects_simple_post__with_mentions(self):
entities = message_to_objects(ACTIVITYPUB_POST_WITH_MENTIONS, "https://mastodon.social/users/jaywink")
assert len(entities) == 1
post = entities[0]
assert isinstance(post, models.Note)
assert isinstance(post, Post)
assert len(post._mentions) == 1
assert list(post._mentions)[0] == "https://dev3.jasonrobinson.me/u/jaywink/"
def test_message_to_objects_simple_post__with_source__bbcode(self):
entities = message_to_objects(ACTIVITYPUB_POST_WITH_SOURCE_BBCODE, "https://diaspodon.fr/users/jaywink")
assert len(entities) == 1
post = entities[0]
assert isinstance(post, models.Note)
assert isinstance(post, Post)
assert post.rendered_content == '<p><span class="h-card"><a class="u-url mention" href="https://dev.jasonrobinson.me/u/jaywink/">' \
'@<span>jaywink</span></a></span> boom</p>'
assert post.raw_content == '<p><span class="h-card"><a class="u-url mention" ' \
'href="https://dev.jasonrobinson.me/u/jaywink/">' \
'@<span>jaywink</span></a></span> boom</p>'
def test_message_to_objects_simple_post__with_source__markdown(self):
entities = message_to_objects(ACTIVITYPUB_POST_WITH_SOURCE_MARKDOWN, "https://diaspodon.fr/users/jaywink")
assert len(entities) == 1
post = entities[0]
assert isinstance(post, models.Note)
assert isinstance(post, Post)
assert post.rendered_content == '<p><span class="h-card"><a href="https://dev.jasonrobinson.me/u/jaywink/" ' \
'class="u-url mention">@<span>jaywink</span></a></span> boom</p>'
assert post.raw_content == "@jaywink boom"
assert post.id == "https://diaspodon.fr/users/jaywink/statuses/102356911717767237"
assert post.actor_id == "https://diaspodon.fr/users/jaywink"
assert post.public is True
assert post._media_type == "text/markdown"
assert getattr(post, "target_id", None) is None
def test_message_to_objects_post_with_photos(self):
entities = message_to_objects(ACTIVITYPUB_POST_IMAGES, "https://mastodon.social/users/jaywink")
assert len(entities) == 1
post = entities[0]
assert isinstance(post, models.Note)
# TODO: test video and audio attachment
assert len(post._children) == 2
photo = post._children[0]
assert isinstance(photo, Image)
assert photo.url == "https://files.mastodon.social/media_attachments/files/017/642/079/original/" \
"f51b0aee0ee1f2e1.jpg"
assert photo.name == ""
assert photo.raw_content == ""
assert photo.height == 0
assert photo.width == 0
assert photo.guid == ""
assert photo.handle == ""
def test_message_to_objects_comment(self):
entities = message_to_objects(ACTIVITYPUB_COMMENT, "https://diaspodon.fr/users/jaywink")
assert len(entities) == 1
comment = entities[0]
assert isinstance(comment, models.Note)
assert isinstance(comment, Comment)
assert comment.raw_content == '<p><span class="h-card"><a class="u-url mention" ' \
'href="https://dev.jasonrobinson.me/u/jaywink/">' \
'@<span>jaywink</span></a></span> boom</p>'
assert comment.id == "https://diaspodon.fr/users/jaywink/statuses/102356911717767237"
assert comment.actor_id == "https://diaspodon.fr/users/jaywink"
assert comment.target_id == "https://dev.jasonrobinson.me/content/653bad70-41b3-42c9-89cb-c4ee587e68e4/"
@pytest.mark.skip
def test_message_to_objects_like(self, mock_validate):
entities = message_to_objects(
DIASPORA_POST_LIKE, "alice@alice.diaspora.example.org", sender_key_fetcher=Mock()
)
assert len(entities) == 1
like = entities[0]
assert isinstance(like, DiasporaLike)
assert isinstance(like, Reaction)
assert like.target_guid == "((parent_guidparent_guidparent_guidparent_guid))"
assert like.guid == "((guidguidguidguidguidguid))"
assert like.handle == "alice@alice.diaspora.example.org"
assert like.participation == "reaction"
assert like.reaction == "like"
assert like.signature == "((signature))"
assert like._xml_tags == [
"parent_type", "guid", "parent_guid", "positive", "author",
]
mock_validate.assert_called_once_with()
def test_message_to_objects_profile(self):
entities = message_to_objects(ACTIVITYPUB_PROFILE, "http://example.com/1234")
assert len(entities) == 1
profile = entities[0]
assert profile.id == "https://diaspodon.fr/users/jaywink"
assert profile.inboxes == {
"private": "https://diaspodon.fr/users/jaywink/inbox",
"public": "https://diaspodon.fr/inbox",
}
        assert profile.handle is None
assert profile.name == "Jason Robinson"
assert profile.image_urls == {
"large": "https://diaspodon.fr/system/accounts/avatars/000/033/155/original/pnc__picked_media_be51984c-4"
"3e9-4266-9b9a-b74a61ae4167.jpg?1538505110",
"medium": "https://diaspodon.fr/system/accounts/avatars/000/033/155/original/pnc__picked_media_be51984c-4"
"3e9-4266-9b9a-b74a61ae4167.jpg?1538505110",
"small": "https://diaspodon.fr/system/accounts/avatars/000/033/155/original/pnc__picked_media_be51984c-4"
"3e9-4266-9b9a-b74a61ae4167.jpg?1538505110",
}
assert profile.gender == ""
assert profile.raw_content == "<p>Temp account while implementing AP for Socialhome.</p><p><a href=\"" \
"https://jasonrobinson.me\" rel=\"nofollow noopener\" target=\"_blank\">" \
"<span class=\"invisible\">https://</span><span class=\"\">jasonrobinson." \
"me</span><span class=\"invisible\"></span></a> / <a href=\"https://social" \
"home.network\" rel=\"nofollow noopener\" target=\"_blank\"><span class=\"i" \
"nvisible\">https://</span><span class=\"\">socialhome.network</span><span c" \
"lass=\"invisible\"></span></a> / <a href=\"https://feneas.org\" rel=\"nofoll" \
"ow noopener\" target=\"_blank\"><span class=\"invisible\">https://</span><spa" \
"n class=\"\">feneas.org</span><span class=\"invisible\"></span></a></p>"
assert profile.location == ""
assert profile.public is True
assert profile.nsfw is False
assert profile.tag_list == []
def test_message_to_objects_profile__diaspora_guid_extracted(self):
entities = message_to_objects(
ACTIVITYPUB_PROFILE_WITH_DIASPORA_GUID, "https://friendica.feneas.org/profile/feneas",
)
assert len(entities) == 1
profile = entities[0]
assert profile.id == "https://friendica.feneas.org/profile/feneas"
assert profile.guid == "76158462365bd347844d248732383358"
#@patch('federation.tests.django.utils.get_profile', return_value=None)
@patch('federation.entities.activitypub.models.get_profile', return_value=None)
@patch('federation.utils.activitypub.fetch_document')
def test_message_to_objects_receivers_are_saved(self, mock_fetch, mock_func):
def side_effect(*args, **kwargs):
payloads = {'https://diaspodon.fr/users/jaywink': json.dumps(ACTIVITYPUB_PROFILE),
'https://fosstodon.org/users/astdenis': json.dumps(ACTIVITYPUB_REMOTE_PROFILE),
'https://diaspodon.fr/users/jaywink/followers': json.dumps(ACTIVITYPUB_COLLECTION),
}
if args[0] in payloads.keys():
return payloads[args[0]], 200, None
else:
return None, None, 'Nothing here'
mock_fetch.side_effect = side_effect
# noinspection PyTypeChecker
entities = message_to_objects(
ACTIVITYPUB_POST,
"https://diaspodon.fr/users/jaywink",
)
entity = entities[0]
assert set(entity._receivers) == {
UserType(
id='https://diaspodon.fr/users/jaywink', receiver_variant=ReceiverVariant.FOLLOWERS,
),
UserType(
id='https://fosstodon.org/users/astdenis',
receiver_variant=ReceiverVariant.ACTOR,
)
}
def test_message_to_objects_retraction(self):
entities = message_to_objects(ACTIVITYPUB_RETRACTION, "https://friendica.feneas.org/profile/jaywink")
assert len(entities) == 1
entity = entities[0]
assert isinstance(entity, Retraction)
assert entity.actor_id == "https://friendica.feneas.org/profile/jaywink"
assert entity.target_id == "https://friendica.feneas.org/objects/76158462-165d-3386-aa23-ba2090614385"
assert entity.entity_type == "Object"
def test_message_to_objects_retraction__share(self):
entities = message_to_objects(ACTIVITYPUB_RETRACTION_SHARE, "https://mastodon.social/users/jaywink")
assert len(entities) == 1
entity = entities[0]
assert isinstance(entity, Retraction)
assert entity.actor_id == "https://mastodon.social/users/jaywink"
assert entity.target_id == "https://mastodon.social/users/jaywink/statuses/102571932479036987/activity"
assert entity.entity_type == "Object"
@pytest.mark.skip
def test_message_to_objects_reshare_extra_properties(self):
entities = message_to_objects(DIASPORA_RESHARE_WITH_EXTRA_PROPERTIES, "alice@example.org")
assert len(entities) == 1
entity = entities[0]
assert isinstance(entity, DiasporaReshare)
assert entity.raw_content == "Important note here"
assert entity.entity_type == "Comment"
@patch("federation.entities.activitypub.mappers.logger.error")
def test_invalid_entity_logs_an_error(self, mock_logger):
entities = message_to_objects(ACTIVITYPUB_PROFILE_INVALID, "http://example.com/1234")
assert len(entities) == 0
assert mock_logger.called
def test_adds_source_protocol_to_entity(self):
entities = message_to_objects(ACTIVITYPUB_PROFILE, "http://example.com/1234")
assert entities[0]._source_protocol == "activitypub"
@pytest.mark.skip
# since calamus turns the whole payload into objects, the source payload is not kept
def test_source_object(self):
entities = message_to_objects(ACTIVITYPUB_PROFILE, "http://example.com/1234")
entity = entities[0]
assert entity._source_object == ACTIVITYPUB_PROFILE
@pytest.mark.skip
def test_element_to_objects_calls_retrieve_remote_profile(self, mock_retrieve, mock_validate):
message_to_objects(DIASPORA_POST_COMMENT, "alice@alice.diaspora.example.org")
mock_retrieve.assert_called_once_with("alice@alice.diaspora.example.org")
@pytest.mark.skip
def test_element_to_objects_verifies_handles_are_the_same(self, mock_check):
message_to_objects(DIASPORA_POST_SIMPLE, "bob@example.org")
mock_check.assert_called_once_with("bob@example.org", "alice@alice.diaspora.example.org")
@pytest.mark.skip
def test_element_to_objects_returns_no_entity_if_handles_are_different(self):
entities = message_to_objects(DIASPORA_POST_SIMPLE, "bob@example.org")
assert not entities
class TestGetOutboundEntity:
def test_already_fine_entities_are_returned_as_is(self, private_key):
entity = models.Accept()
entity.validate = Mock()
assert get_outbound_entity(entity, private_key) == entity
entity = models.Follow()
entity.validate = Mock()
assert get_outbound_entity(entity, private_key) == entity
entity = models.Person()
entity.validate = Mock()
assert get_outbound_entity(entity, private_key) == entity
@patch.object(models.Accept, "validate", new=Mock())
def test_accept_is_converted_to_activitypubaccept(self, private_key):
entity = Accept()
assert isinstance(get_outbound_entity(entity, private_key), models.Accept)
@patch.object(models.Follow, "validate", new=Mock())
def test_follow_is_converted_to_activitypubfollow(self, private_key):
entity = Follow()
assert isinstance(get_outbound_entity(entity, private_key), models.Follow)
@patch.object(models.Person, "validate", new=Mock())
def test_profile_is_converted_to_activitypubprofile(self, private_key):
entity = Profile()
assert isinstance(get_outbound_entity(entity, private_key), models.Person)
def test_entity_is_validated__fail(self, private_key):
entity = Share(
actor_id="https://localhost.local/foo",
id="https://localhost.local/bar",
created_at=datetime.now(),
)
with pytest.raises(ValueError):
get_outbound_entity(entity, private_key)
def test_entity_is_validated__success(self, private_key):
entity = Share(
actor_id="https://localhost.local/foo",
id="https://localhost.local/bar",
created_at=datetime.now(),
target_id="https://localhost.local/bar",
)
get_outbound_entity(entity, private_key)
|
a4e5e2a0168a7903974ea379987e4eb9860d76d7
|
f1c2e4b3147af77e23306f841610aafd6db1c6b0
|
/submarine-sdk/pysubmarine/setup.py
|
69320be650fc8d786fcc5108a2b36fb9f2281a22
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"MIT",
"CDDL-1.1",
"Classpath-exception-2.0"
] |
permissive
|
apache/submarine
|
a2927f5f4f7f5faff4701139f2f0f88a98195e7f
|
0c10613f39b707d5e446c515c12fa28295c8052e
|
refs/heads/master
| 2023-08-30T14:35:43.145942
| 2023-08-20T00:19:54
| 2023-08-24T23:50:49
| 209,459,144
| 663
| 269
|
Apache-2.0
| 2023-09-03T09:05:06
| 2019-09-19T04:00:17
|
Java
|
UTF-8
|
Python
| false
| false
| 3,542
|
py
|
setup.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
with open("README.md") as f:
long_description = f.read()
setup(
name="apache-submarine",
version="0.8.0.dev",
description="A python SDK for submarine",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/apache/submarine",
packages=find_packages(exclude=["tests", "tests.*"]),
package_data={"submarine.cli.config": ["cli_config.yaml"]},
install_requires=[
"numpy",
"pandas",
"sqlalchemy>=1.4.0, <2.0.0",
"sqlparse",
"pymysql",
"requests>=2.26.0", # SUBMARINE-922. avoid GPL dependency.
"urllib3>=1.15.1",
"certifi>=14.05.14",
"python-dateutil>=2.5.3",
"pyarrow>=6.0.1",
"boto3>=1.17.58",
"click>=8.1.0",
"rich",
"dacite",
"pyaml",
],
extras_require={
"tf": ["tensorflow==1.15.5", "numpy>=1.16.0,<1.19.0", "protobuf>=3.6.1,<3.20"],
"tf2": [
"tensorflow>=2.6.0,<2.10.0",
"numpy>=1.14.5",
"keras>=2.6.0",
"protobuf>=3.9.2,<3.20",
"tensorflow-addons==0.17.0",
"tensorflow-estimator>=2.6.0,<2.10.0",
"tf_slim==1.1.0",
            # todo(cdmikechen): Based on SUBMARINE-1372, typeguard has recently
            # been upgraded to version 3.0, which restricts some Python syntax
            # and types more strictly. We are not upgrading it in submarine
            # 0.8.0 for now and will fix version compatibility in 0.8.1 or 0.9.0.
"typeguard<3.0.0",
            # todo(cdmikechen): SUBMARINE-1389. Starting with scipy 1.11.0
            # (https://github.com/scipy/scipy/releases/tag/v1.11.0),
            # scipy needs numpy 1.21.6 or greater on Python 3.9,
            # so we restrict scipy < 1.11.0 to keep supporting tf2.6.
# From submarine 0.8.1 or 0.9.0, we may no longer support tensorflow 2.6
"scipy<1.11.0",
],
"pytorch": ["torch>=1.5.0", "torchvision>=0.6.0"],
},
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
entry_points={
"console_scripts": [
"submarine = submarine.cli.main:entry_point",
],
},
license="Apache License, Version 2.0",
maintainer="Apache Submarine Community",
maintainer_email="dev@submarine.apache.org",
)
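# --- Illustrative install commands (comments only, not executed).
# The extras_require entries above map to pip "extras", e.g.:
#   pip install apache-submarine              # core SDK only
#   pip install "apache-submarine[tf2]"       # with TensorFlow 2.x support
#   pip install "apache-submarine[pytorch]"   # with PyTorch support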
|
92780096f4c2781c9417edd592ec59a98612f73c
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/battle_results/presenter/events/quests.py
|
59bc85d2a5dd61ba4aaa382a8265bd9ed87977d6
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 395
|
py
|
quests.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/battle_results/presenter/events/quests.py
import typing
if typing.TYPE_CHECKING:
from gui.battle_results.reusable import ReusableInfo
from gui.impl.gen.view_models.views.lobby.postbattle.events.base_event_model import BaseEventModel
def getQuestsEvents(tooltipData, reusable, result):
pass
|
af3fa07f5606596d0379cf930183d90e7b7d4d51
|
bc2ccaed5cf0367d196cedeef51d4b89db310db0
|
/astroNN/neuralode/odeint.py
|
56e0c2b8f3b5c7a0a04e33bf7caf756f1de6703a
|
[
"MIT"
] |
permissive
|
henrysky/astroNN
|
6477f816bd18037a62f65038f61a0c15f39bbd7f
|
4b15c14ca6d7300276e55114fc0c23c81d30c5f3
|
refs/heads/master
| 2023-07-21T07:45:35.428248
| 2023-07-10T05:57:38
| 2023-07-10T05:57:38
| 106,888,772
| 184
| 55
|
MIT
| 2023-09-11T14:42:15
| 2017-10-14T01:40:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,886
|
py
|
odeint.py
|
import tensorflow as tf
from astroNN.neuralode.dop853 import dop853
from astroNN.neuralode.runge_kutta import rk4
method_list = {"dop853": dop853, "rk4": rk4}
def odeint(
func=None,
x=None,
t=None,
aux=None,
method="dop853",
precision=tf.float32,
*args,
**kwargs,
):
"""
    Computes the numerical solution of a system of first order ordinary differential equations y'=f(x,y). Default
    precision is float32.
    :param func: function of the differential equation; usually takes func([position, velocity], time) and returns [velocity, acceleration]
:type func: callable
:param x: initial x, usually is [position, velocity]
:type x: Union([tf.Tensor, numpy.ndarray, list])
:param t: set of times at which one wants the result
:type t: Union([tf.Tensor, numpy.ndarray, list])
:param method: numerical integrator to use, available integrators are ['dop853', 'rk4']
:type method: str
:param precision: float precision, tf.float32 or tf.float64
:type precision: type
:return: integrated result
:rtype: tf.Tensor
:History: 2020-May-31 - Written - Henry Leung (University of Toronto)
"""
try:
ode_method = method_list[method.lower()]
except KeyError:
raise NotImplementedError(f"Method {method} is not implemented")
# check things if they are tensors
if not isinstance(x, tf.Tensor):
x = tf.constant(x)
if not isinstance(t, tf.Tensor):
t = tf.constant(t)
if precision == tf.float32:
tf_float = tf.float32
elif precision == tf.float64:
tf_float = tf.float64
else:
raise TypeError(f"Data type {precision} not understood")
x = tf.cast(x, tf_float)
t = tf.cast(t, tf_float)
    aux_flag = aux is not None
if not isinstance(aux, tf.Tensor) and aux_flag:
aux = tf.constant(aux, dtype=tf_float)
@tf.function
def wrapped_func(x, t, *args, **kwargs):
return func(x, t, *args, **kwargs)
if not aux_flag:
if len(x.shape) < 2: # ensure multi-dim
return ode_method(
func=wrapped_func, x=x, t=t, tf_float=tf_float, *args, **kwargs
)[0]
else:
total_num = x.shape[0]
if len(t.shape) < 2:
t = tf.stack([t] * total_num)
def odeint_external(tensor):
return ode_method(
func=wrapped_func,
x=tensor[0],
t=tensor[1],
tf_float=tf_float,
*args,
**kwargs,
)
@tf.function
def parallelized_func(tensor):
return tf.map_fn(odeint_external, tensor)
# result in (x, t, aux)
result = parallelized_func((x, t))
else:
if len(x.shape) < 2: # ensure multi-dim
return ode_method(
func=wrapped_func, x=x, t=t, aux=aux, tf_float=tf_float, *args, **kwargs
)[0]
else:
total_num = x.shape[0]
if len(t.shape) < 2:
t = tf.stack([t] * total_num)
def odeint_external(tensor):
return ode_method(
func=wrapped_func,
x=tensor[0],
t=tensor[1],
aux=tensor[2],
tf_float=tf_float,
*args,
**kwargs,
)
@tf.function
def parallelized_func(tensor):
return tf.map_fn(odeint_external, tensor)
# result in (x, t, aux)
result = parallelized_func((x, t, aux))
return result[0]
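# Illustrative usage sketch (not part of the original module); assumes the
# bundled "rk4" integrator accepts the documented arguments. Integrates a
# simple harmonic oscillator where x = [position, velocity].
if __name__ == "__main__":
    def sho(x, t):
        # dx/dt = velocity, dv/dt = -position
        return tf.stack([x[1], -x[0]])

    times = tf.linspace(0.0, 10.0, 100)
    trajectory = odeint(func=sho, x=[1.0, 0.0], t=times, method="rk4")
    print(trajectory)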
|
157125e96d040805ebfd4c83373ed69665d9f822
|
21ae99905ba4fc33e484eb55f27bcaa60207536a
|
/web/datasets/tests/management/commands/test_cityhall.py
|
db6b6de8d612240047af450e9cbc6528454c2000
|
[
"MIT"
] |
permissive
|
DadosAbertosDeFeira/maria-quiteria
|
416f3e4d9fb531dd0c56d34227756e63cde9d7d8
|
9d55015aaaeb5e2107cc97ee8a84a04d1b56c798
|
refs/heads/main
| 2023-08-30T21:07:51.448228
| 2023-08-19T22:10:47
| 2023-08-19T22:10:47
| 220,512,724
| 177
| 84
|
MIT
| 2023-09-10T16:37:40
| 2019-11-08T17:07:12
|
Python
|
UTF-8
|
Python
| false
| false
| 7,797
|
py
|
test_cityhall.py
|
from datetime import datetime
import pytest
from django.utils.timezone import make_aware
from web.datasets.management.commands._cityhall import save_bid
@pytest.mark.django_db
class TestSaveBid:
def test_save_bid(self, mock_backup_file):
item = {
"crawled_at": make_aware(datetime(2020, 3, 21, 7, 15, 17, 908831)),
"crawled_from": "http://www.feiradesantana.ba.gov.br/servicos.asp",
"session_at": make_aware(datetime(2018, 4, 17, 8, 30, 0)),
"public_agency": "PMFS",
"month": 4,
"year": 2018,
"description": (
"Aquisi\u00e7\u00e3o de arma de fogo longa para a "
"Guarda Municipal de Feira de Santana.OBS: EDITAL DISPON\u00cdVEL"
"NO SITE: WWW.BLLCOMPRAS.ORG.BR"
),
"history": [],
"codes": (
"Licita\u00e7\u00e3o 133-2018 / " "Preg\u00e3o Eletr\u00f4nico 047-2018"
),
"modality": "pregao_eletronico",
"files": [
{
"url": "http://www.feiradesantana.ba.gov.br/servicos.asp?id=2",
"checksum": "checksum",
"content": None,
}
],
}
bid = save_bid(item)
assert bid.session_at == item["session_at"]
assert bid.description == item["description"]
assert bid.public_agency == item["public_agency"]
assert bid.modality == item["modality"]
assert bid.files
def test_save_history(self, mock_backup_file):
item = {
"public_agency": "PMFS",
"crawled_at": make_aware(datetime(2020, 4, 4, 14, 29, 49, 261985)),
"crawled_from": "http://www.feiradesantana.ba.gov.br/servicos.asp",
"session_at": make_aware(datetime(2019, 4, 5, 8, 30)),
"description": (
"Contratação de empresa para prestação de serviços "
"profissionais de apoio administrativo em Unidades de Saúde da "
"Secretaria Municipal de Saúde.Edital disponível no site do "
"Banco do Brasil: www.licitacoes-e.com.br.Código "
"Correspondente Banco do Brasil: nº 755980REMARCADA"
),
"codes": (
"Licita\u00e7\u00e3o 133-2018 / " "Preg\u00e3o Eletr\u00f4nico 047-2018"
),
"modality": "pregao_eletronico",
"history": [
{
"published_at": make_aware(datetime(2018, 4, 17, 8, 30, 0)),
"event": "Resposta a pedido de esclarecimento",
"url": "http://www.feiradesantana.ba.gov.br/SMS.pdf",
}
],
}
bid = save_bid(item)
assert bid.events.count() == 1
event = bid.events.first()
assert event.published_at is not None
assert event.summary == item["history"][0]["event"]
assert event.files.count() == 1
def test_handle_with_existent_event(self, mock_backup_file):
item = {
"public_agency": "PMFS",
"crawled_at": make_aware(datetime(2020, 4, 4, 14, 29, 49, 261985)),
"crawled_from": "http://www.feiradesantana.ba.gov.br/servicos.asp",
"session_at": make_aware(datetime(2019, 4, 5, 8, 30)),
"description": (
"Contratação de empresa para prestação de serviços "
"profissionais de apoio administrativo em Unidades de Saúde da "
"Secretaria Municipal de Saúde.Edital disponível no site do "
"Banco do Brasil: www.licitacoes-e.com.br.Código "
"Correspondente Banco do Brasil: nº 755980REMARCADA"
),
"codes": (
"Licita\u00e7\u00e3o 133-2018 / " "Preg\u00e3o Eletr\u00f4nico 047-2018"
),
"modality": "pregao_eletronico",
"history": [
{
"published_at": make_aware(datetime(2019, 4, 4, 16, 20, 0)),
"event": "Resposta a pedido de esclarecimento",
"url": "http://www.feiradesantana.ba.gov.br/SMS.pdf",
}
],
}
bid = save_bid(item)
assert bid.events.count() == 1
item["history"] = [
{
"published_at": make_aware(datetime(2019, 4, 4, 16, 20, 0)),
"event": "Resposta a pedido de esclarecimento",
"url": "http://www.feiradesantana.ba.gov.br/SMS.pdf",
},
{
"published_at": make_aware(datetime(2019, 4, 4, 18, 20, 0)),
"event": "Resposta a pedido de esclarecimento",
"url": "http://www.feiradesantana.ba.gov.br/SMS.pdf",
},
{
"published_at": make_aware(datetime(2019, 4, 4, 16, 20, 0)),
"event": "CORREÇÃO DE EDITAL",
"url": "http://www.feiradesantana.ba.gov.br/SMS.pdf",
},
]
save_bid(item)
assert bid.events.count() == 3
def test_handle_with_updated_bid(self, mock_backup_file):
item = {
"crawled_at": make_aware(datetime(2020, 3, 21, 7, 15, 17, 908831)),
"crawled_from": "http://www.feiradesantana.ba.gov.br/servicos.asp",
"session_at": make_aware(datetime(2018, 4, 17, 8, 30, 0)),
"public_agency": "PMFS",
"month": 4,
"year": 2018,
"description": (
"Aquisi\u00e7\u00e3o de arma de fogo longa para a "
"Guarda Municipal de Feira de Santana.OBS: EDITAL DISPON\u00cdVEL"
"NO SITE: WWW.BLLCOMPRAS.ORG.BR"
),
"history": [],
"codes": (
"Licita\u00e7\u00e3o 133-2018 / " "Preg\u00e3o Eletr\u00f4nico 047-2018"
),
"modality": "pregao_eletronico",
"files": [
{
"url": "http://www.feiradesantana.ba.gov.br/servicos.asp?id=2",
"checksum": "checksum",
"content": None,
}
],
}
bid = save_bid(item)
item["description"] = "Aquisição de arma de flores."
updated_bid = save_bid(item)
assert bid.pk == updated_bid.pk
assert bid.description != updated_bid.description
def test_create_different_bids_for_different_agency_modality(
self, mock_backup_file
):
item = {
"crawled_at": make_aware(datetime(2020, 3, 21, 7, 15, 17, 908831)),
"crawled_from": "http://www.feiradesantana.ba.gov.br/servicos.asp",
"session_at": make_aware(datetime(2018, 4, 17, 8, 30, 0)),
"public_agency": "PMFS",
"month": 4,
"year": 2018,
"description": (
"Aquisi\u00e7\u00e3o de arma de fogo longa para a "
"Guarda Municipal de Feira de Santana.OBS: EDITAL DISPON\u00cdVEL"
"NO SITE: WWW.BLLCOMPRAS.ORG.BR"
),
"history": [],
"codes": (
"Licita\u00e7\u00e3o 133-2018 / " "Preg\u00e3o Eletr\u00f4nico 047-2018"
),
"modality": "pregao_eletronico",
"files": [
{
"url": "http://www.feiradesantana.ba.gov.br/servicos.asp?id=2",
"checksum": "checksum",
"content": None,
}
],
}
bid = save_bid(item)
item["public_agency"] = "FHFS"
item["codes"] = "CHAMADA PÚBLICA 004-2019"
another_bid = save_bid(item)
assert bid.pk != another_bid.pk
|
3302a93b2a76a2898f601a9cf0312ed31f38ae0e
|
b7163b44b679e082fe97cf7fcd0c73b2fcdb38eb
|
/modules/dbnd-airflow-monitor/test_dbnd_airflow_monitor/runtime_syncer/test_error_aggregator.py
|
b82eab758afbcf1a1ed7dbb389f8b194009b886d
|
[
"Apache-2.0"
] |
permissive
|
databand-ai/dbnd
|
70c95d95e12bfb8ab471a6dce27691ed658cb92d
|
d59c99dcdcd280d7eec36a693dd80f8c8c831ea2
|
refs/heads/develop
| 2023-06-24T18:07:56.524526
| 2023-05-28T07:57:36
| 2023-05-28T07:57:36
| 231,361,064
| 257
| 33
|
Apache-2.0
| 2023-08-06T08:30:28
| 2020-01-02T10:42:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
test_error_aggregator.py
|
# © Copyright Databand.ai, an IBM Company 2022
from airflow_monitor.shared.error_aggregator import ErrorAggregator
from . import random_text
def test_01_same_reporter():
e = ErrorAggregator()
assert not e.report("reporter", None).should_update
msg1 = random_text()
res = e.report("reporter", msg1)
assert res.should_update
assert msg1 in res.message
msg2 = random_text()
res = e.report("reporter", msg2)
assert res.should_update
assert msg1 not in res.message
assert msg2 in res.message
res = e.report("reporter", None)
assert res.should_update
assert res.message is None
res = e.report("reporter", None)
assert not res.should_update
def test_02_different_reporters():
e = ErrorAggregator()
msg1 = random_text()
res = e.report("reporter1", msg1)
assert res.should_update
assert msg1 in res.message
msg2 = random_text()
res = e.report("reporter2", msg2)
assert res.should_update
assert msg1 in res.message
assert msg2 in res.message
res = e.report("reporter1", None)
assert res.should_update
assert msg1 not in res.message
assert msg2 in res.message
res = e.report("reporter2", None)
assert res.should_update
assert res.message is None
res = e.report("reporter1", None)
assert not res.should_update
|
e59357b0de18f789720b0caec3fdd4515e8bc73c
|
87491323e6295845dd7239fc213bc6b7955873c0
|
/scorers/classification/binary/average_mcc.py
|
8ac2984d20570340bd4e72b764ac6ebb93ce1756
|
[
"Apache-2.0"
] |
permissive
|
h2oai/driverlessai-recipes
|
e32d5632dfed52f19c9caeb406d30b956829658c
|
aeb082cfb773d6a0e45fbb7514610987f97b2799
|
refs/heads/master
| 2023-09-03T17:50:45.184460
| 2023-08-21T12:48:47
| 2023-08-21T12:48:47
| 174,063,977
| 239
| 103
|
Apache-2.0
| 2023-08-21T12:48:49
| 2019-03-06T03:26:19
|
Python
|
UTF-8
|
Python
| false
| false
| 1,825
|
py
|
average_mcc.py
|
"""Averaged Matthews Correlation Coefficient (averaged over several thresholds, for imbalanced problems). Example how to use Driverless AI's internal scorer."""
import typing
import numpy as np
from h2oaicore.metrics import CustomScorer
from h2oaicore.metrics import MccScorer
from sklearn.preprocessing import LabelEncoder
class MyAverageMCCScorer(CustomScorer):
_description = "Average MCC over several thresholds"
_binary = True
_maximize = True
_perfect_score = 1
_display_name = "AVGMCC"
def score(self,
actual: np.array,
predicted: np.array,
sample_weight: typing.Optional[np.array] = None,
labels: typing.Optional[np.array] = None,
**kwargs) -> float:
"""Reasoning behind using several thresholds
MCC can vary a lot from one threshold to another
and especially may give different results on train and test datasets
Using an average over thresholds close to the prior may lead to a flatter
response and better generalization.
"""
lb = LabelEncoder()
labels = list(lb.fit_transform(labels))
actual = lb.transform(actual)
# Compute thresholds
if sample_weight is None:
sample_weight = np.ones(actual.shape[0])
prior = np.sum(actual * sample_weight) / np.sum(sample_weight)
thresholds = [rate * prior for rate in np.arange(0.8, 1.3, 0.1)]
# Compute average MCC for the thresholds
avg_score = 0
for t in thresholds:
avg_score += MccScorer().score(
actual=actual,
predicted=(predicted > t).astype(np.uint8),
sample_weight=sample_weight,
labels=labels
)
return avg_score / len(thresholds)
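# Worked example (illustrative): with a weighted class prior of 0.2, the
# thresholds evaluated are 0.2 * [0.8, 0.9, 1.0, 1.1, 1.2]
# = [0.16, 0.18, 0.20, 0.22, 0.24], and the returned score is the mean MCC
# over those five cutoffs.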
|
90b67cf71aab3098142cabee4909ca694c43b97d
|
e061ab21018ac80573d03ef0c3cba8f448c4b7cc
|
/inference/server/alembic/versions/2023_05_29_1551-5ed411a331f4_add_active_thread_tail_messsage_id_and_.py
|
e4c5045f7af8b77614013fa8e62e73fe7025a4ff
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
LAION-AI/Open-Assistant
|
8b82c24fac954da421d66c3e90fbae6776ae6280
|
8c0e1a31bea1542dd39716b1dbbecd46785d9d23
|
refs/heads/main
| 2023-08-25T23:33:38.114219
| 2023-08-22T21:04:33
| 2023-08-22T21:04:33
| 577,603,990
| 34,014
| 3,206
|
Apache-2.0
| 2023-09-11T19:13:48
| 2022-12-13T05:24:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,091
|
py
|
2023_05_29_1551-5ed411a331f4_add_active_thread_tail_messsage_id_and_.py
|
"""add_active_thread_tail_messsage_id_and_message_eval
Revision ID: 5ed411a331f4
Revises: 5b4211625a9f
Create Date: 2023-05-29 15:51:41.857262
"""
import sqlalchemy as sa
import sqlmodel
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "5ed411a331f4"
down_revision = "5b4211625a9f"
branch_labels = None
depends_on = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"message_evaluation",
sa.Column("inferior_message_ids", postgresql.JSONB(astext_type=sa.Text()), nullable=True),
sa.Column("id", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("chat_id", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("user_id", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column("selected_message_id", sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.ForeignKeyConstraint(
["chat_id"],
["chat.id"],
),
sa.ForeignKeyConstraint(
["selected_message_id"],
["message.id"],
),
sa.ForeignKeyConstraint(
["user_id"],
["user.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_message_evaluation_chat_id"), "message_evaluation", ["chat_id"], unique=False)
op.create_index(op.f("ix_message_evaluation_user_id"), "message_evaluation", ["user_id"], unique=False)
op.add_column("chat", sa.Column("active_thread_tail_message_id", sqlmodel.sql.sqltypes.AutoString(), nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("chat", "active_thread_tail_message_id")
op.drop_index(op.f("ix_message_evaluation_user_id"), table_name="message_evaluation")
op.drop_index(op.f("ix_message_evaluation_chat_id"), table_name="message_evaluation")
op.drop_table("message_evaluation")
# ### end Alembic commands ###
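# Usage sketch (illustrative, not part of the migration): this revision is
# applied and reverted with the standard Alembic CLI, e.g.
#   alembic upgrade 5ed411a331f4
#   alembic downgrade 5b4211625a9f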
|
2ceeb406e90ae4a18d4e5901ce0cc71ea6eadf0d
|
fc1dbf86c08788d9d47b933a206331e0d3af3f58
|
/Chapter_13/test_ch13_r01_ut.py
|
ba69b297bc5d3baa9006d2711340e06e5edf2e4a
|
[] |
no_license
|
PacktPublishing/Modern-Python-Cookbook-Second-Edition
|
819e17fe1f3d7e4389a290bde699fb45e08543ab
|
43c2549d51f05df6d897753a0d1e979f71b0729d
|
refs/heads/master
| 2023-08-01T09:35:33.833038
| 2023-07-29T13:27:30
| 2023-07-29T13:27:30
| 225,331,228
| 107
| 60
| null | 2023-07-29T13:28:03
| 2019-12-02T09:06:30
|
Python
|
UTF-8
|
Python
| false
| false
| 2,612
|
py
|
test_ch13_r01_ut.py
|
"""Python Cookbook
Chapter 13, recipe 1.
"""
from pathlib import Path
import unittest
from unittest.mock import Mock, patch, mock_open, MagicMock, call
import Chapter_13.ch13_r01
class GIVEN_get_config_WHEN_load_THEN_overrides_found(unittest.TestCase):
def setUp(self):
self.system_exist = Mock(
name="mock Path('/etc').exist() == True",
exists=Mock(return_value=True),
open=mock_open(),
)
self.mock_system_path = Mock(
name='mock Path("/etc")',
__truediv__=Mock(return_value=self.system_exist),
)
self.exist = Mock(
name="mock Path.home().exist() == True",
exists=Mock(return_value=True),
open=mock_open(),
)
self.not_exist = Mock(
name="mock Path.home().exist() == False",
exists=Mock(return_value=False)
)
self.mock_home_path = Mock(
name="mock Path.home()",
__truediv__=Mock(side_effect=[self.not_exist, self.exist, self.exist]),
)
self.mock_path = Mock(
name="mock Path class",
return_value=self.mock_system_path, # Path("/etc")
home=Mock(return_value=self.mock_home_path), # Path.home()
)
self.mock_load_config_file = Mock(
name="mock_load_config_file",
side_effect=[{"some_setting": 1}, {"another_setting": 2}],
)
def runTest(self):
with patch("Chapter_13.ch13_r01.Path", self.mock_path), patch(
"Chapter_13.ch13_r01.load_config_file", self.mock_load_config_file
):
config = Chapter_13.ch13_r01.get_config()
# print(config)
self.assertEqual(2, config["another_setting"])
self.assertEqual(1, config["some_setting"])
self.assertEqual("Built-In Choice", config["some_option"])
# print(self.mock_load.mock_calls)
self.mock_load_config_file.assert_has_calls(
[call(self.system_exist), call(self.exist)]
)
# print(self.mock_expanded_home_path.mock_calls)
self.mock_home_path.assert_has_calls(
[
call.__truediv__(".bash_profile"),
call.__truediv__(".bash_login"),
call.__truediv__(".profile"),
]
)
# print(self.mock_path.mock_calls)
self.mock_path.assert_has_calls(
[call("/etc"), call.home(), call.home(), call.home()]
)
self.exist.assert_has_calls([call.exists()])
self.system_exist.assert_has_calls([call.exists()])
|
9ddb30ea49802e58a4b659f77f283ec745ed288f
|
8e6bb9c1a620a162b7d017c2373dd01be54ea86d
|
/bio/purge_dups/pbcstat/wrapper.py
|
9909e4cf2b75f47bfe877f18c37390232c017ef6
|
[] |
no_license
|
snakemake/snakemake-wrappers
|
5d0963502c26eb709513567e25422871fe477cf2
|
996bdcf2a96535b967dfa483c363a5496f4b3906
|
refs/heads/master
| 2023-08-19T05:18:44.337503
| 2023-08-18T12:03:38
| 2023-08-18T12:03:38
| 213,319,194
| 184
| 189
| null | 2023-09-12T11:38:35
| 2019-10-07T07:20:59
|
CAP CDS
|
UTF-8
|
Python
| false
| false
| 573
|
py
|
wrapper.py
|
__author__ = "Filipe G. Vieira"
__copyright__ = "Copyright 2022, Filipe G. Vieira"
__license__ = "MIT"
import tempfile
from snakemake.shell import shell
extra = snakemake.params.get("extra", "")
log = snakemake.log_fmt_shell(stdout=True, stderr=True)
with tempfile.TemporaryDirectory() as tmpdir:
shell("pbcstat {extra} -O {tmpdir} {snakemake.input} {log}")
if snakemake.output.get("cov"):
shell("cat {tmpdir}/PB.base.cov > {snakemake.output.cov}")
if snakemake.output.get("stat"):
shell("cat {tmpdir}/PB.stat > {snakemake.output.stat}")
|
e433fc04521ebdb229b9758e65d1c17e77388291
|
dcfc88503e3a8df5d9083b512178d254727d1a31
|
/axelrod/tests/unit/test_deterministic_cache.py
|
7d3929a2d4f4a62d8b19aca1b0873f3110ae41e0
|
[
"MIT"
] |
permissive
|
Axelrod-Python/Axelrod
|
b8502822da103fbf1a56ffbc090453b95bf9f2d8
|
fa748627cd4f0333bb2dbfcb1454372a78a9098a
|
refs/heads/dev
| 2023-09-04T06:41:55.216809
| 2023-07-10T19:42:54
| 2023-07-14T02:37:16
| 30,959,449
| 673
| 289
|
NOASSERTION
| 2023-07-14T02:37:18
| 2015-02-18T09:37:17
|
Python
|
UTF-8
|
Python
| false
| false
| 3,980
|
py
|
test_deterministic_cache.py
|
import os
import pathlib
import pickle
import unittest
import axelrod as axl
from axelrod.load_data_ import axl_filename
C, D = axl.Action.C, axl.Action.D
class TestDeterministicCache(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.test_key = (axl.TitForTat(), axl.Defector())
cls.test_value = [(C, D), (D, D), (D, D)]
save_path = pathlib.Path("test_outputs/test_cache_save.txt")
cls.test_save_file = axl_filename(save_path)
load_path = pathlib.Path("test_outputs/test_cache_load.txt")
cls.test_load_file = axl_filename(load_path)
test_data_to_pickle = {
("Tit For Tat", "Defector"): [(C, D), (D, D), (D, D)]
}
cls.test_pickle = pickle.dumps(test_data_to_pickle)
with open(cls.test_load_file, "wb") as f:
f.write(cls.test_pickle)
@classmethod
def tearDownClass(cls):
os.remove(cls.test_save_file)
os.remove(cls.test_load_file)
def setUp(self):
self.cache = axl.DeterministicCache()
def test_basic_init(self):
self.assertTrue(self.cache.mutable)
def test_init_from_file(self):
loaded_cache = axl.DeterministicCache(file_name=self.test_load_file)
self.assertEqual(loaded_cache[self.test_key], self.test_value)
def test_setitem(self):
self.cache[self.test_key] = self.test_value
self.assertEqual(self.cache[self.test_key], self.test_value)
def test_setitem_invalid_key_not_tuple(self):
invalid_key = "test"
with self.assertRaises(ValueError):
self.cache[invalid_key] = self.test_value
def test_setitem_invalid_key_first_two_elements_not_player(self):
invalid_key = ("test", "test")
with self.assertRaises(ValueError):
self.cache[invalid_key] = self.test_value
invalid_key = (axl.TitForTat(), "test")
with self.assertRaises(ValueError):
self.cache[invalid_key] = self.test_value
invalid_key = ("test", axl.TitForTat())
with self.assertRaises(ValueError):
self.cache[invalid_key] = self.test_value
def test_setitem_invalid_key_too_many_players(self):
invalid_key = (axl.TitForTat(), axl.TitForTat(), axl.TitForTat())
with self.assertRaises(ValueError):
self.cache[invalid_key] = self.test_value
def test_setitem_invalid_key_stochastic_player(self):
invalid_key = (axl.Random(), axl.TitForTat())
with self.assertRaises(ValueError):
self.cache[invalid_key] = self.test_value
invalid_key = (axl.TitForTat(), axl.Random())
with self.assertRaises(ValueError):
self.cache[invalid_key] = self.test_value
def test_setitem_invalid_value_not_list(self):
with self.assertRaises(ValueError):
self.cache[self.test_key] = 5
def test_setitem_with_immutable_cache(self):
self.cache.mutable = False
with self.assertRaises(ValueError):
self.cache[self.test_key] = self.test_value
def test_save(self):
self.cache[self.test_key] = self.test_value
self.cache.save(self.test_save_file)
with open(self.test_save_file, "rb") as f:
text = f.read()
self.assertEqual(text, self.test_pickle)
def test_load(self):
self.cache.load(self.test_load_file)
self.assertEqual(self.cache[self.test_key], self.test_value)
    def test_load_error_for_incorrect_format(self):
path = pathlib.Path("test_outputs/test.cache")
filename = axl_filename(path)
with open(filename, "wb") as io:
pickle.dump(range(5), io)
with self.assertRaises(ValueError):
self.cache.load(filename)
def test_del_item(self):
self.cache[self.test_key] = self.test_value
self.assertTrue(self.test_key in self.cache)
del self.cache[self.test_key]
self.assertFalse(self.test_key in self.cache)
|
733b8907e8b603219122518f56a1eef77671fb82
|
44ba493efd0fd7ae78880d3d93cc0d66166935e5
|
/tests/integrations/xcode/test_ensure_xcode_is_installed.py
|
a83b52236b929f5bf7c65ed5294100ade5499345
|
[
"BSD-3-Clause"
] |
permissive
|
beeware/briefcase
|
1b3eaebf0791728c68986809aa07abc436e422c6
|
cc2dae1ffc58f9700d0ca57461cb05909bc01bec
|
refs/heads/main
| 2023-09-01T19:24:15.424713
| 2023-09-01T04:35:53
| 2023-09-01T04:35:53
| 39,841,700
| 1,609
| 256
|
BSD-3-Clause
| 2023-09-11T10:04:34
| 2015-07-28T15:20:03
|
Python
|
UTF-8
|
Python
| false
| false
| 13,252
|
py
|
test_ensure_xcode_is_installed.py
|
import os
import subprocess
from unittest import mock
import pytest
from briefcase.exceptions import BriefcaseCommandError
from briefcase.integrations.xcode import Xcode
@pytest.fixture
def default_xcode_install_path(tmp_path):
return tmp_path / "Applications" / "Xcode.app"
@pytest.fixture
def xcode(default_xcode_install_path):
"""Create a dummy location for Xcode."""
default_xcode_install_path.mkdir(parents=True, exist_ok=True)
return os.fsdecode(default_xcode_install_path)
def test_not_installed(tmp_path, mock_tools):
"""If No Xcode is installed, raise an error."""
mock_tools.subprocess.check_output.side_effect = subprocess.CalledProcessError(
cmd=["xcode-select", "-p"],
returncode=2,
)
# Test a location where Xcode *won't* be installed
with pytest.raises(BriefcaseCommandError):
Xcode.ensure_xcode_is_installed(mock_tools)
# subprocess was invoked as expected
mock_tools.subprocess.check_output.assert_has_calls(
[mock.call(["xcode-select", "-p"])],
any_order=False,
)
def test_custom_install_location(default_xcode_install_path, tmp_path, mock_tools):
"""If Xcode is in a non-default location, that's fine."""
# Create a custom Xcode location
custom_xcode_location = tmp_path / "custom" / "Xcode.app"
custom_xcode_location.mkdir(parents=True, exist_ok=True)
mock_tools.subprocess.check_output.side_effect = [
os.fsdecode(custom_xcode_location) + "\n", # xcode-select -p
"Xcode 13.3.1\nBuild version 11B500\n", # xcodebuild -version
]
Xcode.ensure_xcode_is_installed(
mock_tools,
xcode_location=default_xcode_install_path,
)
# subprocess was invoked as expected
mock_tools.subprocess.check_output.assert_has_calls(
[
mock.call(["xcode-select", "-p"]),
mock.call(["xcodebuild", "-version"]),
],
any_order=False,
)
def test_command_line_tools_only(default_xcode_install_path, mock_tools):
"""If the cmdline tools are installed, but Xcode isn't, raise an error."""
mock_tools.subprocess.check_output.side_effect = [
"/Library/Developer/CommandLineTools\n", # xcode-select -p
subprocess.CalledProcessError(
cmd=["xcodebuild", "-version"],
returncode=1,
output="xcode-select: error: tool 'xcodebuild' requires Xcode, but "
"active developer directory '/Library/Developer/CommandLineTools' "
"is a command line tools instance\n",
),
]
with pytest.raises(
BriefcaseCommandError,
match=r"You have the Xcode command line tools installed",
):
Xcode.ensure_xcode_is_installed(
mock_tools,
xcode_location=default_xcode_install_path,
)
# subprocess was invoked as expected
mock_tools.subprocess.check_output.assert_has_calls(
[
mock.call(["xcode-select", "-p"]),
mock.call(["xcodebuild", "-version"]),
],
any_order=False,
)
def test_installed_but_command_line_tools_selected(
default_xcode_install_path,
xcode,
mock_tools,
):
"""If Xcode is installed, but the cmdline tools are selected raise an error."""
mock_tools.subprocess.check_output.side_effect = [
xcode + "\n", # xcode-select -p
subprocess.CalledProcessError(
cmd=["xcodebuild", "-version"],
returncode=1,
output="xcode-select: error: tool 'xcodebuild' requires Xcode, but "
"active developer directory '/Library/Developer/CommandLineTools' "
"is a command line tools instance\n",
),
]
with pytest.raises(
BriefcaseCommandError,
match=r"Xcode appears to be installed, but the active developer directory ",
):
Xcode.ensure_xcode_is_installed(
mock_tools,
xcode_location=default_xcode_install_path,
)
# subprocess was invoked as expected
mock_tools.subprocess.check_output.assert_has_calls(
[
mock.call(["xcode-select", "-p"]),
mock.call(["xcodebuild", "-version"]),
],
any_order=False,
)
def test_custom_install_with_command_line_tools(
default_xcode_install_path,
tmp_path,
mock_tools,
):
"""If the cmdline tools are installed, and Xcode is in a non-default location, raise
an error."""
# Create a custom Xcode location
custom_xcode_location = tmp_path / "custom" / "Xcode.app"
custom_xcode_location.mkdir(parents=True, exist_ok=True)
mock_tools.subprocess.check_output.side_effect = [
"/Library/Developer/CommandLineTools\n", # xcode-select -p
subprocess.CalledProcessError(
cmd=["xcodebuild", "-version"],
returncode=1,
output="xcode-select: error: tool 'xcodebuild' requires Xcode, but "
"active developer directory '/Library/Developer/CommandLineTools' "
"is a command line tools instance\n",
),
]
with pytest.raises(
BriefcaseCommandError,
match=r"You have the Xcode command line tools installed",
):
Xcode.ensure_xcode_is_installed(
mock_tools,
xcode_location=default_xcode_install_path,
)
# subprocess was invoked as expected
mock_tools.subprocess.check_output.assert_has_calls(
[
mock.call(["xcode-select", "-p"]),
mock.call(["xcodebuild", "-version"]),
],
any_order=False,
)
def test_installed_but_corrupted(xcode, mock_tools):
"""If the Xcode folder exists, but xcodebuild breaks, raise an error."""
mock_tools.subprocess.check_output.side_effect = [
xcode + "\n", # xcode-select -p
subprocess.CalledProcessError(
cmd=["xcodebuild", "-version"],
returncode=1,
output="Badness occurred",
),
]
with pytest.raises(
BriefcaseCommandError, match=r"should return the current Xcode version"
):
Xcode.ensure_xcode_is_installed(mock_tools, xcode_location=xcode)
# subprocess was invoked as expected
mock_tools.subprocess.check_output.assert_has_calls(
[
mock.call(["xcode-select", "-p"]),
mock.call(["xcodebuild", "-version"]),
],
any_order=False,
)
def test_installed_no_minimum_version(xcode, mock_tools):
"""If Xcode is installed, but there's no minimum version, check is satisfied."""
mock_tools.subprocess.check_output.side_effect = [
xcode + "\n", # xcode-select -p
"Xcode 11.2.1\nBuild version 11B500\n", # xcodebuild -version
]
# Check passes without an error.
Xcode.ensure_xcode_is_installed(mock_tools, xcode_location=xcode)
# subprocess was invoked as expected
mock_tools.subprocess.check_output.assert_has_calls(
[
mock.call(["xcode-select", "-p"]),
mock.call(["xcodebuild", "-version"]),
],
any_order=False,
)
def test_installed_extra_output(capsys, xcode, mock_tools):
"""If Xcode but outputs extra content, the check is still satisfied."""
# This specific output was seen in the wild with Xcode 13.2.1; see #668
mock_tools.subprocess.check_output.side_effect = [
xcode + "\n", # xcode-select -p
"\n".join(
[
"objc[86306]: Class AMSupportURLConnectionDelegate is implemented in both /usr/lib/libauthinstall.dylib (0x20d17ab90) and /Library/Apple/System/Library/PrivateFrameworks/MobileDevice.framework/Versions/A/MobileDevice (0x1084b82c8). One of the two will be used. Which one is undefined." # noqa: E501
"objc[86306]: Class AMSupportURLSession is implemented in both /usr/lib/libauthinstall.dylib (0x20d17abe0) and /Library/Apple/System/Library/PrivateFrameworks/MobileDevice.framework/Versions/A/MobileDevice (0x1084b8318). One of the two will be used. Which one is undefined.", # noqa: E501
"Xcode 13.2.1",
"Build version 13C100",
]
),
]
# Check passes without an error.
Xcode.ensure_xcode_is_installed(
mock_tools,
xcode_location=xcode,
min_version=(11, 1),
)
# subprocess was invoked as expected
mock_tools.subprocess.check_output.assert_has_calls(
[
mock.call(["xcode-select", "-p"]),
mock.call(["xcodebuild", "-version"]),
],
any_order=False,
)
# No warning generated.
out = capsys.readouterr().out
assert "WARNING" not in out
@pytest.mark.parametrize(
"min_version, version",
[
# Exact match
((11, 2, 1), "11.2.1"), # Exact match
((11, 2), "11.2.0"), # Exact match, implied revision.
((11,), "11.0.0"), # Exact match, implied minor version.
# Rules still work for single digit versions
((8, 2, 1), "8.2.1"), # Exact match
((8, 2), "8.2.0"), # Exact match, implied revision.
((8,), "8.0.0"), # Exact match, implied minor version.
# Exceeds version
((11, 2, 1), "11.2.5"), # Exceeds revision requirement
((11, 2, 1), "11.3.0"), # Exceeds minor requirement
((11, 2, 1), "12.0.0"), # Exceeds major requirement
((11, 2), "11.2.5"), # Exceeds implied revision requirement
((11, 2), "11.3.0"), # Exceeds minor requirement
((11, 2), "12.0.0"), # Exceeds major requirement
((11,), "11.2.5"), # Exceeds implied revision requirement
((11,), "11.3.0"), # Exceeds implied minor requirement
((11,), "12.0.0"), # Exceeds major requirement
# 2 digit version number
# exact match
((11, 2, 0), "11.2"), # Exact match.
((11, 2), "11.2"), # Exact match, implied revision.
((11,), "11.2"), # Exact match, implied minor version.
# exceeds version
((11, 1, 1), "11.2"), # Exact match.
((11, 1), "11.2"), # Exact match, implied revision.
((11,), "11.2"), # Exact match, implied minor version.
],
)
def test_installed_with_minimum_version_success(
min_version,
version,
capsys,
xcode,
mock_tools,
):
"""Check Xcode can meet a minimum version requirement."""
def check_output_mock(cmd_list, *args, **kwargs):
if cmd_list == ["xcode-select", "-p"]:
return xcode + "\n"
if cmd_list == ["xcodebuild", "-version"]:
return f"Xcode {version}\nBuild version 11B500\n"
return mock.DEFAULT
mock_tools.subprocess.check_output.side_effect = check_output_mock
# Check passes without an error.
Xcode.ensure_xcode_is_installed(
mock_tools,
min_version=min_version,
)
# assert xcode-select and xcodebuild were invoked
mock_tools.subprocess.check_output.assert_has_calls(
[
mock.call(["xcode-select", "-p"]),
mock.call(["xcodebuild", "-version"]),
],
any_order=False,
)
# Make sure the warning wasn't displayed.
out = capsys.readouterr().out
assert "WARNING" not in out
@pytest.mark.parametrize(
"min_version, version",
[
        ((11, 2, 5), "11.2.1"),  # Insufficient revision
        ((11, 3), "11.2.1"),  # Insufficient minor version
        ((12,), "11.2.1"),  # Insufficient major version
        ((8, 2, 5), "8.2.1"),  # Insufficient revision
        ((8, 3), "8.2.1"),  # Insufficient minor version
        ((9,), "8.2.1"),  # Insufficient major version
],
)
def test_installed_with_minimum_version_failure(
min_version,
version,
xcode,
mock_tools,
):
"""Check Xcode fail to meet a minimum version requirement."""
mock_tools.subprocess.check_output.side_effect = [
xcode + "\n", # xcode-select -p
f"Xcode {version}\nBuild version 11B500\n", # xcodebuild -version
]
# Check raises an error.
with pytest.raises(BriefcaseCommandError):
Xcode.ensure_xcode_is_installed(
mock_tools,
min_version=min_version,
xcode_location=xcode,
)
# subprocess was invoked as expected
mock_tools.subprocess.check_output.assert_has_calls(
[
mock.call(["xcode-select", "-p"]),
mock.call(["xcodebuild", "-version"]),
],
any_order=False,
)
def test_unexpected_version_output(capsys, xcode, mock_tools):
"""If xcodebuild returns unexpected output, assume it's ok..."""
mock_tools.subprocess.check_output.side_effect = [
xcode + "\n", # xcode-select -p
"Wibble Wibble Wibble\n", # xcodebuild -version
]
# Check passes without an error...
Xcode.ensure_xcode_is_installed(
mock_tools,
min_version=(11, 2, 1),
xcode_location=xcode,
)
# subprocess was invoked as expected
mock_tools.subprocess.check_output.assert_has_calls(
[
mock.call(["xcode-select", "-p"]),
mock.call(["xcodebuild", "-version"]),
],
any_order=False,
)
# ...but stdout contains a warning
out = capsys.readouterr().out
assert "************" in out
|
43cd152e8b10c40081353dde2937352d880c793f
|
5ecd6c73e60e15f5d426ae8dfcc5fb34540010f7
|
/tryalgo/graph.py
|
95907f47d6259a0897da3f8fa6e2f5abae32531f
|
[
"MIT"
] |
permissive
|
jilljenn/tryalgo
|
736568f223d9a08db9ec392a2420b478aff6039a
|
634645707ebf2489356009a6f91f012b55b1ee39
|
refs/heads/master
| 2023-08-24T21:30:57.871068
| 2023-07-09T18:08:33
| 2023-07-09T18:08:33
| 50,119,000
| 390
| 124
|
MIT
| 2023-01-29T09:47:45
| 2016-01-21T16:05:54
|
Python
|
UTF-8
|
Python
| false
| false
| 14,038
|
py
|
graph.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""\
Reading graphs from files and writing into files
jill-jênn vie and christoph dürr - 2015-2019
"""
# pylint: disable=bad-whitespace, line-too-long, missing-docstring
# pylint: disable=dangerous-default-value, too-many-locals, too-many-branches
# from __future__ import annotations
from typing import List, Dict, Union, Any
def readval(file, ty):
"""Reads a line from file with an item of type ty
:param file: input stream, for example sys.stdin
:param ty: a type, for example int
:returns: an element of type ty
"""
return ty(file.readline())
def readtab(fi, ty):
"""Reads a line from file with a space separated list
of items of type ty
    :param fi: input stream, for example sys.stdin
:param ty: a type, for example int
:returns: a tuple with elements of type ty
"""
return tuple(map(ty, fi.readline().split()))
# pylint: disable=no-else-return
def read_graph(filename, directed=False, weighted=False, default_weight=None):
"""Read a graph from a text file
:param filename: plain text file. All numbers are separated by space.
Starts with a line containing n (#vertices) and m (#edges).
Then m lines follow, for each edge.
Vertices are numbered from 0 to n-1.
Line for unweighted edge u,v contains two integers u, v.
Line for weighted edge u,v contains three integers u, v, w[u,v].
:param directed: true for a directed graph, false for undirected
:param weighted: true for an edge weighted graph
:returns: graph in listlist format, possibly followed by weight matrix
:complexity: O(n + m) for unweighted graph,
:math:`O(n^2)` for weighted graph
"""
with open(filename, 'r') as f:
while True:
line = f.readline() # ignore leading comments
if line[0] != '#':
break
nb_nodes, nb_edges = tuple(map(int, line.split()))
graph = [[] for u in range(nb_nodes)]
if weighted:
weight = [[default_weight] * nb_nodes for v in range(nb_nodes)]
for v in range(nb_nodes):
weight[v][v] = 0
for _ in range(nb_edges):
u, v, w = readtab(f, int)
graph[u].append(v)
weight[u][v] = w
if not directed:
graph[v].append(u)
weight[v][u] = w
return graph, weight
else:
for _ in range(nb_edges):
# si le fichier contient des poids, ils seront ignorés
u, v = readtab(f, int)[:2]
graph[u].append(v)
if not directed:
graph[v].append(u)
return graph
# pylint: disable=too-many-arguments, singleton-comparison
def write_graph(dotfile, graph, directed=False,
node_label=None, arc_label=None, comment="",
node_mark=set(), arc_mark=set()):
"""Writes a graph to a file in the DOT format
:param dotfile: the filename.
:param graph: directed graph in listlist or listdict format
:param directed: true if graph is directed, false if undirected
:param node_label: vertex label table or None
:param arc_label: arc label matrix or None
:param comment: comment string for the dot file or None
:param node_mark: set of nodes to be shown in gray
    :param arc_mark: set of arcs to be shown in red
:complexity: `O(|V| + |E|)`
"""
with open(dotfile, 'w') as f:
if directed:
f.write("digraph G{\n")
else:
f.write("graph G{\n")
if comment:
f.write('label="%s";\n' % comment)
V = range(len(graph))
# -- vertices
for u in V:
if node_mark and u in node_mark:
f.write('%d [style=filled, color="lightgrey", ' % u)
else:
f.write('%d [' % u)
if node_label:
f.write('label="%u [%s]"];\n' % (u, node_label[u]))
else:
f.write('shape=circle, label="%u"];\n' % u)
# -- edges
if isinstance(arc_mark, list):
arc_mark = set((u, arc_mark[u]) for u in V)
for u in V:
for v in graph[u]:
if not directed and u > v:
continue # don't show twice the edge
if arc_label and arc_label[u][v] is None:
continue # suppress arcs with no label
if directed:
arc = "%d -> %d " % (u, v)
else:
arc = "%d -- %d " % (u, v)
if arc_mark and ((v, u) in arc_mark or
(not directed and (u, v) in arc_mark)):
pen = 'color="red"'
else:
pen = ""
if arc_label:
tag = 'label="%s"' % arc_label[u][v]
else:
tag = ""
if tag and pen:
sep = ", "
else:
sep = ""
f.write(arc + "[" + tag + sep + pen + "];\n")
f.write("}")
# snip{ tree_representations
def tree_prec_to_adj(prec, root=0):
"""Transforms a tree given as predecessor table into adjacency list form
:param prec: predecessor table representing a tree, prec[u] == v iff u is
successor of v, except for the root where prec[root] == root
:param root: root vertex of the tree
:returns: undirected graph in listlist representation
:complexity: linear
"""
n = len(prec)
graph = [[prec[u]] for u in range(n)] # add predecessors
graph[root] = []
for u in range(n): # add successors
if u != root:
graph[prec[u]].append(u)
return graph
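# Worked example (illustrative): prec = [0, 0, 0, 1] with root 0 yields
# [[1, 2], [0, 3], [0], [1]] -- every non-root vertex lists its predecessor
# first, followed by its successors.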
def tree_adj_to_prec(graph, root=0):
"""Transforms a tree given as adjacency list into predecessor table form.
    If graph is not a tree, a DFS spanning tree is returned instead.
:param graph: directed graph in listlist or listdict format
:returns: tree in predecessor table representation
:complexity: linear
"""
prec = [None] * len(graph)
prec[root] = root # mark to visit root only once
to_visit = [root]
while to_visit: # DFS
node = to_visit.pop()
for neighbor in graph[node]:
if prec[neighbor] is None:
prec[neighbor] = node
to_visit.append(neighbor)
prec[root] = None # put the standard mark for root
return prec
# snip}
# snip{ add_reverse_arcs
# pylint: disable=unidiomatic-typecheck
def add_reverse_arcs(graph, capac=None):
"""Utility function for flow algorithms that need for every arc (u,v),
the existence of an (v,u) arc, by default with zero capacity.
graph can be in adjacency list, possibly with capacity matrix capac.
or graph can be in adjacency dictionary, then capac parameter is ignored.
:param capac: arc capacity matrix
:param graph: in listlist representation, or in listdict representation,
in this case capac is ignored
:complexity: linear
:returns: nothing, but graph is modified
"""
for u, _ in enumerate(graph):
for v in graph[u]:
if u not in graph[v]:
if type(graph[v]) is list:
graph[v].append(u)
if capac:
capac[v][u] = 0
else:
assert type(graph[v]) is dict
graph[v][u] = 0
# snip}
# -----------------------------------------------------------------------------
# transformations between different graph representations
# listlist is an adjacency list G,
# where G[u] is the list of vertices v such that there is an arc (u,v)
# if the graph is weighted, the weights are represented by a matrix W
# such that W[u][v] is the weight of arc (u,v)
# listdict is an arc weighted adjacency list G,
# where G[u] is a dictionary.
# For each arc (u,v), G[u][v] is the weight of the arc.
# dictdict is an arc weighted adjacency dictionary G,
# where G[u] is a dictionary.
# For each arc (u,v), G[u][v] is the weight of the arc.
# matrix is an adjacency matrix M,
# such that M[u][v] is None if there is no arc (u,v)
# otherwise it is the weight of the arc.
# Value M[u][v]=True can be used for unweighted graphs.
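# Worked example (illustrative): the weighted path graph 0 - 1 - 2 with
# w(0, 1) = 3 and w(1, 2) = 5 in each representation:
#   listlist + matrix: G = [[1], [0, 2], [1]]
#                      W = [[0, 3, None], [3, 0, 5], [None, 5, 0]]
#   listdict:          G = [{1: 3}, {0: 3, 2: 5}, {1: 5}]
#   dictdict:          G = {'a': {'b': 3}, 'b': {'a': 3, 'c': 5}, 'c': {'b': 5}}
#   matrix:            M = [[None, 3, None], [3, None, 5], [None, 5, None]]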
# pylint: disable=no-else-return
def matrix_to_listlist(weight):
"""transforms a squared weight matrix in a adjacency table of type listlist
encoding the directed graph corresponding to the entries of the matrix
different from None
:param weight: squared weight matrix, weight[u][v] != None iff arc (u, v)
exists
:complexity: linear
:returns: the unweighted directed graph in the listlist representation,
listlist[u] contains all v for which arc (u,v) exists.
"""
graph = [[] for _ in range(len(weight))]
for u, _ in enumerate(graph):
for v in range(len(graph)):
if weight[u][v] is not None:
graph[u].append(v)
return graph
def listlist_and_matrix_to_listdict(graph, weight=None):
"""Transforms the weighted adjacency list representation of a graph
of type listlist + optional weight matrix
into the listdict representation
:param graph: in listlist representation
:param weight: optional weight matrix
:returns: graph in listdict representation
:complexity: linear
"""
if weight:
return [{v: weight[u][v] for v in graph[u]} for u in range(len(graph))]
else:
return [{v: None for v in graph[u]} for u in range(len(graph))]
def listdict_to_listlist_and_matrix(sparse):
"""Transforms the adjacency list representation of a graph
of type listdict into the listlist + weight matrix representation
:param sparse: graph in listdict representation
:returns: couple with listlist representation, and weight matrix
:complexity: linear
"""
V = range(len(sparse))
graph = [[] for _ in V]
weight = [[None for v in V] for u in V]
for u in V:
for v in sparse[u]:
graph[u].append(v)
weight[u][v] = sparse[u][v]
return graph, weight
def dictdict_to_listdict(dictgraph):
"""Transforms a dict-dict graph representation into a
adjacency dictionary representation (list-dict)
:param dictgraph: dictionary mapping vertices to dictionary
such that dictgraph[u][v] is weight of arc (u,v)
:complexity: linear
:returns: tuple with graph (listdict), name_to_node (dict),
node_to_name (list)
"""
n = len(dictgraph) # vertices
node_to_name = list(dictgraph.keys()) # bijection indices <-> names
node_to_name.sort() # to make it more readable
name_to_node = {}
for i in range(n):
name_to_node[node_to_name[i]] = i
sparse = [{} for _ in range(n)] # build sparse graph
for u in dictgraph:
for v in dictgraph[u]:
sparse[name_to_node[u]][name_to_node[v]] = dictgraph[u][v]
return sparse, name_to_node, node_to_name
# -----------------------------------------------------------------------------
# for shortest paths
def extract_path(prec, v):
"""extracts a path in form of vertex list from source to vertex v
given a precedence table prec leading to the source
:param prec: precedence table of a tree
:param v: vertex on the tree
:returns: path from root to v, in form of a list
:complexity: linear
"""
L = []
while v is not None:
L.append(v)
v = prec[v]
        assert v not in L  # prevent infinite loops for a badly formed table prec
return L[::-1]
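# Worked example (illustrative): prec = [None, 0, 1] and v = 2 walks
# 2 -> 1 -> 0 and returns the root-to-v path [0, 1, 2].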
# -----------------------------------------------------------------------------
# for exporting flows in dot format
def make_flow_labels(graph, flow, capac):
"""Generate arc labels for a flow in a graph with capacities.
:param graph: adjacency list or adjacency dictionary
:param flow: flow matrix or adjacency dictionary
:param capac: capacity matrix or adjacency dictionary
:returns: listdic graph representation, with the arc label strings
"""
V = range(len(graph))
arc_label = [{v: "" for v in graph[u]} for u in V]
for u in V:
for v in graph[u]:
if flow[u][v] >= 0:
arc_label[u][v] = "%s/%s" % (flow[u][v], capac[u][v])
else:
arc_label[u][v] = None # do not show negative flow arcs
return arc_label
# -----------------------------------------------------------------------------
# for creating a graph using vertex names
# pylint: disable=arguments-out-of-order
# snip{ class_graph
class Graph_named_vertices:
def __init__(self):
self.neighbors = []
self.name2node = {}
self.node2name = []
self.weight = []
def __len__(self):
return len(self.node2name)
def __getitem__(self, v):
return self.neighbors[v]
def add_node(self, name):
assert name not in self.name2node
self.name2node[name] = len(self.name2node)
self.node2name.append(name)
self.neighbors.append([])
self.weight.append({})
return self.name2node[name]
def add_edge(self, name_u, name_v, weight_uv=None):
self.add_arc(name_u, name_v, weight_uv)
self.add_arc(name_v, name_u, weight_uv)
def add_arc(self, name_u, name_v, weight_uv=None):
u = self.name2node[name_u]
v = self.name2node[name_v]
self.neighbors[u].append(v)
self.weight[u][v] = weight_uv
# snip}
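# Usage sketch (illustrative): building a small graph with named vertices
#   g = Graph_named_vertices()
#   for name in "abc":
#       g.add_node(name)
#   g.add_edge("a", "b", 3)
#   g[g.name2node["a"]]   # -> [1], i.e. the node index of "b"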
Graph = Union[List[List[int]], List[Dict[int, Any]], Graph_named_vertices]
|
2c7b9d1ac72767c9148eefda6b13ca96c1d2f6a8
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/tests/common/test_op/ascend/SecondOrder_trace_extract.py
|
0a313fd6084ccc6735009bbd441727921073641c
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,570
|
py
|
SecondOrder_trace_extract.py
|
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import akg.tvm
@akg.tvm.hybrid.script
def trace_extract_hybrid(input1):
"""
Extract matrix's diag elements.
Args:
input1:tvm.Tensor of type float32 with 3d shape [1, matrix_dim, matrix_dim].
Returns:
akg.tvm.Tensor of type float32 with 2d shape [1, matrix_dim].
"""
dim = input1.shape[1]
trace_tensor = allocate((1,dim), input1.dtype, 'local')
res1 = allocate(input1.shape, input1.dtype, 'local')
for i in range(dim):
for j in range(dim):
res1[0,i,j] = input1[0,i,j]
for j in range(dim):
trace_tensor[0,j] = res1[0,j,j]
return trace_tensor
def trace_extract(input1, target="cce"):
"""
Extract matrix's diag elements.
Args:
input1:tvm.Tensor of type float32 with 3d shape [1, matrix_dim, matrix_dim].
Returns:
akg.tvm.Tensor of type float32 with 2d shape [1, matrix_dim].
"""
trace_tensor = trace_extract_hybrid(input1)
return trace_tensor
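# Worked example (illustrative): for input1 of shape [1, 3, 3] holding a
# matrix M, the result is the diagonal [[M[0][0], M[1][1], M[2][2]]] with
# shape [1, 3].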
|
dd51b3622a7ffe8e369fad6155207f49983fdf5c
|
88f469b619b1e5a90ca9d117f56faa43324cd6d4
|
/tests/response/unmarshal_response_test.py
|
cc302ea2f9b4f02ac1ab068f3c1d0e466a91f9f5
|
[
"BSD-3-Clause"
] |
permissive
|
Yelp/bravado-core
|
6a6967259302ce28144c2969218cb44527862a74
|
f2428528dd77a72d1a83e8da8421cb706a50d287
|
refs/heads/master
| 2023-08-31T05:16:14.256092
| 2023-07-19T01:14:48
| 2023-07-19T01:14:48
| 33,146,014
| 124
| 133
|
NOASSERTION
| 2023-08-27T19:17:04
| 2015-03-30T20:23:27
|
Python
|
UTF-8
|
Python
| false
| false
| 6,324
|
py
|
unmarshal_response_test.py
|
# -*- coding: utf-8 -*-
import msgpack
import pytest
from jsonschema import ValidationError
from mock import Mock
from mock import patch
from bravado_core.content_type import APP_JSON
from bravado_core.content_type import APP_MSGPACK
from bravado_core.response import IncomingResponse
from bravado_core.response import unmarshal_response
from bravado_core.spec import Spec
@pytest.fixture
def response_spec():
return {
'description': "Day of the week",
'schema': {
'type': 'string',
},
}
def test_no_content(empty_swagger_spec):
response_spec = {
'description': "I don't have a 'schema' key so I return nothing",
}
response = Mock(spec=IncomingResponse, status_code=200)
with patch('bravado_core.response.get_response_spec') as m:
m.return_value = response_spec
op = Mock(swagger_spec=empty_swagger_spec)
result = unmarshal_response(response, op)
assert result is None
def test_json_content(empty_swagger_spec, response_spec):
response = Mock(
spec=IncomingResponse,
status_code=200,
headers={'content-type': APP_JSON},
json=Mock(return_value='Monday'),
)
with patch('bravado_core.response.get_response_spec') as m:
m.return_value = response_spec
op = Mock(swagger_spec=empty_swagger_spec)
assert 'Monday' == unmarshal_response(response, op)
def test_msgpack_content(empty_swagger_spec, response_spec):
message = 'Monday'
response = Mock(
spec=IncomingResponse,
status_code=200,
headers={'content-type': APP_MSGPACK},
raw_bytes=msgpack.dumps(message, use_bin_type=True),
)
with patch(
'bravado_core.response.get_response_spec',
return_value=response_spec,
):
op = Mock(swagger_spec=empty_swagger_spec)
assert message == unmarshal_response(response, op)
def test_text_content(empty_swagger_spec, response_spec):
response = Mock(
spec=IncomingResponse,
status_code=200,
headers={'content-type': 'text/plain'},
text='Monday',
)
with patch('bravado_core.response.get_response_spec') as m:
m.return_value = response_spec
op = Mock(swagger_spec=empty_swagger_spec)
assert 'Monday' == unmarshal_response(response, op)
def test_skips_validation(empty_swagger_spec, response_spec):
empty_swagger_spec.config['validate_responses'] = False
response = Mock(
spec=IncomingResponse,
status_code=200,
headers={'content-type': APP_JSON},
json=Mock(return_value='Monday'),
)
with patch('bravado_core.response.validate_schema_object') as val_schem:
with patch('bravado_core.response.get_response_spec') as get_resp:
get_resp.return_value = response_spec
op = Mock(swagger_spec=empty_swagger_spec)
unmarshal_response(response, op)
assert val_schem.call_count == 0
def test_performs_validation(empty_swagger_spec, response_spec):
empty_swagger_spec.config['validate_responses'] = True
response = Mock(
spec=IncomingResponse,
status_code=200,
headers={'content-type': APP_JSON},
json=Mock(return_value='Monday'),
)
with patch('bravado_core.response.validate_schema_object') as val_schem:
with patch('bravado_core.response.get_response_spec') as get_resp:
get_resp.return_value = response_spec
op = Mock(swagger_spec=empty_swagger_spec)
unmarshal_response(response, op)
assert val_schem.call_count == 1
def test_unmarshal_model_polymorphic_specs(polymorphic_spec):
pet_list_dicts = [
{
'name': 'a dog name',
'type': 'Dog',
'birth_date': '2017-03-09',
},
{
'name': 'a cat name',
'type': 'Cat',
'color': 'white',
},
]
pet_list_models = unmarshal_response(
response=Mock(
spec=IncomingResponse,
status_code=200,
headers={'content-type': APP_JSON},
json=Mock(
return_value={
'number_of_pets': len(pet_list_dicts),
'list': pet_list_dicts,
},
),
),
op=polymorphic_spec.resources['pets'].operations['get_pets'],
)
assert len(pet_list_dicts) == len(pet_list_models.list)
for list_item_model, list_item_dict in zip(pet_list_models.list, pet_list_dicts):
assert isinstance(list_item_model, polymorphic_spec.definitions['GenericPet'])
assert isinstance(list_item_model, polymorphic_spec.definitions[list_item_dict['type']])
assert list_item_model._marshal() == list_item_dict
def test_unmarshal_model_polymorphic_specs_with_invalid_discriminator(polymorphic_spec):
pet_list_dicts = [
{
'name': 'a dog name',
'type': 'a-random-value',
'birth_date': '2017-03-09',
},
]
with pytest.raises(ValidationError):
# Expecting validation error as "a-random-value" is not a valid type
unmarshal_response(
response=Mock(
spec=IncomingResponse,
status_code=200,
headers={'content-type': APP_JSON},
json=Mock(return_value=pet_list_dicts),
),
op=polymorphic_spec.resources['pets'].operations['get_pets'],
)
def test_unmarshal_model_polymorphic_specs_with_xnullable_field(polymorphic_dict):
# Test case to ensure no further regressions on issue #359
polymorphic_dict['definitions']['GenericPet']['x-nullable'] = True
polymorphic_spec = Spec.from_dict(polymorphic_dict)
PetList = polymorphic_spec.definitions['PetList']
response = unmarshal_response(
response=Mock(
spec=IncomingResponse,
status_code=200,
headers={'content-type': APP_JSON},
json=Mock(
return_value={
'number_of_pets': 1,
'list': [None],
},
),
),
op=polymorphic_spec.resources['pets'].operations['get_pets'],
)
assert response == PetList(number_of_pets=1, list=[None])
|
b81aa93bccd38701bab271b06b32fe72cb518c02
|
4a62772f2ddb531f033ced6832ec628597406a0e
|
/django_structlog/celery/steps.py
|
85914b513eab8de80f756d33a913e27c866f5028
|
[
"MIT"
] |
permissive
|
jrobichaud/django-structlog
|
e753210f2612452f836cfb22adca8649250c62c4
|
7392c38a9776361cfb5570f98cf2d5bf386ae356
|
refs/heads/master
| 2023-08-17T14:52:34.154151
| 2023-08-14T10:24:22
| 2023-08-14T10:24:22
| 180,026,172
| 301
| 37
|
MIT
| 2023-09-12T10:09:41
| 2019-04-07T21:29:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,450
|
py
|
steps.py
|
from celery import bootsteps
from . import receivers
class DjangoStructLogInitStep(bootsteps.Step):
"""``celery`` worker boot step to initialize ``django_structlog``.
>>> from celery import Celery
>>> from django_structlog.celery.steps import DjangoStructLogInitStep
>>>
>>> app = Celery("django_structlog_demo_project")
>>> app.steps['worker'].add(DjangoStructLogInitStep)
"""
def __init__(self, parent, **kwargs):
super().__init__(parent, **kwargs)
import celery
from celery.signals import (
before_task_publish,
after_task_publish,
task_prerun,
task_retry,
task_success,
task_failure,
task_revoked,
)
before_task_publish.connect(receivers.receiver_before_task_publish)
after_task_publish.connect(receivers.receiver_after_task_publish)
task_prerun.connect(receivers.receiver_task_pre_run)
task_retry.connect(receivers.receiver_task_retry)
task_success.connect(receivers.receiver_task_success)
task_failure.connect(receivers.receiver_task_failure)
task_revoked.connect(receivers.receiver_task_revoked)
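        # The task_unknown and task_rejected signals were introduced in
        # Celery 4, so only connect them when running on Celery 4 or newer.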
if celery.VERSION > (4,):
from celery.signals import task_unknown, task_rejected
task_unknown.connect(receivers.receiver_task_unknown)
task_rejected.connect(receivers.receiver_task_rejected)
|
f6f5713641c0b37fa09a8e8ae553954668c4ac5c
|
319251020b7aafa853a941dde8f2c4154dc5b833
|
/tests/sensor_samples.py
|
b6f757ae58ec7e665b04e6f287a6a73df8c9f3b2
|
[] |
no_license
|
robmarkcole/Hue-sensors-HASS
|
2df7ca8a4320d6f7248fe7a827ea9072ee2b6ad1
|
efeafb4ee47966f3e6ce1253912cea494c90e1bc
|
refs/heads/master
| 2023-09-03T03:20:06.350646
| 2021-05-19T03:28:15
| 2021-05-19T03:28:15
| 98,039,328
| 376
| 112
| null | 2021-07-20T11:19:03
| 2017-07-22T15:15:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,139
|
py
|
sensor_samples.py
|
"""Examples of raw and parsed data for known sensors."""
# Binary sensors
MOCK_ZLLPresence = {
"state": {"presence": False, "lastupdated": "2020-02-06T07:28:08"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-05-06T13:14:45"},
"config": {
"on": True,
"battery": 58,
"reachable": True,
"alert": "lselect",
"sensitivity": 2,
"sensitivitymax": 2,
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Living room sensor",
"type": "ZLLPresence",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue motion sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:17:88:01:02:00:af:28-02-0406",
"capabilities": {"certified": True, "primary": True},
}
MOCK_ZLLLightlevel = {
"state": {
"lightlevel": 0,
"dark": True,
"daylight": False,
"lastupdated": "2020-02-06T07:26:02",
},
"swupdate": {"state": "noupdates", "lastinstall": "2019-05-06T13:14:45"},
"config": {
"on": True,
"battery": 58,
"reachable": True,
"alert": "none",
"tholddark": 16000,
"tholdoffset": 7000,
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue ambient light sensor 1",
"type": "ZLLLightLevel",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue ambient light sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:17:88:01:02:00:af:28-02-0400",
"capabilities": {"certified": True, "primary": False},
}
MOCK_ZLLTemperature = {
"state": {"temperature": 1744, "lastupdated": "2020-02-06T07:26:26"},
"swupdate": {"state": "noupdates", "lastinstall": "2019-05-06T13:14:45"},
"config": {
"on": True,
"battery": 58,
"reachable": True,
"alert": "none",
"ledindication": False,
"usertest": False,
"pending": [],
},
"name": "Hue temperature sensor 1",
"type": "ZLLTemperature",
"modelid": "SML001",
"manufacturername": "Philips",
"productname": "Hue temperature sensor",
"swversion": "6.1.1.27575",
"uniqueid": "00:17:88:01:02:00:af:28-02-0402",
"capabilities": {"certified": True, "primary": False},
}
PARSED_ZLLPresence = {
"battery": 58,
"last_updated": ["2020-02-06", "07:28:08"],
"model": "SML",
"name": "Living room motion sensor",
"on": True,
"reachable": True,
"sensitivity": 2,
"state": "off",
}
PARSED_ZLLLightlevel = {
"dark": True,
"daylight": False,
"light_level": 0,
"lx": 1.0,
"threshold_dark": 16000,
"threshold_offset": 7000,
}
PARSED_ZLLTemperature = {"temperature": 17.44}
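# Note: the Hue API reports temperature in hundredths of a degree Celsius
# (1744 -> 17.44). Light level is on a logarithmic scale, roughly
# lux = 10 ** ((lightlevel - 1) / 10000), so lightlevel 0 parses to ~1.0 lx.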
# Hue geofences
MOCK_GEOFENCE = {
"state": {"presence": False, "lastupdated": "2019-04-09T06:05:00"},
"config": {"on": True, "reachable": True},
"name": "iPhone",
"type": "Geofence",
"modelid": "HA_GEOFENCE",
"manufacturername": "1ISn0hwg7oDVAmx4-gqDTN4eRR3ncfRl",
"swversion": "A_1",
"uniqueid": "L_02_iL4n7",
"recycle": False,
}
|
e24be6e95004f30d5bb76fd197a58f118b3915c1
|
e95e60cb698b7b2e7622b33643df944c0362c02c
|
/daprApps_v1/socialNetwork/socialgraph/test/test_follow.py
|
844c7eb8e04ba3a4c3d16283ee173c9d3a9d4ca5
|
[
"MIT",
"Apache-2.0",
"GPL-1.0-or-later"
] |
permissive
|
delimitrou/DeathStarBench
|
5ae251e87052a83f0daadfe2f60231ee5e999dda
|
ef9823b441aec5a8133304f1cc322f882f767812
|
refs/heads/master
| 2023-09-01T02:23:38.342474
| 2023-08-28T17:37:44
| 2023-08-28T17:37:44
| 180,669,714
| 576
| 364
|
Apache-2.0
| 2023-09-01T17:20:13
| 2019-04-10T21:57:24
|
C
|
UTF-8
|
Python
| false
| false
| 4,151
|
py
|
test_follow.py
|
import requests
import time
getfollow_url = 'http://localhost:31992/v1.0/invoke/dapr-social-graph/method/getfollow'
getfollower_url = 'http://localhost:31992/v1.0/invoke/dapr-social-graph/method/getfollower'
follow_url = 'http://localhost:31992/v1.0/invoke/dapr-social-graph/method/follow'
unfollow_url = 'http://localhost:31992/v1.0/invoke/dapr-social-graph/method/unfollow'
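# Each endpoint above follows Dapr's HTTP service-invocation pattern:
#   http://<host>:<dapr-http-port>/v1.0/invoke/<app-id>/method/<method-name>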
# assume starting from an empty store
#---------------- Test: follow non-existing relation ----------------#
print("------ follow non-existent users ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_id': '2233',
'follow_id': 'bilibili',
}
r = requests.post(follow_url, json=payload)
print(r.text)
print("------ Check follow list of 2233 (bilibili) ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_ids': ['2233']
}
r = requests.get(getfollow_url, json=payload)
print(r.text)
print("------ Check follower list of bilibili (2233) ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_ids': ['bilibili']
}
r = requests.get(getfollower_url, json=payload)
print(r.text)
#---------------- Test: repetitive follow ----------------#
print("\n------ follow repetitive users ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_id': '2233',
'follow_id': 'bilibili',
}
r = requests.post(follow_url, json=payload)
print(r.text)
print("------ Check follow list of 2233 (bilibili) ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_ids': ['2233']
}
r = requests.get(getfollow_url, json=payload)
print(r.text)
print("------ Check follower list of bilibili (2233) ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_ids': ['bilibili']
}
r = requests.get(getfollower_url, json=payload)
print(r.text)
#---------------- Test: more follow ----------------#
print("\n------ follow of another user ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_id': 'BaiNianJi',
'follow_id': 'bilibili',
}
r = requests.post(follow_url, json=payload)
print(r.text)
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_id': 'BaiNianJi',
'follow_id': '2233',
}
r = requests.post(follow_url, json=payload)
print(r.text)
print("------ Check follow list of BaiNianJi (2233, bilibili) ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_ids': ['BaiNianJi']
}
r = requests.get(getfollow_url, json=payload)
print(r.text)
print("------ Check follower list of 2233 (BaiNianJi) ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_ids': ['2233']
}
r = requests.get(getfollower_url, json=payload)
print(r.text)
print("------ Check follower list of bilibili (2233, BaiNianJi) ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_ids': ['bilibili']
}
r = requests.get(getfollower_url, json=payload)
print(r.text)
#---------------- Test: unfollow non-existing user ----------------#
print("\n------ unfollow non-existing user ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_id': '2233',
'unfollow_id': 'AcNiang',
}
r = requests.post(unfollow_url, json=payload)
print(r.text)
print("------ Check follower list of 2233 (BaiNianJi) ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_ids': ['2233']
}
r = requests.get(getfollower_url, json=payload)
print(r.text)
#---------------- Test: unfollow existing user ----------------#
print("\n------ unfollow existing user ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_id': 'BaiNianJi',
'unfollow_id': 'bilibili',
}
r = requests.post(unfollow_url, json=payload)
print(r.text)
print("------ Check follower list of bilibili (BaiNianJi) ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_ids': ['bilibili']
}
r = requests.get(getfollower_url, json=payload)
print(r.text)
print("------ Check follow list of BaiNianJi (2233) ------")
payload = {
'send_unix_ms': int(time.time() * 1000),
'user_ids': ['BaiNianJi']
}
r = requests.get(getfollow_url, json=payload)
print(r.text)
|
69df84d27f76d380cddfe300a2b1bb9778f32e41
|
b08798b5b9b1aefa557fcf5aae2d7fcfc8310f32
|
/beets/mediafile.py
|
46288a71dc645079362775ef1e8594efc33e47e1
|
[
"MIT"
] |
permissive
|
beetbox/beets
|
f0f361fafd57977497e1981f27946fd52d428b27
|
0e5ade4f711dbf563d35c290affb0254eee41235
|
refs/heads/master
| 2023-09-01T20:50:06.125904
| 2023-08-27T19:07:13
| 2023-08-27T19:07:13
| 827,590
| 8,977
| 1,768
|
MIT
| 2023-09-13T02:33:14
| 2010-08-09T23:17:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,186
|
py
|
mediafile.py
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import mediafile
import warnings
warnings.warn(
"beets.mediafile is deprecated; use mediafile instead",
# Show the location of the `import mediafile` statement as the warning's
# source, rather than this file, such that the offending module can be
# identified easily.
stacklevel=2,
)
# Import everything from the mediafile module into this module.
for key, value in mediafile.__dict__.items():
if key not in ['__name__']:
globals()[key] = value
# Cleanup namespace.
del key, value, warnings, mediafile
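# Example (illustrative): user code that still does `from beets import
# mediafile` keeps working, but emits a UserWarning ("beets.mediafile is
# deprecated; use mediafile instead") attributed to the importing module,
# not to this shim, because of stacklevel=2 above.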
|
422c4e0d04c3bff09ace19a80f8cdc9177f38515
|
ab40571d5051ad53c0f205fa797ba36eac516d06
|
/language/compir/transform/apply_transformation.py
|
4c202b18d67fb5969a61a0704486cd7b7a784b30
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
google-research/language
|
e941b1a92ab46d40d8d03bb0c314905cb6902ce2
|
ac9447064195e06de48cc91ff642f7fffa28ffe8
|
refs/heads/master
| 2023-08-24T23:10:13.207294
| 2023-05-25T20:47:18
| 2023-05-25T22:29:27
| 153,201,352
| 1,567
| 371
|
Apache-2.0
| 2023-07-06T23:03:15
| 2018-10-16T00:58:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,954
|
py
|
apply_transformation.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main script for applying reversible and lossy transformation."""
from absl import app
from absl import flags
from language.compir.transform import apply_transformation_utils
FLAGS = flags.FLAGS
flags.DEFINE_enum(
"dataset",
"scan",
["scan", "cfq", "atis", "geo", "scholar"],
"The dataset to use.",
)
flags.DEFINE_enum(
"split",
"iid",
["iid", "mcd1", "mcd2", "mcd3", "template", "length", "turnleft"],
"The split to use (iid or compositional).",
)
# The following transformations prepare data for seq2seq_1.
# none (no transformation), rir (reversible), lird (lossy-direct),
# lird_rir (lossy-direct and reversible), lirind (lossy-indirect),
# lirind_rir (lossy-indirect and reversible).
# The following transformations prepare data for seq2seq_2, which recovers
# programs in the original formalism given the utterance and a lossy
# representation, where, e.g., lird2 expects predictions for the test set made
# by training seq2seq_1 on the data created by the lird transformation.
# lird2 (lossy-direct), lird_rir2 (lossy-direct and reversible),
# lirind2 (lossy indirect), lirind_rir2 (lossy-indirect and reversible).
flags.DEFINE_enum(
"transformation", "none", [
"none", "rir", "lird", "lird_rir", "lirind", "lirind_rir", "lird2",
"lird_rir2", "lirind2", "lirind_rir2"
], "The transformation to be applied when preparing data for seq2seq_1 or"
"seq2seq_2.")
flags.DEFINE_string("train_data_path", "", "Path to the training data.")
flags.DEFINE_string("test_data_path", "", "Path to the test data.")
flags.DEFINE_string(
"prediction_path", None,
"Path to test data predictions, relevant when preparing data for seq2seq_2,"
"when recovering programs from lossy intermediate representaitons.")
flags.DEFINE_string("output_path", "", "Path where output files are written.")
def main(argv):
if len(argv) > 1:
raise app.UsageError("Too many command-line arguments.")
apply_transformation_utils.transform(FLAGS.dataset, FLAGS.split,
FLAGS.transformation,
FLAGS.train_data_path,
FLAGS.test_data_path, FLAGS.output_path,
FLAGS.prediction_path)
if __name__ == "__main__":
app.run(main)
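# Example invocation (illustrative; flag values come from the enums above and
# the paths are placeholders, not real files):
#   python -m language.compir.transform.apply_transformation \
#     --dataset=scan --split=mcd1 --transformation=rir \
#     --train_data_path=/tmp/train.tsv --test_data_path=/tmp/test.tsv \
#     --output_path=/tmp/compir_out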
|
1dc0583776fc6ef502836d20403ec9d076b517ce
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/chrome/browser/extensions/api/image_writer_private/DEPS
|
911cdbeda57f15272f79b5627e7d80b149411765
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 44
|
DEPS
|
include_rules = [
"+device/udev_linux",
]
|
|
67061d167af808a573b42dc61051370975cc3d2c
|
e4b940707c32882d248fda5cccdf513af8f52188
|
/src/byro/members/signals.py
|
6e0ffa0570f2047ed8517c66033173969f5ae2e1
|
[
"Apache-2.0"
] |
permissive
|
byro/byro
|
5ca0d5cd6470ee18400432e85420d5594f54d927
|
3b757a4fd567a352f34c9e6638408bc98d144d03
|
refs/heads/main
| 2023-09-02T04:16:12.817733
| 2023-08-01T23:45:44
| 2023-08-02T11:44:34
| 100,076,395
| 144
| 56
|
Apache-2.0
| 2023-09-12T23:39:11
| 2017-08-11T23:21:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
signals.py
|
import django.dispatch
new_member_mail_information = django.dispatch.Signal()
"""
Receives the new member as signal. Response will be added to the email
welcoming the new member.
"""
new_member_office_mail_information = django.dispatch.Signal()
"""
Receives the new member as signal. Response will be added to the email
notifying the office about the new member.
"""
new_member = django.dispatch.Signal()
"""
Receives the new member as signal. If an exception is raised, the error
message will be displayed in the frontend as a warning.
"""
leave_member_mail_information = django.dispatch.Signal()
"""
Receives the leaving member as signal. Response will be added to the email
confirming termination to the member.
"""
leave_member_office_mail_information = django.dispatch.Signal()
"""
Receives the leaving member as signal. Response will be added to the email
notifying the office about the termination of the member.
"""
leave_member = django.dispatch.Signal()
"""
Receives the leaving member as signal. If an exception is raised, the error
message will be displayed in the frontend as a warning.
"""
update_member = django.dispatch.Signal()
"""
Sent when a member is updated via the office form collection at
members/view/{id}/data. The signal receives the request and the form_list as
parameters. The changes will already have been saved at this point.
"""
|
f14dc058143f82dd9563650fe4d79a06e2a9cf5c
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/SQLAlchemy/sqlalchemy/sql/dml.pyi
|
ee31b23435b04446559eed63544c2cd216e15d4d
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 3,275
|
pyi
|
dml.pyi
|
from typing import Any
from . import roles
from .base import CompileState, DialectKWArgs, Executable, HasCompileState
from .elements import ClauseElement
from .selectable import HasCTE, HasPrefixes, ReturnsRows
class DMLState(CompileState):
isupdate: bool
isdelete: bool
isinsert: bool
def __init__(self, statement, compiler, **kw) -> None: ...
@property
def dml_table(self): ...
class InsertDMLState(DMLState):
isinsert: bool
include_table_with_column_exprs: bool
statement: Any
def __init__(self, statement, compiler, **kw) -> None: ...
class UpdateDMLState(DMLState):
isupdate: bool
include_table_with_column_exprs: bool
statement: Any
is_multitable: Any
def __init__(self, statement, compiler, **kw) -> None: ...
class DeleteDMLState(DMLState):
isdelete: bool
statement: Any
def __init__(self, statement, compiler, **kw) -> None: ...
class UpdateBase(roles.DMLRole, HasCTE, HasCompileState, DialectKWArgs, HasPrefixes, ReturnsRows, Executable, ClauseElement):
__visit_name__: str
named_with_column: bool
is_dml: bool
def params(self, *arg, **kw) -> None: ...
def with_dialect_options(self, **opt) -> None: ...
bind: Any
def returning(self, *cols) -> None: ...
@property
def exported_columns(self): ...
def with_hint(self, text, selectable: Any | None = ..., dialect_name: str = ...) -> None: ...
class ValuesBase(UpdateBase):
__visit_name__: str
select: Any
table: Any
def __init__(self, table, values, prefixes) -> None: ...
def values(self, *args, **kwargs) -> None: ...
def return_defaults(self, *cols) -> None: ...
class Insert(ValuesBase):
__visit_name__: str
select: Any
include_insert_from_select_defaults: bool
is_insert: bool
def __init__(
self,
table,
values: Any | None = ...,
inline: bool = ...,
bind: Any | None = ...,
prefixes: Any | None = ...,
returning: Any | None = ...,
return_defaults: bool = ...,
**dialect_kw,
) -> None: ...
def inline(self) -> None: ...
def from_select(self, names, select, include_defaults: bool = ...) -> None: ...
class DMLWhereBase:
def where(self, *whereclause) -> None: ...
def filter(self, *criteria): ...
def filter_by(self, **kwargs): ...
@property
def whereclause(self): ...
class Update(DMLWhereBase, ValuesBase):
__visit_name__: str
is_update: bool
def __init__(
self,
table,
whereclause: Any | None = ...,
values: Any | None = ...,
inline: bool = ...,
bind: Any | None = ...,
prefixes: Any | None = ...,
returning: Any | None = ...,
return_defaults: bool = ...,
preserve_parameter_order: bool = ...,
**dialect_kw,
) -> None: ...
def ordered_values(self, *args) -> None: ...
def inline(self) -> None: ...
class Delete(DMLWhereBase, UpdateBase):
__visit_name__: str
is_delete: bool
table: Any
def __init__(
self,
table,
whereclause: Any | None = ...,
bind: Any | None = ...,
returning: Any | None = ...,
prefixes: Any | None = ...,
**dialect_kw,
) -> None: ...
|
f93479bd762f99f902119cefcb2b54202fc2c349
|
730430ba3b45d5728ef044863598199bfa33aaaa
|
/benchmark/torch/NeurIPS2020-Learning-to-Run-a-Power-Network-Challenge/track2/evaluate.py
|
9de5bf39d8cb033108a25e47dd9fcd6e50a3e98a
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PARL
|
062d1b4a5335553be6cdfc33ad12f07ebbcd7310
|
3bb5fe36d245f4d69bae0710dc1dc9d1a172f64d
|
refs/heads/develop
| 2023-08-09T02:12:39.741551
| 2023-05-19T17:52:25
| 2023-05-19T17:52:25
| 131,044,128
| 3,818
| 988
|
Apache-2.0
| 2023-07-28T03:59:20
| 2018-04-25T17:54:22
|
Python
|
UTF-8
|
Python
| false
| false
| 3,707
|
py
|
evaluate.py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import os
from grid2op.Runner import Runner
from l2rpn_baselines.utils.save_log_gif import save_log_gif
from rl_agent import RLAgent
import argparse
import grid2op
from lightsim2grid.LightSimBackend import LightSimBackend
def parse_args():
parser = argparse.ArgumentParser(description="Eval baseline RLAgent")
parser.add_argument(
"--load_path",
default='./saved_files',
help="The path to the model [.h5]")
parser.add_argument(
"--logs_path",
required=False,
default='./logs_path',
type=str,
help="Path to output logs directory")
parser.add_argument(
"--nb_episode",
required=False,
default=1,
type=int,
help="Number of episodes to evaluate")
parser.add_argument(
"--nb_process",
required=False,
default=1,
type=int,
help="Number of cores to use")
parser.add_argument(
"--max_steps",
required=False,
default=-1,
type=int,
help="Maximum number of steps per scenario")
parser.add_argument(
"--save_gif", action='store_true', help="Enable GIF Output")
parser.add_argument(
"--verbose", action='store_true', help="Verbose runner output")
return parser.parse_args()
def evaluate(env,
load_path="saved_files",
logs_path=None,
nb_episode=1,
nb_process=1,
max_steps=-1,
verbose=False,
save_gif=False,
**kwargs):
runner_params = env.get_params_for_runner()
runner_params["verbose"] = verbose
# Create the agent (this piece of code can change)
agent = RLAgent(env.action_space)
# Load weights from file (for example)
agent.load(load_path)
# Build runner
runner = Runner(**runner_params, agentClass=None, agentInstance=agent)
# you can do stuff with your model here
# start the runner
res = runner.run(
path_save=logs_path,
nb_episode=nb_episode,
nb_process=nb_process,
max_iter=max_steps,
pbar=False)
# Print summary
print("Evaluation summary:")
for _, chron_name, cum_reward, nb_time_step, max_ts in res:
msg_tmp = "\tFor chronics located at {}\n".format(chron_name)
msg_tmp += "\t\t - cumulative reward: {:.6f}\n".format(cum_reward)
msg_tmp += "\t\t - number of time steps completed: {:.0f} / {:.0f}".format(
nb_time_step, max_ts)
print(msg_tmp)
if save_gif:
save_log_gif(logs_path, res)
if __name__ == "__main__":
"""
This is a possible implementation of the eval script.
"""
args = parse_args()
backend = LightSimBackend()
env = grid2op.make('l2rpn_neurips_2020_track2_small', backend=backend)
evaluate(
env,
load_path=args.load_path,
logs_path=args.logs_path,
nb_episode=args.nb_episode,
nb_process=args.nb_process,
max_steps=args.max_steps,
verbose=args.verbose,
save_gif=args.save_gif)
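# Example invocation (illustrative; paths and counts are placeholders):
#   python evaluate.py --load_path ./saved_files --logs_path ./logs_path \
#       --nb_episode 10 --nb_process 1 --save_gif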
|